path (stringlengths 7-265) | concatenated_notebook (stringlengths 46-17M)
---|---
django/notebooks/commit/Frontpage order windowing function.ipynb | ###Markdown
Use SQL in PostgreSQL to reorder frontpage stories efficiently.
###Code
import sqlparse
from django.db.models import Func, Value, BooleanField
from django.db.models.functions import Rank, DenseRank, RowNumber, Cast
from django.db.models import F, Window, DurationField, ExpressionWrapper, DateTimeField, IntegerField, FloatField
from django.db.models import OuterRef, Subquery, Sum
class Days(Func):
"""Cast float field to Interval in days"""
output_field=DurationField()
template="to_char(%(expressions)s, 'S9990.0999 \"days\"')::interval"
class Epoch(Func):
"""Get epoch timestamp from date time field """
output_field=FloatField()
template="extract(epoch from %(expressions)s ) / 3600"
class English(Func):
"""Is the language english"""
output_field=BooleanField()
template="%(expressions)s = 'en'"
adjusted_publication_time=F('story__publication_date') + Days('priority')
adjusted_ranking = Window(expression=RowNumber(), partition_by=[English('story__language')], order_by=adjusted_publication_time.desc(nulls_last=True))
qry = FrontpageStory.objects.published().annotate(ranking=adjusted_ranking).values('id', 'ranking')
raw_query = sqlparse.format(str(qry.query), reindent=True, keyword_case='upper')
print(raw_query)
update_ordering = f"""
WITH ordered_frontpage AS ( {raw_query} )
UPDATE "frontpage_frontpagestory" SET "order" = "ordered_frontpage"."ranking"
FROM "ordered_frontpage"
WHERE "frontpage_frontpagestory"."id" = "ordered_frontpage"."id";
"""
from django.db import connection
with connection.cursor() as cursor:
cursor.execute(update_ordering)
print(update_ordering)
FrontpageStory.objects.update(order=0)
FrontpageStory.objects.first().save()
#FrontpageStory.objects.reorder()
list(FrontpageStory.objects.all().values('order', 'story__language', 'pk', 'headline', 'story__publication_date').order_by('order'))[:10]
qry = FrontpageStory.objects.annotate(ranking=adjusted_ranking).values('id', 'ranking', 'order')
raw_query = sqlparse.format(str(qry.query), reindent=True, keyword_case='upper')
print(raw_query)
list(qry[:20])
###Output
_____no_output_____ |
additionalNotebooks/Metadata.ipynb | ###Markdown
Installation and imports
###Code
!pip install kfmd --upgrade --user
!pip install pandas --upgrade --user
from kfmd import metadata
import pandas
from datetime import datetime
###Output
Collecting kfmd
Downloading https://files.pythonhosted.org/packages/cf/72/048a49042dacd93925f6f4253cb765aeddef34da4cbec05066dc1ac555f5/kfmd-0.1.8.tar.gz
Building wheels for collected packages: kfmd
Building wheel for kfmd (setup.py) ... done
Stored in directory: /home/jovyan/.cache/pip/wheels/3d/ef/17/5f5099e588c582d66506547e0bd28bd7071959137a88b110ca
Successfully built kfmd
Installing collected packages: kfmd
Successfully installed kfmd-0.1.8
WARNING: You are using pip version 19.1.1, however version 19.3.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
Requirement already up-to-date: pandas in ./.local/lib/python3.6/site-packages (0.25.3)
Requirement already satisfied, skipping upgrade: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas) (2019.2)
Requirement already satisfied, skipping upgrade: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas) (2.8.0)
Requirement already satisfied, skipping upgrade: numpy>=1.13.3 in /usr/local/lib/python3.6/dist-packages (from pandas) (1.16.4)
Requirement already satisfied, skipping upgrade: six>=1.5 in /usr/lib/python3/dist-packages (from python-dateutil>=2.6.1->pandas) (1.11.0)
WARNING: You are using pip version 19.1.1, however version 19.3.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
###Markdown
Create a workspace, run and execution
###Code
ws1 = metadata.Workspace(
# Connect to metadata-service in namespace kubeflow in the k8s cluster.
backend_url_prefix="metadata-service.kubeflow.svc.cluster.local:8080",
name="ws1",
description="a workspace for testing",
labels={"n1": "v1"})
r = metadata.Run(
workspace=ws1,
name="run-" + datetime.utcnow().isoformat("T") ,
description="a run in ws_1",
)
exec = metadata.Execution(
name = "execution" + datetime.utcnow().isoformat("T") ,
workspace=ws1,
run=r,
description="execution example",
)
###Output
_____no_output_____
###Markdown
Log data set, model and its evaluation
###Code
data_set = exec.log_input(
metadata.DataSet(
description="an example data",
name="mytable-dump",
owner="[email protected]",
uri="file://path/to/dataset",
version="v1.0.0",
query="SELECT * FROM mytable"))
model = exec.log_output(
metadata.Model(
name="MNIST",
description="model to recognize handwritten digits",
owner="[email protected]",
uri="gcs://my-bucket/mnist",
model_type="neural network",
training_framework={
"name": "tensorflow",
"version": "v1.0"
},
hyperparameters={
"learning_rate": 0.5,
"layers": [10, 3, 1],
"early_stop": True
},
version="v0.0.1",
labels={"mylabel": "l1"}))
metrics = exec.log_output(
metadata.Metrics(
name="MNIST-evaluation",
description="validating the MNIST model to recognize handwritten digits",
owner="[email protected]",
uri="gcs://my-bucket/mnist-eval.csv",
data_set_id=data_set.id,
model_id=model.id,
metrics_type=metadata.Metrics.VALIDATION,
values={"accuracy": 0.95},
labels={"mylabel": "l1"}))
###Output
_____no_output_____
###Markdown
List all the models in the workspace
###Code
pandas.DataFrame.from_dict(ws1.list(metadata.Model.ARTIFACT_TYPE_NAME))
###Output
_____no_output_____
###Markdown
Get basic lineage
###Code
print("model id is %s\n" % model.id)
###Output
model id is 2
###Markdown
Find the execution that produces this model.
###Code
output_events = ws1.client.list_events2(model.id).events
assert len(output_events) == 1
execution_id = output_events[0].execution_id
print(execution_id)
###Output
1
###Markdown
Find all events related to that execution.
###Code
all_events = ws1.client.list_events(execution_id).events
assert len(all_events) == 3
print("\nAll events related to this model:")
pandas.DataFrame.from_dict([e.to_dict() for e in all_events])
###Output
All events related to this model:
|
STDA Workshop-Aufgaben.ipynb | ###Markdown
Introduction to Python Why Python? Python is a so-called general-purpose programming language. It is widely used in a large number of fields, in particular web and internet development, scientific computing, and teaching. One of Python's most important goals is to enable readable and clear program code, which is why the language is generally considered easy to learn. Python also comes with a large ecosystem of libraries, frameworks, and tools, so that performant standard solutions already exist for many use cases. The Language Python is imperative: Imperative languages use sequential statements to change the state of the program.
###Code
a = 5
b = 3
c = a * b
print("a * b = %d" % c)
###Output
a * b = 15
###Markdown
Python is functional: Functional languages make it possible to express a program, or parts of it, in the form of mathematical functions in order to abstract away details of the actual execution.
###Code
even_numbers = [n for n in range(20) if n % 2 == 0]
print(even_numbers)
###Output
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
###Markdown
Python is procedural: Procedural languages group program segments that are executed repeatedly into so-called procedures (also called functions).
###Code
def fib(n):
if n == 1 or n == 0:
return 1
else:
return fib(n-1) + fib(n-2)
print("fib(3) = %d" % fib(3))
print("fib(5) = %d" % fib(5))
print("fib(8) = %d" % fib(8))
###Output
fib(3) = 3
fib(5) = 8
fib(8) = 34
###Markdown
Exercises Importing: So that we do not have to redefine all basic functions for every new program, Python lets us import so-called packages and modules. The first step when writing a program is therefore usually to import the required functions and classes. Syntax: `[from <package>] import <name> [as <alias>]` Exercise: Import `numpy` under the name `np`, the module `pyplot` from the package `matplotlib` under the name `plt`, and the functions `sin` and `cos` from `math`. Afterwards, the control code that follows should plot the function sin on the interval [0, 2pi). Check
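For reference, a possible solution to the import exercise (a sketch; the exercise intends you to write this yourself, and the names are exactly what the check cell below expects):

```python
import numpy as np                # arrays, np.arange, np.pi, np.vectorize
import matplotlib.pyplot as plt   # the pyplot module from the matplotlib package
from math import sin, cos         # scalar trigonometric functions
```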
###Code
# The spacing between x values
dx = 0.1
# All values in [0, 2*pi) spaced dx apart:
# xs = [0, 0.1, 0.2, ... 2pi - 0.1]
xs = np.arange(0, 2 * np.pi, dx)
# Evaluate sin for every value in xs:
ys = np.vectorize(sin)(xs)
# Plot the graph of sin as a blue dash-dotted line:
plt.plot(xs, ys, '-.b')
plt.show()
###Output
_____no_output_____
###Markdown
Defining a Function Exercise: Define a function `y(t)` with the following mapping rule:\begin{equation} y = \frac{\sin(t^2)}{t}\end{equation}as well as the derivative `y_d(t, y)` with:\begin{equation}\begin{split} \dot y &= 2\cos(t^2) - \frac{\sin(t^2)}{t^2} \\ &= 2\cos(t^2) - \frac{y}{t}\end{split}\end{equation}For this you need: - `def f(x):` to define a function - `return x` to return a value - `x**y` to compute $x^y$ Check
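A possible solution sketch matching the formulas above (the check cell below verifies the exact values):

```python
from math import sin, cos

def y(t):
    # y(t) = sin(t^2) / t
    return sin(t**2) / t

def y_d(t, y):
    # dy/dt = 2*cos(t^2) - sin(t^2)/t^2 = 2*cos(t^2) - y/t
    return 2 * cos(t**2) - y / t
```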
###Code
# With assert we can verify at runtime that certain conditions hold.
# This also shows line continuation with \ and inserting variable values into a string via %f.
assert y(1) == 0.8414709848078965, \
"y(1) should be 0.8414709848078965, but is %f" % y(1)
assert y_d(1, 1) == 0.08060461173627953, \
"y_d(1, 1) should be 0.08060461173627953, but is %f" % y_d(1, 1)
###Output
_____no_output_____
###Markdown
Simulation: One possible application of Python is simulation, and one of the simplest simulation methods is the so-called explicit Euler method:\begin{equation} y_{j+1} = y_j + \Delta t \cdot f(t_j, y_j)\end{equation}where $f(t, y)$ satisfies:\begin{equation} \dot y = f(t, y)\end{equation} Exercise: For $f(t, y)$ we use the already defined function `y_d(t, y)` in the following part. So we still need a function `expl_euler(f, t, y, dt=0.1)`, which will later be passed our function as f, the time t, and the current value y. Check
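A minimal sketch of the Euler step together with a `simulate` helper; the `simulate` signature is inferred from the check cell below and is therefore an assumption:

```python
import numpy as np

def expl_euler(f, t, y, dt=0.1):
    # One explicit Euler step: y_{j+1} = y_j + dt * f(t_j, y_j)
    return y + dt * f(t, y)

def simulate(f, t0, y0, dt, n_steps):
    # Collect n_steps successive approximations, starting from y0 at time t0
    ys = np.zeros(n_steps)
    t, y = t0, y0
    for j in range(n_steps):
        ys[j] = y
        y = expl_euler(f, t, y, dt)
        t += dt
    return ys
```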
###Code
# Our simulation parameters:
dt = 0.1
n_steps = 60
y0 = 0
t0 = 0.1
# Run the simulation for n_steps steps starting from y0 at time t0:
ys = simulate(y_d, t0, y0, dt, n_steps)
# Plot the simulation result:
ts = np.arange(t0, t0 + n_steps * dt, dt)
plt.plot(ts, ys)
plt.legend(["Simulation"])
plt.show()
###Output
_____no_output_____
###Markdown
Error Analysis: To estimate how large the errors made by the explicit Euler method are, we can compare our simulation result with the known function `y(t)`. Exercise: Compute a list `true_ys` by evaluating `y(t)` over `ts`. With it you can then compute the absolute deviations between the simulation and the true results as `error`, and their cumulative sum as `cum_error`. Check
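One way to compute the requested quantities (a sketch that reuses `y`, `ts`, and `ys` from the cells above; the variable names follow the check cell below):

```python
import numpy as np

# Exact solution evaluated at every simulated time point
true_ys = np.vectorize(y)(ts)

# Absolute deviation between simulation and exact solution, and its running total
error = np.abs(ys - true_ys)
cum_error = np.cumsum(error)
```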
###Code
# Set the figure dimensions
plt.figure(figsize=(15,4))
# Two plots are drawn side by side here (1 row, 2 columns):
plt.subplot(1, 2, 1)
plt.plot(ts, true_ys, 'b')
plt.plot(ts, ys, '-.g')
plt.legend(["y(t)", "Simulation"])
plt.subplot(1, 2, 2)
plt.plot(ts, error, '-.r')
plt.plot(ts, cum_error, '-.m')
plt.legend(["Error", "Cumulative Error"])
plt.show()
###Output
_____no_output_____ |
examples/lgt.ipynb | ###Markdown
LGT Quick Start LGT stands for Local and Global Trend, which is an important model type in the Orbit package. In the model equation, there is a local trend term and a global trend term. In this notebook we will show how to use Orbit LGT models with the US unemployment claims data.**Note: Negative response values are not allowed in the LGT model, due to the existence of the global trend term.**
###Code
import pandas as pd
import numpy as np
from orbit.models.lgt import LGTMAP, LGTAggregated, LGTFull
from orbit.diagnostics.plot import plot_predicted_data
from orbit.diagnostics.plot import plot_predicted_components
from orbit.utils.dataset import load_iclaims
###Output
_____no_output_____
###Markdown
Data *iclaims_example* is a dataset containing the weekly initial claims for US unemployment benefits against a few related google trend queries (unemploy, filling and job) from Jan 2010 - June 2018. This aims to mimic the dataset from the paper [Predicting the Present with Bayesian Structural Time Series](https://people.ischool.berkeley.edu/~hal/Papers/2013/pred-present-with-bsts.pdf) by SCOTT and VARIAN (2014).Number of claims are obtained from [Federal Reserve Bank of St. Louis](https://fred.stlouisfed.org/series/ICNSA) while google queries are obtained through [Google Trends API](https://trends.google.com/trends/?geo=US).Note that the dataset is transformed by natural log before fitting in order to be fitted as a multiplicative model.
###Code
# load data
df = load_iclaims()
# define date and response column
date_col = 'week'
response_col = 'claims'
df.dtypes
df.head(5)
###Output
_____no_output_____
###Markdown
Train / Test Split
###Code
test_size = 52
train_df = df[:-test_size]
test_df = df[-test_size:]
train_df.head(5)
###Output
_____no_output_____
###Markdown
LGT Model In orbit, we have three types of LGT models, `LGTMAP`, `LGTAggregated` and `LGTFull`.Orbit follows the sklearn model API. We can create an instance of the Orbit class and then call its fit and predict methods. LGTMAP LGT model for MAP (Maximum a Posteriori) prediction
###Code
lgt = LGTMAP(
response_col=response_col,
date_col=date_col,
seasonality=52,
seed=8888,
)
%%time
lgt.fit(df=train_df)
predicted_df = lgt.predict(df=test_df)
_ = plot_predicted_data(training_actual_df=train_df, predicted_df=predicted_df,
date_col=date_col, actual_col=response_col,
test_actual_df=test_df, title='Prediction with LGTMAP Model')
###Output
_____no_output_____
###Markdown
LGTFull LGT model for full prediction. In full prediction, the prediction occurs as a function of each parameter posterior sample, and the prediction results are aggregated after prediction. Prediction will always return the median (aka 50th percentile) along with any additional percentiles that are specified.
###Code
lgt = LGTFull(
response_col=response_col,
date_col=date_col,
seasonality=52,
seed=8888,
)
%%time
lgt.fit(df=train_df)
predicted_df = lgt.predict(df=test_df)
predicted_df.tail(5)
_ = plot_predicted_data(training_actual_df=train_df, predicted_df=predicted_df,
date_col=lgt.date_col, actual_col=lgt.response_col,
test_actual_df=test_df, title='Prediction with LGTFull Model')
###Output
_____no_output_____
###Markdown
LGTAggregated LGT model for aggregated posterior prediction. In aggregated prediction, the parameter posterior samples are reduced using `aggregate_method ({ 'mean', 'median' })` before performing a single prediction.
###Code
lgt = LGTAggregated(
response_col=response_col,
date_col=date_col,
seasonality=52,
seed=8888,
)
%%time
lgt.fit(df=train_df)
predicted_df = lgt.predict(df=test_df)
predicted_df.tail(5)
_ = plot_predicted_data(training_actual_df=train_df, predicted_df=predicted_df,
date_col=lgt.date_col, actual_col=lgt.response_col,
test_actual_df=test_df, title='Prediction with LGTAggregated Model')
###Output
_____no_output_____
###Markdown
LGT Quick Start LGT stands for Local and Global Trend, which is an important model type in the Orbit package. In the model equation, there is a local trend term and a global trend term. In this notebook we will show how to use Orbit LGT models with the US unemployment claims data.**Note: Negative response values are not allowed in the LGT model, due to the existence of the global trend term.**
###Code
import pandas as pd
import numpy as np
from orbit.models.lgt import LGTMAP, LGTAggregated, LGTFull
from orbit.diagnostics.plot import plot_predicted_data, plot_predicted_components
from orbit.utils.dataset import load_iclaims
###Output
_____no_output_____
###Markdown
Data *iclaims_example* is a dataset containing the weekly initial claims for US unemployment benefits against a few related google trend queries (unemploy, filling and job) from Jan 2010 - June 2018. This aims to mimic the dataset from the paper [Predicting the Present with Bayesian Structural Time Series](https://people.ischool.berkeley.edu/~hal/Papers/2013/pred-present-with-bsts.pdf) by SCOTT and VARIAN (2014).Number of claims are obtained from [Federal Reserve Bank of St. Louis](https://fred.stlouisfed.org/series/ICNSA) while google queries are obtained through [Google Trends API](https://trends.google.com/trends/?geo=US).Note that the dataset is transformed by natural log before fitting in order to be fitted as a multiplicative model.
###Code
# load data
df = load_iclaims()
# define date and response column
DATE_COL = 'week'
RESPONSE_COL = 'claims'
df.dtypes
df.head()
###Output
_____no_output_____
###Markdown
Train / Test Split
###Code
test_size = 52
train_df = df[:-test_size]
test_df = df[-test_size:]
train_df.head()
###Output
_____no_output_____
###Markdown
LGT Model In orbit, we have three types of LGT models, `LGTMAP`, `LGTAggregated` and `LGTFull`.Orbit follows the sklearn model API. We can create an instance of the Orbit class and then call its fit and predict methods. LGTMAP LGT model for MAP (Maximum a Posteriori) prediction
###Code
lgt = LGTMAP(response_col=RESPONSE_COL,
date_col=DATE_COL,
seasonality=52,
seed=8888)
%%time
lgt.fit(df=train_df)
predicted_df = lgt.predict(df=test_df)
_ = plot_predicted_data(training_actual_df=train_df,
predicted_df=predicted_df,
date_col=DATE_COL,
actual_col=RESPONSE_COL,
test_actual_df=test_df,
title='Prediction with LGTMAP Model')
###Output
_____no_output_____
###Markdown
LGTFull LGT model for full prediction. In full prediction, the prediction occurs as a function of each parameter posterior sample, and the prediction results are aggregated after prediction. Prediction will always return the median (aka 50th percentile) along with any additional percentiles that are specified.
###Code
lgt = LGTFull(response_col=RESPONSE_COL,
date_col=DATE_COL,
seasonality=52,
seed=8888)
%%time
lgt.fit(df=train_df)
predicted_df = lgt.predict(df=test_df)
predicted_df.tail()
_ = plot_predicted_data(training_actual_df=train_df,
predicted_df=predicted_df,
date_col=DATE_COL,
actual_col=RESPONSE_COL,
test_actual_df=test_df,
title='Prediction with LGTFull Model')
###Output
_____no_output_____
###Markdown
LGTAggregated LGT model for aggregated posterior prediction. In aggregated prediction, the parameter posterior samples are reduced using `aggregate_method ({ 'mean', 'median' })` before performing a single prediction.
###Code
lgt = LGTAggregated(response_col=RESPONSE_COL,
date_col=DATE_COL,
seasonality=52,
seed=8888)
%%time
lgt.fit(df=train_df)
predicted_df = lgt.predict(df=test_df)
predicted_df.tail()
_ = plot_predicted_data(training_actual_df=train_df,
predicted_df=predicted_df,
date_col=DATE_COL,
actual_col=RESPONSE_COL,
test_actual_df=test_df,
title='Prediction with LGTAggregated Model')
###Output
_____no_output_____
###Markdown
LGT Quick Start LGT stands for Local and Global Trend, which is an important model type in the Orbit package. In the model equation, there is a local trend term and a global trend term. In this notebook we will show how to use Orbit LGT models with the US unemployment claims data.**Note: Negative response values are not allowed in the LGT model, due to the existence of the global trend term.**
###Code
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from orbit.models import LGT
from orbit.diagnostics.plot import plot_predicted_data, plot_predicted_components
from orbit.utils.dataset import load_iclaims
from orbit.utils.plot import get_orbit_style
plt.style.use(get_orbit_style())
###Output
_____no_output_____
###Markdown
Data *iclaims_example* is a dataset containing the weekly initial claims for US unemployment benefits against a few related google trend queries (unemploy, filling and job) from Jan 2010 - June 2018. This aims to mimic the dataset from the paper [Predicting the Present with Bayesian Structural Time Series](https://people.ischool.berkeley.edu/~hal/Papers/2013/pred-present-with-bsts.pdf) by SCOTT and VARIAN (2014).Number of claims are obtained from [Federal Reserve Bank of St. Louis](https://fred.stlouisfed.org/series/ICNSA) while google queries are obtained through [Google Trends API](https://trends.google.com/trends/?geo=US).Note that the dataset is transformed by natural log before fitting in order to be fitted as a multiplicative model.
###Code
# load data
df = load_iclaims()
# define date and response column
DATE_COL = 'week'
RESPONSE_COL = 'claims'
df.dtypes
df.head()
###Output
_____no_output_____
###Markdown
Train / Test Split
###Code
test_size = 52
train_df = df[:-test_size]
test_df = df[-test_size:]
train_df.head()
###Output
_____no_output_____
###Markdown
LGT Model In orbit, we have three types of LGT models, `LGTMAP`, `LGTAggregated` and `LGTFull`.Orbit follows the sklearn model API. We can create an instance of the Orbit class and then call its fit and predict methods. LGT-MAP LGT model for MAP (Maximum a Posteriori) prediction
###Code
lgt = LGT(response_col=RESPONSE_COL,
date_col=DATE_COL,
regressor_col=['sp500'],
seasonality=52,
estimator='stan-map',
seed=8888)
%%time
lgt.fit(df=train_df)
lgt.get_regression_coefs()
predicted_df = lgt.predict(df=test_df, decompose=True)
predicted_df.head()
_ = plot_predicted_data(training_actual_df=train_df,
predicted_df=predicted_df,
date_col=DATE_COL,
actual_col=RESPONSE_COL,
test_actual_df=test_df,
title='Prediction with LGTMAP Model')
###Output
_____no_output_____
###Markdown
LGT-MCMC LGT model for full prediction. In full prediction, the prediction occurs as a function of each parameter posterior sample, and the prediction results are aggregated after prediction. Prediction will always return the median (aka 50th percentile) along with any additional percentiles that are specified.
###Code
lgt = LGT(response_col=RESPONSE_COL,
date_col=DATE_COL,
estimator='stan-mcmc',
regressor_col=['sp500'],
seasonality=52,
seed=8888)
%%time
lgt.fit(df=train_df, point_method=None)
lgt.get_regression_coefs()
predicted_df = lgt.predict(df=test_df, decompose=True)
predicted_df.tail()
_ = plot_predicted_data(training_actual_df=train_df,
predicted_df=predicted_df,
date_col=DATE_COL,
actual_col=RESPONSE_COL,
test_actual_df=test_df,
title='Prediction with LGTFull Model')
lgt.fit(df=train_df, point_method='mean')
predicted_df = lgt.predict(df=test_df, decompose=True)
predicted_df.tail()
lgt.get_regression_coefs()
lgt.fit(df=train_df, point_method='median')
predicted_df = lgt.predict(df=test_df, decompose=True)
predicted_df.tail()
lgt.get_regression_coefs()
###Output
_____no_output_____
###Markdown
LGT-SVI
###Code
lgt = LGT(response_col=RESPONSE_COL,
date_col=DATE_COL,
estimator='pyro-svi',
regressor_col=['sp500'],
seasonality=52,
seed=8888,
num_steps=101)
%%time
lgt.fit(df=train_df, point_method=None)
lgt.get_regression_coefs()
predicted_df = lgt.predict(df=test_df, point_method=None, decompose=True)
predicted_df.tail()
_ = plot_predicted_data(training_actual_df=train_df,
predicted_df=predicted_df,
date_col=DATE_COL,
actual_col=RESPONSE_COL,
test_actual_df=test_df,
title='Prediction with LGTFull Model')
lgt.fit(df=train_df, point_method='mean')
predicted_df = lgt.predict(df=test_df, decompose=True)
predicted_df.tail()
lgt.fit(df=train_df, point_method='median')
predicted_df = lgt.predict(df=test_df, decompose=True)
predicted_df.tail()
###Output
INFO:root:Guessed max_plate_nesting = 2
|
src/inspection/pcl statistics.ipynb | ###Markdown
statistics on original data
###Code
_ = plt.hist(my_pcl[:,0].flatten(), bins='auto')
plt.title("X axis")
plt.show()
_ = plt.hist(my_pcl[:,1].flatten(), bins='auto')
plt.title("Y axis")
plt.show()
_ = plt.hist(my_pcl[:,2].flatten(), bins='auto')
plt.title("Z axis")
plt.show()
_ = plt.hist(my_pcl[:,3].flatten(), bins='auto')
plt.title("Labels")
plt.show()
###Output
_____no_output_____
###Markdown
After transformation statistics
###Code
t_pcl = my_pcl.copy()
t_pcl[:,2] = (t_pcl[:,2])
# t_pcl = t_pcl[t_pcl[:,2] >= 0]
_ = plt.hist(t_pcl[:,2].flatten(), bins='auto')
plt.title("Z axis")
plt.show()
###Output
_____no_output_____
###Markdown
Kitti
###Code
kitti_pcl = np.load("arr.npy")
kitti_pcl.shape
_ = plt.hist(kitti_pcl['x'], bins='auto')
plt.title("X axis")
plt.show()
_ = plt.hist(kitti_pcl['y'], bins='auto')
plt.title("Y axis")
plt.show()
_ = plt.hist(kitti_pcl['z'], bins='auto')
plt.title("Z axis")
plt.show()
arr = np.asarray(kitti_pcl)
t = np.zeros((arr.shape[0],4))
t[:,0] = arr['x']
t[:,1] = arr['y']
t[:,2] = arr['z']
t[:,3] = arr['intensity']
np.save("kitti_sample",t)
###Output
_____no_output_____ |
Bacteria_Ori_Analysis/Bacteria_Analysis.ipynb | ###Markdown
Title: Locating the Ori of Bacteria Author: Surur Khan Why Try To Locate the Ori?1. Gene Therapy - intentionally infect a patient who lacks a crucial gene with a viral vector containing an artificial gene that encodes a therapeutic protein. Once inside the cell, the vector replicates and eventually produces many copies of the therapeutic protein, which in turn treats the patient’s disease. To ensure that the vector actually replicates inside the cell, biologists must know where ori is in the vector’s genome and ensure that the genetic manipulations that they perform do not affect it(This usage is also referred to as Genetic Engineering). The scripts were tested against an E.Coli Genome dataset from the NCBI and yielded significant results for finding it's ori by analyzing the GC content changes and implications of a potential 9mer DnaA box due to a conservative sequence found in a relatively short segment of the Genome.2. Ensure Genetic Manipulations do not affect overall cell functions What I Test onThe package Bioezy (v 0.0.4) contains several scripts for analyzing genomic data, mainly to locate replication origins. For this analysis, the bacterium **E. Coli (Vibrio Cholerae)** is used. For Credibility, the nucleotide sequence appearing in the ori of E. Coli was predetermined as follows:atcaatgatcaacgtaagcttctaagcatgatcaaggtgctcacacagtttatccacaacctgagtggatgacatcaagataggtcgttgtatctccttcctctcgtactctcatgaccacggaaagatgatcaagagaggatgatttcttggccatatcgcaatgaatacttgtgacttgtgcttccaattgacatcttcagcgccatattgcgctggccaaggtgacggagcgggattacgaaagcatgatcatggctgttgttctgtttatcttgttttgactgagacttgttaggatagacggtttttcatcactgactagccaaagccttactctgcctgacatcgaccgtaaattgataatgaatttacatgcttccgcgacgatttacctcttgatcatcgatccgattgaagatcttcaattgttaattctcttgcctcgactcatagccatgatgagctcttgatcatgtttccttaaccctctattttttacggaagaatgatcaagctgctgctcttgatcatcgtttchereinafter referred to as **vibrio_cholerae_ori.txt**The actual Genome spans 1,108,250 nucleotides long, which can be found at:[E.Coli_Genome](https://bioinformaticsalgorithms.com/data/realdatasets/Replication/Vibrio_cholerae.txt)I will not be analyzing this, rather, I will be using a segment for the sake of runtime, denoted **"vibrio_cholerae_genome_short"** What I Look forThere is no shortage of agreement that DNA replication occurs in all cells as it is the nature of living organisms. This however begs the question, how does the cell know where to begin replication in this short region within the otherwise huge genome? It is known that a protein that initiates replication, **DnaA** by binding to a short segment in the ori, **DnaA Box**. My goal to locate these DnaA Boxes is based on a theorum by William Legrand in Edgar Allan Poe's story "The Gold-Bug":_Assuming DNA is a language of its own, locate frequent words within the ori because for various biological processes, certain nucleotide strings often appear surprisingly often in small regions of the genome_This is often because certain proteins can only bind to DNA if a specific string of nucleotides is present, and if there are more occurrences of the string, then it is more likely that binding will successfully occur. (It is also less likely that a mutation will disrupt the binding process.) Hence, I will extensively analyze the E. Coli Genome to look for its Ori by locating conservative sequences in a relatively short segment of the whole genome
###Code
from bioezy import bzy
import os
print(os.getcwd())
#Important note: frequent words is not usually used for finding frequent words because it has a runtime of O(n^2); (|Text| − k + 1) · (|Text| − k + 1) · k == |Text|^2 · k
#On specific ori region
with open("vibrio_cholerae_ori.txt","r") as input_file:
vc_ori = ''.join(line.rstrip() for line in input_file).lower() #manipulation to make it 1 consecutive string
input_file.close()
print(vc_ori)
FreqWords = bzy.frequent_words(vc_ori,9)
for kmer in FreqWords:
print(f' 9mer: {kmer} appeared {bzy.pattern_count(vc_ori,kmer)} times')
###Output
9mer: atgatcaag appeared 3 times
9mer: ctcttgatc appeared 3 times
9mer: tcttgatca appeared 3 times
9mer: cttgatcat appeared 3 times
###Markdown
Significance: We highlight a most frequent 9-mer instead of using some other value of k because experiments have revealed that bacterial DnaA boxes are usually nine nucleotides long. The probability that there exists a 9-mer appearing three or more times in a randomly generated DNA string of length 500 is approximately 1/1300. Important observation: atgatcaag and cttgatcat are reverse complements of each other, hence **we must now account for the reverse complement of each frequent 9-mer appearing as well!** We can reasonably deduce that DnaA does not need to specifically bind to either the 5' -> 3' forward (lagging) strand or the 3' -> 5' reverse (leading) strand. Before concluding that we have found the DnaA box of Vibrio cholerae, we'll check if there are other short regions in the Vibrio cholerae genome exhibiting multiple occurrences of atgatcaag or cttgatcat. Maybe these strings occur as repeats throughout the entire Vibrio cholerae genome, rather than just in the ori region. We will use the pattern matching function (together with the reverse-complement sketch below) to determine this.
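For illustration, a plain-Python reverse-complement helper of the kind relied on here (a sketch only; it is not the bioezy API):

```python
def reverse_complement(pattern):
    # Complement each base, then reverse the string (lowercase input assumed)
    complement = {'a': 't', 't': 'a', 'c': 'g', 'g': 'c'}
    return ''.join(complement[base] for base in reversed(pattern.lower()))

print(reverse_complement("atgatcaag"))  # -> cttgatcat
```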
###Code
print(bzy.pattern_matching("atgatcaag",vc_ori)) #Locations of atgatcaag in ori
print(bzy.pattern_matching("cttgatcat",vc_ori))
"""
TESTING ON SHORT GENOME FOR ATGATCAAG
"""
with open("vibrio_cholerae_genome_short.txt","r") as input_file:
vc_genome_short = ''.join(line.rstrip() for line in input_file).lower()
input_file.close()
#print(vc_genome)
print(bzy.pattern_matching("atgatcaag",vc_genome_short))
###Output
[116556, 149355, 151913, 152013, 152394, 186189, 194276, 200076, 224527, 307692, 479770, 610980, 653338, 679985, 768828, 878903, 985368]
###Markdown
Significance: After solving the Pattern Matching Problem, we discover that ATGATCAAG appears 17 times, at the starting positions listed above. With the exception of the three occurrences of ATGATCAAG in ori at starting positions 151913, 152013, and 152394, no other instances of ATGATCAAG form clumps, i.e., appear close to each other in a small region of the genome. However, a definite conclusion cannot be drawn without checking whether it even appears in known ori regions from other bacteria. It may be that the clumps of my 9-mers are statistically insignificant and have nothing to do with replication. Now the bacterium **Thermotoga petrophila**, an extremophile, will be examined to locate its ori. A potential discovery may be that different bacteria have different oris.
###Code
with open("thermotoga_petrophila.txt","r") as input_file:
tp_ori = ''.join(line.rstrip() for line in input_file).lower()
input_file.close()
FreqWords = bzy.frequent_words(tp_ori,9)
for kmer in FreqWords:
print(f' 9mer: {kmer} appeared {bzy.pattern_count(tp_ori,kmer)} times')
print(bzy.pattern_matching("acctaccac",tp_ori))
print("Other frequent 9mers that appear more than 2 but less than 5 times: AACCTACCA 3 AAACCTACC 3 ACCTACCAC 5 CCTACCACC 3 GGTAGGTTT 3 TGGTAGGTT 3")
###Output
9mer: acctaccac appeared 5 times
[184, 379, 390, 401, 479]
Other frequent 9mers that appear more than 2 but less than 5 times: AACCTACCA 3 AAACCTACC 3 ACCTACCAC 5 CCTACCACC 3 GGTAGGTTT 3 TGGTAGGTT 3
###Markdown
It is worth noting that AACCTACCA and TGGTAGGTT are reverse complements, as are AAACCTACC and GGTAGGTTT. The Caveat: Searching for “clumps” of either ATGATCAAG (reverse complement CTTGATCAT) or CCTACCACC (reverse complement GGTGGTAGG) is unlikely to help, since a new genome may use a completely different DnaA box. Instead of finding clumps of a specific k-mer, try to find every k-mer that forms a clump in the genome, hoping these shed light on the location of ori. The Clump Finding Problem: Slide a window of fixed length L along the genome, looking for a region where a k-mer appears several times in short succession. The parameter value L = 500 reflects the typical length of ori in bacterial genomes. Define a k-mer as a "clump" if it appears many times within a short interval of the genome. More formally, given integers L and t, a k-mer Pattern forms an (L, t)-clump inside a (longer) string Genome if there is an interval of Genome of length L in which this k-mer appears at least t times. (This definition assumes that the k-mer fits completely within the interval. It also does not take reverse complements into account yet.) e.g. TGCA forms a (25,3)-clump in the following Genome: gatcagcataagggtccC**TGCA**A**TGCA**TGACAAGCC**TGCA**GTtgttttac. From our previous examples of ori regions, ATGATCAAG forms a (500,3)-clump in the Vibrio cholerae genome, and CCTACCACC forms a (500,3)-clump in the Thermotoga petrophila genome. A naive sliding-window version of this search is sketched below.
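For clarity, a naive sliding-window clump finder that follows the definition above (an illustrative sketch with quadratic runtime, not bzy.find_clumps itself):

```python
def find_clumps_naive(genome, k, L, t):
    # Return every k-mer that forms an (L, t)-clump somewhere in genome
    clumps = set()
    for i in range(len(genome) - L + 1):
        window = genome[i:i + L]
        counts = {}
        for j in range(len(window) - k + 1):
            kmer = window[j:j + k]
            counts[kmer] = counts.get(kmer, 0) + 1
        clumps.update(kmer for kmer, count in counts.items() if count >= t)
    return clumps
```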
###Code
clumps = bzy.find_clumps(vc_genome_short,9,500,3)
print(len(clumps))
#Refer to scan_clumps_eff_algo for the true solution (sourced online)
###Output
acaatgagg ttcgagctc tcgagctct aaccggctg tgcgcatac gcgcatacg gccatccga cagcgtcta gtctattca ggctatgca ctatgcagg caggctact tggttcgta gtaagaact aagaacttt gaactttag gccttacct gctttagtc ctttagtcg tttagtcgt tagtcgtgg cgatctggg gtgtggctc tggctctcg cgcgtatgg gtatggtct gtctgtcta ttctaaccc aacccgcgc ccgcgctac tctgagtgc caggtctac ggtctactc gtctactcc ctactcctt cctttcggc gcactagtg gccatgggc caacgggca acgggcagc agcattact ctgttctta cttaaacgg ctggttcca ggttccaag ttccaaggc tgcttgtgg cttgtggcc ggccgtact tggtgcact ggtgcactg gtgcactgg ggtcacgca agagcgtgg gagcgtggt gtttcggtc tcggtctgg tggaacgtc
58
###Markdown
Using the scan_clumps program with **vibrio_cholerae_genome_full**, more than 1600 (500,3)-clumped 9-mers were found. The result is futile in the search for DnaA boxes, as the algorithm will locate ALL frequent k-mers in the region, many of which may occur purely by random chance. A new method must be tested. The Analysis of the Cytosine Content and its relevance: DNA replication is an asymmetric process; the fates of the forward (lagging) and reverse (leading) half strands are different. This is because DNA polymerase, the enzyme responsible for creating the complementary base sequence against each strand, can only traverse a template strand in the 3' -> 5' direction. The reverse half strand runs in this direction and can hence be replicated continuously; the same cannot be said for the forward half strand, which runs in the 5' -> 3' direction. For replication to occur on a forward half strand, the replication fork has to open by about 2000 nucleotides, allowing DNA polymerase to replicate the strand backwards towards ori. When this fork closes, replication pauses until it reopens. Due to this discontinuous replication, _Okazaki fragments_ are generated, which are partially replicated stretches of nucleotides. As a result, many primers are required to fully replicate a forward half strand, and once the primers are removed, DNA ligase must be used to join the fragments into one strand. The significance of this stems from the fact that, because the forward half strand has to wait for its fork to open before it is replicated, it spends most of its life **single stranded**. Without the base pairing of a complete double helix, the exposed nucleotides are subject to much higher mutation rates. In particular, **cytosine has a high tendency to mutate into thymine** through **deamination**. As a result, **the forward half strand has a much lower cytosine content than the reverse half strand**. Considering that the ori is flanked half by the forward and half by the reverse half strand, I anticipate that **half of the genome will have a decreasing cytosine content, indicating that the forward half strand is being observed**. Overall: G-C decreasing = reverse half strand (high C, low G); G-C increasing = forward half strand (low C, high G). The analysis is now directed towards the idea that **ori occurs where the reverse half strand transitions into the forward half strand, hence it will be located where an increasing cytosine content begins to decrease**. Generating a Skew Diagram: Compute Skew_{i+1}(Genome) from Skew_i(Genome) according to the nucleotide in position i of Genome. If this nucleotide is G, then Skew_{i+1}(Genome) = Skew_i(Genome) + 1; if this nucleotide is C, then Skew_{i+1}(Genome) = Skew_i(Genome) - 1; otherwise, Skew_{i+1}(Genome) = Skew_i(Genome). e.g. CATGGGCATCGGCCATACGCC has skew diagram 0 -1 -1 -1 0 1 2 1 1 1 0 1 2 1 0 0 0 0 -1 0 -1 -2; hence, ori will be found where the skew attains a **minimum**.
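A minimal sketch of the skew recurrence described above (illustrative only; bzy.minimum_skew plays this role in the next cell):

```python
def skew_values(genome):
    # Prefix skews: skew[i] = #G - #C over the first i nucleotides
    skew = [0]
    for base in genome.lower():
        skew.append(skew[-1] + (base == 'g') - (base == 'c'))
    return skew

def minimum_skew_positions(genome):
    # Positions where the skew attains its minimum: candidate ori locations
    skew = skew_values(genome)
    lowest = min(skew)
    return [i for i, value in enumerate(skew) if value == lowest]

print(skew_values("CATGGGCATCGGCCATACGCC"))  # reproduces the example above
```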
###Code
with open("Ecoli_genome.txt","r") as input_file:
ecoli_genome = ''.join(line.rstrip() for line in input_file).lower() #manipulation to make it 1 consecutive string
input_file.close()
# Running bzy.minimum_skew(vc_genome_short) is futile, because the algorithm is not robust enough to work with the million length sequence. It will continuously converge to a local optimum, which is not ideal
# For demonstration purposes, a sample input will be used
bzy.minimum_skew("TAAAGACTGCCGAGAGGCCAACACGAGTGCTAGAACGAGGGGCGTAAACGCGGGTCCGAT")
# Using a more robust analysis, the ori of E. coli is found at approximately position 3923620 of the genome, representing the global minimum of the skew.
###Output
_____no_output_____
###Markdown
Now the objective is to look for a hidden message representing a potential DnaA box near this location. Solving the Frequent Words Problem in a window of length 500 starting at position **3923620** reveals no 9-mers (along with their reverse complements) that appear three or more times. So even if we have located ori in E. coli, the location of its DnaA box remains elusive. However, accounting for mismatches, it is seen that in addition to the three occurrences of ATGATCAAG and three occurrences of its reverse complement CTTGATCAT, the Vibrio cholerae ori contains additional occurrences of ATGATCAAC and CATGATCAT, which differ from ATGATCAAG and CTTGATCAT by only a single nucleotide. The number of positions at which two strings differ is referred to as their "Hamming distance". From this, I developed the function approx_pattern_match to return every location at which a pattern occurs within the genome with at most a given number of mismatches. Furthermore, the function approx_pattern_count returns the number of occurrences of a pattern in the specified genome with at most a given number of mismatches; both helpers are sketched below. **For runtime's sake, this portion will be run on the already identified ori region of E. coli.**
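Hedged sketches of the two helpers just described (plain-Python illustrations; the actual bioezy signatures may differ, though the argument order below mirrors the calls in the next cell):

```python
def hamming_distance(p, q):
    # Number of positions at which two equal-length strings differ
    return sum(1 for a, b in zip(p, q) if a != b)

def approx_pattern_match(text, pattern, d):
    # Start positions where pattern occurs in text with at most d mismatches
    k = len(pattern)
    return [i for i in range(len(text) - k + 1)
            if hamming_distance(text[i:i + k], pattern) <= d]

def approx_pattern_count(pattern, text, d):
    # Number of occurrences of pattern in text with at most d mismatches
    return len(approx_pattern_match(text, pattern, d))
```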
###Code
bzy.approx_pattern_match(vc_genome_short,"atgatcaag",2)
# Note: there are 2581 occurrences
bzy.approx_pattern_count("atgatcaag",vc_genome_short,2)
###Output
_____no_output_____
###Markdown
Caveat: For these scripts, a naive approach would be to generate all 4^k k-mers Pattern, compute approx_pattern_count for each k-mer Pattern, and then find the k-mers with the maximum number of approximate occurrences. This is an inefficient approach, since many of the 4^k k-mers should not be considered because neither they nor their mutated versions (with up to d mismatches) appear in Text. Instead, the algorithm freq_words_mismatch will be used. It uses a single map that counts the number of times a given string has an approximate match in Text. For a given k-mer substring Pattern of Text, we need to add 1 to the count of every k-mer that has Hamming distance at most d from Pattern (the "d-neighborhood" of Pattern). Note that vc_ori is used here as opposed to Ecoli_genome.txt, since my algorithm's runtime is too slow to work with the million-nucleotide dataset.
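A sketch of the d-neighborhood generator such an approach relies on (illustrative; it reuses hamming_distance from the sketch above and is not necessarily how bzy.freq_words_mismatch is implemented):

```python
def neighbors(pattern, d):
    # All strings whose Hamming distance from pattern is at most d
    if d == 0:
        return {pattern}
    if len(pattern) == 1:
        return {'a', 'c', 'g', 't'}
    neighborhood = set()
    for suffix_neighbor in neighbors(pattern[1:], d):
        if hamming_distance(pattern[1:], suffix_neighbor) < d:
            for base in 'acgt':
                neighborhood.add(base + suffix_neighbor)
        else:
            neighborhood.add(pattern[0] + suffix_neighbor)
    return neighborhood
```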
###Code
bzy.freq_words_mismatch(vc_ori,9,1)
###Output
_____no_output_____ |
casa/Calibrator imaging.ipynb | ###Markdown
Apply calibration to gain calibrator and split out source
###Code
split(vis=msfile,
field=g_cal,
outputvis=splitted_ms,
datacolumn='corrected')
visstat(splitted_ms, axis='amp', field=g_cal)
plotms(vis=splitted_ms,
xaxis='time',
yaxis='amp',
correlation='XX,YY',
coloraxis='corr',
averagedata=True,
avgbaseline=True,
avgchannel='4096')
plotms(vis=splitted_ms,
xaxis='freq',
yaxis='amp',
correlation='XX,YY',
coloraxis='corr',
averagedata=True,
avgtime='3300',
avgbaseline=True)
with pt.table(splitted_ms+'/SPECTRAL_WINDOW') as tb:
ref_freq = tb.getcol("REF_FREQUENCY")[0] # Hz
print(tb.getvarcol("NUM_CHAN"))
print(ref_freq)
###Output
Successful readonly open of default-locked table gcal_1117-248_split.ms/SPECTRAL_WINDOW: 14 columns, 1 rows
{'r1': array([900], dtype=int32)}
1411114746.09
###Markdown
Dirty image
###Code
dirty_msfile = prefix + '.dirty'
print('Deleting existing image files for {}'.format(dirty_msfile))
os.system('rm -rf '+ dirty_msfile + '.*')
print(dirty_msfile)
tclean(vis=splitted_ms,
imagename=dirty_msfile,
stokes='I',
restfreq=ref_freq,
imsize=5540,
cell='10arcsec',
nchan=3,
weighting='briggs',
robust=-1.5,
specmode='mfs',
gain=0.9,
threshold = '10mJy',
niter=0)
dirty_image = dirty_msfile + '.image'
print(dirty_image)
noise_stat = imstat(imagename=dirty_image)
rms = str(noise_stat['rms'][0]*3)+'Jy'
print(rms)
viewer(dirty_image, zoom=8)
###Output
_____no_output_____
###Markdown
Clean image
###Code
clean_msfile = prefix + '.clean'
print('Deleting existing image files for {}'.format(clean_msfile))
os.system('rm -rf '+ clean_msfile + '.*')
print(clean_msfile)
tclean(vis=splitted_ms,
imagename=clean_msfile,
stokes='I',
restfreq=ref_freq,
imsize=5540,
cell='10arcsec',
nchan=3,
weighting='briggs',
robust=-1.5,
specmode='mfs',
gain=0.9,
threshold = '10mJy',
niter=20000)
clean_image = clean_msfile + '.image'
print(clean_image)
noise_stat = imstat(imagename=clean_image)
rms = str(noise_stat['rms'][0]*3)+'Jy'
print(rms)
viewer(clean_image, zoom=14)
###Output
_____no_output_____ |
06 - Receipts with Form Recognizer.ipynb | ###Markdown
Analyzing Receipts with the Form Recognizer service
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, the Form Recognizer's built-in model is used to analyze receipts.
Click the **Run cell** (&9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll analyze with Form Recognizer.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
Let's start by creating a Form Recognizer resource in your Azure subscription.
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**:
- **Name**: A unique name for your service
- **Subscription**: Your Azure subscription
- **Region**: Any available region
- **Pricing tier**: F0
- **Resource group**: The existing resource group you used previously
- **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and, on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them into the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model to analyze receipts.
Click the **Run cell** (&9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
Start by creating a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com and sign in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**:
- **Name**: A unique name for your service
- **Subscription**: Your Azure subscription
- **Region**: Any available region
- **Pricing tier**: F0
- **Resource group**: The existing resource group you used previously
- **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and, on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them into the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer![A robot holding a receipt](./images/receipt_analysis.jpg)In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem. View a receiptIn this example, you'll use the Form Recognizer's built-in model for analyzing receipts.Click the **Run cell** (&9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resourceStart by creating a Form Recognizer resource in your Azure subscription:1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.2. Select **+ Create a resource**, and search for *Form Recognizer*.3. In the list of services, select **Form Recognizer**.4. In the **Form Recognizer** blade, select **Create**.5. In the **Create** blade, enter the following details and select **Create** - **Name**: formrec-deploymentID - **Subscription**: Your Azure subscription - **Region**: Any available region - **Pricing tier**: F0 - **Resource Group**: The existing resource group you used previously - **I confirm I have read and understood the notice below**: Selected.6. Wait for the service to be created.7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receiptNow you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer![A robot holding a receipt](./images/receipt_analysis.jpg)In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem. View a receiptIn this example, you'll use the Form Recognizer's built-in model for analyzing receipts.Click the **Run cell** (&9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resourceStart by creating a Form Recognizer resource in your Azure subscription:1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.2. Select **+ Create a resource**, and search for *Form Recognizer*.3. In the list of services, select **Form Recognizer**.4. In the **Form Recognizer** blade, select **Create**.5. In the **Create** blade, enter the following details and select **Create** - **Name**: A unique name for your service - **Subscription**: Your Azure subscription - **Region**: Any available region - **Pricing tier**: F0 - **Resource Group**: The existing resource group you used previously - **I confirm I have read and understood the notice below**: Selected.6. Wait for the service to be created.7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
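###Markdown
Hard-coding keys in a notebook is fine for this exercise, but in practice it is safer to load them from the environment. The optional sketch below assumes hypothetical `FORM_KEY` and `FORM_ENDPOINT` environment variables; if they are not set, it falls back to the values entered above.
###Code
import os
# Prefer environment variables (hypothetical names) over values hard-coded in the notebook
form_key = os.environ.get('FORM_KEY', form_key)
form_endpoint = os.environ.get('FORM_ENDPOINT', form_endpoint)
print('Using form recognizer endpoint {}'.format(form_endpoint))
###Output
_____no_output_____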
###Markdown
Analyze a receiptNow you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
Click the **Run cell** (&#9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
Start by creating a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**
    - **Name**: A unique name for your service
    - **Subscription**: Your Azure subscription
    - **Region**: Any available region
    - **Pricing tier**: F0
    - **Resource Group**: The existing resource group you used previously
    - **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
Click the **Run cell** (&#9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
Start by creating a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**
    - **Name**: A unique name for your service
    - **Subscription**: Your Azure subscription
    - **Region**: Any available region
    - **Pricing tier**: F0
    - **Resource Group**: The existing resource group you used previously
    - **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
Click the **Run cell** (&#9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
>**Note:** You can either use a Cognitive Services resource or a Form Recognizer resource to access Form Recognizer services.
To create a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**
    - **Name**: A unique name for your service
    - **Subscription**: Your Azure subscription
    - **Region**: Any available region
    - **Pricing tier**: F0
    - **Resource Group**: The existing resource group you used previously
    - **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
Click the **Run cell** (&#9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
>**Note:** You can either use a Cognitive Services resource or a Form Recognizer resource to access Form Recognizer services.
To create a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**
    - **Name**: A unique name for your service
    - **Subscription**: Your Azure subscription
    - **Region**: Any available region
    - **Pricing tier**: F0
    - **Resource Group**: The existing resource group you used previously
    - **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer![A robot holding a receipt](./images/receipt_analysis.jpg)In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem. View a receiptIn this example, you'll use the Form Recognizer's built-in model for analyzing receipts.Click the **Run cell** (&9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource>**Note:** You can either use a Cognitive Service resource or a Form Recognizer resource to access Form Recognizer services. To create a Form Recognizer resource in your Azure subscription:1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.2. Select **+ Create a resource**, and search for *Form Recognizer*.3. In the list of services, select **Form Recognizer**.4. In the **Form Recognizer** blade, select **Create**.5. In the **Create** blade, enter the following details and select **Create** - **Name**: A unique name for your service - **Subscription**: Your Azure subscription - **Region**: Any available region - **Pricing tier**: F0 - **Resource Group**: The existing resource group you used previously - **I confirm I have read and understood the notice below**: Selected.6. Wait for the service to be created.7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'ce53b770811c4651a5283a48b9afd9cd'
form_endpoint = 'https://csvisionx.cognitiveservices.azure.com/'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
Ready to use form recognizer at https://csvisionx.cognitiveservices.azure.com/ using key ce53b770811c4651a5283a48b9afd9cd
###Markdown
Analyze a receiptNow you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
Analyzing receipt...
Receipt Type: Itemized
Merchant Address: 123 Main Street
Merchant Phone: +15551234567
Transaction Date: 2020-02-17
Receipt items:
Item #1
- Name: Apple
- Price: 0.9
Item #2
- Name: Orange
- Price: 0.8
Subtotal: 1.7
Tax: 0.17
Total: 1.87
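###Markdown
Each field extracted above also carries a confidence score indicating how certain the service is about the value. The optional sketch below assumes the `receipt_data` variable from the previous cell is still in memory and loops over the fields dictionary of the first receipt; `confidence` is a property of each `FormField` in the `azure-ai-formrecognizer` SDK.
###Code
# Review the confidence score reported for each extracted field
receipt = receipt_data[0]
for field_name, field in receipt.fields.items():
    # Composite fields (such as Items) hold list values; print only the simple ones
    if field_name != "Items":
        print("{}: {} (confidence: {:.2f})".format(field_name, field.value, field.confidence))
###Output
_____no_output_____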
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
Click the **Run cell** (&#9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
Start by creating a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**
    - **Name**: A unique name for your service
    - **Subscription**: Your Azure subscription
    - **Region**: Any available region
    - **Pricing tier**: F0
    - **Resource Group**: The existing resource group you used previously
    - **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer![A robot holding a receipt](./images/receipt_analysis.jpg)In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem. View a receiptIn this example, you'll use the Form Recognizer's built-in model for analyzing receipts.Click the **Run cell** (&9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource>**Note:** You can either use a Cognitive Service resource or a Form Recognizer resource to access Form Recognizer services. To create a Form Recognizer resource in your Azure subscription:1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.2. Select **+ Create a resource**, and search for *Form Recognizer*.3. In the list of services, select **Form Recognizer**.4. In the **Form Recognizer** blade, select **Create**.5. In the **Create** blade, enter the following details and select **Create** - **Name**: A unique name for your service - **Subscription**: Your Azure subscription - **Region**: Any available region - **Pricing tier**: F0 - **Resource Group**: The existing resource group you used previously - **I confirm I have read and understood the notice below**: Selected.6. Wait for the service to be created.7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receiptNow you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
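###Markdown
Besides the typed value of each field, the SDK can also report where on the receipt a value was found. The optional sketch below assumes the `receipt_data` variable from the cell above is available after a successful run; it reads the `value_data` of the Total field, which (when present) exposes the source text, page number, and bounding box of the recognized words.
###Code
# Inspect where the Total value was found on the receipt (assumes the previous cell ran successfully)
receipt = receipt_data[0]
total = receipt.fields.get("Total")
if total and total.value_data:
    print("Total as it appears on the receipt: {}".format(total.value_data.text))
    print("Page: {}".format(total.value_data.page_number))
    print("Bounding box: {}".format(total.value_data.bounding_box))
###Output
_____no_output_____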
###Markdown
Analyzing Receipts with Form Recognizer![A robot holding a receipt](./images/receipt_analysis.jpg)In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem. View a receiptIn this example, you'll use the Form Recognizer's built-in model for analyzing receipts.Click the **Run cell** (&9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resourceStart by creating a Form Recognizer resource in your Azure subscription:1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.2. Select **+ Create a resource**, and search for *Form Recognizer*.3. In the list of services, select **Form Recognizer**.4. In the **Form Recognizer** blade, select **Create**.5. In the **Create** blade, enter the following details and select **Create** - **Name**: formrec-deploymentID - **Subscription**: Your Azure subscription - **Region**: Any available region - **Pricing tier**: F0 - **Resource Group**: Select existing resource group with name AI900-deploymentID - **I confirm I have read and understood the notice below**: Selected.6. Wait for the service to be created.7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receiptNow you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
Click the **Run cell** (&#9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
Start by creating a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**
    - **Name**: A unique name for your service
    - **Subscription**: Your Azure subscription
    - **Region**: Any available region
    - **Pricing tier**: F0
    - **Resource Group**: The existing resource group you used previously
    - **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
Click the **Run cell** (&#9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
Start by creating a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**
    - **Name**: A unique name for your service
    - **Subscription**: Your Azure subscription
    - **Region**: Any available region
    - **Pricing tier**: F0
    - **Resource Group**: The existing resource group you used previously
    - **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
Click the **Run cell** (&#9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
>**Note:** You can either use a Cognitive Services resource or a Form Recognizer resource to access Form Recognizer services.
To create a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**
    - **Name**: A unique name for your service
    - **Subscription**: Your Azure subscription
    - **Region**: Any available region
    - **Pricing tier**: F0
    - **Resource Group**: The existing resource group you used previously
    - **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer
![A robot holding a receipt](./images/receipt_analysis.jpg)
In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
View a receipt
In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
Click the **Run cell** (&#9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resource
Start by creating a Form Recognizer resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Select **+ Create a resource**, and search for *Form Recognizer*.
3. In the list of services, select **Form Recognizer**.
4. In the **Form Recognizer** blade, select **Create**.
5. In the **Create** blade, enter the following details and select **Create**
    - **Name**: A unique name for your service
    - **Subscription**: Your Azure subscription
    - **Region**: Any available region
    - **Pricing tier**: F0
    - **Resource Group**: The existing resource group you used previously
    - **I confirm I have read and understood the notice below**: Selected.
6. Wait for the service to be created.
7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receipt
Now you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____
###Markdown
Analyzing Receipts with Form Recognizer![A robot holding a receipt](./images/receipt_analysis.jpg)In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem. View a receiptIn this example, you'll use the Form Recognizer's built-in model for analyzing receipts.Click the **Run cell** (&9655;) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
###Code
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Create a Form Recognizer resourceStart by creating a Form Recognizer resource in the Azure subscription:1. In another browser tab, open the Azure portal at https://portal.azure.com, sign in with the lab credentials.2. Select **+ Create a resource**, and search for *Form Recognizer*.3. In the list of services, select **Form Recognizer**.4. In the **Form Recognizer** blade, select **Create**.5. In the **Create** blade, enter the following details and select **Create** - **Name**: formrec-uniqueid - **Subscription**: Select the subscription where you are performing the lab - **Region**: Any available region - **Pricing tier**: F0 - **Resource Group**: Select the existing resource group. - **I confirm I have read and understood the notice below**: Selected.6. Wait for the service to be created.7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
###Code
#Replace YOUR_FORM_KEY and YOUR_FORM_ENDPOINT with the form recognizer key and endpoint values
form_key = 'YOUR_FORM_KEY'
form_endpoint = 'YOUR_FORM_ENDPOINT'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
###Output
_____no_output_____
###Markdown
Analyze a receiptNow you're ready to use Form Recognizer to analyze a receipt.
###Code
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
print("Analyzing receipt...")
# Get the receipt image file
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
# Submit the file data to form recognizer
with open(image_path, "rb") as f:
analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
# Get the results
receipt_data = analyze_receipt.result()
# Print the extracted data for the first (and only) receipt
receipt = receipt_data[0]
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {}".format(receipt_type.value))
merchant_address = receipt.fields.get("MerchantAddress")
if merchant_address:
print("Merchant Address: {}".format(merchant_address.value))
merchant_phone = receipt.fields.get("MerchantPhoneNumber")
if merchant_phone:
print("Merchant Phone: {}".format(merchant_phone.value))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {}".format(transaction_date.value))
print("Receipt items:")
items = receipt.fields.get("Items")
if items:
for idx, item in enumerate(receipt.fields.get("Items").value):
print("\tItem #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("\t - Name: {}".format(item_name.value))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("\t - Price: {}".format(item_total_price.value))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} ".format(subtotal.value))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {}".format(tax.value))
total = receipt.fields.get("Total")
if total:
print("Total: {}".format(total.value))
except Exception as ex:
print('Error:', ex)
###Output
_____no_output_____ |
examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_hotel_reviews.ipynb | ###Markdown
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_hotel_reviews.ipynb) Training a Deep Learning Classifier with NLU ClassifierDL (Multi-class Text Classification) 3 class Tripadvisor Hotel review classifier trainingWith the [ClassifierDL model](https://nlp.johnsnowlabs.com/docs/en/annotatorsclassifierdl-multi-class-text-classification) from Spark NLP you can achieve state-of-the-art results on any multi-class text classification problem. This notebook showcases the following features : - How to train the deep learning classifier- How to store a pipeline to disk- How to load the pipeline from disk (Enables NLU offline mode) 1. Install Java 8 and NLU
###Code
import os
from sklearn.metrics import classification_report
! apt-get update -qq > /dev/null
# Install java
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! pip install pyspark==2.4.7
! pip install nlu > /dev/null
import nlu
###Output
_____no_output_____
###Markdown
2. Download hotel reviews dataset https://www.kaggle.com/andrewmvd/trip-advisor-hotel-reviewsHotels play a crucial role in traveling and, with increased access to information, new pathways for selecting the best ones have emerged. With this dataset, consisting of 20k reviews crawled from Tripadvisor, you can explore what makes a great hotel and maybe even use this model in your travels!
###Code
! wget http://ckl-it.de/wp-content/uploads/2021/01/tripadvisor_hotel_reviews.csv
import pandas as pd
test_path = '/content/tripadvisor_hotel_reviews.csv'
train_df = pd.read_csv(test_path,sep=",")
cols = ["y","text"]
train_df = train_df[cols]
train_df
###Output
_____no_output_____
###Markdown
3. Train Deep Learning Classifier using nlu.load('train.classifier')Your dataset's label column should be named 'y' and the feature column with text data should be named 'text'
###Code
# load a trainable pipeline by specifying the train. prefix and fit it on a dataset with label and text columns
trainable_pipe = nlu.load('train.classifier')
fitted_pipe = trainable_pipe.fit(train_df.iloc[:50] )
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:50] )
preds
###Output
tfhub_use download started this may take some time.
Approximate size to download 923.7 MB
[OK!]
###Markdown
Test the fitted pipe on a new example
###Code
fitted_pipe.predict("It was a good experience!")
###Output
_____no_output_____
###Markdown
Configure pipe training parameters
###Code
trainable_pipe.print_info()
###Output
The following parameters are configurable for this NLU pipeline (You can copy paste the examples) :
>>> pipe['classifier_dl'] has settable params:
pipe['classifier_dl'].setMaxEpochs(3) | Info: Maximum number of epochs to train | Currently set to : 3
pipe['classifier_dl'].setLr(0.005) | Info: Learning Rate | Currently set to : 0.005
pipe['classifier_dl'].setBatchSize(64) | Info: Batch size | Currently set to : 64
pipe['classifier_dl'].setDropout(0.5) | Info: Dropout coefficient | Currently set to : 0.5
pipe['classifier_dl'].setEnableOutputLogs(True) | Info: Whether to use stdout in addition to Spark logs. | Currently set to : True
>>> pipe['sentence_detector'] has settable params:
pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True
pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True
pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False
pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : []
pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False
pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. | Currently set to : 0
pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999
>>> pipe['default_tokenizer'] has settable params:
pipe['default_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. Defaults \S+ | Currently set to : \S+
pipe['default_tokenizer'].setContextChars(['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"]) | Info: character list used to separate from token boundaries | Currently set to : ['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"]
pipe['default_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True
pipe['default_tokenizer'].setMinLength(0) | Info: Set the minimum allowed legth for each token | Currently set to : 0
pipe['default_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed legth for each token | Currently set to : 99999
>>> pipe['default_name'] has settable params:
pipe['default_name'].setDimension(512) | Info: Number of embedding dimensions | Currently set to : 512
pipe['default_name'].setStorageRef('tfhub_use') | Info: unique reference name for identification | Currently set to : tfhub_use
>>> pipe['document_assembler'] has settable params:
pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink
###Markdown
Retrain with new parameters
###Code
# Train longer!
trainable_pipe['classifier_dl'].setMaxEpochs(5)
fitted_pipe = trainable_pipe.fit(train_df.iloc[:100])
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:100],output_level='document')
# The sentence detector that is part of the pipe generates some NaNs. Let's drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['category']))
preds
###Output
precision recall f1-score support
average 0.48 0.76 0.59 33
great 0.86 0.51 0.64 35
poor 0.74 0.62 0.68 32
accuracy 0.63 100
macro avg 0.69 0.63 0.64 100
weighted avg 0.70 0.63 0.64 100
###Markdown
Try training with different Embeddings
###Code
# We can use nlu.print_components(action='embed_sentence') to see every possible sentence embedding we could use. Let's use BERT!
nlu.print_components(action='embed_sentence')
from sklearn.metrics import classification_report
trainable_pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.classifier')
# We usually need to train longer and use a smaller LR for non-USE-based sentence embeddings
# We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch
# Also longer training gives more accuracy
trainable_pipe['classifier_dl'].setMaxEpochs(90)
trainable_pipe['classifier_dl'].setLr(0.0005)
fitted_pipe = trainable_pipe.fit(train_df)
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df,output_level='document')
# The sentence detector that is part of the pipe generates some NaNs. Let's drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['category']))
#preds
###Output
sent_small_bert_L12_768 download started this may take some time.
Approximate size to download 392.9 MB
[OK!]
precision recall f1-score support
average 0.66 0.65 0.65 2184
great 0.79 0.81 0.80 2184
poor 0.77 0.78 0.78 2184
accuracy 0.74 6552
macro avg 0.74 0.74 0.74 6552
weighted avg 0.74 0.74 0.74 6552
###Markdown
5. Let's save the model
###Code
stored_model_path = './models/classifier_dl_trained'
fitted_pipe.save(stored_model_path)
###Output
Stored model in ./models/classifier_dl_trained
###Markdown
6. Let's load the model from HDD. This makes offline NLU usage possible! You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk.
###Code
hdd_pipe = nlu.load(path=stored_model_path)
preds = hdd_pipe.predict('It was a good experience!')
preds
hdd_pipe.print_info()
###Output
The following parameters are configurable for this NLU pipeline (You can copy paste the examples) :
>>> pipe['document_assembler'] has settable params:
pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink
>>> pipe['regex_tokenizer'] has settable params:
pipe['regex_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True
pipe['regex_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. Defaults \S+ | Currently set to : \S+
pipe['regex_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed length for each token | Currently set to : 99999
pipe['regex_tokenizer'].setMinLength(0) | Info: Set the minimum allowed length for each token | Currently set to : 0
>>> pipe['sentence_detector'] has settable params:
pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : []
pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True
pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False
pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999
pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. | Currently set to : 0
pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True
pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False
>>> pipe['glove'] has settable params:
pipe['glove'].setBatchSize(32) | Info: Batch size. Large values allows faster processing but requires more memory. | Currently set to : 32
pipe['glove'].setCaseSensitive(False) | Info: whether to ignore case in tokens for embeddings matching | Currently set to : False
pipe['glove'].setDimension(768) | Info: Number of embedding dimensions | Currently set to : 768
pipe['glove'].setMaxSentenceLength(128) | Info: Max sentence length to process | Currently set to : 128
pipe['glove'].setIsLong(False) | Info: Use Long type instead of Int type for inputs buffer - Some Bert models require Long instead of Int. | Currently set to : False
pipe['glove'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768
>>> pipe['classifier_dl'] has settable params:
pipe['classifier_dl'].setClasses(['average', 'great', 'poor']) | Info: get the tags used to trained this NerDLModel | Currently set to : ['average', 'great', 'poor']
pipe['classifier_dl'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768
|
MachineLearning/2_RegresionLinealMultiple/RegresionLinealMultiple.ipynb | ###Markdown
Remember to open this in a new tab Multiple Linear Regression
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
# Indicamos que los tipos de datos float se muestren con 2 decimales
pd.options.display.float_format = '{:.2f}'.format
###Output
/usr/local/lib/python3.7/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
###Markdown
Properties datasetWe continue at the same statistical consulting firm as in the previous class, and now we are hired by a company that buys and remodels properties to later sell them. This time they want us to build some models to predict property prices and also to understand how certain variables influence the price, in order to know which modifications could increase it.The dataset consists of Properati sale listings for properties in the City of Buenos Aires during the first half of 2021. Our variable to predict is the property price in dollars, and the candidate predictor variables are:* Total surface: total surface of the property in square meters* Covered surface: covered surface of the property in square meters* Rooms: number of rooms (excluding bathrooms)* Bathrooms: number of bathrooms * Property type: whether the property is a house, apartment or horizontal property (PH) * Latitude* Longitude* Neighborhood: neighborhood where the property is located (l3)Data provided by ProperatiLet's explore the data a bit:
###Code
# Lectura del dataset
df = pd.read_csv('https://datasets-humai.s3.amazonaws.com/datasets/properati_caba_2021.csv')
# Definimos la semilla
SEMILLA = 1992
###Output
_____no_output_____
###Markdown
1. Exploratory analysis
###Code
# Observamos los primeros registros del dataframe
df.head()
# Observamos la matriz de correlación entre las variables numéricas
df.corr()
###Output
_____no_output_____
###Markdown
We observe that the **surface** variables show a high positive correlation with price and a very strong correlation with each other.The **bathrooms** variable shows a fairly high positive correlation, followed by the **rooms** variable.Finally, the latitude and longitude variables show a low positive correlation with price.Let's look at the scatter plots of some of these variables against price
###Code
# Graficamos algunas de estas relaciones con el precio
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(20, 5))
df.plot.scatter(x='surface_total', y='price', ax=ax1)
df.plot.scatter(x='rooms', y='price', ax=ax2)
df.plot.scatter(x='bathrooms', y='price', ax=ax3)
df.plot.scatter(x='lon', y='price', ax=ax4);
###Output
_____no_output_____
###Markdown
We observe that, of these 4 variables, the one that seems to have an approximately linear relationship with price is the **total surface**.On the other hand, we see that some observations are **outliers** (atypical values) in these variables. For example: there are properties with 30 and 35 rooms, and some with 10 or more bathroomsNow that we know a bit about the data, we can proceed to design and implement some models to estimate property prices 2. Models: Estimation and interpretationWe are going to build multiple linear regression models with the following specification:$E(Y|X_1, X_2, ..., X_p) = \beta_0 + \beta_1 \cdot X_1 + \beta_2 \cdot X_2 + ... + \beta_p \cdot X_p $In this section we will build different models with the numerical variables we saw earlier and look at the estimated coefficients and their interpretation 2.1 Data preparationAs we saw in the previous class, to evaluate the models we design we need to split the data into:* **Training** set: data used to train the models* **Evaluation** set: data used to evaluate the model's performanceWe will do this with the [train_test_split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function. We pass it as arguments the dataset of predictor variables, the series of the target variable, and the percentage of data we want to form our evaluation set.
###Code
# Separamos al dataset en X (variables predictoras) e y (variable a predecir)
X = df[['lat', 'lon', 'rooms', 'bathrooms', 'surface_total', 'surface_covered', 'property_type']]
y = df['price']
from sklearn.model_selection import train_test_split
# Realizamos el split de X e y en los sets de entrenamiento (train) y test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=SEMILLA)
print(f"El dataset de entrenamiento cuenta con {len(X_train)} observaciones")
print(f"El dataset de evaluación cuenta con {len(X_test)} observaciones")
###Output
El dataset de entrenamiento cuenta con 32124 observaciones
El dataset de evaluación cuenta con 8032 observaciones
###Markdown
2.2 Surface and Bathrooms modelFrom what we saw in our data analysis, the **total surface** and **bathrooms** variables show a fairly high positive correlation with price. So let's start with a model that includes both variables as predictors:$E(precio|.) = \beta_0 + \beta_1 \cdot superficieTotal + \beta_2 \cdot baños$
###Code
# Definimos las variables exogenas (predictores)
variables_exogenas = ['surface_total', 'bathrooms']
# Construimos la matriz de X
X_train_modelo_sup_baños = X_train[variables_exogenas]
X_train_modelo_sup_baños.head()
# Importamos el modelo lineal
from sklearn.linear_model import LinearRegression
# Definimos una instancia del modelo lineal con scikit learn
modelo_lineal_sup_baños = LinearRegression(fit_intercept=True)
# Realizamos el proceso de estimación
modelo_lineal_sup_baños.fit(X_train_modelo_sup_baños, y_train)
# Accedemos a los coeficientes estimados
modelo_lineal_sup_baños.coef_
# Accedemos al intercepto
modelo_lineal_sup_baños.intercept_
# Creamos variables para guardar los coeficientes estimados
coeficientes = modelo_lineal_sup_baños.coef_
intercepto = modelo_lineal_sup_baños.intercept_
beta_1, beta_2 = coeficientes[0], coeficientes[1]
print(f"El intercepto es {intercepto:.2f}")
print(f"El coeficiente estimado para Beta 1 es {beta_1:.2f}")
print(f"El coeficiente estimado para Beta 2 es {beta_2:.2f}")
# Definimos una función para obtener los coeficientes en un dataframe
def obtener_coeficientes(modelo, lista_variables):
'''Crea un dataframe con los coeficientes estimados de un modelo'''
# Creo la lista de nombres de variables
lista_variables = ['intercepto'] + lista_variables
# Intercepto
intercepto = modelo.intercept_
# Lista coeficientes excepto el intercepto
coeficientes = list(modelo.coef_)
# Lista completa coeficientes
lista_coeficientes = [intercepto] + coeficientes
return pd.DataFrame({"variable": lista_variables, "coeficiente": lista_coeficientes})
# Obtenemos nuestro dataframe
coeficientes_modelo_sup_baños = obtener_coeficientes(modelo_lineal_sup_baños, variables_exogenas)
coeficientes_modelo_sup_baños
###Output
_____no_output_____
###Markdown
How do we interpret these coefficients?$\hat{\beta_0} = -107213.75$The expected/average/predicted value of a property with no surface and no bathrooms is -107213.75 dollars$\hat{\beta_1} = 2069.77$The expected/average/predicted value of a property increases by 2069.77 dollars for each additional square meter of total surface, given the number of bathrooms$\hat{\beta_2} = 113359.64$The expected/average/predicted value of a property increases by 113359.64 dollars for each additional bathroom, given the total surface 2.3 Surface and Rooms modelWe also saw in our data analysis that the **rooms** variable shows a positive correlation with price. So let's fit a model with rooms and total surface as predictors:$E(precio|.) = \beta_0 + \beta_1 \cdot superficieTotal + \beta_2 \cdot cuartos$
###Code
# Definimos las variables exogenas (predictores)
variables_exogenas = ['surface_total', 'rooms']
# Construimos la matriz de X
X_train_modelo_sup_cuartos = X_train[variables_exogenas]
# Definimos una instancia del modelo lineal con scikit learn
modelo_lineal_sup_cuartos = LinearRegression(fit_intercept=True)
# Realizamos el proceso de estimación
modelo_lineal_sup_cuartos.fit(X_train_modelo_sup_cuartos, y_train)
# Obtenemos los coeficientes en el dataframe
coeficientes_modelo_sup_cuartos = obtener_coeficientes(modelo_lineal_sup_cuartos, variables_exogenas)
coeficientes_modelo_sup_cuartos
###Output
_____no_output_____
###Markdown
What happened to the estimated coefficients of this model?What may catch our attention is that the coefficient of the **rooms** variable is negative. Someone might tell us that the interpretation of this coefficient is strange and counterintuitive if we say: the coefficient $\hat{\beta_2} = -21992$ indicates that when the number of rooms increases by one, the expected price of the property falls by 21992 dollars. However, this is incorrect. Why?Because the interpretation of an estimated coefficient in a multiple linear regression model is made **holding the other variables constant** So, what is the correct interpretation?The correct interpretation of $\hat{\beta_2} = -21992$ is:The expected value of a property falls by 21992 dollars when the number of rooms increases by 1, given the total surface (formally, for two properties with the same total surface, the expected price difference is exactly $\hat{\beta_2}$, whatever that surface is).This means that if, for a property with a given surface, a new room is created (dividing the surface into more rooms), its value is expected to fall by approximately 22000 dollars 3. Model with categorical variablesIn this section we will incorporate the categorical variable **property type** into our models.First, let's see how this variable relates to the property price
###Code
# Boxplot del precio por tipo de propiedad
sns.boxplot(x='property_type', y='price', data=df);
###Output
_____no_output_____
###Markdown
We can see that there are multiple **outliers** that make the comparison between the different property types difficult. Houses and apartments tend to have more outliers than PHs.Let's restrict the chart to properties priced below 1 million dollars
###Code
# Boxplot del precio por tipo de propiedad
sns.boxplot(x='property_type', y='price', data=df.query("price<=1000000"));
###Output
_____no_output_____
###Markdown
Now we can see that apartments and PHs have a similar median while houses have a higher median price. However, remember that in this chart we are not controlling for other variables. 3.1 Property Type and Surface modelSince there seem to be differences in price that can be explained by the property type, we define a model that includes this information.Remember that, since this is a variable with 3 categories, we must create two binary variables, with one category absorbed by the intercept.$E(precio|X) = \beta_0 + \beta_1 \cdot superficieTotal + \beta_{2} \cdot X_{casa} + \beta_{3} \cdot X_{depto}$The dummy variables we must create are:$X_{casa}=\begin{cases} 0 & \text{if the property is NOT a house} \\ 1 & \text{if the property is a house}\end{cases}$$X_{depto}=\begin{cases} 0 & \text{if the property is NOT an apartment} \\ 1 & \text{if the property is an apartment}\end{cases}$Let's see how to create these variables Creating the dummy variablesTo create the dummy variables we will use the [OneHotEncoder](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html) transformer (a type of class). The arguments we will use are:* `categories`: the list of categories of the categorical variable* `drop='first'`: indicates that the first category will be "dropped" (it is the one absorbed by the intercept)
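As the code below shows, with `categories=[['PH', 'Casa', 'Departamento']]` and `drop='first'` the encoder keeps only two columns and uses PH as the baseline: PH is encoded as (0, 0), Casa as (1, 0) and Departamento as (0, 1), which is exactly the pair $X_{casa}$, $X_{depto}$ defined above.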
###Code
from sklearn.preprocessing import OneHotEncoder
# Definimos una instancia del transformer
one_hot_encoder = OneHotEncoder(categories=[['PH', 'Casa', 'Departamento']], drop='first')
# Realizamos el fit con los datos de entrenamiento
one_hot_encoder.fit(X_train[['property_type']])
# Accedemos a las categorias del encoder
one_hot_encoder.categories_
# Generamos las variables dummies de la variable property type (notemos que tenemos 2 columnas!)
matriz_dummies = one_hot_encoder.transform(X_train[['property_type']]).toarray()
matriz_dummies
# Generamos los nombres de las variables dummies (notemos que tenemos 2 columnas!)
nombres_dummies = one_hot_encoder.get_feature_names(['tipo'])
nombres_dummies
# Generamos el dataframe con las variables dummies con las matrices y columnas
df_dummies = pd.DataFrame(matriz_dummies, columns=nombres_dummies, index=X_train.index)
df_dummies.head()
# Agregamos la información a nuestra matriz de variables predictoras
X_train = X_train.join(df_dummies)
X_train.head()
# Definimos las variables exogenas (predictores)
variables_exogenas = ['surface_total', 'tipo_Casa', 'tipo_Departamento']
# Construimos la matriz de X
X_train_modelo_sup_propiedad = X_train[variables_exogenas]
# Definimos una instancia del modelo lineal con scikit learn
modelo_lineal_sup_propiedad = LinearRegression(fit_intercept=True)
# Realizamos el proceso de estimación
modelo_lineal_sup_propiedad.fit(X_train_modelo_sup_propiedad, y_train)
coeficientes_modelo_sup_propiedad = obtener_coeficientes(modelo_lineal_sup_propiedad, variables_exogenas)
coeficientes_modelo_sup_propiedad
###Output
_____no_output_____
###Markdown
How do we interpret these coefficients?$\hat{\beta_0} = -208630$The expected price of a PH with no surface is -208630 dollars$\hat{\beta_1} = 3489$The expected price increases by 3489 dollars when the total surface increases by 1 m2, regardless of the property type $\hat{\beta_2} = -212763$If the property is a house, the expected price is 212763 dollars lower than for a PH **with the same total surface** $\hat{\beta_3} = 173962$If the property is an apartment, the expected price is 173962 dollars higher than for a PH **with the same total surface** 3.2 InteractionIt is also possible that the total surface has a different effect for each property type. To allow for this, we define a model with interaction as follows:$E(precio|X) = \beta_0 + \beta_1 \cdot superficieTotal + \beta_{2} \cdot X_{casa} + \beta_{3} \cdot X_{depto} + \beta_{4} \cdot (X_{casa} * superficieTotal) + \beta_{5} \cdot (X_{depto}* superficieTotal)$
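Grouping terms in this specification shows that each property type now gets its own intercept and its own slope: for a PH, $E(precio|X) = \beta_0 + \beta_1 \cdot superficieTotal$; for a house, $E(precio|X) = (\beta_0 + \beta_2) + (\beta_1 + \beta_4) \cdot superficieTotal$; and for an apartment, $E(precio|X) = (\beta_0 + \beta_3) + (\beta_1 + \beta_5) \cdot superficieTotal$. In the previous model (without interaction terms) the three lines shared the slope $\beta_1$ and differed only in their intercepts.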
###Code
# Creamos las dos variables de interacción
X_train['interaccion_sup_casa'] = X_train['tipo_Casa'] * X_train['surface_total']
X_train['interaccion_sup_depto'] = X_train['tipo_Departamento'] * X_train['surface_total']
# Definimos las variables exogenas (predictores)
variables_exogenas = ['surface_total', 'tipo_Casa', 'tipo_Departamento', 'interaccion_sup_casa', 'interaccion_sup_depto']
# Construimos la matriz de X
X_train_modelo_interaccion = X_train[variables_exogenas]
# Definimos una instancia del modelo lineal con scikit learn
modelo_lineal_interaccion = LinearRegression(fit_intercept=True)
# Realizamos el proceso de estimación
modelo_lineal_interaccion.fit(X_train_modelo_interaccion, y_train)
coeficientes_modelo_interaccion = obtener_coeficientes(modelo_lineal_interaccion, variables_exogenas)
coeficientes_modelo_interaccion
###Output
_____no_output_____
###Markdown
How do we interpret these coefficients?$\hat{\beta_0} = 68563$The expected price of a PH with no surface is 68563 dollars$\hat{\beta_1} = 1058$The expected price of a PH increases by 1058 dollars when the total surface increases by 1 m2.It is very important to note that $\hat{\beta_1}$ now only describes the expected effect of surface on price for PHs, since this is the category left as the baseline or comparison level$\hat{\beta_2} = -86032$If the property is a house, the expected price is 86032 dollars lower than for a PH (strictly, at zero total surface; with the interaction terms the gap between types changes with the surface)$\hat{\beta_3} = -166791$If the property is an apartment, the expected price is 166791 dollars lower than for a PH (again, at zero total surface)$\hat{\beta_4} = 729$If the property is a house, the expected price increases by 729 dollars more than for a PH when the surface increases by 1 m2.This is equivalent to saying that for a **house** the expected price increases by 1787 ($\hat{\beta_1} + \hat{\beta_4}$) dollars when the surface increases by 1 m2$\hat{\beta_5} = 3228$If the property is an apartment, the expected price increases by 3228 dollars more than for a PH when the surface increases by 1 m2.This is equivalent to saying that for an **apartment** the expected price increases by 4286 ($\hat{\beta_1} + \hat{\beta_5}$) dollars when the surface increases by 1 m2 4. EvaluationIn this part we will evaluate the results obtained by some of the previous models. On the one hand, we are interested in the results of the individual and global statistical significance tests; on the other, in some performance metrics 4.1 Statistical testsTo carry out the evaluation with a more traditional statistical approach we must use the [statsmodels](https://www.statsmodels.org/stable/regression.html) module. To access the information we are interested in, we will have to build the models with this library.The sklearn library does not provide the functions needed to evaluate the statistical tests of the estimated coefficients.Let's start by preparing the data for the statsmodels implementation of the linear model
###Code
# En statsmodels se le agrega el intercepto (en scikit se lo pasamos como un parametro a la instancia del modelo)
X_train_modelo_sup_baños_stats = sm.add_constant(X_train_modelo_sup_baños)
X_train_modelo_sup_baños_stats.head()
# Construimos el modelo
modelo_sup_baños_stats = sm.OLS(y_train, X_train_modelo_sup_baños_stats)
# Guardamos los resultados
resultados_sup_baños_stats = modelo_sup_baños_stats.fit()
# Accedemos a los coeficientes estimados
resultados_sup_baños_stats.params
###Output
_____no_output_____
###Markdown
First, we see that the estimated coefficients are the same as the ones we obtained with the scikit-learn implementation.Now let's look at the p-values associated with the individual significance tests. Recall that the hypotheses are:$H_0: \beta_j = 0$$H_A: \beta_j \neq 0$To reject the hypothesis that the parameter equals zero we must observe a p-value below 0.05
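As a reference for reading the output below, each individual test is based on the statistic $t_j = \hat{\beta_j} / \widehat{SE}(\hat{\beta_j})$, which under $H_0$ follows a t distribution with $n - p - 1$ degrees of freedom; this is the `t` column (and its `P>|t|` p-value) in the statsmodels summary.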
###Code
# Accedemos a los p valores de los tests de significancia individual
resultados_sup_baños_stats.pvalues
###Output
_____no_output_____
###Markdown
We continue with the global significance test. The hypotheses are:$H_0: \text{All } \beta_j = 0$$H_A: \text{Some } \beta_j \neq 0$
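For reference, the statistic used is $F = \dfrac{R^2 / p}{(1 - R^2)/(n - p - 1)}$, with $p$ predictors and $n$ observations, which under $H_0$ follows an F distribution with $(p, n - p - 1)$ degrees of freedom; this is the `F-statistic` and `Prob (F-statistic)` reported in the statsmodels summary.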
###Code
# Test significatividad global
resultados_sup_baños_stats.f_pvalue
###Output
_____no_output_____
###Markdown
Let's look at the R-squared and the adjusted R-squared
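As a reminder, with $n$ observations and $p$ predictors: $R^2 = 1 - \dfrac{\sum_{i=1}^{n}(Y_i - \hat{Y_i})^2}{\sum_{i=1}^{n}(Y_i - \bar{Y})^2}$ and $R^2_{adj} = 1 - (1 - R^2)\dfrac{n - 1}{n - p - 1}$; the adjusted version penalizes adding predictors that do not improve the fit.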
###Code
# R cuadrado
resultados_sup_baños_stats.rsquared
# R cuadrado ajustado
resultados_sup_baños_stats.rsquared_adj
###Output
_____no_output_____
###Markdown
All of this information we have been accessing (along with much more) can be obtained by printing the `summary` of the results.
###Code
print(resultados_sup_baños_stats.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: price R-squared: 0.571
Model: OLS Adj. R-squared: 0.571
Method: Least Squares F-statistic: 2.140e+04
Date: Wed, 07 Jul 2021 Prob (F-statistic): 0.00
Time: 02:50:21 Log-Likelihood: -4.3859e+05
No. Observations: 32124 AIC: 8.772e+05
Df Residuals: 32121 BIC: 8.772e+05
Df Model: 2
Covariance Type: nonrobust
=================================================================================
coef std err t P>|t| [0.025 0.975]
---------------------------------------------------------------------------------
const -1.072e+05 2338.152 -45.854 0.000 -1.12e+05 -1.03e+05
surface_total 2069.7756 22.252 93.016 0.000 2026.161 2113.390
bathrooms 1.134e+05 2032.921 55.762 0.000 1.09e+05 1.17e+05
==============================================================================
Omnibus: 33294.388 Durbin-Watson: 2.000
Prob(Omnibus): 0.000 Jarque-Bera (JB): 7432291.005
Skew: 4.728 Prob(JB): 0.00
Kurtosis: 76.914 Cond. No. 291.
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
Now let's look at these elements for the interaction model, which had a larger number of predictor variables. Keep in mind that we will inspect the summary to try to answer the following questions:* Does it make sense to use this model specification to explain/predict property prices? (global significance)* Does each variable show a statistically significant relationship with price? (individual significance)* What percentage of the variability does the model explain? How does it compare to the model with fewer variables we estimated before? (R-squared)
###Code
# En stats models se le agrega el intercepto
X_train_interaccion_stats = sm.add_constant(X_train_modelo_interaccion)
#Construimos el modelo
modelo_interaccion_stats = sm.OLS(y_train, X_train_interaccion_stats)
# Estimamos los parámetros
resultados_interaccion = modelo_interaccion_stats.fit()
# Imprimimos el resumen
print(resultados_interaccion.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: price R-squared: 0.675
Model: OLS Adj. R-squared: 0.675
Method: Least Squares F-statistic: 1.335e+04
Date: Wed, 07 Jul 2021 Prob (F-statistic): 0.00
Time: 02:50:21 Log-Likelihood: -4.3413e+05
No. Observations: 32124 AIC: 8.683e+05
Df Residuals: 32118 BIC: 8.683e+05
Df Model: 5
Covariance Type: nonrobust
=========================================================================================
coef std err t P>|t| [0.025 0.975]
-----------------------------------------------------------------------------------------
const 6.856e+04 6310.502 10.865 0.000 5.62e+04 8.09e+04
surface_total 1058.2632 46.590 22.714 0.000 966.944 1149.582
tipo_Casa -8.603e+04 1.11e+04 -7.751 0.000 -1.08e+05 -6.43e+04
tipo_Departamento -1.668e+05 6546.818 -25.477 0.000 -1.8e+05 -1.54e+05
interaccion_sup_casa 729.6612 57.091 12.781 0.000 617.760 841.562
interaccion_sup_depto 3228.1323 49.676 64.983 0.000 3130.765 3325.500
==============================================================================
Omnibus: 34358.273 Durbin-Watson: 1.996
Prob(Omnibus): 0.000 Jarque-Bera (JB): 8193012.339
Skew: 4.987 Prob(JB): 0.00
Kurtosis: 80.599 Cond. No. 1.85e+03
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.85e+03. This might indicate that there are
strong multicollinearity or other numerical problems.
###Markdown
4.2 Performance metricsIn the previous section we looked at some evaluation tools typical of the statistical approach for our models. Now let's look at some evaluation metrics that are very common for regression problems in Machine Learning.We will look at the values of the following metrics:**Mean Squared Error**$MSE = \frac{1}{n} \sum_{i=1}^{n} (Y_i - \hat{Y_i})^2$**Root Mean Squared Error**$RMSE = \sqrt{MSE}$**Mean Absolute Error**$MAE = \frac{1}{n} \sum_{i=1}^{n} |Y_i - \hat{Y_i}|$In addition, we will want to compare the values of these metrics for the training set and for the evaluation set
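As an illustrative sketch (not part of the original notebook), these formulas can be computed directly with numpy on two hypothetical arrays of observed and predicted prices; the sklearn functions used below should give the same results:

```python
import numpy as np

# Hypothetical observed and predicted prices (illustrative values only)
y_true = np.array([100_000.0, 250_000.0, 180_000.0])
y_pred = np.array([110_000.0, 240_000.0, 200_000.0])

mse = np.mean((y_true - y_pred) ** 2)    # Mean Squared Error
rmse = np.sqrt(mse)                      # Root Mean Squared Error, in dollars
mae = np.mean(np.abs(y_true - y_pred))   # Mean Absolute Error, in dollars
print(mse, rmse, mae)                    # 200000000.0  ~14142.14  ~13333.33
```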
###Code
# Importamos las métricas desde scikit-learn
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
###Output
_____no_output_____
###Markdown
All of these functions take as arguments: `y_true`: vector/array/series of the real values of Y`y_pred`: vector/array/series of the predicted values of YTo obtain the predicted values of y we will use the `predict()` method of the models we have created.
###Code
# Predecimos los valores de y con nuestro modelo
y_train_sup_baños = modelo_lineal_sup_baños.predict(X_train_modelo_sup_baños)
y_train_sup_baños
# Calculamos R cuadrado
r2_score(y_train, y_train_sup_baños)
# Calculamos MSE
mean_squared_error(y_train, y_train_sup_baños)
# Calculamos RMSE
np.sqrt(mean_squared_error(y_train, y_train_sup_baños))
# Calculamos MAE
mean_absolute_error(y_train, y_train_sup_baños)
###Output
_____no_output_____
###Markdown
Since we want to obtain these 4 metrics for our models, we can create a function that computes them and returns a dataframe
###Code
def obtener_metricas_performance(y_verdadera, y_predicha, tipo_dataset):
r2 = r2_score(y_verdadera, y_predicha)
mse = mean_squared_error(y_verdadera, y_predicha)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_verdadera, y_predicha)
return pd.DataFrame({'metrica': ['R2', 'MSE', 'RMSE', 'MAE'],
'valor':[r2, mse, rmse, mae],
'tipo_dataset':tipo_dataset})
# Obtenemos nuestro dataframe de métricas de performance
performance_train_sup_baños = obtener_metricas_performance(y_train, y_train_sup_baños,'entrenamiento')
performance_train_sup_baños
# Ahora observemos las métricas del modelo de interacción en entrenamiento
y_train_interaccion = modelo_lineal_interaccion.predict(X_train_modelo_interaccion)
performance_train_interaccion = obtener_metricas_performance(y_train, y_train_interaccion, 'entrenamiento')
performance_train_interaccion
###Output
_____no_output_____
###Markdown
Now let's look at the performance metrics of these two models on the evaluation dataset
###Code
# Creamos la matrix de X para el modelo de superficie y baños
X_test_sup_baños = X_test[['surface_total', 'bathrooms']]
# Predecimos los valores
y_test_sup_baños = modelo_lineal_sup_baños.predict(X_test_sup_baños)
# Obtenemos nuestro dataframe de métricas de performance
performance_test_sup_baños =obtener_metricas_performance(y_test, y_test_sup_baños, 'evaluacion')
# Mostramos en conjunto las métricas para entrenamiento y evaluación
pd.concat([performance_train_sup_baños,performance_test_sup_baños])
###Output
_____no_output_____
###Markdown
Let's do the same for the interaction model. First we must generate the binary variables for the property type and the interaction variables in order to use the model
###Code
# Generamos las variables dummies de la variable property type (notemos que tenemos 2 columnas!)
matriz_dummies_test = one_hot_encoder.transform(X_test[['property_type']]).toarray()
# Generamos el dataframe con las variables dummies con las matrices y columnas
df_dummies_test = pd.DataFrame(matriz_dummies_test, columns=nombres_dummies, index=X_test.index)
# Agregamos la información a nuestra matriz de variables predictoras
X_test = X_test.join(df_dummies_test)
# Creamos las dos variables de interacción
X_test['interaccion_sup_casa'] = X_test['tipo_Casa'] * X_test['surface_total']
X_test['interaccion_sup_depto'] = X_test['tipo_Departamento'] * X_test['surface_total']
# Vemos el dataframe
X_test.head()
# Generamos el dataset de predictoras
X_test_interaccion = X_test[['surface_total', 'tipo_Casa', 'tipo_Departamento', 'interaccion_sup_casa', 'interaccion_sup_depto']]
# Predecimos los valores
y_test_interaccion = modelo_lineal_interaccion.predict(X_test_interaccion)
# Obtenemos nuestro dataframe de métricas de performance
performance_test_interaccion = obtener_metricas_performance(y_test, y_test_interaccion, 'evaluacion')
# Mostramos en conjunto las métricas para entrenamiento y evaluación
pd.concat([performance_train_interaccion, performance_test_interaccion])
###Output
_____no_output_____
###Markdown
5. DiagnosticsIn this section we will plot the residuals against the predicted values to check whether or not these two models satisfy the assumptions of the linear model (ideally the residuals should scatter randomly around zero with roughly constant variance; a funnel shape or curvature suggests the assumptions are violated)
###Code
# Calculamos los residuos para el modelo de superficie y baños
residuos_sup_baños = y_train - y_train_sup_baños
# Realizamos el gráfico
plt.figure(figsize=(10,7))
plt.scatter(x=y_train_sup_baños, y=residuos_sup_baños,
alpha=0.6, c='royalblue', edgecolor='black')
plt.axhline(y=0, c='black', ls='--', linewidth=2.5)
plt.title("Modelo Superficie y Baños");
# Calculamos los residuos para el modelo de interacción
residuos_interaccion = y_train - y_train_interaccion
# Realizamos el gráfico
plt.figure(figsize=(10,7))
plt.scatter(x=y_train_sup_baños, y=residuos_interaccion,
alpha=0.6, c='green', edgecolor='black')
plt.axhline(y=0, c='black', ls='--', linewidth=2.5)
plt.title("Modelo Interacción");
###Output
_____no_output_____ |
vit_jax.ipynb | ###Markdown
See https://github.com/google-research/vision_transformer/TODO: Add arxiv linkThis Colab allows you to run the [JAX](https://jax.readthedocs.org) implementation of the Vision Transformer. Copyright 2020 Google LLC.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
SetupNeeds to be executed once in every VM.The cell below downloads the code from Github and install necessary dependencies.
###Code
#@markdown Select whether you would like to store data in your personal drive.
#@markdown
#@markdown If you select **yes**, you will need to authorize Colab to access
#@markdown your personal drive
#@markdown
#@markdown If you select **no**, then any changes you make will diappear when
#@markdown this Colab's VM restarts after some time of inactivity...
use_gdrive = 'no' #@param ["yes", "no"]
if use_gdrive == 'yes':
from google.colab import drive
drive.mount('/gdrive')
root = '/gdrive/My Drive/vision_transformer_colab'
import os
if not os.path.isdir(root):
os.mkdir(root)
os.chdir(root)
print(f'\nChanged CWD to "{root}"')
else:
from IPython import display
display.display(display.HTML(
'<h1 style="color:red">CHANGES NOT PERSISTED</h1>'))
# Clone repository and pull latest changes.
![ -d vision_transformer ] || git clone --depth=1 https://github.com/google-research/vision_transformer
!cd vision_transformer && git pull
!pip install -qr vision_transformer/vit_jax/requirements.txt
###Output
_____no_output_____
###Markdown
Imports
###Code
# Shows all available pre-trained models.
!gsutil ls -lh gs://vit_models/*
# Download a pre-trained model.
model = 'ViT-B_16'
![ -e "$model".npz ] || gsutil cp gs://vit_models/imagenet21k/"$model".npz .
#@markdown TPU setup : Boilerplate for connecting JAX to TPU.
import os
if 'google.colab' in str(get_ipython()) and 'COLAB_TPU_ADDR' in os.environ:
# Make sure the Colab Runtime is set to Accelerator: TPU.
import requests
if 'TPU_DRIVER_MODE' not in globals():
url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206'
resp = requests.post(url)
TPU_DRIVER_MODE = 1
# The following is required to use TPU Driver as JAX's backend.
from jax.config import config
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
print('Registered TPU:', config.FLAGS.jax_backend_target)
else:
print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
import flax
import jax
from matplotlib import pyplot as plt
import numpy as np
import tqdm
# Shows the number of available devices.
# In a CPU/GPU runtime this will be a single device.
# In a TPU runtime this will be 8 cores.
jax.local_devices()
# Open some code files in a split editor on the right.
# You can open more files in the file tab on the left.
from google.colab import files
files.view('vision_transformer/vit_jax/checkpoint.py')
files.view('vision_transformer/vit_jax/input_pipeline.py')
files.view('vision_transformer/vit_jax/models.py')
files.view('vision_transformer/vit_jax/momentum_hp.py')
files.view('vision_transformer/vit_jax/train.py')
files.view('vision_transformer/vit_jax/hyper.py')
# Import files from repository.
# Updating the files in the editor on the right will immediately update the
# modules by re-importing them.
import sys
if './vision_transformer' not in sys.path:
sys.path.append('./vision_transformer')
%load_ext autoreload
%autoreload 2
from vit_jax import checkpoint
from vit_jax import hyper
from vit_jax import input_pipeline
from vit_jax import logging
from vit_jax import models
from vit_jax import momentum_hp
from vit_jax import train
logger = logging.setup_logger('./logs')
# Helper functions for images.
labelnames = dict(
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar10=('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar100=('apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'computer_keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm')
)
def make_label_getter(dataset):
"""Returns a function converting label indices to names."""
def getter(label):
if dataset in labelnames:
return labelnames[dataset][label]
return f'label={label}'
return getter
def show_img(img, ax=None, title=None):
"""Shows a single image."""
if ax is None:
ax = plt.gca()
ax.imshow(img[...])
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def show_img_grid(imgs, titles):
"""Shows a grid of images."""
n = int(np.ceil(len(imgs)**.5))
_, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))
for i, (img, title) in enumerate(zip(imgs, titles)):
img = (img + 1) / 2 # Denormalize
show_img(img, axs[i // n][i % n], title)
###Output
_____no_output_____
###Markdown
Load dataset
###Code
dataset = 'cifar10'
batch_size = 512 # Reduce to 256 if running on a single GPU.
# Note the datasets are configured in input_pipeline.DATASET_PRESETS
# Have a look in the editor at the right.
num_classes = input_pipeline.get_dataset_info(dataset, 'train')['num_classes']
# tf.data.Dataset for training, infinite repeats.
ds_train = input_pipeline.get_data(
dataset=dataset, mode='train', repeats=None, batch_size=batch_size,
)
# tf.data.Datset for evaluation, single repeat.
ds_test = input_pipeline.get_data(
dataset=dataset, mode='test', repeats=1, batch_size=batch_size,
)
# Fetch a batch of test images for illustration purposes.
batch = next(iter(ds_test.as_numpy_iterator()))
# Note the shape : [num_local_devices, local_batch_size, h, w, c]
batch['image'].shape
# Show some images with their labels.
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
# Same as above, but with train images.
# Do you spot a difference?
# Check out input_pipeline.get_data() in the editor at your right to see how the
# images are preprocessed differently.
batch = next(iter(ds_train.as_numpy_iterator()))
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
###Output
_____no_output_____
###Markdown
Load pre-trained
###Code
# Load model definition & initialize random parameters.
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=num_classes)
_, params = VisionTransformer.init_by_shape(
jax.random.PRNGKey(0),
# Discard the "num_local_devices" dimension of the batch for initialization.
[(batch['image'].shape[1:], batch['image'].dtype.name)])
# Load and convert pretrained checkpoint.
# This involves loading the actual pre-trained model results, but then also
# modifying the parameters a bit, e.g. changing the final layers, and resizing
# the positional embeddings.
# For details, refer to the code and to the methods of the paper.
params = checkpoint.load_pretrained(
pretrained_path=f'{model}.npz',
init_params=params,
model_config=models.CONFIGS[model],
logger=logger,
)
###Output
2020-10-22 15:57:49,688 [WARNING] vit_jax.logging: Inspect recovered empty keys:
{'pre_logits'}
2020-10-22 15:57:49,692 [INFO] vit_jax.logging: Inspect extra keys:
{'pre_logits/kernel', 'pre_logits/bias'}
2020-10-22 15:57:49,702 [INFO] vit_jax.logging: Resformer: drop-head variant
2020-10-22 15:57:49,711 [INFO] vit_jax.logging: Resformer: resized variant: (1, 197, 768) to (1, 577, 768)
2020-10-22 15:57:49,712 [INFO] vit_jax.logging: Resformer: grid-size from 14 to 24
###Markdown
Evaluate
###Code
# So far, all our data is in the host memory. Let's now replicate the arrays
# into the devices.
# This will make every array in the pytree params become a ShardedDeviceArray
# that has the same data replicated across all local devices.
# For TPU it replicates the params in every core.
# For a single GPU this simply moves the data onto the device.
# For CPU it simply creates a copy.
params_repl = flax.jax_utils.replicate(params)
print('params.cls:', type(params['cls']).__name__, params['cls'].shape)
print('params_repl.cls:', type(params_repl['cls']).__name__, params_repl['cls'].shape)
# Then map the call to our model's forward pass onto all available devices.
vit_apply_repl = jax.pmap(VisionTransformer.call)
def get_accuracy(params_repl):
"""Returns accuracy evaluated on the test set."""
good = total = 0
steps = input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size
for _, batch in zip(tqdm.notebook.trange(steps), ds_test.as_numpy_iterator()):
predicted = vit_apply_repl(params_repl, batch['image'])
is_same = predicted.argmax(axis=-1) == batch['label'].argmax(axis=-1)
good += is_same.sum()
total += len(is_same.flatten())
return good / total
# Random performance without fine-tuning.
get_accuracy(params_repl)
###Output
2020-10-22 16:19:29,245 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Fine-tune
###Code
# 100 Steps take approximately 15 minutes in the TPU runtime.
total_steps = 100
warmup_steps = 5
decay_type = 'cosine'
grad_norm_clip = 1
# This controls in how many forward passes the batch is split. 8 works well with
# a TPU runtime that has 8 devices. 64 should work on a GPU. You can of course
# also adjust the batch_size above, but that would require you to adjust the
# learning rate accordingly.
accum_steps = 8
base_lr = 0.03
# Check out train.make_update_fn in the editor on the right side for details.
update_fn_repl = train.make_update_fn(VisionTransformer.call, accum_steps)
# We use a momentum optimizer that uses half precision for state to save
# memory. It als implements the gradient clipping.
opt = momentum_hp.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
lr_fn = hyper.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)
# Prefetch entire learning rate schedule onto devices. Otherwise we would have
# a slow transfer from host to devices in every step.
lr_iter = hyper.lr_prefetch_iter(lr_fn, 0, total_steps)
# Initialize PRNGs for dropout.
update_rngs = jax.random.split(jax.random.PRNGKey(0), jax.local_device_count())
# The world's simplest training loop.
# Completes in ~20 min on the TPU runtime.
for step, batch, lr_repl in zip(
tqdm.notebook.trange(1, total_steps + 1),
ds_train.as_numpy_iterator(),
lr_iter
):
opt_repl, loss_repl, update_rngs = update_fn_repl(
opt_repl, lr_repl, batch, update_rngs)
# Should be ~97.2% for CIFAR10
# Should be ~71.2% for CIFAR10
get_accuracy(opt_repl.target)
###Output
2020-10-22 16:49:53,565 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Inference
###Code
# Download model pre-trained on imagenet21k and fine-tuned on imagenet2012.
![ -e "$model"_imagenet2012.npz ] || gsutil cp gs://vit_models/imagenet21k+imagenet2012/"$model".npz "$model"_imagenet2012.npz
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=1000)
# Load and convert pretrained checkpoint.
params = checkpoint.load(f'{model}_imagenet2012.npz')
params['pre_logits'] = {} # Need to restore empty leaf for Flax.
# Get imagenet labels.
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt
imagenet_labels = dict(enumerate(open('ilsvrc2012_wordnet_lemmas.txt')))
# Get a random picture with the correct dimensions.
!wget https://picsum.photos/384 -O picsum.jpg
import PIL
img = PIL.Image.open('picsum.jpg')
img
# Predict on a batch with a single item (note very efficient TPU usage...)
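# The model expects inputs roughly in [-1, 1]: dividing the uint8 pixels by 128
# and subtracting 1 maps [0, 255] into that range, and [None, ...] adds the
# leading batch dimension.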
logits, = VisionTransformer.call(params, (np.array(img) / 128 - 1)[None, ...])
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
###Output
0.76433 : convertible
0.01839 : beach_wagon, station_wagon, wagon, estate_car, beach_waggon, station_waggon, waggon
0.01566 : car_mirror
0.01226 : cab, hack, taxi, taxicab
0.01132 : limousine, limo
0.01067 : golfcart, golf_cart
0.01041 : recreational_vehicle, RV, R.V.
0.01026 : Model_T
0.00805 : minibus
0.00767 : odometer, hodometer, mileometer, milometer
###Markdown
**NOTE** Currently this notebook runs with MlpMixer on GPUs and TPUs, but VisionTransformers only run on GPUs. This is due to a temporary regression in the TPUNode setup that is used for Colab and will be fixed soon. See code at https://github.com/google-research/vision_transformer/ See papers at - Vision Transformer: https://arxiv.org/abs/2010.11929 - MLP-Mixer: https://arxiv.org/abs/2105.01601 - How to train your ViT: https://arxiv.org/abs/2106.10270 - When Vision Transformers Outperform ResNets without Pretraining or Strong Data Augmentations: https://arxiv.org/abs/2106.01548 This Colab allows you to run the [JAX](https://jax.readthedocs.org) implementation of the Vision Transformer. If you just want to load a pre-trained checkpoint from a large repository and directly use it for inference, you probably want to go to the other Colab: https://colab.sandbox.google.com/github/google-research/vision_transformer/blob/linen/vit_jax_augreg.ipynb Copyright 2021 Google LLC.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Setup. Needs to be executed once in every VM. The cell below downloads the code from GitHub and installs the necessary dependencies.
###Code
#@markdown Select whether you would like to store data in your personal drive.
#@markdown
#@markdown If you select **yes**, you will need to authorize Colab to access
#@markdown your personal drive
#@markdown
#@markdown If you select **no**, then any changes you make will disappear when
#@markdown this Colab's VM restarts after some time of inactivity...
use_gdrive = 'no' #@param ["yes", "no"]
if use_gdrive == 'yes':
from google.colab import drive
drive.mount('/gdrive')
root = '/gdrive/My Drive/vision_transformer_colab'
import os
if not os.path.isdir(root):
os.mkdir(root)
os.chdir(root)
print(f'\nChanged CWD to "{root}"')
else:
from IPython import display
display.display(display.HTML(
'<h1 style="color:red">CHANGES NOT PERSISTED</h1>'))
# Clone repository and pull latest changes.
![ -d vision_transformer ] || git clone --depth=1 https://github.com/google-research/vision_transformer
!cd vision_transformer && git pull
!pip install -qr vision_transformer/vit_jax/requirements.txt
###Output
_____no_output_____
###Markdown
Imports
###Code
# Shows all available pre-trained models.
!gsutil ls -lh gs://vit_models/imagenet*
!gsutil ls -lh gs://vit_models/sam
!gsutil ls -lh gs://mixer_models/*
# Download a pre-trained model.
# Note: you can really choose any of the above, but this Colab has been tested
# with the models in the selection below...
model_name = 'ViT-B_32' #@param ["ViT-B_32", "Mixer-B_16"]
if model_name.startswith('ViT'):
![ -e "$model_name".npz ] || gsutil cp gs://vit_models/imagenet21k/"$model_name".npz .
if model_name.startswith('Mixer'):
![ -e "$model_name".npz ] || gsutil cp gs://mixer_models/imagenet21k/"$model_name".npz .
import os
assert os.path.exists(f'{model_name}.npz')
# Google Colab "TPU" runtimes are configured in "2VM mode", meaning that JAX
# cannot see the TPUs because they're not directly attached. Instead we need to
# setup JAX to communicate with a second machine that has the TPUs attached.
import os
if 'google.colab' in str(get_ipython()) and 'COLAB_TPU_ADDR' in os.environ:
import jax
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
print('Connected to TPU.')
else:
print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
from absl import logging
import flax
import jax
from matplotlib import pyplot as plt
import numpy as np
import tqdm
logging.set_verbosity(logging.INFO)
# Shows the number of available devices.
# In a CPU/GPU runtime this will be a single device.
# In a TPU runtime this will be 8 cores.
jax.local_devices()
# Open some code files in a split editor on the right.
# You can open more files in the file tab on the left.
from google.colab import files
files.view('vision_transformer/vit_jax/configs/common.py')
files.view('vision_transformer/vit_jax/configs/models.py')
files.view('vision_transformer/vit_jax/checkpoint.py')
files.view('vision_transformer/vit_jax/input_pipeline.py')
files.view('vision_transformer/vit_jax/models.py')
files.view('vision_transformer/vit_jax/momentum_clip.py')
files.view('vision_transformer/vit_jax/train.py')
# Import files from repository.
# Updating the files in the editor on the right will immediately update the
# modules by re-importing them.
import sys
if './vision_transformer' not in sys.path:
sys.path.append('./vision_transformer')
%load_ext autoreload
%autoreload 2
from vit_jax import checkpoint
from vit_jax import input_pipeline
from vit_jax import utils
from vit_jax import models
from vit_jax import momentum_clip
from vit_jax import train
from vit_jax.configs import common as common_config
from vit_jax.configs import models as models_config
# Helper functions for images.
labelnames = dict(
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar10=('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar100=('apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'computer_keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm')
)
def make_label_getter(dataset):
"""Returns a function converting label indices to names."""
def getter(label):
if dataset in labelnames:
return labelnames[dataset][label]
return f'label={label}'
return getter
def show_img(img, ax=None, title=None):
"""Shows a single image."""
if ax is None:
ax = plt.gca()
ax.imshow(img[...])
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def show_img_grid(imgs, titles):
"""Shows a grid of images."""
n = int(np.ceil(len(imgs)**.5))
_, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))
for i, (img, title) in enumerate(zip(imgs, titles)):
img = (img + 1) / 2 # Denormalize
show_img(img, axs[i // n][i % n], title)
###Output
_____no_output_____
###Markdown
Load dataset
###Code
dataset = 'cifar10'
batch_size = 512
config = common_config.with_dataset(common_config.get_config(), dataset)
num_classes = input_pipeline.get_dataset_info(dataset, 'train')['num_classes']
config.batch = batch_size
config.pp.crop = 224
# For details about setting up datasets, see input_pipeline.py on the right.
ds_train = input_pipeline.get_data_from_tfds(config=config, mode='train')
ds_test = input_pipeline.get_data_from_tfds(config=config, mode='test')
del config # Only needed to instantiate datasets.
# Fetch a batch of test images for illustration purposes.
batch = next(iter(ds_test.as_numpy_iterator()))
# Note the shape : [num_local_devices, local_batch_size, h, w, c]
batch['image'].shape
# Show some images with their labels.
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
# Same as above, but with train images.
# Note how images are cropped/scaled differently.
# Check out input_pipeline.get_data() in the editor at your right to see how the
# images are preprocessed differently.
batch = next(iter(ds_train.as_numpy_iterator()))
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
###Output
_____no_output_____
###Markdown
Load pre-trained
###Code
model_config = models_config.MODEL_CONFIGS[model_name]
model_config
# Load model definition & initialize random parameters.
# This also compiles the model to XLA (takes some minutes the first time).
if model_name.startswith('Mixer'):
model = models.MlpMixer(num_classes=num_classes, **model_config)
else:
model = models.VisionTransformer(num_classes=num_classes, **model_config)
variables = jax.jit(lambda: model.init(
jax.random.PRNGKey(0),
# Discard the "num_local_devices" dimension of the batch for initialization.
batch['image'][0, :1],
train=False,
), backend='cpu')()
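# Note: backend='cpu' runs this init on the host; the randomly initialized
# parameters are (presumably) only needed as a template for the checkpoint
# loading below, so there is no need to place them on the accelerator.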
# Load and convert pretrained checkpoint.
# This involves loading the actual pre-trained model results, but then also
# modifying the parameters a bit, e.g. changing the final layers, and resizing
# the positional embeddings.
# For details, refer to the code and to the methods of the paper.
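# A rough sketch of the positional-embedding resize mentioned above (the exact
# code lives in checkpoint.py and may differ; `old`, `new` and the use of
# jax.image.resize are assumptions for illustration only):
#
#   cls_tok, grid = posemb[:, :1], posemb[0, 1:]
#   grid = grid.reshape(old, old, -1)
#   grid = jax.image.resize(grid, (new, new, grid.shape[-1]), method='bilinear')
#   posemb = np.concatenate([cls_tok, grid.reshape(1, new * new, -1)], axis=1)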
params = checkpoint.load_pretrained(
pretrained_path=f'{model_name}.npz',
init_params=variables['params'],
model_config=model_config,
)
###Output
INFO:absl:Inspect extra keys:
{'pre_logits/bias', 'pre_logits/kernel'}
INFO:absl:load_pretrained: drop-head variant
###Markdown
Evaluate
###Code
# So far, all our data is in the host memory. Let's now replicate the arrays
# into the devices.
# This will make every array in the pytree params become a ShardedDeviceArray
# that has the same data replicated across all local devices.
# For TPU it replicates the params in every core.
# For a single GPU this simply moves the data onto the device.
# For CPU it simply creates a copy.
params_repl = flax.jax_utils.replicate(params)
print('params.head.bias:', type(params['head']['bias']).__name__,
      params['head']['bias'].shape)
print('params_repl.head.bias:', type(params_repl['head']['bias']).__name__,
      params_repl['head']['bias'].shape)
# Then map the call to our model's forward pass onto all available devices.
vit_apply_repl = jax.pmap(lambda params, inputs: model.apply(
dict(params=params), inputs, train=False))
def get_accuracy(params_repl):
"""Returns accuracy evaluated on the test set."""
good = total = 0
steps = input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size
for _, batch in zip(tqdm.trange(steps), ds_test.as_numpy_iterator()):
predicted = vit_apply_repl(params_repl, batch['image'])
is_same = predicted.argmax(axis=-1) == batch['label'].argmax(axis=-1)
good += is_same.sum()
total += len(is_same.flatten())
return good / total
# Random performance without fine-tuning.
get_accuracy(params_repl)
###Output
INFO:absl:Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
100%|██████████| 19/19 [01:07<00:00, 3.58s/it]
###Markdown
Fine-tune
###Code
# 100 Steps take approximately 15 minutes in the TPU runtime.
total_steps = 100
warmup_steps = 5
decay_type = 'cosine'
grad_norm_clip = 1
# This controls how many forward passes the batch is split into. 8 works well with
# a TPU runtime that has 8 devices. 64 should work on a GPU. You can of course
# also adjust the batch_size above, but that would require you to adjust the
# learning rate accordingly.
accum_steps = 8
base_lr = 0.03
# Check out train.make_update_fn in the editor on the right side for details.
lr_fn = utils.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)
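# Roughly, the schedule ramps the learning rate linearly from 0 to base_lr over
# warmup_steps and then decays it along a cosine curve towards 0 at total_steps,
# i.e. approximately
#   lr(t) ~ base_lr * min(t / warmup_steps, 1) * 0.5 * (1 + cos(pi * t / total_steps))
# (see utils.create_learning_rate_schedule for the exact definition).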
update_fn_repl = train.make_update_fn(
apply_fn=model.apply, accum_steps=accum_steps, lr_fn=lr_fn)
# We use a momentum optimizer that uses half precision for state to save
# memory. It also implements gradient clipping.
opt = momentum_clip.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
# Initialize PRNGs for dropout.
update_rng_repl = flax.jax_utils.replicate(jax.random.PRNGKey(0))
losses = []
lrs = []
# Completes in ~20 min on the TPU runtime.
for step, batch in zip(
tqdm.trange(1, total_steps + 1),
ds_train.as_numpy_iterator(),
):
opt_repl, loss_repl, update_rng_repl = update_fn_repl(
opt_repl, flax.jax_utils.replicate(step), batch, update_rng_repl)
losses.append(loss_repl[0])
lrs.append(lr_fn(step))
plt.plot(losses)
plt.figure()
plt.plot(lrs)
# Should be ~96.7% for Mixer-B/16 or 97.7% for ViT-B/32 on CIFAR10 (both @224)
get_accuracy(opt_repl.target)
###Output
INFO:absl:Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
100%|██████████| 19/19 [00:32<00:00, 1.73s/it]
###Markdown
Inference
###Code
# Download a pre-trained model.
if model_name.startswith('Mixer'):
# Download model trained on imagenet2012
![ -e "$model_name"_imagenet2012.npz ] || gsutil cp gs://mixer_models/imagenet1k/"$model_name".npz "$model_name"_imagenet2012.npz
model = models.MlpMixer(num_classes=1000, **model_config)
else:
# Download model pre-trained on imagenet21k and fine-tuned on imagenet2012.
![ -e "$model_name"_imagenet2012.npz ] || gsutil cp gs://vit_models/imagenet21k+imagenet2012/"$model_name".npz "$model_name"_imagenet2012.npz
model = models.VisionTransformer(num_classes=1000, **model_config)
import os
assert os.path.exists(f'{model_name}_imagenet2012.npz')
# Load and convert pretrained checkpoint.
params = checkpoint.load(f'{model_name}_imagenet2012.npz')
params['pre_logits'] = {} # Need to restore empty leaf for Flax.
# Get imagenet labels.
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt
imagenet_labels = dict(enumerate(open('ilsvrc2012_wordnet_lemmas.txt')))
# Get a random picture with the correct dimensions.
resolution = 224 if model_name.startswith('Mixer') else 384
!wget https://picsum.photos/$resolution -O picsum.jpg
import PIL
img = PIL.Image.open('picsum.jpg')
img
# Predict on a batch with a single item (note very efficient TPU usage...)
logits, = model.apply(dict(params=params), (np.array(img) / 128 - 1)[None, ...], train=False)
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
###Output
0.13330 : sandbar, sand_bar
0.09332 : seashore, coast, seacoast, sea-coast
0.05257 : jeep, landrover
0.05188 : Arabian_camel, dromedary, Camelus_dromedarius
0.01251 : horned_viper, cerastes, sand_viper, horned_asp, Cerastes_cornutus
0.00753 : tiger_beetle
0.00744 : dung_beetle
0.00711 : sidewinder, horned_rattlesnake, Crotalus_cerastes
0.00703 : leatherback_turtle, leatherback, leathery_turtle, Dermochelys_coriacea
0.00647 : pole
###Markdown
See code at https://github.com/google-research/vision_transformer/ See paper at https://arxiv.org/abs/2010.11929 This Colab allows you to run the [JAX](https://jax.readthedocs.org) implementation of the Vision Transformer. Copyright 2020 Google LLC.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Setup. Needs to be executed once in every VM. The cell below downloads the code from GitHub and installs the necessary dependencies.
###Code
#@markdown Select whether you would like to store data in your personal drive.
#@markdown
#@markdown If you select **yes**, you will need to authorize Colab to access
#@markdown your personal drive
#@markdown
#@markdown If you select **no**, then any changes you make will diappear when
#@markdown this Colab's VM restarts after some time of inactivity...
use_gdrive = 'no' #@param ["yes", "no"]
if use_gdrive == 'yes':
from google.colab import drive
drive.mount('/gdrive')
root = '/gdrive/My Drive/vision_transformer_colab'
import os
if not os.path.isdir(root):
os.mkdir(root)
os.chdir(root)
print(f'\nChanged CWD to "{root}"')
else:
from IPython import display
display.display(display.HTML(
'<h1 style="color:red">CHANGES NOT PERSISTED</h1>'))
# Clone repository and pull latest changes.
![ -d vision_transformer ] || git clone --depth=1 https://github.com/google-research/vision_transformer
!cd vision_transformer && git pull
!pip install -qr vision_transformer/vit_jax/requirements.txt
###Output
_____no_output_____
###Markdown
Imports
###Code
# Shows all available pre-trained models.
!gsutil ls -lh gs://vit_models/*
# Download a pre-trained model.
model = 'ViT-B_16'
![ -e "$model".npz ] || gsutil cp gs://vit_models/imagenet21k/"$model".npz .
#@markdown TPU setup : Boilerplate for connecting JAX to TPU.
import os
if 'google.colab' in str(get_ipython()) and 'COLAB_TPU_ADDR' in os.environ:
# Make sure the Colab Runtime is set to Accelerator: TPU.
import requests
if 'TPU_DRIVER_MODE' not in globals():
url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206'
resp = requests.post(url)
TPU_DRIVER_MODE = 1
# The following is required to use TPU Driver as JAX's backend.
from jax.config import config
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
print('Registered TPU:', config.FLAGS.jax_backend_target)
else:
print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
import flax
import jax
from matplotlib import pyplot as plt
import numpy as np
import tqdm
# Shows the number of available devices.
# In a CPU/GPU runtime this will be a single device.
# In a TPU runtime this will be 8 cores.
jax.local_devices()
# Open some code files in a split editor on the right.
# You can open more files in the file tab on the left.
from google.colab import files
files.view('vision_transformer/vit_jax/checkpoint.py')
files.view('vision_transformer/vit_jax/input_pipeline.py')
files.view('vision_transformer/vit_jax/models.py')
files.view('vision_transformer/vit_jax/momentum_clip.py')
files.view('vision_transformer/vit_jax/train.py')
files.view('vision_transformer/vit_jax/hyper.py')
# Import files from repository.
# Updating the files in the editor on the right will immediately update the
# modules by re-importing them.
import sys
if './vision_transformer' not in sys.path:
sys.path.append('./vision_transformer')
%load_ext autoreload
%autoreload 2
from vit_jax import checkpoint
from vit_jax import hyper
from vit_jax import input_pipeline
from vit_jax import logging
from vit_jax import models
from vit_jax import momentum_clip
from vit_jax import train
logger = logging.setup_logger('./logs')
# Helper functions for images.
labelnames = dict(
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar10=('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar100=('apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'computer_keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm')
)
def make_label_getter(dataset):
"""Returns a function converting label indices to names."""
def getter(label):
if dataset in labelnames:
return labelnames[dataset][label]
return f'label={label}'
return getter
def show_img(img, ax=None, title=None):
"""Shows a single image."""
if ax is None:
ax = plt.gca()
ax.imshow(img[...])
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def show_img_grid(imgs, titles):
"""Shows a grid of images."""
n = int(np.ceil(len(imgs)**.5))
_, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))
for i, (img, title) in enumerate(zip(imgs, titles)):
img = (img + 1) / 2 # Denormalize
show_img(img, axs[i // n][i % n], title)
###Output
_____no_output_____
###Markdown
Load dataset
###Code
dataset = 'cifar10'
batch_size = 512 # Reduce to 256 if running on a single GPU.
# Note the datasets are configured in input_pipeline.DATASET_PRESETS
# Have a look in the editor at the right.
num_classes = input_pipeline.get_dataset_info(dataset, 'train')['num_classes']
# tf.data.Dataset for training, infinite repeats.
ds_train = input_pipeline.get_data(
dataset=dataset, mode='train', repeats=None, batch_size=batch_size,
)
# tf.data.Dataset for evaluation, single repeat.
ds_test = input_pipeline.get_data(
dataset=dataset, mode='test', repeats=1, batch_size=batch_size,
)
# Fetch a batch of test images for illustration purposes.
batch = next(iter(ds_test.as_numpy_iterator()))
# Note the shape : [num_local_devices, local_batch_size, h, w, c]
batch['image'].shape
# Show some images with their labels.
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
# Same as above, but with train images.
# Do you spot a difference?
# Check out input_pipeline.get_data() in the editor at your right to see how the
# images are preprocessed differently.
batch = next(iter(ds_train.as_numpy_iterator()))
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
###Output
_____no_output_____
###Markdown
Load pre-trained
###Code
# Load model definition & initialize random parameters.
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=num_classes)
_, params = VisionTransformer.init_by_shape(
jax.random.PRNGKey(0),
# Discard the "num_local_devices" dimension of the batch for initialization.
[(batch['image'].shape[1:], batch['image'].dtype.name)])
# Load and convert pretrained checkpoint.
# This involves loading the actual pre-trained model results, but then also
# modifying the parameters a bit, e.g. changing the final layers, and resizing
# the positional embeddings.
# For details, refer to the code and to the methods of the paper.
params = checkpoint.load_pretrained(
pretrained_path=f'{model}.npz',
init_params=params,
model_config=models.CONFIGS[model],
logger=logger,
)
###Output
2020-10-22 15:57:49,688 [WARNING] vit_jax.logging: Inspect recovered empty keys:
{'pre_logits'}
2020-10-22 15:57:49,692 [INFO] vit_jax.logging: Inspect extra keys:
{'pre_logits/kernel', 'pre_logits/bias'}
2020-10-22 15:57:49,702 [INFO] vit_jax.logging: Resformer: drop-head variant
2020-10-22 15:57:49,711 [INFO] vit_jax.logging: Resformer: resized variant: (1, 197, 768) to (1, 577, 768)
2020-10-22 15:57:49,712 [INFO] vit_jax.logging: Resformer: grid-size from 14 to 24
###Markdown
Evaluate
###Code
# So far, all our data is in the host memory. Let's now replicate the arrays
# into the devices.
# This will make every array in the pytree params become a ShardedDeviceArray
# that has the same data replicated across all local devices.
# For TPU it replicates the params in every core.
# For a single GPU this simply moves the data onto the device.
# For CPU it simply creates a copy.
params_repl = flax.jax_utils.replicate(params)
print('params.cls:', type(params['cls']).__name__, params['cls'].shape)
print('params_repl.cls:', type(params_repl['cls']).__name__, params_repl['cls'].shape)
# Then map the call to our model's forward pass onto all available devices.
vit_apply_repl = jax.pmap(VisionTransformer.call)
def get_accuracy(params_repl):
"""Returns accuracy evaluated on the test set."""
good = total = 0
steps = input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size
for _, batch in zip(tqdm.notebook.trange(steps), ds_test.as_numpy_iterator()):
predicted = vit_apply_repl(params_repl, batch['image'])
is_same = predicted.argmax(axis=-1) == batch['label'].argmax(axis=-1)
good += is_same.sum()
total += len(is_same.flatten())
return good / total
# Random performance without fine-tuning.
get_accuracy(params_repl)
###Output
2020-10-22 16:19:29,245 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Fine-tune
###Code
# 100 Steps take approximately 15 minutes in the TPU runtime.
total_steps = 100
warmup_steps = 5
decay_type = 'cosine'
grad_norm_clip = 1
# This controls how many forward passes the batch is split into. 8 works well with
# a TPU runtime that has 8 devices. 64 should work on a GPU. You can of course
# also adjust the batch_size above, but that would require you to adjust the
# learning rate accordingly.
accum_steps = 8
base_lr = 0.03
# Check out train.make_update_fn in the editor on the right side for details.
update_fn_repl = train.make_update_fn(VisionTransformer.call, accum_steps)
# We use a momentum optimizer that uses half precision for state to save
# memory. It also implements gradient clipping.
opt = momentum_clip.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
lr_fn = hyper.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)
# Prefetch entire learning rate schedule onto devices. Otherwise we would have
# a slow transfer from host to devices in every step.
lr_iter = hyper.lr_prefetch_iter(lr_fn, 0, total_steps)
# Initialize PRNGs for dropout.
update_rngs = jax.random.split(jax.random.PRNGKey(0), jax.local_device_count())
# The world's simplest training loop.
# Completes in ~20 min on the TPU runtime.
for step, batch, lr_repl in zip(
tqdm.notebook.trange(1, total_steps + 1),
ds_train.as_numpy_iterator(),
lr_iter
):
opt_repl, loss_repl, update_rngs = update_fn_repl(
opt_repl, lr_repl, batch, update_rngs)
# Should be ~97.2% for CIFAR10
# Should be ~71.2% for CIFAR100
get_accuracy(opt_repl.target)
###Output
2020-10-22 16:49:53,565 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Inference
###Code
# Download model pre-trained on imagenet21k and fine-tuned on imagenet2012.
![ -e "$model"_imagenet2012.npz ] || gsutil cp gs://vit_models/imagenet21k+imagenet2012/"$model".npz "$model"_imagenet2012.npz
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=1000)
# Load and convert pretrained checkpoint.
params = checkpoint.load(f'{model}_imagenet2012.npz')
params['pre_logits'] = {} # Need to restore empty leaf for Flax.
# Get imagenet labels.
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt
imagenet_labels = dict(enumerate(open('ilsvrc2012_wordnet_lemmas.txt')))
# Get a random picture with the correct dimensions.
!wget https://picsum.photos/384 -O picsum.jpg
import PIL
img = PIL.Image.open('picsum.jpg')
img
# Predict on a batch with a single item (note very efficient TPU usage...)
logits, = VisionTransformer.call(params, (np.array(img) / 128 - 1)[None, ...])
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
###Output
0.76433 : convertible
0.01839 : beach_wagon, station_wagon, wagon, estate_car, beach_waggon, station_waggon, waggon
0.01566 : car_mirror
0.01226 : cab, hack, taxi, taxicab
0.01132 : limousine, limo
0.01067 : golfcart, golf_cart
0.01041 : recreational_vehicle, RV, R.V.
0.01026 : Model_T
0.00805 : minibus
0.00767 : odometer, hodometer, mileometer, milometer
###Markdown
**NOTE** Currently this notebook runs with MlpMixer on GPUs and TPUs, but VisionTransformers only run on GPUs. This is due to a temporary regression in the TPUNode setup that is used for Colab and will be fixed soon. See code at https://github.com/google-research/vision_transformer/ See papers at - Vision Transformer: https://arxiv.org/abs/2010.11929 - MLP-Mixer: https://arxiv.org/abs/2105.01601 - How to train your ViT: https://arxiv.org/abs/2106.10270 This Colab allows you to run the [JAX](https://jax.readthedocs.org) implementation of the Vision Transformer. If you just want to load a pre-trained checkpoint from a large repository and directly use it for inference, you probably want to go to the other Colab: https://colab.sandbox.google.com/github/google-research/vision_transformer/blob/linen/vit_jax_augreg.ipynb Copyright 2021 Google LLC.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Setup. Needs to be executed once in every VM. The cell below downloads the code from GitHub and installs the necessary dependencies.
###Code
#@markdown Select whether you would like to store data in your personal drive.
#@markdown
#@markdown If you select **yes**, you will need to authorize Colab to access
#@markdown your personal drive
#@markdown
#@markdown If you select **no**, then any changes you make will disappear when
#@markdown this Colab's VM restarts after some time of inactivity...
use_gdrive = 'no' #@param ["yes", "no"]
if use_gdrive == 'yes':
from google.colab import drive
drive.mount('/gdrive')
root = '/gdrive/My Drive/vision_transformer_colab'
import os
if not os.path.isdir(root):
os.mkdir(root)
os.chdir(root)
print(f'\nChanged CWD to "{root}"')
else:
from IPython import display
display.display(display.HTML(
'<h1 style="color:red">CHANGES NOT PERSISTED</h1>'))
# Clone repository and pull latest changes.
![ -d vision_transformer ] || git clone --depth=1 https://github.com/google-research/vision_transformer
!cd vision_transformer && git pull
!pip install -qr vision_transformer/vit_jax/requirements.txt
###Output
_____no_output_____
###Markdown
Imports
###Code
# Shows all available pre-trained models.
!gsutil ls -lh gs://vit_models/imagenet*
!gsutil ls -lh gs://mixer_models/*
# Download a pre-trained model.
# Note: you can really choose any of the above, but this Colab has been tested
# with the models in the selection below...
model_name = 'ViT-B_32' #@param ["ViT-B_32", "Mixer-B_16"]
if model_name.startswith('ViT'):
![ -e "$model_name".npz ] || gsutil cp gs://vit_models/imagenet21k/"$model_name".npz .
if model_name.startswith('Mixer'):
![ -e "$model_name".npz ] || gsutil cp gs://mixer_models/imagenet21k/"$model_name".npz .
import os
assert os.path.exists(f'{model_name}.npz')
# Google Colab "TPU" runtimes are configured in "2VM mode", meaning that JAX
# cannot see the TPUs because they're not directly attached. Instead we need to
# setup JAX to communicate with a second machine that has the TPUs attached.
import os
if 'google.colab' in str(get_ipython()) and 'COLAB_TPU_ADDR' in os.environ:
import jax
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
print('Connected to TPU.')
else:
print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
from absl import logging
import flax
import jax
from matplotlib import pyplot as plt
import numpy as np
import tqdm
logging.set_verbosity(logging.INFO)
# Shows the number of available devices.
# In a CPU/GPU runtime this will be a single device.
# In a TPU runtime this will be 8 cores.
jax.local_devices()
# Open some code files in a split editor on the right.
# You can open more files in the file tab on the left.
from google.colab import files
files.view('vision_transformer/vit_jax/configs/common.py')
files.view('vision_transformer/vit_jax/configs/models.py')
files.view('vision_transformer/vit_jax/checkpoint.py')
files.view('vision_transformer/vit_jax/input_pipeline.py')
files.view('vision_transformer/vit_jax/models.py')
files.view('vision_transformer/vit_jax/momentum_clip.py')
files.view('vision_transformer/vit_jax/train.py')
# Import files from repository.
# Updating the files in the editor on the right will immediately update the
# modules by re-importing them.
import sys
if './vision_transformer' not in sys.path:
sys.path.append('./vision_transformer')
%load_ext autoreload
%autoreload 2
from vit_jax import checkpoint
from vit_jax import input_pipeline
from vit_jax import utils
from vit_jax import models
from vit_jax import momentum_clip
from vit_jax import train
from vit_jax.configs import common as common_config
from vit_jax.configs import models as models_config
# Helper functions for images.
labelnames = dict(
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar10=('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar100=('apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'computer_keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm')
)
def make_label_getter(dataset):
"""Returns a function converting label indices to names."""
def getter(label):
if dataset in labelnames:
return labelnames[dataset][label]
return f'label={label}'
return getter
def show_img(img, ax=None, title=None):
"""Shows a single image."""
if ax is None:
ax = plt.gca()
ax.imshow(img[...])
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def show_img_grid(imgs, titles):
"""Shows a grid of images."""
n = int(np.ceil(len(imgs)**.5))
_, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))
for i, (img, title) in enumerate(zip(imgs, titles)):
img = (img + 1) / 2 # Denormalize
show_img(img, axs[i // n][i % n], title)
###Output
_____no_output_____
###Markdown
Load dataset
###Code
dataset = 'cifar10'
batch_size = 512
config = common_config.with_dataset(common_config.get_config(), dataset)
num_classes = input_pipeline.get_dataset_info(dataset, 'train')['num_classes']
config.batch = batch_size
config.pp.crop = 224
# For details about setting up datasets, see input_pipeline.py on the right.
ds_train = input_pipeline.get_data_from_tfds(config=config, mode='train')
ds_test = input_pipeline.get_data_from_tfds(config=config, mode='test')
del config # Only needed to instantiate datasets.
# Fetch a batch of test images for illustration purposes.
batch = next(iter(ds_test.as_numpy_iterator()))
# Note the shape : [num_local_devices, local_batch_size, h, w, c]
batch['image'].shape
# Show some images with their labels.
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
# Same as above, but with train images.
# Note how images are cropped/scaled differently.
# Check out input_pipeline.get_data() in the editor at your right to see how the
# images are preprocessed differently.
batch = next(iter(ds_train.as_numpy_iterator()))
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
###Output
_____no_output_____
###Markdown
Load pre-trained
###Code
model_config = models_config.MODEL_CONFIGS[model_name]
model_config
# Load model definition & initialize random parameters.
# This also compiles the model to XLA (takes some minutes the first time).
if model_name.startswith('Mixer'):
model = models.MlpMixer(num_classes=num_classes, **model_config)
else:
model = models.VisionTransformer(num_classes=num_classes, **model_config)
variables = jax.jit(lambda: model.init(
jax.random.PRNGKey(0),
# Discard the "num_local_devices" dimension of the batch for initialization.
batch['image'][0, :1],
train=False,
), backend='cpu')()
# Load and convert pretrained checkpoint.
# This involves loading the actual pre-trained model results, but then also
# modifying the parameters a bit, e.g. changing the final layers, and resizing
# the positional embeddings.
# For details, refer to the code and to the methods of the paper.
params = checkpoint.load_pretrained(
pretrained_path=f'{model_name}.npz',
init_params=variables['params'],
model_config=model_config,
)
###Output
INFO:absl:Inspect extra keys:
{'pre_logits/bias', 'pre_logits/kernel'}
INFO:absl:load_pretrained: drop-head variant
###Markdown
Evaluate
###Code
# So far, all our data is in the host memory. Let's now replicate the arrays
# into the devices.
# This will make every array in the pytree params become a ShardedDeviceArray
# that has the same data replicated across all local devices.
# For TPU it replicates the params in every core.
# For a single GPU this simply moves the data onto the device.
# For CPU it simply creates a copy.
params_repl = flax.jax_utils.replicate(params)
print('params.head.bias:', type(params['head']['bias']).__name__,
      params['head']['bias'].shape)
print('params_repl.head.bias:', type(params_repl['head']['bias']).__name__,
      params_repl['head']['bias'].shape)
# Then map the call to our model's forward pass onto all available devices.
vit_apply_repl = jax.pmap(lambda params, inputs: model.apply(
dict(params=params), inputs, train=False))
def get_accuracy(params_repl):
"""Returns accuracy evaluated on the test set."""
good = total = 0
steps = input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size
for _, batch in zip(tqdm.trange(steps), ds_test.as_numpy_iterator()):
predicted = vit_apply_repl(params_repl, batch['image'])
is_same = predicted.argmax(axis=-1) == batch['label'].argmax(axis=-1)
good += is_same.sum()
total += len(is_same.flatten())
return good / total
# Random performance without fine-tuning.
get_accuracy(params_repl)
###Output
INFO:absl:Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
100%|██████████| 19/19 [01:07<00:00, 3.58s/it]
###Markdown
Fine-tune
###Code
# 100 Steps take approximately 15 minutes in the TPU runtime.
total_steps = 100
warmup_steps = 5
decay_type = 'cosine'
grad_norm_clip = 1
# This controls how many forward passes the batch is split into. 8 works well with
# a TPU runtime that has 8 devices. 64 should work on a GPU. You can of course
# also adjust the batch_size above, but that would require you to adjust the
# learning rate accordingly.
accum_steps = 8
base_lr = 0.03
# Check out train.make_update_fn in the editor on the right side for details.
lr_fn = utils.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)
update_fn_repl = train.make_update_fn(
apply_fn=model.apply, accum_steps=accum_steps, lr_fn=lr_fn)
# We use a momentum optimizer that uses half precision for state to save
# memory. It also implements gradient clipping.
opt = momentum_clip.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
# Initialize PRNGs for dropout.
update_rng_repl = flax.jax_utils.replicate(jax.random.PRNGKey(0))
losses = []
lrs = []
# Completes in ~20 min on the TPU runtime.
for step, batch in zip(
tqdm.trange(1, total_steps + 1),
ds_train.as_numpy_iterator(),
):
opt_repl, loss_repl, update_rng_repl = update_fn_repl(
opt_repl, flax.jax_utils.replicate(step), batch, update_rng_repl)
losses.append(loss_repl[0])
lrs.append(lr_fn(step))
plt.plot(losses)
plt.figure()
plt.plot(lrs)
# Should be ~96.7% for Mixer-B/16 or 97.7% for ViT-B/32 on CIFAR10 (both @224)
get_accuracy(opt_repl.target)
###Output
INFO:absl:Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
100%|██████████| 19/19 [00:32<00:00, 1.73s/it]
###Markdown
Inference
###Code
# Download a pre-trained model.
if model_name.startswith('Mixer'):
# Download model trained on imagenet2012
![ -e "$model_name"_imagenet2012.npz ] || gsutil cp gs://mixer_models/imagenet1k/"$model_name".npz "$model_name"_imagenet2012.npz
model = models.MlpMixer(num_classes=1000, **model_config)
else:
# Download model pre-trained on imagenet21k and fine-tuned on imagenet2012.
![ -e "$model_name"_imagenet2012.npz ] || gsutil cp gs://vit_models/imagenet21k+imagenet2012/"$model_name".npz "$model_name"_imagenet2012.npz
model = models.VisionTransformer(num_classes=1000, **model_config)
import os
assert os.path.exists(f'{model_name}_imagenet2012.npz')
# Load and convert pretrained checkpoint.
params = checkpoint.load(f'{model_name}_imagenet2012.npz')
params['pre_logits'] = {} # Need to restore empty leaf for Flax.
# Get imagenet labels.
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt
imagenet_labels = dict(enumerate(open('ilsvrc2012_wordnet_lemmas.txt')))
# Get a random picture with the correct dimensions.
resolution = 224 if model_name.startswith('Mixer') else 384
!wget https://picsum.photos/$resolution -O picsum.jpg
import PIL
img = PIL.Image.open('picsum.jpg')
img
# Predict on a batch with a single item (note very efficient TPU usage...)
logits, = model.apply(dict(params=params), (np.array(img) / 128 - 1)[None, ...], train=False)
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
###Output
0.13330 : sandbar, sand_bar
0.09332 : seashore, coast, seacoast, sea-coast
0.05257 : jeep, landrover
0.05188 : Arabian_camel, dromedary, Camelus_dromedarius
0.01251 : horned_viper, cerastes, sand_viper, horned_asp, Cerastes_cornutus
0.00753 : tiger_beetle
0.00744 : dung_beetle
0.00711 : sidewinder, horned_rattlesnake, Crotalus_cerastes
0.00703 : leatherback_turtle, leatherback, leathery_turtle, Dermochelys_coriacea
0.00647 : pole
###Markdown
See code at https://github.com/google-research/vision_transformer/ See paper at https://arxiv.org/abs/2010.11929 This Colab allows you to run the [JAX](https://jax.readthedocs.org) implementation of the Vision Transformer. Copyright 2020 Google LLC.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Setup. Needs to be executed once in every VM. The cell below downloads the code from GitHub and installs the necessary dependencies.
###Code
#@markdown Select whether you would like to store data in your personal drive.
#@markdown
#@markdown If you select **yes**, you will need to authorize Colab to access
#@markdown your personal drive
#@markdown
#@markdown If you select **no**, then any changes you make will diappear when
#@markdown this Colab's VM restarts after some time of inactivity...
use_gdrive = 'no' #@param ["yes", "no"]
if use_gdrive == 'yes':
from google.colab import drive
drive.mount('/gdrive')
root = '/gdrive/My Drive/vision_transformer_colab'
import os
if not os.path.isdir(root):
os.mkdir(root)
os.chdir(root)
print(f'\nChanged CWD to "{root}"')
else:
from IPython import display
display.display(display.HTML(
'<h1 style="color:red">CHANGES NOT PERSISTED</h1>'))
# Clone repository and pull latest changes.
![ -d vision_transformer ] || git clone --depth=1 https://github.com/google-research/vision_transformer
!cd vision_transformer && git pull
!pip install -qr vision_transformer/vit_jax/requirements.txt
###Output
_____no_output_____
###Markdown
Imports
###Code
# Shows all available pre-trained models.
!gsutil ls -lh gs://vit_models/*
# Download a pre-trained model.
model = 'ViT-B_16'
![ -e "$model".npz ] || gsutil cp gs://vit_models/imagenet21k/"$model".npz .
#@markdown TPU setup : Boilerplate for connecting JAX to TPU.
import os
if 'google.colab' in str(get_ipython()) and 'COLAB_TPU_ADDR' in os.environ:
# Make sure the Colab Runtime is set to Accelerator: TPU.
import requests
if 'TPU_DRIVER_MODE' not in globals():
url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206'
resp = requests.post(url)
TPU_DRIVER_MODE = 1
# The following is required to use TPU Driver as JAX's backend.
from jax.config import config
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
print('Registered TPU:', config.FLAGS.jax_backend_target)
else:
print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
import flax
import jax
from matplotlib import pyplot as plt
import numpy as np
import tqdm
# Shows the number of available devices.
# In a CPU/GPU runtime this will be a single device.
# In a TPU runtime this will be 8 cores.
jax.local_devices()
# Open some code files in a split editor on the right.
# You can open more files in the file tab on the left.
from google.colab import files
files.view('vision_transformer/vit_jax/checkpoint.py')
files.view('vision_transformer/vit_jax/input_pipeline.py')
files.view('vision_transformer/vit_jax/models.py')
files.view('vision_transformer/vit_jax/momentum_clip.py')
files.view('vision_transformer/vit_jax/train.py')
files.view('vision_transformer/vit_jax/hyper.py')
# Import files from repository.
# Updating the files in the editor on the right will immediately update the
# modules by re-importing them.
import sys
if './vision_transformer' not in sys.path:
sys.path.append('./vision_transformer')
%load_ext autoreload
%autoreload 2
from vit_jax import checkpoint
from vit_jax import hyper
from vit_jax import input_pipeline
from vit_jax import logging
from vit_jax import models
from vit_jax import momentum_clip
from vit_jax import train
logger = logging.setup_logger('./logs')
# Helper functions for images.
labelnames = dict(
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar10=('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar100=('apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'computer_keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm')
)
def make_label_getter(dataset):
"""Returns a function converting label indices to names."""
def getter(label):
if dataset in labelnames:
return labelnames[dataset][label]
return f'label={label}'
return getter
def show_img(img, ax=None, title=None):
"""Shows a single image."""
if ax is None:
ax = plt.gca()
ax.imshow(img[...])
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def show_img_grid(imgs, titles):
"""Shows a grid of images."""
n = int(np.ceil(len(imgs)**.5))
_, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))
for i, (img, title) in enumerate(zip(imgs, titles)):
img = (img + 1) / 2 # Denormalize
show_img(img, axs[i // n][i % n], title)
###Output
_____no_output_____
###Markdown
Load dataset
###Code
dataset = 'cifar10'
batch_size = 512 # Reduce to 256 if running on a single GPU.
# Note the datasets are configured in input_pipeline.DATASET_PRESETS
# Have a look in the editor at the right.
num_classes = input_pipeline.get_dataset_info(dataset, 'train')['num_classes']
# tf.data.Dataset for training, infinite repeats.
ds_train = input_pipeline.get_data(
dataset=dataset, mode='train', repeats=None, batch_size=batch_size,
)
# tf.data.Dataset for evaluation, single repeat.
ds_test = input_pipeline.get_data(
dataset=dataset, mode='test', repeats=1, batch_size=batch_size,
)
# Fetch a batch of test images for illustration purposes.
batch = next(iter(ds_test.as_numpy_iterator()))
# Note the shape : [num_local_devices, local_batch_size, h, w, c]
batch['image'].shape
# Show some images with their labels.
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
# Same as above, but with train images.
# Do you spot a difference?
# Check out input_pipeline.get_data() in the editor at your right to see how the
# images are preprocessed differently.
batch = next(iter(ds_train.as_numpy_iterator()))
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
###Output
_____no_output_____
###Markdown
Load pre-trained
###Code
# Load model definition & initialize random parameters.
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=num_classes)
_, params = VisionTransformer.init_by_shape(
jax.random.PRNGKey(0),
# Discard the "num_local_devices" dimension of the batch for initialization.
[(batch['image'].shape[1:], batch['image'].dtype.name)])
# Load and convert pretrained checkpoint.
# This involves loading the actual pre-trained model results, but then also
# modifying the parameters a bit, e.g. changing the final layers, and resizing
# the positional embeddings.
# For details, refer to the code and to the methods of the paper.
params = checkpoint.load_pretrained(
pretrained_path=f'{model}.npz',
init_params=params,
model_config=models.CONFIGS[model],
logger=logger,
)
###Output
2020-10-22 15:57:49,688 [WARNING] vit_jax.logging: Inspect recovered empty keys:
{'pre_logits'}
2020-10-22 15:57:49,692 [INFO] vit_jax.logging: Inspect extra keys:
{'pre_logits/kernel', 'pre_logits/bias'}
2020-10-22 15:57:49,702 [INFO] vit_jax.logging: Resformer: drop-head variant
2020-10-22 15:57:49,711 [INFO] vit_jax.logging: Resformer: resized variant: (1, 197, 768) to (1, 577, 768)
2020-10-22 15:57:49,712 [INFO] vit_jax.logging: Resformer: grid-size from 14 to 24
###Markdown
Evaluate
###Code
# So far, all our data is in the host memory. Let's now replicate the arrays
# into the devices.
# This will make every array in the pytree params become a ShardedDeviceArray
# that has the same data replicated across all local devices.
# For TPU it replicates the params in every core.
# For a single GPU this simply moves the data onto the device.
# For CPU it simply creates a copy.
params_repl = flax.jax_utils.replicate(params)
print('params.cls:', type(params['cls']).__name__, params['cls'].shape)
print('params_repl.cls:', type(params_repl['cls']).__name__, params_repl['cls'].shape)
# Then map the call to our model's forward pass onto all available devices.
vit_apply_repl = jax.pmap(VisionTransformer.call)
def get_accuracy(params_repl):
"""Returns accuracy evaluated on the test set."""
good = total = 0
steps = input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size
for _, batch in zip(tqdm.notebook.trange(steps), ds_test.as_numpy_iterator()):
predicted = vit_apply_repl(params_repl, batch['image'])
is_same = predicted.argmax(axis=-1) == batch['label'].argmax(axis=-1)
good += is_same.sum()
total += len(is_same.flatten())
return good / total
# Random performance without fine-tuning.
get_accuracy(params_repl)
###Output
2020-10-22 16:19:29,245 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Fine-tune
###Code
# 100 Steps take approximately 15 minutes in the TPU runtime.
total_steps = 100
warmup_steps = 5
decay_type = 'cosine'
grad_norm_clip = 1
# This controls how many forward passes the batch is split into. 8 works well with
# a TPU runtime that has 8 devices. 64 should work on a GPU. You can of course
# also adjust the batch_size above, but that would require you to adjust the
# learning rate accordingly.
accum_steps = 8
base_lr = 0.03
# Check out train.make_update_fn in the editor on the right side for details.
update_fn_repl = train.make_update_fn(VisionTransformer.call, accum_steps)
# We use a momentum optimizer that uses half precision for state to save
# memory. It also implements gradient clipping.
opt = momentum_clip.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
lr_fn = hyper.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)
# Prefetch entire learning rate schedule onto devices. Otherwise we would have
# a slow transfer from host to devices in every step.
lr_iter = hyper.lr_prefetch_iter(lr_fn, 0, total_steps)
# Initialize PRNGs for dropout.
update_rngs = jax.random.split(jax.random.PRNGKey(0), jax.local_device_count())
# The world's simplest training loop.
# Completes in ~20 min on the TPU runtime.
for step, batch, lr_repl in zip(
tqdm.notebook.trange(1, total_steps + 1),
ds_train.as_numpy_iterator(),
lr_iter
):
opt_repl, loss_repl, update_rngs = update_fn_repl(
opt_repl, lr_repl, batch, update_rngs)
# Should be ~97.2% for CIFAR10
# Should be ~71.2% for CIFAR100
get_accuracy(opt_repl.target)
###Output
2020-10-22 16:49:53,565 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Inference
###Code
# Download model pre-trained on imagenet21k and fine-tuned on imagenet2012.
![ -e "$model"_imagenet2012.npz ] || gsutil cp gs://vit_models/imagenet21k+imagenet2012/"$model".npz "$model"_imagenet2012.npz
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=1000)
# Load and convert pretrained checkpoint.
params = checkpoint.load(f'{model}_imagenet2012.npz')
params['pre_logits'] = {} # Need to restore empty leaf for Flax.
# Get imagenet labels.
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt
imagenet_labels = dict(enumerate(open('ilsvrc2012_wordnet_lemmas.txt')))
# Get a random picture with the correct dimensions.
!wget https://picsum.photos/384 -O picsum.jpg
import PIL
img = PIL.Image.open('picsum.jpg')
img
# Predict on a batch with a single item (note very efficient TPU usage...)
logits, = VisionTransformer.call(params, (np.array(img) / 128 - 1)[None, ...])
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
###Output
0.76433 : convertible
0.01839 : beach_wagon, station_wagon, wagon, estate_car, beach_waggon, station_waggon, waggon
0.01566 : car_mirror
0.01226 : cab, hack, taxi, taxicab
0.01132 : limousine, limo
0.01067 : golfcart, golf_cart
0.01041 : recreational_vehicle, RV, R.V.
0.01026 : Model_T
0.00805 : minibus
0.00767 : odometer, hodometer, mileometer, milometer
###Markdown
See code at https://github.com/google-research/vision_transformer/. See paper at https://arxiv.org/abs/2010.11929. This Colab allows you to run the [JAX](https://jax.readthedocs.org) implementation of the Vision Transformer. Copyright 2020 Google LLC.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Setup. Needs to be executed once in every VM. The cell below downloads the code from GitHub and installs the necessary dependencies.
###Code
#@markdown Select whether you would like to store data in your personal drive.
#@markdown
#@markdown If you select **yes**, you will need to authorize Colab to access
#@markdown your personal drive
#@markdown
#@markdown If you select **no**, then any changes you make will disappear when
#@markdown this Colab's VM restarts after some time of inactivity...
# use_gdrive = 'no' #@param ["yes", "no"]
# if use_gdrive == 'yes':
# from google.colab import drive
# drive.mount('/gdrive')
# root = '/gdrive/My Drive/vision_transformer_colab'
# import os
# if not os.path.isdir(root):
# os.mkdir(root)
# os.chdir(root)
# print(f'\nChanged CWD to "{root}"')
# else:
# from IPython import display
# display.display(display.HTML(
# '<h1 style="color:red">CHANGES NOT PERSISTED</h1>'))
# # Clone repository and pull latest changes.
# ![ -d vision_transformer ] || git clone --depth=1 https://github.com/google-research/vision_transformer
# !cd vision_transformer && git pull
###Output
Already up to date.
###Markdown
Imports
###Code
# Shows all available pre-trained models.
!gsutil ls -lh gs://vit_models/*
# Download a pre-trained model.
model = 'ViT-B_16'
![ -e "$model".npz ] || gsutil cp gs://vit_models/imagenet21k/"$model".npz .
#@markdown TPU setup : Boilerplate for connecting JAX to TPU.
# import os
# if 'google.colab' in str(get_ipython()) and 'COLAB_TPU_ADDR' in os.environ:
# # Make sure the Colab Runtime is set to Accelerator: TPU.
# import requests
# if 'TPU_DRIVER_MODE' not in globals():
# url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206'
# resp = requests.post(url)
# TPU_DRIVER_MODE = 1
# # The following is required to use TPU Driver as JAX's backend.
# from jax.config import config
# config.FLAGS.jax_xla_backend = "tpu_driver"
# config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
# print('Registered TPU:', config.FLAGS.jax_backend_target)
# else:
# print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
import flax
import jax
from matplotlib import pyplot as plt
import numpy as np
import tqdm
# Shows the number of available devices.
# In a CPU/GPU runtime this will be a single device.
# In a TPU runtime this will be 8 cores.
jax.local_devices()
# Open some code files in a split editor on the right.
# You can open more files in the file tab on the left.
# from google.colab import files
# files.view('vision_transformer/vit_jax/checkpoint.py')
# files.view('vision_transformer/vit_jax/input_pipeline.py')
# files.view('vision_transformer/vit_jax/models.py')
# files.view('vision_transformer/vit_jax/momentum_clip.py')
# files.view('vision_transformer/vit_jax/train.py')
# files.view('vision_transformer/vit_jax/hyper.py')
# Import files from repository.
# Updating the files in the editor on the right will immediately update the
# modules by re-importing them.
import sys
if './vision_transformer' not in sys.path:
sys.path.append('./vision_transformer')
%load_ext autoreload
%autoreload 2
from vit_jax import checkpoint
from vit_jax import hyper
from vit_jax import input_pipeline
from vit_jax import logging
from vit_jax import models
from vit_jax import momentum_clip
from vit_jax import train
logger = logging.setup_logger('./logs')
# Helper functions for images.
labelnames = dict(
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar10=('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar100=('apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'computer_keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm')
)
def make_label_getter(dataset):
"""Returns a function converting label indices to names."""
def getter(label):
if dataset in labelnames:
return labelnames[dataset][label]
return f'label={label}'
return getter
def show_img(img, ax=None, title=None):
"""Shows a single image."""
if ax is None:
ax = plt.gca()
ax.imshow(img[...])
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def show_img_grid(imgs, titles):
"""Shows a grid of images."""
n = int(np.ceil(len(imgs)**.5))
_, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))
for i, (img, title) in enumerate(zip(imgs, titles)):
img = (img + 1) / 2 # Denormalize
show_img(img, axs[i // n][i % n], title)
###Output
_____no_output_____
###Markdown
Load dataset
###Code
dataset = 'imagenet2012'
batch_size = 512 # Reduce to 256 if running on a single GPU.
# Note the datasets are configured in input_pipeline.DATASET_PRESETS
# Have a look in the editor at the right.
num_classes = input_pipeline.get_dataset_info(dataset, 'train')['num_classes']
# tf.data.Dataset for training, infinite repeats.
ds_train = input_pipeline.get_data(
dataset=dataset, mode='train', repeats=None, batch_size=batch_size,
)
# tf.data.Dataset for evaluation, single repeat.
ds_test = input_pipeline.get_data(
dataset=dataset, mode='test', repeats=1, batch_size=batch_size,
)
# Fetch a batch of test images for illustration purposes.
batch = next(iter(ds_test.as_numpy_iterator()))
# Note the shape : [num_local_devices, local_batch_size, h, w, c]
batch['image'].shape
# Show some images with their labels.
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
# Same as above, but with train images.
# Do you spot a difference?
# Check out input_pipeline.get_data() in the editor at your right to see how the
# images are preprocessed differently.
batch = next(iter(ds_train.as_numpy_iterator()))
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
###Output
_____no_output_____
###Markdown
Load pre-trained
###Code
# Load model definition & initialize random parameters.
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=num_classes)
_, params = VisionTransformer.init_by_shape(
jax.random.PRNGKey(0),
# Discard the "num_local_devices" dimension of the batch for initialization.
[(batch['image'].shape[1:], batch['image'].dtype.name)])
# Load and convert pretrained checkpoint.
# This involves loading the actual pre-trained model results, but then also
# modifying the parameters a bit, e.g. changing the final layers, and resizing
# the positional embeddings.
# For details, refer to the code and to the methods of the paper.
params = checkpoint.load_pretrained(
pretrained_path=f'{model}.npz',
init_params=params,
model_config=models.CONFIGS[model],
logger=logger,
)
###Output
2020-10-22 15:57:49,688 [WARNING] vit_jax.logging: Inspect recovered empty keys:
{'pre_logits'}
2020-10-22 15:57:49,692 [INFO] vit_jax.logging: Inspect extra keys:
{'pre_logits/kernel', 'pre_logits/bias'}
2020-10-22 15:57:49,702 [INFO] vit_jax.logging: Resformer: drop-head variant
2020-10-22 15:57:49,711 [INFO] vit_jax.logging: Resformer: resized variant: (1, 197, 768) to (1, 577, 768)
2020-10-22 15:57:49,712 [INFO] vit_jax.logging: Resformer: grid-size from 14 to 24
###Markdown
Evaluate
###Code
# So far, all our data is in the host memory. Let's now replicate the arrays
# into the devices.
# This will make every array in the pytree params become a ShardedDeviceArray
# that has the same data replicated across all local devices.
# For TPU it replicates the params in every core.
# For a single GPU this simply moves the data onto the device.
# For CPU it simply creates a copy.
params_repl = flax.jax_utils.replicate(params)
print('params.cls:', type(params['cls']).__name__, params['cls'].shape)
print('params_repl.cls:', type(params_repl['cls']).__name__, params_repl['cls'].shape)
# Then map the call to our model's forward pass onto all available devices.
vit_apply_repl = jax.pmap(VisionTransformer.call)
def get_accuracy(params_repl):
"""Returns accuracy evaluated on the test set."""
good = total = 0
steps = input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size
for _, batch in zip(tqdm.notebook.trange(steps), ds_test.as_numpy_iterator()):
predicted = vit_apply_repl(params_repl, batch['image'])
is_same = predicted.argmax(axis=-1) == batch['label'].argmax(axis=-1)
good += is_same.sum()
total += len(is_same.flatten())
return good / total
# Random performance without fine-tuning.
get_accuracy(params_repl)
###Output
2020-10-22 16:19:29,245 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Fine-tune
###Code
# 100 Steps take approximately 15 minutes in the TPU runtime.
total_steps = 100
warmup_steps = 5
decay_type = 'cosine'
grad_norm_clip = 1
# This controls how many forward passes the batch is split into. 8 works well with
# a TPU runtime that has 8 devices. 64 should work on a GPU. You can of course
# also adjust the batch_size above, but that would require you to adjust the
# learning rate accordingly.
accum_steps = 8
base_lr = 0.03
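# (Added, hedged sketch; not from the original notebook or the vit_jax API.)
# Conceptually, gradient accumulation splits the batch into `accum_steps` micro-batches,
# sums their gradients, and averages them; the real implementation lives in
# train.make_update_fn. `loss_fn` below is a stand-in name with signature
# loss_fn(params, micro_batch), not a vit_jax function.
def _illustrate_grad_accumulation(loss_fn, params, batch, accum_steps):
  """Sketch: average grads of loss_fn(params, micro_batch) over equal batch slices."""
  split = lambda x: x.reshape((accum_steps, -1) + x.shape[1:])
  micro_batches = jax.tree_map(split, batch)
  grads = None
  for i in range(accum_steps):
    micro_batch = jax.tree_map(lambda x: x[i], micro_batches)
    g = jax.grad(loss_fn)(params, micro_batch)
    grads = g if grads is None else jax.tree_map(lambda a, b: a + b, grads, g)
  return jax.tree_map(lambda a: a / accum_steps, grads)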
# Check out train.make_update_fn in the editor on the right side for details.
update_fn_repl = train.make_update_fn(VisionTransformer.call, accum_steps)
# We use a momentum optimizer that uses half precision for state to save
# memory. It also implements gradient clipping.
opt = momentum_clip.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
lr_fn = hyper.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)
# Prefetch entire learning rate schedule onto devices. Otherwise we would have
# a slow transfer from host to devices in every step.
lr_iter = hyper.lr_prefetch_iter(lr_fn, 0, total_steps)
# Initialize PRNGs for dropout.
update_rngs = jax.random.split(jax.random.PRNGKey(0), jax.local_device_count())
# The world's simplest training loop.
# Completes in ~20 min on the TPU runtime.
for step, batch, lr_repl in zip(
tqdm.notebook.trange(1, total_steps + 1),
ds_train.as_numpy_iterator(),
lr_iter
):
opt_repl, loss_repl, update_rngs = update_fn_repl(
opt_repl, lr_repl, batch, update_rngs)
# Should be ~97.2% for CIFAR10
# Should be ~71.2% for CIFAR100
get_accuracy(opt_repl.target)
###Output
2020-10-22 16:49:53,565 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Inference
###Code
# Download model pre-trained on imagenet21k and fine-tuned on imagenet2012.
![ -e "$model"_imagenet2012.npz ] || gsutil cp gs://vit_models/imagenet21k+imagenet2012/"$model".npz "$model"_imagenet2012.npz
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=1000)
# Load and convert pretrained checkpoint.
params = checkpoint.load(f'{model}_imagenet2012.npz')
params['pre_logits'] = {} # Need to restore empty leaf for Flax.
# Get imagenet labels.
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt
imagenet_labels = dict(enumerate(open('ilsvrc2012_wordnet_lemmas.txt')))
# Get a random picture with the correct dimensions.
!wget https://picsum.photos/384 -O picsum.jpg
import PIL
img = PIL.Image.open('picsum.jpg')
img
# Predict on a batch with a single item (note very efficient TPU usage...)
logits, = VisionTransformer.call(params, (np.array(img) / 128 - 1)[None, ...])
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
###Output
0.76433 : convertible
0.01839 : beach_wagon, station_wagon, wagon, estate_car, beach_waggon, station_waggon, waggon
0.01566 : car_mirror
0.01226 : cab, hack, taxi, taxicab
0.01132 : limousine, limo
0.01067 : golfcart, golf_cart
0.01041 : recreational_vehicle, RV, R.V.
0.01026 : Model_T
0.00805 : minibus
0.00767 : odometer, hodometer, mileometer, milometer
###Markdown
**NOTE** Currently this notebook runs with MlpMixer on GPUs and TPUs, but VisionTransformers only run on GPUs. This is due to a temporary regression in the TPUNode setup that is used for Colab and will be fixed soon. See code at https://github.com/google-research/vision_transformer/. See papers at: Vision Transformer: https://arxiv.org/abs/2010.11929; MLP-Mixer: https://arxiv.org/abs/2105.01601; How to train your ViT: https://arxiv.org/abs/2106.10270; When Vision Transformers Outperform ResNets without Pretraining or Strong Data Augmentations: https://arxiv.org/abs/2106.01548. This Colab allows you to run the [JAX](https://jax.readthedocs.org) implementation of the Vision Transformer. If you just want to load a pre-trained checkpoint from a large repository and directly use it for inference, you probably want to go to the other Colab: https://colab.sandbox.google.com/github/google-research/vision_transformer/blob/linen/vit_jax_augreg.ipynb. Copyright 2021 Google LLC.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Setup. Needs to be executed once in every VM. The cell below downloads the code from GitHub and installs the necessary dependencies.
###Code
#@markdown Select whether you would like to store data in your personal drive.
#@markdown
#@markdown If you select **yes**, you will need to authorize Colab to access
#@markdown your personal drive
#@markdown
#@markdown If you select **no**, then any changes you make will disappear when
#@markdown this Colab's VM restarts after some time of inactivity...
use_gdrive = 'no' #@param ["yes", "no"]
if use_gdrive == 'yes':
from google.colab import drive
drive.mount('/gdrive')
root = '/gdrive/My Drive/vision_transformer_colab'
import os
if not os.path.isdir(root):
os.mkdir(root)
os.chdir(root)
print(f'\nChanged CWD to "{root}"')
else:
from IPython import display
display.display(display.HTML(
'<h1 style="color:red">CHANGES NOT PERSISTED</h1>'))
# Clone repository and pull latest changes.
![ -d vision_transformer ] || git clone --depth=1 https://github.com/google-research/vision_transformer
!cd vision_transformer && git pull
!pip install -qr vision_transformer/vit_jax/requirements.txt
###Output
_____no_output_____
###Markdown
Imports
###Code
# Shows all available pre-trained models.
!gsutil ls -lh gs://vit_models/imagenet*
!gsutil ls -lh gs://vit_models/sam
!gsutil ls -lh gs://mixer_models/*
# Download a pre-trained model.
# Note: you can really choose any of the above, but this Colab has been tested
# with the models in the selection below...
model_name = 'ViT-B_32' #@param ["ViT-B_32", "Mixer-B_16"]
if model_name.startswith('ViT'):
![ -e "$model_name".npz ] || gsutil cp gs://vit_models/imagenet21k/"$model_name".npz .
if model_name.startswith('Mixer'):
![ -e "$model_name".npz ] || gsutil cp gs://mixer_models/imagenet21k/"$model_name".npz .
import os
assert os.path.exists(f'{model_name}.npz')
# Google Colab "TPU" runtimes are configured in "2VM mode", meaning that JAX
# cannot see the TPUs because they're not directly attached. Instead we need to
# setup JAX to communicate with a second machine that has the TPUs attached.
import os
if 'google.colab' in str(get_ipython()) and 'COLAB_TPU_ADDR' in os.environ:
import jax
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
print('Connected to TPU.')
else:
print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
from absl import logging
import flax
import jax
from matplotlib import pyplot as plt
import numpy as np
import tqdm
logging.set_verbosity(logging.INFO)
# Shows the number of available devices.
# In a CPU/GPU runtime this will be a single device.
# In a TPU runtime this will be 8 cores.
jax.local_devices()
# Open some code files in a split editor on the right.
# You can open more files in the file tab on the left.
from google.colab import files
files.view('vision_transformer/vit_jax/configs/common.py')
files.view('vision_transformer/vit_jax/configs/models.py')
files.view('vision_transformer/vit_jax/checkpoint.py')
files.view('vision_transformer/vit_jax/input_pipeline.py')
files.view('vision_transformer/vit_jax/models.py')
files.view('vision_transformer/vit_jax/momentum_clip.py')
files.view('vision_transformer/vit_jax/train.py')
# Import files from repository.
# Updating the files in the editor on the right will immediately update the
# modules by re-importing them.
import sys
if './vision_transformer' not in sys.path:
sys.path.append('./vision_transformer')
%load_ext autoreload
%autoreload 2
from vit_jax import checkpoint
from vit_jax import input_pipeline
from vit_jax import utils
from vit_jax import models
from vit_jax import momentum_clip
from vit_jax import train
from vit_jax.configs import common as common_config
from vit_jax.configs import models as models_config
# Helper functions for images.
labelnames = dict(
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar10=('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar100=('apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'computer_keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm')
)
def make_label_getter(dataset):
"""Returns a function converting label indices to names."""
def getter(label):
if dataset in labelnames:
return labelnames[dataset][label]
return f'label={label}'
return getter
def show_img(img, ax=None, title=None):
"""Shows a single image."""
if ax is None:
ax = plt.gca()
ax.imshow(img[...])
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def show_img_grid(imgs, titles):
"""Shows a grid of images."""
n = int(np.ceil(len(imgs)**.5))
_, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))
for i, (img, title) in enumerate(zip(imgs, titles)):
img = (img + 1) / 2 # Denormalize
show_img(img, axs[i // n][i % n], title)
###Output
_____no_output_____
###Markdown
Load dataset
###Code
dataset = 'cifar10'
batch_size = 512
config = common_config.with_dataset(common_config.get_config(), dataset)
num_classes = input_pipeline.get_dataset_info(dataset, 'train')['num_classes']
config.batch = batch_size
config.pp.crop = 224
# For details about setting up datasets, see input_pipeline.py on the right.
ds_train = input_pipeline.get_data_from_tfds(config=config, mode='train')
ds_test = input_pipeline.get_data_from_tfds(config=config, mode='test')
del config # Only needed to instantiate datasets.
# Fetch a batch of test images for illustration purposes.
batch = next(iter(ds_test.as_numpy_iterator()))
# Note the shape : [num_local_devices, local_batch_size, h, w, c]
batch['image'].shape
# Show some images with their labels.
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
# Same as above, but with train images.
# Note how images are cropped/scaled differently.
# Check out input_pipeline.get_data() in the editor at your right to see how the
# images are preprocessed differently.
batch = next(iter(ds_train.as_numpy_iterator()))
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
###Output
_____no_output_____
###Markdown
Load pre-trained
###Code
model_config = models_config.MODEL_CONFIGS[model_name]
model_config
# Load model definition & initialize random parameters.
# This also compiles the model to XLA (takes some minutes the first time).
if model_name.startswith('Mixer'):
model = models.MlpMixer(num_classes=num_classes, **model_config)
else:
model = models.VisionTransformer(num_classes=num_classes, **model_config)
variables = jax.jit(lambda: model.init(
jax.random.PRNGKey(0),
# Discard the "num_local_devices" dimension of the batch for initialization.
batch['image'][0, :1],
train=False,
), backend='cpu')()
# Load and convert pretrained checkpoint.
# This involves loading the actual pre-trained model results, but then also
# modifying the parameters a bit, e.g. changing the final layers, and resizing
# the positional embeddings.
# For details, refer to the code and to the methods of the paper.
params = checkpoint.load_pretrained(
pretrained_path=f'{model_name}.npz',
init_params=variables['params'],
model_config=model_config,
)
###Output
INFO:absl:Inspect extra keys:
{'pre_logits/bias', 'pre_logits/kernel'}
INFO:absl:load_pretrained: drop-head variant
###Markdown
Evaluate
###Code
# So far, all our data is in the host memory. Let's now replicate the arrays
# into the devices.
# This will make every array in the pytree params become a ShardedDeviceArray
# that has the same data replicated across all local devices.
# For TPU it replicates the params in every core.
# For a single GPU this simply moves the data onto the device.
# For CPU it simply creates a copy.
params_repl = flax.jax_utils.replicate(params)
print('params.cls:', type(params['head']['bias']).__name__,
params['head']['bias'].shape)
print('params_repl.cls:', type(params_repl['head']['bias']).__name__,
params_repl['head']['bias'].shape)
# Then map the call to our model's forward pass onto all available devices.
vit_apply_repl = jax.pmap(lambda params, inputs: model.apply(
dict(params=params), inputs, train=False))
def get_accuracy(params_repl):
"""Returns accuracy evaluated on the test set."""
good = total = 0
steps = input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size
for _, batch in zip(tqdm.trange(steps), ds_test.as_numpy_iterator()):
predicted = vit_apply_repl(params_repl, batch['image'])
is_same = predicted.argmax(axis=-1) == batch['label'].argmax(axis=-1)
good += is_same.sum()
total += len(is_same.flatten())
return good / total
# Random performance without fine-tuning.
get_accuracy(params_repl)
###Output
INFO:absl:Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
100%|██████████| 19/19 [01:07<00:00, 3.58s/it]
###Markdown
Fine-tune
###Code
# 100 Steps take approximately 15 minutes in the TPU runtime.
total_steps = 100
warmup_steps = 5
decay_type = 'cosine'
grad_norm_clip = 1
# This controls how many forward passes the batch is split into. 8 works well with
# a TPU runtime that has 8 devices. 64 should work on a GPU. You can of course
# also adjust the batch_size above, but that would require you to adjust the
# learning rate accordingly.
accum_steps = 8
base_lr = 0.03
# Check out train.make_update_fn in the editor on the right side for details.
lr_fn = utils.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)
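# (Added, hedged sketch; written out for reference only, not the vit_jax implementation.)
# A linear-warmup + cosine-decay schedule of the general shape that
# utils.create_learning_rate_schedule is expected to produce:
def _cosine_warmup_lr(step, total_steps=total_steps, base_lr=base_lr,
                      warmup_steps=warmup_steps):
  if step < warmup_steps:
    return base_lr * step / max(1, warmup_steps)
  progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
  return base_lr * 0.5 * (1.0 + np.cos(np.pi * progress))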
update_fn_repl = train.make_update_fn(
apply_fn=model.apply, accum_steps=accum_steps, lr_fn=lr_fn)
# We use a momentum optimizer that uses half precision for state to save
# memory. It also implements gradient clipping.
opt = momentum_clip.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
# Initialize PRNGs for dropout.
update_rng_repl = flax.jax_utils.replicate(jax.random.PRNGKey(0))
losses = []
lrs = []
# Completes in ~20 min on the TPU runtime.
for step, batch in zip(
tqdm.trange(1, total_steps + 1),
ds_train.as_numpy_iterator(),
):
opt_repl, loss_repl, update_rng_repl = update_fn_repl(
opt_repl, flax.jax_utils.replicate(step), batch, update_rng_repl)
losses.append(loss_repl[0])
lrs.append(lr_fn(step))
plt.plot(losses)
plt.figure()
plt.plot(lrs)
# Should be ~96.7% for Mixer-B/16 or 97.7% for ViT-B/32 on CIFAR10 (both @224)
get_accuracy(opt_repl.target)
###Output
INFO:absl:Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
100%|██████████| 19/19 [00:32<00:00, 1.73s/it]
###Markdown
Inference
###Code
# Download a pre-trained model.
if model_name.startswith('Mixer'):
# Download model trained on imagenet2012
![ -e "$model_name"_imagenet2012.npz ] || gsutil cp gs://mixer_models/imagenet1k/"$model_name".npz "$model_name"_imagenet2012.npz
model = models.MlpMixer(num_classes=1000, **model_config)
else:
# Download model pre-trained on imagenet21k and fine-tuned on imagenet2012.
![ -e "$model_name"_imagenet2012.npz ] || gsutil cp gs://vit_models/imagenet21k+imagenet2012/"$model_name".npz "$model_name"_imagenet2012.npz
model = models.VisionTransformer(num_classes=1000, **model_config)
import os
assert os.path.exists(f'{model_name}_imagenet2012.npz')
# Load and convert pretrained checkpoint.
params = checkpoint.load(f'{model_name}_imagenet2012.npz')
params['pre_logits'] = {} # Need to restore empty leaf for Flax.
# Get imagenet labels.
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt
imagenet_labels = dict(enumerate(open('ilsvrc2012_wordnet_lemmas.txt')))
# Get a random picture with the correct dimensions.
resolution = 224 if model_name.startswith('Mixer') else 384
!wget https://picsum.photos/$resolution -O picsum.jpg
import PIL
img = PIL.Image.open('picsum.jpg')
img
# Predict on a batch with a single item (note very efficient TPU usage...)
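# (Added note) The expression (np.array(img) / 128 - 1) below maps uint8 pixels in [0, 255]
# to roughly [-1, 1], matching the normalization used by the input pipeline during training.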
logits, = model.apply(dict(params=params), (np.array(img) / 128 - 1)[None, ...], train=False)
preds = np.array(jax.nn.softmax(logits))
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
###Output
0.13330 : sandbar, sand_bar
0.09332 : seashore, coast, seacoast, sea-coast
0.05257 : jeep, landrover
0.05188 : Arabian_camel, dromedary, Camelus_dromedarius
0.01251 : horned_viper, cerastes, sand_viper, horned_asp, Cerastes_cornutus
0.00753 : tiger_beetle
0.00744 : dung_beetle
0.00711 : sidewinder, horned_rattlesnake, Crotalus_cerastes
0.00703 : leatherback_turtle, leatherback, leathery_turtle, Dermochelys_coriacea
0.00647 : pole
###Markdown
See code at https://github.com/google-research/vision_transformer/. See paper at https://arxiv.org/abs/2010.11929. This Colab allows you to run the [JAX](https://jax.readthedocs.org) implementation of the Vision Transformer. Copyright 2020 Google LLC.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Setup. Needs to be executed once in every VM. The cell below downloads the code from GitHub and installs the necessary dependencies.
###Code
#@markdown Select whether you would like to store data in your personal drive.
#@markdown
#@markdown If you select **yes**, you will need to authorize Colab to access
#@markdown your personal drive
#@markdown
#@markdown If you select **no**, then any changes you make will disappear when
#@markdown this Colab's VM restarts after some time of inactivity...
use_gdrive = 'no' #@param ["yes", "no"]
if use_gdrive == 'yes':
from google.colab import drive
drive.mount('/gdrive')
root = '/gdrive/My Drive/vision_transformer_colab'
import os
if not os.path.isdir(root):
os.mkdir(root)
os.chdir(root)
print(f'\nChanged CWD to "{root}"')
else:
from IPython import display
display.display(display.HTML(
'<h1 style="color:red">CHANGES NOT PERSISTED</h1>'))
# Clone repository and pull latest changes.
![ -d vision_transformer ] || git clone --depth=1 https://github.com/google-research/vision_transformer
!cd vision_transformer && git pull
!pip install -qr vision_transformer/vit_jax/requirements.txt
###Output
_____no_output_____
###Markdown
Imports
###Code
# Shows all available pre-trained models.
!gsutil ls -lh gs://vit_models/*
# Download a pre-trained model.
model = 'ViT-B_16'
![ -e "$model".npz ] || gsutil cp gs://vit_models/imagenet21k/"$model".npz .
#@markdown TPU setup : Boilerplate for connecting JAX to TPU.
import os
if 'google.colab' in str(get_ipython()) and 'COLAB_TPU_ADDR' in os.environ:
# Make sure the Colab Runtime is set to Accelerator: TPU.
import requests
if 'TPU_DRIVER_MODE' not in globals():
url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206'
resp = requests.post(url)
TPU_DRIVER_MODE = 1
# The following is required to use TPU Driver as JAX's backend.
from jax.config import config
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
print('Registered TPU:', config.FLAGS.jax_backend_target)
else:
print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
import flax
import jax
from matplotlib import pyplot as plt
import numpy as np
import tqdm
# Shows the number of available devices.
# In a CPU/GPU runtime this will be a single device.
# In a TPU runtime this will be 8 cores.
jax.local_devices()
# Open some code files in a split editor on the right.
# You can open more files in the file tab on the left.
from google.colab import files
files.view('vision_transformer/vit_jax/checkpoint.py')
files.view('vision_transformer/vit_jax/input_pipeline.py')
files.view('vision_transformer/vit_jax/models.py')
files.view('vision_transformer/vit_jax/momentum_hp.py')
files.view('vision_transformer/vit_jax/train.py')
files.view('vision_transformer/vit_jax/hyper.py')
# Import files from repository.
# Updating the files in the editor on the right will immediately update the
# modules by re-importing them.
import sys
if './vision_transformer' not in sys.path:
sys.path.append('./vision_transformer')
%load_ext autoreload
%autoreload 2
from vit_jax import checkpoint
from vit_jax import hyper
from vit_jax import input_pipeline
from vit_jax import logging
from vit_jax import models
from vit_jax import momentum_hp
from vit_jax import train
logger = logging.setup_logger('./logs')
# Helper functions for images.
labelnames = dict(
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar10=('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar100=('apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'computer_keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm')
)
def make_label_getter(dataset):
"""Returns a function converting label indices to names."""
def getter(label):
if dataset in labelnames:
return labelnames[dataset][label]
return f'label={label}'
return getter
def show_img(img, ax=None, title=None):
"""Shows a single image."""
if ax is None:
ax = plt.gca()
ax.imshow(img[...])
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def show_img_grid(imgs, titles):
"""Shows a grid of images."""
n = int(np.ceil(len(imgs)**.5))
_, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))
for i, (img, title) in enumerate(zip(imgs, titles)):
img = (img + 1) / 2 # Denormalize
show_img(img, axs[i // n][i % n], title)
###Output
_____no_output_____
###Markdown
Load dataset
###Code
dataset = 'cifar10'
batch_size = 512 # Reduce to 256 if running on a single GPU.
# Note the datasets are configured in input_pipeline.DATASET_PRESETS
# Have a look in the editor at the right.
num_classes = input_pipeline.get_dataset_info(dataset, 'train')['num_classes']
# tf.data.Dataset for training, infinite repeats.
ds_train = input_pipeline.get_data(
dataset=dataset, mode='train', repeats=None, batch_size=batch_size,
)
# tf.data.Dataset for evaluation, single repeat.
ds_test = input_pipeline.get_data(
dataset=dataset, mode='test', repeats=1, batch_size=batch_size,
)
# Fetch a batch of test images for illustration purposes.
batch = next(iter(ds_test.as_numpy_iterator()))
# Note the shape : [num_local_devices, local_batch_size, h, w, c]
batch['image'].shape
# Show some images with their labels.
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
# Same as above, but with train images.
# Do you spot a difference?
# Check out input_pipeline.get_data() in the editor at your right to see how the
# images are preprocessed differently.
batch = next(iter(ds_train.as_numpy_iterator()))
images, labels = batch['image'][0][:9], batch['label'][0][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
###Output
_____no_output_____
###Markdown
Load pre-trained
###Code
# Load model definition & initialize random parameters.
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=num_classes)
_, params = VisionTransformer.init_by_shape(
jax.random.PRNGKey(0),
# Discard the "num_local_devices" dimension of the batch for initialization.
[(batch['image'].shape[1:], batch['image'].dtype.name)])
# Load and convert pretrained checkpoint.
# This involves loading the actual pre-trained model results, but then also
# modifying the parameters a bit, e.g. changing the final layers, and resizing
# the positional embeddings.
# For details, refer to the code and to the methods of the paper.
params = checkpoint.load_pretrained(
pretrained_path=f'{model}.npz',
init_params=params,
model_config=models.CONFIGS[model],
logger=logger,
)
###Output
2020-10-22 15:57:49,688 [WARNING] vit_jax.logging: Inspect recovered empty keys:
{'pre_logits'}
2020-10-22 15:57:49,692 [INFO] vit_jax.logging: Inspect extra keys:
{'pre_logits/kernel', 'pre_logits/bias'}
2020-10-22 15:57:49,702 [INFO] vit_jax.logging: Resformer: drop-head variant
2020-10-22 15:57:49,711 [INFO] vit_jax.logging: Resformer: resized variant: (1, 197, 768) to (1, 577, 768)
2020-10-22 15:57:49,712 [INFO] vit_jax.logging: Resformer: grid-size from 14 to 24
###Markdown
Evaluate
###Code
# So far, all our data is in the host memory. Let's now replicate the arrays
# into the devices.
# This will make every array in the pytree params become a ShardedDeviceArray
# that has the same data replicated across all local devices.
# For TPU it replicates the params in every core.
# For a single GPU this simply moves the data onto the device.
# For CPU it simply creates a copy.
params_repl = flax.jax_utils.replicate(params)
print('params.cls:', type(params['cls']).__name__, params['cls'].shape)
print('params_repl.cls:', type(params_repl['cls']).__name__, params_repl['cls'].shape)
# Then map the call to our model's forward pass onto all available devices.
vit_apply_repl = jax.pmap(VisionTransformer.call)
def get_accuracy(params_repl):
"""Returns accuracy evaluated on the test set."""
good = total = 0
steps = input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size
for _, batch in zip(tqdm.notebook.trange(steps), ds_test.as_numpy_iterator()):
predicted = vit_apply_repl(params_repl, batch['image'])
is_same = predicted.argmax(axis=-1) == batch['label'].argmax(axis=-1)
good += is_same.sum()
total += len(is_same.flatten())
return good / total
# Random performance without fine-tuning.
get_accuracy(params_repl)
###Output
2020-10-22 16:19:29,245 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Fine-tune
###Code
# 100 Steps take approximately 15 minutes in the TPU runtime.
total_steps = 100
warmup_steps = 5
decay_type = 'cosine'
grad_norm_clip = 1
# This controls how many forward passes the batch is split into. 8 works well with
# a TPU runtime that has 8 devices. 64 should work on a GPU. You can of course
# also adjust the batch_size above, but that would require you to adjust the
# learning rate accordingly.
accum_steps = 8
base_lr = 0.03
# Check out train.make_update_fn in the editor on the right side for details.
update_fn_repl = train.make_update_fn(VisionTransformer.call, accum_steps)
# We use a momentum optimizer that uses half precision for state to save
# memory. It also implements gradient clipping.
opt = momentum_hp.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
lr_fn = hyper.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)
# Prefetch entire learning rate schedule onto devices. Otherwise we would have
# a slow transfer from host to devices in every step.
lr_iter = hyper.lr_prefetch_iter(lr_fn, 0, total_steps)
# Initialize PRNGs for dropout.
update_rngs = jax.random.split(jax.random.PRNGKey(0), jax.local_device_count())
# The world's simplest training loop.
# Completes in ~20 min on the TPU runtime.
for step, batch, lr_repl in zip(
tqdm.notebook.trange(1, total_steps + 1),
ds_train.as_numpy_iterator(),
lr_iter
):
opt_repl, loss_repl, update_rngs = update_fn_repl(
opt_repl, lr_repl, batch, update_rngs)
# Should be ~97.2% for CIFAR10
# Should be ~71.2% for CIFAR100
get_accuracy(opt_repl.target)
###Output
2020-10-22 16:49:53,565 [INFO] absl: Load dataset info from /root/tensorflow_datasets/cifar10/3.0.2
###Markdown
Inference
###Code
# Download model pre-trained on imagenet21k and fine-tuned on imagenet2012.
![ -e "$model"_imagenet2012.npz ] || gsutil cp gs://vit_models/imagenet21k+imagenet2012/"$model".npz "$model"_imagenet2012.npz
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=1000)
# Load and convert pretrained checkpoint.
params = checkpoint.load(f'{model}_imagenet2012.npz')
params['pre_logits'] = {} # Need to restore empty leaf for Flax.
# Get imagenet labels.
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt
imagenet_labels = dict(enumerate(open('ilsvrc2012_wordnet_lemmas.txt')))
# Get a random picture with the correct dimensions.
!wget https://picsum.photos/384 -O picsum.jpg
import PIL
img = PIL.Image.open('picsum.jpg')
img
# Predict on a batch with a single item (note very efficient TPU usage...)
logits, = VisionTransformer.call(params, (np.array(img) / 128 - 1)[None, ...])
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
###Output
0.76433 : convertible
0.01839 : beach_wagon, station_wagon, wagon, estate_car, beach_waggon, station_waggon, waggon
0.01566 : car_mirror
0.01226 : cab, hack, taxi, taxicab
0.01132 : limousine, limo
0.01067 : golfcart, golf_cart
0.01041 : recreational_vehicle, RV, R.V.
0.01026 : Model_T
0.00805 : minibus
0.00767 : odometer, hodometer, mileometer, milometer
|
student-projects/fall-2020/GGWP-Identify-Toxic-Behavior-in-Gaming-with-Public-Data/scraping/Twitch_Scraper.ipynb | ###Markdown
The channels I watched for LOL are: Tubbo yamikazexz riotgames nightblue3 iwilldominate sneakylol thebausffs tfblade loltyler1 shiphtur tarzaned jankos ratirl drututt katevolved wingsofdeath trick2g karasmai ipav999 anniebot boxbox corejj lol_selfmade nisqyy ikeepittaco gamergirl tobiasfate. The channels I watched for PUBG are: Tubbo danucd tgltn ibiza hambinooo break halifax pubg_andymh5 grizz alisa summit1g chocotaco feyd fuzzfaze49 ashek gagod chad taryn jowybear shrimzy. https://www.twitch.tv/tubbo. The tutorial: https://www.learndatasci.com/tutorials/how-stream-text-data-twitch-sockets-python/ Setup
###Code
from google.colab import drive
drive.mount('/content/drive')
my_project_folder = 'Data-X: GGWP Toxic Behavior Public Data'
%cd drive/My Drive/{my_project_folder}/scraping
server = 'irc.chat.twitch.tv'
port = 6667
nickname = 'aki_niki' # your username
token = '' # your token
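# (Added note) Twitch IRC typically expects the token in the form 'oauth:<generated token>';
# see the tutorial linked above for how to obtain one.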
channel = '#tobiasfate'
# instantiate a socket
import socket
sock = socket.socket()
sock.connect((server, port))
sock.send(f"PASS {token}\n".encode('utf-8'))
sock.send(f"NICK {nickname}\n".encode('utf-8'))
sock.send(f"JOIN {channel}\n".encode('utf-8'))
resp = sock.recv(2048).decode('utf-8')
resp
###Output
_____no_output_____
###Markdown
Write into files
###Code
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s — %(message)s',
datefmt='%Y-%m-%d_%H:%M:%S',
handlers=[logging.FileHandler('chat.log', encoding='utf-8')])
logging.info(resp)
# This process keeps collecting data. Interrupt it to go to the next step
from emoji import demojize
while True:
resp = sock.recv(2048).decode('utf-8')
if resp.startswith('PING'):
sock.send("PONG\n".encode('utf-8'))
elif len(resp) > 0:
logging.info(demojize(resp))
# run in terminal: tail -f chat.log
# sample message
msg = "2020-11-24_16:54:53 — :[email protected] PRIVMSG #tobiasfate :I think"
#'2018-12-10_11:26:40 — :[email protected] PRIVMSG #ninja :Chat, let Ninja play solos'
# date: split it off and parse it
from datetime import datetime
time_logged = msg.split()[0].strip()
time_logged = datetime.strptime(time_logged, '%Y-%m-%d_%H:%M:%S')
time_logged
username_message = msg.split('—')[1:]
username_message = '—'.join(username_message).strip()
username_message
import re
username, channel, message = re.search(':(.*)\!.*@.*\.tmi\.twitch\.tv PRIVMSG #(.*) :(.*)', username_message).groups()
# (.*) — will capture part of the string
print(f"Channel: {channel} \nUsername: {username} \nMessage: {message}")
import pandas as pd
def get_chat_dataframe(file):
data = []
with open(file, 'r', encoding='utf-8') as f:
lines = f.read().split('\n\n\n')
for line in lines:
try:
time_logged = line.split('—')[0].strip()
time_logged = datetime.strptime(time_logged, '%Y-%m-%d_%H:%M:%S')
username_message = line.split('—')[1:]
username_message = '—'.join(username_message).strip()
username, channel, message = re.search(
':(.*)\!.*@.*\.tmi\.twitch\.tv PRIVMSG #(.*) :(.*)', username_message
).groups()
d = {
'dt': time_logged,
'channel': channel,
'username': username,
'message': message
}
data.append(d)
except Exception:
pass
return pd.DataFrame().from_records(data)
df = get_chat_dataframe('chat.log')
df.set_index('dt', inplace=True)
print(df.shape)
df
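# (Added, hedged usage example) Inspect only the messages from the channel joined above;
# the column names follow the dataframe built by get_chat_dataframe.
df[df['channel'] == 'tobiasfate'].head()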
sock.close()
###Output
_____no_output_____
###Markdown
Conver to CSV
###Code
# Get only the message column as only that is required for the model prediction
df2 = df['message'].to_frame()
df2 = df2.drop_duplicates() # drop duplicates
game_platform = "pubg_twitch"
df2.to_csv('../data/scraped/' + game_platform + '.csv', encoding='utf-8')
###Output
_____no_output_____ |
notebooks/pre-processing/GunViolenceArchive-PreProcessing-Script.ipynb | ###Markdown
Mass Shooting From Gun Violence Archive. Data downloaded from: [Gun Violence Archive](https://www.gunviolencearchive.org/reports). Raw Data Files Present in: raw_data/GVA
###Code
! ls -l raw_data/* | grep Mass
# Read MassShootingAllYears.csv first
# Import Library
import pandas as pd
import numpy as np
MassShootingAllYears_df = pd.read_csv("raw_data/GVA/MassShootingAllYears.csv")
MassShootingAllYears_df.shape
MassShootingAllYears2014_df = pd.read_csv("raw_data/GVA/MassShootingAllYears2014.csv")
MassShootingAllYears2014_df.shape
# Reading All Other Csv files
MassShootingAllYears2015_df = pd.read_csv("raw_data/GVA/MassShootingAllYears2015.csv")
MassShootingAllYears2015_df.shape
# Reading All Other Csv files
MassShootingAllYears2016_df = pd.read_csv("raw_data/GVA/MassShootingAllYears2016.csv")
MassShootingAllYears2016_df.shape
# Reading All Other Csv files
MassShootingAllYears2017_df = pd.read_csv("raw_data/GVA/MassShootingAllYears2017.csv")
MassShootingAllYears2017_df.shape
# Concatenate All Dataframes into One Dataframe
dataframe_toConcate = [MassShootingAllYears_df,
MassShootingAllYears2014_df,
MassShootingAllYears2015_df,
MassShootingAllYears2016_df,
MassShootingAllYears2017_df]
MassShootingAllYears_Processed = pd.concat(dataframe_toConcate)
# Write MassShootingAllYears_df into CSV File into PreProcessed Data
MassShootingAllYears_Processed.to_csv("processed_data/GVA/MassShootingTotalYears.csv", index=False)
MassShootingAllYears_Processed.shape
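# (Added, hedged check) The combined export and the per-year exports may overlap, so it can be
# worth checking for duplicate incidents before analysis. 'Incident ID' is assumed to be the
# key column, as in the other GVA exports processed below.
if 'Incident ID' in MassShootingAllYears_Processed.columns:
    print('Duplicate incident rows:',
          MassShootingAllYears_Processed.duplicated(subset=['Incident ID']).sum())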
###Output
_____no_output_____
###Markdown
Accidental Deaths from Gun Violence Archive. Data downloaded from: [Gun Violence Archive](https://www.gunviolencearchive.org/reports). Raw Data Files Present in: raw_data/ Folder
###Code
! ls -l raw_data/* | grep Deaths
###Output
-rw-r--r--@ 1 vijaykalmath staff 152081 Dec 11 16:40 Accidental_Deaths_All.csv
-rw-r--r--@ 1 vijaykalmath staff 56376 Dec 11 16:43 Accidental_Deaths_Children.csv
-rw-r--r--@ 1 vijaykalmath staff 76427 Dec 11 16:50 Accidental_Deaths_Teen.csv
###Markdown
Notes: 1. Accidental_Deaths_All contains all Deaths. 2. Accidental_Deaths_Children.csv has only deaths involving Children. 3. Accidental_Deaths_Teen.csv has only deaths involving Teens. 4. We can safely assume that anything that is not classified as Children or Teen can be classified as Adult.
###Code
Accidental_DeathsAll_df = pd.read_csv("raw_data/GVA/Accidental_Deaths_All.csv")
Accidental_DeathsAll_df.shape
Accidental_DeathsAllChildren_df = pd.read_csv("raw_data/GVA/Accidental_Deaths_Children.csv")
Accidental_DeathsAllChildren_df.shape
Accidental_DeathsAllTeen_df = pd.read_csv("raw_data/GVA/Accidental_Deaths_Teen.csv")
Accidental_DeathsAllTeen_df.shape
# Setting Age to Adult
Accidental_DeathsAll_df['Age'] = 'Adult'
# Setting Age to Children
Accidental_DeathsAllChildren_df['Age'] = "Child"
# Setting Age to Teen
Accidental_DeathsAllTeen_df['Age'] = "Teen"
Accidental_DeathsAllChildren_df
dataframe_toConcate = [Accidental_DeathsAll_df,Accidental_DeathsAllChildren_df,Accidental_DeathsAllTeen_df]
Accidental_DeathsAll_Processed = pd.concat(dataframe_toConcate)
Accidental_DeathsAll_Processed.drop_duplicates(subset=['Incident ID'], keep='last',inplace=True)
Accidental_DeathsAll_Processed
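# (Added check) Because the child/teen frames are concatenated last and duplicates are dropped
# with keep='last', incidents present in those files keep the 'Child'/'Teen' label while the
# remainder stay 'Adult'. Quick look at the resulting label distribution:
Accidental_DeathsAll_Processed['Age'].value_counts()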
Accidental_DeathsAll_Processed.to_csv("processed_data/GVA/Accidental_DeathsAll.csv", index=False)
###Output
_____no_output_____
###Markdown
Accidental Injuries from Gun Violence Archive. Data downloaded from: [Gun Violence Archive](https://www.gunviolencearchive.org/reports). Raw Data Files Present in: raw_data/ Folder
###Code
! ls -l raw_data/* | grep Injuries
###Output
-rw-r--r--@ 1 vijaykalmath staff 151836 Dec 11 16:50 Accidental_Injuries_All.csv
-rw-r--r--@ 1 vijaykalmath staff 122115 Dec 11 16:51 Accidental_Injuries_Children.csv
-rw-r--r--@ 1 vijaykalmath staff 140351 Dec 11 16:51 Accidental_Injuries_Teen.csv
###Markdown
Notes: 1. Accidental_Injuries_All contains all Injuries. 2. Accidental_Injuries_Children.csv has only Injuries involving Children. 3. Accidental_Injuries_Teen.csv has only Injuries involving Teens. 4. We can safely assume that anything that is not classified as Children or Teen can be classified as Adult.
###Code
Accidental_InjuriesAll_df = pd.read_csv("raw_data/GVA/Accidental_Injuries_All.csv")
Accidental_InjuriesAll_df.shape
Accidental_InjuriesAllChildren_df = pd.read_csv("raw_data/GVA/Accidental_Injuries_Children.csv")
Accidental_InjuriesAllChildren_df.shape
Accidental_InjuriesAllTeen_df = pd.read_csv("raw_data/GVA/Accidental_Injuries_Teen.csv")
Accidental_InjuriesAllTeen_df.shape
# Setting Age to Adult
Accidental_InjuriesAll_df['Age'] = 'Adult'
# Setting Age to Children
Accidental_InjuriesAllChildren_df['Age'] = "Child"
# Setting Age to Teen
Accidental_InjuriesAllTeen_df['Age'] = "Teen"
Accidental_InjuriesAllChildren_df
dataframe_toConcate = [Accidental_InjuriesAll_df,Accidental_InjuriesAllChildren_df,Accidental_InjuriesAllTeen_df]
Accidental_InjuriesAll_Processed = pd.concat(dataframe_toConcate)
Accidental_InjuriesAll_Processed.drop_duplicates(subset=['Incident ID'], keep='last',inplace=True)
Accidental_InjuriesAll_Processed
Accidental_InjuriesAll_Processed.to_csv("processed_data/GVA/Accidental_InjuriesAll.csv", index=False)
###Output
_____no_output_____
###Markdown
Final Data Check
###Code
MassShootingAllYears_Processed = pd.read_csv("processed_data/GVA/MassShootingTotalYears.csv")
Accidental_DeathsAll_Processed = pd.read_csv("processed_data/GVA/Accidental_DeathsAll.csv")
Accidental_InjuriesAll_Processed = pd.read_csv("processed_data/GVA/Accidental_InjuriesAll.csv")
MassShootingAllYears_Processed.shape
Accidental_DeathsAll_Processed.shape
Accidental_InjuriesAll_Processed.shape
MassShootingAllYears_Processed
Accidental_DeathsAll_Processed
Accidental_InjuriesAll_Processed
###Output
_____no_output_____ |
examples/demo_lime.ipynb | ###Markdown
This notebook demonstrates how LIME (Local Interpretable Model-Agnostic Explanations) can be used with models learnt with the AIF 360 toolkit to generate explanations for model predictions. For more information on LIME, see [https://github.com/marcotcr/lime](https://github.com/marcotcr/lime).
###Code
from __future__ import print_function
%matplotlib inline
import sklearn.model_selection
import sklearn.metrics
import sklearn.datasets
import sklearn.ensemble
import sklearn.preprocessing
import numpy as np
import lime
import lime.lime_tabular
from IPython.display import Markdown, display
import matplotlib.pyplot as plt
import sys
sys.path.append("../")
import numpy as np
from aif360.datasets import BinaryLabelDataset
from aif360.metrics.binary_label_dataset_metric import BinaryLabelDatasetMetric
from aif360.metrics.classification_metric import ClassificationMetric
from aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions import load_preproc_data_adult
from aif360.algorithms.preprocessing.reweighing import Reweighing
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from IPython.display import Markdown, display
import matplotlib.pyplot as plt
from aif360.datasets.lime_encoder import LimeEncoder
from aif360.datasets.adult_dataset import AdultDataset
###Output
_____no_output_____
###Markdown
**Load dataset and display statistics**
###Code
np.random.seed(1)
dataset_orig = AdultDataset()
dataset_orig_train, dataset_orig_test = dataset_orig.split([0.7], shuffle=True)
# Metric for the original dataset
sens_attr = dataset_orig_train.protected_attribute_names[0]
sens_idx = dataset_orig_train.protected_attribute_names.index(sens_attr)
privileged_groups = [{sens_attr:dataset_orig_train.privileged_protected_attributes[sens_idx][0]}]
unprivileged_groups = [{sens_attr:dataset_orig_train.unprivileged_protected_attributes[sens_idx][0]}]
metric_orig_train = BinaryLabelDatasetMetric(dataset_orig_train,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Original training dataset"))
print("Difference in mean outcomes between privileged and unprivileged groups = %f" % metric_orig_train.mean_difference())
###Output
_____no_output_____
###Markdown
**Transform the data using the Re-Weighing (pre-processing) algorithm**
###Code
RW = Reweighing(unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
RW.fit(dataset_orig_train)
dataset_transf_train = RW.transform(dataset_orig_train)
###Output
_____no_output_____
###Markdown
**Learn and test models from the transformed data using Logistic Regression**
###Code
#Train model on given dataset
dataset = dataset_transf_train # data to train on
scale = StandardScaler().fit(dataset.features) # remember the scale
model = LogisticRegression() # model to learn
X_train = scale.transform(dataset.features) #apply the scale
y_train = dataset.labels.ravel()
model.fit(X_train, y_train, sample_weight=dataset.instance_weights)
#save model
lr_orig = model
lr_scale_orig = scale
#Test model on given dataset and find threshold for best balanced accuracy
import numpy as np
from tqdm import tqdm
thresh_arr = np.linspace(0.01, 0.5, 50)
scale = lr_scale_orig
model = lr_orig #model to test
dataset = dataset_orig_test #data to test on
X_test = scale.transform(dataset.features) #apply the same scale as applied to the training data
y_test = dataset.labels.ravel()
y_test_pred_prob = model.predict_proba(X_test)
bal_acc_arr = []
disp_imp_arr = []
avg_odds_diff_arr = []
for thresh in tqdm(thresh_arr):
y_test_pred = (y_test_pred_prob[:,1] > thresh).astype(np.double)
dataset_pred = dataset.copy()
dataset_pred.labels = y_test_pred
classified_metric = ClassificationMetric(dataset,
dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
metric_pred = BinaryLabelDatasetMetric(dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
TPR = classified_metric.true_positive_rate()
TNR = classified_metric.true_negative_rate()
bal_acc = 0.5*(TPR+TNR)
acc = accuracy_score(y_true=dataset.labels,
y_pred=dataset_pred.labels)
bal_acc_arr.append(bal_acc)
avg_odds_diff_arr.append(classified_metric.average_odds_difference())
disp_imp_arr.append(metric_pred.disparate_impact())
thresh_arr_best_ind = np.where(bal_acc_arr == np.max(bal_acc_arr))[0][0]
thresh_arr_best = np.array(thresh_arr)[thresh_arr_best_ind]
best_bal_acc = bal_acc_arr[thresh_arr_best_ind]
disp_imp_at_best_bal_acc = np.abs(1.0-np.array(disp_imp_arr))[thresh_arr_best_ind]
avg_odds_diff_at_best_bal_acc = avg_odds_diff_arr[thresh_arr_best_ind]
#Plot balanced accuracy, abs(1-disparate impact)
fig, ax1 = plt.subplots(figsize=(10,7))
ax1.plot(thresh_arr, bal_acc_arr)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(thresh_arr, np.abs(1.0-np.array(disp_imp_arr)), color='r')
ax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(thresh_arr)[thresh_arr_best_ind],
color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
#Plot average odds difference
fig, ax1 = plt.subplots(figsize=(10,7))
ax1.plot(thresh_arr, bal_acc_arr)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(thresh_arr, avg_odds_diff_arr, color='r')
ax2.set_ylabel('avg. odds diff.', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(thresh_arr)[thresh_arr_best_ind], color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
rf_thresh_arr_orig_best = thresh_arr_best
print("Threshold corresponding to Best balance accuracy: %6.4f" % rf_thresh_arr_orig_best)
rf_best_bal_acc_arr_orig = best_bal_acc
print("Best balance accuracy: %6.4f" % rf_best_bal_acc_arr_orig)
rf_disp_imp_at_best_bal_acc_orig = disp_imp_at_best_bal_acc
print("Corresponding abs(1-disparate impact) value: %6.4f" % rf_disp_imp_at_best_bal_acc_orig)
rf_avg_odds_diff_at_best_bal_acc_orig = avg_odds_diff_at_best_bal_acc
print("Corresponding average odds difference value: %6.4f" % rf_avg_odds_diff_at_best_bal_acc_orig)
###Output
Threshold corresponding to Best balance accuracy: 0.1900
Best balance accuracy: 0.8245
Corresponding abs(1-disparate impact) value: 0.2483
Corresponding average odds difference value: -0.0234
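###Markdown
For completeness (an added sketch, not part of the original run), the threshold selected above can be applied to the stored predicted probabilities to produce the final hard predictions; it only reuses `y_test_pred_prob` and the best-threshold variable from the cells above.
###Code
# Added sketch (not part of the original run): hard predictions at the selected threshold
y_test_pred_best = (y_test_pred_prob[:, 1] > rf_thresh_arr_orig_best).astype(np.double)
print("Predicted positives at the best threshold: %d of %d test instances" % (int(y_test_pred_best.sum()), len(y_test_pred_best)))
###Output
_____no_output_____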
###Markdown
**Use LIME to generate explanations for predictions made using the learnt Logistic Regression model**
###Code
limeData = LimeEncoder().fit(dataset_orig_train)
s_train = limeData.transform(dataset_orig_train.features)
s_test = limeData.transform(dataset_orig_test.features)
scale = lr_scale_orig
model = lr_orig #model to test
explainer = lime.lime_tabular.LimeTabularExplainer(s_train ,class_names=limeData.s_class_names,
feature_names = limeData.s_feature_names,
categorical_features=limeData.s_categorical_features,
categorical_names=limeData.s_categorical_names,
kernel_width=3, verbose=False,discretize_continuous=True)
s_predict_fn = lambda x: model.predict_proba(scale.transform(limeData.inverse_transform(x)))
import random
print("Threshold corresponding to Best balance accuracy: %6.4f" % rf_thresh_arr_orig_best)
i1 = 1
exp = explainer.explain_instance(s_test[i1], s_predict_fn, num_features=5)
exp.as_pyplot_figure()
print(" Actual label: " + str(dataset_orig_test.labels[i1]))
i2 = 100
exp = explainer.explain_instance(s_test[i2], s_predict_fn, num_features=5)
exp.as_pyplot_figure()
print(" Actual label: " + str(dataset_orig_test.labels[i2]))
###Output
Threshold corresponding to Best balance accuracy: 0.1900
Actual label: [1.]
Actual label: [0.]
###Markdown
**Learn and test models from the transformed data using Random Forests**
###Code
#Train model on given dataset
dataset = dataset_transf_train # data to train on
scale = StandardScaler().fit(dataset.features) # remember the scale
model = sklearn.ensemble.RandomForestClassifier(n_estimators=500) # model to learn
X_train = scale.transform(dataset.features) #apply the scale
y_train = dataset.labels.ravel()
model.fit(X_train, y_train, sample_weight=dataset.instance_weights)
#save model
rf_orig = model
rf_scale_orig = scale
#Test model on given dataset and find threshold for best balanced accuracy
import numpy as np
from tqdm import tqdm
thresh_arr = np.linspace(0.01, 0.5, 50)
scale = rf_scale_orig
model = rf_orig #model to test
dataset = dataset_orig_test #data to test on
X_test = scale.transform(dataset.features) #apply the same scale as applied to the training data
y_test = dataset.labels.ravel()
y_test_pred_prob = model.predict_proba(X_test)
bal_acc_arr = []
disp_imp_arr = []
avg_odds_diff_arr = []
for thresh in tqdm(thresh_arr):
y_test_pred = (y_test_pred_prob[:,1] > thresh).astype(np.double)
dataset_pred = dataset.copy()
dataset_pred.labels = y_test_pred
classified_metric = ClassificationMetric(dataset,
dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
metric_pred = BinaryLabelDatasetMetric(dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
TPR = classified_metric.true_positive_rate()
TNR = classified_metric.true_negative_rate()
bal_acc = 0.5*(TPR+TNR)
acc = accuracy_score(y_true=dataset.labels,
y_pred=dataset_pred.labels)
bal_acc_arr.append(bal_acc)
avg_odds_diff_arr.append(classified_metric.average_odds_difference())
disp_imp_arr.append(metric_pred.disparate_impact())
thresh_arr_best_ind = np.where(bal_acc_arr == np.max(bal_acc_arr))[0][0]
thresh_arr_best = np.array(thresh_arr)[thresh_arr_best_ind]
best_bal_acc = bal_acc_arr[thresh_arr_best_ind]
disp_imp_at_best_bal_acc = np.abs(1.0-np.array(disp_imp_arr))[thresh_arr_best_ind]
avg_odds_diff_at_best_bal_acc = avg_odds_diff_arr[thresh_arr_best_ind]
#Plot balanced accuracy, abs(1-disparate impact)
fig, ax1 = plt.subplots(figsize=(10,7))
ax1.plot(thresh_arr, bal_acc_arr)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(thresh_arr, np.abs(1.0-np.array(disp_imp_arr)), color='r')
ax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(thresh_arr)[thresh_arr_best_ind],
color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
#Plot average odds difference
fig, ax1 = plt.subplots(figsize=(10,7))
ax1.plot(thresh_arr, bal_acc_arr)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(thresh_arr, avg_odds_diff_arr, color='r')
ax2.set_ylabel('avg. odds diff.', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(thresh_arr)[thresh_arr_best_ind], color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
rf_thresh_arr_orig_best = thresh_arr_best
print("Threshold corresponding to Best balance accuracy: %6.4f" % rf_thresh_arr_orig_best)
rf_best_bal_acc_arr_orig = best_bal_acc
print("Best balance accuracy: %6.4f" % rf_best_bal_acc_arr_orig)
rf_disp_imp_at_best_bal_acc_orig = disp_imp_at_best_bal_acc
print("Corresponding abs(1-disparate impact) value: %6.4f" % rf_disp_imp_at_best_bal_acc_orig)
rf_avg_odds_diff_at_best_bal_acc_orig = avg_odds_diff_at_best_bal_acc
print("Corresponding average odds difference value: %6.4f" % rf_avg_odds_diff_at_best_bal_acc_orig)
###Output
Threshold corresponding to Best balance accuracy: 0.2600
Best balance accuracy: 0.8083
Corresponding abs(1-disparate impact) value: 0.4090
Corresponding average odds difference value: -0.0698
###Markdown
**Use LIME to generate explanations for predictions made using the learnt Random Forest model**
###Code
limeData = LimeEncoder().fit(dataset_orig_train)
s_train = limeData.transform(dataset_orig_train.features)
s_test = limeData.transform(dataset_orig_test.features)
scale = rf_scale_orig
model = rf_orig #model to test
explainer = lime.lime_tabular.LimeTabularExplainer(s_train ,class_names=limeData.s_class_names,
feature_names = limeData.s_feature_names,
categorical_features=limeData.s_categorical_features,
categorical_names=limeData.s_categorical_names,
kernel_width=3, verbose=False,discretize_continuous=True)
s_predict_fn = lambda x: model.predict_proba(scale.transform(limeData.inverse_transform(x)))
import random
print("Threshold corresponding to Best balance accuracy: %6.4f" % rf_thresh_arr_orig_best)
exp = explainer.explain_instance(s_test[i1], s_predict_fn, num_features=5)
exp.as_pyplot_figure()
print(" Actual label: " + str(dataset_orig_test.labels[i1]))
exp = explainer.explain_instance(s_test[i2], s_predict_fn, num_features=5)
exp.as_pyplot_figure()
print(" Actual label: " + str(dataset_orig_test.labels[i2]))
###Output
Threshold corresponding to Best balance accuracy: 0.2600
Actual label: [1.]
Actual label: [0.]
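###Markdown
LIME explanations can also be inspected programmatically instead of (or in addition to) plotting them. This is an added sketch, not part of the original notebook; it uses the standard `as_list()` method of the LIME explanation object created in the previous cell.
###Code
# Added sketch (not part of the original run): print the feature/weight pairs of the last explanation
for feature, weight in exp.as_list():
    print("%s : %.4f" % (feature, weight))
###Output
_____no_output_____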
|
nlm_workshop_ovarian_cancer.ipynb | ###Markdown
Start up an AWS Ubuntu instance and install Docker. For this analysis, the following specs should be more than enough to run the bioinformatics pipeline. Build the Docker image and push it to a registry so it can later be pulled onto the instance.
###Code
docker build -t jonessarae/nlm_workshop:seq_tools .
docker push jonessarae/nlm_workshop:seq_tools
###Output
_____no_output_____
###Markdown
Pull the docker image.
###Code
sudo docker pull jonessarae/nlm_workshop:seq_tools
###Output
_____no_output_____
###Markdown
Download SRA files with sra_download.sh (check that the script has execute permissions). The SRA accession numbers are already hardcoded in the script.
###Code
./sra_download.sh
###Output
_____no_output_____
###Markdown
Run the docker container and mount the home directory into the container. The rest of this notebook will be done within the container.---
###Code
sudo docker run -it -v ~:/mnt jonessarae/nlm_workshop:seq_tools
###Output
_____no_output_____
###Markdown
Convert SRA files into FASTQ files and split them into read 1 and read 2. The fastq-dump command from the SRA toolkit does not support multi-threading; it would be better to find another package that can multi-thread. Can the splitting be avoided? BWA can take interleaved reads with the -p option. How about the other tools?
###Code
fastq-dump --split-files --origfmt --gzip SRR2989954.sra
gunzip SRR2989954_1.fastq.gz SRR2989954_2.fastq.gz
###Output
_____no_output_____
###Markdown
Download the reference genome hg19.2bit and the twoBitToFa tool, convert the 2bit file to a fasta file, and index it with bwa. Note that bwa index is single-threaded; the -t option is used later with bwa mem to add more cores.
###Code
wget http://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/hg19.2bit
rsync -aP rsync://hgdownload.soe.ucsc.edu/genome/admin/exe/linux.x86_64/twoBitToFa .
chmod 744 twoBitToFa
mkdir -p ref
./twoBitToFa hg19.2bit ref/hg19.fa
bwa index -a bwtsw ref/hg19.fa
###Output
_____no_output_____
###Markdown
Download reference genome hg19 annotation file (GTF). There are two files to choose from. Not sure which to use. https://www.gencodegenes.org/releases/19.html
###Code
wget ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_19/gencode.v19.annotation.gtf.gz
mkdir -p gtf && gunzip -c gencode.v19.annotation.gtf.gz > gtf/gencode.v19.annotation.gtf
###Output
_____no_output_____
###Markdown
Exome: Align paired reads to the reference genome and convert it to a bam file. With an AWS instance with 96 cores, this step took ~25 minutes (on file 54) with 96 threads.
###Code
bwa mem -t 8 ref/hg19.fa SRR2989954_1.fastq SRR2989954_2.fastq | samtools view -Sb - > SRR2989954.bam
bwa mem -t 8 ref/hg19.fa SRR2989963_1.fastq SRR2989963_2.fastq | samtools view -Sb - > SRR2989963.bam
###Output
_____no_output_____
###Markdown
Sort the bam files by genomic order.
###Code
samtools sort -@ 4 SRR2989954.bam -o SRR2989954_sorted.bam
samtools sort -@ 8 SRR2989963.bam -o SRR2989963_sorted.bam
###Output
_____no_output_____
###Markdown
RNA: Make the genome indices that STAR requires. Always ensure read/write privileges on files/folders. The --sjdbOverhang value should be ReadLength-1; does it matter if it is not 99? Change number of threads to 8. Mention spec requirements.
###Code
mkdir star_indices
STAR --runThreadN 4 --runMode genomeGenerate --genomeDir star_indices --genomeFastaFiles ref/hg19.fa --sjdbGTFfile gtf/gencode.v19.annotation.gtf --sjdbOverhang 100
###Output
_____no_output_____
###Markdown
RNA: Align paired reads to the reference genome. Don't forget to make a mapped_rna directory.
###Code
mkdir mapped_rna
STAR --readFilesIn SRR2989969_1.fastq SRR2989969_2.fastq --runThreadN 8 --genomeDir star_indices --outFileNamePrefix mapped_rna/SRR29899692pass --genomeLoad NoSharedMemory --sjdbGTFfile gtf/gencode.v19.annotation.gtf --outSAMtype BAM SortedByCoordinate --twopassMode Basic
###Output
_____no_output_____
###Markdown
Exome: indel realignment with GATK from bioconda. Note that RealignerTargetCreator and IndelRealigner are GATK3 tools; they were removed in GATK4.
###Code
java -Xmx4g -jar GenomeAnalysisTK.jar -I SRR2989954_sorted.bam -R ref/hg19.fa -T RealignerTargetCreator -o SRR2989954.intervals -known Mills_and_1000G_gold_standard.indels.hg19.vcf --read_filter MappingQualityZero
java -Xmx4g -jar GenomeAnalysisTK.jar -I SRR2989954_sorted.bam -R ref/hg19.fa -T IndelRealigner -targetIntervals SRR2989954.intervals -o SRR2989954_realign.bam -known Mills_and_1000G_gold_standard.indels.hg19.vcf --read_filter MappingQualityZero
java -Xmx4g -jar GenomeAnalysisTK.jar -I SRR2989963_sorted.bam -R ref/hg19.fa -T RealignerTargetCreator -o SRR2989963.intervals -known Mills_and_1000G_gold_standard.indels.hg19.vcf --read_filter MappingQualityZero
java -Xmx4g -jar GenomeAnalysisTK.jar -I SRR2989963_sorted.bam -R ref/hg19.fa -T IndelRealigner -targetIntervals SRR2989963.intervals -o SRR2989963_realign.bam -known Mills_and_1000G_gold_standard.indels.hg19.vcf --read_filter MappingQualityZero
###Output
_____no_output_____
###Markdown
Filter out reads that have mapping quality of < 20.
###Code
samtools view -b -q 20 SRR2989954_realign.bam > SRR2989954_m20.bam
samtools view -b -q 20 SRR2989963_realign.bam > SRR2989963_m20.bam
samtools view -b -q 20 mapped_rna/SRR29899692passAligned.sortedByCoord.out.bam > SRR2989969_m20.bam
###Output
_____no_output_____
###Markdown
Deduplicate files. We installed picard separately, but picard is also part of GATK4.
###Code
java -jar picard.jar MarkDuplicates I=SRR2989954_m20.bam O=SRR2989954_m20dedup.bam REMOVE_DUPLICATES=true METRICS_FILE=metrics.txt
java -jar picard.jar MarkDuplicates I=SRR2989963_m20.bam O=SRR2989963_m20dedup.bam REMOVE_DUPLICATES=true METRICS_FILE=metrics.txt
picard MarkDuplicates I=SRR2989969_m20.bam O=SRR2989969_m20dedup.bam REMOVE_DUPLICATES=true METRICS_FILE=metrics.txt
###Output
_____no_output_____
###Markdown
Generate a VCF for multiple BAM files. Not sure exactly how they ran this step, except that it outputs VCF format. Not sure where the normal sample is in the VCF file; assumed they combined all three samples with the normal.
###Code
samtools mpileup -f ref/hg19.fa SRR2989954_m20dedup.bam SRR2989963_m20dedup.bam SRR2989969_m20dedup.bam -v -o p1_ov.vcf
###Output
_____no_output_____
###Markdown
Calls variants from a mpileup dataset and produces a VCF.
###Code
varscan mpileup2snp p1_ov.vcf --min-coverage 5 --min-reads2 0 --min-avg-qual 20 --min-var-freq 0 --output-vcf 1
###Output
_____no_output_____
###Markdown
Run snpEff, a variant annotation and effect prediction tool. It annotates and predicts the effects of variants on genes. Not sure which version of the GRCh37 database was used.
###Code
#snpEff databases | grep -i sapiens
snpEff download GRCh37.75
snpEff GRCh37.75 p1_ov.vcf > p1_ov.ann.vcf
###Output
_____no_output_____
###Markdown
Count total reads and compare to supplementary figure 1.
###Code
samtools flagstat <sample.bam>
###Output
_____no_output_____ |
tutorials/large_scale_LEM/.ipynb_checkpoints/large_scale_LEMs-checkpoint.ipynb | ###Markdown
Large scale landscape evolution model with Priority flood flow router and Space_v2. The priority flood flow router is designed to calculate flow properties over large scale grids. In the following notebook we illustrate how the priority flood flow accumulator can be used to simulate landscape evolution using the SPACE_V2 Landlab component (SpaceLargeScaleEroder).
###Code
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
import time
from landlab import imshow_grid, RasterModelGrid
from landlab.components import (
FlowAccumulator,
DepressionFinderAndRouter,
Space,
SpaceLargeScaleEroder,
PriorityFloodFlowRouter,
)
###Output
_____no_output_____
###Markdown
Create raster grid
###Code
# nr = 20
# nc = 20
nr = 75
nc = 75
xy_spacing = 10.0
mg = RasterModelGrid((nr, nc), xy_spacing=xy_spacing)
z = mg.add_zeros("topographic__elevation", at="node")
mg.at_node["topographic__elevation"][mg.core_nodes] += np.random.rand(
mg.number_of_core_nodes
)
s = mg.add_zeros("soil__depth", at="node", dtype=float)
mg.at_node["soil__depth"][mg.core_nodes] += 0.5
mg.at_node["topographic__elevation"] += mg.at_node["soil__depth"]
fr = FlowAccumulator(mg, flow_director='D8')
df = DepressionFinderAndRouter(mg)
ha = Space(mg, K_sed=0.00005, K_br=0.00005, phi=0.3, H_star=1)
br = mg.at_node["bedrock__elevation"]
z = mg.at_node["topographic__elevation"]
space_dt = 500
z_ori = np.array(z)
t1 = time.time()
for i in tqdm(range(50)):
# Uplift
br[mg.core_nodes] += 0.001 * space_dt
z[mg.core_nodes] = br[mg.core_nodes] + s[mg.core_nodes]
fr.run_one_step()
df.map_depressions()
ha.run_one_step(dt=space_dt)
t_span1 = time.time() - t1
print('Total run time is %.f s' %t_span1)
plt.figure(figsize=(10,10))
imshow_grid(mg, "topographic__elevation", cmap="terrain")
plt.title("Final topographic__elevation")
mg2 = RasterModelGrid((nr, nc), xy_spacing=xy_spacing)
z2 = mg2.add_zeros("topographic__elevation", at="node")
mg2.at_node["topographic__elevation"][mg2.core_nodes] += np.random.rand(
mg2.number_of_core_nodes
)
s2 = mg2.add_zeros("soil__depth", at="node", dtype=float)
mg2.at_node["soil__depth"][mg2.core_nodes] += 0.5
mg2.at_node["topographic__elevation"] += mg2.at_node["soil__depth"]
fr2 = PriorityFloodFlowRouter(mg2, flow_metric="D8", update_flow_depressions=True)
ha2 = SpaceLargeScaleEroder(mg2, K_sed=0.00005, K_br=0.00005, phi=0.3, H_star=1)
br2 = mg2.at_node["bedrock__elevation"]
z2 = mg2.at_node["topographic__elevation"]
z_ori = np.array(z2)
t2 = time.time()
for i in tqdm(range(50)):
# Uplift
br2[mg2.core_nodes] += 0.001 * space_dt
z2[mg2.core_nodes] = br2[mg2.core_nodes] + s2[mg2.core_nodes]
fr2.run_one_step()
ha2.run_one_step(dt=space_dt)
t_span2 = time.time() - t2
print('Total run time is %.f s' %t_span2)
plt.figure(figsize=(10,10))
imshow_grid(mg2, "topographic__elevation", cmap="terrain")
plt.title("Final topographic__elevation")
plt.figure()
plt.bar(['Default flow accumulator','Priority Flood flow accumulator'],[t_span1,t_span2])
plt.ylabel('Seconds')
###Output
_____no_output_____ |
segment-words.ipynb | ###Markdown
Load documents and scrutinize unwanted punctuations. **Load document labels**
###Code
import os
from os.path import isdir, join
import numpy as np
import matplotlib.pyplot as plt
raw_path = u'./corpus/raw-docs' # it will listdir into unicode
doc_labels = [fn for fn in os.listdir(raw_path) if isdir(join(raw_path, fn))] # list only folders
print 'Showing one sample document label'
print 'Unicode codepoints representation:', repr(doc_labels[0]), '::', type(doc_labels[0])
print 'The actual glyph (appearance):', doc_labels[0]
###Output
Showing one sample document label
Unicode codepoints representation: u'\u0e1a\u0e23\u0e34\u0e2b\u0e32\u0e23\u0e18\u0e38\u0e23\u0e01\u0e34\u0e08' :: <type 'unicode'>
The actual glyph (appearance): บริหารธุรกิจ
###Markdown
**Show all document labels**
###Code
doc_labels_idx = {} # maps label name to its corresponding index
print 'Total labels:', len(doc_labels)
for i, label in enumerate(doc_labels):
doc_labels_idx[label] = i
print "%d: %s" % (i, label)
###Output
Total labels: 20
0: บริหารธุรกิจ
1: ประมง
2: มนุษยศาสตร์
3: วนศาสตร์
4: วิทยาการจัดการ
5: วิทยาศาสตร์
6: วิทยาศาสตร์การกีฬา
7: วิศวกรรมศาสตร์
8: ศิลปศาสตร์และวิทยาศาสตร์
9: ศึกษาศาสตร์
10: ศึกษาศาสตร์และพัฒนศาสตร์
11: สถาปัตยกรรมศาสตร์
12: สังคมศาสตร์
13: สัตวแพทยศาสตร์
14: สิ่งแวดล้อม
15: อุตสาหกรรมเกษตร
16: เกษตร
17: เศรษฐศาสตร์
18: โครงการจัดตั้งวิทยาเขตสุพรรณบุรี
19: โครงการสหวิทยาการระดับบัณฑิตศึกษา
###Markdown
**Open documents from each folder**
###Code
%%time
label_freqs = []
dataset_contents, dataset_labels, dataset_filenames, content_lengths = [], [], [], [] # will be used later
for i, label in enumerate(doc_labels):
curr_dir = join(raw_path, label)
fns = os.listdir(curr_dir)
# print len(fns), label
label_freqs.append(len(fns))
for fn in fns:
file_path = join(curr_dir, fn)
with open(file_path, 'r') as f:
content = unicode(f.read(), 'utf8')
content_lengths.append(len(content))
dataset_contents.append(content)
dataset_labels.append(i)
dataset_filenames.append(fn)
###Output
Wall time: 50.6 s
###Markdown
**Show number of files in each folder**
###Code
plt.figure()
plt.bar(np.arange(len(doc_labels))-0.5, label_freqs, 1)
plt.xticks(np.arange(len(doc_labels)))
plt.xlabel('Label')
plt.ylabel('Frequency')
plt.yticks(np.arange(0, max(label_freqs)+50, 50))
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
**Show dataset statistics**
###Code
print 'Total documents:', len(dataset_contents)
print 'Label Frequencies:', label_freqs
print 'Label Frequencies Mean:', np.mean(label_freqs)
print 'Content Lengths Mean:', np.mean(content_lengths)
###Output
Total documents: 2549
Label Frequencies: [99, 76, 129, 119, 6, 251, 32, 468, 25, 312, 6, 15, 110, 19, 58, 203, 326, 209, 4, 82]
Label Frequencies Mean: 127.45
Content Lengths Mean: 189040.900353
###Markdown
**Remove outliers**
###Code
# idx = np.argmax(content_lengths)
# del content_lengths[idx]
# del dataset_contents[idx]
# del dataset_labels[idx]
# del dataset_filenames[idx]
###Output
_____no_output_____
###Markdown
**Show histogram of all contents' length**
###Code
plt.figure()
plt.hist(content_lengths, bins=200)
plt.xlabel('Content Length (characters count)')
plt.ylabel('Document Count')
plt.show()
def has_thai_char(s):
return any(u'\u0e00' < c < u'\u0f00' for c in s)
print has_thai_char(u'สวัสดีจ้ะ english')
print has_thai_char(u'Ianalysis')
###Output
True
False
###Markdown
Scrutinize unwanted punctuations. **Define scrutinize() function**
###Code
from nltk.stem.snowball import SnowballStemmer
from wordsegment import clean, segment  # assuming the wordsegment package provides clean()/segment()
punctuations = set(u'!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
stemmer = SnowballStemmer('english')
limit = 70  # note: the recorded outputs below were produced with a limit of 200
def scrutinize(s):
    scrutinized = u''.join(u' ' if c in punctuations else c for c in s) # replace all punctuations inside s with spaces
    segmented = []
    for sentence in scrutinized.split(): # split contiguous english words if possible
        if not has_thai_char(sentence):
            sentence = clean(sentence)
            try:
                if len(sentence) > limit:
                    raise ValueError('too long to segment len > %d' % limit)
                sentence = u' '.join(stemmer.stem(word) for word in segment(sentence))
            except Exception, e:
                print 'skip (len=%d, word=%s..., exception=%s)' % (len(sentence), sentence[:50], str(e))
                sentence = None
        if sentence:  # keep Thai sentences as-is and English sentences that segmented successfully
            segmented.append(sentence)
    return u' '.join(segmented)
###Output
###Markdown
**Sample Original Content**
###Code
sample = dataset_contents[3][:2**9]
print sample
# sample
###Output
I50731645
Iวิทยานิพนธ์
Iการพัฒนาประสิทธิผลในการทางานของผู้ทาบัญชี
Iหลังจากเข้ารับการพัฒนาความรู้ต่อเนื่องทางวิชาชีพ
Ieffectiveness development in practicing
Iof bookkeepers after attending
Icontinuing professional development
Iนางสาวสุภาพันธุ์ สายทองอินทร์
Iบัณฑิตวิทยาลัย มหาวิทยาลัยเกษตรศาสตร์
Iพ . ศ . 2554
Iใบรับรองวิทยานิพนธ์
Iบัณฑิตวิทยาลัย มหาวิทยาลัยเกษตรศาสตร์
Iบริหารธุรกิจมหาบัณฑิต
Iปริญญา
Iสาขา
Iภาควิชา
Iเรื่อง
Iการพัฒนาประสิทธิผลในการทางานของผู้ทาบัญชี หลังจากเข้ารับ
Iการพัฒนาความรู้ต่อเนื่องทางว
###Markdown
**Sample Content Scrutinized**
###Code
scrutinized = scrutinize(sample)
print scrutinized
# scrutinized
###Output
i50731645 Iวิทยานิพนธ์ Iการพัฒนาประสิทธิผลในการทางานของผู้ทาบัญชี Iหลังจากเข้ารับการพัฒนาความรู้ต่อเนื่องทางวิชาชีพ i effect develop in practic iof bookkeep after attend i continu profession develop Iนางสาวสุภาพันธุ์ สายทองอินทร์ Iบัณฑิตวิทยาลัย มหาวิทยาลัยเกษตรศาสตร์ Iพ ศ 2554 Iใบรับรองวิทยานิพนธ์ Iบัณฑิตวิทยาลัย มหาวิทยาลัยเกษตรศาสตร์ Iบริหารธุรกิจมหาบัณฑิต Iปริญญา Iสาขา Iภาควิชา Iเรื่อง Iการพัฒนาประสิทธิผลในการทางานของผู้ทาบัญชี หลังจากเข้ารับ Iการพัฒนาความรู้ต่อเนื่องทางว
###Markdown
**Scrutinize all contents**
###Code
%%time
for i in xrange(len(dataset_contents)):
print i,
dataset_contents[i] = scrutinize(dataset_contents[i])
content_lengths[i] = len(dataset_contents[i])
print 'New Content Lengths Mean:', np.mean(content_lengths)
###Output
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 skip (len=4153, word=patgctgttattaattatatttttaatattaattaaagtattaatattta..., exception=too long to segment len > 200)
skip (len=1979, word=patgtatagcacgtcaaaaattaacaatgcgcgcgttgtcgcatctcaac..., exception=too long to segment len > 200)
skip (len=3527, word=patttgtcttaattttaagatgtaattattttatgtaaaaaaaaaatgaa..., exception=too long to segment len > 200)
skip (len=2151, word=patgaaaatatattcttacaatgaactcaaaacgcgctttgcagaatatg..., exception=too long to segment len > 200)
skip (len=812, word=pgaaagttttaataatattaaaaaagaaaaaatattgctatcttaacagc..., exception=too long to segment len > 200)
478 479 480 481 skip (len=980, word=pnnnnnnnnngactttttacgacacttgagaagatcaaaaaacaactaat..., exception=too long to segment len > 200)
skip (len=974, word=pnnnntttgnnnnngcatcctcctcgtacagtatgaaggtgagggcagag..., exception=too long to segment len > 200)
skip (len=852, word=pttccattgattangctatttgaagcggtataacccaactacgtgcaagg..., exception=too long to segment len > 200)
skip (len=974, word=pgnnntnattagncaattagttagggttcatttgattttattggactaaa..., exception=too long to segment len > 200)
skip (len=980, word=pnntnttannnactttttacgaaacttgagaagatcaaaaaacaactaat..., exception=too long to segment len > 200)
skip (len=986, word=pnnnnnnttggggaaagcntcctcctcgtaccagtatgaaggtgagggca..., exception=too long to segment len > 200)
skip (len=848, word=pnttttcttattgcagatgggtaagcattggattacctaaatgagccatc..., exception=too long to segment len > 200)
skip (len=983, word=pnnnntnataagccaanttagttaagggttcatttgattttattggacta..., exception=too long to segment len > 200)
skip (len=1178, word=pgnnnnnntgnnnnatcctcctcgtaccagtatgaaggtgagggcagagt..., exception=too long to segment len > 200)
skip (len=1176, word=pcnnnnnnnnntgganaagggnaancattggattacactaaatgagccat..., exception=too long to segment len > 200)
skip (len=1054, word=pannnnnntnnnnccnattnagttaaggnttcatttgattttattggact..., exception=too long to segment len > 200)
skip (len=664, word=pacaaatattagaaccttcgcggtttgaagattgatggctcatttagtgt..., exception=too long to segment len > 200)
skip (len=1199, word=pttngngtnnnatctctcgtaccagtatgaaggtgagggcagagtaccaa..., exception=too long to segment len > 200)
skip (len=1219, word=pcnnnggnnntagggtaagcattggattacactaaatgatgccatcaatc..., exception=too long to segment len > 200)
skip (len=942, word=pnnnatnanttaanggttcatttgattttattggactaaactattacacc..., exception=too long to segment len > 200)
skip (len=1251, word=pnnnnnnntctncatttcgcgggtgaagantgatggctcatttagtgtaa..., exception=too long to segment len > 200)
482 483 484 485 486 487 488 489 490 491 492 skip (len=1513, word=patggccacggcgatcccgcagcggcagctcttcgtcgccggcgagtggc..., exception=too long to segment len > 200)
skip (len=504, word=pmataipqrqlfvagewrapalgrrlpvvnpatespigeipagtaedvda..., exception=too long to segment len > 200)
skip (len=1516, word=patggccacggcgatcccgcagcggcagctcttcgtcgccggcgagtggc..., exception=too long to segment len > 200)
skip (len=505, word=pmataipqrqlfvagewrapalgrrlpvvnpatespigeipagtaedvda..., exception=too long to segment len > 200)
skip (len=1513, word=patggccacggcgatcccgcagcggcagctcttcgtcgccggcgagtggc..., exception=too long to segment len > 200)
skip (len=504, word=pmataipqrqlfvagewrapalgrrlpvvnpatespigeipagtaedvda..., exception=too long to segment len > 200)
skip (len=1513, word=patggccacggcgatcccgcagcggcagctcttcgtcgccggcgagtggc..., exception=too long to segment len > 200)
skip (len=504, word=pmataipqrqlfvagewrapalgrrlpvvnpatespigeipagtaedvda..., exception=too long to segment len > 200)
skip (len=1513, word=patggccacggcgatcccgcagcggcagctcttcgtcgccggcgagtggc..., exception=too long to segment len > 200)
skip (len=504, word=pmataipqrqlfvagewrapalgrrlpvvnpatespigeipagtaedvda..., exception=too long to segment len > 200)
493 494 495 496 497 498 499 skip (len=226, word=ptaattcgcaagagaataaatttcatggcaagtgacgcccttccaatcgt..., exception=too long to segment len > 200)
skip (len=476, word=ptgactgcgtaccaattcactattctaagggtgactttgattctcttcaa..., exception=too long to segment len > 200)
skip (len=201, word=pctatcgtcttcaacatcacaatttttatatctcaaatattccacgtggc..., exception=too long to segment len > 200)
skip (len=281, word=pctcatcgagttaaaaggacaaaaatttaaaaggtattgaaactgcaata..., exception=too long to segment len > 200)
skip (len=339, word=pctcatcgagttaaaaggacaaaaatttaaaaggtattgaaactgcaata..., exception=too long to segment len > 200)
skip (len=350, word=pctggaaatgcggacgtacattgacacctgcaacgaagattcgcctcgga..., exception=too long to segment len > 200)
skip (len=330, word=pcctatccgcgagcgacagtcgtccatcacgagcacaacggcatcctcga..., exception=too long to segment len > 200)
skip (len=283, word=pgaatgccaaaggggggttccgtcagtaacggcgtgattgatcgtcaatc..., exception=too long to segment len > 200)
skip (len=450, word=ptatgtttagatataatggagatgctttacacattcctctctaatgtagt..., exception=too long to segment len > 200)
skip (len=291, word=ptgactgcgtaccaattcactcacctacaaaagctaattgaccgctggag..., exception=too long to segment len > 200)
skip (len=341, word=pctcattgagttaaaaggacaaaaatttaaaaggtattgaaactgcaata..., exception=too long to segment len > 200)
skip (len=338, word=pctggaaatgcggacgtacattgacacctgcaacgaagattcgcctcaga..., exception=too long to segment len > 200)
skip (len=243, word=pacaaagtttcgcagaagggtatccgcctacgtagtggactggaaagatt..., exception=too long to segment len > 200)
skip (len=238, word=pcacagcagcagcaacaatggcaagcacagacacaatcaaggagaatgct..., exception=too long to segment len > 200)
skip (len=288, word=pccaaacccttttccttacataaggatgtcaaaagatccacaacaaggta..., exception=too long to segment len > 200)
500 skip (len=230, word=patcatcgccgacaacctcgggaggagcctggagcgggcgttggcgccgc..., exception=too long to segment len > 200)
skip (len=481, word=pcaataatgattttattttgactgatagtgacctgttcgttgcaacaaat..., exception=too long to segment len > 200)
skip (len=512, word=pcaataatgattttattttgactgatagtgacctgttcgttgcaacaaat..., exception=too long to segment len > 200)
skip (len=1171, word=patggaagcgaacggctaccgcataactcacagcgccgacgggccggcga..., exception=too long to segment len > 200)
skip (len=1171, word=patggccaacctccacgcgttgcgcagggagcagagggctcaaggtcctg..., exception=too long to segment len > 200)
501 skip (len=476, word=pgcgcgaattcggcaagatagagataaagcggatcgaaaacaccacaaat..., exception=too long to segment len > 200)
skip (len=476, word=pgcgcgaattcggcaagatagagataaagcggatcgaaaacaccacaaat..., exception=too long to segment len > 200)
skip (len=1064, word=ptcatcttgcttccattttctgcatctctcctactcagatttgtagaaac..., exception=too long to segment len > 200)
skip (len=331, word=ptaagctttgagagtgagcatcaacttcttgccctctattagtctctgta..., exception=too long to segment len > 200)
skip (len=242, word=pmaypsdsretspqrrmgrgkieikrienttnrqvtfckrrngllkkaye..., exception=too long to segment len > 200)
skip (len=476, word=pgcgcgaattcggcaagatagagataaagcggatcgaaaacaccacaaat..., exception=too long to segment len > 200)
skip (len=603, word=pcgggctggagagcaggctagagaaggaattagtagaattcggtccaaaa..., exception=too long to segment len > 200)
skip (len=635, word=pttggaanaaaaggctanagaaaggaattagtagaattcggtccaaaaag..., exception=too long to segment len > 200)
skip (len=979, word=pgcgcgaattcgggaaaattgaaattaagcggatcgaaaacaccacaaat..., exception=too long to segment len > 200)
skip (len=1013, word=pgcgcgaattcgggaaaattgaaattaagcggatcgaaaacaccacaaat..., exception=too long to segment len > 200)
skip (len=1064, word=pagctgccatggcataccccagcgattcccgggagacttcaccgcagagg..., exception=too long to segment len > 200)
502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 skip (len=1238, word=pmssilepkkdqsveeeivaihltngqedgranrrltdfvvhdangtpkp..., exception=too long to segment len > 200)
skip (len=1384, word=pmfflvsdfrtkkkkgktkssvsnaskeltnntkgkkrsssrqnedpass..., exception=too long to segment len > 200)
skip (len=721, word=pmphlfkddsddvilarchyrqaevdghniynlyddahvkaadgedsyic..., exception=too long to segment len > 200)
skip (len=747, word=pmpffegatwlfvvislkddsddvilarchyrqaevdghniynlyddahv..., exception=too long to segment len > 200)
skip (len=203, word=paaggtaatcttcaaataatcttcaaaccgagtatattaaaaatcaagtt..., exception=too long to segment len > 200)
skip (len=206, word=paaggtaatcttcaaataatcttcaaaccgagtatattaaaaatcaagtt..., exception=too long to segment len > 200)
skip (len=456, word=pgaacccaaaactcaagtcttgaaacaaggaccattcatgaagataaagt..., exception=too long to segment len > 200)
skip (len=333, word=gttttccactaccgagcaggactcatgataaccgagcaggactcatggcg..., exception=too long to segment len > 200)
skip (len=331, word=pattggattgagctctatgaacctttctgttgcttctcagtccctctctc..., exception=too long to segment len > 200)
skip (len=247, word=tagctacttcatctcctcttgactaagcccacactccctatcatcgactc..., exception=too long to segment len > 200)
skip (len=5849, word=patgttctttttggtctctgatttcagtacattggtctcatctcagtatt..., exception=too long to segment len > 200)
skip (len=1031, word=pacaggttaaattgtccactgggcaagtggttgatttaattccatggtgt..., exception=too long to segment len > 200)
skip (len=5075, word=patgttctttttggtctctgatttcagtacattggtctcatctcagtatt..., exception=too long to segment len > 200)
skip (len=1803, word=pgcttcacttcttgagatgggctatcaggtagagtttgttgcaagtaaat..., exception=too long to segment len > 200)
skip (len=4301, word=patgcctcatcttttgtagaagtcttatatttcgttgatattactccctt..., exception=too long to segment len > 200)
skip (len=3026, word=paacctgctgaggtagctatacatcaatcaaccaacttgtgcaatggtta..., exception=too long to segment len > 200)
skip (len=2925, word=patgccatttttcgagggtgctacttggctctttgttgtcatttccctgt..., exception=too long to segment len > 200)
skip (len=4472, word=pttatgacttggtgcatgaacataaagaatttgattgcttatttcctacg..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=226, word=patcatgagtcctgctcggtcccatgacggtctaaaagttcgtgaatggc..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=225, word=pgatgagtcctgagtaacttcaacgcgctagcaggtgccccagactgatg..., exception=too long to segment len > 200)
skip (len=225, word=patcatgagtcctgctcggtccctccgcatcaagatagtcaatcgagaga..., exception=too long to segment len > 200)
skip (len=258, word=pgatgagtcctgagtaacatagttcagtaccacatgtttgaaaattagat..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=405, word=pgatgagtcctgagtaactcctggtcctctctggagacgcgaggttgtgt..., exception=too long to segment len > 200)
skip (len=405, word=pgatgagtcctgagtaactcctggtcctctctggagacgcgaggttgtgt..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=341, word=pgatgagtcctgagtaacttgacgatgagtcctgagtaagcatatcaata..., exception=too long to segment len > 200)
skip (len=225, word=pgatgagtcctgagtaacttcaacgcgctagcaggtgccccagactgatg..., exception=too long to segment len > 200)
skip (len=344, word=pgatgagtcctgagtaactatccaattgaaaaataattcgtatttcccgt..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=263, word=pgatgagtcctgagtaacattatgggggagacgaacgggtgaacatactt..., exception=too long to segment len > 200)
skip (len=263, word=pgatgagtcctgagtaacattatgggggagacgaacgggcgaacatactt..., exception=too long to segment len > 200)
skip (len=471, word=patcatgagtcctgctcggtaaggagaggctactggggtaacaagatcgg..., exception=too long to segment len > 200)
skip (len=471, word=patcatgagtcctgctcggtaaggagaggctactggggtaacaagatcgg..., exception=too long to segment len > 200)
skip (len=356, word=pcctttggctcgagcttgtctttcagctcgttaagcggcagcacgcgcgc..., exception=too long to segment len > 200)
skip (len=295, word=pgatgagtcctgagtaactgcacatcataaaaaaacaccgccccagagag..., exception=too long to segment len > 200)
528 529 530 531 skip (len=327, word=pmesgsfpvinmellqgpqrpaamallrdacenwgffellnhgithelmd..., exception=too long to segment len > 200)
skip (len=634, word=pmegcdciepqwpadellvkyqyisdlfialayfsipleliyfvkkssff..., exception=too long to segment len > 200)
skip (len=784, word=pcgagatttggaggcggtaaacaaggaacattaccggcggttccgagaac..., exception=too long to segment len > 200)
skip (len=833, word=pctcaaggacacttgctatagtgatgactgtagcaaaagttttgactgca..., exception=too long to segment len > 200)
532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 skip (len=453, word=pcatatttgtgagttttgtataaggtcattgtctttactgcatggagaga..., exception=too long to segment len > 200)
skip (len=497, word=ptggttggctctcggtgtcctcgagttgtaattgagcagctgttagctta..., exception=too long to segment len > 200)
skip (len=357, word=paataacaacaataacaccaagaagaagaagtaagcaagaatgttaggag..., exception=too long to segment len > 200)
[... long run of cell output omitted: progress counters plus hundreds of warnings of the form "skip (len=..., word=..., exception=too long to segment len > 200)", raised for tokens longer than 200 characters (mostly DNA/protein sequence strings) that could not be segmented ...]
New Content Lengths Mean: 180772.845037
Wall time: 2h 31s
###Markdown
**New Content Lengths after Scrutinizing**
###Code
plt.figure()
plt.hist(content_lengths, bins=200)
plt.xlabel('Content Length (character count)')
plt.ylabel('Document Count')
plt.show()
###Output
_____no_output_____
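###Markdown
A few order statistics complement the histogram and make the long tail easier to quantify. This is a minimal sketch, assuming `content_lengths` is the per-document character-count list computed above.
###Code
import numpy as np
# median and upper-tail percentiles of the document lengths (in characters)
lengths = np.array(content_lengths)
for q in (50, 90, 99):
    print('%dth percentile: %d characters' % (q, np.percentile(lengths, q)))
print('max: %d characters' % lengths.max())
###Output
_____no_output_____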
###Markdown
**Save scrutinized contents**
###Code
%%time
scrutinized_path = './corpus/scrutinized-docs'
for content, label, fn in zip(dataset_contents, dataset_labels, dataset_filenames):
str_label = doc_labels[label]
folder_path = join(scrutinized_path, str_label)
if not exists(folder_path):
os.makedirs(folder_path)
file_path = join(folder_path, fn)
    with open(file_path, 'w') as f:
        f.write(content.encode('utf8'))  # write UTF-8 encoded bytes (Python 2 str)
del dataset_contents  # free memory; the raw contents are no longer needed after saving
###Output
Wall time: 1min 21s
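###Markdown
Before moving on, a quick sanity check is useful: the number of files written under `scrutinized-docs` should match the number of documents in the dataset. A minimal sketch, assuming `doc_labels` holds the faculty folder names used above.
###Code
import os
from os.path import join, exists
# count the files actually written per faculty folder and compare with the dataset size
written = 0
for label in doc_labels:
    folder_path = join(scrutinized_path, label)
    if exists(folder_path):
        written += len(os.listdir(folder_path))
print('files written: %d / %d' % (written, len(dataset_filenames)))
###Output
_____no_output_____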
###Markdown
Segment each document and save them

**Create new folders if necessary**
###Code
segmented_path = u'./corpus/segmented-docs'
for label in doc_labels:
folder_path = join(segmented_path, label)
if not exists(folder_path):
os.makedirs(folder_path)
print 'New folder', folder_path
###Output
New folder ./corpus/segmented-docs\บริหารธุรกิจ
New folder ./corpus/segmented-docs\ประมง
New folder ./corpus/segmented-docs\มนุษยศาสตร์
New folder ./corpus/segmented-docs\วนศาสตร์
New folder ./corpus/segmented-docs\วิทยาการจัดการ
New folder ./corpus/segmented-docs\วิทยาศาสตร์
New folder ./corpus/segmented-docs\วิทยาศาสตร์การกีฬา
New folder ./corpus/segmented-docs\วิศวกรรมศาสตร์
New folder ./corpus/segmented-docs\ศิลปศาสตร์และวิทยาศาสตร์
New folder ./corpus/segmented-docs\ศึกษาศาสตร์
New folder ./corpus/segmented-docs\ศึกษาศาสตร์และพัฒนศาสตร์
New folder ./corpus/segmented-docs\สถาปัตยกรรมศาสตร์
New folder ./corpus/segmented-docs\สังคมศาสตร์
New folder ./corpus/segmented-docs\สัตวแพทยศาสตร์
New folder ./corpus/segmented-docs\สิ่งแวดล้อม
New folder ./corpus/segmented-docs\อุตสาหกรรมเกษตร
New folder ./corpus/segmented-docs\เกษตร
New folder ./corpus/segmented-docs\เศรษฐศาสตร์
New folder ./corpus/segmented-docs\โครงการจัดตั้งวิทยาเขตสุพรรณบุรี
New folder ./corpus/segmented-docs\โครงการสหวิทยาการระดับบัณฑิตศึกษา
###Markdown
**Create a temporary paths file, then call Java LongLexTo on that file to segment all documents**
###Code
%%time
try:
    os.chdir('LongLexTo')  # run from inside the LongLexTo folder so 'java LongLexTo' can find the class
except:
    pass  # already inside LongLexTo/ (e.g. when re-running this cell)
print os.getcwdu()
tmp_paths = u'tmp_paths.txt'
tmp_output = u'tmp_output.txt'
with open(tmp_paths, 'w') as f:
contents = []
for label, fn in zip(dataset_labels, dataset_filenames):
str_label = doc_labels[label]
ifp = join('..', scrutinized_path, str_label, fn) # input file path
ofp = join('..', segmented_path, str_label, fn) # output file path
if not isfile(ifp):
print 'Error:', ifp, ofp
raise AssertionError('input file path is invalid')
content = ifp + u'\n' + ofp + u'\n'
contents.append(content)
    content = u'q\n'  # trailing 'q' tells LongLexTo to stop prompting for more files
    contents.append(content)
f.write(''.join(contents).encode('utf8'))
print 'Running...'
return_code = call(u'java -Dfile.encoding=UTF-8 LongLexTo < %s > %s' % (tmp_paths, tmp_output), shell=True)  # JVM options (-Dfile.encoding) must come before the class name
print 'return code:', return_code
print 'Please see %s and %s for more info' % (tmp_paths, tmp_output)
if return_code:
    print 'You may need to call the Java command yourself because I failed'
    print 'The paths creation process was successful but the segmentation went wrong'
    print 'Go into the folder LongLexTo, open a shell, then type the following command'
    print 'java -Dfile.encoding=UTF-8 LongLexTo < tmp_paths.txt'
    print 'Wait a minute and check the segmented-docs folder to see if the segmentation went right.'
    print 'The files will be encoded in UTF-8 and there will be no punctuation in each file.'
os.chdir('..')
###Output
D:\off99555\Documents\GitHub\Thai-thesis-classification\LongLexTo
Running...
return code: 1
Please see tmp_paths.txt and tmp_output.txt for more info
You may need to call the Java command yourself because I failed
The paths creation process was successful but the segmentation went wrong
Go into the folder LongLexTo, open a shell, then type the following command
java -Dfile.encoding=UTF-8 LongLexTo < tmp_paths.txt
Wait a minute and check the segmented-docs folder to see if the segmentation went right.
The files will be encoded in UTF-8 and there will be no punctuation in each file.
Wall time: 1.94 s
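###Markdown
Once the segmentation has finished (via the cell above or the manual `java` command), it is worth spot-checking one output file. A minimal sketch: it assumes the first document's segmented file exists and that LongLexTo separates tokens with `|` characters; adjust the delimiter if your build writes plain spaces instead.
###Code
import io
from os.path import join
# inspect the segmented version of the first document in the dataset
sample_label = doc_labels[dataset_labels[0]]
sample_path = join(segmented_path, sample_label, dataset_filenames[0])
with io.open(sample_path, encoding='utf8') as f:
    # assumption: tokens are '|'-separated (LongLexTo's usual convention); change if needed
    tokens = [t for t in f.read().split(u'|') if t.strip()]
print('token count: %d' % len(tokens))
print(u' | '.join(tokens[:30]))
###Output
_____no_output_____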
|
05/CS480_Assignment_5.ipynb | ###Markdown
![CS480_w.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAADtCAYAAAAvOMSOAAAf83pUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjarZtpklu5lYX/YxW9BFzMWA7GCO+gl9/fASmVVC7b5YhWhnJgknx4dzjDBdKd//3Hdf/Dv5pCcynXVnopnn+ppx4G3zT/+dffZ/PpfX7/+FX4Pvrb4+7s74sCD0W+xs+PdXy+2uDx/NsbfR6fvz/u2vc3oX3f6PuLH28YdWWtYf+6SB4Pn8ctfd+on883pbf661Ln5wb8+j7xLeX738JnYeH7rvrZ/fpAqkRpZy4UQzjRon+f22cFUf8tjvdVnyvPs1j53mJw70v7roSA/HZ7P756/2uAfgvyj+/cn6Ofz18HP4zvM+KfYlm+MeKbv/yF5b8O/gvxLxeOP1cUfv/F2iT2z7fz/X/vbveez92NVIho+VaUdz+io9fwRN4kxfeywkflf+b7+j46H80Pv0j59stPPpZ1C2TlOku2bdi1874uWywxhRPISQhhkSg91shRDysqT0kfdkONPe7YyOEKx8XIw+HnWuxdt7/rLWtceRtPDcabkeR//eH+3S//mw9371KITMEk9fZJcFBdswxlTp95Fgmx+81bfgH+8fFNv/+lsChVMphfmBs3OPz8vMXM9kdtxZfnyPMyXz8tZK7u7xsQIq6dWYxFMuCLxWzFfA2hmhHHRoIGKw8xhUkGLOewWWRIMZbgamhB1+Y11d5zQw4l6GGwiUTkWOinRoYGyUopUz81NWpo5JhTzrnkmpvLPY8SSyq5lFKLQG7UWFPNtdRaW+11tNhSy6202lrrbfTQIxiYe+m1t977GMENLjR4r8HzB4/MMONMM88y62yzz7Eon5VWXmXV1VZfY4cdNzCxy6677b7HMXdAipNOPuXU004/41JrN9508y233nb7HT+z9s3qP338F1mzb9bCy5SeV39mjUddrT/ewgQnWTkjYyEZGa/KAAUdlDPfLKWgzClnvgeaIgcWmZUbt00ZI4XpWMjXfubuj8z9rby53P5W3sJ/ypxT6v4/MudI3T/n7S+ytsVz62Xs04WKqY90H88ZMC3/vefTODWuYifWbXVBFtXPsfOowfaNJyff1+J+7LR4ByTHvcPS6+5wzwaPwKR1ez5gT9mTHOZzrj+pXl7f1uZua7N7QjotnzgJaVnG+uZqu8JfpcfiY3N2BvdKKFj6WWXMzS3tUXovNeSTb16pllvI7pqL6HmyZDePafmEfSf11GftztAQN3UDKFnN3lyXmzrc2fB3nhhK83GDr+22Bh7nzduuTCy58z3SrbxbLdH52/guLa5B01AXeYKuZeVzaw5ztkJeejxcLEzW1cnC2JXbauTGH5upEa7pNqVZ+im2Ey9JviTwZ/GNAdxpGvmst+9ErdZB7eR9jTeJccU92lpFpdKCuVHhluZzg//vIVz+k5MxSuPOxvWLEhxnKW518GWrnGibQM6ogbPi3OU4wrKXUdmVi+21AEDeP4Ub4iS/PlKhNA50VVfgVne8OVMhOV1qnlA2qGtTkKzOSiWdhHFOf9eqffFZC6s5Xpszbyqfq3MLbQajfsEHyj5nKpUADx+Td7bXaVz09HpYRChz7VC0njHptmidNidFlC2JI1Y5Uj6LsNq6fqfTfRl3TJdnHmtR0GGVcy+NvqudnlM9RpgaQb2XNkgshiUXSJTrpntrv6fEu85cfS547djdca46L5RKFHlZF7lXUIDFrAq3i76bP5m+uIVf2qFayhm9xEFNpVo3Wcust+YyQyVmJZS9Rpm+0Mkh3hbPFiDteOquh247hIak9R5bppRDHejieV3zJOiglFk/qLokMfpJjUqzNkqdSIWFfLG5Ew0NnlbeirKOasGU5rxNxeJuo6nARJKau+3Dy6bd7sMcA+kctwdSuiprJ95+Uj+8CVkrVGwyyiX0MpvxRuFyIavEZtzUWDsBRW3SioB3OaQf9PQjPuK/i1o6Q3Ja2PPHV/fnB/7jV8q93DbjOBT47EJ5UtzdSBD4nqlf7jAnyiPnqsKk0m9Ju65OVQ7KOlPInlYtANZdvcZicHDt29Ly1S0a/O6TQ4VkCEdTODq3Q2cC21QigVq5Kts8SPfUpkzvFjfXlYa7Db3hGh2QL41HiPYhkaqlAiCU6wkPBRbGmlWNcXLj6sBBuhPgqfOlYGSbFxYxmhQgpXsmt0zSWHYroDYci1ZG6Ojr9qRmdyqVrI12GokLvCAeAJwind7BWxFuA/wy3AMieJafjT41gdg6QQjZKcgTAPrZ4JUGiO2z1Q5llnEmoOlKHrBSp6DPMVFo3I1yAZciZcVVLxcmNy0Q+A4W7lhhJZgT6WSdtgmgOuB/fFswR6rjxn52A7d0Scjuwv2g08pQDNXUJZkjKhuYm8QS3xLoAnpW8Bddp8RYPElk6USD1K3Qd73p6Nrcb4ZoMlRDxecEqlYSv9AbgTiOw03gsk51kCDdMsA83qCAhAQ67ENCS9gFoD636J6rTagjT6EDhVNJ4iWy17LWPIqDWvzLP9B+W4G8KuawZwqiIxfI7sX2XLkRIT/PaUIqmDyTbIq2NuAoTkeoW55Af1gVyNwo1As/0w0NxRAQtTKhiIDG1TJygpdCHbvq1dwvYYhhde9ufwIrqv72pRm4XBDCV0H+KtAQtUX3CplzQWwNAGyIz1k2igsaL4CwQ5ycCUsZ/VTbalxuzsFyKViS1NATq+wxUqfr4X2Ye+QVLvwNY66KskMN0WuN2lC2qEsiO2HYuhLckyLgxJVCAuwM4UC50yaXknl5DqSIMgKegUAQEeWvmoVdNUgIAPqG++n6fbhZgB+PBCbOjrJCpOiSq491uDeExWujVNoYAJsYgqqictS5xjv6abN0lU6CpHvZzTJsh8aZGw14STh5b5vmRhRE0deKVHaFDY4ZcBlRfwdKG9xPiWJcYAIK3iBMo7Ey3T6RYf6tpAfkREFH1Uyno7NvKsAPFE4Etj8AUQfn9kbUIq9IOwV5wB9UASE4IHHuUb3HXSEWBqk6frtCO1CYKueNMQLmOvdDzYCBJSpCRCdGiAYfJJlBm8DeUBqsMQc6IKM623GdBMG4+YxEhp+eJksgTcdJ0FjQuqmeCWE1JADQQLHtshblhMBsbV3FxTXTKAKb0Ez6DLGwaHTe5iCbAREwYJKXmmba1AwkWNRy6GzBLiIN1VZ7305GOEMD3PKGgi7hhmAn2h3wKggzgHoAViuOQZzoM+gY/cblNMhBD6iCLbvM4uCphmSrIlGgvKguuW30iS0gblZkMt3bseVVvoLajgUgg4wAqAydtuzwK7wKiqrjPN0J69PW5cXSVgz4j3qrNK2wkIx6FC3LC1pH3gTldvjAGUVOt031OW7nqMa6vwUxBgmjV/AEdAxYnPESu1BDFakhrckT0Z5ZXzNZQ0NiiVO2v/M1Bvo20ku8GRImQXpGOHlIxo/Md+GMuI92pr+oVTgxDslulGRH56HlDekBrgvkCuQ15kE5reIBoII8xqa9Sq
bJ+Ibkz45lxznhJ+CHgLW6NJekuCQWPX1F34cGN0QuOv9ACMHRoRurAg1fqXSQjDeLr6vXABgAQXB5TKEVhqlhu0KnGKjHASdQoAVhRrBHoloCvi/es84Wc17FdAviOhVN/1A/hiCa9cpnqHaNoPXVomQXVpTX4kUgKV/vEc/Ygzj6mozR6tl31lmFtKlXlMFIlPTKME3k8YwhGOBZoP2Kk2Y5Ha7wSPCBl1r6odGlwECiWmH/UzIduMFowAhhjQrmPogy3A0MAeQDTzspoui5EAZINYt2v8NA9PEihUnbR+Oa4fterNwHtNRRMcC9M0p2YA+CW5Sx6YKJ7sUBI2fCQjKDG7pjxFVUfUs/8I/bTLgaowlNXU9GAQ+wZznuAUtEcUXafdKVtwl6LsC/QRJ0Al8gzgZUBr5c6ofK91QQOpkgtqIenw4PcAkqJr3LC1NSBx5E2ybIBfNDiT4lIsNE4+TjdcMAaUOPYKfT3IZp2C7QYiSlDUFhw8kAmVfDAEwkJoOcY/wA/631k/0wWL/U3lhGSVc1rd+3uUV/4JOsoNQO/GezY0x0W82zVD4v3FohhkiddvE4KPOXBmCEItF3dHxxe2MFuL1ZyRIfdDOOK0rViKS4IwCGJpTPmqgjxM6jqtypyLHe3UC/w+Ght/QbbLQNQKOJPD2hSgIxMPSGqQib6kUaoBoOxkXjCLipJWkPeYU7sqsVzbsKZEOPIpLRYdUQ1Ej9Qu2UjsAdcqBjz9NfvC5aDNhUh364Fwt33DAYEy6iDJsWhQyBlhYqirbDkcek8kaWoCoG+hhfjRmH0ODPasSXMriBXoOAYXk8TkS/nQ5QgO68ACu8H9zSrdzqFMhHnNxKAwVI8fuxCPwyDTG4oKNaplpPnq6HG1EQqFIgPZA4rBEBxFGi+lA3l5tflPbSzWPkk4YapGDQgk7rIyD+7ShouLVkXGKR9jjcSYFoz5hEiubsoyUsOiQwqJ+IZUGkA2GUhBtAFMwJ3Im/unRByAc3XEf3WBFk40QVooMB1It+xX9YfJKCxFCgvZ09QnFTLjp3j5akb5GE5DbKTw2wDBnI3dGmvGq3KU4H3eD3epDTWRKzU8B0s0SEACaAHLBbU17IMjXHjyNBiHRLoESJB4uRBwclduL3ZrGK3/Dq2FtzWbIRnAU7rzqZAAbcBatAHaGWsvjd1mqatyB8d4c3AIj2mAAj1ijzg2IzrZvUYue485VgdDi4AwieRZDNjiELmhb1K8mnkRatgnOBb3UNRIRHVDveFm2J8uv9I+Su0cebCEH3tDGMC+thwuh73CZSmBbmJzzmiCA31TVAzOz2s1GkdiyVNFg8kWfYFhAkvTEAUH/x9zRaxw0USS4AGwAkmARyUvvUgqbHGnICShK9OPmcIOSGfUAzQI/cBuIWQsBTRrlt9B/Rhao1gfM+YgIPOOXwgtlLYjeBiz38wyZDvDi7LnbD6cYlRYsqjajJTAtpF0uC4I+vrhIUmuTp1NVSlEa+VC588QHWfH484eevaYNfnqDs0f2aHOI9aDAeO0PSqNcFX4FOGDB8ikgYRRCL2gvi920/XPQ0RuHHKOT0DqjnlZAkuQGKW08dkEEJ1p4hGL1FjWgUNaomRZKlUAyNu6i8iz56nJdcRmWwlCmTia4wVH/AwWJCEZO4Lg26FmsoCI+CTM3vPyiKtTzVXtKB5OgmIoA0b01Gcnt2o6hnJbnnaY9vUAqJmFKA8SEicuSC6C3JMRoxASpcPRAU4CzZlIgMCE7NU3iwXJG6b5d2ReQQ8qx5nmamsusT3gYUp6aT9Q4nJY77wBGapnKUCr4okCQMbiIulOk+kg9dlw+AjQwFZY8lQRQggWhhTIirBee8KdyMq0URNbgaS4epoc5QBzIy0fAYTZJdYyfsB4SI4wdOqYuyaLuA78eA0CoeGQ2kp1o1MJgNK4/8eeVJiuTs7quf+30Mr6kp3VB/FYkl1+7V1P15/k0wSSkdH6ZmNJrM0aLoJUNC+ZddT7fkF3LQJGCcgPeOt3BiDzxjHYgylApQiPk6KvervZ8PJXsIkWoBrDVakj8nPlTB0cCUyEHnjshBkfj0jiyQE5O2RxVpSK7dCRZWR9WEndeMBboSJQ2mPNhoGs8SHDSI09BlbfRsNhCgor9SlAIGkqRZ+33DoIv+wvKhZgtGsOPsm2+jlATgWsNhRGwWmAa2yjFkxap5HCq9CakALxutPFrQoB1/plbFs/gOoIIRhrHoaBleWtwdZwPWyOC61dz8DK1i94emDLgC00y203zg6KtpRJRqItAq2gTlXWSjoGykSK8RXRdhFVoL8F6jd2JGp0Ju4sAJ1vHshB19F81S1dzqTECqIHk5uIqSn5bKWKJ3cE/qtVNHNDpFz/OkK6OkEFqO68LubcesCfmA5w4rzNERTHKfCxlESh30931zoSNeHzsj+gNYj2KAj4LOKOTsIUCI7cZNBSCfyehy1PymIqrEDZGUPkN78Q4AFeookiC0PnpSU78kw4stlWRZW24AiKJmWbtgpHN1GZD92A03R7ASJowKbUFD6KjZjKcygM6KR/8YDSqfip1EDPRZ7tAMmBeijQKRm4OB8Gg45REBY0g14QseLVAILFR9Coq8rYRDRiYiBeHkpPS5IxQluo+Sh9yvXAP3oxFpLxoVzPHGnyAWAoGkkIGmSYKmtJXyhFgd9Z/LGVrwLhdIuev1FxIBTMaxD+6M6HGn5F+0FQJaI1AfafYs0bW1t+cuWZiqKMApAsOmERZM/VI2ROV81QzebmmX92jYHkgdTI8XzCNxpkmB80pu1ZCis6g3J6XWsJWy+tDBwqkSfpzM6lYpR7RGAGdk7dHFVg4mIJXGrWmn5CZWjVLogh/ellIGbbAY2GCxNtgCiaPXrJ6J2GOFiG8oKS5tDJY6HXZSEnCV0MpGLqJr4DTDT0h61Pm22D6vIRjUStWksWFboECNxkHjmEbB+GmHKQ9B0VoYqoVN0BaBJADusqlPkjahkG9kEKKCBMFbND7Uw1egC6HpVsEEYXOBaopckp/U4VWXfo+AB4XWuPhhSMe8uGHBLEQb/1y7tgc+I05XM+9NCyBngV7Ci8URtChAQB7QQdqo0I2Eabqz8YobywpJocAN+Y3Ub67jxydPEQCi/Kge1KynEEte+o56832dMNFEIYL1B5MgRSczrD0YhIq2PRyxycTUNFPlUqtRrHxCDk3KVDmmjqh+7Td+BgtWKJzeUY81o8VyMO7Xo/w9ziLxIm0IaoaI5ntTXXQdXmf3tGvYfmn3C5/ew+Iz7++jmGGjIFkuqhbNSJ66trkurIDSrXJUfHATr56BoIaqpfRAtP06/5BzeIKqAtqTuiq6bZjrq4tGbhFGQ6NLneToEUvLayyuIc6nlD2GAnCbQk8CjTWFFLTZHR2e+8hX0cUo8jCM9QeRztICREYaTPQ1NfUR6TeQCnCv5VH4UGFbSEBtKT6RgQ7uiV/82zm4eZwhL0ipNk2UjvZyCJSoGpcBGK6aDSxAfj34x0Eu4Jwn4gV1TWxmRHePI+BjeX1InlO1F76VpsyzNfwb2
qQBoOdzLsywovXJh6qNWaTflsR4eiMAulZr9dp31+ZgNCCYSk+roTG6th8mulF7o+QK7g+aYGhcywMikKXRpXQuLiyFlKZppyPF3aQZsQewX3lb8mPSC40Ig1hYUe4Rj+kBUBQ6nK9hS9BeMy+Tv/5DlP87Te7+tihXQ7xB1UWxmLbVEJe0wQXZLOMgCS4ClbRqhgza+amNooX9hAM0bjxWABJ4Bei/UdOehozG4byxDpIiJtbhuvafr584PLTCBJia9hMyrkxDi/jhpaGzOWQSQUcrBC1atIg7xSNRs2RNzdlQcIjoBl5I+i7YCTb7qKuLTICTpg5MABnAukQkq8RLgpRTw2T6ZGFFgdAOrHnNq3RoDtyChmglBM7VWGfnugx0DFSW4F3bniyYUsGrweFmcw0H5mU0JCIE3YMCGJQIr9b0e0h0XWz60PwYv5q0dO36csNIz03AsSoTqwpBxltZCs0M5eLdNCbiaUjjgiVBqZUR0Hc0Y6k5mk5paLaHqYifUxQoRXRxRLE9Vv1U93twaJMW3T8Olfl+O5LcqHYkyy/5oIDe/iQYAFZ4B8rz8x8ZaTppIntDE0T8FLLtsxtJ/TxvmwauRxOmBV9py5TF7lhd0imRZ77BAm1yaudYOU2IDEFR4DHCoxHpV9xHleLv0r7qIBMlQU9WORFkZX8WMslUor4BemIkjoUVNPcvp2jTd8Q3P0Crwn0l+Ellc/cIbTBCGwUV9DgYId14LNom96iG25OOAoCiYNXoMh80hdeuI6wiRK7Dpf1G/ryNdl9OWHuT5M3iC/izNBw+2nt+jVp1woJvazmfTZ+tkXqIqCdX4KalM1KIL50LaVdnXAj41s3nO8dnw26hC/CCkfhxTeI5Iq8MmkVPQBw6QiTZnppZYBW0pam5NME5q2AFCWgfMjW4tKMBhzYOI8HBJAJ9epNWtavkEB1oIiA8xEBln3/1fixR7sw+UnRlXIoGj/A0WEF/ulgJMRyRwLCuEYumrUhpdHuBudB7lOU2HO/2OnqjCZSAyutQEOhY8Lt8gzs6YvnUEhSRZIGxiwDVkuB6jpqyb1oS7f7WhQ4Ruzfsc4vokK5HNc8WhGYcEHQ55PhgekBMmz06dDSoVegmICNbx6IhAN8h3+d8kfFzq8h5KzcDtZ4h8F4RBwAuin4gPA7GnHaGlpEkmuXAnzKa+Ca7474u1zEPar5qLOs2GUmII5k8AAelgxrVjn3FKaUA9TY4ovqF74aljygfkofpgKy09fKiYDgdaSPUkosqFa/DN10nSgG7CAXmt3cpxDioCvSMpO4jAXFG7trKMQrPAZTNuqQwDgKshzwQpJiArsNBOrCUKSPNxLkU1LZRuZSzUVJ+8ib9M8xrMK0GOxk7l5pP9nYj0QgdMugRM5d1MK5RMsmifK28OOXmU9VUk46H3oHU4Lgo3S2ITxpxIbL0QdfBtKwMm4BP2xgs7brYm93YG1MfCjBqBoK2lxfh1iQqACcEaJG19BJ1Z3NBjVpEB8Ch13FrQF+YJGVgGkOh7DXan8Y9OnERMlbHJNAf7TM31dmaFnWmq2lgWqUFkaLrA73gBj21S9HZH+0g4iCOyynhMOghndTBcyC1dc5Fh+OQyFHHE6pm2nCDgd1TWDZLfDM+DbaztTd5dpJEVBJgjSCRhFcfFVpqAYOIrByW3L3w62LZMSr9db+CbZDNQFohh5p7942OfOOwKjBqB4EJOuoMxMZF1cRtdQ3dAEXCizdCjJ4gFTzim5b4W7EQmGOZBcReDtzNngnXT2N0QkqhahqFErwaXjUV0W0BW+dJro5MRJ1BUWUvZTFy6xbwQAdU2dp97p9DcYhUpO/UTsXNKi4a50j5Y6W15RyW7CAiFum35dHpWRZ6NdrT0Wt0tX+nELXjDQ/LT7HwBeIjQVOMUTynMwQyYJrBX1ZENWkbg1ZAWQIKWafOLsBRtywc6aeyAbf9jhXRr5oyalNLvlsKx2C+66A0fM2iOlfqkp7YMNsAQqALpHKE7iOs0exGFQqe4bMT9DUaMQJKY7iMzJSIR1zjrDCWwAZgoAMY8KHmBvHClYt6aAJ/FnmPzshC7+AUMYzam+iOiE1YFcc8pLz6G+4/Jq6laQhAYYd+oPUi6fGxwzDubdiCZHQJkaBvERHkVNQS6cJ3jhMZ/dmMA8VNgiTzXLxgEQHjWsj4pGaR+Aq8RjHUNA5yvkNBOLxKMor2e0j7FSoJzTWfaOgb2JK0a7MGJB8sgyd0qAKPr7mN366mhLLioVVQQvLQrzEzPbXf1LcCSiiWCwvQhrzD7BgRnU4F0zrNRgE02+9PGC66SWOXraEtt/6ZM9Zh4lnK/tDBAHwAvxDDEtYzSl8LgZF7pkMSmBoN4nW2RRNRIPveiVqoBdiEJa/OR0DNsjZbG4s1UElD0wAEW40K3cA3XUeHQnrveGT9zM2jobAWLFzQmiu+I5IBH4YWvtpzRldoiL0gxwi6wQFwW3Xh4Ajq0HbDpoqG79g4fITXZmEbHUQHD5YOp0UNnkGHiyLAGGqCCqPWjxZwtGH/OpbnCYEVRFV99Jw0q/8cU9aBS+pQs7e3bmT+85lj6RQ1XOkG/BMwpohS7YMg5Bcij06emkCajqBunSCoOuBDJ38kiA4iaFL6Dj8W1EZxucH4KSUdXmksXGc6n3qXuGm9T7TrE3tAkE4weERyScSfoKbhaXxb9Nh0OgwlY/cQMkuE1bd47QiPq3TyPiizob9DAMtBR5KnmaN2B0mrdlKlIW/JFCb+IIywdSIOTfz+FqGDvpVQJEgYs8xrMKoZe/gcs0cbLR27MslhDLPDHRBsgD7g8LSnUSp2vki1d2rqm6f6F3nKHwXwcjOvu5/UqJJfamSLxzyrfYoFDyMZTCFb0AFsgFcDL1ngonMB2j3zAeZ2mgfwL4vRDHJFbuKQgC48nfYUqKalDRR4AeOtv7gAy1Cx71gMCscbTIzTc/5dGch826kT04Iv1y5k0k5hvHhRAAzo1TSA29bRBB30kFbB++eJAdtRbxQo8CDfL4/SSdNB0dIhKIfzLhKOwDIP3h61qqVDHOU5DC4Wtf8BtLtMtBOv1581HB2g29ql1Vng7uHZi681GlVbkpvk42/vRNikohHvmw4PXS64FrUhDHdh+OjMWV5jzxmwJDrV2jX3pHWBfkABzvUKn44Gag8Tzeznw1OHNEKiDhk+HIIsRJcJ35/YBY0p4PT6djC1s6kN4NMLkiFglQwxRMkBcQ5aRUkdzbU0qoQcIVKNWXFMMIemrdI0ptm6/p4haTqDHjhNO5FAxWDZgKQTcGmnyPj11q8pJB2PfBtMmjjprDxcj1pGicHc6m80kTZQjYqJgAx3yq0diPEUep2XaRMDZxHkTWB8ygNzGezpoZDQ6yjwtwFPBb397tF1hAcgSe7Kz3dcPxSeCjGRj+f2gMmI+gHvTtRBfB2Dh0GvTs12RB6dR7XQnnctnaV1ZIQVTh3/PcC/zn++dDbgagQdyDjJC4jnkBO6Oi0wtT1ZkFkeNU792MrJoUAFNyDueWcQlvaN6bT6TgxxEW3NXumY
t0dbyrulXBA1YvCM35o60emS9sSRT7ED9KCQprw0Q5DxBIKwKAOQuIOSTFofKmlTIqNG7LeOPAgasBhOk0/79BtikiIznZqjU27gx6n4I4Bb1NC1FZ/hAsLq9ccWKOmIYkF9lZ0drYzlx0V1noYx1p/yDFkamKNI+GovmybWJYgLfJMQrKsvHYk/UCqLQjYcJ2GBgChIthcm8u83xQiMqRJ0ZoEGRF2WfAMCiiJIhS5UCyS1bEAomP7y4GiegRmc79RsmUbc21YFavRJILBvKIWa6UMdAl1Uc3zgwv0hBJYGIXu6zB3SNheQ1V+RafQE4VOp/wdi1canzzj4UAAAAYNpQ0NQSUNDIHByb2ZpbGUAAHicfZE9SMNAHMVfU6VSKg4WEXHIUJ0siIo4ahWKUCHUCq06mFz6BU0akhQXR8G14ODHYtXBxVlXB1dBEPwAcXNzUnSREv+XFFrEeHDcj3f3HnfvAKFRYZrVNQ5oum2mkwkxm1sVQ68QEMIAIgjLzDLmJCkF3/F1jwBf7+I8y//cn6NXzVsMCIjEs8wwbeIN4ulN2+C8TxxlJVklPiceM+mCxI9cVzx+41x0WeCZUTOTnieOEovFDlY6mJVMjXiKOKZqOuULWY9VzluctUqNte7JXxjJ6yvLXKc5jCQWsQQJIhTUUEYFNuK06qRYSNN+wsc/5PolcinkKoORYwFVaJBdP/gf/O7WKkxOeEmRBND94jgfI0BoF2jWHef72HGaJ0DwGbjS2/5qA5j5JL3e1mJHQN82cHHd1pQ94HIHGHwyZFN2pSBNoVAA3s/om3JA/y0QXvN6a+3j9AHIUFepG+DgEBgtUva6z7t7Onv790yrvx/xInJz/ZaLfwAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAATrwAAE68BY+aOwwAAAAd0SU1FB+UCBxYME13qmlgAACAASURBVHja7J13eBRV+/e/W7PpBQLplFCSQCAgHUMRCFWkCYIYioCCBRF5kEceiCAiRYooiBKKIEhTeqSFAAm9EwhFSiCV9Lqbbef9wzf5EXZ2dzbZTb0/1zUX4cyZM2fOnNn5zjnnvm8BY4yBIAiCIAiCqDUIqQkIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBEECkCAIgiAIgiABSBAEQRAEQZAAJAiCIAiCIEgAEgRBEARBECQACYIgCIIgCBKABEEQBEEQBAlAgiAIgiAIggQgQRAEQRBErUVMTUBUNtnZ2Th+/DgiIyORmpqKlJQUJCcnQyAQwM3NDfXr14enpydCQkLwxhtvwM7OrsLqlpCQgD179uD+/ftIS0tDcnIykpKSIBQK4enpCXd3d7i5uWHAgAHo0aMHrKysKrTdIiMjeeXt0KEDvLy8LFIPrVaLq1evYvfu3Xj+/DkSExPx7NkzWFlZoUGDBvDw8IC/vz9Gjx4NHx8fi7ZJUVERzp07h8OHD+Off/5BcnIykpOTIZVK4eHhATc3N7Rt2xaDBg1CQEAAhEL6BiYIopbCCKKSOHnyJOvTpw8TiUQMAK9NIpGwt956i924ccNi9VIoFGzlypWsS5cuvOsFgDk4OLCxY8ey2NjYCmm/devW8a7boUOHzH7+wsJCtnDhQtaoUSNedRAIBOz1119nf/zxB9NqtWatS0pKCvv888+Zra0t7zZp2LAhW7t2LSsqKqKHkSCIWgcJQKLCuXbtGuvdu7dJ4opLTIwZM4alpaWZtW7Hjx9nfn5+5aqbUChk77//PktKSrJoOw4bNoxXfaytrVlhYaFZz33s2DHWpEmTMrdRv3792JMnT8pdD7VazRYtWsSsra3LXBdvb2925MgRejAJgiABSBCWYtu2bUwmk5VLYL28NW7cmN2+fbvc9VKpVOyDDz4wW70AMGdnZ3b06FGLtKNCoeA92vXWW2+Z9dwLFiwwS/vY2tqyU6dOlbkemZmZrH///mapi1AoZAsXLmQajYYeUoIgSAAShDmZPXu2WQVW8ebo6MiioqLKJf7GjBljkbqJRCL2ww8/mL0tT548ybsO69atM9t5Fy9ebNb2sbOzY9HR0SbXIy8vj7Vu3drs9+uTTz6hB5UgCBKABGEu1q5daxGBVby5urqyZ8+emVwvrVZrMfH38vbjjz+atT1nzJjB+9yJiYlmOefGjRst0jYODg7s3r17Jgn2AQMGWOxerVmzhh5YgiBqPALGGCNTGMKSREVFoU+fPlCr1RY9T/v27REdHQ2pVMr7mC1btmD8+PGWN7cXi3H8+HH06NHDLOUFBAQgLi7OaL527drh8uXL5T7fs2fP0LJlS+Tl5VmkfTp27Ijo6GiIxcYdEyxduhSzZ8+22L0SiUS4cuUKgoKC6OElCKLGQj4QCIuiVqvx4YcfWlz8AcDly5exevVq3vkTEhIwffr0CmuHUaNGITs7u9xlxcfH8xJ/ADBgwACz1H/KlCkWE38AcPHiRaxYscJovhcvXmDRokUWvVcajQYzZ86kh5cgCBKABFFWtm7divv371fY+ZYtW4aCggJeeWfNmoWcnJwKq5u5xMvBgwd55x06dKhZhPXRo0ct3j6LFy82eu8WLlyI3Nxci9clMjISERER9AATBEECkCBMRaPRYMGCBRV6zrS0NKxZs8ZovqSkJOzZs6fC2+THH3/E8+fPy1XG8ePHeeXz8PBAq1atyl1nPu1pDrKzsxEeHq53v0qlwvbt2yvsXv3+++/0EBMEQQKQIEzlxo0bePr0Ke/8Li4umDlzJo4dO4Zbt27h0qVLWL16NVq0aGHSebdt22Y0T3h4uEnT0l5eXggLC8OJEydw+/ZtnD9/HuvWrUNISIhJdVMoFNi0aVOZ21Qul/MWgIMGDSp3pIucnBzs3LnTpHZatmwZzp49i8OHD2PmzJmQSCS8j//ll1/07jt//jwyMzN5l9W8eXOsWbMGMTEx+PvvvzFv3jyTosgcPHgQSqWSHmSCIGomZAdDWIqwsDDelpetW7dmycnJnOUUFRWxqVOnmmTJacgiWKvVMh8fH95ljRo1iuXn5+stb//+/SY5Iu7YsWOZ2/To0aMVGv3j8OHDJjl3zs7O1inj1q1bzNvbm3c5+vrBrFmzeJcxevRoTufXjx8/ZgH+AbzLOXv2LD3IBEHUSGgEkLAYfOPUAsAff/wBNzc3zn1SqRRr1qzBm2++ybu8Q4cO6d337NkzPHv2jFc5/fr1w++//w5bW1u9eQYPHozNmzfzrtulS5fw4sWLMrWpoet6GWtra7zxxhvlvoeXLl3iPfK3Y8cOODo66uwLDAzEpk2bIBAIeJV1584dzvSkpCRexzdq1AgbNmyAtbU1576NmzbyHhl98uQJPcgEQdRISAASFuPRo0e88gUHB8PPz89gHpFIhLVr1/KeTtQnIgDg3LlzvMoQi8VYt24dRCKR0bwjRoxA69at+Y66Iz4+vkxtytcwISQkhFMAmQrfen744YdwcnLSu79Xr17o3r07r7L++ecfzvSUlBRex0+ePBk2NjZ693fs2BE9e/b
)

Assignment 5
###Code
# In this assignment, we will visualize and explore a CT scan!
# load numpy and matplotlib
%pylab inline
# we are using pydicom, so lets install it!
!pip install pydicom
###Output
Collecting pydicom
Downloading pydicom-2.3.0-py3-none-any.whl (2.0 MB)
|████████████████████████████████| 2.0 MB 23.3 MB/s
Installing collected packages: pydicom
Successfully installed pydicom-2.3.0
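###Markdown
A quick sanity check (not part of the original assignment): import pydicom and print its version to confirm the install worked.
###Code
# Confirm that pydicom is importable and report the installed version.
import pydicom
print(pydicom.__version__)  # e.g. 2.3.0, matching the pip output above
###Output
_____no_output_____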
###Markdown
**Task 1**: Download and visualize data with SliceDrop! [20 Points]
###Code
# Please download https://cs480.org/data/ct.zip and extract it on your computer!
# This is a CT scan of an arm in DICOM format.
# 1) Let's explore the data without loading it.
# TODO: Without loading the data, how many slices are there?
###Output
_____no_output_____
###Markdown
There are 220 slices.
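This count can also be verified programmatically without reading any pixel data; a minimal sketch (assuming ct.zip was extracted into a local folder, here called ct/) simply counts the .dcm files, since each file holds one slice.
###Code
# Count the DICOM slices by listing the extracted files.
# The folder name "ct" is an assumption -- point it at wherever ct.zip was extracted.
import glob
slice_files = sorted(glob.glob('ct/*.dcm'))
print('Number of slices:', len(slice_files))  # expected: 220
###Output
_____no_output_____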
###Code
# 2) Let's visualize the data with SliceDrop!
# Go to https://slicedrop.com and drag'n'drop all .dcm files into the browser.
# Please use the 2D sliders to show axial, sagittal, and coronal slices in 3D.
# TODO Please post a screenshot of SliceDrop's 3D View in the text box below by
# using the Upload image button after double-click.
###Output
_____no_output_____
###Markdown
![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABf8AAAOqCAYAAAAi9C9RAAAgAElEQVR4nOzdfZTV9X0n8Pc88PyUEQcfEImDsbHRtDpj4p6YgKl2E2ySJitsmnPabFMXkj1paluzsietCTZNh9RuNicbd8Xs5iTZ3baMbQ1GtgayIEmV6kAe1DwUQR4UUIRRBgaG4c7dP/AOw5PMhRlmuPN6nTNnmPv73Xs/c1XO8f39fj+fqvXPbigGAAAAAACoGFVJhP8AAAAAAFBBqge7AAAAAAAAoH8J/wEAAAAAoMII/wEAAAAAoMII/wEAAAAAoMII/wEAAAAAoMII/wEAAAAAoMII/wEAAAAAoMII/wEAAAAAoMII/wEAAAAAoMLUJPnc2XzDCRMmZNq0aamvr8/kyZOP+hozZkw6OjrS3d19NksCAAAAAICyVVVVpaGhIf/pP/2ntLW1Zdu2bSe9d8KECfmP//E/ZtKkSdm0aVMKhcLA1pakOKDv0MvIkSPzW7/1W7nttttSW1t71LXq6ups3rw5d999d55++umzVRIAAAAAAJTtDW94Q2bOnJnf+q3fytixY5Mk3/jGN/LQQw/l4MGDR937S7/0S7njjjtywQUXpLu7O6tXr87SpUvz7LPPDlh9ZzX8HzFiRB544IG88MILefDBB1NTU3PU9S984Qv5kz/5kzz88MNnqyQAAAAAACjLddddl9/8zd/M1VdfnSQpFoupqqpKoVDIP/7jP+Zb3/pWOjo6kiQzZ87Mv/t3/y6TJ0/uuS9JXnrppSxfvjxLly7N/v37+73G2lPfckRVVVXGjRt3Wm9ULBZTW1ub7u7utLa25rvf/e5x93zyk5/M2LFjM2bMmFRXV/d8COU4dOhQDhw4cFo1AgAAAADAyYwcOTK/+7u/m3e+852ZOHFiisXDe+urqqpSLBZTU1OTW265JW984xvzV3/1V3n3u9+dD37wgxk3btxRwX+xWMyUKVPy4Q9/OI2NjVm8eHG/nwLo887/cePG5QMf+EBuv/32VFVV9TmYLxaLPR9Ad3d3rrzyyvzhH/5h/uf//J/H3bt8+fJcfvnl2bFjR8+pgOrq8mYS79q1K3/8x3+sdRAAAAAAAP1m8uTJ+fznP5+pU6ceFfqXlB5Lkn379uUrX/lKbr755jQ1NZ30/tLP3d3d+a//9b9mxYoV/VZvn3f+19fX54/+6I/y3HPPpaWlpc+hfHV1dQqFQg52duZQoZC77747I0eOPOG9NTU1eeihh7J8+fLU1tZm7JgxKZQ5/Pe//Jf/kiuuuEL4DwAAAABAv2lra8vKlStz6623ZvTo0T2BfinEr6qqyoEDB/LP//zPWbx4cdrb2/P444/nPe95T2699dZMmTLlqPtLisVinn766fzwhz/s13r7HP53d3f3DCJYsmRJWW/yxS9+MdOmTcuIESPylre85bhe/yXjxo3Le9/73px33nkZM3p0/vZv/zZLWlrKeq9Pf/rTAz4lGQAAAACA4aW7uztLlizJjh078ju/8zuZMmXKUUH+z372syxdujT/9E//dNTz/vEf/zE/+clPcuutt+Yd73hHxowZ03OtUCjkkUceyTe/+c2eGQH9paye/8Vi8ajC+mr16tV56KGH8jd/8zf5vd/7vfzzP//zCe9buHBh6uvr8+d//uc5dOhQPvn7v1/2e9XWlvUrAQAAAABAn61evTovvPBCPvaxj+Xqq6/Onj178sADD+TRRx9NW1vbCZ+zbdu2fOUrX8kPfvCDzJkzJ7/8y7+cQqGQ++67LytWrBiQDe1nJSn/zne+k3//7/99/uiP/igf//jH8+qrr57wvmXLluU3fuM30tXVlVtuuSXbt28/G+UBAAAAAECfbdiwIV/4whfy/ve/Pz/4wQ+ydevWUz6nWCxm3bp1efrpp/ORj3wkK1euzObNmwesxvKm6Z6G2traXHPNNZk4cWLq6+vz4IMPnrTtz1VXXZWHHnooDz74YH76058OdGkAAAAAAHBa3v72t2fu3Lm59dZbM3HixD49Z8SIEbnhhhsye/bs3HbbbRk7duyA1Tdg4f/48ePz8Y9/PMuWLUtzc3POO++8fOhDH8qMGTPypS996bj7J06cmCVLluTv//7vM3Xq1KxYsSLXX3/9QJUHAAAAAABlGzduXG677bbcfvvtqampyaxZs/KVr3wl73znO08a5ldVVaWhoSF33HFHbr/99owaNSq/8iu/knvuuSeXX375gNRZk+Rzfblx0qRJmTt3bn72s58dN7DgWL/6q7+ar3/967n44ovz9a9/PX/zN3+TjRs3pqurK//3//7ffP7P/izPv/BCnnrqqZ7nfPOb38yY0aPze7/3e3n44Yezb9++fOpTn0p3d/dR953KJz7xiaxcuTK/+MUv+vwcAAAAAAA4lalTp+YTn/hE3v3ud6dYLPY8PmbMmLzjHe/ItGnTsnv37uzcubPnWl1dXT74wQ9m/vz5mTFjxlGvN3HixFxzzTVpb2/Ppk2b+rXWfu/5f+211+Z//I//kXvvvTdr1qzJZZddlqlTp+bgwYN58cUX09DQkL//h3/I4sWL86Mf/SjPPPNMbr/99nzoQx/Kvffem3nz5uWJJ57IQw89lEceeSRf+MIX0tXVlZaWlv4uFQAAAAAA+qSuri5/8id/kqlTpyY5vJu/pFgspqqqKm9/+9vzy7/8y1m1alX+9//+37nmmmvyb//tv8306dNTVVXVc1/pucViMRdccEH+4A/+ICNHjsx3v/vdfqu3X9v+XHTRRfnqV7+ar371q/n+97+fiy++OFVVVWlqasqXvvSlfPCDH8xv//Zv5w1veEO2bduW+++/P7feemsWLlyYB/7u73LhhRemsbExDQ0NuemmmzJq1Kjcc889+c3f/M1ceeWV/VkqAAAAAAD02SuvvJKvf/3reeWVV5LkqJ3/pWC/WCxmwoQJed/73pfGxsb8m3/zb/LGN77xqPtKSgsBxWIxy5Yty6OPPtqv9fZb+F9dXZ177rknS5cuzZo1a3LJJZekrq4uH/7wh3PjjTdmw4YNuf7669PV1ZWRI0fmwQcfTH19fVpaWvLYY49l+7Zt2blzZ5599tm87W1vy6//+q/n93//91NVVZXly5fnk5/8ZH+Vekp1dXVZsmRJisVi6urqkiTz5s1LsVjM8uXLz1odAAAAAAAMDcViMU888UT+4A/+IGvXrk1XV1fP47139L/66qv58z//87z5zW/OX//1X+eHP/zhUa9R+kqSl156Kc3Nzbn//vvT2dnZr/X2W9uf973vfZk8eXJaWlpy4YUX5r3vfW+qqqpSKBQyatSoHDx4MFOnTs3u3buzb9++XH755Vm3bl1e3LEjL774YqZMmZKtW7ems7MzL774YsaNG5cJEyZk0qRJ+X//7//lXe96V97xjnecct5Af2hoaMicOXOOeqy0CNDW1jbg7w8AAAAAwNDU1taWhQsX5l//63+d97///Zk2bVrPtY0bN2bp0qWZPXt2rrnmmtx44435X//rf2Xn
zp25+eabe3b679+/P9///vfzf/7P/xmwzLlfdv5XV1fnox/9aP72b/82r7zySsaOHZuZM2dmwoQJ+fnPf55Nmzbl4osvzvr16zNixIjMmDEjv/Irv5Jrr702o0aPzmWXXZZLLrkkb3rTm3LFFVfk4osvzqhRo9LR0ZEbb7wxr7zySu6///787u/+7hnV2dDQkPvuu++o1ZXW1tbMmzfvlM9dtGhRqqqqMnfu3DOq4XTV1dUdVfexX62trWlubk5jY+Og1AcAAAAAMJw88sgjaW5uzsMPP5wkWbNmTZYuXZrf+I3fyDXXXJMkGT9+fD760Y+mvb09999/fwqFQv7lX/4lX/7yl/PVr351QDeb90v4/6Y3vSnV1dVZvnx5zw75TZs25c1vfnM+/OEPZ8qUKRk9enSampry1re+NZdddlkmTJiQmurqjB49OtOnT8+MGTPypje9Kcnh2QHFYjEvv/xy6urqUl9fnzVr1qSmpqbnnnI1NDSktbU1DQ0NmTFjRs8RjJaWltx3331pbm7uj4/irFiwYEFP/aUFiY0bN+bOO+9Ma2tr7rvvvsEuEQAAAACg4l1wwQUpFAr5wz/8w7S2tuZjH/tYLr/88p7rxWIxY8aMyQc/+MFcc801+eM//uM8+OCDufLKK1Nb22+NeU6oX179xhtvzFNPPZV9+/blAx/4QK666qrs3r07EydOTEdHRyZOnJiLLrooI0aMSFdXV3784x/n8ccfz+rVq/Pyyy/n/PPPz4UXXpiFCxfmxhtvTEtLS4rFYkaPHp3u7u689a1vzYYNG/K9730vv/Zrv5b169eXXWNzc3Pq6uoyf/78bNy4sefxRYsW9VxvaWnJ2rVr++MjOataWlrS0tKSefPm5b777su8efNSV1c3aKcUAAAAAACGg0KhkFtuuSXXXHNNpk2bdlT//yRHfb/uuuvS0NCQ8ePH57HHHjtqYPBA6Jed/zfccEMef/zxvPvd78773ve+1NfXZ/Lkydm3b18mTJiQ6dOnZ9KkSamvr+/Z0X/bbbflS1/6Uq699tq89NJL2bt3b4rFYp566qkcOHAgb3jDGzJixIi0tbVl586dSZJVq1bl/PPPP60aS+1wSicTelu0aFEWL16choaGkz5/zpw5Wb58eZYsWXLC116+fPlRbXjuu+++E75e6XVK923YsCF33nnnaf1Ox1q8eHHPYsacOXOOet3W1tYUi8XceeedmTNnTs/Pxw4wbmxs7Bl2XPpasmTJce2EerchamxsTENDw1G/1/Lly7UgAgAAAAAq2g9/+MPcfvvteeWVV7J///6esL8U7PdeDCgWizl48GC+8Y1v5Etf+lIKhcKA1nbG4f+oUaMyceLE7N69O9XV1Zk4cWIOHjyYAwcO5KKLLsqECROyf//+dHV1paurK4888kh+9Vd/NfX19bnqqqvymc98Jp/5zGdyxx135Nlnn8369eszadKktLW1pbq6Os8880yefPLJVFdXZ/r06dm7d+9p1Vna0X+yUH7+/PlpaWk56fMbGxtz0003Hff4nXfemeXLl2fFihU9bXhWrFiRefPmHdUGqXTvkiVL0tLSkqqqqsyYMSNtbW1pbm4+LoQ/XYsWLerpE3WiWQalcL8UzPf+LEptg5L0/C7z589PY2NjWltbT7pI0djYmHnz5vUMrFi8eHFuuummLF++/HUXVAAAAAAAznWbN2/OZz7zmSxevDg///nPe7LV3tra2vLwww/nc5/7XL7zne+clbrOOPwfMWJEuru7s2/fvhQKhdTU1OSyyy7LBRdckBEjRqSqqirPPvtsli1blk9/+tNZt25dxo4dm9ra2hw6dCgNDQ254YYb0t7eni1btmTcuHFpa2tLZ2dnXnjhhRSLxRQKhUyaNCkTJkzIK6+8clp1LliwIG1tbWlsbMyGDRv6pcf/nDlz0tzcnEWLFvXsuE8O78BPDgfrpQC+sbGx597S9Y0bN+bmm29OW1tbbrrppp5wva6uLhs2bMiGDRtOeFLh9bS1tfUsdDQ0NBwXvs+ZMydz587t+RdwxowZSQ4vFDQ3N2ft2rVHtQtavHhx5s+fn+Rwa6QTLSjU1dVlwYIFPT/Pnz8/a9euTV1dnfkDAAAAAMCw8L3vfS9f/OIX861vfSsHDhxIcniT9WOPPZZFixbl/vvvz/bt289aPWcc/k+cODETJ05Me3t7Zs2alQMHDqSrqyuFQiE7duzI1q1bs2bNmqxduzbr1q3Le97znlx66aVJkldffTXr1q3LsmXLsn79+nR2dmb37t3Ztm1bnnrqqfziF7/Iq6++mn/1r/5VLrroomzatCkvvfRS3v3ud5dd58aNG9PU1JQVK1YkObzLfffu3WfUcqe5uTltbW1HBf9JsmLFimzcuDEbN27sOU1Qep9jTxf0DutPFKyfjt5zC44N/xctWnTCEw6l+koLE72tWLHiqM/tRNePVXqPm266ye5/AAAAAGBYePnll/PAAw/kU5/6VNasWZMvf/nL+au/+qv89Kc/HfAe/8c644G/pV38e/fuTW1tberr6/NP//RPufDCC/NLv/RLefTRR/PWt741b3vb27Jq1apccsklPc/dtWtX/vIv/zJXXHFFJk2alIULF6ampibTp0/PZZddlkmTJqW6ujpTp07N6NGjM2XKlDz66KO55JJL8s53vjPf//73y6q1tNN+zpw5mTdvXm666aae3exz584ta9hvaVd9qcVOb21tbT076ktKLYNKbXVO9pp1dXUnfH45Xu+0wInq7X1C4GSfwYoVK3qC/JP93r31Hqrc0NBw1M8AAAAAAJWqWCxmx44d+cIXvjCodZzxzv+2traMHDkydXV12bZtWzo6OnLppZfm4osvzmOPPZbVq1fnyiuvzPjx4/O+970vHR0dOXjwYJLkiiuuyJVXXpk3vOENaWtry549e9LW1pb9+/dn3LhxueKKK/KWt7wle/fuzaRJk9LZ2ZkbbrghP/3pT1NbW5vp06enpqam7JpbWlpy88039wT+DQ0NaW1tPWFP/5MpZ5htXV1dTyA/Y8aMnpY7J/o6VajeF7132vcldO/9u5xs4aB3XX1pRSTsBwAAAAAYPGcc/u/atSv79u3L1VdfnXXr1mXnzp3p6OhITU1NtmzZkl/7tV/L1q1bM2rUqDz33HP58pe/nI6Ojp7nX3jhhfnud7+bz3/+80mScePGZc+ePXn11VczadKk1NXV5e1vf3vq6+uzY8eOPP3003n++eezfv36XHzxxRk5cuRp197S0pKmpqae3e7ltAAqheF1dXWZM2fOCe8pDdft7fUWGObMmXPGLXLq6up6wvy1a9f2KYQ/dpf+iZR+395tik5Vx4leHwAAAACAgXfG4X+SPPHEE/nIRz6SZcuW5fvf/366u7uzZs2aPPXUU7n00ktz5ZVXZs+ePUmSpqam7Nu3L0ny3HPP5amnnsqFF16Yt73tbbnlllty/fXX573vfW+uvPLKHDx4MDt37kyxWMy+ffuyZ8+etLa2ZseOHbnmmmtSKBTS2dl5yvpKg2dPFu6XhtWWE7yvWLG
iJxA/2es2NzenpaWlT339GxoaemYInIl58+b1BO/HziI4md6LBCdbnCh9Nifq738ipQWI0uwDAAAAAADOnn4J/5cvX57rr78+V199df7iL/4i9957b3bu3Jk3vvGN6ezsTHt7ezo6OjJ69Oicf/75aW9vz+7du3PXXXdl7dq12bp1az72sY/lzjvvzK233pqmpqZcffXVGTduXA4ePJgf/ehHWbFiRTZs2JDa2tpcddVV2bVrV3784x+nu7v7lPW1tbWlrq7uqGC8t3KD7ZLScNzGxsYsX768J/Cuq6vLkiVLUldX1zP4thTEH3tvcnjxoLW1NYsWLTqj8H/OnDlpbm7ueb8TDfY91e8yZ86cEy4AlBYtTrSgcKIWSKXTECcaIAwAAAAAwMDql/C/tbU1e/bsyfz58zN58uQ89thj+eu//utMnTo1v/jFL7Js2bL86Ec/ypYtWzJu3Lh0d3fn2Wefze7du1NVVZXLL788dXV1+d73vpe/+Iu/yJNPPpmlS5fmoYceyne/+9088cQTWb9+fXbu3Jldu3bljW98Y6qqqvq0679k/vz5SQ4vVPTefT9v3rzceeed2bhxY88JgL5asGBBz4LBTTfdlNbW1hSLxezevTuNjY2ZO3duz70tLS09wXnve4vFYpqbm7No0aKeoLyuri4bNmzIhg0b+tRfv9ReqNRiaMGCBWX/LosWLeqpb8mSJT2fUV1dXZYvX56GhoaTDkUuDU0uKS1u9P6dAQAAAAA4e/ol/N+zZ0+WLl2auXPn5lOf+lQ++tGPpr6+Po888kgef/zxrFixIsuWLUuhUEhXV1dGjhyZpUuXpqamJjfccEP279+fTZs25cEHH8xVV12V+vr6PP/883nuueeyZ8+ePP/88zl06FAOHTqU8ePHp62tLRMmTMiECRP6XGNbW1tmzJiRFStWpLm5+ajgvaWlJTNmzDitXfc333xzFixYcFRrm9IsgWPb3SxYsCBz58496oTBihUrMnfu3LJC8t71F4vFtLa2pqGhIYsWLcp555132oF7qb61a9fmvvvu61nI2LhxY2bMmHHSkwSLFi3KvHnzeuqpq6vreS0AAAAAAM6+qiTFvtx46aWXpqWlJX/3d3+XL37xi8ddv/DCC/ODH/wgnZ2defjhh1NXV5ft27dn586d+elPf5rOzs68/e1vzzPPPJP58+dnxYoV2bVrV8aMGdPTFuiFF17Ihz70oaxfvz6bN2/OyJEjc+GFF2b//v158cUXM2bMmOzYsSPt7e257LLL8vTTT+fll18+qo4f//jHueuuu/Ltb3+7Xz4gTqyuri67d+9OkqOGJgMAAAAAMPhq++uFduzYkTvvvDMPPPBANm3alB/84Ad573vfmyT50Y9+lGeffTZbt27Ntm3bsmXLliTJypUrM3ny5EyZMiU7duzI7Nmzs379+qxfvz6XXHJJRo8enZqamowYMSLTp09PZ2dnJk6cmDVr1mTjxo3p6Ojor/IBAAAAAKBi9Fv4nyT/8A//kG9/+9v59V//9Xzve9/Ltm3b8q53vSt79+7NwYMHc/DgwYwYMSKvvvpqNm3alO7u7tTW1uaCCy7Itm3bsn379mzYsCFTpkzJvn37cujQoRw4cCCFQiGFQiE1NTXp7u5OTU1NJk2alP3791sAAAAAAACAY/Rr+N/d3Z3//J//c97znvfkzW9+c1auXJmurq6sWbMmjz32WIrFYkaOHJn29vY8//zzGT9+fKqrq7N9+/a0t7dn165dqa+vz86dOzNmzJjs27cvVVVVPa+9f//+PPfcc5k4cWIKhUJP2xkAAAAAAOCIfg3/k2T16tX52te+lvr6+nR3d+eZZ57Jzp0785a3vCUdHR3Zvn17Xn311XR1daW7uzsHDx7Mhg0bsmvXrnR0dGTv3r09O/0PHDiQsWPHprq6uufe9vb2nn7z3d3d/V0+fdTW1tazMAMAAAAAwNBSPRAvunTp0rz44ot5+eWXs2fPnuzduzfV1dWpra3NgQMHsmPHjhQKhbzlLW9JoVDI9u3bM378+Bw4cCA7d+5Me3t79u7dmxEjRuTQoUNpa2vL/v37s3v37kybNi179+5NkowcOXIgygcAAAAAgHNav+/8T5ItW7Zk06ZNeemll1JVVZUXX3wxmzdvztatW3PppZfm5ZdfzubNm/Pyyy9n7Nix6e7uTnV1dXbt2tWzm3///v0pFoupqqpKsVjM/v37c+DAgSTJK6+8ktGjR9t5DgAAAAAAJzAg4X9XV1deeeWVtLW1paurK11dXZkwYUKmTp2ampqa7Ny5M1dccUX27duXF198MUmyb9++jBw5MoVCIV1dXamtrU1nZ2dPyF9bW5vRo0enra0txWIxnZ2dOXTo0ECUDwAAAAAA57QBCf+TpL29PZ2dnTl48GCSpLOzM1u3bk1DQ0NPO6ALLrggnZ2dSZJDhw6lq6srhw4dSkdHRyZPnpzk8AmAqqqq7N27t2choXQCAAAAAAAAOF6/hf+lFjzFYjFjxoxJe3t7Dhw4kGKxmNGjR6dQKKS6ujp79uxJoVBIfX19Ojo6UigUkiTd3d0pFovp7u7uOTnQ3d2djo6OnscBAAAAAIBT67fwv1gs9vx5xIgR6ezsTKFQSGdnZ/bu3Zvu7u5UVVXlwIED2bdvXwqFQmpra1NdXZ3u7u4UCoXs2rWr53VKJwIAAAAAAIDy9Fv4Xwrxq6urM2rUqLS3t6dQKGTUqFHp6upKZ2dnT9uempqa7Nu377jX6L2AAAAAAAAAnJ5+3/l/3nnnpaurK7t3786+ffty8ODBdHd3a9sDAAAAAABnSb+H/6+++mrWrVtnFz8AAAAAAAySssL/2traHDhw4HXv6erqOqOCzlShUOgZPgwAAAAAAMNRn8P/qqqq7Nq1Kx/+8Idz8cUXZ+TIkQNZ12mbNm2aYcEAAAAAAAxrVUn61J9n5MiRueSSSzJv3rxMmDBhSPbwr62tzWOPPZbvfOc7aWtrG+xyAAAAAABgUPR55/+hQ4fS1taWlStXZsSIEUOyp391dXWef/75U7YmAgAAAACAStbnnf8AAAAAAMC5oXqwCwAAAAAAAPqX8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACqM8B8AAAAAACpM7T333NPzw09+8pOsWrUqW7ZsGcSSAAAAAACAM1GVpFj64Y
orrsj111+fb37zm4NYEgAAAAAAcCaOavvzL//yL3nrW986WLUAAAAAAAD9QM9/AAAAAACoMMJ/AAAAAACoMNVJ8s1v6fEPAAAAAACVovaJJ58Y7BoAAAAAAIB+pO0PAAAAAABUGOE/AAAAAABUGOE/AAAAAABUGOE/AAAAAABUmOq3Xfe2/PznPx/sOgAAAAAAgH5SnSS/89u/M9h1AAAAAAAA/UTbHwAAAAAAqDDCfwAAAAAAqDDCfwAAAAAAqDDCfwAAAAAAqDDCfwAAAAAAqDDCfwAAAAAAqDDCfwAAAAAAqDDCfwAAAAAAqDDCfwAAAAAAqDDCfwAAAAAAqDDCfwAAAOKCNiQAACAASURBVAAAqDDCfwAAAAAAqDDCfwAAAAAAqDDCfwAAAAAAqDD9Fv7fe++9Wb16dWbPnt1fLwkAAAAAAJyG2jN58sc//vF89KMfzYgRI9LV1ZVvfOMbWbZsWX/VBgAAAAAAnIayw//rrrsuCxcuzJQpU5IkTzzxRP7Df/gP/V4YAAAAAABwesoK/2fPnp0FCxZkx44d2vsAAAAAAMAQVVbP/2XLluVd73pXkqS1tTWtra259957B6QwAAAAAADg9JzWwN+5c+emqakpd911V6666qq0trbm8ccfz8c//vH+rg8AAAAAACjTaYX/JaWTAE1NTfnhD3+Yj3zkI9oBAQAAAADAICt74O/JGPoLAAAAAABDw2nt/J89e3ZWr17d0/d/9erVdvwDAAAAAMAQUXb4/6d/+qe5++67s2PHjjQ1NaWpqSk7duzI3XffnT/90z8diBoBAAAAAIAylBX+X3fddWlqasrXvva1zJ07t+fxuXPn5mtf+1qamppy3XXX9XuRAAAAAABA35UV/tfX12fEiBHZsmXLcde2bNmSESNGpL6+vt+KAwAAAAAAyldW+L9z584kSWNj43HXSo+V7gEAAAAAAAZHbTk3P/nkk3n88cfzgQ98IDfffHOam5uTJAsWLMjYsWPz7W9/O08++eSAFAoAAAAAAPRNVZJi7wfuueee3HHHHa/7pOuuuy4LFy7MlClTkiQvvfRSPvvZzwr+AQAAAABgCChr53/Jk08+mdmzZ/d3LQAAAAAAQD8oq+c/AAAAAAAw9An/AQAAAACgwpTV9mf27Nk9w31PpKOjI83NzVm2bFm/FAcAAAAAAJSv7J7/e/fuFfADAAAAAMAQpu0PAAAAAABUGOE/AAAAAABUGOE/AAAAAABUmLJ6/i9btkyvfwAAAAAAGOLs/AcAAAAAgApT1s7/JLnuuuuycOHCTJky5bhrHR0daW5udjoAAAAAAAAGUdnh/6c//ekkySc+8Yk8+eST/V4QAAAAAABwZspq+zN79uycd955Wbp0qeAfAAAAAACGqLJ7/h88eDBbtmwZiFoAAAAAAIB+UHb4P3LkyFx66aUDUQsAAAAAANAPygr/ly1blt27d+f9739/rrvuuoGqCQAAAAAAOANlD/z9y7/8yyxcuDD/7b/9t+OudXR0pLm5OcuWLeuX4gAAAAAAgPKVHf4/+eSTmT179kDUAgAAAAAA9IOye/4DAAAAAABDW1k7/2fPnp0FCxZk7NixJ7yu7Q8AAAAAAAy+ssL/ZcuWHRfsn2pBAAAAAAAAOLvK7vl/bNjf1dWVb3zjG/nv//2/93txAAAAAABA+U6r7c/y5cvzZ3/2ZwNVEwAAAAAAcAbKbvuTJAsWLMgHPvCBJPr8AwAAAADAUFNd7hOWLVuWd73rXWlqakpTU1OWL1+eu+++O62trVm9enVmz549EHUCAAAAAAB9dFptfwz3BQAAAACAoavstj/a+wAAAAAAwNBWdtsfAAAAAABgaBP+AwAAAABAhenXnv8dHR1pbm7WGggAAAAAAAZR2Tv/9+7dm7vuuitNTU359re/nY0bN6apqSl33XVX9u7dOxA1AgAAAAAAZdD2BwAAAAAAKozwHwAAAAAAKozwHwAAAAAAKkxVkmLvB+65557ccccdg1QOAAAAAABwpuz8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwAAAACACiP8BwA4DbOSFJOsfO1r1uCWAwAAAEcR/gMAnIbPJln42p9nxSIAAAAAQ4vwHwDgNH0uyY1Jql77nhxeACidCLAQAAAAwGAR/gMA9INVObwAcONrf06cBgAAAGDwCP8BAMr0emF+70UApwEAAAAYLMJ/AIDT8Ggf7iktBGgLBAAAwNkm/AcAKNOsHGnt01fHtgUyJBgAAICBJPwHACjTzDN4rtMAAAAAnA3CfwCA01Duzv+TvcbrDQm2EAAAAMDpEv4DAAyykw0J1hYIAACA0yX8BwAow0AH8doCAQAA0B+E/wAAZXr0LL2PIcEAAACcLuE/AEAZZqV/+v2Xw2kAAAAAyiX8BwAow8xBfn9DggEAAOgL4T8AQJnO9s7/EzEkGAAAgNcj/AcAOMdpCwQAAMCxhP8AAH10LoTohgQDAACQCP8BAMry6GAX0EdOAwAAAAxvwn8AgD6alaHR779chgQDAAAMP0eF/1dccUV+8pOfDFYtAABD2sycm+F/iSHBAAAAw0ftPffc0/PDT37yk6xadS7/Ly0AAH2xKkcWMmYl+WwOLwCUri3Mub3QAQAAMNxV5XDrVwAATmFljuyYr0SlRYDSCQCLAAAAAOcuPf8BAPpgOLTEMSQYAACgctj5DwDQB713ww8npdMApT+XTgMkw++zAAAAOJfY+Q8A0AfDdde7IcEAAADnJuE/AEAfzBzsAoYAbYEAAADOHcJ/AIA+0ubmiN4nAlblcPDvNAAAAMDQIfwHAOC0OQ0AAAAwNAn/AQBOQYDdN8eeBkiOPg3gcwQAADh7qnJ4YxYAACdRCq21/SnfrCSfzdGf4cL4LAEAAAaan
KEgGvvvqqdr28q7YcjIQoACxpBP8BAAAAzNDQbYDcMGCpdgMabhlEvQ0bNujzn/+8Vq1apUqloqmpqRlB5VAw3Qao/UpnF3TN5/PKZDJ1Ae9QsNtvJ+RXV8cFQkMJgNmEArOhmQbub78lj796wJ6Dv1LBTwL41+i/p19VHlq9EHretQOSaj3g3cqAUqmkSqWifD6v1atX6+1vf7s6OjrmdZ9eeuklnThxQuPj4yoUCnWDaP1r8FdjuL/d9efzebW3t2v16tVXtCXQyZMn9eKLL2p4eFiTk5MqlUpRW6q4lQBO6LtpW1jZVQKh74D/ffW/Z1s/+Ul1HDqk73/ve3Wth+z3wL2fu5d29YxLDNjj+98NO6/CnlMiUZt1MTk5qdHRUSWTSX1z2zf11eu+Wgv6SwT+AWAJI/gPAAAAIKih2wBJDAP2/NZv/Zbuu+8+tbW1Ra2ACoVCbIA9VAVvh/u6yutsNhtVX4cq6Z244HKIX60dSij44rafrZrfbRvX6kW6UIkfOme772zJD3seoe38lQTuOT8J4YLULiFQLBajIcFSLaDc0tKi97znPVqxYkX4RhljY2M6efKkzpw5EwXV3fv4wWv/3tmq+VQqpXw+r7a2NnV3d6uvr0+tra1zvv/FqlQqeuaZZ/TGG2/o/Pnz0SwDF/h3j+l0esYsAHsMW93vVq+46/FnH8QlaOz9uGv7dg1v3aonv/CFuvdz/fvtczYJ4N7f3stQEsD+HppZIdW+L/uu3af9m/bXnuC/ewCwLKTEf84BAAAABBxRLej/mKQ+NWACYEC1cik3C+CoajelQT399NP6h3/4B91+++3q6OhQc3NzcBBpqHe9DUTbRIAdUhsKevtV1HGV1G5ba7bkQNx+fpA2dDy/LY8fXI27Bvf7bCsGQu8ZanVke7iHzlGqH3js9rHXkEwmlc1m66rdy+WyxsfH9fOf/1yvvfaaUqnUrH35XeuedevWqbe3N2o35Fo/2Sp2e17+9dk2RaOjozp58qRef/11nTt3TqtWrYp9/4uVSCS0evVqpVIpTU1N1d1/t5LFPc620iK00sT/rOK+z+7RfSYrDh3Smu98R8/+0R+psG5d3coIW93vJ8Jc0sElbuJWxPj7+zMqqtWq9vft1yO3PqKhzqFa0J8VTwCwbFD5DwAAAGBO/d5jQ2EY8Az33XefPvCBD2jNmjWSFK0CcJXKoer0UNDUVjOnUiml0+m6FixzBf5DA4hD7XWsuCpsW50flwSwyQ1b2e4fNy6BYBMH9npCqwn8941r7+MH1/0gr3tf+162Utxt71YA2HZOiURtQG4ikdC6deu0bdu24HX5RkZGdPz4cY2MjOj8+fOamJgIzouIW7Fg2/Gk02l1d3fruuuuU1NT07zefz5OnTqlY8eO6cyZM9G5uWt3yQu7GsAFy/1Kf9e+yAbT3TXa10Pf/UQioa2f/KSq1aoOfelL0b+fUFLN8pNidqWF/+8jLvgvSfv79lPpDwDLHJX/AAAAAObkikAfUwMWwB/RzFUAAwt6Rgvuueee07e//W2Vy2X19fWpra0tChKHApNWqEI5biWAH/T3g8V+8F2K7/M/18oBv82LXa3gV12HEhKhRIX//n7/df91//j+ufoBZve6C0iHjjfbCgh7LJeAccF3F/h2KwLOnj2rX/ziFxoZGVE2m1Vzc3PscXO5nHp6erR+/Xp1dnZGx02n03XBcNcnP65q3gXAJyYmdPz4cZ08eVITExPq7u6OAuXzXeHha2lpUU9PjyYmJmZ8l9x30Q009ttIuXvnJ7T8++3/G/Cvb8VPf6q+gQH97DOf0fmVK6P7YhMQLhFg388mB0L/Ltx72RY/dtXH/r79GuoYqgX+qfQHgGWNyn8AAAAAF+WAavGihowV2VUABMwkSd3d3froRz+qHTt2KJ/Pq1gsRpXUtjrab48izRw+av9OpVLRPIBsNlsXnJYuBDn9ocPu91DVs/vd9Uv393N/h5IKofewQVc/eB3qc++28/e172P3C23jbyfVJy785EfcKgT7WqhXfChg7Qfcm5qatHnzZl1zzTWx7+F75ZVXdPz4cZ07d06FQkGFQkHFYjFKOtj77ZIQrvLeXWs2m1VPT482bdqkyclJnT9/XpK0cuVKZTKZeZ+Lu64XXnhBJ0+eVLFYjH7cTAR3D9x5hAYD+/c4FIx37Oe09aGHVJX05Be+EPz34Y5lEwyh1SaVSiVKALnvvf2O28Hb+/v2a6BvoPbfroOiHBQAljmC/wAAAAAuWkO3AZIYBhzQ1dWlz3/+89qwYYMymYxKpZKmpqaiVip+9bQfxLaBVFcR7oK/uVxOTU1NUdDV9pMPBdn919xzflDUPw/Hb0sz1yqDudr/xAWA/WC+PUf/Of9vG/i1x4oLFMetiLDc5xRKMPjHsq81Nzerq6tLq1evjlpBzWVsbEzPP/+8JicnNTExoYmJCRWLxWDFuvse2LZJqVRKuVxObW1t6unpUTqd1ujoqDo7O9XT0zOvc7B+8Ytf6MyZMxofH5+RAHB99e2qCJeQ8gcrO3ErEtzzK376U9380EN68gtf0PDWrTNWEPjfUb+lVeh197w/YyGZTGqgb0ADvdNLlvjvFgA0DNr+AAAAALhoDd0GSGIYcMDExIS+9a1vqVAoaNWqVWptbVU+n69rY2KDm35Q2QUsXXDfPef60Me1ArKBUctvw2ODxnYbJxTE9ZMSdh8/SG33Cf3uv1/cOdpr8avK41YL+MkB/71C9yeO314mLrBtny+XyxodHdVrr72mZ599VqdPn1ZLS8usrYGy2azWrVuna665Rr29vXXfFXe+bu6D/R6493eBeVf5XygU1NbWpvHxcZ07dy5KDsxXV1dXFDi3/fP9pIf7secSuodOXHLq+j/5E+Vff10/+8xn6laq+DMl/OO7+xNKPLn3s62B9vft1yPbHtFQB8N8AaAREfwHAAAAcEmOqBbz3qMGjX27thnbVftfVQkRVJP07LPP6uDBg+rp6VFra6taW1tnBGH9wadSfQsTF7y0w1VdAsAOpHXsQFuf3zM9LoFgz80Gfv0gbCi4H/e6fS2u2t9/LrRyIBR8D7X38ZMUs80P8Pf197P3z7+/ocC4+8ySyaTGx8d17NgxvfbaaxodHVW5XFZbW1vwvd2+HR0d2rBhg7q6upTP55XL5ZRKpepmBLjvgatmd/ehUqmoUChoampKuVxOuVxO58+f1/j4uPL5fHAmQoibXeEC++76Q6s+3HmEkgChlkl+EuD6//yf9dLOnRreunXG4Gp3H92/h7j2Um6fUJJg37X7akH/ToL+ANDICP4DAAAAuGRuFu5u1WLgDRdbYhhwUKFQ0I9+9CP94he/UDabVUdHh1paWur6sce15fF73tvq72q1GlV8u0RAXH98+z6WX+0ft52TSqVmtNax2/t/h1oK2YruuBUJoSC9v1rCvhY3s8A/h9B1u/38oLb/XqFVB6FEhn/NLmBeKBQ0PDysV199VS+99JJGRkbU3t6ubDY74/ycpqYm9fT0RKsCWltblUwmVSqVlEgkoiHE7vtg+9
lXKhWNj4+rWq2qublZU1NTGh0djY47H247G1S3KyDc7y44XyqV6q7d/474K0Sq1apWHDqktd/9rn7xwAOaXLOmbqWLv18ikahrOzRbIsn9PtA7UOvrv1e1n33zunQAwDJEz38AAAAAl0W/pLtUKzBtSAwDDkqn07r++uv1wAMPaMuWLWpublapVNLk5GRdP3VpZgsg6ULluQuC2u1di6BcLhdVeLtAqg3QziYUFJ+tx3+opUsokO9ed3/bQK1/HDtjIC7gHxfkD1Xt2+pxSTOq0+PuiTsPez42qRG6V/77uh+/77z7SafTamlpUXt7u1atWqUNGzYEzyXk3LlzOnXqlM6ePaupqSlNTk5qamqq7nrdQN5cLqfu7m61t7drZGRELS0t6urqmncroLNnz+rUqVMaHx/X5ORkNJi4VCrVzbCwbYIymYwymcyMlQb2sy+Xy9r2yCOSpJ988YvR/bGfVblcnjEA2a4CiPue7O/br/2b9tf+oK8/AEBU/gMAAAC4TAZVa/+ze/rvhmsD5FYB0AaoTqVS0alTp/T9739fw8PDWrFihVasWKGmpqYZQeNQexp/JYANeLvAq20H5Aeh7T6zsduEWua4bay41/3WQbPt4/bz3z+0CsBWg/uJB7uNbYcjqW5Actw5+Pfdvw77d4ht++MnClwg262imJyc1PDwsE6cOKGjR49qfHw8arkzm3w+r66uLq1bt06rVq2KAu7+e7rvxsTEhAqFgjo7OyUpWgWQz+dnfR+3TTqdjoL99hrdtbj3db/7CZvQZ991+LB6Bwb0wn/4DyqsW1fXrqlarUbHkmqJAHtNrg2SvyJjoHdAn7rtU7T4AQDMQOU/AAAAgMuu33tsOP2qrQIYVC0YRyAu0tzcrDvvvFP33Xef1q1bp2QyqampKRUKhWCFuh9QtQNR7XDYRCKhVCqlbDardDqtdDpdF5gN9X0PBWfd+zq257wValsUdwz/+HEBdHvcuDZGdrtQ9bekGffIJg5C24e4lkr2mH6Swb8H9pxtCxv76H63yZpEojbct6mpSfl8Xu3t7brxxhvnPEd7riMjIzpz5oxGRkY0MTERnX86nVY+n9eKFSvU1dUVvXdnZ6fS6fScxx4dHdXx48c1MTGhYrEYVf/bBJS9BzYpExqYvO2RR1SpVDT0F39Rd2/t6gh3j+NmB7gkyuObHtfjmx+vHYRKfwBAAJX/AAAAAC67QV3ogtOQLfAZBhyrWCzqpZde0ve+9z2l02mtXLlyRh94GwD1+YFV+7x0YRCrvwrAD9Laffy2OqGWP/52oVY+djtbCT5XRXjc+dhK+9DsAht0t6/7Mwrs9bvjhVZDxK1icI+hlQ5+IiB0rjaBYj8/25qoUqloampK58+f1/DwsI4eParh4eGoTdBsksmkmpqa1NXVpbVr16q9vT1KJjQ3NyuTyaharapQKCidTketgCqVyqzzByQpl8spnU6rWCzWtWty99PdN7siwK6+sJ9T59CQ+gYG9H//4A90rrMz2s8OufYD/fbzcq/v692nw52Ha4F/Kv0BALOg8h8AAADAFbNdFwYBN2xsyq4CaNiBCPG6u7v1gQ98QDt27FBnZ2cUBHbBVklRINXy29H4VfguGOv6sNtAc6g9TVzAfzb+OYQGA/vnZ88hJFSZH5ohYIPxrhe8PQe/jVIo+D/bddnj+PfWrsCw5y1pRoDcv4bQZ+n299/TJgkSiYRWr16trVu3xp73bM6fP6+JiYnoM2hra1OpVFKhUFBHR8ec+xcKBZ04cUIjIyNR//9CoRCtAnDXZpMh9lok6dZPfUqdQ0P6+29+s26fXC4XrUzxV2e4RJa7B49vflz7+/ZfSDBSzgkAmAXBfwAAAABX3AE1eJyKYcBzWr16tR544AHdcccdam5uVqFQiIKsoWp9aWaQ3Q6stUHqdDqtbDZbV5lte7aH+Mf2A/Bx7V1cEDu0KsGeY6gljBUKkPvHtwF1G0y2Vek2cGxbBV1MC6JQRf9cbYnsEFv7vvZ841YzhN7PBdkzmYy6u7u1Zs0arV27ds45AfMxOTmpbDY75zVJ0tGjRzU2NhYNrHbfU/uZhBIklUpFd99zj1786Ed1ZNeu6HO0wX2XpHLs57Svd5++svkrtRdo8QMAmCeC/wAAAACuin5Jd6nBi98PqJYAIHgXa+PGjfroRz+qW265RU1NTSqVSpqamqqrsI7rce9esz82EOtWAWQyGaXT6SgJ4FdrSxeC4n57HH/gsNvWX0Xgn6OtvA+dmxXqp+/zjxE697hVAHYFgn1ttvcJVfGHtvVft+cXamEU1yLIXp/93O15uIG469at09vf/nblcrnY65iPcrkcnA3hO378uM6cORPNqrCrAOx9tisX+vbvV+++ffqnz39e5265JQr0+6sdXLLKHWegb0CPb6KvPwDg0tDzHwAAAMBVMSjpqKTHph+PLOzpLIwB1Uqw3CqAhr0R8UZGRvTEE0/oueee0+rVq7Vy5Uo1NTXVBUql+gBzqO1KqPVOqVSqGzTrqv/tMUPBcL+9T2gfP/DtAr9xvfv99i5+gD7UQiZ0TqHj2/eIW13gn6+/WsG/z6Fjhc7Rnod/z/zEgz2uf58lRYF4m1xx7Zzc75VKRWfPntUrr7yiU6dOaXR0VGNjY8rn8xe9KmA+lf+S1NbWpkQiUTf4152nn3xy37VNjz+uka4uPX3//dF30b/n7rrK5bIe3/y4Pn3bp3W48zB9/QEAl4zgPwAAAICr5ohq8e+GTgAwDHheTp48qYMHD+qpp55SX1+fOjo6lM/nZwTNrVAA3R8MXKlUoiRAuVyesZog1OrH8YfguuB0qBWQX5lvj2mrvENV8XEB91Biw98vtIogLpHgt6gJ3QO7nb+PLxTI9+9vKHESurfud3tMF/i35+kq5SuViiYmJjQ8PKwTJ07oyJEjev3115VIJObV0/9itbS0KJvNqlgsRisQ4pItnUND2vKVr+jJj39co93d0YoTqTZLwN6jp7qf0vc3fF9f2fKVWtB/r6R9l/30AQANgrY/AAAAABZEv/fYkBgGPG/ve9/79Gu/9mvauHGjEolENBTYVfJbccFx6cIAVfeaGwqczWaVTqeVTqdnBJxDgW+/4r5cLs84B38f/zzjkgN2LoB/Df7KhNnmIczVMsienw2+x60CiDtOqA1TqJ2Prfy3Kw58frsi1zrHH1ocSiy474P7PBKJ2kDd5uZm5fN5tbe3621ve9uc1zZf58+f12uvvabz58+rWCxGFf2lUin6/fbPfEZdXV168ctf1rFjxzQ8PKxyuaxMJqNUKhWd79/d+Hf6n//mf9YOTIsfAMBlQOU/AAAAgAXhit1ZBaDaKoB907835I2Y20svvaQf/OAHeu2117Rp0yatWLFCmUwm2EfesQFsG0j3B7GWy+W6wG0ooB5XDe+32AkFvUP9+ONa6NjfbRA8dE122/msEIir2g+tBnDPz5YEiDuOvwoh9N72vtntQv39/c8j1DInNIPBrRIolUoaHx/XuXPn9Oabb+rYsWM6d+6cqtWq2traYq9vPrLZrHK5n
MrlskqlUvS8aynVOTSkTfv3S489pu7bblNfX59aWlpULpc1NjamSqWiv73xb/WffuU/6emep2nxAwC4rKj8BwAAALCgtqtW/L5XDR7vYhjwRdm5c6d+5Vd+RStXroxWArihq05cexrLXwngBgOn0+no0QWo7SBWqb7C3Q9QxwXsQwF///3tcdw52ve0x4mrug+tAvCD+f61+wH6UFW+5Y7ntvMD8vMZguxv54L4NgkTapvkKutdn3y/735o5YZr8+ReT6VSSqVSyufz2rBhgzZt2jTr9c6mUqno5Zdf1tjYmKampqJrePvHP64VK1ZIBw7M2OehMw/pL7v+svYH/+4BAFcAwX8AAAAAiwJtgFQL/h9QLQvS8NmQua1cuVL33HOP3vve92rlypWSpMnJSRUKhVmHA/sBYls17rZ3leM2CeCqudPpdLTdbCsP5lqN4Pj97v0Avx9cn0/1vn0u1CbIDlC2Ffb+jIRQWx9772wAP5R0CK0kiEuUuEC+O65t8xNq9WNXAbgZDvY47scF+R173e49ksmkWltb1dPTo/Xr16uzs1MXq1qt6ujRo3rzzTejYcC/9O53S/390p490XZ7tVeS1K9+gv4AgCuK4D8AAACARcOtAmjo9vfuJrAKYN5yuZx+//d/X7feeqs6OjpUrVZVKBSiJEBcn/y5hgZLqksCuFUALiFgW9bYYLp/nPm0z5ktKeCOEQqu+y2M/KHHNmHgnrN9/W3Q3p8FEDczIHRNcyU6bFA/dK3+HAb/mqQLA5btddokQLFY1MTEhKrVapSoscF/O8/BrqrwExDus87n8+rt7dXq1atj70PIiy++qLGxMa36r/9V6//bf5PMvdmrvReC/hL/vgEAVxTBfwAAAACLCm2ApjEM+KJdc801+tVf/VXdfvvt6unpUSqVUqFQULFYnNFGxgbAQwOD/b9dEsBVladSKWWz2bokQKg6fT4V+u73uH7+vrjztddln7fvaVcV2Kp3f8ZAXCIjdD1ztVbyz9Xe+7gEQiix4Z4P7ZNIJKK++8VisS7x45IBmUymrpWT3T9u5UQqlVJzc7N6enq0du1atba2zus633jjDbX92q+publZOnDgQtBfIqkHALhqCP4DAAAAWJRoA6T6VQAMAZ23DRs26P7779ctt9yinp4eVSoVTU1NRS1iJAUrv0NBbhsIdkF+V4GeSCSUyWSUzWbr2gL5Qr327fN2H/t7XMDd7esnAWxg3/W3d8Fy//r8Xvj+KgD/vthjhRIds4lLEPhtjkLH84P9fhLAX7ngjuESAS75UyqVlEwmlcvllMvl6lYBuH38lQ/+vXU/XV1duvbaa2cfFjw4KO3Yob0Htqt/+/Q/XIL+AICrjOA/AAAAgEWrX9JdovA9WgVA8PCibN68Wdu3b9emTZu0efNmZbPZKChsg+d28G0oUG0D6PZvt52rKndBZZsEiAuShwYN21kE9vh+tb49nv9o9w0F7kOzDew52fNwvfTd8e172NUOobkAPreNf1/suYcq/f3PwD+X0L3x2QRApVJRKpVSLpeLVm34yYVQAsLnZgR0dHSop6dn5oyAHTs0qEHtOCD+3QIAFgzBfwAAAACL2vbpn0E1eOE7w4Av2dvf/na1trbqIx/5iDZu3Kh0Ol0XDLaDX0MDa/2q+dA2kpROp6Ogsusdb4fY+vuGqs1tJbudJxA6n9lmF4RWLsw2h8BPcLj3tPfHHsdt464vtHoixB/g644TqroPJRXsvn4iI5SIcPe0UqnUDQeuVCpKp9PKZrOzrgDw76d/D92qiUwmo7Vr1+rE9Sd0UAelvf0aHJz+Z8q/VQDAAiH4DwAAAGBJOCDpoBq8gJZhwJcsk8kokUjo1ltv1f3336+NGzcql8tFQeFSqRQFhS0/MC0pGIC3bWjcPAA3GNglA0LtbWybHXcc+55xqwAkRVX5/nm548YF92drw+MH5m2g3V8t4c7HP2d/X1/o+KGA/2yrCuKSGjZpEVpJYT8393m7RI2/TdywZJv8qFar0ffnq2/7qr6y5SuSpO17pUH+fQIAFhjBfwAAAABLBm2ApjEM+C17//vfr3vvvVcbN25UPp9XsViMnQvgzNW73p8P4ILKriWQTQL4geS4ZIKfBLCv2f3s+8/2vN/+xw/2+6sOfH7w3z+u/5qbj+Dfz7gkg93Onn9cSx97XraNkb9Kw12Tf/22zY973Z3zXNfo3nN/337t690nSepXv/Yk+snNAQAWBYL/AAAAAJYU2gBNYxjwW9bWMZMofwAAIABJREFU1qZ3vetd+sAHPqD169crk8lEVdwuCeBXntuK9xC/Ut4mAVxboGw2G61EiBtYG9cCJy4JYAPV7txmSwz4wXGbkLDPxyU7ZmuN429rg/L+fnEDju31hO7/bEkYv49/6HzcMeJmFdjPJi5J8d+v+e/ad60J+muPtHev1N+v+MZHAABcPQT/AQAAACxJtAGaxjDgy+LDH/6wVq9erTvuuEOtra2qVqsqFosqFotRCxg/OO9XrzsuaBwarmuHA+fz+RkrAWYLfNu/3Xu43+15ufeLm1Hgb+u3xrFBcTcE2L7/bD357Xu43912rqI+1B4oNG/A386ek12dYPd198/dbz/AH0qqzNaeKJRoeeyax2qP1z6mP5r4I/1p05/anViQAwBYNAj+AwAAAFiy+r3HhsUw4Mti1apV2rBhg2666SbdfffdWrFihSqViorFosrlcvQjqa4nfNwqALedY4PMqVQqagWUzWbr+s7b4Ld7nC1I7Sci5kpO2P3sjw2Y+6sA7PH8c4mrxveTALaHvp8YkRQN5vWfDyVA/N77/goIdyz3mj1H//xnS2D4n+2+3n3ad+0+fezVjymTyeiPJ/9YK1euVEtLizQ4KO3YwUIcAMCiQfAfAAAAwJLmut80fMybYcCXTTabVaFQ0Gc/+1l1dnZq8+bNSiaT0VBg2xLIH3xr+RXnfj96SVHQ360IcC2BkslkXdsZ/718ce1r/PcL7WcH3Lpzsufqz0CwQ3VD8wviWvLYFkd2dUSowj5uVUHomm0SxgX9Q+16/PcLrWqwj/a8bF//j73yMf3ua7+rTCaj5uZmtba2qqWlRV0f+pAO/tM/UfUPAFg0CP4DAAAAWBZoAzSNYcCX1ebNm3Xdddfppptu0m233RbNBSgUCtEqABtsDgW+/X78oSG7dthsNptVNpuNkgI2GD9byxt3LPvoV7rP1sPf/e6C4jahEQru+6164gbzxs0GcI/lcnnGPbHXOx82qeCvXLD3wE+ixK2qcNvs79uv/Zv2S5L+/cl/r3938t9Fw36r1Wo0y6H9Jz/R+gceoOofALCoEPwHAAAAsGzQBmgaw4Avu1WrVmliYkJf/OIX1dHREVWYu9UAdi7AbMNoHRek9oP6LnCdTCaVyWTqftLp9IyA9Wzta/yA/XySAP5+/ooGVzlvj2ED7/b84s7F7esH5W1Sw98mtL9vtmu0LYXs8fwWS85A74AG+gYkSX/45h/qE8OfqFvl4BIAlUpF5XJZvQ8+qB//+Mfk2wAAiwrBfwAAAADLiot7
E4QTw4CvkNtvv1333nuvtmzZUuv1Ls1oBxTXssYPTtve8n4PfreNawfkVgO4FQF+uxwpXCkfdz5ztQ+yv/srDUIV/34LJBvYd/zzC7VK8pMafpIhdF7+fqG5BaEZAvY6qtWqfrripxrqGNJA34A+ceYTetfku/TuqXdHn4O/GqJYLCr1gx9Q9Q8AWJQI/gMAAABYdpgDYDAM+Ip5xzveoY985CPq6+tTc3NzlAAol8t1rWHm6l1vEwA2uG5XAySTySjwn06n61YEpFKp2KC9FTqXUKsiGyj3VyTYn9Bw3rgWSH7SYK57Yo85Vyslf4WAW4EQd91uW3/A8EDfgAZ6a9X+D519SA+fezh6LZlMRsmXTCYTHbdUKin7J3+iti9+UfGTFQAAWBgE/wEAAAAsW7QBmsYw4Cvq9ttv11133aX169dr7dq1ymazdS2BbFV8HFspb6vZ/Z70tl2QC/67oLTfoidu0G9cCx6fa/ET1wZoPi1+ZltdMFsCwF8xEJekcPfVb6EUt31omK8N+n/izCf0yOgj0coKt62bvWBbMCUSCRUKBbW0tjJiAwCwKBH8BwAAALCs9Uu6SwTmJNUPA2YVwGV35513Kp/P69d//de1evVqZTIZlctlFYvFqCXQbEkAG2i3FfZ+IN0GxVOpVN1qANcSyA/Sh8S10Inbxv0eajlkr8sNDI6bD+Bfw2xCSQBfaAiye4/QTAT3vB3m+9DwQ/qD039Qt8LC3Uf3nEsA1FX/Hzig5D330PIHALAoEfwHAAAAsOzRBshgGPAV1dLSonQ6rTvuuEP33nuvOjs71draqkQiUTcXIJQEiOttb4Pafo9722ffBaltOyD3vEsGzGcY8Vxtdtx7p1Kpun3iEhu22j7ueLO9dyiA7ydFXALAbu8nDpz9ffs11Dmkoc4h9atfe7RHY2NjOnfunMrlcnRMlwTIZrPR/XQrLCLbt2vw4EGSiwCARYngPwAAAICGQRsgg2HAV8X999+vpqYm3XXXXWpvb1cqlVKpVFKxWAy233FcMNtWtfvDbO3f5XK5rt+9XQXggtbud6m+yt8PkvsV85ZfuW/b49jn4/aLO07cKoTQ3IDQ8fy+/3FctX//9Jd+j/bUvX7+/HlNTk6qWCwqmUwqlUqpqalJuVyuLtkhSRoclHbsIIcGAFi0CP4DAAAAaCi0ATIYBnxV9PT0qKOjQ9u2bdP73vc+tba2RkN63SoA27vePbq5AVJ9xbsfEHcrAvwe+C7YbxMCcSsCrLgq/lCQ37X48Qfsxu0TGkxsn7PnH7pWK7S/PQ97DbbFj6v2n02xWFShUJAk5XI5pdPpmRvt2KHBwUH+WwIAWLQI/gMAAABoONunfwZFvFtSLQHAMOCr5vd+7/fU1NSkm266SS0tLUqlUiqXy1H1vuOC5TY5EFeVb4P5tgWOfS2RSNT1s7ePfgDfvZd9tMdz7HnZRII7d5eU8K9JCicZQtX7tt2Rv61/Du75aJhv74AG+mrDfD899mn9WeufzTjOJaHqHwCwBBD8BwAAANCwDkg6KOLdkhgGfJVt3LhR69at04033qhf/uVfVkdHh5LJZF07IBfEdz+uH32IHe7rgvB2X1tJ71YFuKC/62tvB/n6gfy4QL0Tqr63KxFC/P79c80j8JMA9rrstVar1bpK/9956Xe086Wdam5uVnd3t66//vrYc5q3vXul/n6FmyMBALA4EPwHAAAA0NCYA2AwDPiqa25u1vj4uD772c+qra1NGzdujPrLVyoVlcvlutZAtio+NODWJgFsZbwNsNvtXbLAJQNcQsCuDJitdY97zh0rFOwPDQv2hVoP2X0sOw/Avwc/af+JhjqGNNA3oN956Xe06+VddQmRVCql9vZ29fb2qq2tLfCJzFMioUHRPgwAsLilxP+PCwAAAKCBufj2Y5KOSjqygOey4I5IGlCtTOyx6UcSAFdUsViUJP3whz/Uq6++qhMnTmh0dFQdHR3K5/PKZrOzBsYl1VX320c/UeD30PdnDhSLRZVKpejHJR78Y8XNCLCB/VCCIC5p4fNb/ITO2z7vrnegd0B/esOfaqhzSDtf3qkHjz4YbecSGpI0NTWl0dFRFYtFrVixIngOsxoclAYG9KAa/L8XAIBFj8p/AAAAAJhGGyCDYcALZt26dZqamtLHP/5xrV69Wh0dHdHAWTcbwM0HsO2B/Kp+xyYHbG//uISA298NDLazAdyPv5+/r31fx61A8IVWDYSOH5JIJDTQO6B9vfskSbte3qWdL+8M3g+7isHNP+jq6tKqVasuLgnAoF8AwBJB8B8AAAAADNoAeRgGvKBuvPFGbdu2TRs3btTmzZvV1tamVCoVVee7gLk/5Ne2xHFc8N21wLHBdTsnIJRMcEmDTCajdDodtSYKHc+uAPAD+nHnWKlUosp8u/9sCQA7zPfBow9q99HdM2YlhJII9nxTqZQymYy6u7t17bXXRucQi0G/AIAlhOA/AAAAAHhc63sK3qcxDHjB3XDDDapWq/rN3/xN9fb2Kp/PS9KM6n+bBLDtf9zfLgEwW1DcHdce3x4nnU4rk8lEP7PNCHD8RIQTN5g4LnAv1YL+hzoPaahjSLuP7tbuI7ujY9kWRPZeJJPJGSsR7OyAZDKplpYW9fT06Nprr43/IKj6BwAsIQT/AQAAACCABICHYcALLpVKqVwu6+abb9add96pXC6n7u5uXXPNNcpms5JUNyDYBtH9QbzVanVGb/1Qqxy7GsAPprvhwC747ycE7KoA+x5x/CSAY8/TVfu7fv6u2t+yCQU/GWLfx78n7lq6u7u1du1arVq1qv4EqfoHACwxBP8BAAAAYBa0AfK4VQC0AVpw73znO5XL5TQxMaEHHnhAq1evVjabVaFQqGunExq26/8exwbw/WO5122gPpvNKp1OK5vN1q0GSKVSdbMGQkN//dUG1v5N+7W/b7+kWsD/Y698rG5b/5z810IDiO2wY3/WQS6X08qVK9Xb26uWlpbaTjt2SIODmnsSAQAAiwPBfwAAAACYgyt6p9XHNIYBLxpNTU3KZDIaHR3VL/3SL+nuu+9WtVrVihUrtG7dOmUyGUmqmw/gJwD8QbyhSvq4FjyhBIJbCWCHBGezWWUymbpjxK0ysEmAgd4B7d9UH/S3rYtCAX57TP81/9rdSgk7RNklPNLptNrb27Vq1Sq97W1vkxIJDYr/DgAAlg6C/wAAAAAwD7QBCmAY8KJ0zz33KJPJaGRkRLt27dKKFSuUyWSiIcHShaC//T1uJUBo+K9tGxQKvNu/3aDgTCZTtwrAJgn8BMNj1zwWDfPd+dJO7T66O9rHvmeoyt8fFuyq+0PX5a7dBf9dMsAdJ5VKKZvNatMrr+j63/99Wv4AAJYUgv8AAAAAcBFoA+RhGPCi1NbWpqamJo2Ojurd73633v/+96u5uTlKBLgguqt299vfOH7FvF+pH1oxYAfvuudtsN8lBFzgP5PJRO2CBnoHlEgktK93n3a+tFM7X95Zl3QIJQD8an4/WeGec4F918bIzjOwQf9yuaxSqRTdm1Qqpe39/Xrq6ae
p+gcALCkE/wEAAADgIvVLuku0/4gwDHhJuO+++yRJd955p1auXKmWlhYlk8m6QLefBPD78LtAvl9d7wLplg3SW7Ydj6usz+Vy+rsb/05/87a/0dazW3XL2Vu06+VdMxIKLnHgHyeUAPDPwQX5bVW/Y/v/u59isahSqaSep5/Wjs9/nq82AGDJIfgPAAAAAJdg+/TPoAgIRhgGvOh1dnaqvb1dfX19uvfee9XW1qZ0Oq3m5mal02klk8kZlfA2AWAr+v0EQIitsHftgiRFx6hUKvrmrd/UN7Z+Q5Lqqv1TqVTd6gE/8O+/twvax7Ue8hMAkupWEvgDgCuVikqlkt79H/+jnnnmGZJ9AIAlh+A/AAAAALwFByQdFLHuCMOAl5QPfvCD6unpUaFQ0B133KGOjg41NzcrkUioWCzWBcpDFfbz5QLs7vdKpaKv3fy1KOi/6+Vd2nVk14z3sfuFjiVpRgsflwCwyQa7n0ts2MHB/jHcY8ehQ3rXH/8xVf8AgCWJ4D8AAAAAvEW0AQpgGPCS0dXVpQ0bNqhYLOqaa67R+9//fnV0dCiRSCibzdYFx+3w3NDfVqjdTyKR0Ndv/rqeW/Ocnl39rHa+vFO7j+yu28btayv3Q+1/QhX+fgLAzRjwz8UG+O2MAJcwcK/d9ulPq+vwYcWvbQAAYPEi+A8AAAAAlwFtgAIYBryk5HI5TU1NSZI+/OEPq6urS6VSSVu3blV7e7tyuVwUHPdXBPjDgkNBfEn6xtZv6Bu3fEM7X94pSXWBf1vt7wf27bHswF7b9z+uBZBtFxQ6Zz/R4I5fqVR0z3vfq0GR2AMALE0E/wEAAADgMqINkMcOA2YVwJLS09OjTZs2qVgsav369br77ru1cuVKVSoVpVKpGf3ybULA97WbvqZv3HKhr79r8RNaLWD7+/u9/f1AfyhJEJotYFcK2PeyPf795EXXU0/p1kceoeUPAGDJIvgPAAAAAJdZv/cI1a8CoIx6SWltbdXk5KRKpZJ++7d/W6tWrVKpVNKWLVvU0dGhpqamqI9/qVRSuVyO9v36zV/X17d+XVJ90N+22vHZAH+ozY+/raS6FQDub/voDy22bYz8JIBz+6c/raHDh/m6AgCWLIL/AAAAAHAFuIJ3ut0YDANeFlauXKkbbrhBpVJJPT09uvPOO7VmzRq1tLRIkkqlkv72hr/V127+miRp58s7tfOlWpsfG8D3EwC2N3+ohZCfBLABf7sawFX72/1tL3/7vD2GXQHQceiQbv/MZ6j6BwAsaQT/AQAAAOAKOiDi3DMwDHhZ6OzsVKlU0ujoqB588EHddtttGrxrUKlUSv/rhv+lXUd2adfLu2ZU5IeC/C5wn0qlgn34LT8B4I5jW/yEAv22l3/o2Pactj3yiIaGhqj6BwAsaQT/AQAAAOAKow1QAMOAl52OL3Xo7CfPauvZrbrl7C3afWR3FJAPBeIlzWi1I9USADZI74b8WqEEQGjosB/o91cBhIL/HYcOaRu9/gEAywDBfwAAAAC4CmgDFMAw4OXBJXJU6+u/8+WddcF5vwrfSaVSda/PFqSvVCrBvv9+KyCbMPDf3x3frQCwx7KrBbY9/LA6hoY0c8IAAABLS3qhTwAAAAAAGoEL+O/x/m5og9M/Lnh8lxgGvJSYoP+ul3dp58s7g+18bGDeBtzL5bISiURdqx8/eO+2d4kAmwSwAXsb0HfvHUoYVCqV6H3dagJ/pkDH0JD2Xrm7BgDAVUPlPwAAAABcZbQBCmAY8NLRr1qiZnst6L/76O66oL8f5A+tAPBXBrjgfahK318B4Pa3rYD8Fj6z9fUPrUJwx+s6fFhbP/lJWv4AAJaF5NybAAAAAAAuJxf0P7CgZ7HIDEpRn5UDIjOyWE1X+++6tjbMd9eRXXVV+jYobxMCdhtb4S+Fq/odV6nv9rdJAncM/z39vv8+e652VUK5XNY1jz0WLUgBAGCpS4n/lwoAAAAArrpBSUcl7Z7++8gCnsuiMqBaEsDNAjgqbs5i0K9aUma62n/XkV265ewtMwLtfrDdBvT9intbrW9XAfjbh/r8u2399/Zf81v62L9tq6BEIqHOoSFtevxxPSi+cgCA5YG2PwAAAACwwGgDFMAw4MVhHsN83WNc8N9/LW4Arz2ev3pACgf9/fZAfusgu21odYB97paHH9bQ0BBjJwAAywbBfwAAAABYBFwbdQKPHhd8HhQ352oyQf+PvPAR7Xxpp1KpVF2fffu768Uvzazi94fyhoLzfgKgUqkolUrVnZLfu98+ukr+uRIA/v7uvTuHhuj1DwBYdgj+AwAAAMAisX36h57jHoYBXz1uhcUe6dcP/7r+7TP/Vvl8XtlsVul0WqlUasYw39AqAMs+H6ryjxsC7LcA8lcWhAYI+3MB3Os2OeGvHEgkErr1kUfUMTSkmRMCAABYutILfQIAAAAAgBoX9J9urU6nG8cNAz4w/UMboCtjutr/hpM36IanbtCHnvqQKqqoUChI0ozgulQfpHdma88z1/Y2iO+3/PETAn6LIRvo919zQ4X91Qhuv46hIe299DsHAMCiRPAfAAAAABaZHbowX5VON4a7MXtU65HEKoDLw7T4+Y1nfkP3P3V/LTCevNCCp1Qq1Q3IlWYO4nVsgN5V3Lvge2g/e7zQKgD/2H4bodBKglCCIW6ocO/AgCS+SgCA5Sc59yYAAAAAgKutX7XYtlsFgGn9upAROSBWALwV/ao1At5T6+v/jf/9Df3ms78pqb5q3wX/S6WSisWiSqXSrId1QXVbge+33fGr991jaNCvf2x7XnE9/+3r7lzcOfhJgc6hIVptAQCWJXr+AwAAAMAid0DSQRHnnoFhwJfGVPrf9P/dpI+9+jFt2LBBuVxO5XI5CvC74L2ttM9ms8rlcspkMspkMnUV/VJ9Cx8XbPcD+TbQP1e7ILsaINRGyG3jjpVMJuuC/rO1GZKkjkOHtO2RRxj0CwBYlqj8BwAAAIBFzsW1Cf573CqA7aqVtbFEYm4u8L9X0g7p57/zcx06dEgnT55UsVhUKpWKBvu6KnwXgC+XyyoUCpqcnIwSBH5g3XL7++Kq8UO9/Gdr72O3cQF/l7BwqwDcj2VXDPQNDFD1DwBYtlLi/38EAAAAgEXPBScfk3RU0pEFPJdF5YhqgWw3ITkhIrkhbojEdl0YmHxEKpVKeumll5TL5bR27Vq1trYqm81Kqg+y24G5NsgeGgI8X6GhvO55PwHg3sPf3x8OHDdPwP7utu84dEh9+/frQfHvCQCwPBH8BwAAAIAl4ohqgf89IgEww4Bqgf89qgW4uUE1ftA/0N+mUCjo9OnTam5u1po1a9Te3q50Oi2pvn2P7eUvqe752YL/oYr+0PN2LkAosO+vAgglHmZLFNjjStINjz6qf3njDe2d350EAGDJoe0PAAAAACwhrr29K3SHwTDgC8wwX+1VLTEyy/14/fXXtW/fPj3xxBMaHh6WJOVyOWWz2SgRIF3o018ul1UqlTQ1NaWpqSkVi8UZwXtfaCiv/5ptB+SC/v4Q39Dx7XHtjAL/PezvnUNDOh
h/SwAAWPKo/AcAAACAJci2ARpYyBNZbI5o5iqARrpB/apds+nrP982SNVqVU8++aRKpZK2bNmiXC6nVCqlRCJRF5C3Vfb+4N35tAAKDfoNVea797D7hYYLW/5cgNCwYEnqHRhQ59AQc6IBAMsawX8AAAAAWKJcGyDmAAQMSjqoWiB83/Tvy/0GuWG+Ui3hcQn/az+TyURtgNavX6+2tra6inqXBPAH8LofvyVPKEDvB+Ljtgtx1fz+MULHsoF//xz7Bgb0z2+80VB5IQBA4yH4DwAAAABLmCt0361anJtZt4a7Oct9GLDf1/9BXfJ1VioVTU1NqaurS+l0WitXrlRTU9OMBIBtyePYlQD2+bghvDY4Hze41640mGvbOP55dw4NMegXANAQCP4DAAAAwDIwqAvdXqhm9izXYcDzGOZ7KYrFoo4cOaJMJqOenh61trYqm81GLYAcW0lv/3bigvJ2aK+/j3vNzg+wx/PbBdkEgP9+9tzsOb79z/6MQb8AgIZA8B8AAAAAlolB1eLau6f/Xg7x7cvGtgFa6qsArlDQ33f06FGdOnVKa9euVXNzs/L5vDKZTF3Q3fFb7LhtZksAuB9fqHWP+93OFbDb+m2G/F7/7rWuw4ep+gcANAyC/wAAAACwjBxRLQ78mKQ+Ld349hWxHIYBu77+e6d/9l3ZtxseHtabb76pjo4OdXR0qKWlZUbbHcsF/21roFBFvgvkhxIA/uwA/zV3THsefgLA7/Pv3Pjoo/pnqv4BAA1iZoodAAAAALDk7Zh+PLCgZ7FI9evCDaqqlgRY7PpVO1cX+O/XVcnsFAoFPfnkk/r2t7+tF154QefOnVMikVA2m41aAfkB/Gq1qlKppEKhoImJCU1NTalcLtclBSqViorFosrlsiTNaCnk2v74SQD3WCqVomPZnv5uW3de/qDijqEhHbzytw0AgEWByn8AAAAAWKZoAzSLpTIM+Cq1+JnL8ePH9cILL+jaa69VS0uLmpqalMlkJCm4AsCpVqtR4N8F5f0WQdLsPfv9Vj9SrW2QP3NAqk8auOC/O9amxx9X1+HDUd4HAIDljuA/AAAAACxjtAGaw2IdBrxIgv7W2NiYjhw5ohUrVqi7u1stLS1KpVKS6hMAoZZA5XI5Cszbgb9+wD+uzY/bz24T137IrgiwP7d96lMa1NLr9AQAwKUi+A8AAAAADcAVuW/XgseQF5/FNAy4f/o8XHufRRD0d6rVqoaHhzU8PBwlALLZrNLpdPS6NHNgr22947axrYL8lj5xQ4L9gH7cTAHHJhu6Dh/Wmu98h0G/AICGQvAfAAAAABqEiyE/psVT4L5oLIZhwG6Y78Hpn0X6v9ZPnz6t5557TuvXr1dbW5tyuVxdCyAbpJcuBOFtCyCpvjWPbdHjXrPDfB13fH9IcBy3/Q2PPqp/fv11Bv0CABoKwX8AAAAAaCAuxk0boBh2FcC+6d+vdJbEb/FzlYb5vhXj4+N66qmn1NLSotWrVyufzyudTkcBexfgd22BnEqlErUA8tvy+Pzn7UoBt7+kGSsA/JUBnUND6tu/n6p/AEDDIfgPAAAAAA2INkCzuFrDgBdhX/+LMTk5qZMnTyqTyWjVqlVqampSOp2uG8ZbqVRmVPW75+1wXj8J4LcQcsF8Oy8gtCrAXz0gSTc8+qj+5Y03qPoHADQcgv8AAAAA0KBsGyCGoAZcqWHASzzob42MjOiFF15QV1eXurq61NTUpEwmE1XpuwC/pBkBfjcDwLYJ8oP7oTkC/t82ueCvAqDqHwDQyAj+AwAAAEADO6JaTJs5ADEu5zDg7ZJ268Iw372qtRZa4gqFgn784x9LktasWaPm5mZlMhmlUqlgZX5oFYCfAAjNAAiJWzXgjrfmu99V59CQHrwSFw4AwCKXkFRd6JMAAAAAACy8fu8RHjeQd1C1av1L2Ve60Nd/mWlra9M73/lOffCDH1Rvb6/a2tpUrVZVKBRULBZVKpWigb+29Y9r55PNZpXL5ZTL5aL2QSF+ex/HX1EgSfe8972X9HEBALAchP8vKQAAAACg4bh49IEFPYtFrF8XoshV1Sr557NPVReq/RNaloF/SRodHdUTTzyhv//7v9ezzz6rM2fOqFqtKpfLqampSdlsNmoJZCv8k8mkKpWKCoWCJiYmNDExoUKhUDcTwPJbAkkzVxAkk0l1Dg1JEr3+AQANi7Y/AAAAAIDIoGgDNKv5DgNeRn39L0a5XNbLL7+s4eFhtba2KpvNRhX9mUwmuI9t12NXBkiqSxS4be2KAL+VkO35f+Ojj+qfGfQLAGhgBP8BAAAAAHVcfHu3arHrBohZX7y4YcD903+7Sv8GCfr7Tpw4oUOHDqmpqUlNTU3K5/PK5XLRHABbtW8lEglVKhWVy+W6yn9/BoA9jj/kt1qtquPQIQb9AgAaHsF/AAAAAEDQoC7EsQcW+FwWJX8YsBvoe3D6p8H/13ahUNDQ0JDOnj2rzs7OutY/NnDvJwLcay4J4Pf4d6+7lkHShSG/7u8bHn1U/0LVPwCgwRH8BwCzd+CkAAAO0UlEQVQAAADEcm2Adk//TRW1xy2TSEjqnf69Xw1Z7R/n2LFj+tnPfqaVK1eqtbVV+Xxe2Wx2RgLAJgFcEN8lAMrl8ozj2op/e6zOw4e1iap/AAAI/gMAAAAAZndEtVj2Y5L6RFw7aFC1wD83J2h8fFwnTpxQLpfTihUr1NTUpHQ6PWsCwAb1XQKgUqlEqwBsmx+77drvfledQ0N6cEGuFACAxYPgPwAAAABgXtycW+YA4GJVq1UNDw/rzTffVGtra90wYH9YbygBIGnWWQC27c+tjzwS5WIAAGhkBP8BAAAAAPPmgv6P6cKMW2C+zp49q6efflo9PT1qa2tTPp+PZgBIcycA3CoAlwBwff/d6x2HDmnt//k/tPwBAEAE/wEAAAAAF8m1uacNEC5FsVjU0aNHlc1m1dPTEw0CTiaTda1//EHAll0FkEwmlUqllEwmGfQLAIBB8B8AAAAAcEloA4RLNT4+rtOnTyubzaqrq0tNTU2xKwBcT3+rWq2qUqmoVCpFqwC6n3pKmx9/nKp/AACmEfwHAAAAAFwy2gDhUo2OjurZZ59VR0eHOjs7YxMA9nc77DeRSEQrAMrlsm79y7/Uv546RdU/AADTCP4DAAAAAN6SI6oF/veIBAAuTrlc1uHDh1WtVrV27Vo1NTUpl8splUpJUl3Fv58AsL+vfOYZ3fS1r1H1DwCAQfAfAAAAAPCWuTkAu0UbIFycarWqN954QyMjI+rp6VFLS4uampqUSqXqhv26baWZCYB3f/nLaj19Wg9e9bMHAGDxIvgPAAAAALhsbBuggYU8ESwpU1NTOnr0qKrVqrq6utTS0qJsNhsN8p0rAfDuL39Zg+I7BwCARfAfAAAAAHBZuTZAzAHAxahUKvr5z3+uM2fOaM2aNcrn88rlckqn08EVAG4Q8Opnn9WmH/yAlj8AAHgI/gMAAAAALjvaAOFSHT9+XD/60Y/U3d2tzs5OtbS0RC2Akslk3baVSkW//Nd/rf97+jSDfgEA8
BD8BwAAAABcMYOqBf/3iJYsmL9isah//dd/1dmzZ7Vly5ZoBYA/B2D1s88y6BcAgBgE/wEAAAAAV9Sgau1/dk//TZAW83XkyBH98Ic/1DXXXKP29nbl83klk8loDsDt/+W/6MenTlH1DwBAAMF/AAAAAMAVd0S1JMBjkvpEGyDM38TEhF588UWl02l1d3dHqwBWPvOMrvvqV6n6BwAgRnLuTQAAAAAAuDx2TD8eWNCzwFJSrVZ17Ngxfetb39L3v/99vfLKKxobG9OWv/kbSSSSAACIQ+U/AAAAAOCqog0QLsXIyIief/75qPL//f/jf2hQzJIAACBOQlJ1oU8CAAAAANCYDkg6KKrScHF+921v01+/+KJ2iMp/AADiEPwHAAAAACyofu8RmItrG7Vj1q0AAGhs9PwHAAAAACyoftWqtw9I2r7A54LFb/v0z96FPhEAABY5Kv8BAAAAAIvGAdWCurRyQRyq/gEAmB8q/wEAAAAAi8YO1aq6aQGEEKr+AQCYP4L/AAAAAIBFhTZAiLNn+pGVIQAAzC290CcAAAAAAIDPBXcJ9sKi6h8AgPmj8h8AAAAAsCgNijZAuMCtAiERBADA/DDwFwAAAACw6PVLuksMeW1kDPoFAODiEPwHAAAAACwJbtjroKj+bjTbVQv+7xCfPQAA80XwHwAAAACwpPR7j1j+qPoHAODi0fMfAAAAALCkuKD/gVm3wnLhVnww6BcAgItD5T8AAAAAYEmiDVBjOKDa55xY6BMBAGCJIfgPAAAAAFjSDkg6KNoALVdV1ar++XwBALg4tP0BAAAAACxprg88bYCWHxfwZ2UHAAAXj8p/AAAAAMCysF3SHtWqxAkWLw8M+gUA4NIR/AcAAAAALCu0AVoetqv2We4QyRwAAC4FwX8AAAAAwLLT7z1i6aHqHwCAt4ae/wAAAACAZadftWrxA6pVkGNp2T79s3ehTwQAgCUsvdAnAAAAAADAleBaxezx/sbix2cGAMBbR+U/AAAAAGDZGlStbcx20QJoKaHqHwCAt46e/wAAAACAhrBdtYpyesgvbv2qfU6JhT4RAACWOCr/AQAAAAANYVC1anLmACxud4l2PwAAXA5U/gMAAAAAGk6/94jFYbtqyZkdIgEAAMBbRfAfAAAAANCQ+lWrMqcN0OJxYPqRzwQAgLeOtj8AAAAAgIbUL9oALSbbxaBfAAAuJyr/AQAAAAAN74Ckg6IN0EKi6h8AgMuLyn8AAAAAQMNzAWeC/wtnu2oJGAAAcHlQ+Q8AAAAAwDTXemZQDJy9mvol7VEtSAEAAC4Pgv8AAAAAAHhoA3R1VVVLttDyBwCAy4e2PwAAAAAAeGgDdPW4YcsM+gUA4PKi8h8AAAAAgBjbVWtHs1e0AbpSGPQLAMCVQfAfAAAAAIA50Aboytiu2r3dIZIrAABcbgT/AQAAAACYh37vEW8dVf8AAFw59PwHAAAAAGAe+lWrTj8w14aYl+3TPwcX+kQAAFimCP4DAAAAADBPg6r1/z+gC4NqcWnc/WMlBQAAVwZtfwAAAAAAuAS0AXprqqolU2j5AwDAlUHlPwAAAAAAl8AF/WkDdPFc1f/eBT0LAACWNyr/AQAAAAB4C7ZL2qNaIHtwgc9lqWDQLwAAVx7BfwAAAAAALgPaAM3PdtWC/ztEsgQAgCuJ4D8AAAAAAJdJv6S7REX7bKj6BwDg6iD4DwAAAADAZbR9+mdQVLb7qPoHAODqIfgPAAAAAMAVcEDSQdEGyOpXbT5CYqFPBACABpBc6BMAAAAAAGA5cm1tCP5fsEdU/AMAcLUQ/AcAAAAA4ArpVy3YfUC1ljeNzF3/3gU9CwAAGgdtfwAAAAAAuAoavQ0Qg34BALi6qPwHAAAAAOAqaOQ2QG4IMlX/AABcPVT+AwAAAABwFW1Xrff9XjVO/3uq/gEAuPrSC30CAAAAAAA0Ehfw3+P9vVy5qn8C/wAAXF1U/gMAAAAAsED6vcflyA07Tiz0iQAA0GDo+Q8AAAAAwALpV63y/8BcGy5h27X8VzcAALAYEfwHAAAAAGABDarW/99VyC8n7noY9AsAwNVH2x8AAAAAABaJ5dYGiEG/AAAsHCr/AQAAAABYJFzQfzm0AXKDfqn6BwBgYVD5DwAAAADAIuMC54Nauv3yqfoHAGBhEfwHAAAAAGCROiDpoJZeG6Dtqp37Di3d5AUAAEsdbX8AAAAAAFikXNX8UmsDtGf6kcA/AAALh8p/AAAAAAAWuaXWBqiq2nnS8gf/f3t3l9M4EERhtJDYF87KYlbmzMpmHtqWkBGJyM+4fX2OhCwCJH7+KFcDsB3xHwAAAHZiD2uArPwBgD6I/wAAALAj4+raGwf9AkAf7PwHAACAHRmrTdRP1abse7KsJ/rc+kYAAJP/AAAAsFdTtdDey3odU/8A0A+T/wAAALBTp2qT9j2sADL1DwB9Ef8BAABgx3pZA3Ser708hQAAR/e+9Q0AAAAAj1mC+5YB3tQ/APTF5D8AAAAEuNR2a4CWJw5M/QNAPxz4CwAAAGHGqvqo/3fwroN+AaA/4j8AAAAEWg7gvdRrJ/KHavH/9OLPAQB+R/wHAACAYOPq+mym/gGgT3b+AwAAQLAl+k9Xf+s+y9MFDvoFgP6Y/AcAAIADeMUaoGl+z7cnvR8A8DziPwAAABzIVFV/6jlrgP5Wm/p/1UohAOB+1v4AAADAgSy7+R9dA7QEf4f8AkCfTP4DAADAAQ1Vda42uX9PwHfQLwD0TfwHAACAA7tnDdAw/92pTP4DQK/EfwAAADi4cXW9xdQ/APTPzn8AAAA4uLHaBP9Ubar/mmH++nz1TQEAD3nf+gYAAACA7S3re86r79du/RwA6IPJfwAAAKCqWtA/VZvs/2kFkKl/ANgHO/8BAACAb4ZqU/5f9/qP82tvm9wRAPAbJv8BAACAby7VJvy/ngPwUdb9AMBemPwHAAAArhqrhf+h2pMA/gEAAP0T/wEAAICblul/4R8A9kH8BwAAAACAMHb+AwAAAABAGPEfAAAAAADCiP8AAAAAABBG/AcAAAAAgDDiPwAAAAAAhBH/AQAAAAAgjPgPAAAAAABhxH8AAAAAAAgj/gMAAAAAQBjxHwAAAAAAwoj/AAAAAAAQRvwHAAAAAIAw4j8AAAAAAIQR/wEAAAAAIIz4DwAAAAAAYcR/AAAAAAAII/4DAAAAAEAY8R8AAAAAAMKI/wAAAAAAEEb8BwAAAACAMOI/AAAAAACEEf8BAAAAACCM+A8AAAAAAGHEfwAAAAAACCP+AwAAAABAGPEfAAAAAADCiP8AAAAAABBG/AcAAAAAgDDiPwAAAAAAhBH/AQAAAAAgjPgPAAAAAABhxH8AAAAAAAgj/gMAAAAAQBjxHwAAAAAAwoj/AAAAAAAQRvwHAAAAAIAw4j8AAAAAAIQR/wEAAAAAIIz4DwAAAAAAYcR/AAAAAAAII/4DAAAAAEAY8R8AAAAAAMKI/wAAAAAAEEb8BwAAAACAMOI/AAAAAACEEf8BAAAAACCM+A8AAAAAAGHEfwAAAAAACCP+AwAAAABA
GPEfAAAAAADCiP8AAAAAABBG/AcAAAAAgDDiPwAAAAAAhBH/AQAAAAAgjPgPAAAAAABhxH8AAAAAAAgj/gMAAAAAQBjxHwAAAAAAwoj/AAAAAAAQRvwHAAAAAIAw4j8AAAAAAIQR/wEAAAAAIIz4DwAAAAAAYcR/AAAAAAAII/4DAAAAAEAY8R8AAAAAAMKI/wAAAAAAEEb8BwAAAACAMOI/AAAAAACEEf8BAAAAACCM+A8AAAAAAGHEfwAAAAAACCP+AwAAAABAGPEfAAAAAADCiP8AAAAAABBG/AcAAAAAgDDiPwAAAAAAhBH/AQAAAAAgjPgPAAAAAABhxH8AAAAAAAgj/gMAAAAAQBjxHwAAAAAAwoj/AAAAAAAQRvwHAAAAAIAw4j8AAAAAAIQR/wEAAAAAIIz4DwAAAAAAYcR/AAAAAAAII/4DAAAAAEAY8R8AAAAAAMKI/wAAAAAAEEb8BwAAAACAMOI/AAAAAACE+QfA6tSYnmuHGAAAAABJRU5ErkJggg==) ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABgIAAAOuCAYAAADFNQxvAAAgAElEQVR4nOzde3Cc5Xn//8+z5/NqJa0kWz7KQCAcEpAMdEIIBEiCCSGh2KWZtiQplWknTSiBsX+TwtQkITIhpZk0NJh0+iVpksZKW2ISTYxEAXMyIBsIhybxAbDBli1LK+1qz/s8z+8Pf/f+2mDAsuUDyvs1o5G1++zuvSv94bk+931dluu6rgAAAAAAAAAAwLTkOdYLAAAAAAAAAAAAR45v85atx3oNAAAAAAAAAADgCLEk0RoIAAAAAAAAAIBppLu7W6tWrZJEayAAAAAAAAAAAKY137FeAAAAAAAAAAAAmHoXXHCB/uxzf8mJAAAAAAAAAAAApqM/+9xf6psvLCAIAAAAAAAAAABgOkpEgtoyHiQIAAAAAAAAAABgOiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGsoWylqQLBMEAAAAAAAAAAAwHf37//lX/X+nb5HvWC8EAAAAAAAAAABMvYcfflgPP/zw0Q8C4vG4Wlpa5PV633JfoVDQ0NCQarXa0V4WAAAAAAAAAACTYlmW5s+fr+uuu06rV6/W4ODg214bj8f15S9/Wc8//7zWrl2rSqVy1NZ5VIOAQCCgK6+8Utdee618vv1f2uPx6LXXXtOtt96qF1988WguCwAAAAAAAACASWloaNBHPvIR/emf/qkikYhuueUW3Xvvvbr//vvfUuR/3/vepxtvvFGtra3q6urSiSeeqDVr1mjz5s1HZa1HNQhwXVdXXnmlXnjhBd13331vORVw2223ae7cuQQBAAAAAAAAAIDj1sKFC/XpT39ap59+uqS9tW/LsvRnf/Znam5u1o9+9CMVCgVJ0kc+8hF97nOfU1NTk1zXlcfj0QUXXKD3v//96u/v15o1a1QsFo/oeicVBFiWpWg0ekgv5LqufD6fHMfR4OCgHnjggbdc88UvflGRSEThcFgej0eWZU36dWq1mkql0iGtEQAAAAAAAACAtxMIBPT5z39eH/7wh5VIJOS6rqS9tXPXdeX1enXZZZdp3rx5+va3v62PfvSj+sxnPqNoNGrCAmlvvbylpUVXX321Ojs7tWrVqiN6OuCgg4BoNKorrrhC119/vSzLOugiveu65sNwHEennHKK7r///gNeGwgEdPvtt+uGG24wpwU8Hs/BLlGSNDIyoq985SucKgAAAAAAAAAATJmmpiZ9/etfV3t7u6l7v7lOXq+Fz507VyeeeKJOPvlkEwLsa9/g4OSTT9Ydd9yhf/7nf9bAwMARWftBBwHpdFo33HCDXnnlFfX29h50gd7j8ci2bVXKZdVsW7feeqsCgcABr/V6vbr//vvV398vn8+nSDgs23EOdomSpH/6p3/SSSedRBAAAAAAAAAAAJgymUxGDz30kK666iqFQiFT3K8HApZlqVQq6amnntKqVauUy+X05JNP6hOf+ISuuuoqtbS07Hd9neu6evHFF/Xss88esbUfdBDgOI4cx9G6deu0evXqSb3I7bffrtmzZ8vv9+vUU099y2yAumg0qksvvVSNjY0Kh0L62c9+ptW9vZN6rZtuukm2bU/qMQAAAAAAAAAAvBPHcbR69WoNDQ3pL/7iL9TS0rJfUf9///d/tWbNGj3++OP7Pe7Xv/61fvOb3+iqq67Shz70IYXDYXOfbdtau3atfvjDH5qZAkfCpGYEuK673yIP1rp163T//ffrP/7jP/SXf/mXeuqppw543YoVK5ROp/WNb3xDtVpNX/zbv530a/l8R3X+MQAAAAAAAADgD8i6dev0xhtv6Atf+IJOP/10ZbNZ/fznP9cjjzyiTCZzwMfs2LFD3/3ud/XYY49p8eLFev/73y/btnX33XdrYGDgiG9uPypV81/+8pf6q7/6K91www267rrrND4+fsDr+vr69MlPflLValWXXXaZdu7ceTSWBwAAAAAAAADAQduyZYtuu+02fepTn9Jjjz2m7du3v+tjXNfVxo0b9eKLL+qzn/2sHnroIb322mtHYbXS5CbxHgKfz6czzzxTiURC6XRa991339u2BjrttNN0//3367777tPLL798pJcGAAAAAAAAAMAhOeecc7RkyRJdddVVSiQSB/UYv9+v8847T4sWLdK1116rSCRyhFe51xELAmKxmK677jr19fWpp6dHjY2NuvLKK7VgwQLdeeedb7k+kUho9erV+q//+i+1t7drYGBA55577pFaHgAAAAAAAAAAkxaNRnXttdfq+uuvl9fr1QUXXKDvfve7+vCHP/y2hX3LstTR0aEbb7xR119/vYLBoD7wgQ/ojjvu0AknnHDE13xEWgN98IMf1Le//W1lMhl9//vf16ZNmxQOh+U4jj73uc/p5729euqpp/TjH//YPOaee+5Rdnxc119/varVqj7+8Y9rxYoVuvfee/WTn/zkSCwTAAAAAAAAAICD1t7ers9//vM6++yz5bquuT2VSummm27SU089pfvuu08vvfTSfvddeumluuyyyxSPx83truuqvb1dy5Yt049//GM9/PDDR2zdUx4EnHXWWfrXf/1X3XXXXVq/fr3mz5+v9vZ2VSoV7dq1Sx0dHfqv//5vrVq1Ss8995xeeuklXX/99bryyit11113qbu7W08//bTuv/9+rV27Vrfddpuq1ap6e3uneqkAAAAAAAAAAByUVCqlv//7v1d7e7ukvbv861zXlWVZOuecc/T+979fDz/8sH784x/rzDPP1J/8yZ9o7ty5sizLXFd/rOu6am1t1Ze//GUFAgE98MADR2TtU9oaaMaMGfre976n733ve3r00Uc1c+ZMWZalrq4u3XnnnfrMZz6jP//zP1dDQ4N27Nihe+65R1dddZVWrFihn//nf6qtrU2dnZ3q6OjQxRdfrGAwqDvuuEOf/vSndcopp0zlUgEAAAAAAAAAOGhjY2P6t3/7N42NjUnSficC6kV+13UVj8d1+eWXq7OzU3/8x3
+sefPm7XddXT0UcF1XfX19euSRR47Y2qcsCPB4PLrjjju0Zs0arV+/XrNmzVIqldLVV1+tCy+8UFu2bNG5556rarWqQCCg++67T+l0Wr29vXriiSe0c8cODQ8Pa/PmzTr77LP1sY99TH/7t38ry7LU39+vL37xi1O11HeVSqW0evVqua6rVColSeru7pbruurv7z9q6wAAAAAAAAAAHB9c19XTTz+tL3/5y9qwYYOq1aq5fd+d/uPj4/rGN76hk08+WT/96U/17LPP7vcc9S9J2r17t3p6enTPPfeoXC4fsbVPWWugyy+/XE1NTert7VVbW5suvfRSWZYl27YVDAZVqVTU3t6u0dFR5fN5nXDCCdq4caN2DQ1p165damlp0fbt21Uul7Vr1y5Fo1HF43Elk0n9z//8j84//3x96EMf0uOPPz5VS35bHR0dWrx48X631QOBTCZzxF8fAAAAAAAAAHB8ymQyWrFihT7+8Y/rU5/6lGbPnm3u27p1q9asWaNFixbpzDPP1IUXXqh///d/1/DwsC655BJzAqBYLOrRRx/VT37yk6NSc56SEwEej0fXXHONfvazn2lsbEyRSEQf+chHFI/H9dvf/lavvvqqZs6cqU2bNsnv92vBggX6wAc+oLPOOkvBUEjz58/XrFmzdOKJJ+qkk07SzJkzFQwGVSgUdOGFF2psbEz33HOPPv/5zx/WOjs6OnT33Xfvl7oMDg6qu7v7XR+7cuVKWZalJUuWHNYaDlUqldpv3W/+GhwcVE9Pjzo7O4/J+gAAAAAAAADgD8natWvV09OjX/3qV5Kk9evXa82aNfrkJz+pM888U5IUi8V0zTXXKJfL6Z577pFt2/r973+v73znO/re97531DaeT0kQcOKJJ8rj8ai/v9/snH/11Vd18skn6+qrr1ZLS4tCoZC6urp0xhlnaP78+YrH4/J6PAqFQpo7d64WLFigE088UdLeWQOu62rPnj1KpVJKp9Nav369vF6vuWayOjo6NDg4qI6ODi1YsMAc0+jt7dXdd9+tnp6eqfgojorly5eb9dfDia1bt2rZsmUaHBzU3XfffayXCAAAAAAAAADTXmtrq2zb1t/93d9pcHBQX/jCF3TCCSeY+13XVTgc1mc+8xmdeeaZ+spXvqL77rtPp5xyiny+KWvY866m5JUuvPBCvfDCC8rn87riiit02mmnaXR0VIlEQoVCQYlEQjNmzJDf71e1WtXzzz+vJ598UuvWrdOePXvU3NystrY2rVixQhdeeKF6e3vluq5CoZAcx9EZZ5yhLVu26MEHH9RFF12kTZs2TXqNPT09SqVSWrp0qbZu3WpuX7lypbm/t7dXGzZsmIqP5Kjq7e1Vb2+vuru7dffdd6u7u1upVOqYnV4AAAAAAAAAgD8Etm3rsssu05lnnqnZs2fvNy9A0n7fFy5cqI6ODsViMT3xxBP7DRs+0qbkRMB5552nJ598Uh/96Ed1+eWXK51Oq6mpSfl8XvF4XHPnzlUymVQ6nTY7/a+99lrdeeedOuuss7R7925NTEzIdV298MILKpVKamhokN/vVyaT0fDwsCTp4YcfVnNz8yGtsd4yp35iYV8rV67UqlWr1NHR8baPX7x4sfr7+7V69eoDPnd/f/9+rXruvvvuAz5f/Xnq123ZskXLli07pPf0ZqtWrTLBxuLFi/d73sHBQbmuq2XLlmnx4sXm5zcPP+7s7DSDkutfq1evfkvLoX1bFXV2dqqjo2O/99Xf30+bIgAAAAAAAADT2rPPPqvrr79eY2NjKhaLpvBfL/LvGwy4rqtKpaJ7771Xd955p2zbPmrrPOwgIBgMKpFIaHR0VB6PR4lEQpVKRaVSSTNmzFA8HlexWFS1WlW1WtXatWv1wQ9+UOl0Wqeddpq++tWv6qtf/apuvPFGbd68WZs2bVIymVQmk5HH49FLL72kZ555Rh6PR3PnztXExMQhrbO+0//tCvRLly5Vb2/v2z6+s7NTF1988VtuX7Zsmfr7+zUwMGBa9QwMDKi7u3u/Vkn1a1evXq3e3l5ZlqUFCxYok8mop6fnLQX5Q7Vy5UrTV+pAsw/qhf56kX7fz6LeWkiSeS9Lly5VZ2enBgcH3zaw6OzsVHd3txl2sWrVKl188cXq7+9/x3AFAAAAAAAAAN7rXnvtNX31q1/VqlWr9Nvf/tbUVveVyWT0q1/9Sv/wD/+gX/7yl0d9jYcdBPj9fjmOo3w+L9u25fV6NX/+fLW2tsrv98uyLG3evFl9fX266aabtHHjRkUiEfl8PtVqNXV0dOi8885TLpfTtm3bFI1GlclkVC6X9cYbb8h1Xdm2rWQyqXg8rrGxsUNa5/Lly5XJZNTZ2aktW7ZMyUyAxYsXq6enRytXrjQ78aW9O/OlvUX2ejG+s7PTXFu/f+vWrbrkkkuUyWR08cUXm0J7KpXSli1btGXLlgOeYHgnmUzGhB4dHR1vKcQvXrxYS5YsMX+MCxYskLQ3NOjp6dGGDRv2aym0atUqLV26VNLe9kkHChdSqZSWL19ufl66dKk2bNigVCrFvAIAAAAAAAAAfxAefPBB3X777frRj36kUqkkae+G6yeeeEIrV67UPffco507dx6TtR12EJBIJJRIJJTL5XTBBReoVCqpWq3Ktm0NDQ1p+/btWr9+vTZs2KCNGzfqE5/4hObMmSNJGh8f18aNG9XX16dNmzapXC5rdHRUO3bs0AsvvKDf/e53Gh8f1x/90R9pxowZevXVV7V792599KMfnfQ6t27dqq6uLg0MDEjau/t9dHT0sNry9PT0KJPJ7BcCSNLAwIC2bt2qrVu3mlMG9dd586mDfQv3ByqyH4p95xy8OQhYuXLlAU8+1NdXDyn2NTAwsN/ndqD736z+GhdffDGnAgAAAAAAAAD8QdizZ49+/vOf60tf+pLWr1+v73znO/r2t7+tl19++ajOBHizwx4WXN/dPzExIZ/Pp3Q6rccff1xtbW163/vep0ceeURnnHGGzj77bD388MOaNWuWeezIyIi+9a1v6aSTTlIymdSKFSvk9Xo1d+5czZ8/X8lkUh6PR+3t7QqFQmppadEjjzyiWbNm6cMf/rAeffTRSa21vgN/8eLF6u7u1sUXX2x2uS9ZsmRSg4Lru+3rbXj2lclkzE77unpboXrrnbd7zlQqdcDHT8Y7nSI40Hr3PTnwdp/BwMCAKeq/3fve174DmTs6Ovb7GQAAAAAAAACmK9d1NTQ0pNtuu+1YL8U47BMBmUxGgUBAqVRKO3bsUKFQ0Jw5czRz5kw98cQTWrdunU455RTFYjFdfvnlKhQKqlQqkqSTTjpJp5xyihoaGpTJZJTNZpXJZFQsFhWNRnXSSSfp1FNP1cTEhJLJpMrlss477zy9/PLL8vl8mjt3rrxe76TX3Nvbq0suucQU/zs6OjQ4OHjAGQBvZzKDcFOplCnOL1iwwLTlOdDXuxXYD8a+O/APpgC/73t5uxBh33UdTLsiCv8AAAAAAAAAcHw47CBgZGRE+Xxep59+ujZu3Kjh4WEVCgV5vV5t27ZNF110kbZv3
65gMKhXXnlF3/nOd1QoFMzj29ra9MADD+jrX/+6JCkajSqbzWp8fFzJZFKpVErnnHOO0um0hoaG9OKLL+r111/Xpk2bNHPmTAUCgUNee29vr7q6uswu+Mm0CaoXxlOplBYvXnzAa+qDeff1TmHD4sWLD7uNTiqVMoX9DRs2HFRB/s279w+k/n73bWX0bus40PMDAAAAAAAAAI6uww4CJOnpp5/WZz/7WfX19enRRx+V4zhav369XnjhBc2ZM0ennHKKstmsJKmrq0v5fF6S9Morr+iFF15QW1ubzj77bF122WU699xzdemll+qUU05RpVLR8PCwXNdVPp9XNpvV4OCghoaGdOaZZ8q2bZXL5XddX31o7dsV+uuDbidThB8YGDDF8bd73p6eHvX29h7UHICOjg4zc+BwdHd3myL8m2cXvJ19A4O3Cyrqn82B5gEcSD2MqM9KAAAAAAAAAAAcG1MSBPT39+vcc8/V6aefrm9+85u66667NDw8rHnz5qlcLiuXy6lQKCgUCqm5uVm5XE6jo6O65ZZbtGHDBm3fvl1f+MIXtGzZMl111VXq6urS6aefrmg0qkqloueee04DAwPasmWLfD6fTjvtNI2MjOj555+X4zjvur5MJqNUKrVfkXxfky1y19UH63Z2dqq/v98Uv1OplFavXq1UKmWG5taL8m++VtobJAwODmrlypWHFQQsXrxYPT095vUONBT43d7L4sWLDxgG1AOMA4ULB2qTVD8lcaDhwwAAAAAAAACAo2dKgoDBwUFls1ktXbpUTU1NeuKJJ/TTn/5U7e3t+t3vfqe+vj4999xz2rZtm6LRqBzH0ebNmzU6OirLsnTCCScolUrpwQcf1De/+U0988wzWrNmje6//3498MADevrpp7Vp0yYNDw9rZGRE8+bNk2VZB3UaoG7p0qWS9oYW++7K7+7u1rJly7R161ZzMuBgLV++3IQHF198sQYHB+W6rkZHR9XZ2aklS5aYa3t7e00Rfd9rXddVT0+PVq5caYrmqVRKW7Zs0ZYtWw6qH3+9BVG9DdHy5csn/V5Wrlxp1rd69WrzGaVSKfX396ujo+NtByrXBy7X1YOOfd8zAAAAAAAAAODYmJIgIJvNas2aNVqyZIm+9KUv6ZprrlE6ndbatWv15JNPamBgQH19fbJtW9VqVYFAQGvWrJHX69V5552nYrGoV199Vffdd59OO+00pdNpvf7663rllVeUzWb1+uuvq1arqVarKRaLKZPJKB6PKx6PH/QaM5mMFixYoIGBAfX09OxXhO/t7dWCBQsOaTf+JZdcouXLl+/X/qY+e+DNLXGWL1+uJUuW7HfyYGBgQEuWLJlUwXzf9buuq8HBQXV0dGjlypVqbGw85OJ7fX0bNmzQ3XffbUKNrVu3asGCBW97wmDlypXq7u4260mlUua5AAAAAAAAAADHliXJPZgL58yZo97eXv3nf/6nbr/99rfc39bWpscee0zlclm/+tWvlEqltHPnTg0PD+vll19WuVzWOeeco5deeklLly7VwMCARkZGFA6HTeugN954Q1deeaU2bdqk1157TYFAQG1tbSoWi9q1a5fC4bCGhoaUy+U0f/58vfjii9qzZ89+63j++ed1yy236Be/+MWUfEA4sFQqpdHRUUnab+AyAAAAAAAAAODY6+7uNl1ofFP1pENDQ1q2bJl+/vOf69VXX9Vjjz2mSy+9VJL03HPPafPmzdq+fbt27Nihbdu2SZIeeughNTU1qaWlRUNDQ1q0aJE2bdqkTZs2adasWQqFQvJ6vfL7/Zo7d67K5bISiYTWr1+vrVu3qlAoTNXyAQAAAAAAAACYlqYsCJCk//7v/9YvfvELfexjH9ODDz6oHTt26Pzzz9fExIQqlYoqlYr8fr/Gx8f16quvynEc+Xw+tba2aseOHdq5c6e2bNmilpYW5fN51Wo1lUol2bYt27bl9XrlOI68Xq+SyaSKxSJhAAAAAAAAAAAA72BKgwDHcfSP//iP+sQnPqGTTz5ZDz30kKrVqtavX68nnnhCrusqEAgol8vp9ddfVywWk8fj0c6dO5XL5TQyMqJ0Oq3h4WGFw2Hl83lZlmWeu1gs6pVXXlEikZBt26Y1DQAAAAAAAAAAOLApDQIkad26dfrBD36gdDotx3H00ksvaXh4WKeeeqoKhYJ27typ8fFxVatVOY6jSqWiLVu2aGRkRIVCQRMTE+YEQKlUUiQSkcfjMdfmcjnTn95xnKlePg5SJpMxIQ0AAAAAAAAA4PjlORJPumbNGu3atUt79uxRNpvVxMSEPB6PfD6fSqWShoaGZNu2Tj31VNm2rZ07dyoWi6lUKml4eFi5XE4TExPy+/2q1WrKZDIqFosaHR3V7NmzNTExIUkKBAJHYvkAAAAAAAAAAEwbU34iQJK2bdumV199Vbt375ZlWdq1a5dee+01bd++XXPmzNGePXv02muvac+ePYpEInIcRx6PRyMjI2aXf7FYlOu6sixLruuqWCyqVCpJksbGxhQKhdiRDgAAAAAAAADAuzgiQUC1WtXY2JgymYyq1aqq1ari8bja29vl9Xo1PDysk046Sfl8Xrt27ZIk5fN5BQIB2batarUqn8+ncrlsCv4+n0+hUEiZTEau66pcLqtWqx2J5QMAAAAAAAAAMG0ckSBAknK5nMrlsiqViiSpXC5r+/bt6ujoMC2DWltbVS6XJUm1Wk3ValW1Wk2FQkFNTU2S9p4MsCxLExMTJlSonwwAAAAAAAAAAADvbMqCgHqbHtd1FQ6HlcvlVCqV5LquQqGQbNuWx+NRNpuVbdtKp9MqFAqybVuS5DiOXNeV4zjmRIHjOCoUCuZ2AAAAAAAAAAAwOVMWBLiua/7t9/tVLpdl27bK5bImJibkOI4sy1KpVFI+n5dt2/L5fPJ4PHIcR7Zta2RkxDxP/aQAAAAAAAAAAAA4dFMWBNQL+h6PR8FgULlcTrZtKxgMqlqtqlwum9Y+Xq9X+Xz+Lc+xb5gAAAAAAAAAAAAO35SfCGhsbFS1WtXo6Kjy+bwqlYocx6G1DwAAAAAAAAAAx8CUBwHj4+PauHEju/sBAAAAAAAAADgOTCoI8Pl8KpVK73hNtVo9rAUdLtu2zeBiAAAAAAAAAAD+0B10EGBZlkZGRnT11Vdr5syZCgQCR3Jdh2z27NkMGgYAAAAAAAAA4P866CBg586duu6669Td3a14PH5c9vz3+Xy64YYbtH79+mO9FAAAAAAAAAAAjgsHHQTUajVlMhk99NBD8vv9x+UMAI/Ho9dff/1d2xcBAAAAAAAAAPCH4qCDAMdxlMlktHbt2iO5HgAAAAAAAAAAMIU8x3oBAAAAAAAAAADgyCEIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAA
AAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGvPdcccd5off/OY3evjhh7Vt27ZjuCQAAAAAAAAAADBVfDfeeKP54aSTTtIFF1ygH/7wh8dwSQAAAAAAAAAAYKrs1xro97//vc4444xjtRYAAAAAAAAAADDFmBEAAAAAAAAAAMA0RhAAAAAAAAAAAMA05pGkH/6ImQAAAAAAAAAAAExHvqefefpYrwEAAAAAAAAAABwhtAYCAAAAAAAAAGAaIwgAAAAAAAAAAGAaIwgAAAAAAAAAAGAaIwgAAAAAAAAAAGAa85y98Gz99re/PdbrAAAAAAAAAAAAR4BHkv7iz//iWK8DAAAAAAAAAAAcAbQGAgAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGiMIAAAAAAAAAABgGsgFwMoAACAASURBVCMIAAAAAAAAAABgGiMIAAAAAAAAAABgGpuyIOCuu+7SunXrtGjRoql6SgAAAAAAAAAAcJh8h/Pg6667Ttdcc438fr+q1aruvfde9fX1TdXaAAAAAAAAAADAYZp0ELBw4UKtWLFCLS0tkqSnn35af/M3fzPlCwMAAAAAAAAAAIdvUkHAokWLtHz5cg0NDdECCAAAAAAAAACA94BJzQjo6+vT+eefL0kaHBzU4OCg7rrrriOyMAAAAAAAAAAAcPgOaVjwkiVL1NXVpVtuuUWnnXaaBgcH9eSTT+q6666b6vUBAAAAAAAAAIDDcEhBQF39hEBXV5eeffZZffazn6VlEAAAAAAAAAAAx5FJDwt+OwwMBgAAAAAAAADg+HNIJwIWLVqkdevWmTkB69at4yQAAAAAAAAAAADHoUkHATfffLNuvfVWDQ0NqaurS11dXRoaGtKtt96qm2+++UisEQAAAAAAAAAAHKJJBQELFy5UV1eXfvCDH2jJkiXm9iVLlugHP/iBurq6tHDhwilfJAAAAAAAAAAAODSTCgLS6bT8fr+2bdv2lvu2bdsmv9+vdDo9ZYsDAAAAAAAAAACHZ1JBwPDwsCSps7PzLffVb6tfAwAAAAAAAAAAjj3fZC5+5pln9OSTT+qKK67QJZdcop6eHknS8uXLFYlE9Itf/ELPPPPMEVkoAAAAAAAAAACYvEkFAZL0ta99Tb/+9a+1YsUK3XrrrZKk3bt36ytf+QohAAAAAAAAAAAAx5lJBwHS3pMBixYtmuq1AAAAAAAAAACAKTapGQEAAAAAAAAAAOC9hSAAAAAAAAAAAIBpbFKtgRYtWmQGAx9IoVBQT0+P+vr6pmRxAAAAAAAAAADg8Ex6RsDExATFfgAAAAAAAAAA3iNoDQQAAAAAAAAAwDRGEAAAAAAAAAAAwDRGEAAAAAAAAAAAwDQ2qRkBfX19zAYAAAAAAAAAAOA9hBMBAAAAAAAAAABMY5M6ESBJCxcu1IoVK9TS0vKW+wqFgnp6ejg1AAAAAAAAAADAcWLSQcBNN90kSfrrv/5rPfPMM1O+IAAAAAAAAAAAMHUm1Rpo0aJFamxs1Jo1awgBAAAAAAAAAAB4D5j0jIBKpaJt27YdibUAAAAAAAAAAIApNukgIBAIaM6cOUdiLQAAAAAAAAAAYIpNKgjo6+vT6OioPvWpT2nhwoVHak0AAAAAAAAAAGCKTHpY8Le+9S2tWLFC//Iv//KW+wqFgnp6etTX1zcliwMAAAAAAAAAAIdn0kHAM888o0WLFh2JtQAAAAAAAAAAgCk26RkBAAAAAAAAAADgvWNSJwIWLVqk5cuXKxKJHPB+WgMBAAAAAAAAAHB8mVQQ0NfX95Yi/7uFAwAAAPjDFo1Glc/nFYvFZFmWwuGwwuGwRkdHlUgkNDY2pnw+f6yXCQAAAADT1qRnBLy58F+tVnXvvffq+9///pQvDgAAAO9dsVhMktTS0qJkMqlQKKRIJCK/3y+PZ2+HStd15TiOcrmchoeHNT4+rt27dx/LZQMAAADAtHNIrYH6+/v1ta997UitCQAAAEdJvVhfq9VUKpUUCoXk8/lUq9Xk9XpVrVZVqVTk9/tVrVbf8bnqjw0Gg/L5fIrFYmppaVE8Hpff75ff75fruvL5fPJ6vfJ4PLIsS6VSSZFIRI2NjapUKsrlcnrjjTe0a9culcvlo/ExAAAAAMC0NunWQJK0fPlyXXHFFZKYCwAAAPBeUG/L09LSIq/XK7/fL6/XK5/PZ/5dL8xbliXp/+3WHx8fV7FYVKVSUSaTUaVSUSAQkGVZSqfT8vl8amxsVKFQkNfrVTAYlNfrVTQaNdfVb9s3BPB6vcpmsyYg8Hg8CgaDikQiam1tVTab1e7du7V161YVCoVj/AkCAAAAwHvXpFsDvXlOwM0336xbb71Vt956K6EAAADAcWDfnvzNzc0KBAJKp9MKBoOKx+Om0O84jnw+nyzLks/nM8X5SqUir9erUqkky7LU0NAgj8ejSqUiy7JMP3+Px6NEIqFAIKBarSafz6dyuWyK/bVazZwE2DcAKJVK5vUDgYBs25ZlWfJ4PPu1DIpEIkomk5o9e7Y2bdqkzZs3H5sPFAAAAADe4w6pNRCDgQEAAI4v0WhUHo9H0WhU4XDYtNqJx+MKhULy+/2SZHbeezweU6j3er3mKxAIyHVdVatV2bYt13UVj8cl7T1V4LquksmkHMeR1+uVbdum2F+r1RSJRMzzh8Nhc8rA5/OZICEcDsvn85n1BINBlctl891xHBNWhMNhVatVRaNRzZ8/X0888QSDhQEAAABgkibdGojd/gAAAMdeIpFQOp02u+yDwaAp0tcH8tY3bwQCAQWDQUl72zpGIhF5vV6z07/eoz8QCKharSqXy5md/sFgUIFAQJVKRdVqVX6/X5VKRY7jqFKpqFgsynVd+f1+BYNBc6qgftLA4/GY+QIzZsyQbdvm9EE2m1UkEpFlWSagKJVKCgQCyufzymQykv5fWNDW1qaLLrpIjz/+uEZGRo7ZZw8AAAAA7zWTbg0EAACAoycej5sWPjNmzJDf71djY6NCoZBisZgqlYpc15Uk0+/fcRz5/X6FQiGzK79UKqmpqUnRaFSNjY3K5XJKJpPK5XJqbm42O/QLhYKSyaQqlYqamppULpeVSCR
kWZaq1ep+LYNqtZpqtZrZ9V+pVPY7BZDL5RSNRs0MAMdxVKvV5DiOCoWCaQtUP0Hguq5pM5RIJFQqlZRMJpXP51UsFmXbtrxer7q6uvTII4+YFkMAAAAAgHdGEAAAAHCcicVimjFjhiqVimKxmFpaWhSPxxUOh+X1ehUKhRQIBJTL5RQKhUxv/nprn1AopGQyqVKpZIYEBwKB/VoHSTIBQrFYlN/vl8fjMTMEQqGQJJlrx8bGlEqlZNu2QqGQarWawuGwJiYmFIvFVCwWTXsiy7KUzWYVj8fl9XolyQQI9RMCwWBQuVxOHo9HjuPIcRwzH2DfUwq2bSuZTCqVSumNN94wJwjOP/98PfDAA0f7VwMAAAAA70lTOiOAYcEAAACHrj4YNxQKKRqNKplMKh6Py+fzyefzKRwOKxqNmsJ5vW2Px+NRPp9XJBJRMBg0bYACgYAikYhqtZrK5bJisZgaGhqUzWYVDocVDofV1NQkSRodHVUkElEoFFK5XFa5XFYgEDABQ2NjozlpUG/vUx/oW/+ezWbN7v5AICCv1yvXdWVZlizLUjQaVS6XM/MFQqGQOQmwbwiw7zDj+tqLxaIaGxu1Z88ec7qgs7NTGzZsODa/LAAAAAB4D5n0iYCJiQlT7L/55pt1+umna8mSJVq0aJG++MUvHok1AgAATGsNDQ2aO3eu2tvbFQqFFAwG5ff7lUwmzWDdQCBgCuTS3t389fkAlmUpEomY9j71NkD5fF7xeFyBQECO45hWPDNmzDBhQL1IH4/HzeDg+mvWi/qxWEzj4+OKxWLmtYvFosrlsvx+vwkLUqmUOQHgOI4ymYyZGVB/rnqw4fF4lEgkJEnDw8NKp9OybVvlclm1Wk2jo6NyXVeu65r2QvWTC47jKBwOq7m5WYlEQtls9ij/xgAAAADgvYXWQAAAAMdALBZTa2urotGo2tvbFYlEFIvFlEgkTCE+FArJ6/WaQnt9l36hUDDDeC3Lks/n08TEhLxerxKJhJqbm819kUhE+Xxe0WjUtPHJZrPyer1yHEeWZZmCfb3ff/2rVqvJdV1NTEyYUweJREKZTEbJZFJer1d+v1+WZSmZTGpkZETBYFD5fF7hcNi8l/oJBY/Ho0wmo8bGRmUyGbmuq5GREdN+aGJiQtFoVJVKRTNnztTIyIg5DVA/HSDJnEpIJBI677zz9Mgjjyifzx+z3yUAAAAAHO8IAgAAAI6iQCCgOXPmqL29Xc3NzabnfSqVUiwWM6cBbNtWMBhUuVxWqVRSpVIx7Xgkmb7/9TZCwWBQsVjMDNyVZAYGO46jUCikarUqn8+npqYmZTIZ0/qnpaVFlUpFoVDIfK/37i+VSqrVaiY4sG1bkUjEDAyuVCryeDwaHx+Xz+dTtVo1Jw9CoZAZKlwPAerfLcsycwUk6Y033pAkZbNZJRIJjY2NKRwOK51Oa9euXeYEhOu6ymQyampq0tjYmLxery644AI99NBDKhQKx+aXCgAAAADHOYIAAACAo6De8z+dTmvBggWKxWKmJ38oFFIikZDf75frumbnfzablc/nUzKZNKcAPB6PZs+eLUmm4O73+xWPxxUMBuU4jlzX1fj4uJqamszQ3fqMAa/Xq9HRUbMLf98e//UTBPUTAcPDw2pqatLExISCwaCKxaKCwaBs21ZjY6M8Ho8qlYoJKmzblsfjkW3bJrBwHEeSNDIyYtoEWZZlgoa6eDxu/l0fHFwul2VZltra2pTL5VQsFs0sAZ/Pp3g8bgYQX3TRRerv71epVDoqv08AAAAAeC+xJLn73nDHHXfoxhtvPEbLAQAAmH6SyaTmzp2refPmKZVKmd38ra2tpn9+rVZTPB5XpVJRJBJRIBCQ67pmMK/jOPL5fMpms0omk6pUKorFYioUCkqlUma3fjgcNm10AoGAqtWqQqGQKeb7fD6Nj4+rWq2atj21Ws3s5q8/bufOneZkQmtrqyqViizLMgX+SqWiVCqlXbt2mQHEiURClmWpXC6rUCiYQGDf91AfIlwv9tdPG9i2Ldu2zRyE+kDg+tyApqYm7dmzRx6PR6VSSaVSST6fT7lcTh6PR2NjY9q9e7fWrl17LH/VAAAAAHDc6O7u1qpVqyRxIgAAAOCIicfjmj17tmbNmqWWlhalUilTCI9EIvsN6HVd17TxqbcBGh0dVSQSUSaTUTwe18jIiOmhn0qlzGPqA3R9Pp9qtZoymYzS6bRKpZKCwaA8Ho9CoZApnEciEXN9ffd/faZAve//zJkz5fP5ZNu2WV+xWFQoFFKxWFSlUtH27dtN+OD3+zU2NqaGhgaNjo4qlUopHA6bmQCSzGtJewcENzc373dyoFQqaWJiQpVKRY7jmNMNruuqVCqpoaFBxWLRzCOohxz1oMB1XX3oQx/S448/fsx+5wAAAABwPCIIAAAAmGL1AcD1AKCxsVHBYFDBYNAU8iWZXvySZNu2stmswuGwKpWKKYDXajUlEgklk0m5rquWlhbl83mzqz4YDJqe+x6PR6lUSk1NTfL5fPL7/fL5fBobG1MoFNLw8LAp6tdDA0kaHR2V4zjmOdva2jQ6OqqWlhbt2bNHbW1tymQyam1tNQGC4zhqbm5WLpdTNBpVoVCQ3+/X+Pi4wuGw8vm8crmcKfA3NjaaVkOjo6PyeDzauXOnPB6PGhoalMlkTHukxsZGZbNZxeNxTUxMqFqtqlKpmKHHO3bsUDKZVFtbm0ZGRlQoFOQ4jmKxmObMmaPt27dr27Ztx+z3DwAAAADHG4IAAACAKZRIJDRv3jwtWLBAzc3NCoVCCofDikajCoVCCgQC8vl88nq9pihv27YZnJvNZhUKheT1ejV37lzTqqdWq2nmzJlyXVfxeFzj4+OmoF8fFFypVJTJZBQOh7Vjxw4Fg0EFAgFzssDr9ZoBw36/X7t27TLDicPhsMrlsoLBoGnzk8vlTDE+kUgon8+bXfo+n0+ZTGa/In48Hlc+n9eePXtk27b5TOrzBizLMt/3PQkwMjIiy7I0NjYm13U1MTFhdv0nEgmlUikTKpTLZTNUuVKpKJlMmrZGoVBIpVJJH/jABwgCAAAAAGAfBAEAAABTIBaLadasWZo9e7ZaW1vNKYBAIKBwOKxwOCyfz2dCANd1TU/+arWqhoYGhUIhM9i3vju/Pvg3mUyqWq2qWCyawb/hcNjc5jiOCoWCGhsbNTIyolQqJZ/PJ8uyFI1GFY1Glclk5PP5zImDeqsen2/vfwnrLYLqpxQsy1IymdTY2JgSiYQp+FcqFRWLRVmWpUwmo1qtpp07d5prA4GAZs6cqWKxqGw2a/r+7/u89devf6+r3+e6rhlQXA8kxsfHNXPmTPn9fg0PD6tQKOzXYml4eFjz5s1ToVDQWWedpY0bNx7lvwIAAAAAOD4RBAAAABymZDKpE088UXPmzFE6nVY4HFYoFFI0GlW5XJYk00u/PiC3VqsplUpJkhmK6/F4VK1WJUnZbFaRSESlUknpdNoUx/1+vySZIbuO4yiVSqlarSqVSqlSqcjr9apWq8m2bY2OjioYDKpQKKhSqSiXy5ld/36/X6
VSyQzgtSxL1WpVlmWZtkH13fqZTEau62p0dFTxeFyWZampqUn5fF7Dw8NqbGxUrVZTJBJRuVzW2NiY/H6/arWa2tvbVS6XFQgEVC6XTcDh9Xq1e/du+Xw+ua6730mBekBQv3Z0dNTMH4hGo2ptbdXOnTslSePj4yoWi0qn0xobG9OMGTNUKpUIAgAAAADg/yIIAAAAOETRaFSzZs3SnDlzNGPGDNN2Jx6PKxKJKBQKKZVKmV79ExMTcl3XDM/ddw5AfYd9qVSS4zhqaGiQbdtqaWkxRXHLskxhvP5z/XlqtZp2796taDSq8fFxeb1eWZalUCikTCYjj8cjr9drgoimpiZlMhkzG6C+xvrP9UJ9/efdu3ebIr3X65Vt29qzZ49peZRIJDQ2NqZSqaRaraZ8Pq94PC7XdTUyMqJ4PG5mI5RKJUUiESUSCUlSMBhUqVRSKBTSxMSEgsGg8vm8WltbzZDiepDhuq7GxsbkOI6SyaRKpZISiYQCgYA5uVCpVMxnDAAAAAAgCAAAADgkiURCZ5xxhlpbW9Xc3LzfKYB6D/16cb1etE8kEmaXe33XfbVaVT6fVywWUzweVzgcNrvzJWlsbEypVEojIyNqaWkxYYDjOKbHf30YcCQSMY+rF/2TyaSampo0PDysWCxm2gD5/X75/X6Vy2VFo1Ezs2BkZESNjY0aHR1VKpXSnj17lEgkzNDicrmscrks27ZN26JaraaxsTEFg0E1NTWpVCqpWCyqUqmY++ptgKS9LYIqlYpGR0fl9XpVLpfNaYhgMCjHcRQOh81pgXpxv6GhQePj44rH48pms8pkMorH46YdUb01kd/vVyQSUVdXlwYHB4/NHwgAAAAAHEcIAgAAACap3gZo5syZkqRIJKJYLKZwOKxgMGi+KpWKbNs27X5isZhpA1StVlWpVJROp82uetd1TZHfsixZlmXa+EQiEVNMr1QqmpiYUENDgynmp1IpeTweZbNZtba2msJ+vcheL/b7fD7t3r1bM2bMMAV3x3HMQN/6EGCv16tMJiO/36+xsTH5fD6Nj4+bkwqpVMr06K+HA8Vi0fycTqdVq9WUzWZl27ZZhyRz4kCSeW1J5j3X32d9bRMTE+YEQjKZ1OjoqMLhsGbPnm1OP/j9fuXzeSWTSe3Zs0czZsxQa2vrMfjrAAAAAIDjD0EAAADAQYrH45o7d67mz5+v5uZmxeNxM4i3XsQPBoPmFEAsFjMDeT0ej8bGxkwbn5aWFtm2beYFBINBU1BvbGw0MwMcxzFDf3fu3KmmpibTXqharcp1XQUCATmOo0wmo2AwaIrpxWJRgUBAmUxmvyHE4XBYXq9XkuTz+VSr1eTz+ZROp7V79261tbVpeHhY6XRaw8PDKpfL8vv9qlarev3115VKpczg4FwuZ4YWx+NxjYyMqFgsKpPJKJVKqVarqa2tTaVSSeVy2QQUw8PDpn1RfYiyz+eTbdtmTfWQwLZtOY4jSRoeHpZt2woEAtq+fbvS6bQCgYC5fufOnUqn09qxY4eCweCx/HMBAAAAgOMGQQAAAMBBSCQSWrBggU4++WTFYjFFIhGFw2HF43GFQiEFg0EFAgHlcjmzw12SadUjSalUyswFcBxHtVpNlUpF5XJZkUhEktTU1LTfiYB63/76Dnlp76Dg4eFhxeNxDQ0NmaHEzc3NZjf/yMiIGa5b76Hv8Xg0Pj5uevGHw2EzR8Dj8WhoaEiRSES7du3a77vrukqn0xoaGlI6nTY774vFosrlssbHxxWJRLR7924TcNi2bYrxo6OjikQiqlQqpk1RKpUyrX7qQ5TrpxKam5vluq4ZMFwsFhWPx82MhWq1atonvfHGGwqFQmptbdX4+Lgkafv27WpqalJ7e7tmz56t7du3H8W/FAAAAAA4/hAEAAAAvINAIKB58+aZgcBer1cNDQ1KJBIKh8MqFoumiG1ZlhobG+X1ejU+Pq5YLKZqtSrLsuT3+yXJtAaybVuVSkXNzc3m53176O87tNeyLNN6R5IKhYIaGhoUiUTk9XoViURULBZNqx+Px6NwOGxOC9RqNRMC1Hfb73tyYXR0VIFAQJH/n707eXLryq5Gv27ft7hAIpEdG1Gy7JHD/v+Hnjnsie1yiZJIJpmZ6C6a2/ffgHF2MT8/x4s3eCIl7V+EIiVmB4AYVJ199lq2DV3X6fGKzQHTNLHb7WjAMJvNcD6f4bouyrKEqqrYbrcIggCPj48wTZO+TnQgiIHE4+MjFEVBXdfYbDZQFAVlWdLGhCRJuL+/hyRJmM1mFCmUpinCMISu69hut7i6ukKe58jzHG3b4ueff8b19TXSNEWSJAiCAG/fvuWtAMYYY4wxxhgDDwIYY4wxxv5Xruvi9vYW33//PRzHged5CMMQwOeyXcMw4HkeNE1DnueQZZkid+I4xuFwwDiOuLy8RFEUz7YAFosFpml6FntT1zU8z3uWkz+OI6qqQlVVmM1m2O12SJIEdV1jGAYqDxbDhLquKRZIlAXneQ5FUZAkCdI0hWma0DQNsixT4a6I6ZmmiQ7PReSQGGL4vo+6rlGWJYZhwHq9hud5FCdUFAX6vqcMfzGw0DQNuq5T3JF43mLwIQRBgP1+T4/j6emJNiOmaYKiKPQ83717h+vra5zPZ8znc9po8H0f2+0WDw8PuLy8xHw+x9u3b3+rtwxjjDHGGGOMfZN4EMAYY4wx9v/A9338+OOPSJIEjuNgNpshiiIq9tV1HWVZ0iF5EARQFAVt29IWQBzHdFAvonYWiwUA/I+D8GmasFgs6NAb+BwBNAwDgiCg7QJN09D3Pf2OqqqgqirKsoSu6wjDkDYN6rrGNE20YbDdbuG6Lh3Ii00ASZIwDAMOhwOWyyVF/Gy3Wxo+BEGAoigowshxHJRliePxiNlshuPxiCAI0DQNPM9D0zQwDAO73Q6vXr2ioYjYcJAkCYqiUBGxrutQVRVRFFFJsugm6PuetiLEazJNE969ewfXdfH27VsEQQDTNJFlGRzHgSRJePv2LZIk+Y3fOYwxxhhjjDH27ZH/37+EMcYYY+zPJQxD/PM//zNWqxVmsxkuLy+pHFjTNGiaRofWVVXRgTvw+YC8KAqcz2eK/6nrGn3fYzab0SF+XdfI85w2AqZpwn6/pw4B8efi5n3XdSjLElVVoSgKeJ6Huq6pd0BsGAB/GyB0XYe2bVFVFd3OL8uSDtZF9I44lF8sFthsNjBNE9vtFpZl0e3+0+kEwzBwPp+h6zryPMfpdILjOOj7HkEQ0I38qqoQBAE9r59//hmqqkLTNCpQtiwLvu9T8XIcx4iiiIYdAChySUQbRVEEz/Po78VxHLRtSxFJp9MJnufRax4EAdq2/WrvI8YYY4wxxhj7VvBGAGOMMcbYF77//nvEcQzHcbBcLhFFEQzDoHJgkbnf9z0URUEcx1BVlaJ/JEnCYrGALMvY7/cYxxEXFxcoigLTNNEh/Zc31Y/HI7quo0EBAPod4uBfURS69S4eg4ju6boOh8MBlmVhvV7DcRxM04QgCHA6nTCbzagfYLFY0ODi8fGRDuxPpxNtLgCgY
mLx0bZtZFmGIAiQZRkduKuqiq7rUFUVkiRBnudwXReHwwGLxQK2bVMfwN3dHYZhoB6DYRgAgG76p2kKVVUpZklsAQCfexHyPIfv+1ROLEqM9/s9bTkcj0dYloXtdgvf97FarX6z9w5jjDHGGGOMfat4EMAYY4wxhs8H3i9fvkQYhri9vcV8Poeu61QK3LYtZPnzMqXI2D8ej/A8D+M4wvd9yuafpok2BkQmvuM4WK/XmM/nFP8jPgZBQH0AAOgmf13XmM1mGMcRYRjScOBwOMD3fRo+lGUJwzDg+z6V/IrDdU3TqHi46zrIskw9AuLxjuOIIAigqiptBNR1DcMw0LYtdF1H27bo+x6Pj4+IoghZlsG2bRyPR8znczw+PiLLMriuC0VRUFUVTqcTvQZRFOF4PNJzBPCsZHm73VJEEQAoioJxHOn1E6/VbrejmCPf9/HTTz8hiiIqOy7LEn/9619xd3cHy7Kw2+1++zcTY4wxxhhjjH1jeBDAGGOMsT890zSRJAkWiwWur68xn88RBAFs28YwDNA0jTLxAUBVVUzThDiOcTwe4TgOHWDbto3NZoMgCOjwWhxox3FMXycOwAFQpE/XdWiahjL+bdtG27Z0mN+2LcqyhOu6dCtfdBaIQ3bxT9u2UBQFdV0D+Hyj3vM8AEAURWiaBkmSUEa/eBwi9kiWZSoUFo+16zrM53OKKBLf9/HjR7x8+RKfPn2CqqoYhgFhGEJVVaRpisvLS6RpiiRJcDqdnhUk930PVVURxzFkWYau61RWDICGA7Iso2kaVFVFA5Gu62CaJqZpQp7nsCwLlmXh4uICmqbh/v4eFxcX/7++dxhjjDHGGGPs94A7AhhjjDH2p+Z5HgzDwJs3b3Bzc4PlconZbAbLsmhA0HUd+r6H7/tUtisKa0VPQNM0qOsawzBgPp/DsixUVUW35G3bpq/J8xzn85l+btu2yPMceZ4jCALaAuj7Hk3T0OeyLEMURSjLkjYLhmGgmCLx88UGgRg+KIqCi4sL2mj48la9LMto2xZt2+Lx8ZEej/jd4tB9GAaYponD4UCvma7rtO3w9u1beJ5HwxJFUVCWJa6vr2FZFjzPo2if5XIJXdeRJAltC4ifLyKDxnHEbrfDMAwU/QN8HgxEUQTTHX+7dAAAIABJREFUNGGaJizLwjAMaJoG6/UaqqrCtm2kaQrf97HZbL7ae4sxxhhjjDHGvhW8EcAYY4yxPy3P8zCbzfDmzRskSYLVagXP86gPYBgGSJKEMAyx3++hKAokSaLIGXHrPYoibLdbLJdLlGVJh/Ke5z2Lu3Ech363LMtI05SKgefzOR3+ixv9dV3TQb7v+/A8D13XwTAM9H2Puq6hKAqKokAcxyiKAkmSoGka+rnb7ZaKfMXj1nWdCn1N08R6vYZhGIjjGLqu02OL4xhpmiIMQxyPRwRBAMuyME0TDMPA4XCAbdt4eHjAzc0NHh4esFgsUJYlfN/H8XikSCXP8/D09IQwDJGmKbquw9PTE8ZxxOPjIxRFwfl8pu0DSZKgaRryPIdhGMjznF6f7XYL27ax3W6xWq3Qti19z8PDA5IkgWmayPMc8/n8a729GGOMMcYYY+ybwYMAxhhjjP1phWGI77//HtfX11gul7Btmw7rJUmiDH7P8yhuJk1TGIaB2WwGVVVxPp8hSRKSJME0TbBtG7vdDhcXF3h8fKQOAUmSoOs6HVhP04QwDJ/1AgzDQP0AcRzDNE3K9Z+mCV3XUWmuiN/pug5JkqDve/q6vu/pcH+aJpRlicVigc1mQ88jy7JnOfyGYUBRFADAdruFYRhI0xS6ruN0OtH3+L6PcRxhGAZFGVmWhePxiCRJUJYldR6I5ys2EWazGZqmgeM4MAwDWZahaRrIsoyu66CqKiRJomGIIOKVRFfAOI44HA4AQAOa2WxGX/fTTz/h5cuXsG0bT09Pv+VbijHGGGOMMca+STwIYIwxxtif0tXVFX744Qe8ePECSZLA9304joO+7ymbP0kSaJqG8/kMALQdIMsyiqLANE1UEpxlGWRZhqIoCIIAfd9jPp9js9lgmiYsl0sURQEAFOfjeR7d3BeRPPP5nA7zh2Gg2KBxHNE0DeI4hmVZMAyDIop2ux0sy0Jd19A0jTYEzucz5vM58jzHNE2wLIsid3Rdp4GBaZpomoZieDRNo+f75eH7MAw4HA4IggCbzQZhGCLLMtzc3FB8j9g+WK1WkCQJp9MJlmVBkiSs12sEQYCqqnBxcUGv0ZfxRkJZltB1HXmeQ9d1lGUJx3GoI0D0GfR9j6IoUNc1Li8vMY4jxSB1XQfHcZAkCZcGM8YYY4wxxv7UeBDAGGOMsT+dN2/e4PXr17i9vaX8esdxMI4jdQA4jgNd19F1HaIown6/R9d1sG0bsizDcRwoioKqqij6Rtx8z/McTdNgHEfMZjM6SLdtG+v1GpeXlwA+37xXVZUOxMUAYJomNE2DpmlQluWzxyi2BkRWf1VVCMMQ5/MZURRRoW/XdVAUhW7Xi6JdseGQpikNLcRz2m63FKujKAptFTRNQ8XJcRxju93i4uICZVlSUa94/qZpUhzPcrnEbrejA/6LiwtUVYUoinA4HKBpGna7HYIgwHa7pe/b7XZ0w98wDIzjSMXFYpMiz3NomkYbBXVd4+HhAbZtU2yQKG6uquorvMsYY4wxxhhj7NvBgwDGGGOM/am8efMGr169wsuXL2EYBi4vL+kmv7gxb9s2uq7DNE3IsgyapmE2m0GWZRyPR0iSBM/zAADH4xGaptFgQJZl2LZNEUCbzYYO8BVFoRgfAIiiCACoR2CaJrRtS6W/i8UCjuPgeDw+iw2qqupZBn7f99B1nboFdrsdXNfF8XiE67rwfR/7/R5RFEHXdTRNg9lshtPpBEVRqMPAsiwq612v19QHIH62iAsyDAPH4xFhGGIYBliWhdPpBNM0oaoq9QKo6t/+p+Y4jnh4eIDv+3j//j2+++47rNdrxHGMPM/hOA7yPEfXddB1HYfDgeKAuq7DOI7oug6+72O73VLnwHw+R1VV1Hvw66+/4vb2loYZURTRJgZjjDHGGGOM/VnxIIAxxhhjfxo//vgj7u7u8OLFC5imicvLSyiKgrIs4XkeVFWFqqroug6maSLLMkRRRF0AmqYhCAIoigJFUZBlGZX5iniecRyhqioMw4Asy0iSBJvNhrYARL7/MAywbRvTNFG2vTjoXywW6Pue4oIAoK5r1HWNKIpgWRbSNMVisXgWHzRNE6qqgu/7sG2bhhtt29LvWK/XsG2bhgBFUUDXdRyPRxiGQTfvfd+HaZrYbrcA8KzEt+97dF2HT58+0UDBdV1kWUYbDsMwoKoqLBYLihdaLBbIsgxJkiBNU2iahnfv3uHVq1c0YFgsFhjHEeM4UiyQGBSkaUobDmmaIkkSnE4nSJJEmwOe5+Hx8RFRFGE+n+O///u/v9r7jTHGGGOMMca+FTwIYIwxxtifwg8//ICXL1/i1atXCMOQ4nBEQXBd15AkCaqqUma+53lQFAVN0yAIAsiyjPP5DNM06SBflmU6fNd1
nW7Bn04nytWP4xhd1wEAbNsGAIoR2u/3FA0EfL45LwYAIhpIPEbLsmgzQNf1Z1/jOA48z4NlWWia5lm2f9u2lKVvmib2+z0cx4GqqrBtmwYAmqZBVVWcTid6HhcXFzgej4iiCKfTCWEY4nQ6UQyRuI0vugNOpxN830dRFLi8vKQBQ9u2NJCoqoqihhaLBe7v7xFFEfq+x3a7pfih2WxGGwdi82K1WmG329HWBADqClBVFZZlUSfBhw8f6PVmjDHGGGOMsT8zHgQwxhhj7A/vzZs3uL6+xsuXLxEEAYZhgGEYcBwHsiyjLEuYpgnXdSnWRpTvAp8P9TVNo0Lh0+mEYRgQhiEkScLxeIQsyzAMgw6eLcuiqKDtdgtd1+n3GoZBt+bF4bvIuhc3/MuypNv/IqJHDAhENND5fEaSJLAsi6KM+r6nTHwxtMiyDKZpUkRQkiSU/S8KgcU/2+2WBh3A5+gjsfEgyzL2+z3GccR+v6fthWEYKIoojmNkWQbXdfHx40caKGiaBkmSEMcxTqcTPM9DlmWoqgqr1QqmaaIsS9rQUFUVu90OwzDQhkAQBMjznF5jsckgBiP7/Z76Eo7HIy4uLrBer3/rtxtjjDHGGGOMfXN4EMAYY4yxP7TXr1/TJoBt2zBNE7Zt0y3/qqoQxzEdxluWhTzPYVkWbNvGfr+nz4t+ANM0KYM+TVPouk6RQSLPXxTjAkAcxxSrI0kSDRK6rqPIoMPhgLZtUdc1Li8vYRgGHfaLXP+6rlFVFWazGYqiQJIkGIaBDv+nacL5fIZlWRRbpGkabNuGpmlo2xa6rgMApmlCXdeYponigkQGvzj8F1FIqqpSYe8wDKjrmjYS8jynYYKmaUjTFHEcoygKxHFMr5/48w8fPuDi4oIKgYdhwPl8xjRNcBwHXdfRBsFisaChQVEUNCApioKKiE3TxDiOqOsasiwjyzIYhoHVaoX9fk9dDowxxhhjjDH2Z8aDAMYYY4z9Yfm+j5cvX2KxWMB1XQRBANM04TgODQHCMESe55SrXxQF5d2Lz+92O9i2Dd/36ZZ/lmXQdR1hGEKWZaRpClVVaXPgy1vq4rEAQN/3MAwDiqIAAN3kV1UVwzDg9vYWT09PVMDr+z7O5zPqukaSJDBNE13XQZIk2g7I8xyu6+J8PiOKIiovbtsWiqKgbVtIkoSyLKnrQAw6RLyRuJEveg1EZr+IMBKdBIfDgUqCRR/Al7FBaZpCURQYhoHdbgff9/Hhwwfc3Nzg119/RZIk2O12mM/neHp6wmq1QlmWkCQJHz9+pI0NsX0QRRGOxyM8z6PoIcMwcDgc0DQNxQ2J52SaJj0GsTHAGGOMMcYYY392PAhgjDHG2B+S53n4x3/8RziOgziO4fs+DMOA53mQZZlurJ/PZ/roui7dPhflwev1mr43TVP4vg9JkuC6LvUDSJIEy7JgGAYkSUJRFFTOK7oF0jSlzoCLiwuUZYlpmqggeD6fwzAMdF2HOI7x8PBApcDic3Vdo+971HUNy7JoQKGqKnzfh+u6aJqGNgjyPEfTNMiyjAYZYRii73vaEBjHkQ77RbSQuGnfNA1UVcV2u4Wqqvj48SNkWcbT0xMNSERZMABkWQZFUXA+nxEEAQDANE0EQUDlvk3TIAxDPD4+YrVa4cOHDzBNE7qu4/r6GqfTCY7joCxLiiASWw8iwklsF9i2TbFMYoAj/u7qukbbthRNxBhjjDHGGGN/Zvz/jBhjjDH2h2MYBl69eoU4jrFarRBFEcZxpMP7uq4pq1589H2fDp9FefA0TZjP59hsNtA0jYpyp2mC67pUsLvf7+m2vOM4UBTl2T95ntMGgIjgEQfrURRRgS4Ait7p+x4AsFqtkGUZDQDKsoRlWRjHEYqiwPd9jOOIruvQNA2KokBd18iyjAqGgyCg0mCxeaAoCm0jVFWFvu9xPB5hWRbCMKSOA1Haq2ka/Q7RL6DrOg1CRLSSpmn0+2ezGfI8h+M4SNMUrutC13XkeY4kSfDhwwdcX19jvV7D8zz88ssvuLu7o6FM0zR04J/nOdq2RZqmmKYJWZYhDEMEQYDj8UibDbquoyxLnE4nBEFAPQ+MMcYYY4wx9mfGgwDGGGOM/aHouo75fI7ZbIbFYoE4jtH3PWXxn04nLBYLunkvDpTFwbk4yDZNkw72kySBoig4HA7PMvdFH4Asy2jbFq7rYrvdYj6fQ1EUOtwXmwBia0AU7UZRBFVV4bouJEnC09MTHMfBNE24ubmhG/qGYWAcR0zThNlshrqu4fv+s40CEREkhhM3Nzc0IBA/p65rbLdb6koQN+x930eWZbi4uICqqrQpIPoQJEnCMAzY7/eYzWb0Og/D8CxGSAwChmHAYrFAWZbQdR2Hw4FKmUWXQVmWuLy8xPv373Fzc4OqqnBzc4PtdgtJkvDw8ECDFwBU3hxFEXUTPD4+AgCurq5wf39P8UJJkmA+nz8bsDDGGGOMMcbYnxkPAhhjjDH2h+I4Dl6/fo3FYgHf9+H7Pt1uFwfgqqpShM2XHQHn85lu0RdFQQXARVFgHEd4nkf/LW6ai6ggcVNd3EqvqgqyLOPx8RFJkiAIgmdFweJxSZKE8/mMruvQ9z2maaJD9Wma6JDfdV2sViuM4wjf99H3PZqmQVmW1BFwfX2Nuq7hui5tANR1TTfugyBA13U0gBB9AwCgKAoNLESnwPF4hK7rVBYsDuOnaaItAJHHL/oI+r6HqqrPugQsy8J+v6fb+9M0wbZtnM9nXF5e4v7+Hre3t3j37h1WqxXu7+/x4sULHA4HeJ6HLMuw3W7p38MwRNd18DwPh8MBm80Gy+USDw8PuL6+xtPTE66urpDnOcUeMcYYY4wxxtifGQ8CGGOMMfaHYRgGrq6ucH19jevra8qpn6YJlmWh6zrYto1xHNE0DZXPipvnYijQ9z0sy0JVVVAUhW7Fiw0BWZapYFcMFEQuv+/7dJC+Xq9xfX0NWZaR5zkdzgOA67q0YTCOIwDg9vYWACBJEjabDZqmged5uLq6wjiO6PuebvkXRUG5+C9evMAwDBiGgb5ODBDCMKTtg67r6PNpmsLzPJRliWEYUBQFNE2DruvwPA/H45E2G8RQQMQYybIMSZIgyzJOpxOapqE4JdEtoOs6drsdHdprmoY8z+m/RTGzGGrc39/TY3Uch+KaNE1DURR0sD+OIx4fH2EYBoqioOGDKCB+fHyEaZqQZRkfPnzA+Xz+Om9GxhhjjDHGGPuG8BUpxhhjjP1h3N7e4scff0QURfA8D13Xwfd92LZNh9TDMOB0OsG2bfR9jyiKaAggbt7bto22baHrOs7nM/b7Pfq+h+/72O/3VG4rMvvFYCAMQ4rC2Ww2uLm5weFwQF3XUBQFsixDURQq6hUbAZIk4fr6muJ7RMTP3d0d3eLvug5VVSHLMtpcePXqFZIkeRb9U1UVFebOZjMMw4Dlckk3/QGgrmvUdY2iKBBFEeq6xmw2Q9/30HUd0zRB13VIkoRpmrDf7yHLMg6HA6ZpwvF4pO0GRVHgui5FHKmqCsdx6L9PpxNtZBiGgfV6Dcu
ysNls4Hkettst2rZFHMd00H95eUmlzY+Pj1AUBe/fv6ehwGw2o4GGiGmapolKjoMgwMPDA3zfh2maX/ldyRhjjDHGGGNfH28EMMYYY+wP4fb2Fq9evUIQBLi4uKBDaRG9M5/P0XUdTNNEFEVI0xTjOCKKItoEcF2XugJEln8YhlBVFfv9nm7BF0VB8ULiwFzcjhc35m9vb7Hb7RAEATabDS4uLqBpGiRJAgBst1ssFgvaBhC35B8eHrBcLimmp+97GgREUfTs1j/wedsBAKqqoudYFAWmaaLtgfP5DMuyUJYlkiSBpmnUh9B1HQzDwDAMUFUVfd9DkiRUVfXs9VNVFdM0Ic9zSJJE+f+qqkKWZWiaBsMw6PZ/13UUn7TZbBDHMc7nM+I4xvF4BACs12tcXl5is9mg73vathCbDmVZQpIkjOMIwzDw9PREWxxiiCI2FR4fH+F5Hg1gxNbHhw8ffuu3ImOMMcYYY4x9c3gQwBhjjLHfPc/z8OrVK7x48QJJklD2ve/7kGWZCnAty8IwDDBNE0mS0CF4XdfwPA+n0wme51HUTxAEUFUVWZZB0zQ6fAaAw+FAhboi/9/zPLpFPwwDZrMZdrsdXr58SQW54zhC0zS6gS8GAbIsY5omzOdz9H1P/7Rti/l8/iwaqG1btG0L27YxTRMNH8ThvzAMA4IggOM4yLKMSpLF53RdR9d1aJoGkiQhDEPsdjvEcYwwDLHdbmFZFoIgQJIk2O/3mM/nSNMUrutC0zQoikJdAiIuSWwUjOMI13VpS0FsWdi2jaenJyRJgg8fPuDm5gaPj4906D+OI+q6hq7rVMIMgGKA+r6nvyPHcWhgk+c5rq6u8P79expqiEEJY4wxxhhjjP2ZcTQQY4wxxn7XdF3HP/zDP+Dq6gq2bVPUTRiGmKYJRVHANE0YhoG2benWetM09O+n0wnH45GGAIZhUDGuuF0vcuv/7+idJElwPp+RZRn1C4zjSAf0SZLQLX7btimuRpQDC+IQfxgG+h1BENAQQPzMqqpQVRXF44hYIEmSKOtflPSKnztNE5UB13WNp6cnVFVFfy62BADg4uKCDs91XaePu90Ouq4jTVMYhkGPKU1TyLJM2wuPj4+YpgmbzQaqqqIoCopiurq6gizL1L2Q5zniOMavv/6KJEnQ9z1c14UkSYjjGFmWUdGwpmnYbre0ySDimLIso0N/3/cpqsk0TeR5jvV6/Ru/IxljjDHGGGPs28MbAYwxxhj7Xbu6usLt7S0uLy8RRRFkWaZ+gLZt6YC5rmvK2xe36du2xeFwQBRFVGbrui4URUFd11BVlQ6dd7vds6x7UQAsbveL/P80TSkmR2wMyPLnuxfisF+SJGRZBgDwfR+SJKHvewzDgK7rMJ/P6QD/y+8TUT/isH4cR1RVRRn7YoPgy7ih4/EIx3Eo6icMQ9i2jfP5/GyDQGwK7HY7es6iAwD4XGDcNA1FBIlNiyAIcDgcaHCSJAkAYDaboSxLNE1DQ4/9fo+yLGGaJlarFYZhwHq9xmKxwPv37zGbzbBer6HrOj59+gTP85CmKbquowP/d+/eYblcAgCapkHTNNhut5jP5zidTjAMg4YIRVH8Ru9CxhhjjDHGGPu28SCAMcYYY79bV1dXePPmDRzHgWVZsCwLXdchSRJIkgTDMNA0DZXhigGAOOgXfQHH4xG2bcNxHMrTVxQF+/0eV1dXqOsaURRRZn5d15BlmTLtRRa+pmkIw5CGBOM4Qtd1KIryPwYCi8UCsixju91S5M1isfgfh/7DMNBtfpGVL4YZdV0jjuNnfQKiiFgUB89mM6RpSgMR8XMURUHTNMjznA7aRea/53kAANd1sdlsYFkWRStZlgVVVbHb7TBNE56enuD7PhRFgeM4VH682+0QhiHGcUQcxyiKAr7vQ9M0HI9H+v0imimKIrx//x5xHGO/3+P169c0mDkcDui6Dp7nPYtqEjf/+77H/f09RR+JrgceBDDGGGOMMcbYZzwIYIwxxtjvkuu6ePXqFV6+fIkwDOE4DhRFgWmaaNsWwzAgiiJM0wRN01DXNRzHoTJbANjv99QlYFkWzuczXNfFfr/HNE2Iooi2BsQBuaqqMAwD2+0WQRBAURT6vKZpcF2XYoDE4b/oBpimCYZhAADl4cdxjGmaKNpHDABEfNF8Pkee5xiGAXVdoyxLem6i80AUCovhRpZlmM1mcBwHfd9TGXDTNHh6eoLnefB9H2maIo5jyLIMwzAo738cRxyPR8xms2fZ/2Ig8fj4CNd10bYtbQoMw0BbEev1GpIk4fHxEWEYPotdsm0bAGhLQfQAlGUJ3/dRVRXiOMbj4yOiKKJIIvH5aZrQdR3SNKW/C13XsVwucT6fEQQBxnFEnuf4+eefv9r7kzHGGGOMMca+JTwIYIwxxtjv0g8//IDb21vIsgzf9+lWfBAEdPO+qiqM40gHyE3TwLIsuokexzFtABwOB3ieh/1+D1mW6Wa/KML1fR+n04kKcmezGRRFwel0gmma8DwPiqIgyzIqJP6yTHeaJlxeXiLLMiiKQjFA4mBcDAJEEfByuaRoIMMw8PHjRwRBgBcvXjzbGmjb9tnWQ1EUmM1mNCAoy5K2E8QhuYhOEl+32+3geR52ux3d0K+qCp8+fYIsy1QmLCKRwjCk3P/5fA5ZlrHZbGjbYj6f0yF/XdfwfR91XaPve2w2G7iuizzPkSQJqqpCWZZQFIXihZ6envDdd98hz3OsVivs93t4nkcbA4qi0GDhdDpRL4R4zf/yl7/Qz2WMMcYYY4wxxoMAxhhjjP0OXV1dYT6fI4oixHEM27ZRVRVF0bRtSzE04oZ90zR0IC2Kg0+nE8IwRJqm8H0fx+MRkiQhCAKKsAnDEKqq4nA4wDRNWJYFWZZRFAUdSH/ZIaCqKkUGFUVBt/7FQb9t29jv95jP5wCAp6cnKg8ex5Hy//u+p8z/tm3x4sULOvwXQ4SmadC27bPb/7Iso+97NE2DqqrgeR69Hl3XPRtCpGkKXdepZ0A8N03ToGkalQmLjQBRmKyqKpbLJeq6pm0G0a0gNjDE9oBhGEjTFGEYUomzoiioqgofP37Ezc0N2ral4mTbtrFcLvH4+Ijlcon3799jtVrh4eEBSZLgeDzCsixomkYbHiKqqWkayLIM27aR5zl2u93XfJsyxhhjjDHG2DeDBwGMMcYY+10R5byXl5fQdR22bWOaJgRBgKIo6Ma6yNcXOfdxHKOqKti2ja7rsN/vMZvNsN/vqSdA13U60C7LkqJ/2ralDYEsy2CaJkX/lGUJVVXhui4Mw6A/k2WZ+gHEUGAYBgzDQAfYAOixiuchyn5FD0AYhjBNk773y34A0SnwZTSQ+D1imNH3PebzOQ0VxOdPpxMcx4FhGIiiCFmWwbIsytUXr818PsfxeKRII1VV0fc9vS7DMND36LpORcJiYCG2GjabDcIwRJZlWCwWtDGQpikVDmuaht1uh5cvX0KSJKzXa9zd3eHjx4+YzWa01bDf76Hr+rPBTNu2UBQFu90Ofd/jfD5/hXcnY4wxxhhjjH2beBDAGG
OMsd8V27YRxzE0TUOSJHBdF03TwLZtyLIMSZLoYFzk2Hueh7Is4boudQREUQRFUSjmRtM0eJ6HoiigqipM06TD6sPhQIMAEaEjYmgMw8DxeMQwDNB1nYqLN5sNlsslqqrCNE1UPAwAsizjdDrRgf9isUCapnRw3/c9ZrMZDQDEobqIAVosFnBdl75WxOCIYUQQBPS5pmmQpil1IHieB9d16da/KBAWN/jF7xHbAuv1Gqqqoq5rqKpK2wO6rtMBvigkDsOQXivx5+Jn1nWNNE3RdR12ux1tFywWC3z69Amz2QxZluHu7g739/e4u7sDAIpEOp1OmKYJjuNQxFHf9/T3YxgG9RyIjgfGGGOMMcYYY5/xIIAxxhhjvyvikFzk20uSBN/36ba87/t0218caruuS6W0IgNfxPGcTifaBCjLkm6ai68VB9tpmtIWQBAEUFWVDqc9z4MkSRQhpKoq4jim4cBut8Pt7S2OxyNM08Q4jpAkCZ7nAQCGYYDnefj06RPCMITv+2iaBtM00XMQAwOR/d/3Peq6pufc9z3CMKQNgaZpUBQFHMehQ/skSWizYBgGOtgfhgHn8xmSJNHjVVUViqLQR8/zkKYpkiRBmqYYxxHb7ZbKkWVZxn6/h6ZpOJ1ONOzwPI/Kgi3LgmVZ1BEwDAM+ffpEHQ+apmG9XuPm5gb39/fQNI3inBzHQZZl9LNVVYWu6xiGAZZl4f7+Hi9fvsR//Md/QFVVbDabr/xOZYwxxhhjjLFvBw8CGGOMMfa7IkkSZFlGFEUwDINy6EVprDhUF/E5URTRwbjoEhAH/YfDAbqu0yaAyJ5v2xaaptEQQNM0uK4L0zShKAryPKfcfHFQnmXZs6FAURTUCxAEAbqug+d5dEC9XC4hSRKyLEPf9xiGgW7BA0CapmjbFhcXFzBNE9M0oes6GgCUZfmsB0EMCcTWQJZlcF2XNhT6vqdb+1VVIcsy2qI4nU5QFAWGYcCyLPi+T1E+YhtAFCCLAYIYToiiX3EzfxxHqKpKfQan0wl930PTNMiyDNd1cTwe4TgOAGCaJux2O7x69Qq73Q7TNOGvf/0rvv/+ezw8PGC1WmG9XqOua+i6jrIsUZYlkiRBlmWYz+coyxKr1Yp+/mazwa+//vpV3p+MMcYYY4wx9i3iQQBjjDHGflds24bv+5BlmfL2x3GE7/uUsV9VFSRJQhzHNBBwHAdlWT7rCBDDBHFoLj7quo6iKKjYNs9z2LaNNE0pVidNU1xeXqKua0iSRNsCYsNA13VomgbgcxTQ+Xym8l3P89B1HeXpK4ryrDeg73uoqoppmqhvQMTrVFUF3/dxd3eHaZowTRM2mw1kWUZd16jrmmKFxKCk6zpUVYW6rpFlGQ0vbNvG8XhEFEU0DBDbCqZpou97GpYAwOl0okHBfr9HHMeDpbryAAAgAElEQVRQVRX7/R62bWM+n1PM0DRNqKoKhmFQN0PTNDifz7RtMQwDdQY8PT1RkXIYhnj79i2+++47fPz4ERcXF3h6ekLTNAjDEMMwYL1eI45jvH//HrZto21b2LaNoiho44AxxhhjjDHG2Gfy134AjDHGGGP/X8xmM7iui+12C0VRcDgcUNc1DMNAnudwHAdJkiCKInRdhyzLaAggNgFEdI+u62iaBq7r4nw+0wFyVVWwLAtN06BpGjiOQ6W25/MZx+ORcvh1XcfpdEKapuj7ngYKpmlSdE9VVVAUBa7rIkkSWJaFPM9xPp9h2zZ1GJRliSzLKMt/NpvBcRzkeY79fg9VVXF3d4cwDJ9l74uNCMdxqHy4rmv0fY88z5GmKcX+iAik2WxGAwcRFVSWJYqiwIcPH5DnOQ01DMOA4zjwPI+2LhaLBd36F4OCYRiw3W4xjiM2mw0Mw8A0TVBVFWma0oDg6uqKvscwDBRFAd/3aXAgXseff/4Zq9WKNgMURUFVVaiqioqHLcuiMuH//M//RBiG2O12X/MtyhhjjDHGGGPfHN4IYIwxxtjvhigIbtsWq9UKjuNQZE3btnAch2KAVFVFnue0FeA4DkX+iLJb0REgcvnzPKcb/03TwDRNbDYbxHGMOI6pQNe2bSiKQh0Bonj4cDgAAG0dmKZJcTeXl5c4Ho+Uay+6DcQWgK7r6PselmUBAB3Od10HVVXx6tWrZ8XBXddRDNBsNoNlWc+ig4qioJikuq6pR0EML8SQRNM07HY72gZQFAWr1QqGYaBpGhiGgbZtKcNfbBiIcmPxvBVFAQDqChB/Po4jdR3keY4gCPDx40csl0saCuz3e+x2O8znczw+PkKWZaRpiiiK8PbtWyyXS/zyyy+IoghpmmKxWNCmRN/3NCS4vLykQQpjjDHGGGOMsb/hQQBjjDHGfjcuLi5oAAAAnz59wt3dHZqmQdd1mM/ndKA8jiPCMESe5zQYMAwDXdfR4fbhcEAcxzidTvB9nzYCbNumm+pJkmC328E0TYokErEztm3TQbpt2zQkENn/okRXlPm6rov1eo3Ly0sAn/sOjscjHdAvFgsAoPiccRyRJAn1A4hD+KZpUNc15vP5swFAVVUoigKe5+Hm5gbjOKLve8zncwzDgKZpkOc5DMPAdrulToTVakVFyaqqouu6Z0OHtm0hSRLO5zMsy6LSY7Et4Hke1us1LMtC27YwTZOGCGII8fDwQLE+qqpivV7Dtm3qdrAsC+fzGXEcUz+B+D3v3r3DcrnE/f09bQKIyCHxs8XmgXi8jDHGGGOMMcb+hgcBjDHGGPvdmM/n8DwPYRjCNE3Yto2+7ynXXpTQHg4HytUXhbfiEFvcxjcMA7PZDIfDAVEU4XA4IAgCeJ6Hw+EAx3FgmiaV6tq2jcPhQAf+hmFQVI3neTAMA7IsI8syyLJMPQEAsN/vaRPg6uoKdV0DAA0KHMfBNE1UGqwoChXwNk1DnxMdAJeXlxRdJA6+xQDg7u6OBgt936NtW+z3e5imSdFJjuPQcKJtWwzDgGmaMI4j1us1RS+JngMRVeT7Pj0vUSIsvk88f8MwqGNAfEzTFKvVijYCyrJEnucYxxFt2yJNU9zc3FAskHjdHcfBZrPBixcv8PT0hDiOsd1u4boudrsdwjDEp0+foOs6/vKXv2A+n+N4POKnn376mm9TxhhjjDHGGPvm8CCAMcYYY78bWZYBAB32v3jxgopqRSyQqqrQNA3TNFFEjuu6mKaJbs9XVUXxM3Ec43A40E1zTdMQBAEURaFyYUVRqCj4cDhguVyirms6rFcUBbvdjgYCu90Ol5eXqKoKwOfNgWmanj1GERm0XC6x2+0oGmgYBsxmM9i2DVmWsdvt6Fa9qqp48eIFzufzs8GA7/vPNgBEbFBVVXAcB5qmQdM03N7eUqyQGIxUVQVN03A8HqlEGADdsLdtG3Ec0/eJrQRJkug1EIMXWZYhSRIkSQLweeNhmiYAwHq9plJi3/fRNA2VDnddh+12S0MA0Xvw/v17rFYrfPjwAYvFAp8+fUIQBCiKArPZDL/++isWiwXu7+9xeXlJJcRFUXyFdydjjDHGGGOMfbt4EMAYY4yx3w1xQ12WZVxfXwMAg
iCAqqpo2xaKoqDrOirDFTftsyyD7/t0wGyaJqqqoiFAHMc4n88U7fPlIb/oCwjDEPv9HpZlUc6+yMAPggBhGGK73UKWZbiuS6XET09PuLq6gizLOJ1OUBQFwzBAlmV4nkdlxY+Pj4iiCK7rUm+AiNHRNA2u6wL4vEUgOhH6vsft7S0NAMR2QFVVNMBwHOdZzn/f91RKXJYlDU36vocsy9A0DY7joGkaKlwGgL7vURQFnp6eaCMC+HzY33UdvV6yLNPPE1E94mev12v6nmma8OHDB1xcXNBQw3VdKiiuqgrX19d4fHykXoEgCGizQtd1hGEISZKQJAkURUHf99hut7/125IxxhhjjDHGvnk8CGCMMcbY74LneQiCAHVdY7PZ4O/+7u9wf3+Pq6srnE4nzGYzOuj+8tDd8zw62BYH4qKQd7/fYzab4XQ60UBBZOWnaYrlcomu63A8Hp9tCojNBNd16Xa/YRhQVRWmacI0TSoOvr6+pm0CXdchSRIsy4IkSXTjfxxH3N3d0W36NE0pEmixWMB1XYr6EXE+cRxT6e+XAwDXdWkAYJomlSeXZYm+75HnOR2aixgi8dqK/oQvPxZFQQf1qqri5uYGh8MBnuchTVOEYYiu62iDQbz24u8DAHU0SJKEsixRVRX6vgcAKg7O85x6CSRJgqZpsCwLcRxjt9thNpvB8zzYto2iKDCOI0U99X0PSZKw3W5xf3//Fd6djDHGGGOMMfZt40EAY4wxxr55nufh9evXuL6+xmKxwDRNGIYBNzc3FOcjDpkNw0Acx8jzHLquI8sySJIE13XpprzYDJjNZjgejwjDkOKBLMvCfr9HGIbo+x5pmkLX9We31cUhtSzLOJ/PsG0bAOD7PhRFoSGEiAIKwxBpmmIYBiyXS4oMGseRDsjFzftxHJ91BIg4nrqu0fc9ZrMZTNOkgmBRHCyGEq7rwrZtivCpqgpZlkHXdUzTRIfsNzc3OJ/PtL0gSRJ9FBn+YkNCbBWUZYlxHKFpGm01iNJhcYh/PB5pwOH7Pg6HA3zfpx4D3/fhOA7GcaRIoTRN4TgOuq7D7e0t3r9/D13X0bYtXNeFLMvo+x6WZeHx8RGKouDt27e4urrCzz//jLu7OxpOiNeRMcYYY4wxxtjf8CCAMcYYY98027bx+vVrvHr1CovFAgCwWCzw7t07/P3f/z3KskRd13S73bIsbDYb+L6P7XaL5XKJcRyRZRk8z0NVVRTvI4qCxc328/kMTdOeFQyLfz8cDrAsi0pxy7KELMt0E15RFOR5DgC0HSAGBeLwXBxUq6qKx8dHXF9fw7Zt2gLo+x6LxQKGYcDzPEiShM1mQz0AmqZRua/YAPA8jwYAjuNQln9VVSiKArZtY7FY0KG/YRg0VBCH/18OTXRdh2VZuLm5oQikYRjooL/rOmRZRn0C5/OZnjcA6gkQw4BpmpCmKcZxxDRNMAwDeZ4jTVPK9TdNE2VZUrfB7e0tfY0Yrriui/V6DdM08csvv2C1WmGaJgRBQL/36ekJv/zyy2//JmWMMcYYY4yxbxwPAhhjjDH2zfI8D1EU4fr6GlEUYbFYwPd9mKZJt+gdx0EYhvjll19g2zbKskQURej7HpqmIcsydF2H2WyGsiypMFjXdQRBgP1+Tx0BIh+/LEuoqgrP8yDLMpXu6rpOJbuqqmK/3+Pq6oqGAmJbQJIk5HmOaZooLsc0TeR5Tgfid3d32Gw2CIKAugDGcUTTNJAkiQ7dVVXF3d0dle5ut1s0TQPP82i4IMqQ27al6B3HcaCqKhXzJkmCruvQNA2KoqCon+12C13XAYAO/w3DoNJhRVFQ1zXyPIemadhsNpBlGV3XYZomhGEIABSLJJ6H6EJomoZu9+d5jrquMY4jbNumbYmqqjCbzbDb7TAMA56enrBYLKiXwDRNrNdrJEmCf/3Xf8VisYCu6xjHEeM4YhgGfPr0ifsBGGOMMcYYY+x/wYMAxhhjjH2TgiDA69evcXNzA9/3cXNzQzn7qqoiiiJ8/PgR3333HbbbLa6vr3E4HFBVFYIgoMLgPM8pV150BZimibquqXBWRP44joOqqqBpGhXWapqG4/FI2f6u6yJNUwDAarVC27Y4Ho9QFAWLxQJFUdBtefHxy84CABSDM5vNsF6vcXFxAcMwqCNgu92irmusVisAn4t6Rf+BiPgRAwDRGyA2AERHgO/7FPvTNA0AIMsyHI9HqKpKj01k74uf/38f/ldVhTzPKc7n+vqaehLEQGaz2cCyLIps2u/3SJIE+/0eURRhHEfIskzRSFmWoa5r2lywLAsfP37EfD6nwc3DwwP1NvzXf/0X7u7u8G//9m9YrVYUp/Tzzz/D932kaYooivAv//IvX+fNyhhjjDHGGGPfOB4EMMYYY+ybEwQBfvjhB3z33XdwXZcKc3Vdf5YXv91u0XUdoiiCJElQVRWXl5c4Ho+I45huwJumiWEY6KZ8XdewLAt1XeN4PMI0TeoQEDfbxaBADAlEBI849A+CAOM4Ik1TmKYJWZapiFhE4lxcXNAhvIgKEs/hdDpRZJCI5hEH6bIs4/b2lkqKxWF/0zS4uLiA67oUDySy9x3HwfX1Nd2S//L2v6qq2Gw2ME0TURTB8zy6sS9Kg0UhcFEUMAwDlmVRJE8cxxiGAafTCX3f43Q6wTAMjOOI3W4HXddR1zUkScLhcICiKPj06RMkScLDwwMV/gZBgDzPKTopyzIqcBYbE67r4nw+o2kavH//HqvVCnEcQ1EU3N3d0Xvkp59+oscJAOv1Guv1+qu8XxljjDHGGGPsW8eDAMYYY4x9U1zXxZs3b/D9998jiiLMZjOM40hxPF3XIQxD7HY7vHjxAj/99BPiOIYkSZTXL26fiyLgpmmgaRo0TXtWkHs8HumQXxxKi1JcwzBwOBwAfB5MnE4n2hQQETjn85m+X/QBiELeIAjQ9z32+z1UVaV+g7IsKebHcRwoikJbAE3T4Pr6muKLbNvGer1GGIaQZZmGG2KoITYArq+vKcO/73saDog4H1VVcX19jaZpoCgKuq7DdrtFEARomgbDMCDPc1iWRbf3RZ9A3/eYpolKi4uiwOl0oo0CRVEwjiPFGYVhSJFIYijRti1tHRRFQYXCjuPAtm2cz2cAnzcfHh4e8OLFCzw9PdEAQ3Q7iNfk6ekJQRCgKAqKHOIhAGOMMcYYY4z973gQwBhjjLFvShAEuL29RRAEWC6XmKYJiqLANM1n+fmiRFbE9QzDAF3Xsd1uoaoq5fGfTid0XYc4jjGOI0ULAZ9LfYMgQJZl1AFQliVM08TpdAIAhGGI4/EIwzDocZRlSVFBiqJgv9/DsizIskzDAnHrX9x+7/seAOhgfpombDYb2LZNh+83NzeQZRlFUdCBv6ZpcF0XALDZbDBNE5UH397ePrvVLwYAoiT4+voaWZbRUKLrOhwOB7qFL54XgGe3/6dpoq8Xj0UMQkScEPC5w2G9XtMApes62mDQdZ22EkQMkNhsEAODPM+x2WzQ9z1ubm6Q5zk8z8Nf//pXigUSr4GIB/rLX/6Cly9fYr/fU4RR
WZb49OnTb/1WZYwxxhhjjLHfDR4EMMYYY+yb4TgOLi8vkSQJHMehLQBxy3wYBriuS4W7RVFgNpvh3//93/FP//RP0DQNtm1Tye75fIZt23Bdl6JnTNOk2/Kz2Qyn04kO+z3Pg2VZOB6PlLW/3+8pC1/ECamqCk3TIMsymqaB4zhYr9e4vb2FLMsoyxLjOMJxHEiSREMBAGjbFkEQQJZlhGGIvu+hqiocx6FhQZqmCMMQuq7D93066LdtG03T4Obmhg7rRSFvWZa0ISBKf9u2pVLlsixxPp9pIyCKIhwOB5imSa913/eUty9eI1VVoes6rq6u6HtlWcbxeKR/lyQJfd9DURTaBuj7HtvtFrPZDG3bYj6f43w+02M5nU6Ypgmu66IsS7x79w6LxYL6DUSUkCzLmKYJy+USYRgiDEN8+vSJCpLLssR+v8fHjx+/5luXMcYYY4wxxr5pPAhgjDHG2DejKAokSUIHvp7nUY6/OBiXJAkA0DQN3fT/8ccf6VDbtm0AwDiOuLm5wWazwTAMeP/+PZUF27YNSZKw3+/heR7SNKWCYVVV4fs+bRXYtk2DgLquAYBicZqmwW63wzAMuL29RVmWFEEkIn/KsgTw+ca9YRgUI7Tf79E0DVarFWRZRpZldLD/5RbAl4fnq9UKmqZRTI/4c3GT/u7ujm7y9/+HvfdYkuTOrj6Pa63CIyIjVaEgSGM3jZvme/Bp+ADzAHwobrjggh/JbhAtQDTQnVUpQrnWahZp/9uVnDH7BvM1Kwng/sxgWZmh3D18ATvn3nPGkcR8sQXheR71Dti2jSAIXkz/13UN13VxOBzgOA5ub2/RNA1tEGRZBt/3kWUZbWh0XYe+7yHLMp2niEeyLAtFUWCeZyoUrqoKV1dXeHx8xDiOyPMcnucBAP7whz/g8vISqqrCsiycz2dst1vc3d0hCAJIkoRpmmCaJm1GNE2DP/7xjx/1PmUYhmEYhmEYhvmhIb/2ATAMwzAMwwi22y1NyXueh2VZ4HkeVqsVxeuICfTtdouLiwtM0wTHcfCHP/yBpuvzPIdhGMiyDFdXVwCA9XqNpmloQl6UC5umCV3Xafrdtm30fY+u66iMNssymq5P0xSqqqLvexwOB2iahjdv3pBgfj6foaoqxeGcz2fIsgzP82AYBpqmocn829tbVFWFruuoTFiWZURRROXAkiRBkiTc3t4iTVN0XYeyLJEkCQnub9++RRRFGIYBwzCgLEsqLA6CgAwMx3Hw5s0bhGFIx9Z1HYqioOO2LIuMGBHjM00T2ralOCEx8f/4+Ii2bamUuK5rFEVBBkRVVWiaBk3TkGliGAbu7u6wWq3oexddC67r4ssvv8SyLNA0DY7jAHjuVZimCf/+7/9O2xjTNGGaJpxOJ3zzzTcf/2ZlGIZhGIZhGIb5AcEbAQzDMAzD/I/h+voaYRjCtm3KhxcivWmamKYJsixjGAYYhoFlWdB1HTzPw1/91V9hnmfc3d0hDENkWQbXdVEUBW0SrFYrKpi1LAvTNOF4PGK1WqFpGliWRcXC5/OZhHTP85AkCWXbi0gcXdehKArGcYRhGDidTtjtduj7Hrqu43g8QtM0WJaFpmlI6BdGh4j7EVsLu92OtgUAYBgGXFxc0NS+53m4v7+H53lUjiyy+adpIiFedB+0bQvXdemYRYSQbduoqgq6ruN0OsFxHNzc3NCWgSgd/rBwWJZltG0LWZahKAqA546FaZogSRIMwyDTYJomACATQ2wLqKqKuq7RdR2enp5wfX2NsiwBgEycIAgwzzMAUNSRMIHiOEZZlnAch3oLfv/733/s25RhGIZhGIZhGOYHBxsBDMMwDMP8j8B1XVxfX8PzPLiuizzPsdlscDweEUURNE2DaZrUD7AsC4ZhgO/7ZAycz2cS+C3LouJeAGiaBoqiIAxDTNOEpmnQti0Mw8DxeITneVQUnKYpxQOJUlrTNGGaJlRVRZqmmOeZCm3TNAUAyLJM0T5JklB5cN/3UBQFT09PuLy8hGmaOJ/P0HWdzI3dbkclxELcv7y8RFVVAJ5NgbZtSWz3fR9d11FHgIgAEgaA4zgwTRN935PYL55zPp+pN2G9XtN79n2Ptm1pul+WZZrmF9n9iqLAcRwqSG7bFkEQoOs6LMuCvu/hui4eHx/JCPgQSZKgKAqapsHd3R0kSUIYhmQYCCNBRABN04SiKLDb7XB3dwfTNKmnQZZl3N3dfdwblWEYhmEYhmEY5gcIGwEMwzAMw/yP4LPPPsNqtYJhGBTZo6oqDMOAoijI85wm69frNU2uiyl20zQBAJ7n4fHxEa7rUklvFEWwbRvTNFGpreu6cBwHuq7j4eGBptZFSa2qqrRN4DgOFEVBURRYlgWO40BVVYq6ERFCYuI/SRIAQBiGVDg8TRNub28BPJsStm3TJoHYDtA0Dff394jjGK7r0mbB/f09giAAALx58wbAcwfC8XikSCLf9+l9hQHQ9z3quoau6zgcDtB1HbIs45NPPiHhX1y/pmlom0CYJKqqwnVdyucXz6uqinoIlmXB/f09FEWhaf26rlHXNTzPwzzPFOckxH5VVbEsC1RVRZIkdG6SJNH3KLYS5nnGarUig0H8XXQWMAzDMAzDMAzDMP972AhgGIZhGObV8TwPt7e3WK/XFIPTti2macJqtcLpdKJpdM/zaHJdbAMAwLIsCMMQeZ4jiiISs0XGvyzLcBwHy7IAeI6iAUCFw5Ikoe97bDYbTNOEPM8hSRJc10WSJLBtG7quQ9M0qKqKqqrgOA4syyIRXJIkzPNMJsTpdKIone12i67rIMsyDocDVqsVHMeBJElUFDzPMwn9eZ6TWC/id8QWAADK7BeFvFEUUalv3/domga6rpOZcHV1BcuyqBhYiPrzPKMoCortEVsJjuOQQVGWJRRFQdu2FOWTpildS7GhATzH+YjfN5sNfN+n4xFbGYqiQJZl2t7QdR3DMLzY4BDGwzRNSJKEuh88z0PTNEiSBI+Pjx/pDmUYhmEYhmEYhvlhw0YAwzAMwzCvzl/8xV9gs9lQUW0URZSf37YtbNuGpmkUySPLMm0FiElxIRrHcYy6rvH4+IjPPvsMdV1DVVUS3IMgQN/3kGWZOgaCIKDsetd1ATyL267rUozO4XCgUuBpmmAYBjRNw+l0gud5NG3/4WeZpklT/2ITQRQM27ZNArvYJABAQr+iKOi6Dm3b4vr6GsuyoCxLDMOAuq5pC6Hve7x584YijETckTgfz/No+0CYCm3b0vR/XddkeggBX5Ik7Pd7KvFtmobMD3GshmG8iP4RfQqiWwEAdF1HmqYUV3R7e4u6rsmYEdf+/fv3ZIDYto3Hx0d88cUXGIYBiqJgt9uhLEsEQYAsyyi26He/+93HvlUZhmEYhmEYhmF+kLARwDAMwzDMq/LFF1/g6uoKtm1DURSK/GmaBqZp0hS5mOgXZbsifkcYAqIsV3QDxHFM4reIlDEMgyb5P5y2B4CyLFFVFYIgwP39PVzXxfF4hO/7OBwOuL6+pmNJkgSSJCGKIoRhiOPxCEmScHFxgaZpAICMBrH
JIARysSHQ9z0Mw8DDwwN836dzO51OFHU0zzMuLy+RJAl0XSfjwjAM5HlO8UZVVVGkT13X6PseqqrS55imSVsNVVVR34LoNvhwol9VVViWhSiKkKYpbQmoqkrbEB+aF8IMEIZAGIb0fkVRYJomuq7v3r3DbreDJEm4urrC4XBA13XY7XZ49+4dbm9vMc8zfN+nSKJpmpCmKRUGi/P/7W9/+7FvVYZhGIZhGIZhmB8sbAQwDMMwDPOq3Nzc4PLyEp988glM03whUs/zjK7rcHNzQ1Pr8zxTeXDXdUjTFLIsw/M8bDYbjONIWflZlmGz2aBpGkiSBFV9/l8f8bgQ6o/HI5Zlwe3tLf74xz8CeBbHdV3H09MTdrsdmqaBpmk4n8+QJAmr1Qrn85k6DcTUvSgY3mw2kCQJdV3TFsM4joiiCLIs43w+o21bXF1dUZGwZVlYloUMB3EuhmHg/v6ezBJRbiw2DkRxr5iUF6K8MAIeHh4wTRPF75zPZxLzxQaApmlkAAjD4fb2Fm3boq5rFEVBGw+KogAAXU/xeeL9FEXBNE2Y55m+52EYoKoqHh4e6LtZr9eoqgppmuLi4gLv3r3DF198QUaGbdv41a9+hd1uhzzPURQFAKAoCnz99dcf5wZlGIZhGIZhGIb5EcBGAMMwDMMwr4brulitVgjDENM0YZomEpNFQex6vcY8z+j7nnLnRb69bdvoug6qqqIsS8iyTDFCoox2WRbqExAT+bIso65rKhB2XRdlWeJ0OuHy8hJt21JXQRiGlGEvNgHiOCYTQPQW/Nep/77vqRxXxBwJk0D87fr6GpIkoWkaeJ6Huq5RVRU8z8MwDHAcB4fDAcMwwDAMAMDpdKLXiJ4AwzBwOp1oYl/EDPV9jzRNMY4jTNOELMvI85yujzhWTdNwcXFB5ywMiHEckWUZbQkI4d80TSiKAtu2kaYpVqsV8jyn78MwDHRdh6ZpEAQBuq7DMAz0XU3ThKZpMAwDfN/HZrPBfr/Her3Gr3/9a1xcXOC7776Doii4vLyE4ziY55m2A7755ptXu2cZhmEYhmEYhmF+iLARwDAMwzDMqxFFEaIogmmaWK1WlIkvCnxF2e9+v6cJ+2VZaFPAsiwSicUkvIjmKYoCm82GRGjgOX/fNE0Az9PsIitffJaYdBdRN3me4/r6msTzaZqwXq9xPB5JFLcsC1VVYZomitcJwxDn8xnzPGMcR1xdXdF2gKZpGMeRtgwAkFlRFAVs20bf9/A8D4fDgcp+xfmJyB4RbSRepyjKiwl8cXzb7RaHw4EikOI4hqZpyPMchmHAcRysVivM84w8z9F1HcUHCQNDkiQYhgHXddF1HZUQT9MEVVWRJAkMw6BM/3Ec6TXH4xFBEJCxIUkSyrKErusYxxH39/ewLIsMHGEKiF4Bz/NgWRZ+85vfQJZlJEnCRgDDMAzDMAzDMMz3hI0AhmEYhmFeDdu2ScyfponKb6MowsPDA968eYOu67BerwH8Kct+nmfaDACAtm3hOA4AkJgfhiH2+z1UVcV2u0WSJLQJ4DgONE2DJElU4BtFEYqioE0CMaUvtgyqqsLFxQXmeaZNA1mWUZYlJEnCNE0YxxHr9Zr6C4Tg33UdPefDiXlZlnE4HFCWJTRNgyzLCIIATdPQJoCu61RADIDOQ9M0rNdrqKqKu7s7OmZhBkiSBNM0sdlsEIGdjcMAACAASURBVIYhyrIkI6QsS8RxjPV6jWVZKDYIeDZLxLW0LAu+71Nfg9gWEIaLMBRkWUbbtgBA2wjifKdpwuPjIxX9CtPFdV1M00Q9EMBz34OmafA8D8fjka773d0dwjBEnud4fHz8770pGYZhGIZhGIZhfoSwEcAwDMMwzKshInmE+C6mzbuuQxzHJCZrmoZ5niFJEsZxxDzPZAoIY6Dve/R9D9/3cT6fsVqtEMcxhmFAnuf0XEVRaOq9qiqEYUiRPcKYaNsWXdfh8vKS4nKKooDrunh8fMSyLHBdF4ZhoCgKjOOIZVkQBAEOhwMURUFVVbi+vkbbtpBlmToAxnGEZVkU2VNVFRRFgSRJsG0bj4+PaJoGy7LANE0yASRJQpIkLwwDTdOQpil0XYemafB9n4qDXdfFer2mTYdhGCim55NPPiEDQJQsZ1kGXdcRBAGJ/47j0LmJOJ88z6knYJ5n1HX9oiwYAG1uCESsj6qq2O/3ME2TjJX9fg9d19G2Ld6/f4/PPvuMopAuLi7w+PgI3/eRJAkVOjMMwzAMwzAMwzDfDzYCGIZhGIZ5NUzTxG63I1FbiLyKoqBpGliWBc/zkKYp5c9LkkSxQOM4QtM0tG0LXddh2zaapoGu66jrGmVZ0pT58Xikglwx1S46AkRuvZiqlySJSnzv7u7g+z5WqxXev3+Pvu8RBAGV9AqB3DRN7Pd7Og/P80gAf3p6osl33/fpHERUkKqq2Gw2ZGiIiCDxmK7rZFqIsuFhGHA+nzGOIxzHQVmWAIDVaoX1ek1ivGVZOB6PWK1WMAyDSo1FZr8Q/MMwRBiGGIaBxP9hGF5EDymKAsdxaONBdAeIqCARYSTMk2EYME0TxSwBoM8QmwJXV1d4fHyEqqoYxxFffvklrq+vYds2XNeFbdu4u7tDHMd0rRmGYRiGYRiGYZjvBxsBDMMwDMO8GqZpIooiisnZbrcUMeO6LoZhoCl/WZZRVRVF+MzzjLZtYRgGNpsN2rbFPM8wTRN932MYBpr0lyQJu90OWZYB+FN8UJZlMAyDSohFxI1pmtA0jTYUlmWB4ziQZRmn0wlRFGEcRzw8PMBxHCrynaaJjsn3fRLbRYTO1dUVhmHAfr8n80LE9yzLgrquSWy3LAuWZVEpsjAFuq6jrQYA8H0feZ7DdV34vk/mgzjHaZoQhiHatkVZlmR21HUN3/dxe3uLaZromoqiYBFXpGkaxQBJkgTLsrBer2FZFpkwohRZbAaIAuG2bcnAqesaXdchz3NM0wTf99F1Hb7++mt89tln2O/3mOcZruvim2++we3tLb766iv6Xvb7PaqqIsODYRiGYRiGYRiG+f/OCyPgL//yL/HLX/7ytY6FYRiGYZifGLIsI8syfPbZZyjLEo7j0MR9URTwfR9pmkJVVSrQHYYBcRxTl8AwDJBlmQRwWZaRpikZDELIF+8jYojqusZut4MkSXh4eIDneQCeM/I9z0PXdTifzxS5I6bit9stNE2j+Jq6rqkXQJgKlmVhnmc8Pj5ClmXqQViWBX3fU2GwZVkvzI/T6UTlvVEUUVlv13Xouo5KijVNo2geWZax2WwQxzGWZYEkSTifz5BlmYqURUzPer2miKXLy0uKBhqGgQqY8zyHZVmU378sC66vr6kU2TAMOl5hHCiKguPxiPV6jTRN6Vw8z0Nd1yiKAkEQoO97xHFMxzTPMzzPw3fffYe3b99iv9/DMAxst1ucTidYloVvv/0WnudhHEcychiGYRiGYRiGYZjvh/oP//AP9Msvf/lL/OM//uMrHg7DMAzDMD8lRCRP0zSUXx8EAU6nE+I4xul0gu/7UBQFnudhWRbkeY4gCChKpqoqmo
oPggDzPFM2vsi6X5YFuq5jnmeabJckCaqqYhgGKikW0TxCqFZVFWEYUn6+oiiUb78sC3zfBwA4jgNJknA4HGgq/3Q6YRxHBEGAZVmw2+3QNA1tK4RhiPV6jaZpoCgK9vs9NpsNVqsVmRrTNKHve5RlCcuyIMsyZFmmmJ63b99iHEcYhkHHLIwBsb0gyzKmaSIBfpom6LoOAFReLAR+EUkkrrXnedQJIBDfl9jiyLKMHj+dTtTFIEkSjscj5nnGPM/QdZ3+ttvt8PDwgNvbW5zPZ9i2jW+//Ra+70PXdbqey7IgiiIAwB//+EeKF2IYhmEYhmEYhmG+H+rf//3fv/YxMAzDMAzzE0XXdYRhiKIoqGx2miasVisS3YHnCB9RGCyEeEVRYFkWRfBsNht0XYd5nqEoCm0UiN6AeZ4B/Ml8cBwHuq5jmibsdjsMw4DHx0fYto37+3sEQUAFvaZp0ueJclzgOb5IlmWKq7m6uiJRPMsyitFp2xbDMEDTNFxdXZHILabuT6cTPvnkEzp/YUgMw0AZ/kEQAABs2yYjYBgGDMNAkUnAn4p5JUnCer1+Uag8zzPFFRmGQa8zDIME+LZtqVRZvL/4d57nVEwsyzJ0XUcURSTwC/NGHIuISur7Hk3TUDeD6EZ49+4dLi4ukOc5bNtGlmV4eHjA9fU1JEmizYB5nmlLhGEYhmEYhmEYhvn+yK99AAzDMAzD/HQRxby2bVM5rMiSP51OME2T4m/EVD/wnPEvRHoxlT6OI/I8J8F7u90CAIn+hmFQya54r6qqYJomuq7D4XCA67pYlgVd16HvewAg0d+yLJqCN02TthDatoWqqvQZbdtSLn8URViWhYqCsywjcf3DeByxBSDKeeu6RlVVCIIAm80GmqZhWRYYhvEih1+SJCiKgtVqRcaCOP+npyecz2c6fnFedV1T7JCIF1qWBU9PTy8+v2kaVFVFZss4jthut1iv14jjmEqMAVAZ8vl8hmmayLKMIprEJsBms8Fut8NqtUJVVRiGAY7j4O7uDuv1GmEYwrIs7HY7fPPNNzAMA3d3d9A0DWVZUrExwzAMwzAMwzAM8/1hI4BhGIZhmFdDTKdLkoSyLGEYBk3qh2FIWwFiOl+SpBfGwDiO2O/3UBSF4oS6rqNoGvETAMqypHgfSZIwjiOVA4vSXhFNI4R5AKjrGsuyoCxLihYSpb2u6yLLMti2jWEYMI4jXNeFZVkYx5Gid+Z5RhzH2O12yPMcTdNQZJEQ8IUAX9c1wjCkAuFxHMmUaNuW8vjFuciyjPP5DAAvYoMsy4KiKDgcDgBAmxLifYVh0Pc96rpG27ZkPrRtS9dAbGUI0X9ZFiRJQp0CYtI/z3PqGGiaBu/fv0fbtnh4eAAAJElC5oHjOPA8D03TYL1e4927dzBNE6qqom1bXF9f4927d9B1Hff395BlGWEYYpqmj3dzMgzDMAzDMAzD/IhQ//dPYRiGYRiG+e/BMAwMw0DROyJW5ng8UqGu4zioqgq2bZMJAIAm+z8sAE7TlAqHFUVB13XUCbBardB1HcqyRBAEL0qGj8cjoihCkiRUTCtJEvq+p8Jc8Xxd11HXNU6nEzRNo34BTdOQJAl834eqqpSt/+FGwjRNlN8vyzKKoqDC3dVqRddFfJ7YTIjjGNM0QVVVSJL0YsNBbCOkaYpxHKkA2fd9PD09Ybvd0qaA+ClMhyzL4HkeoiiCaZpUgiz6FIRRIop6VVUls2EYBuz3e7qGYrtAfMayLLSpIMyQ9+/fw3EcMg5ELJDneXh6eoLv+3Bdl67HNE20vfD111+jruvXulUZhmEYhmEYhmF+0LARwDAMwzDMqyHLMrbbLeq6RhzHVODrOA6maYKmadA0DY7jkAGgqiqJ50JQFzn2fd+/EOujKKLyX7EhEIYh+r6Hrus0tb8sCw6HA4IgwPl8prLaoihwc3ODYRhoCt40TRK7xbFmWYZ5nhGGIQzDQJZlFOMTBAEURUGSJPRZ4m+iT8C2bZp2H8eRInpWqxVc1yWBHQBFC1mWhaqq6Np8eP5i4n+z2dCxChNkHEc0TYPVavUikkgYHSLCqK5rEvzFBoI4VkVRsCwLdSSkaYogCFCWJZqmQRiGqOuaopJEsbN4/2maqMw4DEMyg4qigKqqME0TnudBkiQqJw7DEL/73e9e4S5lGIZhGIZhGIb54cNGAMMwDMMwr4Yo6H379i32+z00TaPoGOA53/98PlPprRC5FUWB53mo65qm0ZdlQRRFkGUZ4zhCVVU0TQPDMJCmKU2Wi78rikLbAqK8dp5nWJYF13WRpinCMKQ4IvHcb7/9FnEckwB/Pp8pgkhV1RfFx7Zto65rEuM9z4OqqjifzxQ9tN1uUVUVAKDrOozjiDiOYVkWvW4YBtR1jbIs4Xke1us1ZfzLsoxpmtD3Pdq2hed5LyKH+r5HWZao6xqu68K2bZimiWEYkKYpfN+n7QARz9P3PRkuvu/D8zwyW0QHQpIkiOMY5/OZtjEkSYKmaTidTlitVhjHEYqi4Hw+4+bmhjY22rYFADIlxPaC4zjoug5pmiKOYypcPp1O9HeGYRiGYRiGYRjm+8NGAMMwDMMwr4aI1BECtqZp6Psefd9Tjr+YWheishCp8zxHHMcoy5JigIQALzYApmnC09MT1us1iqIgQXu1WiHPc1iWBVVVEYYhyrKkQuDz+QxZlmlyv21bBEEA27YhSRLyPMft7S2qqkLTNIiiCJqmAXjO8ZdlGY7joCxLivlxXZdeO00ThmGA7/voug6qquL+/h6+78MwDHRdB+BZKBfFwb7v4+3bt3R+whw4HA7wPA+r1QqO41D2f9d1qKoKZVnCdV3c3NyQqdH3PZqmQVmWL7Yj5nmG67ovhP++73E6neB5HpIkoW4CTdOQ5/kLMwQAbV2I8xQRQ0mSwLIsMmyapkGapnS9RFG0ruvQNA0PDw/wfR93d3ewLAuGYZBhwjAMwzAMwzAMw3w/2AhgGIZhGObVUFUVtm3Dtm2KhZEkiSb/LcuivHkRKyMiacTkehAEGMcRwJ86BwCQGC7MgCAIcDqdyGAQ0+cAkGUZoihClmUk2HueB13Xkec5xeEI80Ecp2ma6PseXdehrmukaYqu66DrOrquIxNBmBLCGGiaBtvtFsBzia4wE0zTpKidruvo/W9vbwE8b1CIjP+iKOC6Lq6vr+kxYTqI7QHbtnF7e0tRQaKUOM9z6LpORoxt2wiCgLYKxIZEWZZQVRWGYeDp6YnOBwCVN4vrLgwIAHSe4nsQ36W4lqfTCb7vwzRNVFWFoijgOA4syyJjII5jfPnll7i4uKANBDYCGIZhGIZhGIZh/v/BRgDDMAzDMK+GELqrqoLruhS/o+s6qqrCsiwYx5GEZlmW4Xkeic5lWULTNJzPZ8RxjK7rqJS36zoSx4VADjzHDdV1TWZAlmUIw5Bia0Rcjq7rKIoCtm3jcDiQUH8+n+G6LhRFQdM0JF6Laf8gCOB5Hk6nE5qmged5cF2Xcv+7rsNut6P8/bqu4fs+lmVBURRQFIWOUdM0XF5ekgEyjiOKooBhG
BR79F9jdoqiQBAEuLq6oteM40gCv6qqZFK8efOGrtXpdIJt23RND4cD9S9IkgTLsgAAmqZBURQAoDx/8bs4P3Fcfd9TD0OaplTqPE0THh8fsSwL1us10jSlrQ5ZlpHnOZkl8zyjaRouCmYYhmEYhmEYhvk/gI0AhmEYhmFeja7rYNs2uq6jqXEA0HUd0zRBkiSK41mWBbqu43g8UvTMarXCMAxYr9cYhgF5niMMQ+i6TvFAohNAGAlVVUFRFARBgP1+T+8ZRRGZAZZlUVSQqqqI4xhJkgAAXNel3gFxXACo3DgIAgCgmKFpmnA8HtF1HbquQxiGkGUZZVkCeDZDhNBeVRXF9gRB8CKv/+HhgToU+r7Hzc0NlmWhvgFhHPi+T2K/iAEqy5LMA9d1yQAQMUxVVUHXdRwOB/pedF2HLMswDAOmacIwDCoAFnFAwnRZloUKg0Vfg6Io9P18+PniO5EkCV3XIUkS2LaNPM8p/qcsS1xfX6PveypaFsfGMAzDMAzDMAzDfH/YCGAYhmEY5tVo2xZpmuLm5gaqqkLXdZzPZziOg77vATwL5UJsFt0By7Jgu92i73sURYEwDGEYBmXPy7IMXdexWq0o7keI4p7nIQgC3N/fUz6/pml4enqiMl0xdS86C5IkgaqqFEl0Op2olNjzPOR5TiaEJEkkvodhiLZtoSgKGRqiRFf0BARBgGEYUBQFbQJ4ngfLsigmqGkays7v+566EUTsjpj0d10Xuq7DMAycz2e0bQvDMOh63t7eUmSSMBzyPIemaTSNL6KAfN+HoigYxxF931Pef9/3SNMUuq6jrmsqAwZA34UwQIqioGueJAmurq5ow0AUOotYJcuyMI4jXT9xjKfTCVmW4csvv3zNW5VhGIZhGIZhGOYHDRsBDMMwDMO8GpqmUZFsEASYpgm+76NtW0RRBFVVKW9eiOdxHFNW/oe/L8uCaZpeZP+LclvbtklctiwLkiTRxDoAKrCd5xlJklD+f13XNPnuui6qqoIsy1AUBZ7nQZIk7Pd7yLKM1WqFi4sLiiTyfR8AoCgK6rrGMAzwPA/Ac25+VVVYr9fQNA3LsqAsS+oKsG0bWZZBkiS0bQtN05BlGXzfh+M4qOuajIQPTQBFUaDrOrIswzAMNIUvynjbtn0RISREfxHtY5omdS6IvgCxCaCqKrIsgyzLZIIsy/IiNkj0H4jvRnQTnE4nuK6Lp6cnKmqWZRkAqPuhrmsyfcZxRFVV1Avw7t27j3pfMgzDMAzDMAzD/NhgI4BhGIZhmFdDVVWaArdtmybvRU6+53koyxKKomCz2ZCw3HUdTccLs0BsBQzDAMuyKKpGRAOpqgrLsqBpGuq6pr6Btm2poFaWZZpyB54F+3EcqYfANE2oqgrHcaBpGvI8p4Jf0zTJSFBVFZ7nUd694zh0PsfjEaZpUi9AVVWQJAllWSKKIizLQoK7OG7RI+A4Dgnrtm2jqqoXJoBpmlQ0bFkWiqLAbrejKJ7Hx0cyB0TWv2VZCIKAjAZhAnxoFojvSdd1AKCSYAAUjSS+G/G7KBMWz62qCuM4IkkStG2LpmmwWq1evOY///M/EccxDMNAkiQYhgFJkuA3v/nNR7snGYZhGIZhGIZhfoywEcAwDMMwzKshImSEEP7hFH2apjShLoRoMUkfRRGmacI0TQCA9XpNsUFd10GSJOoACMOQBHwxnS8KfpMkITFabACM44gwDHE4HGDbNoIgoEgcYUCUZQlVff7fKMdxoCgKqqrCPM9YrVZQFAWn04m6CuZ5pvz8D80K8e9hGGCaJpZlgeM4VJarqipkWUYcx1AUBfM8k5FQFAU0TYOqqliWBZqmIUkS9H1PZcs3NzdkRojjH4aB4oN838c0TVR2LLYA8jynImBZlrEsC/3UdZ0EftFXYBgGuq5D27a0qSBKmj9ERA+J/8R3KP4TBc1i8+Lx8RH/9E//9HFvSoZhGIZhGIZhmB8h8msfAMMwDMMwP136vkcURdB1HafTCYZhoG1bZFmGOI6RZRll0otiWREhVNc1xdiIWKBlWeB5Hgnem80GeZ6jKArKpDcMA7qu0waB6CQQwvrt7S1F7MzzDNd1oWkahmHAOI4Ui3N3d4dhGHA+n9E0DZZlgWmalLdv2zYZFWEYUrzPOI7YbrfQNI3KgauqwtXVFW0eCBNAZPQDQJZlOJ1OME2TtiSAP21VHI9H6hIoigJxHKMoChRFgWEYqCRZbDQIM6RpGpRlSYW84j1EBJLYNBAxSuLzAdA2xzAM9J36vg/P8zAMAzRNo+9FIN4XAL766iv0fQ/TNKlIWBgjp9MJ//qv//rffxMyDMMwDMMwDMP8BOCNAIZhGIZhXg3TNHE6nfDpp58iDEMkSQLXddH3PTRNg+M4ME2Tni8m6Ou6RhRFJBqrqoq+72lq3vM8yqoXE/pt20LXdZimib7vkWUZwjAEACoHNgwDfd/jcDhQfE9ZltB1HdfX16jrmsTqKIoAPAvxDw8PiOMYkiShqiraRJjnGb7vQ1VVKtTd7Xb0eV3Xoes6RFGE0+kEz/OQpinl9ouyY0VRaNJfmACSJEGWZYRhiL7vcT6f4fs+ZFmG4ziQZZk2GUT8kGmaFD+UJAnGcURd13RdRBGz+G48zyOxX8QntW0LAHQtRARTmqbUQ2CaJpX/KoqCYRgoGklsDnRdB8/zME0TGRCO46BpGgzDgH/7t39DVVUf83ZkGIZhGIZhGIb50cIbAQzDMAzDvBqe5yEMQ9zf3yMMQyzLgru7OzIIHMehrYAkSZAkCdI0he/7GIYBbdtSEbDrupimiaJ4+r6n/Pl5npHnOUXQCNE8TVNYlkVT713XUXxPFEXI8xy2bcOyLHRdB8MwkOc5JEmCpmlwXReSJCGKIjIspmlC13WoqgqGYVAZcl3XmOeZhPf9fo+iKOB5Hm0S3N/fwzAMAKDOBFmWsd/vYRgGiqKgLgNR0jtNE3UlyLIMXdepT6FtWxRFQeXIURShaZoX0/ciNknE/ZimiTiOaWOgqio6nyzLkKYpxnFE13U0zS9KhYXIfzqdcDqd0LYtfv/736Pve+osEBsPoptBvJf4KcsyHh4ecDweX/PWZBiGYRiGYRiG+VHBGwEMwzAMw7waQiC2LAv39/fY7XbQNI3KdxVFgW3bFOezLAuV2E7ThDAMaZJcxN+Ikt4wDGkSflkWbDYb1HUNANhsNnh6esJqtaJJdhGJczqdEMcxTbqLEl0RI+R5HkXsJEkCXdcpN19k8du2jYuLCwzDgOPxiGEYKPaobVssy4K6rqFpGsZxxPl8hizLZALIskzdCfv9Ho7jvOglAJ57FMIwpHgfYQSIOCGxCSDifS4uLvD09ESiuzAfJEnCNE1QFAWe5wHAC4FelCeL4/uwAHieZyp2Fp0Ay7JgHEfa3NB1nXoIRKSQJEm4ublBlmXYbrfwfR/ffvstttstHh4e8O7du495GzIMwzAMwzAMw/zoYSOAYRiGYZhXZRgGErrFFP/j4yN2ux3O5zOCIECSJDRRvl6vMU0TxnEk4X8YBiRJQlP8IiJHGA0AsCwL
XNdFURRYlgXb7ZY2A8Q0+/l8RhzHOB6PWK/XZAaUZQlN0xBFEWRZRlEUVGQsyzLKssQ8zxTXc3t7S9E5TdPA930EQYC6rvHw8EAFu5Ikoa5rEs9FL4GIOOq6DrquI89zaJqGZVmgKAocx0EURZjnmUR2wzCwLAv6vqfSY7E5YJompmmCbduoqoo6AIBnMV+WZdpuKMsSXdehaRoqI5ZlGVmW0XUUP33fp2MQJcGiAFoUBkdRRJFLwzBgtVohCAIqLf7tb3+Lt2/f4vr6GpqmvehFYBiGYRiGYRiGYf48qJ9//jn9UpYlsiyj7FeGYRiGYZj/TmzbpngfIbiLrP15nikr37ZtaJqGMAyRpilkWabsfZGdH0UR0jTFarVCWZaQJAlBEFBEj3hPz/NIpBcbAZIkQdd1BEEAVVURhiH2+z3W6zWZALZt09aA67o4Ho/wfR8A6DhFFNE0TdA0DY+PjwjDEJIkYRxHaJoGwzDoOFarFeZ5xm9+8xs4jgPgT2W6wmBwHAdd1wF43gJwXReWZVFB8X6/p3gjy7Jogl8U8gJAnudomubFe4vIpGVZ4DgOJElCURTo+x5t29JmhjAAxPUDnguBl2XBMAxkWIhrKt5ztVpRXFPf95AkiTY7BJqm4erqCgDIHBGfyzAMwzAMwzAMw/z5UL/55hv6xbIsBEHARgDDMAzDMB8FSZIo3sc0TczzjDRN0XUdhmFAEAQ4nU6IogiHwwGbzQZBEGAcR8q+FwK9ZVkIw5BKgIXw/aGQLgTwYRgo0kaYC57nQdd1Ohbxd8MwYJomxe+ILQURHyQ6CUSszjiO9N+nn35KwniSJBTLE8cxmRjzPOP6+prOwfM8zPOM0+kETdOQZRlF+FiWRaW+YnjDsiyoqgrbtiFJEi4uLvD+/Xu0bQtFUaDrOrbbLb777jvaOgBeTvY3TYO6rql3QVVVMgrE8QvxX2wdiMfEuQqzRRQrl2VJkUMiOkmULwvjAQCdWxRFuLu7g6ZpePv2LXcEMAzDMAzDMAzD/Bl5EQ3UNA2urq7w9PT0WsfDMAzDMMxPjKIo8PbtW/R9jyiKcD6fsdls8Pj4SPE8Ymr9fD6TeSCE9PP5jNVqRcW2ohvAcRz6KcwBkW8fBAEZBQAQRRHatiWxve97pGkK0zQpTkd0BERRBF3XaVPAsiyK4DmdThjHEbvd7sXEPfA8RT9NE66urkhEPxwOKMsSvu/j6uqKInXGcYRlWSS6C4FdxAOdz2cqLxYbE8uywDAMMi/EpsRms0FVVViv1zgcDgCexX9ROmyaJnzfx36/pwgicdyyLMPzPDoXgXhsWRaoqoogCHB1dUVxP33fo+s6XF9f43Q60evatoXneWQcCHPl8fERNzc3iOMYmqbh4uLiI96BDMMwDMMwDMMwP364I4BhGIZhmFdDiMlJklBkjygP9jwPX3/9Nd6+fYvf//73iKIIQRBgnmfkef5CAFcU5UWUjed5KIoCruuibVus12ucTieEYUjivxDoRZlw3/e0JZDnOaIogqqqqOsatm1TFJCqqsjzHLIsk0kgyoNXq9X/63mKSfp5nvHw8IDdbkfRRBcXFzRVPwwDmqZBWZYwTRN1XcM0TViWRdP0x+ORug+EmD9NExzHQZ7nmKYJRVFQmXBRFACAp6cnGIYBVVXJWIiiiDoGbNuGqqrUCSD6Dj6M6RFT/OJ8xL/neaYNgCzL4LouPM+jMuBpmlCWJfI8p1ijcRzx+eefI89zdF0H0zTpcz+MD2IYhmEYhmEYhmH+z2EjgGEYhmGYV0OUyfq+T+W/6/Ua5/MZ0zTBNE08PDxQIfDj4yOurq7gui66rkNRFHAcB23bIssyrNdrtG2LNE3h+z4J6X3fY7PZIMsyOI5D0TcCWZZJIAeA9XqNuq4BAK7rIs9zrFYrMgEkSaKCY13XkaYpLi8vaXpfIP4tpunnecbl5eULEV0UFotsfsuyMAwDDOq2dgAAIABJREFUZFnGp59+CkVR6DEx9b8sCwn1juNgHEdkWYZhGGAYxovy33meaVJfCP0i+qgsSyr4FdsTH4r/H5oBolRYfLbYahCPVVWFaZqwLAtM0wTwnPv/zTff4Oc//zlFIFVVhaZpoCgK7u/vcXl5CQCoqgpXV1f46quvUJblf+dtxzAMwzAMwzAM85NDBYCf/exn+PWvf/3ax8IwDMMwzE8MIXJ3XYdlWSjb37ZtyuWv6xpJkgAALi4ucDweoWkaPM+jQl3bthFFEfI8h+/7mOcZpmlSfM44jijLkqbUPc+D53moqopMACF+C9FeHMc4jvRcVVXhui4URaHJ9+PxSEbF8XgkIV6I6uM4AgBtLogJfuBZKBfiuSzLWK/X6PseFxcXUBSFrkvf9yjLEqqqomkaigRyXReGYWCaJjI9kiSBbdtkeIzjSL0Htm1jtVphmiZM0wRVVbHf76kAWVVV+l5EFJFlWZAkCcfjEZvNBsfjkTYsgiCAJEmYpgld16FtWzIWrq6u6Lt9fHyE53m4v7/HbrdDHMdwXRdJkuB4PCKOY2RZht/97ncYhgHv3r372LciwzAMwzAMwzDMjxr1F3/7i9c+BoZhGIZhfqKIyfVhGDBNE8ZxRNd1JCqLqX7RYZSmKU3zfyj667qOh4cHOI6DNE0RRRFNuPd9jyRJEMcxkiShDgEAtE0ghHuRWS8EfBEbNM8zHMeBoiio6xqqqsL3faRp+mJTwDRNeJ4HRVFQFAVN42+3W5qYf3x8RBzHmKYJkiRht9uh6zqKxRFGRJIktBFhGAaV8b558wa6rtMxJkmCaZqg6zrFBW23W/R9T1sCq9UKq9WKIn2maULTNKiqCpvNhnL+hcEhTISmaWgrwDRNHI9H2LaN8/kMy7KQ5znCMMT5fEYYhgCezRqx0SD6Aq6vr1EUBbbbLXVRLcuCuq6R5znatqVC56enJy4KZhiGYRiGYRiG+TPD0UAMwzAMw7wa4zji3bt3uLm5QVmWuLm5QdM00DSNBHlRmnt9fU3T66ZpUi69MBI0TYPruijLEkVRwPd92ggIggDH4xGr1epFdwAAMg4AoOs6KhTe7XYvImqESC96AURpcVEUkCSJMvaPxyOZBrZt08R8lmWQJAnr9ZpidRRFoevQdR0sy8I0TVAUBXEcU+GuiC0qioL+LXoVPM9DWZbYbDYvugjE1sCbN28wzzNtHogpfV3X8dlnn2EYBiRJQlP/wshQFIUijJ6enqCqKjRNoy2FsiwhSRIeHh5ow2CeZ2RZhqZpaPPANE0yCtI0hWVZeP/+PW1Z/OEPf8Ann3yCx8dHNE2Db7/9lkqNGYZhGIZhGIZhmD8PbAQwDMMwDPNqaJoG4FmAj+MYy7LQRoDjOJAkifLmAWC73SLPcxRFgSiK0DQNCehCaA7DEHVdo6oq2LZNET9BECBNU5imiaqqYFkWTfSfz2e4rgvXdVEUBWzbBgAqJfZ9n7LwRT9A0zQAQFFBkiShbVtEUYTD4UBGgnidmOjv+x4AqKRXTM2HYUjbDZIk4enpCXEc43A44OrqijYcJElCmqYIgoC2I9brNUUQCSN
D9C+ITQvxUzzXMAzM84xlWaDrOsZxhCzL6Pseuq5jv9+TobHb7egcT6cTVqsVuq6DpmkYhgFVVUHTNMzzjDiO8fT0ROcqpv67rsMwDBTlJGKELMtCVVXIsgx5nuOrr776mLcgwzAMwzAMwzDMTwL5tQ+AYRiGYZifLn3f4+rqispqh2GA67oIggC6rqNtWxyPR9R1jcPhAF3XEccxfN9HlmXQdR3LstDzgiBAnudwHAemaSJNUxiGga7rkKYpTenruk7iv6Io8DyPDAYRR5SmKdbrNeI4pq4CMckvJuJFpJGYhk/TlDYM+r6HpmlkPojp/aZpkKYp9vs9PM9D3/cvNgP6vscwDCS8C6PAsixomoa2bSnPvyxLaJqG9+/fI01T2oYQrxWxPqqqIo5jbDYbMgTO5zMJ9cIACcOQtgpM04SqqlAUBefzGYqiIE1TOicRT3Q6neB5HlRVhW3bFGmU5zl1DohtDrEF8fDwgGEY6Dy7rvt/FDgzDMMwDMMwDMMwfz54I4BhGIZhmFdjmia8e/cOn3zyCRzHoW4AUbhrWRZN5wvBOM9zzPNMBcB932O73VJm/4eFwx8W4IroIBE9JDLsm6ZBURQwTRO+7yPPc+i6Dl3XSWz3fR+KoqBpGszzDE3TEMcxjscjXNfFOI70N0VRcDqdoCgKxnHE9fU16roG8KfCYFVV8fbtWxwOBzIqRGxPFEUvJvdF7v/pdEIURSiKgkwSkdsvOgccx8GyLEjTlEwAsWkgSRIA0KS+iDUKggB931NRMfBsSLRtS0XDRVG82G4AQKXBwnARZoiIIRqGgcyb/X5P7ztNE52PZVnwPA+maULXdXie91HvP4ZhGIZhGIZhmJ8K6v/6l/+Fn/3sZ699HAzDMAzD/AQRIr0Q/0UmvxDy67qG4zio6xqbzQbzPCMIAsqhb9uWhHff96GqKkXoiJ9iG0CI6JZl0bS7pmlYlgVN0yAIApqw13WdomzElHsQBLAsi3L0ZVnGer3G8XjExcUFqqqiqXZRnCv6AUzTxOl0wjiOuLi4AACK7nl4eKBug81mQ2L54+Mj1us1gOdSZd/3kSQJoijCMAxwHAfjOML3fTJEhGmh6zrFCLVtS6XIhmEAAMUUifMfx5Gm/ZMkga7rtKUgSRJ834dpmtA0DeM4kskhjAER55QkCRkBIrqormsyFYSpI8sykiTBmzdv8O7dO0RRBF3X8cUXX+Cv//qv8R//8R8f7yZkGIZhGIZhGIb5CaACwK9//evXPg6GYRiGYX6CDMNA0Tu+76NtW1xeXmJZFkiSREXBpmmi6zoS7IVwvCwL6rrG6XQCADiOA1VVURQFPM9DXdcwTfNFUa1pmjRNL8qEhUmgKAo9rmkazuczNE1DGIYURRRFERRFQVEUWJYFq9WKSoSFEO66Lp2jiOgRhcRicr7ve8rjj6KIpulFme92u6XpeUmSoCgKXNelbQkRpSQeF+aDaZoYhoGifnRdp4Lkpmmoc2GaJjRNg2makKYpbNuGoii4ubmh7Qvx3lVVUQSS2EA4Ho8Iw5AilsZxxDzPuLq6og0BsQGh6zoMw8Dl5SXev3+P7XYLRVEgyzJWqxUuLi5wPB5xPB7xd3/3d2wEMAzDMAzDMAzD/JlRAPxfH/5htVohSZJXOhyGYRiGYX5KfP7557i5uUEYhpimiQR0IYqP40iCuxD5h2Ggkl9RwquqKmX3A88xQH3fwzAMGIaB/X4P27Zpu+BDE2AYBmRZBlmWYds2+r6Hqqq0ReA4DsqypCl7Md0uYovKsiSx3zRNOI7zYjJeTM0vy4JpmsjQCMOQ3kPk8i/LgqqqsFqtsCwLGQNlWcI0TczzjLIsqS9A0zT6u2VZkCQJdV3TNRHFx0KQr+say7IgyzJM0wTLspDnOcIwhG3bL4yHtm0BgM5lHEcMw4A8z5EkCfq+R5qmqOua/t9R5P5vt1sMwwAAZOrYto27uzv8/Oc/h6ZpmKYJFxcXME0TZVni+voaURTh8fER0zTh/fv3r3BHMgzDMAzDMAzD/Hj427/9W/zLv/wLAO4IYBiGYRjmFRFissjB7/uehOu+76k0WAjkotx3tVohyzIEQUDC+X6/h6IoSJKEhGjXdTEMA4n4WZbB8zx6L/H5mqbR303TxPl8RhRFOJ/PJNaLol5d13E8Hmma3fd9HI9HyrmXJAlxHAMACfPzPFOxcBzHJPILg0D8DoB+H8eROhPiOKbHNU2jyKGmaSgOScTuiILfYRjouuV5TudxOp2oy0CI9qL7oO97jOOILMvI3ABA0/3LspBhIIwO8bPve7qm9/f3+PTTT1HXNcZxpOsqyzLu7+9xfX2N9XqNpmnI5AEAXddhmiY+//xz/PM///PHvh0ZhmEYhmEYhmF+tLARwDAMwzDMqyHiblzXhaZpqKoKAEjcF3n5RVEgjmOaOM+yDOv1GlmWwfd99H2P1WpFU/CyLFM0zn6/h+M4SNOU8vTFa3Rdh+/70DQNRVHQ9P9qtYKmaXAch17rOA4Mw8DhcMBqtYKiKEjTFLquIwxDyLKMLMsAPIv5wrTQdR339/e4urqirQBhdIjzE1P94jHRfxBFEWzbflHAW1UV5nmmx0Ukj3hOXdd4eHiAYRiwLIsy/4VIv91u0XUdlQbP84w8zyFJEs7nM2RZpnMAnk0A4HlLQ2xh/FeDQHz+MAwIggDjOOK7777D7e0tuq7Dzc0NGQmi6Fj0CIjehmVZoKoqdF3HdrvF3/zN3+BXv/rVx74lGYZhGIZhGIZhfpSwEcAwDMMwzKtRVRXatkXbtiQwN02DOI5pIt5xHIRhiHEcqUsgiiIkSYLVakURP2I6/fr6Gvf39wCAw+GArusoSkfk2Q/D8GLCv21buK5LZoCID/J9H+fzmYp6bdsmkyBNU3ieR4W4otNA13XIsow0TUn413Udp9OJsvTFBP1ms4EkSciyjCbnXddFlmXYbrcYxxHAc+ROXdeo6xqu61LXgYjrETn+IkJIURRIkgTHcSiffxgGdF2H4/EITdPIyJBlmYqBxXWSZZkifYQhoCgK4jhGkiRkfAhDo65rWJaFtm2p36DrOpRlSVFOYpNDREAdj0csywJN03A4HLBer+keuLy8xC9+8Qs2AhiGYRiGYRiGYf5MsBHAMAzDMMyrMY4jFEVB0zQwDAN939OEuTABxCR8WZaI4xht25IgL4TzpmlgWRbF4QhjoCgKKsv1PA9t21KprojI6bqOJuJ93yexXVEU9H2PKIqoL0CU9E7TRJFArutC13Xoug4ANG0v+gwcx6Hc/aenJ3Rdh91u9yJiR1EUtG1LcUaO41CUUF3XaJoGnufh9vaWJv+naULbtiiKAoZh4OnpiV5rGAZ0XccwDOj7HsfjEYZhIE1TqKpKk/lBELy4HmIzQ5IkJEmCOI6RZRlWqxX+b/bupDeSM0sT9WvzPLibO50zI9RCQZWoYZH7RC4v6jf09q76L/SiNxf9o3rRiQJqV7VIoARUZkopxUDS6e7mZm7zbHcR+E4GlX2HkNQKZeR5ACEiSAbpdPpCcc73vW9RFIjjGKZp0o
0MURwsvj/TNLFarWhBcXl5ibIs6SaGbdu0nAmCAE9PT1itVrRMEa+FaZrw4sWLj/a6ZIwxxhhjjLFPDS8CGGOMMfbRtG37rFTW933ouk6D7nme0fc9TNOkLHzP8ygeaLlcIs9zGuBblkWn7X3ff1bYezweqaRWRAk9Pj7SsL4oCoqnETcCkiSBYRg0yBbRNYZhoG1bhGFIRblBEEBVVcRxjGmasFgsaKGQpim6roOiKLi7uwMAFEWBYRjQNA0cx4EkSfjss88QxzHdUhALgNvbW1qQiJsRVVXBcRwMwwDTNPHixQv6fkXRb57nSNMUhmGgaRoa/It+AFVV6XHpuk5//3A4QFVV7Pd7yLKM3W4HAJBlGXVdQ5ZlPD4+YpomPD4+Pis3Ph6PaJoGZ2dntEARcUxFUWC9XiNNU0RRRLcfNpsN0jRFkiQ4Pz/H69evURQFvvjiC/zHf/zHR3hlMsYYY4wxxtinhRcBjDHGGPto9vs96rrGOI4YhgGWZQF4l0cfhiHF2YjT7bZto21bmKaJIAgopqbrOliWha7rcDqd6CR6EASU+f/27VtaMhyPR7Rti2maqKhWVVVst1tcX19TZ4CmabBtmyJ39vs9Li8vqVBXDNBd14WqqjgejwDexejYto2qqjDP87MT8+LWA/CuLBkAbNum2w9i0SHLMm5ubqg4WPQCFEVBtwxs26abDMMwoOs65HmOLMuoI2C5XCIIAupEEFE9juMgSRJomoYkSSDLMsUEiZ4AESEkSRJ0XUfXdTBNk2KJROyS+BkNw4DNZoO6rqFpGnRdx263QxiGdHtCFDCLJYxt23h8fHy2uFmv17BtG3d3d7wIYIwxxhhjjLEfAS8CGGOMMfbRSJIEWZapBNdxHEzTBODdkkAM+adpgmmalO3fdR2OxyPW6zWGYaBbAVmWIQxDnE4nBEGAqqqw2WwwTRM2mw3atqWhd57nz4bZYikgcvHFTYIkSeD7Pna7Ha6urtD3PUXkiGG8KPCdpgmyLNPAX5IkbLdb3NzcwLIsKuQVnQSSJMG2bfoe27ZF0zS4uLigYbt4e1VVsG2bCpbff3/XdSiKgop3l8sl9SqI4b8Ystd1jaIo0DQN5fwvFgsoigLf96EoCuI4poghRVHoNoUsy9B1nRYDmqYBAFzXpZ/R6XRCGIYYx5G+Z3HbYL/fU+xRlmWwLIsin0TBcdM0iOOYopAYY4wxxhhjjP1wvAhgjDHG2EdT1zVs24bv+zQQF7E1Ysi8Xq9pqD6OI73//Xz/1WqFuq6xWq0of15E/gzDAE3TKM6nbVssFgsAQJqm0DQNjuPg6emJInjevn0LRVFwfn6Otm1xf3+Py8tLOukuBujvn/qfpomG6pIkYb/fY5om3N7eUrfAMAwUB3R2dkaLh/1+T9E919fXyPMcwLuSYNGJIMsyPM971ptQVRVOpxNM04TjOLi5uaHnTsQIZVkGVVXphoOqqgjDEGVZQlEUXF1dYRgGyLJMz79pmrQQeXp6oqgi8dyL3wPvljmi56BtW4zjiLdv38L3fWy3WywWC2RZhrquEYYh0jTFxcUFNE1DFEUoigIPDw+Iogj7/R4A4DgO7u/vKZKIMcYYY4wxxtgPw4sAxhhjjH00TdPQIFvE54i4HRGRIwbSkiSh6zqoqgrDMDAMAwzDoHJaz/MoXqhpGjqVbxgG+r6HrusIw5BOp4vT/GJwr2kaTqcTlRTruo6HhwcavouB9263o+ic9wf2nudBVVUkSUKn7y8uLujk/W63oxsPL168oFsFIh5InH4Xmf+Pj4/wPI8WAK7rou979H3/rCPg5uYGpmk+KxEWz6koEpZlGaqqQpIk6jKwbRu6ruN4PMJxHFqK7HY7qKoKWZZhGAZM04TneRjHkQqR4zhGEARI05TilsTfqaoKURShqirc3NxQD4SiKHQj4M2bN7BtG3mew3EcuuEhegnOzs5wdnYGXdfx2Wef4Y9//ONHeHUyxhhjjDHG2KdD/tgPgDHGGGN/vcSA3/d9uK6LpmkQhiHmeaYcfUmS6Fdx6h4ADb1FXr+IwRGD8PcH4+M4Usa+7/s0HL+4uKDhdhAEFNVTVRUkSaIT6+IGQdd1qKqKYnBELr+4sSC+bt/3uLi4oBP22+2WonpEXr/oJpAkCefn5+j7niJ+9vs9uq6DbdvUlVDXNU6nEw6HAwDg+voavu/T9zYMA6qqQpIkUBSFBvpBEMDzPERRhNVqhXEcsd1u0bYt3r59i67rUJYllTHrug7btrFareD7PoIgwDiOdCNjv99D13UqFBbvE6XPhmFgv99T2bLoGVitVgjDkD626zokSYLj8UixR9M0wfd9nE4nzPMM0zSxXC5/uhckY4wxxhhjjH2i+EYAY4wxxj6asizR9z2KoqCM+nEcqRBYZOqL3y+XS7Rt++xzNE2D1WpFBbt939OfxbBe9AuIfHoRr9O2LQzDgK7rCIIAeZ5TWa343L7vQ5ZlDMOAuq6xWCzQdR3iOMYwDLi4uEDf99jv91BV9dkSQJZlPDw8UB9AGIZ0Mr5tW5ydndFNBkVR8Pj4CNd1AQAvXryg5ULbtiiKAkEQ4PLykh67WEQ8PT3Bsix6nlzXxWKxoAghcXI/yzIa9IseANEPcDweEUURncwXHQmn04kKhcXJ/6ZpMM8ziqIAAOpHEN0K4u+O44j7+3v4vo8kSWAYBn2OIAjQNA3yPEdd17AsixYYlmVhHEeUZQnf93/aFyVjjDHGGGOMfYJ4EcAYY4yxj0rE19R1jb7vKct/nmeK9lFVFYfDAYZhwLIsGjJrmkaFuXmeY7FYwDCMZ5FAdV3TgF6WZbRtC0VRUNc1XNel3P40TbFarSiuRgy7XdeFLMvYbrfQdR2KokDTNOR5juVySTFBkiShbVtcXl5SHND9/T3F++i6jqIoaHgfRRFM00RVVdQbIPoLbNumxyXKgn3fp3Lj9wuERUzSNE0U9yNO27uuS0uWw+EARVFoUQCAFiIA/uy5FKXBkiRBkiRaDohBft/39Lbz83OKaqrrGm3b0k0JcVNjGAaUZUm3IZIkQRAEVAzcNA3KssT5+TnSNIWqqvQcMcYYY4wxxhj7YXgRwBhjjLGPqu977HY7vHz5kgbMi8WCTpk3TQNd12GaJnRdRxzHMAyD4n2WyyWVB4tT+O/HBokhs23baJqGsvZFB0HbtrAsC2EYIo5jrFYrFEUBTdNg2zZUVUVRFLi6ukKappSnv1wuacj/9PQE4F1cT1VVFAfkeR7FDeV5ToN80QcgYoZEifHd3R1kWUae5+i6Dnmew7IsbDYbilE6HA5UCCyG/FdXV/R5RImwYRjYbrdQVRWWZeHq6opuFQzDQM99URRI0xSKotBSQxQBB0EAAJBlmX4vFh6iIFgUJ6uqimmaoKoqhmHAer2mmwy+76MsSwRBgCRJni1EHMdBXdf0c0ySBMvlEk3T4Msvv0RVVT/p65ExxhhjjDHGPkUftAiIogg3Nzf0j8PvGscRb968QRzHP8qDY4wxxtinr65rXFxc0NBbRPnM84y+7+nEuoibsW2blgCicNcwD
MrmV1WVbgdYloW2bemEvfj8RVFAlmUajItomsVigSzLqCR3GAbkeU5ROmEY0sl613WhaRqyLEMURbBtG0VRQNd1PD09wfd9zPMMx3Fo8D3PM66vr5EkCWXi13UNALi9vUWe55AkiW4I+L6PxWKBuq7p+RD9BGLArqoq6rqmJUNRFFBVFW3bwnVdBEGAaZqor0CczBfDfwAUKWQYBi0/2raFruuQJAlFUWAcR2RZhjAMqRNhGAbEcYyzszPs93tEUYTD4YAgCHA8HtE0DQ3/Pc+jWxfH45FKoOu6RtM0ePv2La6urmCaJtI0xfX1NXa7Hf38GWOMMcYYY4x9fx+0CIjj+M+G/P9fywHGGGOMsf83dV3T8N2yLIq4EafTRREuAKiqClmWKepGDOPFQsCyLBowi7LgYRigqirdFqiqCkEQ0BKgLEvYto2u65BlGUzTpD+LhYCqqnRjQNM0uK4LwzCQZRmmaYLjOBRx9Pj4SPE+iqJQgbCiKDg/P0fTNPA8jz5OlmVIkoSu66BpGuq6RlVV8DyPug1UVcXj4yPl/Pu+T7cAxnGk2CRR+quqKmzbps8rOgXETQdJkmjIr2kawjDE8XiE7/vo+56y/kURs6qqqKoKaZoiSRK6BTBNEwDg/v4e0zThzZs3CMMQZVnS59jv9/Tz67oOf/zjH3F+fo55nlGWJRaLBZ6entC2Ld68eUM/+2+++YYWF4wxxhhjjDHGfpgPjgb67uB/nmdst1s8PDz86A+OMcYYY5++0+kESZJoaH52doaqqmDbNuZ5poGyYRgAQIPhzWZDWfwiG19VVRyPR1iWRX+36zrouk63ATzPo8z9oijgOA66rkOapjBNE67r0g2BsiwpuiaOY4rZMU0TeZ4DADzPQ5IkcBwH2+0Wi8UCrutShJGIKgrDEOM4UunucrmkrweAhuGu6+Lm5gbTNCFJEhqY27aNsiwRRRHFF/V9j6ZpnvUrmKaJxWIB4N3/p8VxjLquoWkaNE2DLMswDAOu61LPwjiOdAOirmsoikIDf0mS0Pc93YIYhgGyLD/rXXj/xoHruhRRNE0TJEmCLMtI05Sef/EzatsW3377LS4vL6nP4Xg8Yr/fY7FYIEkSNE3zEV6VjDHGGGOMMfZp+V7RQEmS4NWrV/+7HhNjjDHG/ooURQHDMGggfzgcMM8zDev7vqd4Gl3XaSFwOBzgeR4URcE8z5RXL07iH49Hig2a5xlPT09YLBZ0Ol5VVRpa67qOMAxhGAbyPIfneSiKArZto6oqHA4HWibYto00TSHLMg38bdvGdrvFZrOBpmlQFIWWEaqqwvM8qKpKNyuvrq4gSRIAIMsyGpyfn59DURSKRNI0jU7Nt22Lzz77jE7ii1sOoh9BkiT6/uZ5pqG++P7EAkBEBYnhvcj8F4sNcWNAdDLIsky3IkQBsHi7uH1xOBxwcXFBX1uSJERRRLcbfN9HVVWQJImikIqioI6ANE0RhiGapoGmaYiiCI7j4JtvvuFFAGOMMcYYY4z9CD44GggAbm5usFqtAHAvAGOMMcZ+mG+//Ra73Q7L5ZKG32KQXpYlpmmC7/vQdR3jOKJpGqjqu/+FURQFkiTRjQJFUehWgOM40DSN4oQ0TaOv2bYtLRQMw6DhtjiRn+c53QwQA3RN02hoLcsyHMeBoih0E2Cz2UDXdSoAFssJcZJfnKS/vLzE6XSCYRgYhoE+9u7uDgCQ5zmdjnddF4qiwPd9AKDy5LquURQF3R4YhgFpmgJ49/9mIm5J13V4ngfHcaDrOi0IRJ9AnudQFAWapiHPc3ruxcl+8dyIWxkipmgcR0zThCzL0HUdVFXF09MTdSiEYYg0TeG6LizLQpIkCIIA2+0W19fX6LoOcRxDlmXIsoy2bXE4HHB2doaiKOj2xnK5RFmWP+XLkTHGGGOMMcY+SR8cDfTdnoC7uzu8ePECL1684KUAY4wxxj6YOP3/9PSE8/NzpGlKg/2qqrBer6kwNssyLJdLdF2H1WpFQ2rLsqg82PM8XFxcUMSN6BSIoght2wIA1us1mqah0/XDMNDJeEVR4Hke6rqGaZrY7/fQdR1BECDLMroJIE7rz/OMzWYD0zRRVRXl54slgVhmSJJEp/Bt28bT0xO6rkMQBAiCAH3fAwANx6+urjDPM3zfxziO6LoOVVWhqio4joO7u7tnPQiSJCHPc5RlCcuycHFxQR0D4laFKCHO85wWH2Lwv1qtsFgsaAkBgG5PSJJEEU5lyB/hAAAgAElEQVTiFoA4+Q+AbhbsdjtaGkzThOPxiGEY0Pc9RROdTicsFguYpomLiwvEcUxf8+3btwAASZLw+vVreJ7HHQGMMcYYY4wx9iP4XtFAXAzMGGOMsR9LWZaU9f/w8ICzszNM00Qn8MUJdsdxsFwuMQwDsiyjUlqR9R+GIVarFQ29xTBeLAOSJKHYIAAUbQO8O4W/XC7h+z6apoEkSTTEX6/XUFUVp9OJHpOiKCiKAvM8w3Ecii4SfQCu60KWZRRFQXE5nudBkiQkSULD8dvbW3obADRNA8dx4LouLQbEAkDcEHjx4sWzMt+yLFGWJWRZhq7ruLq6otP7wJ8WLfv9HpZlQdd1Gszf3d1BVVUMw4CqqmjhUJYlLVLef3yqqmK1WkFRFBiGQd0N4vP1fU+3LUQkUNM0qKoKTdOg6zqYpomnpycYhkE3LqIowsPDAxUwq6qKy8tLjONIPyPGGGOMMcYYY9/fB0cD8Wl/xhhjjP3YpmmCruvYbDbY7XbYbDaUG5+mKaIowjiOGMcRWZZhsVjgdDohDENUVYXFYkFlteI0OvBuCA68KyS2LAuapiGOY+i6DtM0IUkSnfiXZZkeizjx7nkenfIPwxAAUNc1dF2Hbds0JBd5+qqqQpIkHA4HWJZFj8H3fSiKgjRN6XbA7e0tZFlGVVVQFAXTNOHq6grTNFExcdu2aJoGruvi9vaWPp9YAOR5Dtu2cXFxgaqq4Lou3RIQtx9Et4KqqnTbQZQXt21Lhcp1XWMYBioJFvFLmqZhsVhA13X4vo80TSlWScQt6bpOf1ZVFdM0wbIspGlKtx2CIAAAuo0gHr+maUiSBDc3NxS7dHV1hbdv39JzzRhjjDHGGGPsh/ngaCDGGGOMsR+bOK0uhuKapsGyLBwOBwRBgK7raLgs/ix+dRwHfd9TDI6ID7IsC03T0HJADOlt24ZhGHRLwPM86LoORVFogC9uFACg+CCxKCjLEpIkUYTPYrGg5YV4nxi2O45DNw/yPKdoHcuy6ObC4XCA7/swTZMidlzXpcXGdxcAdV1TbNH7g30RPdS2LYqigGmaVJjseR4Mw6BeAlH8W5YlFEWhU/iSJOHi4oJuNIhS5TiO4TgOgHe9DKLbII5jrFYrxHGM5XKJ/X6PxWJBZcTDMODp6Qmu61K0kOM4uL+/x2effYbj8YjD4UA9C1dXV6jrGv/+7/+O5XKJeZ6fdTswxhhjjDHGGPt+eBHAGGOMsY8uTVMcj0dsNhv0fY80TbFYLCh6JkkSyq/v+55y5sXHmaaJpmlgGAaiKEKWZTBN81nprBis
i4F3lmVUKHw4HGAYBlzXha7rdNJf0zQ6/T5NE0UNiWG6JEkYxxGr1YoWFXEcYxxHbDYbGrCLGwoi9kfcRJjnGdfX15jnGbIsI01TdF2HpmlwdXUFy7LoJkTTNCjLErZtY55nuK4L27ZpOSDid8qyhOu6cBzn2fC/73vIsoyyLJFlGRUZu65L/QmiNwAALVDE8qIoClqWiLilcRyx2+3olsNyuYSmaWjblm4m9H1PiwOx/LAsi2Kg5nlGlmXQNA2Pj4/4/PPP8e233+LVq1e4vLzkSErGGGOMMcYY+xH8qB0BXBbMGGOMse/j/v4ebdvi/v4eV1dXAN7l0VuWhePxCNd1kec5ZFmGZVkUDbRcLlEUBaZpgm3bdCMgCAI69d62LUXweJ6Hruugqio0TaNT/bZt000AcStALAkAYLfbQdM0rNdrihESpcDiP3FjYLFY0Ol813URxzENvOM4hqZpGMcRmqZhGAYqPBbDdFmWcXd3h9PpBFmWKbZH3C4QQ36xFBEdASKK6Pb2lvoDxPC/KArkeU4Fxnd3d4jjmCKVgD/dfOj7HvM84+npiZ4jTdNgmiYMw4Bt27QsieMYURQhTVOoqkq9DKKH4PLyEkVRwPM8FEUB13WRZRmqqoJt23h8fMR6vabnEgC++uoreg5M06SbCIwxxhhjjDHGvr8PvhEwTRMN++/u7uC6Lr788ktEUUT/cGeMMcYY+xAPDw9IkgRffPEF0jTF+fk5RdeEYUjDaHEy3rIsLJdL5HkO3/ehaRq6rqPegKIoYFkWRQB1XQff9+l0+jzPsG2bMvJlWaYT/6JQV2Tky7KMi4sLlGVJJbe2bQMARQiJQbY4PS9+D4D6DeZ5xmKxwNPTE9brNX3vh8MB8zzTrQJRBGzbNh4eHuB5HkUAifJksRwQvQC3t7cYhgGmadIwvyxLFEVBMUs3NzeI4xhhGNLnn6YJTdNgnmcURQHDMBAEAY7HI9brNTRNgyRJOJ1OdANCLAEOhwNM06QSZXHaX5z+NwwD+/2efk6u66IsSziOQ7FFl5eXOB6P9Ly9ffsWf/M3f4M4jvHVV19huVxiuVz+9C9IxhhjjDHGGPvEcDQQY4wxxj66sizRNA1l2oti3bIsoWkamqaBbdvIsgxRFNEw3Pd91HVNUT7L5ZIif8RSwDAMKt8VtwKqqqKBdNd1sG2b+gR836fcfkVRaKAvyoPFqf33B//iYwA8WwaIP79/c+D9gb8kSZSpLwbowzBQpM7t7e2zvP2+79E0DbIsg+/7uLu7o9P/wzBgu93CNE0URQHbtnF9fU3RRCJmaRxHdF2Hqqqoa0Fk/Ytli2ma9D2LiKS2bTEMA47HIxaLBZqmQdM09L0Pw0CLgHmeEUURZFnGfr/HMAzY7XbwPI8+n7glsNlscH9/D0VR4LoufQ+bzQZFUeDh4QGO46Asy5/4VckYY4wxxhhjnw5eBDDGGGPsZ0EM57uuQ5IkFGOjaRqdQl8ulxjHEXVdU6GuGBJHUUSlte+XAA/DQH0Cy+USZVnC8zy0bQtN02hhIAbduq4jiiIcj0fYtg1VVaEoCt0YeD/GBsCzRUAcx1AUBWEYPnu7iAp6fyEgPsd3B/1932O1WgEA9QP0fY+u69B1HZbLJdbr9bO/U5YlxQepqoqbmxtaDoilQdM0tEjwPA+WZVF80mazwTAMeHh4QBAEKIqC4pM0TcPxeISmaVBVlZ5T0zRpgeI4DqqqgmmaaNsWYRgiSRKsVisqNxa3KkQh8vF4hGVZ+Prrr3F2dka3B3a7HYZhoAgk8fwxxhhjjDHGGPv+eBHAGGOMsZ+Fx8dHaJqGMAxpgL1arZAkCXRdxzRNME0TZVlScXBVVRiGAY7joG1bRFFERcK6rqNpGozjCMuyEIYh9QeIYmFxur0oCui6TrcR2rbFYrGgEmFRqns8HgGA4noAPDv5L+KExJ/Fr+/HD73/NpHL37Ytuq7Der2maKB5nmk50XUd3STo+x7AuxP4TdOgKAqYpombmxv6XACe5f03TQPf9+n34v1xHMOyLHqOxQl/VVUpcmm/3+Pi4oL6AsTHisWAqqrUXTCOI3Rdpz6C0+kE13WpCNn3fbRti7OzM2RZhr7voaoqkiShEmTxOZMkoS6G1WqFoih+glchY4wxxhhjjH2aPmgREMfxsyLgV69e/T++jzHGGGPsQ7x58wZv3rzB559/TkXBIt9eZN8rigJVVakUOAxDyqc3TRN1XSMIAlRVRcXCwzCgbVsURYHFYoG6rukku1gceJ4HVVVR1zUMw0CapgiCAMvlksp2p2lCGIZQFAVZlkGSJCrGFQPrIAienfYXywBRJPzdmwHifVEU0VJALAuAd8N68T4RAdR1HQ6HA2zbxmKxoJP94u+LJULTNFgsFhiGAXVdP/uc4mZAnucYx5EikUR/gKIotHBwHIduH4gbD+JriFsP4uaCLMvo+56+R/H7ruswDAPevn0Lx3HwzTff4Pr6+tlNgTzPqVsgTVMoigJN05CmKXRd/0leg4wxxhhjjDH2qeIbAYwxxhj7Wfj9738PWZbx8PCAq6srOhUeBAFOpxPW6zXiOIau68iyDMvlEqfTCUEQUIa9aZrUJyCKaUUkkKqqFAtUliVc10Vd1/B9n5YBtm0jSRIsFgvEcUyn6EU8kegOcByHFgFVVWGeZ1oIAH8eZfP+TQAAz24DiPd9d0EAALqu00C9bVsa7pumCV3XMY4jpmmCLMuo6xpd19GNivdvJAB/ukGQZRlM06RbFWEYIk1TSJKENE3pOdA0jZ5vMZQX36Npmuj7HrZtQ5ZlSJKEpmkoNkgUDzdNgyAIqNg4iiJ6vpIkQdu2OJ1O1NMgehIcx8Fut0MQBLAs69lzwhhjjDHGGGPswykA/tv7bxC5uowxxhhjP7W/+7u/w9XVFeq6pvgZwzAAgAbyiqLQqfHFYoGyLOlkvrgpMI4jXNdF27bwPA993yPPc8q/d10XTdPANE06/V9VFQ2hRTxNHMeIoohuHViWhSzL0HUdFEWhKJs0TSnmRpyA/+5QX5zCr+uaOgfESfssy2BZFi0F5nl+dpK/rmuEYUg3I8ZxpHz/tm1hGAamaaLFhfjcYkFwOp0wzzMN1l3XxTRNVHwsCopFabN4nsdxxNXVFcUjOY6DYRigqirFKAHA8Xh8VqSsaRr9XixfJEmCaZr0nImYIFmWkec5xR8dDgcMw4AgCGDbNl69eoWyLPHw8PATvxoZY4wxxhhj7C/bL3/5S3ieh//rv/7XD78R4HkeXr58CQD45ptvsFwusVqtMM8zttst/yONMcYYY99b3/fY7XaIoghpmuLy8pKy5cUtgGmaoKoqoihCnuc0UAbw7IZAXddUXns6nbBarej94vS6KBfO8xyaplFkkCRJOJ1O0DQNVVXBMIxnpbmu6yLPc4rFiaKIMvFFTM5ms0FVVei6jpYDogQ4jmNIkoRhGOh7GseRnoOu62hRkec5NpsNDffF+3RdR1mWVBwsegfEwuF0OsFxHARBAF3XYRgGPTZFUSgaSCxRRPa/7/v0vG02GypHnueZooF
Ep4EkSYjjGNM0UfSSWCoAoEigJEkQhiE9pnmeaTHTNA3u7u5QFAUeHx8RRRHdbrBtG1dXV/j6668/zguSMcYYY4wxxv7C/Z//+T/j//jv//3DFwG3t7dQVRXb7RYAEAQBDocDAGC1WiHPc+R5/uM+WsYYY4z9VTgej/jFL34B4F12vqIosCyLInmqqoIkSZTRv1gs0DQNFd4uFgukaQrLsmBZFrqug2VZkCQJSZLQkNkwDBiGgcVigSzLqCOg73uKABKn1sWJ/2maEAQBFEWhcmHDMOjmgPi9KMedpglpmmIYBlxfX9Np+nme6fS/ZVlU6DuOI/q+R1mWODs7g2maSJIE5+fnGMeRTv93XYcoijAMA87Ozuj2QFmW2G63cF0XYRhSz4JYIEzThLquURQFLMvC8XiE4zhYLBb0PGuaRosL8bgOhwNFA6mqCk3TkCQJlsslLMuC53nY7/eIogjH4xFRFKEsSxiGgbZtUZYlfN9H0zTo+x5JktD7RAH04+Mj1us1oijCdruFruuwbRtpmqKua44GYowxxhhjjLHvSfN9LL/++sMWAeK026tXrxDHMe7u7gC8+0e7rut04owxxhhj7Pt4fHzE27dv8fnnn8P3fRosW5YFXdfp1HsQBFTiK+J+lsslDocDPM+j0/KO49DJekVR6G1i4H06neikuhj6i9PrItYGAGXy7/d7OI5Dg/OmaTCOI308AFpWDMMAwzBweXmJoiggyzIV/vZ9j/V6jbIsMU0TRQaJxxDHMT1OMUBvmoZO/4tBfZZlcBwHdV3D8zx0XUen8kVEUNM0KIoCXdchz3Poug7HcehxD8MAANQrIKKWxEJFDP9d16Uh/nq9pmikp6cneJ5H/z+YJAl83wcA+pmJGKeu63B5eYnj8QjLsmAYBrbbLQzDQJZliOMYt7e3SJIEcRxjvV7j/v4elmV9nBckY4wxxhhjjH0iflBZsLhinuc5oij6sR4TY4wxxv5KvX79Gr/+9a8patAwDOz3eyyXSzrBLkpn9/s9VqsViqKA4zho25aWAZZl0YDctu1nHQHvlwiLGwRhGCLPcxiGgSAIsNvt4LouJEminoKqquB5HizLgqIoqOuaTscD725Jio9/fHzE5eXls4XA4XDANE04Pz8H8K4c2DAM3N/fw3Vduk0gcv7F6f6iKLBareC6Li0S6rpGWZZ0an61WmEYBmw2G7o9IHL38zyn58MwDCoZ1jQN4ziiKApkWQZd16HrOt2OaJqGSpZF9JKmaRiGAbIs0y2AaZqog6Cua0zTRM/1NE1o2xbjOOLh4QGu6+JwODzrYbi4uMDpdELTNAjDENvtFpvNBnme49WrVzAMA13XfZwXJGOMMcYYY4x9Ij5oESD+ESaG/rZtI0kSeJ6Hq6srjOOIOI5//EfJGGOMsb8KDw8PmKYJYRii73t4nodpmqBpGp3qd10XwzDAtm060d91HWX4LxYLqKpKA39Rxvv+yf+qqujvLRYL5HlOJ/DrukYQBDgej7i4uIAsy5TJL2KCRI7+4XCguKDj8UhZ+jc3N3RjQeT/iyijvu9pQC5Kh13Xpccqhv1iKA/8qTy4qiqUZUnLCMdx4DgO+r5H27YUUSSWBI7jQNd1mKZJ/QAPDw9wHAd5nsO2bWiahru7O3RdR/FI78cXxXEMWZYhyzLSNIUkSbTwyLKMOgZEDJNYyIj+haZp6GchblSkaQrTNFHXNVRVxdnZGZ6engC8WzY8PT3hxYsXePXqFe7v7z/mS5IxxhhjjDHG/qL1WYbjf/pPUAD8t/ffsVwu6WTbd4nCt9VqheVyia7r8Ic//AEXFxewbRsPDw+o6/qnePyMMcYY+0T9wz/8A8UP2rYNRVGQJAlF8iiKgsPhgDAMoes62raFYRg4Ho9U/DuOIy0MxEn3LMvg+z7quoZlWRiGgQbb4qaBiPdJ0xSGYVD8jWEYNGS3LAt5nmOaJjrJX5YlleqKMtxpmnA4HGDbNv2naRrSNKUT+MMwwPd9DMOArutQliXqukYYhrAsixYAdV3jdDpRNI94v+gVyPMcSZJAlmUarJumiWmaMAwD3WBIkgS2bcPzPLRtS4c7FEVB13U4Ho+QJAnb7RbjOKJtW2iaRkXKQRCg73ssFgu4rgvbttG2LXRdp5+Npmk4nU4wDANFUcDzPEiSRJ0PAJ6VQVdVhd1uR5FG4jZCHMd06+LVq1d0S4QxxhhjjDHG2P8/v/zlL/E//uf/RPBf/suHRwPFcfxnp/5fvXqFV69e/WgPkDHGGGN/vXa7HaZpwjzP2O/3OD8/h2maUFWVImdEXr0sy7AsC1VVPYv5kWUZbdtSjv/pdEIURTSY7vueyoINw6DT8afTCeM4QlVVBEGA0+kEAHBdF1EU0SDedV0arkuSBOBdubHoLhCHKkQef1VVAP6Uwy/LMlzXheu6eHp6wjzPaNsWvu9js9mgaRrM80zLARFL9PLlS1oytG2Lqqoo0mixWGC5XNL7x3GkCCHHceB5HsZxpF83mw0tIOZ5RpZl0DQNh8MBhmFAlmW6XSEKhFVVhW3b9DXiOKbnYRxHAHjWTTDPM4qigO/7FFnkOA6OxyOCIECWZdhsNnAcB4+Pj1BVlYqTxSLDNE3uCGCMMcYYY4yx7+k3v/kNfvOb3/ywjgDGGGOMsR/bV199hV//+tdwXRdZluHp6YmG8GKIL4bDIme/6zqK9BFltaZpoqoqWJaF5XJJS4C2bak/QNwM8DwPSZJQKbEsyzQYF7cSyrJEEAS0kDBNE7Is43g84vz8HJIkURSQuEkgBuCKolCpruu69H2IiKOu6+hzzPOMJEkwTRMcx4EkSXj58iWAd70CXdehqiqkaQpd1xEEAZbLJYA/DeHLskRRFPTYRb+Aoig0pBdDfNGxoCgKPM/DcrlE3/d0S0AsJGRZpvgesfwQHQgiUkhVVfo64zhC13UqCRYlx/v9nn5Otm3j9evXME0Tm80G+/2eOgjevHmDMAyhaRotUhhjjDHGGGOMfT+8CGCMMcbYz8pvf/tbfPnll/jFL36BMAxRliVUVf2zWwFJktCQWdd1SJKEw+GAxWKBsixpyP5+R0BVVXBd99kSQMT0eJ4HRVFQVRXmeabT7yLH3rZtxHGMIAhgWRaSJIEkSXRiXuToT9ME3/chSRLSNKUB+NXVFQAgSRKoqoq6rlHXNRaLBcIwxDiOFAUklhsifkdE/IhiX03T4Ps+3QAQp+jbtkVZlijLEq7rIggC6hwQC4SmaWj4r+s6bm9v6bR/3/fY7XbwPA+73Y7KkFVVpeG/iP8R37voVgBA/QHv35J4fHzEarXCOI64ublBlmWwbZtimMRNga7rsFqtqJPg5cuXOB6PeHp6gqry/7IyxhhjjDHG2A/xQf+qiqIINzc3lO/6XeM44s2bN1wYzBhjjLEfJMsyZFlGefv7/R5RFOF0OsF1XXRdB8/zkOc5Re30fY8oipDnOTRNg2VZ6PseSZJgtVrRAqHv+2cdAYZh0MBflmXqBhAlwWIYLk7ei7ggUf4rFhDTNAEAzs/PqShYDNfFY5YkCaZp0o
n7u7s7KuEVJ/VFjI+I9+n7HlVVoaoqSJIEz/OwWCxowSAG6d+NEPru+0TJMADc3d1RvBAAei5kWUae5/R9S5KEKIrg+z5kWcbpdILjOLQYEM/ZPM+I45hubojOqb7voWkaHh8fsVgsEMcxHMehRQQAulHx+PiIsizpc4jfi5JjxhhjjDHGGGPf3wctAr7bD3B3dwfXdfHll1/+6A+MMcYYY3+90jRFnueo6xpffPEFiqKAoigwTROHw4FOnW82GzoJb5omiqKg/P6+75GmKQ2loyjCMAyoqgqqqtKNAlH2axgGRfKEYUi3EA6HA5bLJd1G0HWdMvTFcFzTNPR9D8/zMAwDxnHE4XDA5eUlnY5P0xRN06DrOlxcXAAAndbXdR1d11E3QRzH0HUddV0jyzIYhoGLiwscj0eEYUgRQKIDQJywf/HiBX1ecQOgrmuYpombmxuK63m/hLjrOhRFAcMwIEkSbm9vKRZIPE+SJOF4PFJBMQCcTieKAhKFy6LouG3bZ7c1mqZBXdfwfR9N01AsUFVV8H0fT09POD8/R5IkdOtCRBJxDxVjjDHGGGOM/XB8z5oxxhhjPzu///3v8fd///c0eJYkCXEcIwxDWJYF0zTRdR2yLMM4jvB9n06ZJ0kC13WhaRrl3S8WC/R9jzzPEYYh3Q44Ho9UGJymKTzPg6ZpOB6PUFUVuq5juVxCURS6fSBOtIscfAC0fBADcxGDczqdYBgG5eZrmoaLiwsURUGD7qZpsFgs4LougHdDfNd18ebNG5imicVigSAIkKYpNE3DMAxUFGyaJiRJguu6mOcZfd9TSXBd13AcB9fX17ScGIYBcRzD8zwURQHTNKEoClRVpTx+cXMBAEUktW2Ltm0xzzOAdyXI4uNVVcXpdKIiYE3TKCpIxBoZhkHxQ5qmYZomBEGAqqowjiOapsHDwwPW6zVkWcZ+v4fv+9B1HZ9//jnyPP8Ir0LGGGOMMcYY+3TwIoAxxhhjPztfffUVAEDTNLx69Qqff/45iqKg6J7j8YjNZgMA6PsedV3Dtm06MZ/nORzHobx9cTtgtVrRAFwU2eZ5jnme4boudF2n91uWBUVRUNc1JEmCbduQJAlVVcEwDDw+PlLuv7gVME0T5nlGEAQYhgGO42C322GeZ5yfnwMADcYfHx9pgWBZFqqqelYGvFwuEYYhRQ55noeHhwc0TQPLsmgpISKQxEC9LEt4noebmxu6OTBNE90OsG0blmWhaRoEQYC2bbHZbNB1Hfb7PXUhiOcoCAK6IRAEAcU0iYWB+FgRJTQMA/b7PTabDZ6enhCGIZ6enuD7Pg6HA6qqoiXAixcvsNvtoKoqhmHAbrdDWZa0LEmShH4GjDHGGGOMMca+P/ljPwDGGGOMsf+V/X6PcRxhmibu7+8hyzKd1Be5/Xmew7IsWJaFrutosO95HsXStG2LNE0RRRHdGhA5/SLeJgxDGIZBQ3/XdVGWJdq2fVYULGJ8TqcT7u7uaIBtWRb2+z0Mw8BisaDHlmUZVFXFzc0Nnf7PsgyHwwEXFxfwfZ8ii2RZxuFwAABcXl5SPM40TSiKAofDAX3fA3h30l4sG5qmwel0opLd29tbKh/u+x5lWeJ4PNKNBFEgfHZ2RiXDIl7J933UdY3VaoXlcgld1yHLMiRJouWDYRg4HA4YhgH39/do2xZN06BpGvp6Ir5IxAO9v1jRdR1pmqJtW3z11VcwTROr1QqSJOHi4gKqquLh4YGel9evX8M0zY/5UmSMMcYYY4yxv3h8I4AxxhhjP0u/+93v8Ld/+7c0+BcFv/v9HrquQ1VVGIZBkT2WZeF0OsH3fSrGbdsWtm1D0zTqD2iaBpIkUdSO67qoqgqyLEPXdWiahjRN4boukiTBZrNB27ZUFDxNE2zbRt/3tJwYhoGy9cWyYZomyLIM3/cxDAOV5/q+TwPxqqpomF/XNW5ubgC8iwcSZb5lWdIg/Pb2lm4NjOOIqqrgui5UVcXZ2Rn9XbEE6PueHqtYLIiIoMPhQPn9lmXRgmCz2WAYBkiSRJFGokgYAJqmoV6F9XoNXdehKAqSJIFpmpBlmR6vLMt0U0DEF61WK5RlidPphNvbWxyPR7pFsd1u8fLlSzw+PtItgrqu8ebNm4/wCmSMMcYYY4yxT8cHLQKiKMLNzQ0URXn29l/+8pcA3v3D882bN88KhRljjDHGvo9/+Zd/wa9+9Sus12s0TYOvv/4an3/+OZ3aF1n0kiRBlmWkaYogCJDnOS0DHMd51g0ghvRZliEIAqiqSkW5Iu9eROYcj0fYtk3xQiKmxvd9yLJM/QSqquL8/JxOxMuyjGma4HkeACDPc+oSuLy8xDzP8DwP4zgiSRJaDNzd3UGSJBRFgaZpUFUVLMvCPM+wLAuGYaDrOir5NU2TFgZBEKDrOgBA27ZUzOv7Pp3+F8sBUVi8XC6R5znW6zX1MIzjiDiO4bou8jynJUwYhlQWvNlsqDhZPP8iUmgYBooRmqaJipYBYLvdwvM8nE4nmKZJXyCwSacAACAASURBVMN1XaRpSoXJX3/9Nd1CEHFJopuAMcYYY4wxxtj380GLgDiOecjPGGOMsZ/M4XDA3d0duq6j0l0xmBddAWKQ7Xkejscjlsslnf4X8T/L5ZKG/2maUqlt27ZwHAeHw4FOzIuhdBiGUFUVWZZRHr+iKCjLEvM8U6GuiOhRFAXH4xFnZ2eQZRl5nlOBMPBuCSBuIUzThLquMU0TXNelU/xieG4YBuZ5fhZjJIb44nkAgJubG8rpFzcFPM+DruvwPA9ZltEiYZ5nDMPw7GaApmkUC5SmKVRVxXq9xuFwwHq9hqIoFEckTvnP84w4jqlEWNd1zPNMywkRBSR+HYYBYRhSR4G4kZEkCWzbxm63o+4GsXR4enpC13WwbRtN01BPAmOMMcYYY4yx74c7AhhjjDH2s/W73/2OTrqfTid8/fXXcByHYmpEua+iKFBVFYvFAn3f0xJAZOIfj0f4vk+3BQzDQNu2OJ1OyLIMi8UCuq7TKXfP81DXNcqyhK7rcByHcu3F17Ztm4qJT6cTRRSJU/siS98wDARBQEP/0+mEeZ4hyzJevHiBqqrQdR1OpxNl78uyjJubGxr+i0iiYRgAvFsAiJP2wzBQ/JEYpK/Xa7p5UJYlhmHANE20mBDD//dz/ZfLJcZxxDzPtKxIkgRd1+Hx8ZE+VpIkep5lWYYsy1itVs8inDRNoyWGZVnI8xymacJxHIp0AoAwDOk2xzzP0HUdSZLAcRx4noenpycMwwBV5TRLxhhjjDHGGPsh+F9VjDHGGPvZ+ud//mf80z/9E+XKq6pKp+ffvn2LKIogSRIN7YF3g/EoiqBpGpqmgWmaCMOQ+gNEbE2WZdB1Hb7voygKAO+G++/HBSVJgvPzc7RtiyAIsNvtIMsyzs/PIUkSyrKkyB7LsqDrOg6HAw3Sb29vAQB1XaPve7RtSyf9RWyRaZp4eHiA67qQZRkvX76kE/uKomCaJhrQX19fAwC9TXzOYRhwdnb27GPFf+v1mpYIfd/T462qCqvVCsMwUC+Aruvo+
x7TNKFpGrRtCwBYrVbQNI1uBwCgPoB5npEkCd0QEMsQ8T1kWUaFx+JWxW63o0Lmm5sblGVJPwPP8/D27Vus12s4jgNJkpDn+U/2mmOMMcYYY4yxTxEvAhhjjDH2s/bb3/4Wv/rVr+jUf1mWODs7g2VZ6LoOqqpCURQq/RUdAJZlwbKsZx0Bohg3SRKK/qnrGrquwzAMKIpCnyOOY/i+j77vEccxFRSLAb6I8bm+voZt27QYEDE8oiQYeBdx1HUdgiDA9fU16rqmU/nzPENVVeoUGMeRBvVieH59fU1D/mmaKCooDEM6XT8MAy0PANAi4P2Pj6KIbgZsNhuKWlIUBXVdo65rDMOAxWKBsiyxWq1ouD/PMy0C9vs9NE2j76+qKjq1L5YDm80GT09P9Lxrmkb9Dqqq4nA4wHEcWgp4nofHx0cqFC6KApZl4ZtvvkFd1z/pa44xxhhjjDHGPjW8CGCMMcbYz9of/vAHXF5e4osvvkBd1zSQliQJrutCVVV0XQdZlmlwD7w73d91HdI0RRRFKMuSlgWLxYJKccXJ9qZpqBNA13Usl0uoqoo0TWGaJiRJomLfw+EAALi9vaXyW1FoK072F0VBp/Z1Xcfl5SUkScI0TVBVFfv9Hq7rUvRQ3/cYhoGy9C3LerYAEJ9LLADeLwsWSwAREyS+jrgFsFqtni0GxK0BMfw3DAOu69KNiGEYKEJIRAKdTidomgZd16FpGpUmF0VBBcL7/R4XFxdIkgSSJMGyLKRpSj0OooPh4uICpmnSc5FlGZU0F0XxLH5puVzCcZyP8+JjjDHGGGOMsU8EdwQwxhhj7GftX//1X9F1Hd6+fQtZlqHrOtI0hW3bFF0jsvuPxyOdsG+ahgb6WZZRFI3jOOi6Dr7vI8sy5HlOfQJZltGQu67rZ3E1vu+jqipkWUZZ+13XUX9AlmXwPO9Zl4AkSZAkCWEYou971HVNnQI3NzcIwxCO46Cua5xOJ+z3e0iShMvLSyoh7roOZVnS9yaWHV3XoWkaVFWFsiwRBAGiKKLnpa5rhGGI1WoFALRAqesaVVVR4XAURTSgn+cZx+MRbdsiz3OKPhJdCVEUIQxD+tmkaQpFUSgGSJIkbLdbdF2H3W6HqqowTRNev36NsiypKDjPcxRFQc/pixcvsFgskKYpLMuCoijU2SDLMt08YIwxxhhjjDH2/fCNAMYYY4z97N3f38O2bSrh1TQNeZ7DsiwMw4CmaagjAHhXIizenmUZoihClmXUEaBpGo7HI0zTpAJf4N3AX9M0OpWuqiosy0Icx1RSrKoqdF2HJElIkoTeJqKAgiDAfr8HAGw2GwCgGwR1XcP3fRryz/NMp+8lScLt7S2d5Bc5/XVdw3VdKgKO4xiqqtINgvPzczrlL0kSFEWhYf00TQBANwHKssRisYDrurQImKYJbdvSUqUoCrp1IHoB1us1DocD5nnGfr+HZVm0UBCxP4qi0IJELGy6rqP+BPG9A++ihTzPw+vXr2GaJrbbLaIowmq1wps3b2CaJq6urvDw8ID7+3tomva/+yXGGGOMMcYYY580XgQwxhhj7Gfvt7/9La6uruA4Dg3OZVmGpmmI45jy9tfrNZ1QB97l1YtBfxAEtAR4vyOgKIpnMUN1XUNRFBiGAUmS0LYtfN/HdrvF7e0tZFlGWZaY5xmGYcC2bQCAoig4nU6Uwe+6LnUJmKaJrutwe3uLeZ5pidD3PcZxRBRFtNQQBcBVVdFjEgP0aZqwWCyelQGLyB0RpwOAMv1FRJD4T5ZlTNOEeZ7ppsEwDNST4Ps+LMuCpmnUDSCWErIsI8sylGWJqqooIkgsTxRFQZZldNtCdDqIRQnwbkEjfi5RFAEA3RL49ttvMY4jbm5usN1u8dVXX8EwDCwWC74RwBhjjDHGGGM/EC8CGGOMMfaz9/XXX6Pve+x2O9zc3EDXdTrFnmUZxnHEcrmkIbz4VQzKRQmwaZpIkgTL5RJJkkDXdSiKQkW9dV0jyzKcn59TAe5+v4eu67i7u0NZltA0DbIsU29AnudQVZVO3wdBQCf8D4cD6rrG5eUlFRcDoILecRxh2zad7u/7nuKLNE1DEAQAQB0B4nsSQ35xql8U9IrvXfxZ3AQQv0qSRIuGtm0RBAE0TYNt22iahj5uu91C13U6jS8WI4Zh4ObmhkqRNU1D13WQJAnH4xG6rtPNDdFbIJ7Xvu9RFAU8z4PruiiKgm46xHEM27ZxOp2w3W5xc3ODh4cHnE4nKoNmjDHGGGOMMfb98SKAMcYYY38Rttst5d0rioLtdgsAsCyLTvYDz0+d7/d7rFYrGsCLE+aHw4GKghVFodPveZ7TcNswDMRxDMMwKIrHMAw69V9VFTzPg2maOBwOuLi4oIH4+0W4t7e3iOOYyn3FkFx8L9M0IY5j1HUNz/Og6zrCMKQlhugJ6PueSnNFfNAwDFiv189ikcQCQJimCX3fo2kadF2HYRiom0DcrpimCV3XYZom5HlOSw5N03B1dYW+7+k5ELcxxMLANE2KU1IUBZqmYRgGKnG+uLjAMAz0d5Ikge/7OJ1OqOsaZ2dnWK1WuL+/RxRFqKoK33zzDYqioM8hYosYY4wxxhhjjH0/vAhgjDHG2F+Ef/u3f8M//uM/Yr/fY7PZUJSOiAgahgGO46AoCliWBeDdSfTD4YD1ev3stL1lWbAsC1mWwXEcKIqCJEnwf7N3Jz1yZdf16Nft+zaa7DNJutS6AAMyDBswNPPQA/sbvS/lmQeG5QY2IAmCJ7KoKrOSTWZ0N27cvr9vQJxtpv2A90p/vCLI2j+AYDEZmRkZjIF09tlrBUEARVGopFaWZfpYlmWYpgmO41BHgBgK3NzcUMSN67pIkoT6AaZpQhAE2Gw2VOYrugHEQb+qqjBNE0EQ/D8OAFar1ZNIITEAAN4f/ItNBQAwTZNigsZxpIP0OI4xjiPyPKdtgqZpsNvtaFPCtm0oigLf91GWJdbr9ZN4oaZpME0TTqcTDMOg56woCrqug+u69LoBoM9N0xSLxQLTNNHgwnVdnJ2dYb/fYxgGuK5LHQei2HmeZ+pEYIwxxhhjjDH2h+P/V8UYY4yxT8Lr169xOp0wDAOyLEMYhpBl+cnt87quYZomTNPEN998gyiKUJYlttstoiiiA39xcz+KIqRpinmeEQQBjscj3YY3DAOe5yFJEtoKME0TqqqiLEtM00SxOuM4Qtd1OqS3bZtusYub+7quwzRNVFVFN/r7vkccxzBNE/M8U0SQGAKIrYEPiW0BMRiY5xnL5RKSJAF4X/Yrhgld19HwQQwXZFlG0zSoqgqn0wmu68LzPMRxTDf/RS9A13XYbre0ZSGifcTWgigTVhQFlmXR89lut1gsFkjTlLYt3r59izAMaRtD/Dv6vo8sy6DrOsqyxDiOOJ1ONJSwbRvv3r377t5ojDHGGGOMMfYZ4kEAY4wxxj4ZDw8P+NnPfkaH6uKGvyzLaNsWnufh8fERq9UKuq5Td4A4NBcH7FEU0baA4zh0wO84DuZ5pi2Buq5h2zYsy4Isy6jrmg71NU2DJElomgYAqNRX
jLv/xLLJdL5HlOkUCn0wn9fh9FUWAwGGC1WkFRFPi+jzzPYRjGg2x/kRU/m83wySefQFEUuK5LpbaiDBgAFEWhXP6zszPqicqyDE+fPqVt/fV6DU3TMJvNUNc14jjGYrGAqqq4vLyk3wvDEGEYYjqdUvSNiPbZbDbkSHAcB1EUPdhqv7y8xH6/h+d5P+VIUBQFSZJAkiTouo66rmGaJgzDoFig5XIJSZIwnU4pLkgcQxT+CrFExAzleU7xQEI4EM8Vw///7RwQwox4XFzL4XCIpmnIxSAG/WJQX9c1bNum4l9RDOw4DpUyi/MTkT1CRNE0jcSCzWZDr2lZFqIowng8JreAqqrY7XbUldC2LQlNuq7DsixomoY4jun70u/3oWkaPM8jUenDDz/E559//mXfCgzDMAzDMAzD/AwoAP7qzR8EQYD9fv8bOh2GYRiGYRjmZ2W32+E73/kOfvd3fxe2bcMwDLx8+RKKogB4vdk+Ho/x8uVLyLJMG/77/R51XZNY8OaAVwzafd+neKDT6YTZbEZDbtM0aUB/d3dHm+d5nsN1XWRZRiJEGIY0kI/jGOfn56jrmjb17+/vMZ/PydkgegjW6zVGoxH6/T5M00QURQ9y9UW3ga7r2O12NDy/ubmBpmnIsgy73Q5pmmI0GsG2bZimiSRJ6H2I4bkQPESEjzj/8XhMW/oi5z/LMgRBQH0Mb/YHiOdNJhMSFUSEkhARBoMBbNum/oa6rimWKE1TWJZFrg7gfzoCyrJEGIYwTROappFQIISAw+EA27apcDlJEhIwHMeBZVk4nU7Y7/eYTqeo65ocFqKjoNfrUe+BOL+u65CmKSRJQpZlJDgJl0KWZdB1HVVV4ezsDP1+Hy9evPjybwaGYRiGYRiGYX6K999/Hx988AEAFgIYhmEYhmF+6xFZ7SLmRtM06LqOJEkwGo0oXqeqKuoFcByHSoO7rsNkMsHz58+RpimGwyEURYGmaVQUK1wFSZJgPp9D0zQsFgvUdQ3f92nz/nA4QFVVjMdjiifKsgx1XWMymcA0TdR1je12i6qq8Pbbb9MGftd1WK/XiKIIt7e3Dwb3bdtClmWcnZ1B0zQSM5IkgaIomM/nGAwGFGG03W6haRoV3wIgkWO1WuHs7Ay2bcO2bcRxjCRJaNN/MpmQI0LXdWy3W8RxjPF4TPFJYoAvxIssyzAej+E4DokDIo7ndDpBVVVMp9MHWf2KoiBNUxRFQRFBpmlCkiRomoa6rqFpGna7HQzDQJqmFBG02+3gui5UVcV2u0Wv16OYJU3T0O/3SSAIgoBigURUkRAuRDxQ13U4nU4U+QO87h4QG//iddM0xXg8hmEYVOrcti0mkwnSNMVgMMBoNMKzZ8++/BuBYRiGYRiGYZgHvCkEcDQQwzAMwzDMbzkffPABLMvCN7/5TWiahiRJKGe+qioaTouhsRhSj8djlGWJq6sr1HWNKIrQ6/VoQPzmcSaTCRaLBSzLIvGgqip4ngfgdVRPURQUZ6MoChaLBaIogqZpuL29pYig7XYLAHj06BGJEqInIAgCeJ6H3W5HZbphGML3fSocPhwOtBUvCoRFdM96vYZhGFRoK85HRCG1bYunT58iDEPYto22baEoCgzDoPcjnAdvbuvf3NzQYL0sS3qsrmucnZ0BeB2pU9c1iqKgOKXRaISLiwvqa2ia5kE/QFmWtKGv6zpFE4nPzjRN6LoOTdNwfn5OTgbbtqGqKsIwpOLe4/FIPxfRQb7vI01T2u4/Ho9UXCzLMuI4hmmakGX5QVfAmwXKdV1TqTQA3N3dUU/BarUit8d7772H9XoN0zTR7/fxz//8z9wbwDAMwzAMwzD/l8COAIZhGIZhmK8AL168wKtXr/Dee+8BAEW8jEYjLJdLyLIMy7KQJAnl0otBv67r+PTTT6nA13VdGhjPZjPqBACAs7MzqKqKly9f0ia7KCIWsThiMCyy6kejEZXSnk4n2LaNyWRCIsB+v6eCXt/3sd/vYVkW1us10jTF5eUlXNeFpmmIooiy9HVdR6/Xg2EYiKIIRVFAVVUoioJ+vw/bthFFEYkEnuc92PTf7XaU2z+ZTOA4DhURx3FMkUdvlvduNht6b+I5YmguonOEq0K4Bw6HAwzDQFmW1DHwZsGwECHKsqRYIyGCaJoG4LXQst/vYZomlfmK9yrKggFA0zQcDgdkWQbP88il0TQNZf6bpgnTNMmpEQQBFTsLV4AoeT4ej7AsC5Ik4XQ6wXVdijEaDAYUCdS2LeI4Rr/fp2Los7MzLBYLZFn2Zd8ODMMwDMMwDMOAo4EYhmEYhmG+ksRxjM8//xyDwQCO49BG+n6/pwH48Xik4bGIebFtG8fjEZ7n0TAaeD30v7+/pyHvdDqFpmkoyxL7/R5BENAAWgy3FUXBcrmkrf+rqysAoPJfTdOocNi2bdoY77qORABd17FarTAejzEajWCaJtI0pcgfx3HgOA7F5ZRlCQA07LdtG2maoq5rSJJEvx+GIZ2/2Ip3XZe25UXPwHq9huu6JACI7f+yLEmEuLi4IIEkTVMkSYKyLKk/QMQ0idggRVGQ5zmCIKBBu3AQdF1HLgLHcSiOp+u6B/0AlmXBMAy0bQtd16Gqr829h8MBcRyTkGFZFnq9HjkYXNcl14UQZ0R5sm3bFB1l2zayLKPvAvA/xcHifEWhsLjWmqZhvV5Th0S/38dgMMBwOETXdXj77behaRpevnz5a//+MwzDMAzDMAzzEI4GYhiGYRiG+Yqy2+1wd3cH3/cxHo9RVRXatoUkSQ9iX5qmged5UBQFp9MJbduSeHA8HtE0DXzfR57naJoGjuPQYD3PcxoIy7JMA24hAhRFAcdx4Ps+kiSBYRi0LW8YBnq9HtI0xWKxAPBaLCjLEtvtFqZpYrFY0La96AlYr9cAgOvra8iyTAW2sixjtVrh6uqK4nFEwe5+v8fl5eWDIfxms0HbtpjP55AkCZIkURGxyPZ/+vQpXc/9fk+RR7quYz6fU3yQrutYLpcoyxL9fh+SJKEsywfFw3mew/d9ely4AIQYURQFTNNEEATUHyA26oXg4rouzs7OKBZIvL8oimhDX7gCTNOkPx8OB3ieR24CMbiXZRnL5RKKokBRFIzHY/psh8MhxRiJyKTtdkvXYL/fo9froa5r3N3dwXVdcm84joP7+3v0+30cDgeMx2Msl0u89957sCwL//iP//jl3xAMwzAMwzAMwwBgIYBhGIZhGOYrx/e+9z2oqoogCPD5559TEW/XddA0DXEcQ9M0zGYzqKqKxWKBqqoQRRGGwyGyLMPV1RVlwVdVRQWx6/WahAVN02iYrSgKuq6jTfN+vw9ZltE0DZbLJXRdpy3x/X4PSZLQ7/ep8Pb+/p6ONx6PacAdxzHW6zW6rsPt7S0OhwPFEW02GyiKgsePH2Oz2VAc0OFwQNd1uL6+RlEU9JqapsEwDEwmEyoZbtsW2+0WTdNQSbDY0q/rGoqiUOa+KPSt65pidYQ4IEkSADyIGxJCy3A4pNz/NwUAUUwsHmvbFmVZomkadF2HsixxdnaGPM9JtBDX9
HA4oNfrQZZl6gjYbrfo9/sPRAAhGDiOQ6KPYRgYj8cAgPV6Tef6ZsFxXdcYjUZQVZWEHyFg9Pt91HWNNE3R7/eR5zk2mw2CICA3x8XFBZbLJd599108e/YMo9EIQRDg29/+NrlFGIZhGIZhGIb58uBoIIZhGIZhmK8gL1++xA9/+EPc3t4iSRJYlgXLslAUBfUD9Pt9KhCezWaQJImy4k3TxGazoSGwEATEdriIvymKgnoDXr16hdPphNlsBkVRKFc+jmMYhoEgCNC2LY7HIwBQvn+SJEiSBG3b0ga/yMIX5bOXl5eIogi2bSMMQ+oDUFUVmqbBsiyEYYimaWBZFjzPgyRJ2G63VJSs6zr6/T66roMsywjDkLbsr6+vYZom4jhGURRI05Q2+sfjMRzHIZEhSRIcj0fqAWjblob4AGjTXrgahNNAuCLKsqQoHhEPtNvtIMsyiqIgd4AoFG7bFrvdDoZh4HA4wLIs+L5PLg9ReAy87ggIw5CcAEIEUFUVcRzDsizouk5Chud5SNMUeZ6TW8C2beR5TpFAw+EQuq4DeB0JlGUZRTLt93vs93uKGBLFxOv1GoPBAIvFAq7roqoqGIZB74f/vcEwDMMwDMMwv364I4BhGIZhGOZrQFmWWK/XGI1GGI/HUBSFBriiPFhk4mdZRqWxZ2dn2O126LoOg8EAnuc9iPZxHIey7yeTCRRFoSHwzc0NANDg+XQ6wfd9yLJMpbGn0wnz+RzA616D1WqFruswmUxQVRUsy8LxeETXdbAsCwBom3+320GSJNq07/V6VDYsyzIcx4FlWcjznDb2ZVmG53mwLAtZltGAX2zZi5gjsYm/Xq9h2zZ6vR5s26bt/KIo6DlXV1f0nt6MARLCQa/Xe/A88Xuj0QiO4wAAqqqi54moo/F4TG4EEc+T5zmqqoLjOCRkiH4A4XQ4Ho8wTZOG+bquPxABkiSBaZp0zrquw7ZtnE4nKlE2TZNeazAYUIRUWZZUGizLMkUwDQYD6nwQYkGSJJjP59A0Db1eD67r4nA4YDKZwDAMTKdTXFxcoG1b3N/ff6n3A8MwDMMwDMN83WAhgGEYhmEY5mvC6XSiKJ0gCHA6ndA0DVRVhe/7WC6XkCQJvu/TMHo4HGK5XAIADYQlScJ6vaYNfDEQN02TNtYty6LBc5ZliKIIvV4PACjXHwCm0yl0Xcfd3R222y08z8NwOITrugDw4O+ig8EAbdvidDohTVMoioKLiwvouk4OAHFOvV6PioXFsFps7cdxDAD0PkSRsm3biKKIXABlWWI+n8OyLGw2G3RdhyzLEMcxkiShIb9wAGy3W3JXiIJhMcgXwsCbrgIxXBcCQFEUGA6HsG0bvu8D+B+BoCgK6leYTqfoug7b7ZYG+KL4WJQDA6AOCBGhJOKVTNOkkmYxvC+KApZlYb/fwzAMNE1DBcwi7klcM8/zYBgGVFVFXdcYDodUnCzLMl6+fAlZlum7I9wI9/f3CIIA6/Uauq6jaRqKZJrP53j27NmXcBcwDMMwDMMwzNcTLgtmGIZhGIb5GvHRRx9ht9vRoFpE5QBAFEVUZCsG+UVRAABtj7uui91uRyW/AJDnOS4vL6FpGpX8DgYDKIryYINcbK+naQrDMKiM9nQ6oa5rWJaF2WyGsiwplqhtWwDAcDjE6XSiYbaqqlTUG4YhbNvGarXCfD6H4zgUd7PZbCgCpyxLqKpKhbaSJOH6+hqr1YqKlA3DwN3dHQaDAeXfA4Dv+1gsFpSLP5/PkWUZdS6IQb2iKHBdl7L1RelvXdeYTCY0/BfiQVVVKMsS4/GYYoXEhvxgMEBZliQeNE2Ds7MzOqaI6KmqCrIsYzwekyARBAGiKKLNfTGoF4N/UdwsCp51Xcd6vcZwOISmaViv1xThtNvt4DgO0jRF0zTIsoyujSzL1OkQBAFFK4n3PhgM0Ov16DWGwyEMw0C/38d2u0Wv1yPXBgB897vfRZIkX+o9wTAMwzAMwzBfN9gRwDAMwzAM8zXgeDziww8/hO/7NKDv9/uIogi+71OOvWVZ+PzzzynnXgyXJUmi393v93j06BHatsVisUCWZRgMBpBlGW3bIooimKZJIkBVVTgej1RO3DQN4jim+B2xmS5KZ8WAOYoiKIqC/X4PRVHQ7/dhGAaJA+v1Gk+ePKFMe0mScDweoes6FEWB7/uIoog27yVJwnw+R57ncBwHYRgiSRKkaYrr62tyToheA7F1f3l5SQKKoij0PBF3I3oARCfB6XTCdDqlayAEANEPMBgM6LGu68gBIOKAgiCg12vblkQPkeVv2zaSJIHjOIiiCLquk7gjtvrjOIbrugjDkBwRuq5T+a8oFx4MBuSu6Pf7dEzXdUlEOBwO0DSN3BeyLNOgX3zu4rsjnB9CdBHxVKK7YLlcUlGycJrc3t6Ss4JhGIZhGIZhmF8d7AhgGIZhGIb5mvLDH/4QjuPAcRw0TYOqqrDb7TCfz7HdbnFxcQEAKIoC8/kcp9MJbdtClmUMBgMkSUIb6qLoVwzC3+wFePvtt7Hb7VDXNZX8ikiY58+fQ9M0yqbP8xyr1QqapuH6+hr7/R5hGNKw//Lykjb3j8cjdrsdZrMZnj59SlvuAHA4HAC83uRXVRWHw4GibCzLgqZpqKoKYRhS0S4AnJ2dIU1TGmhvt1s6hm3bqKqKNvzF+7y6uqIBeVVVqKoKdV3DMAxcXV1Rua9wAIgOhP/tDqjrmhwCk8nkgStCPFcIAqqq4uzsDFVVwbZtci7IsoyyLOE4DhRFQdd1sG37QQ+EyPbfbDYYDAbYbDYIggCqqmKz2VBx9OFwQL/fhyzLOBwO0HUdk8kEkiRhv99D13VIkoS6rnF2doblcokgCJAkCX1XBoMBuUCCIIBpmvB9H2VZkkNgsVjA930YhkECyIcffojPP//8N3BXMAzDMAzDMMxXH3YEMAzDMAzDfI2I4xiffPIJ3nvvPeR5Tlv+dV2jrmv0+30sFgvKwfc8j36v6zoA/5Pbf3d3B0mSKM6mKApEUYS33noLy+USsizjdDrBMAzMZjNIkoT//u//hmEY8DwPkiTRJn+SJCRCyLKMoihIfLBtGwAQhiFto3ueR3FDYRiiLEs6t8PhgKqqALwuGRab/lmWUVRQ13XQdZ16CWRZpm1+VVWp1NcwDBwOBxp0TyYTKjC2LAthGCKOY+R5Tn0EAGAYBrbbLeI4huM48DwPbds+cACInoDRaATLsuhxEd0jnAyTyYQ+P9ELkOc5bd9bloWqqijnXwzsxTBeuAs0TUPbtlSgLMsybfqbponD4QDHcaDrOk6nExzHgWmatMEvon2E+6Gua/R6PSiKAs/zqLBZRE3JsgzXdbHdbrHb7ajYWEQ3rddrbLdbckO8++67CIKAxQCGYRiGYRiG+RXBZcEMwzAMwzBfc/793/8d77zzDsIwpK3tNE0xHo9xPB5pEL7dbpHnOXq9Hm2zG4ZBJbaKotD/v7+/x+3tLWRZpsgdwzAwmUygKApevHiBOI5pM/7NAuHz83Nst1ukaYowDNF1HXq9HsX+iPJiTdPQ7/fJCbDb
7Wj43Ov1EIYhxd70+32Ypoksy9A0DZbLJbIso0giXdeRJAkVG6uqitlshrquIUkSqqpCHMd0rsKtIISKOI6hKApkWcbl5SXl6Yshv9jCPzs7I7eEKB6uqgrj8ZiG9FVVIU1TKmwWHQFCBBHujfV6TaW/vV4P/X6fBvOKoiDPc1RVRZ0PwqUgCoRF4a8oDd7tdvA8j9wRmqbheDzCcRxomkaxTZZl4Xg80mciBII8z2GaJgDAdV1kWUbPVxQF9/f3VDQsehFEIfH5+TkMw8D5+TlUVUW/34eiKLi+vqbvAsMwDMMwDMMwvzgsBDAMwzAMwzD4wQ9+QENYMdj/7LPP0Ov1IMsybNvGer1Gr9ejAfh8Poeqqnj27Blt1Nd1jTiO0e/34fs+Xr16RRvqQRBQfM5qtcLFxQW6roOmaUjTFKfTCZeXl5AkCYZhUH6/iKpJ0xRt2yLLMiqkLcsSsixTd4DnedhutwAAVVWpQNg0TXIcbDYbAMBkMnlQLNy2Ldbr9YOCXdM0sd/vEccxdRn4vg/gtQtgt9s96Djo9XrUD5CmKZIkwXA4pMfFEH+z2cBxHEiSBMdxAABd1yHPc8RxjCRJ4LougiCAZVmQJOmnCoaDIKAy3qZpoCgKCQ9vxvEAr4f6QiAQMUSSJEHXdYoX6vf7dN26rsNut4Ou69B1HVmWUemy+P0wDKn3wbIsZFlGnQPifUqSBM/zqKdBFBRrmoa6ruG6Lg6HAzabDcU9OY6D/X5P4tGTJ08wHo/pujAMwzAMwzAM8/PDHQEMwzAMwzAMAODb3/42/vRP/xRBEFB5rRgYx3EM0zQRxzGGwyEOhwNOpxMGgwGapqFBsGma2G63eOedd1DXNQ1zRfxPWZb4jyCziQAAIABJREFU4osvYBgGFe5mWYYkSdDr9QAAq9UKeZ5D0zRYlkXZ/Hme43g84smTJ9jv91RwvFqtSBjQNA2maWK9XgMAHj16BEVRaEN/t9uhaRr6uRgsbzYbVFWF29tbSJJEOfgis17XdVxfX9OQX1EU1HVNOfkiSz9JEjRNA9d1URQFLi4uyC3QNA31C1iWRbE5YuheVRWyLEO/36cuhLZtyVkgOgSEi6KqKsiyTO6M4/FI19N1XYzHY9R1TcXNQjQpyxLD4ZAcB+I97HY79Ho9cgZomgbDMEhAkCSJBIHVaoUgCOh6GIYB3/ex2WzQNA10XcdgMAAASJJEwsDhcECv1yNh5v7+nsQOXdfR6/VwPB5xc3OD1WpF5caO48CyLHz22Wf4yU9+8pu5QRiGYRiGYRjmKwI7AhiGYRiGYb7mPHv2DP1+H/P5HMvlEpZlwTRN2gRvmgan0wnD4RBBENBWeJ7nlBsv3AD39/cPcuhVVUUURaiqCr7v03BcZNCfn59jtVqhLEtyELzpBBBOAxGFIwb1k8kEuq5T5M1iscBsNsNwOESe51TIu91uUdc1bm9vkSQJbdmv12uK+xHlwSJCSAzqhQugqiqoqor1eg3btn9q03+9XiNNU/i+D8/zyPGw2+2QJAnqukYQBOQCEK4GgaIoNOhv25Yid8S2/dnZ2QNxQEQTFUVB/Qzj8Rht25JLoixLLBYLKIoCy7Ko0LnrOqiqSscRm/2n0wmu61KJ8n6/h2maOB6PsG0b2+0Wo9EIuq4jTVPqD4iiCP1+n3oFqqqiUmgRqeQ4Dna7HYkgwikgXv/Vq1c4HA7Ybrc4Ho+IoghlWSJJEpimiSAIcHt7C9u2yUHAMAzDMAzDMMz/P+wIYBiGYRiGYR7w93//9/jxj3+M3//936fYnLIsH0TFCHfAq1evMBqNoGkaDocDiqLA7e0t7u7uEEURbYabponNZoPD4QDTNGnb/XQ6wbIsnJ2dYbFYoGkaxHGMx48fQ1EUisvZ7/d4+vQpjscjJEnCYrGgElwRLSSG+OPxmAQEMbSv6xrX19eQZRlxHEPXdXINiDiiPM/hui79fr/fh+u6UFUViqJgt9vRVv6jR4+oBPdwOKAsS9quPz8/h6ZpSJKEonx0XacOAVFeXNc1OQTE8F4U74rXEUP+s7MzEi7e7BDouo6G/2VZwjRNSJJEAoIoGx6NRuj1eqjrGmEYotfroSxLhGEIwzBgWRYJEqIjoG1brFYrKgHWNA0A6JqI4wiHhPi9KIroz4fDAW3bwnEcOtfZbEbXS9M0hGEI4HU0kihgFuXIwllR1zXFU9m2jbu7O9R1jS+++ALb7RY/+MEPvuzbhGEYhmEYhmF+a2EhgGEYhmEYhgEAvHjxAqPRCE+ePIGu67i6ukJRFDAMA1EUYTQawXVd2mzXdR3H4xG+71MZbtM0OD8/R1VVFDMEAJ7nIcsyihKaTCYAQCKA4ziQZRmyLON0OiFJEgRBgPV6jcFggNPphLIscXFxAeB1lFCSJFAUBbquw7ZtGmzf3d2h6zrc3t7S4FpRFOoJuLi4IMFgt9vRecxmMyrTPR6PqOsaiqIAAObzOT1HxPOEYYjJZALP82hwLboLANAgX5ZlrNdryvmfz+eQZRlN09A1KooCVVVRDBAAeqwsSxRFgSRJ4HkefN9H27YkHIhS3aIo4DgOPS6G/EJkSNMURVFgOp2SgCKKfg3DoO+B6GsQQ3vxs/v7e0ynUyiKAtu2oaoqRSO5rkufnW3b5DwQfQiyLCOKIozHY+x2O3Rdh+Fw+EA4ubq6QhiGdH3ruibnRlmW8DwPbdvi7OwMg8EAw+EQ//RP//TruBUYhmEYhmEY5isHCwEMwzAMwzAM8eGHH6IoCnzjG9/AbDYDAJRlSSW3r169wvF4hGVZAIAsy3B1dYWXL19ClmUEQYA0TWlrPAxDuK6LruuoADcIArRti+VyiTRNYZomptMpVFVFkiQ4nU409NU0DYvFAoZhYD6fQ1EUvHr1ihwGYqveNE2EYYi6rjEYDCBJEr32ZrNB27aQZZlEgN1uRzn6QjQQQ+0sy7Ddbkkc8DyPugG22y0V8z59+hQAcDqdyO0gyzIsy6IhvsjyF4N1IWQkSfJgmF+WJcUDCddAURQkAgwGA1xcXJBYUBQFDfb7/T7SNMVkMsHpdHpQ3Nu2LfI8pz9PJhPqd+i6jpwERVHAtm0oioLZbIaqqrBcLjEajag/QLg1DocDXNel9y5EhP1+T0KKEEA2mw25EoRAMRwOKQYpyzLoug4A+Oyzz0hoAIBer4c8zxGGIS4vL7FarTAcDjEej9F1Hb744guMx2N8+umn+Oijj768m4RhGIZhGIZhfgthIYBhGIZhGIZ5wMcff4yPP/4YsizDdV20bQvgdWzM8XjE2dkZyrKEqqo0BI6iCF3XQVEUOI6DOI5RliXOzs6gKArF1gyHQwDA/f09yrKErusYjUYU6yOy6sWx0jRFkiSYz+eQJImG7mmaYjQawXEcFEWB1WqFuq7pfHzfRxzHWK1WAF5v9BuGgTzPyRkg3Aq3t7dI05SEge12i7Zt8fTpU9pyF8N5RVFgmiY5A0T58WazoY4ATdMgyzKWyyXKskTbtjg/P6eS37ZtEYYhRQRdXl6SACDcASImZzAYUGFxXdfI8xxJkiBNU+R
5Tq8phu3iNcQxDMPAcDhEVVUIw5CievI8pz4CwzAwGo2oJFp0PohCYRFvZBgGuq6DaZrQNO3Btn4URVAUhVwCh8MBTdNQL4CISYqiCG3bwvM8NE2DIAiQJAlUVcXV1RUURaHrGgQBDocDFEXB/f09LMvCq1evkCQJbNtG27awbRuz2QyTyQT/8i//gjiOv9R7hWEYhmEYhmF+W2AhgGEYhmEYhvl/5f/8n/+DP/qjP8Ljx48RxzFtvUdRhOFwiCiKYNs2JEmizXJVVZFlGZUMHw4HDAYDyLJMIsGbEUG+71NWv+gOEE4AsQ3/1ltvQdM01HWNly9fQtM0Gt6L8xJFv5qmwfd9HA4HGq5fXl5C0zTkeY7VakWRPNPpFI7jIM9zKIpCPQFXV1ckAIhC4t1uhyAIKGZIDMzFoP/29paKdsVQ3LIsKIqC6XRKpcFi+G+aJsXhRFFE109cG9/3yQFR1zUVJydJgiRJ0Ov1cH5+DgAUSySeDwB5nmMymSDPcwCgqB5RQFwUBZqmwXg8JgFACAhd1wF4XZIsYoJkWYYkSSjLEmVZYrlc0nBfDONXqxVFIwkHhPjcd7sdTNOkwl8hnoji5cPhQB0Io9GIPltxjpIkIUkSXF1dIY5jaJqGwWCAKIrIuTIcDrFarfAf//Ef1AXBMAzDMAzDMMxrWAhgGIZhGIZh/j/59re/DcdxcHV1hbu7O2RZBlmWKSpIVVX85Cc/QdM0cBwHqqqirmssl0vYto1+v08b7G+99RaapsGzZ89gWRZFwjRNgyRJqGhYRNuIYbamaWjbFs+fP4fjOHBdF/v9Hp7n4XA4YLPZYDQaYTgc4nA4YLvd0mBbOAGyLMNms6Gh+mw2I1FCDPWrqsLt7S32+z1s24amadhsNlQUrCgKVqsVTNMkAUBVVZydndF1yfMc2+0WAChuSGzRLxYLFEUBWZYxnU4hSRKapoGu61gsFkjTFMPhkAbbbduScJBlGZIkgeM4ePToEYkyQgAQMUEiO7/f79M5Nk2DLMto+B8EAXULdF2Hqqqo7LcoCgyHQ3IXiPN49eoVbNvG8XiE67rwfR+9Xo+6IWRZRq/XQ9M09L5M06SBv+u60DQNiqLgdDpBkiSMRiMAILHoeDxiMBjQNT0ej1SGLFwpp9OJzvd4PGI4HJLw8GbE1HK55DJhhmEYhmEYhnkDBcBfvfmDIAio4IxhGIZhGIZhPv30UwwGA2iaRoNgXdex3+9xfX2N1WoFXdcxHA6haRpt5yuKQgNrETVzOBwoEkh0BcRxjNPphH6/D0VRoCgKsixDv9+HLMsoigLL5RJxHGM6ndKW+Wq1omx7WZaRpikMw8DxeAQAKuVN0xTL5RJVVaGua5yfn8N1XeR5jq7rsNlsoCgKbm5ucDwe4TgOwjCkvP3r62tsNhsaZG+3W4qnCYIAALDZbJDnOW2rX11dIUmSBzE9wjmh6zpUVX0ggFRVheFwCFVV6bqJ4b8QAISo0bbtg56ALMswGAzI4SAEgizLcDqdIMsysixDEAQUySM6BIDX7gHRAyFy/N/8T5yDLMvQNA3D4fBBJJK45kIESNMUlmVRRBIAes95nlO58+FwgKqqsG0beZ7DcRycTieYpomu68gtsFwuqaBadE0YhgHDMDAej8lZMhgMYFkWJpMJLi8v4fs+yrKk7wPDMAzDMAzDfN14//338cEHHwBgRwDDMAzDMAzzM/AP//APeOutt3Bzc0NRLSJGRmxsiwG/iP0BQNn2YiM8TVO0bUtlwyJKZjAYoOs6dF2HKIrgeR5t9adpStv6ojvgeDwiiiJMp1OMRiPaEr+/vwcAXFxcIM9zaJqGFy9eoOs6TCYT2LYN27YpUigMQ1xfXyMMQ8rL32636LoOmqbRcQaDAbkGAODJkycUESRib5qmAQDYto2iKEg0EHFDInJHkiTqDxBlv/P5nB4TkUN5niMIAlxcXNDmflmWtN0vcvTF1n/TNBTdU9c1DfdHoxE8z6PiXcuykKYpjscj2rbFaDSi54ty39PphNPpRK4Jy7KoOBgAuQtEGXFZlrSVb9s2OS32+z05A7bbLW38Z1kG13VxOBxIoBDxS7vdDpqmkXgwnU7p2jRNg+FwiOPxCFmWEYYhACCOY3ImCNfHZDKB67oYDodcJswwDMMwDMN87WEhgGEYhmEYhvmZ+PTTTzGZTLBcLuG6LjzPw2effUbxLWLAezqd4HkedF1HnudwXReqquLly5fouo5KfkUvwHg8hizLqKoKcRzDNE24rksb78fjkYbMdV0jjmPs93tomkYb/WIwLrb0hSBxf38P13VR1zV0XYdlWTgej+i6DtvtFo8ePaIBtSgJBl67CYqiQFmWFPXzZn9AlmUPin+DICBHw5tRQ7Is4/r6Guv1mkqQxbk2TYObmxsAoO174VKQJIl6B0TEj8jQF0XB19fXNPQuyxL7/R6u65IYMJvNkCQJHftNZ4KIXRJl0F3XkcAgHBmWZcH3fbiuS8cRpc5CZEmSBLquP4hHEhv84jMSBcK2bdN3xDRNHA4H+L5PxxkMBiQMiKLi/X6Pqqrgui4AUFyTqqoULRXHMYIgIKdAHMeoqgrT6RTH4xGz2Qw3Nzf49NNP8fHHH/8G7hyGYRiGYRiG+c3D0UAMwzAMwzDMz8yLFy8AAKZpIs9zJElCcT5imC5y70VxrqZp6Pf7ePHiBWRZhu/7qOuahuK2bdOAX1VVjEYjyLKMzWaDJEmgqiqCIEAURQCA/X5PxxGuATEMHgwGlEO/WCyQ5zlGoxFtlYsIGwCYzWbYbrfo9/vYbDYkAlxdXaEoCmiaht1uh67rIMsyLi4uKO5HFAhHUYTb21tYlkX59UmSAAAV7KqqCsuysN1uyclg2zaurq4oSkmIIsfjEZZl4eLighwXRVEgiiLa0nddFzc3N9B1nQqE4ziGZVkUAWRZFkUhAa+z9ZMkgeu6GI1GD0qZsyxDHMd0XuKaeZ5HTgRROiwcHb7vo+s69Pt9Ei6EEJCmKSRJQhAEyLKMugVE74IkSQjDEP1+H4fDAZIk0eu6rovdbgdd11FVFbkLxOchooZ836fvpCzL8DwPsixjtVpR94G4LgDoXC8uLhBFEdI0/bXeJwzDMAzDMAzzfwNvRgOxEMAwDMMwDMP8XGy3W/zoRz/C9fU15fO3bYvBYIDFYkEFuSIH/+LiAp999hmKosBkMkFVVTBNE8vlEpIkwTAM5HlOP9d1HW3bYrvdkgigqipUVcWzZ8+g6zouLy9RliWA1xvhuq7D8zxYlkXb8cfjkYbeYqgeRRFt7adpCtd1sVqt0LYtptMphsMhyrIkIaKuayiKAsdxIMsyVFXFbrfD6XSCqqq4urrC4XCAoijkMhA590EQwHEc7HY72lIXXQD9fh8AUJYl1us1TqcTLMvC+fk5bNsmASEMQ+pQsG0bNzc36PV65AJYr9dU3DwajWDbNpqmQVEU9DxJknA6nfDOO+/Atm10XYe6rh90J0RRRCKBOIYQXLIsIxFmNBqRs0GIOXEc02cpSRLG4zH6/T51BmiahqZpaFvfMIwH0T
+izNkwDOx2OwyHQ+qT6LoOrusijmMMh0MURQHgtbARBAE5HLIsQ1VV1E0g3odt25jNZvRZqaqKJ0+eYDqdknuFYRiGYRiGYb6qcEcAwzAMwzAM80vzN3/zN/iTP/kTXF5eQtM0VFVFMTSapuF0OsEwDMrGF3EvIt6lKAoEQYA0TaFpGgDQMH+73ULTNHieBwAUweP7Pm2Yi8JiwzAQBAFlxovB/s3NDdq2ha7riKII+/0eo9EIg8EAdV3jeDxiuVyi6zpcXl6Sy2G1WpGIcXV1Rdv2+/0edV2TIKBpGuXw73Y7itV5/PgxZeKr6uu/botugcePH5NQstvtUFUVuq6jcy3LkobsTdNA0zS0bYu33nrrQcmviP+xbZu288VjSZIgyzKkaYp+vw/P8+A4Dokhwn0ghv+i3PjN4wuXQNd1FN/jOA7quqZrLlwEhmFQF4EQEUTpsRADdF2HLMswDINEk8PhgH6/jzRNoes6NpsNiT4ibklcx+FwiLqu0ev1SKRZLBYPhCgRc2QYBoqioB6DMAzhui5FQhmGgcFggPfffx/z+Rwff/wxuTgYhmEYhmEY5qsKCwEMwzAMwzDML8y3vvUt/Pmf/zmyLIPnefS/b7oCZFlGURRwHIeeVxQFDWeFk+Dx48fwPA+n0wm73Q6WZaFpGsiyjCiKEIYhZrMZRfU8f/6cBvuKolCEjojxEb+3Wq0AAL1eD57nUTGwJElwHAf9fp9idZbLJVRVRVmWuL29pc6C3W5H5wKA4ot2ux3l9F9dXQEA8jxHURRQVRXL5RK9Xo+6BZIkoVJf0U1wdXWFKIqoD6AsS3IPXFxcwPM8KhYWfQFlWWI6nVJPgYjvEfn9ruvi4uICTdOgrmvqHsjznB5/8uQJPU9VVVRVRc/Psgy9Xg+DwYC6Dpqmobidtm1xPB5JABBb/y9fvsRwOEQURej3+ySa1HWN3W6HwWCAJEmoVFhRFIpg8n2frqnv+1BVFfv9nroX9vs9dF2HYRgYDodYrVbkZn6z10CcX9M0CIIAdV0jyzJcXV1hMBjAcRwsFguYpolXr17hyZMn+OKLL/Cv//qvX/4NxDAMwzAMwzBfEiwEMAzDMAzDML8Uf/d3f4d3330Xuq6jrmscDgfIskwD2J/85CdQFAWWZUGWZZRliSzLqCtADH5N00RRFNjtdmjbFp7nUQ+BEAFE6W+e59QToCgK6rpGFEU4Ho/kItB1HafTCWma4ubmhuKIHMehiKCrqyuUZYkoisi5UJYlHj16RCLAdruFJEmQZRmyLCMIAoqtEeLAbDZDmqaQZRnr9Zo2+kU/QZZlkCQJbdtit9uhLEucnZ1RbJGu67i/v0dd1xgMBpjP57RVv91uachfFAVubm4ou1+U/L45vJ/P52iahgQC0T0gIngGgwFdWyEUiA168bgoaxYlw3EcIwxDWJYFx3HgeR5UVYXjOCjLEofDAZZloes6pGmKyWSCuq6pRFpRFHIDvOkWOBwO6PV66PV6UBSFtveB110QjuNAURREUQTHcaCqKuX+T6dTvHz5EoZhUJzUbrfD+fk5jscj6rqG7/vIsgyqquLVq1fo9XrYbDaI45gcF47jYDab4c/+7M/wwQcfUDk0wzAMwzAMw3yVYCGAYRiGYRiG+aX55JNPcDwe8e6778JxHNo0F9v8wiXQti3iOKYN7+FwiDAMcXFxgaqq8Nlnn6FpGjx9+pSy/+/u7uD7PobDITabDbIsw+FwwOPHjwG8jt2J4xj7/R6Xl5fo9/sP4m1EnIyIf3n16hUkScLt7S2yLKPC3O12i/Pzc3ieR1n2m82GynDfjK0BgNVqhfF4DM/zUNc1NE3DcrlE27ao6xqz2QyWZVHPwGazoW3/J0+eQJIkRFFELoCu6zAcDul4XdehLEvouo7FYoF+v4+bmxusVisSEIqiQJ7n6PV6ePz4MUUzCWHkdDrRdZRlGTc3N9SPIJ5b1zV1DLiuSwJBURSo65qKhi3LwvX1NQkgkiSR2COubRAEFMnTti26rqOtf3EOiqKQK0DERSmKAlmWySVwPB7hui5kWabOBVVV6TxELNRkMoGiKEjTFHVdkxMkCAKEYUiikig4LssS4/EYtm3DMAzqg0iSBG3bYjKZYLPZ4KOPPiInCcMwDMMwDMN8FWAhgGEYhmEYhvmVcH9/j+FwCMdxKDc+DEN4nkfxMm8KA3meY7PZwDRNKIoCAJAkCYPBAHEcw/M8vHr1igbQn332GfUDTKdTGrDHcYzdbkeb8GLb/3Q6wXEcTCYTLBYLisexbRuDwYCcBcvlkmJkxPFUVcVqtYKiKDg/P0dRFAjDkDbPZVnGN7/5TRISwjCkwuOmaXBzc0ODazFUl2WZ4oPiOCZhZLlcYjKZYDab0Va8pmnkUKjrGu+88w7quibHwHK5RJ7nGAwGOD8/p4igpmmQpin2+z05M4IgwGg0ovx/IdIURUGOiiAIqCNAOCTCMIRhGLBtG2dnZ6iq6sHwP45jOI6DPM8pIkh8JqLnIM9zRFFEA/s3HxOigOhg2G638H0fURRRYbIQDERpsWma1F3geR52ux2CIKDIpzRNYZom2raFqqqQJAlFUVC8UlmW8DwPuq4jSRKkaYokSSjKyjRNjMdj/N7v/R6WyyU++uij39j9xDAMwzAMwzC/ShQAf/XmD0TOJsMwDMMwDMP8vKzXa/znf/4nJpMJzs/PEUURsiyDLMvwPA+apuF4PKJpGgwGA2y3WwRBgH6/jx//+MdUCNvv93E8HhHHMcbjMfI8pw398XiMIAhgWRaOxyMVC4vt8qIosFwu4fs+ptMp1us1mqah2KDxeExb5/f39+i6jspye70e8jzHer2Goii4uLhAlmUIw5D6Bmzbhm3bAABVVbHZbKhbYDgcYjqdIk1TlGWJuq6x2WxgWRZF4ohN+fV6jTiOMRqNIEkSDMOgot3tdgvXdeG6LnUliAx/ISrM53NYlkUCgIjvWa/XWK/X6Pf7+J3f+R3keQ5FUcg5kSQJlewKF0DTNDTcF4N4ETMkXr8oCgBAmqbIsgyGYVCkE/A6ikkIDeK84jjG2dkZCRWn04kihMIwhOM4VC6tqiqVNGuahjRNYRgGVFWl+KTtdoter4fT6URCxna7haIoJEqUZQnLssjdALz+N47Y/k/TFE3T0DkNBgPIsozpdArf99E0Da6vr+H7Ps7Pz9Hr9bBYLH4zNxTDMAzDMAzD/BK8//77+OCDDwCwI4BhGIZhGIb5NfC3f/u3+PTTT/GNb3yDMt+7rsNyuURVVej1ejTQHgwGlKkvBtRikG5ZFnzfx/F4/H/Ye7Mex+7r6nudeT6HM1lFFqu6qlqDJVsODAeBgQBBEt8ECJB8o+f75MYXAYIgN3GUCEg0xE4E25La6qGqqziTh+SZp/ei8d9vl58AzxRLlrx/QKHVxenwHBJQ77XXWjgcDvB9n4bwYmAvNrmFCBCGIVRVxWAwwGKxQF3XiOMYiqKg1+tRsbDYRHddF9vtFr7vY7FYQJZlBEGATqdDkUHAq838IAjgui5M08Rut6PBt+u6ODs7g6qqi
OMYkiSRANHr9eB5HiRJwm63o3igsixxcXFBhbqr1Yo6B0zTpJLc3W5HG/xZlqHf79PwvmkaZFmG3W5HHQmqquL6+pqKlQeDAe7u7pBlGT1np9Ohzfw8z+n50zSF7/tot9vkrMiyjEqCoyiC53nodrvUk/B6WbF4b03T0HURxynOoXACOI5D/73dbum6apoGWZbpzziOoes6lsslut0udRMIUanVasGyLMRxjCAIsF6vYds2DocDOp0OlsslFosFLMuCoiiQJAmDwQC73Q6apsF1Xdi2jV//+tdI0xTD4ZBcGb1eD5Zlod1u42c/+xn3BzAMwzAMwzDfWFgIYBiGYRiGYX4rfP7553BdF9PplEp0d7sdut0uwjBEt9ulstonT57QhrmmabT9rigKvvjiC/i+j6qqYBgGXNdFFEW4ubmBJEk4OzujGJkwDKEoCt58803M53P6vWEYeOONN1DXNfI8x/F4RJZlGA6HAF6JFHd3d6jrGpZl0cBcCA11XeP09BSWZSHPcyr8FZv6olxYbN6vViuUZYnr62soioL1eg3g1SBcxCRdXV1RhJGIshEigGEYFK0zn89RVRWCIMDp6Snl5ud5jsPhQGXAiqLg0aNHODk5gSRJdGx5nlP8UqfTAQAqAY7jGHEcI01TBEGA6+vrB9v/WZbRa9i2jclkQu9BZOwLkUCIDgKRzS8cECJSqWkaiokSDoCqquh8d7tdbLdbeJ73IDao3W6T0GKaJtI0heu62Gw25KjY7XZot9sUKSV6F5bLJVqtFg6HA5qmwXK5hCRJFDUUxzHeeOMN6gU4Pz+nguKbmxs8evQIw+EQt7e3+OlPf/pVfo0YhmEYhmEY5r8FFgIYhmEYhmGY3xqffPIJPvnkE/z4xz+m7W7TNGHbNrbbLVqtFpUL67qOpmmgaRp2ux2VyWZZRvEvohtADNr7/T7KsqTHyrKM6+tr3N/f05BdFP0KQeDZs2fQNA3tdhuu65KLII5jDAYD9Pt9VFWFzWZDG+7n5+cwTZOKcEX2vygLjuMYqqqSC6DVaqHdbmO9XsOyLADAbDZDu90mYSRNU9R1jdVqhbquMRqNKEJov9+jqiqkaUrFxgBIOGmaBi9fvqSM/fPzc4rgEfcRIkBRFBiNRjSQz/OcYoBP1mKYAAAgAElEQVTiOIbneTg9PQUAch0kSYIkSVDXNd0u3A+ihyCKIjiOQ86Nfr8PAFRWLF5fiBLi2or7NE2D+XyOTqdD0UCiVNowDOoUUBQFQRBAVVVybqRpSlFBosRZ9Ers93vqgDgej8jznPoLPM9DGIbUdWAYBokUIoZJxCodDgcSfLIsg2VZmE6n+LM/+zN8/vnnuLm5+Rq+UQzDMAzDMAzzfwcLAQzDMAzDMMxvnX/4h3/Ad7/7XfR6PYp3kSQJSZLQsFiWZYqYEVnyIu7FNE04jkPD3e12S/n2aZoiTVPsdjs8fvwYdV1DkiTaUp9Op8iyjAQEEQfUNA12ux0kSUIcx3jrrbdQVRWWyyWyLMN2u0W73cabb75J8ThVVWE+n6Oua4roOR6PAIDlcom6rnF+fg5ZlrHdbmFZFubzOdI0RafTwXg8ps6E5XJJ5+fRo0e07S4ikxaLBVqtFsbjMdbrNTRNIxeAyPKfTqe4vLyEJEm05S8EhqIoMBgMHrgDkiShjH/XdckBICKGhAAgHAJBEKAsSwCgHoHD4YAoiuC6LlqtFpqmwX6/fzDwFxE7QogAQMKA2NRXFAWj0QhlWcJxHCoSliTpQTyQEAM2mw25Auq6pqJg0Sdg2zb9qes6lQurqorNZkO9BSJqSlVVtNttLJdL6LqOzz//HK1WC4ZhYL1eo91uIwgCOI4Dy7Ko+Prs7AxvvfUWnj17hv/4j/9gQYBhGIZhGIb5RsBlwQzDMAzDMMxXghhsa5pG2f+r1QpPnz6FZVnQNA2e5yGKIhogiwGsGGqbpokXL15AVVXYtk0Fs/f39xiNRkjTFPv9HmmakuMgCAIYhoHVaoXdbod+v0/Z9FEUYTabUY4/AOR5jjAMYZomptMp8jynLf37+3tYloVOpwPf9xFFEeq6xmKxQLvdxnA4pIF+0zS4v7+njfTxeIw8z7FYLGhYr2kaptMp3b9pGqzXa+z3e1xfX8M0TTRNA1VVcXd3h+VyibIscTwe8ejRI1iWBVmWKe5IxPg4jgPP8yh2J45jHA4HirsRJcCiJ0DE+4ii4MlkAtM0KZ6oaRpEUUS3v/54MfzXdZ2udbvdpucWt7/e+yAig5bLJWzbpt+naUq373Y7OI6D/X5PvxNRPmLAL9wjjuMgSRLoug5N08gxsF6voes6TNNEGIYUufS6EwR4VfrseR50XYeu63ROxec1z3MAoM+QaZooigKdTgeO4+BwOJDgwTAMwzAMwzC/K3BZMMMwDMMwDPO18LOf/QxffPEFfvzjH6PVagEA5fafnJxA0zTaCg+CgIbwYvv8888/R57n5CrI8xxxHKPVakFRFBpqa5qG4XBImfVPnz5FFEU4OzujbXQRbzOdTtHv9zGbzZCmKRRFoWH/8XiEpmm4v7+nKJnz83Mcj0fsdjuUZYnNZkMRQSKiZjab0SB5PB4jyzLkeY75fA5FUVBVFeXQF0XxoCxY13U8evQIq9UKuq4/GOKLjoS33noLsiwDADkOXs/5lyQJ6/WaBvlFUSAIAjx69OhBfNDrDgHP83B1dYW6rqknQJwjy7KQJAneffddKhhumoa2/EXJsBAzxPa+QLg0hAOgqirkeQ5ZllHXNT1eFPqKQmAxmFcUhYQfcR9N02hLXzxGVVUkSQLDMLDZbNDtdqEoCpUKL5dLOs5ut4vFYkHXOcsymKZJfQ1C+AiCgN7H+fk5DocDPM9Dv99H0zS4ubnBG2+8gZubG3zwwQdf4beJYRiGYRiGYf73YSGAYRiGYRiG+UqJogg/+clP8PLlS7zzzjs0cN7tdtA0Dev1GrZto2maB5vXURQhTVMa+L6+iW1ZFkXXSJIEAOj1evR6x+MRZ2dnNOQVr3d1dQUAlO0fxzE0TcPV1RWKongw1B8Oh2i329jv9zSgF1EzQRBgvV7DMAzMZjMURYFWq4XJZIIsy6AoClarFQBAURScnp4iiiLYtk0CgKIomEwmWK/XlEn/4sULKgI2DAPdbheu61JRcZqmAABd1ynnvygKKuZdLBawbRvT6ZSie8TjhAjgui6dBzHkF7eJDgFVVfHmm29SR0BZlhRD1O126bGv9wOI1xJ9AKLHoWkaioQSHQ/ifkIwEGLJ630BTdNQJ4DomhDHFoYhXNclEWC9XpM4tNvt4Ps+FQkbhkExScPhEFVVkSshjmMcj0d0u12cnp6S0CCKqoMggK7rJBIIZ0IQBKiqCn/+53+OX/ziF7i7u/vqvlAMwzAMwzAM878BCwEMwzAMwzDM18KHH34ITdNweXlJrgBJkihiJcsy1HVN2+DPnz9Hnudot9tQFAVJktB9RG68yIu/uLiggfRut0On00FZllBVFWmaYrPZQFEUGjxLkoQoimCaJjqdDvb7PTRNw+FwoCgjkfuvaRru
7u6oD0DXddze3kLXddzc3CAIAoxGIyoXbpoGy+USVVXRc4mCY7G1r6oqZFlGlmUIggDz+ZyifhRFwXg8RrvdhqZpWC6XFJ2UpilarRZarRbKsqQtf3Fbv9+nngARzSM6ADzPoxJi4QIQg3AhAFxdXaGqKliWRT0L4rz2ej0URUFlu8JFUBQFDdhFLJBwAIiooyzL0Ov1UNc1AJCQI6Kd+v0+9UiIYX4QBFSmLLoDxDWybRuKokCWZciyDM/z6HoJp4bneSQQWJZFjg4RFyScIuJ8iZLl+XyOVquF4/GIMAyR5zmCIIDneeTQOBwOOD8/x+3tLcbjMRaLBf7u7/7ua/tuMQzDMAzDMMxvwh0BDMMwDMMwzNfGzc0N6rqG4zjI8xyDwQCHwwEA4DgOFosFLMuC7/t48uQJFcyKHPg4jtHtdlGWJeI4xna7xWAwgK7rUFUVy+USu92O+gNE9rwsy3j77bex3W6pHFjTNPR6PSiKgizLcHd3hyRJ0Ov10DQNbZzf39+jKApcXl5C0zQkSYIoinB3dwfP83ByckIigIjoKYoC5+fn9Dzb7RZRFNHwWVEU9Ho9bLdbhGGIw+FAWfTvvvsuPSbP8weiyfn5ORzHIRdBGIbYbrcwDAOTyQSO41CEz2q1wvF4hG3bdIwASADY7/cUPzQajWg4L+KPRM5/mqbo9Xo06BfCgIg/GgwG5AwQDoM0TZFlGWzbpuG/aZokEAiRII5jDAaDB6XBQmSwLAsAoGkagFdCkWEYJLBsNhvYto3dbgfLsrDf7+G6Lm3wm6YJTdOg6zrCMESn04HrutjtdlBVFaZp0nGKiCrhroiiCMPhkPoaOp0OVFUlN4oQbqqqguu6qKoKFxcXSNOUeggYhmEYhmEY5quGOwIYhmEYhmGY3xk+/fRTfPrpp/jBD36AJEngui663S6KooBt2yjLEp988gl0XYckSWiaBgAoHmaz2UDTNBRFAdd1afi73W5RliX1ArTbbbx48QKyLOPx48e4v7+nWJl+v0/DXTG81TQNo9GIcvvv7+8BvNpev7q6QlmWOBwOWCwWKMuSBuyGYVCJ8Ha7ha7ruLy8RBiGcBwHq9UKZVmSE0G87mq1QpqmOB6POB6PmE6nGI/HOB6PAF5FCt3f36Pb7WI6nUKWZRyPR5RlCUVRcHd3B8dxMBgMMBqNEMcxAFB/QF3X9DgRxSMigoSo8vjxYxq8iw1/cVvTNCSKCCfG62LAaDSieJ+qqsiVIMqSRXmwcGGIeJ6iKNDv91HXNQzDePD47XZL7gAhDgiXgBBRxHDeMAzqk1AUBaZpQlEUKkgWEUGHwwGtVguGYVAMkBCIPM/DbrejXgRRNLxarUgoWi6XWK/XdJxJkuDk5ITOi7gGTdOQsPX+++9jNpt9PV8whmEYhmEYhgELAQzDMAzDMMzvCB9//DEMw0Cr1aJc9jRNYds2bYa3220aPB+PR7iu+2BDfDKZwLZtHA4HzOdz2LaNKIrgeR5ttT9+/JjEhOPxiOFwSHnyeZ4jDEOEYYjxeAzbtmk4fzgcMBgMyDVwd3eH7XaLqqoQBAE6nQ50Xcd+vycRoN/v07a8aZqYz+eUfX96eoqiKLBerwG86jIQA+gf/ehHiOMYRVFgs9mgKAqUZYlutwvP88gRMJ/PURQF4jhGEAQYj8eQZRllWVJHQJ7n8H0fJycnFDWU5zmOxyP1LnQ6HVxeXtKgXnQEiBLfy8tL2t4Xf4pIoNPTU+ofEE4AEUEkhuziNnFNhQMgyzIamgtEp0CaphiPxxQVNZ/PEQQBVqsVFf4CoE1/USZcFAWVB4tuBRENtN/vqVxYlP5uNhtYlkWdE6JPwHEcRFFEXQbz+ZxEG0mSkOc5XNclR0S320WapuQkET0Wqqrij/7oj/D8+XP8+7//+1f5lWIYhmEYhmEYgoUAhmEYhmEY5neGDz74ANfX13Ach2Jt7u/vaVMcAAzDwGazob9rmob9fk+D4CRJsFwuUZYlNE1DEATYbDY4HA4YjUYoy5K270UBrKIoKMuSonUmkwmqqqIuAREtI7Lvd7sdsiyDqqqYTqeU+y+OKwxDDIdDeJ6HJEkoHqiqKkynU6iqiizLqCPgcDggSRI8evQIvu8jz3Os12tyQJRlifF4DF3XoSgKNpsNkiShot1+v4/RaIQkSR7E7ZRl+aAo2DRN3N3dIU1TKiv+/ve/j91u96B3IEkS2LZNcUYikki4C7IsQ7/fh23bFIEkjrMoCipXFtdIHFOWZfTYuq4xGo3o+YWAYFnWA/FACA7iNUS/gXADiF4ASZKwWq3QarWoCyAMQ3ieRwKL+IwIESkMQ7RaLaxWK9i2DcdxEIYhfN9HWZawLAuHwwFpmlJ3QKfTwW63AwC02218+eWXaJoGL1++hO/76PV6yPMcjuMgyzLIsoyLiwucnZ3h6uoKd3d3+OCDD76eLxjDMAzDMAzzewsLAQzDMAzDMMzvFE+ePMGTJ0/wp3/6p7i4uEDTNFQGGwQBlcBKkoQ0TaHrOo7HIyaTCXRdp1Lhfr8P13URxzHKskSn00G328UXX3xBLgLTNGmL/XA4kAggNuqjKMJ6vcZoNEK324UkSbi/v0cYhmiahmJr4jim46mqijb3xQZ+HMeo6xqXl5ckAszncxrIi0z/IAgoJkgMw5umwaNHj6CqKg6HA/I8J7dAURQ4PT19UHgrXAdBEODi4gJhGELXddryFwLCe++9BwDU0fDy5UukaYogCKDrOlqtFgkAQiAQt08mE8iyjN1uRwN6ERN0cnJCEUGiJ0BEDHW73Qf3Xa1WcF2XSoYHgwGJG+I9iddN0xSu6z5wH9zd3SEIAuz3e3JEiHgekfsvOhiEYCDKhWVZhmma2O12aLVakGUZYRjCdV0qi9Z1nZwEwlkiYotUVcV2u8VwOKTrKzoShOBR1zXm8zk9T5Ik5F747LPPcHNz83V+1RiGYRiGYZjfI7gsmGEYhmEYhvmd5OnTp9B1HUEQ0Da9pmmwLAur1QonJyeQJAlZlqEsS5yfn+N4POL29pbiewCgKArafg/DkAbMmqZRF8F+v8d2u8Xp6Sll7sdxjN1uR3FATdPgcDhQ+avY7Nd1HXEc0wD+0aNHFIFTliUWiwU6nQ5GoxF1ENzf3+N4PNKQ/nvf+x40TaOC49lsBlVVSYAQW/hJklDWfK/Xw2AwgGEYVPR7OBxQliX6/T7F57weYyRJEoIgoHicqqoQRRH2+z1t5A+HQyryfb2zwHEcil4CXgkIsiyTu2I4HMK2bYpuEhFAWZah1+vBtu0HrgEhDgBAt9t94AQQBcOGYSCOY3Q6HTiOg6qqIEkSOT9Ej0QQBNQVII5N13U0TUPFv0J0ECXEohRYCAZxHMOyLMRxDNu2cTweEQQB0jSFaZpIkoRcIEJAAgDbtkkgCMMQu92OYpWKokCv16OugbOzM0iShIuLC3S7XQwGA8xmM4o/YhiGYRiGYZj/Tl4vC2YhgGEYhmEYhvmdRcSt+L6Puq5hmiaeP39O+fCtVgtRFKHdbsPzPDx79gwA4DgOfN9HURSYzWY0hN/
v94jjGLIsYzAYQJZl2iw/PT2l/P40TbHdbjEYDNDv9yn65e7uDoZhUNxO0zRYr9fI8xyKoqDb7T4YJN/d3aHT6SAIAmiaRr/b7/fY7XYYj8d44403kGUZVqsVZdJrmobpdArDMGjgLYb5juPg/PycBAjxGOEQePToESzLovcqYoTEcdu2Ddu2sd1uSTw4HA64vr5Gt9ul3gMheoj4I9M0Kabn9ZgfVVVxdnaGJEkeFBDneY52u00CQNM0FBskRIB+v09D/N90HrTbbZRlCeBV/JMQKrIsg67ryLIMnU4HdV1juVzCsiyEYQjLsqhbwXVdbLdbcgAYhkHxQrqukzsgSRJYlkVD//1+D9d1AYDOs+u6JAaIc5jnOe7v71GWJXUSmKYJVVXR6XRIUOn1eiiKAk+fPsVwOCTBxnEccrlsNpuv50vGMAzDMAzDfGthIYBhGIZhGIb5xnBzc4O7uzv4vk/xOK7r0ha2GMy+ePGCxILXC10lSYJlWQBAm+ai/DbLMux2O0RRBEmSoOs68jynoex0OgUAGt7XdQ3P8+A4DiRJwvF4xG63Q7fbxWg0oi3x2WyGPM9xcnKC0WgEAAjDEDc3N/T/2ldXVzg5OUGappjP5wBAETPn5+cUaSQcBHEco9frYTKZ4Hg8UuSOeOx0OkW326Uhc57nePnyJWRZRhAEmE6n2G63aJoGcRyTeDAejx9E9kiShNlsRsP3x48fQ1EUGoKLx4pBvmVZaJoGi8UCWZbheDyi1+vRhr+IBxK9BSIiSNwuxAbhehAOAvFYcUxJklCxsXAjaJpG8UniOUzTpCx/IewYhgEA5A6QJAnb7RZBEJATQBRS73Y7eJ4HVVXpcyGcAq7rklCx3W6h6zokSYIkSXBdF61Wi1wYZVnCNE0qn7ZtG6ZpIs9znJ2dwXEcDIdD9Pt9TCYTjMdjcmcwDMMwDMMwzH8HrwsB3BHAMAzDMAzD/M6zWq3w93//9/jRj34E13VpW9yyLOR5DgAUxdLpdNDr9bDdbnF3d4fz83MampdliW63S1v3i8UCdV3Thn+SJLQRf3p6irquMZvNcDweUdc1BoMBBoMBmqYhEUBsmW82G3IChGGI09NTBEGAxWKBNE2xWCwgyzIcx8F3vvMd1HUNVVXx5ZdfkmjR6/Wo3FaSJNze3pLQcX19TSXFYlifJAn6/T6GwyFtwEuShJcvX5ILQMTRiCLe+XyOuq7h+z4uLi4AgFwFURShKAo0TYPJZPIg/3+329FAXxQXi0ibPM/heR7FKiVJAkVRUNc1siwjR8DJyQkJA2VZUm+Boij0/gFQvJBwAMzncwRBQAIBAHIQiPuIWKLXf0SMkBAMxNAfAIIggGEYqKqKoqA0TaN+gSiK4DgOFEVBmqZwHIfEIF3X4bou9RqEYYgoirDZbGCaJnRdx3a7RZqmmEwm2O12FJ8keiGGwyH1WRiGAdd18Yd/+IfwPA+/+MUvvuqvGMMwDMMwDPMthx0BDMMwDMMwzDeGm5sbtNttGkJ7nof1eg1JkgAAhmHA930q+xXb61VVAXi1cS+y7EVkjsjKFwPfOI4xGo1wcnKCJ0+eIEkSKgAeDAaQJImGvpqmod1uQ1EUNE1Dee+j0Qij0YjicJbLJaIoQtM0eOeddyhmZ7PZ0HD/u9/9LmRZhmVZiKIIt7e3SNMUlmXh4uICh8MBhmFgsVhgu92SqKEoCjRNg6ZpmM1m2G63iKII77zzDpXgHg4HHI9HGvhPJhM4jgMAKMsSs9kMYRhSjM75+TkJAGJTX5IkyLKMi4sLyu4XET9ik384HMJ1XRqER1FEA3Xf92l7XwzDRfdAq9Uih4CIHhIOBBGx0+126XjF6y0WCyiKgn6/j7qu6VyI3gfxnJvNBo7jUJSTiJoyDAOSJJGYs1qtYNs2oiiiImFN07DZbKAoCjzPo8+JZVkwTZO6EISbRPy+0+mQQOI4DnVVFEWBuq6xXq/JRdDr9dDpdOC6LiaTCS4vLynyiGEYhmEYhmH+b2FHAMMwDMMwDPON5aOPPsJHH32Ev/qrv6Kt/ZcvX2IwGFABrCRJePHiBTzPAwAoikLlr7quIwxDKuTt9XrkDthutzg5OUG/38f9/T0JBt1ulwbRokRY0zR0Oh1IkkQRN4qi4OLiAlVVYblcIo5jzGYzGvC/9957kCQJh8MBy+USdV3DsiycnJzgcDjQxr6IuDk9PYXneTgej1T6m2UZHUur1YKiKPRaaZoiCAI8evSIBICyLLFcLsktcHV1BVmWcTweaZu+aRp0Oh28+eabFHkktu3zPEdZlhSTVJYlmqaBLMvYbreYTCYAAEmSKMpH5PhLkoSzszMAwHK5hKqq1GUgro3Y8q+qim4Tr9vr9SjeR7gLhEjQ7XZh2zZc1yWBoSgKWJZFGf2iS8KyLBIyLMuCLMtQFAXz+Ry9Xg+73Q6+79P5FPFSQRBgu92i3W7T0N+2baiqShv/nueh1WohyzKcnJzANE1EUYQsy+h9y7IMXdfJ+RAEAbIsQxAEOBwOkGUZqqrCcRzqL3jnnXfg+z4+//xz7g9gGIZhGIZh/p9hIYBhGIZhGIb5RvKTn/wE9/f3GI/HcBwHSZLAtm0AQNM0KMsScRzTIN0wDAyHQyiKgtlsBsuyMBgMKFIoz3P4vg9N0/Dy5UvaHu90Omi32w+igxRFoW37pmmw2+0gSRK9/mKxgGEYWK/XSNMUvV4Pl5eXUFUVt7e3lAPv+z6CIIBt24jjGLe3t8jzHHVdo9frwXVdHA4HAK+Kk18XAdrtNtbrNQ3ej8cj2u02hsMh4jhG0zSYz+coyxJBEGA8HkNVVXI43N3doaoqOI5Dw3oxUG+aBsvlkl7r4uIC+/0ekiTRRntd17i8vMRisaBt+yzLUFUVHT/w/wsHtm1jNpuh0+lAlmUa1AsB4PXrIDb8X48QWi6XNHB/XSAQRcNCJBDXf7VaodPp0N/ruqYeCAAPrpdpmhQftN1u4bouiUq+78MwDGRZhu12C8/z6HV6vR5M00RZlvB9H2EYUjyQcAXs93sqkRbH0Ol0sNlsqOtB9EEURYHHjx9jOBxCkiRMp1O88cYbeP78OX7605/+Fr9NDMMwDMMwzLcdFgIYhmEYhmGYbyz/+q//ih/+8IeYTqdwXRdhGAIAvvjiCzRNQ5EsYkiuqip+8YtfUKzO4XCA53m4vb1FVVU4OzujjfMoiuD7PsbjMUXg7HY7yLIM3/cpjigMQ+x2O4oDur+/f1BCPBwOcX19Ta4BMWBWFAXj8RhFUWCz2VCcjeM4CIIArutiv9+jLEvM53O4rovr62uKplmtVlS+m+c53n77bUiShDiOMZ/PUVUV8jxHu93GZDJBmqY0pJ/P5yiKAv1+H6PRCGEYQpZlcgjkeY6maaBpGlqtFsqyhK7rWCwWyPMc0+kUsiyjKAr0ej36vdh2b7fbD/oDhOvAMAzK3ReigThOIZjUdU0CQRzHD5wOQRDQbaJDYL/fUzHwYDCgbgAR9yNcAbPZjAQG8XshlgwGA4oSEh0BdV1jv9/D8zy6nu12G1mWwfM8KhYWDh
PP8yDLMlqtFsIwpOilwWBAhc6ilPjzzz/HaDSiUmvf9+n8OY4Dx3Hw/PlzAKBuhD/+4z/G+++//5V+vxiGYRiGYZhvDywEMAzDMAzDMN9oPvzwQ3z44Yf4kz/5E5yfn1Pmvud5UFWVSmtbrRbu7++pDwAAbNvGcrlEkiQ0vE7TFMCroa0ozDUMA/P5HOv1Gqenp2iaBpIkYbfbYb1eYzQaYTgc0kB8tVqhaRrouo7Hjx8DAO7u7nA4HGh43u12cTweKaZnNpvB932cnZ1hv99ju90iz3PM53P6vSjiFa+Tpil838fV1RUdt+gpcBwHFxcXsCwLWZZBVVUsl0ukaYqmaTAYDOg2Xddxf3+PJEnQ6/Vo0C9JErbbLW2yG4aB6XSK2WxGw/A8z6Gqr/5ZIYqJRf+ByPIXpblCPBDb/0VRoCxLjMdjuK5Lm/tpmiJJEhyPR3ieh/F4TAKAeE3xMxgMUJYl9S6I1xdDdSEEiO1/WZaxWq0QBAEkSaLyYFmWMZvN0Ov1EEURLMuC53kIwxCtVgutVouKgI/HI3zfp2vY6XRQliW5T0ajEcVRRVEEWZZJoNB1HdPplLopACDLMoojStMUt7e38H0fwCvXwXg8JqHk+fPn+OSTT76KrxbDMAzDMAzzLYKFAIZhGIZhGOZbwT/+4z/ivffeQxzH0DQNtm1T1I1t29A0Dev1GqqqwjAM2sYW93ccB3EcQ9d17Pd7DIdDisF58eIFwjBEr9dDq9VCnufY7XbYbDYYDocYDAYPRAAxRP/BD34ASZJwd3dHxcSj0Qie51EW/2KxQFVVmE6n0DQN2+2WYoWyLCMBQFEURFGE2WxGRbpvvfUWNE1DkiS03Z4kCVzXxWg0gmmaSJIEADCfz0kQ0XUdtm1DkiSsVitkWYa6rtHv96EoCoqigCRJqKoKqqri7u4OFxcXUBSF3BQidijPc4zHY2iahrIsUdc1uRzEBr+IHtrv9yRgFEWBbreL8/NzbLdbSJJEw/0kSeB5Hk5OTqh3QLgAsixDWZbo9XoUAfV6P4DY+BfihBjCi6E7ALr24kfEIPm+D0VRYBgG9Qa4rov1eo1er0fXva5rqKpKDoiqquC6LqIoQqvVIjeI4zgwDANBEGC326HValE3w36/p3gjAFRSraoq3n77bYp8WiwWOD8/xxdffIFOp0OuBXYHMAzDMAzDMP8nsBDAMAzDMAzDfGv4+c9/DsdxcHJygvV6jSAIEMcxlfcmSQLHcdBut2EYBvb7PWRZhmma2O/3CIIAL1++hO/71Bcgcvg9z0O324WqqlTAOxqN0Ov1MJvNUFUVVqsVbZD/wR/8AZqmwWw2QxRFkCQJk8kEVVVB0zQcDgeEYQhVVTEcDimvf7fb4dmzZyiKAq7r0m3r9Rrr9RpxHMNxHFxdXUFVVURR9GAob+ZXqpoAACAASURBVFkWxuMxTNOkXPrlcommadDv9yn6Zrvd0lC9KApMp1NIkgRFUSiqSGzTP378GLIsI4oieh1Rfnt+fk4FyEVRkAtA13XaahevIRwGrVaLBA6xzT+fz6lA1zAMdLtdcgi8HlnU7/fJHSAEAPHzehSQoigU3wQAdV0DeNUNoGkaZFmmIbxwiAi3gCgHDsMQvu9TH0PTNDT4L8uShv7dbhdJkiBJEiqP7vV60HUdURSRC+Tly5fQdR2maQIANE1Dv9+nrgDDMCBJ0oMoouFwiPl8jvF4DN/3MZvNMBgMMJ1O8fz5c/zzP//zV/PlYhiGYRiGYb7RKAD+x+u/6HQ62G63X9PhMAzDMAzDMMz/Gzc3N1itVhiPxzRkraoK+/0eiqJA0zSKtdlsNgBeDYJ1XafN9IuLC+i6jmfPntH/G19cXNDAebvdwjRNGtyLQX0YhnBdF++99x6AV1v4URQBALkUJEnCZrNBHMfYbDbo9XrwPA9VVWGz2dCw/eTkBJPJBJvNBtvtljL3RYmsqqo4Ho/USWDbNs7OzjAYDGAYBg6HA8qyxGw2Q7fbhed5cF2X4mrEdr2iKDg7O8N6vSaHRF3XNCQXRcmiqyCOY9i2DcMw4Ps+xfDUdY3VagXLsnB6ekob+GmaIo5jHI9H7Pd7PHr0iDb3RQ+AiCsCgG63S7eLWCERFSQ6BITrQDgIRN/B6/0CRVEgDMMHkUCapgEAfR5M0ySBQAzhNU2DpmkUMxSGIZIkQavVgqqqaJoGrutS9I9wjRyPRwRBQBFBYRjS0F/0IYjyY9u2EQQBsiyD7/uwLAtpmuJwOCBJEhRFQV0Q4nV7vR4sy4Jt2xgMBnT7xcUFoiiiUmmGYRiGYRiGEfzgBz/Axx9/DIAdAQzDMAzDMMy3kOVyib/5m7+h7eler0fRMa1WC0VRoKoq7HY7DIdDAKCtdjF4TZIEm82GBu91XcO2bTx//pwKg5umQZZl2O/32O/3sG2bBt1ffvkl8jxHVVUwTRP9fh8AcDwesdlsUJYlzs7OSEQQ2/Ku6+L09BTdbhf39/cwDANRFCHPc3Q6HYqP2e/3yLIMcRzD9330+31yAYgonPv7e7r/6+XDi8WCugBEpJHneZjNZmiaBu12G67rUqnxYrFAWZaQZZl6AGRZRhiG5ALI8xzn5+cAQMPw2WxGg/ogCEhwMAyD3Ah5nkOSJIxGIxr+i8dUVUX9AiIiqK5rGqiXZUnX7zcFgqIoMBgMaNAvrrH4EQ4F4QxYLpfo9/uQZRmHwwGO46Cua3ieh6IosNvt0O/3UVUVoihCkiTodruI4xhRFJEDQFEUKjDebrdU/izuI9wI+/2ernFd17AsC7quI8syOI5DYoamabAsC0+ePEGe5/B9H1mWQZIkvPPOO9jv95hMJri5ucHHH3+MxWLxFX/bGIZhGIZhmG8CLAQwDMMwDMMw31ref/99/PCHP4QkSfB9n4b/IqZFlNXquk5Z+tfX15AkCZ9//jkMw8DJyQksy6INczGcLcsSACjrX+TGdzod3N/fkxOgrmsEQQAA5AKo6xpnZ2eo65o2y5MkQVVVuLy8RFEUWK/XKIoCd3d3FAVkmibSNKXSXxHpY5omTNOkGJrlcgnP8/Duu+8iDMMHzgWRzX9ycoI0TdFutzGbzQAATdNgMplQh8Jut0Oe5xSzM5lMEIYhNE2jTfjNZgPf9/Ho0SNsNhsYhkFug7quoes6dQRUVUXxNiJf//VoIREBJH76/T4uLy/JbRDHMQCgKAoMh8MHEUHivQkh4OTkhG4TAoGI/RECwOtCgIjrEaXIx+ORHBySJEFVVaxWK1RVRRv5osvAMAz6DO33e7TbbYRhSDFHorx6u93S+3ddF1mWUb9Et9vFdruFqqrwfR+73Y5eUzy+qioSsizLguu6WCwW6HQ6+OKLL+C6Lp4+fYqPPvroq/uSMQzDMAzDMN8IWAhgGIZhGIZhvtV8+OGHWCwWePvtt3F2dobD4YC6rrFYLCgqx/d9bLdbdLtdSJKEzz77DHmeU1yM2E6fz+dot9twHAdRFCGOY8xmM9p4n0wm+PWvfw3Lsmgr/6233oKiKFiv19hut6iqCpPJhOJ39vs9lsslZfuLY9N1H
WEYoq5rPHr0iESMpmmw2+3oNXVdh2EYCMMQWZbRMYqhveM4WK1WFHUzmUwgyzLyPKfce8Hl5SX1DgjnQNM0GAwGsG0bRVHAtm0sFosHLgBxjlzXfdBX0Gq1AIBKhEWUj6ZpOD09pduapsF6vSZHgBAIRHFxXdf0/kSpr3icOM9i2C8EAvEjXAUiPqiqKkiS9GDIL35e/wzouo7j8UjD9n6/T9E/qqpClmU6Lt/3kSQJFQLHcQzP8yjnXzxXq9VCGIYYDodQVZUij0ajEZUD67qOxWIBTdOo8LhpGpyenmK1WuFwOKDdbmOxWJCD5O7uDp7n4fvf/z48z0O/38c//dM/kRjFMAzDMAzDMNwRwDAMwzAMw3zrCcMQT548ociazz77DEEQUGzL4XCgIbP4bzFod10XVVVhNptBURQa9BqGgd1uR5n5rVYLmqbBdV3c39+jLEt873vfowibm5sbNE2Dk5MTcgIcDgcsFgtYlkXROEmSII5jyuoXpcNVVeFwOOD29haapuHs7Ay9Xg9N02C73SJJEqRpitPTU/i+jyiKYFkWFosFZc6fnp5SHr9wARRFgdFohG63S+9bFAzXdY2LiwtyG6RpiuPxSJv0k8mE/u2QJAmV3k4mEyomtm2btvmPxyMsy4LjOPSeRA+AqqpQFAXT6ZSicYSzQIgHorshiiJyAAhxYTAYwHEcACAXgBADkiR5IBAIJ4hlWSSECGHGNE1yCYiYIE3TaPtfVVVkWYb1eg3f96HrOvb7PRzHga7rsCwLkiRhvV6j0+lgvV5DVVXYto0kSeC6LnVMGIaBOI7JAdLv91GWJSRJQp7nME2TjlnEVGmahuPxiJOTEzRNg7IsMRgMUJYlrq6uqA/CsiwYhoH5fP71fOkYhmEYhmGYr53XOwJYCGAYhmEYhmF+b5jP5zBNk4p7NU2DaZpUhisiWZIkQdM0ODs7ow355XKJXq+HNE3hui5evnyJ/X4PTdMwHA4p6iWKIiiKQtv1iqLgV7/6Feq6RrfbpUia4/GI5XIJwzAwHA5pEJ0kCVarFXRdx9XVFdI0haIoePr0KZXTPn78GMCrqKGyLLFer5GmKeXUi1x7IQJ0Oh0MBgNkWYbVaoUoiihT//z8HIZhkEAgts6n0ym63S6VLQtxwLZtNE2DVquFpmmgaRqWyyXiOIZlWZhMJtB1HWma0vlQVRWSJOHs7Ay2bZPbQeTrH49HGuS/vsUvnqPf78P3fQCg1wzDEHmeo9/vUyGwGJoLEUD0AzRNQ0N18b7FYP/1x4niYxHrAwCqqkJVVWiaRjFIiqJAVVXkeU7vZ71ew3VdJElCUUGieNiyLBrs73Y7EggURYHv+4jjmLopLMtCFEWQZRlZllHBMQD0ej1UVQVZlhEEAV3nJElQ1zXu7+/JOdHv9+G6LsbjMUzT5O4AhmEYhmGY30O4LJhhGIZhGIb5veXjjz/G2dkZbYO3Wi2K+Xny5Ak5ADzPo7iZKIrQ6XQgSRJM08RqtQIAKuEFQAP7JEmgKAosy0KWZbi5uaGBtRgUx3GMxWIBwzAwGAxQVRUURaH8/yAIcHl5SVvxi8WCsvnfe+89GloLgaLVamE8HqMoCjq+uq6RpikN0ZfLJQkcYgv+4uICSZJAVVWUZYnVaoWyLPHo0SMquhVlwXVdo9PpIAgCSJJEg2UhEozHYxqOS5JEefpZlmE4HMJxHIrzKYqCYm9kWcb5+TnCMISqqhT1I7b8hUgjhviSJJGo8npHgHhP/5UYIEnSg9tkWcZoNKLPhHA4iPgky7Kw3+9JKBD3CYKABvm2bVNcU6vVQrvdxmq1ogJhx3HoOdI0hWEYVEYdhiG63S71BwgRQLgthGA0m83Qbrex2+1Q1zU0TUO73cZ6vcann35KDhMRPXV6ekrCQLvdRqvVwltvvYXnz5/j/Pwcf/u3f/tb/nYxDMMwDMMwv6uwI4BhGIZhGIb5vWO/3+Ozzz6Dqqq0VS/LMuXGi9x2z/Pw7NkzHA4H9Pt9Ks1dLBaUFd9ut2k4fn9/D9u2cXFxgaZp8Mtf/hKHwwGu69ImOQCK9/E8j/Lp0zSl3oLLy0uKfXn69ClFz3z/+98H8Gqgvd/vsdls0DQN+v0+CRfr9ZriiN566y2oqor1ek059XVdYzgcYjAYUEHyfD7H8XhEp9PBaDTCfr+HZVnkHijLEhcXF7BtG8vlkoqS5/M5LMvCdDqFpmmI4xhZluF4PFKO/tnZGXRdRxzHSJIEURRRtE+v16M4HzHMzrIMvV6PnAdCOBARQGJYLzb7RR+BiBd6XQAoy5IKhG3bfnDb4XCAZVl0PoWQIIqBFUVBHMfY7XYkCMiyTGKHYRgAXolBQjRRVRWGYVD3g+d52Gw21BcQBAH1B2w2G3INhGEIWZap60H0D/i+T+XFInppv99DVVUEQYCqqhAEATRNo3/HiaLi29tbirCq6xqO4+Dk5AS9Xg83Nzdf0TeNYRiGYRiG+TphRwDDMAzDMAzDAPjoo49wOBwwHA5xdnYGAAiCAIqiUFb74XCgwbPoBaiqCkmS0BA7SRIqofU8D7Is49mzZwAATdMovuZwOGC73UJRFOoTiKKIynIty0K73SYnwLNnz6iEVxQdW5aF4/FIIsDFxQWqqqJlnsPhgMFgQFvlq9UKiqLg7u4OrVYLQRDAtm3EcYyqqrBcLklM8H2f4o6EC2AymUBRFCiKguPxCMMwcH9/D9/3MRgMMB6Pqc+gqiqs12u0Wi3IsgzP85DnOW3ti9z88XhM7gPhKCjLEoZhYDQaIYoiOudimF/XNU5PT6GqKvb7PbkGRJHzYDCg4bp4nPgR0UsA6HHimAA8KAwGQP0AIk5IlP6KawzgQXyQ6EHodDoUkeS67oMN/k6nA1VVybHRbrexXC7h+z45DSRJovuJ6KMwDNE0DWzbhqIoaJqG4orOzs4wm83IMZEkCZIkQbfbRafTwW63o+u02WxIwPmLv/gLLBYL/PKXv+RCYYZhGIZhmN8T2BHAMAzDMAzD/F6zXq/x/Plziv4Rg+QgCPDZZ5+hqiq0221omkZuAEmSUBQFLi4uaAv95cuX0HUd19fX+NWvfoX9fo8syxAEAXq9HpX5brdbGIaByWSCKIpgGAZevnwJ0zQxGAzQ7/exXq9xd3eHoiioGFiWZYoJEj0Ek8kEAEiwELFE5+fnAEAFvVVVwTAMep7j8Yg8zzGfz6lc1vd9HI9HyLKMxWKBqqownU6pHDdNU8xmM4q1OT09ha7rtGEvCoE7nQ48z4NpmnRMh8MBh8MB5+fnFMlk2zZ2ux2VEAsXAAByHRRF8T85BOq6hmmaCMOQio5Ft4Bt29jv9+QgGAwGD/oDhDtAPOdvRgcJMUAIAaKcV1EUbDYbioECXokFaZrCNE0AoN4AURQsOiNEJ4WmaYiiiP6u6zp1AiRJQudDOBtc10WapnAcB2maYjgcIs9zAEC/30eWZfS5EV0XQpgoigLdbhftdhs///nPsdlsEIYh+v0+uT7a7TbefvttBEGA1WpFz80wDMMwDMN8e+CyYIZhGIZhGIb5DZ4/f471
eo2qqmDbNm5vb2nIenJyAsuysN1uEUURDdwdx4FlWQjDkHoBVqsVZFmmnoHRaESb7y9evADwqvRVRBHNZjM0TQPXdTEcDjGfz5HnOfb7PXRdR6/XQ7fbRZIk2O/3WK1WsCwLrVaLBuBhGGK326Hb7cJxHERRhP1+T9viw+EQ3W4XWZYBeOUamM1mGAwGVFi73+9R1zUWiwXa7TaGwyE0TUOSJJjNZjgcDsjznF5DDMzX6zWOxyPFB5mmSUKDKDJ2HAfT6RSr1QqqqlIJsHiO8XiMLMtok1/Xdeo+CIKAYoBEPBAAuK4L13XJOQCArp3YyAf+ZwFACAQiJkhEDb0uBsiyDEVRqDcAeOXs2G63DwqGZVnGbreD67qo65oilXRdh67rJCw1TYPD4UCuENFB4DgO4jiGaZrkGhCxQYqikGNEiFS2bVNEEPDq325FUZATRVxLIdg8e/YMFxcXsCwLvu9jOp2i3W6TKyGKInieh8vLSzx+/BgXFxfodrvY7XYoiuIr+uYxDMMwDMMwvy04GohhGIZhGIZh/gsWiwVF4pyenmK/36PVaiHPc+i6Tr0AAGBZFkzTRBzHCMOQ8uZFFj/wKjLGNE1EUYT7+3tUVYWTkxPKmQ/DkLbEHcfBl19+SQNnWZbh+z6GwyFF+BwOBxoWiz6Auq6x2+0wGo3Q6/Uo/z5NU1iWhfF4jLqu6ZhE0fG7775Lg+3tdkuxPv1+n+KRhJggSpPfe+89igcSUTNVVVFMTRiGMAyDnBNVVaHT6cD3fRRFgU6nQ9v+TdNgOp1CURTUdQ1d17FYLCgf//Hjx5S1L0qBxc/xeKT4ITHMr6oKmqbRgB8ARQMJUWI0Gj0oEhb3E64ASZIeiAOyLD/4fIjYp9cLiR3Hoe4Ay7Lo/cznc/R6PRyPR1iWRbdZlkXxSaJTQPzdNE1st1v0+32KB2q321gsFrAsC5qmodfrYb1eI89zOI6DXq+HzWaDp0+fwnVdqKqK8XiM4/GIfr8Py7IwHA6x2Wzw8uVL7HY7AMD19TWOxyO5VuI4hqIoeP78OdrtNrIso3ikLMvw2Wef4fb2ls6FiJdiGIZhGIZhvhmwEMAwDMMwDMMwv8EHH3yA8/NzDIdDXF9f4/T0FHEcI45jyq8XRa9pmkLXdRoaS5KE/X4P13VxenqKsixxd3dHRa6aplGEzWw2g67rODk5QRiG8H0fL168gKZpcBwH5+fnSNMUt7e3VDbreR4NvkVfwXA4RK/Xw2w2Q1EUUBQFh8MBV1dX0HUdRVEgTVOKgJlOp5RPLwbziqLg7bffRlEUUFUVm80G9/f3FJN0cXGBNE2haRpWqxVl7V9cXGC73SLLMhrmi4H95eUlJEnCarWCpmm0Za7rOk5PT5GmKQ3rxSBfkiS4rouqquhY1us1AODk5ITO83K5hK7rAF5t55+cnFDOvhAuxHP3+/0HAoDoMxDX63X+KzFA/F1EBHU6Hbq/6D4QwoTI/9c0jZwFi8UCnU6HXAGGYUBVVfq753kkAPm+D03TqGxY9BOs12tMp1NEUYRer4flcvmgyHgymSAMQxKfhLNgsVhA0zRyBXS7XWy3W4zHYyiKgtVqhcVigf1+T8KTruvUR+D7Puq6hmVZePvtt8ntID5n//mf/4kvv/zyt/htZBiGYRiGYf47YCGAYRiGYRiGYf4Lnj9/jufPn+Pf/u3f8Jd/+Ze0sS02x7fbLTqdDubzOW2+V1WFMAxhmia63S6iKMJ2u6X4l9cHwHEco2kaXF1dYbPZwPd9zOdzGh5fXl6iKAp88cUXSNMUnufh9PSUyoW32y1tuYvugKqqsN/voSgKHj9+DEmSqGw2DENomoY333wTx+MRkiTh7u6OhtxXV1dIkoSKgkUh8HA4hGVZSNMUwKsBvOguuLi4QBiG8DyPfl+WJc7OzqCqKrkKDMPAbDZDv9/HYDBAHMc0SBbb7aqqYjQaUd+BGNoLgcD3fSpwFhFAu90O4/EYqqqiLEt4nkcihTg3AGjwL4SI3xz2i98pivLgdgAkBAjhQFXVB+KAiAgSooHoCRAOEUmSYJom3aYoCv3dNE2oqgpN07BcLtFut+nz4TgOVFWF53kIw5BigBzHIQeHiB1qt9tIkgQnJydYrVbQdR23t7dUZi1JEgzDQBAE2O/3uLy8pEiqLMuoN0DTNCodDoIA77//PhaLBRzHwTvvvEOCTb/fp46JpmlwfX1N4kFZlhST9C//8i84Pz+n7444nqZpEEUR3dcwDPpuiHO8Wq2oD0LTNKiqSsXQx+OR4qfEdZMkicqQ0zSFYRjIsowcMAzDMAzDML/vsBDAMAzDMAzDMP8LPv30U5yenuLs7IyGt03T4JNPPoHjONA0DVmWwXVd5HmONE3R6XTwq1/9irbTZVlGr9eDJElI0xS73Q6tVou2yTebDfb7PVRVxTvvvANFUfDs2TMa4o7HY1RVBcdxcHNzgzzP0Wq1KCO+KArqKri+vqbfHQ4H2jQXW+Ku62Kz2eB4PFJ/gNgAf/nyJRaLBYIgQL/fh2EYSJLkQRTRdDrFer2m+CHRRVAUBS4vL6EoCj1mPp8DeFVw67ouyrKkTHzRByDL8gPHgKqqJIpMJhMasodhSH0BVVVRIa8Y0suyjFar9SAuSAyIRT9AURRwXfdBNJAQAF53AQhnwetOD0mS6Nq/HiUkroGiKBSNJMsyiR+vD7jFbZvNBq1Wi+KZDMMgJ4dt21TSrGka3R5FEVRVpQLr/X5PooTjOAjDEACQZRlGoxHiOCa3SbvdpuH5arWC4zgwDAPtdpt6F3zfx93dHdbrNbbbLU5PT+H7PlzXJVfBbrfDkydPYFkWHMfBd77zHTq/rVYLaZriyy+/hKqq+Ou//mtyt4jYJtM0sVwuEUURZFmG53mwLAtVVSEIAjx58uT/Y+9Nluy4r6vflX178vRN9QAKlBiiSEnhoB1+B0fYc8/1HPclPNHMU7+APXAvSzZlUqREUiQAAiigCqdvs+/zDhB7q4oM38/+LkVZ0v8XUVFAnTpdZtYZrLX3WjAMg2OYer0e92n4vo/ZbIZut4uqqthco/NHkVO73Y7PhyzLyPOcDQf6u8iyDL/61a/Q6XSgaRpvTwgEAoFAIBD8viKMAIFAIBAIBAKB4P/A8+fP8fz5c7z77rs4Pj5m8TqOY5RlCcdxOGc9iiIoioJnz56haRruFeh2u5BlGVmW4XA4cGktCdxhGHLMz3K5RFmWHP0zGo2QpikURcHhcECapuh0Ojwtrqoq9wo8ePAAwGtR1Pd9joFpt9u8TXBzc4O6rnF6eop+v4+iKLDb7TCfz7nHoN1us/hcVRWLpJPJBJvNhqfvaUq71+uh3W5DVVWEYcgT503T4PT0lA0UKqIlUV1RFHS7XUiShNVqhaqqWFA/PT1FGIbcG7DdbqHrOm8OUAwTCfokiNNtZBpQT8B4POZNA5qqb5oGlmXdEfzpsW5/p9fQNA3W6zWbOmTU0GYAAC46psJgem/9fp9LgskU0DSNuwM0TeP3T90RFBEUxzEsy0KSJGi1WtwXQeeCpuG73S4OhwN
kWWazIM9zzGYzHA4HuK6LyWSCKIrYFHEcB77v4+XLl3BdF/fv3+fny7IMw+EQjx8/BgDeTKEtDU3TEEUR3n//fWy3W76ubNuGpmnQdR1ZlmE6nSJJElRVhX6/j/Pzc5imye/7ww8/xGq1wng8ZvPBdV3EcYynT5+ycTAYDDgey/M8xHGM7XaL/X4PwzDYPCKToaoqGIaBV69eIcsylGXJ53s0GnHfBv1d/NM//dM38ZEiEAgEAoFA8I0jjACBQCAQCAQCgeC/yX/+538CAN599907cS9lWSIMQxwfHyPPc86Sv10q++abbyLLMlxfX/M0O8XekDjueR7HpqRpClmWuTRYVVUsl0vsdjvYtg3btlHXNTqdDl68eIGmaTAej3lC/XA4YLPZwDRNVFWF7XYLwzBYJLZtG67rYj6fI8sy7Pd7ZFmGfr+Po6MjFEWB/X6PsixZoNU0Da7rQpIkvl/TNFxgTAW3FJdEoq1pmgjDkAXX26XE9P6bpmFxnB6rKAp+DyTYW5bFovftCXMSf+fzOQzD4PNQ1zUbB0TTNEjTlLsFttstHycS7gGgqioW9yluiIyP21sBNJWfZRmKomBTgKKkZFlms4Ceg/oQvtwlQH0A7XYb6/WauwUcx8F6vebIKRLvu90udrsdOp0OOp0OfN/HcDjEer1GHMdwHOdOdJBt2wjDkA2Dp0+fcn/A0dERgiCAZVnodDoIggDT6RR5nsN1Xb4+j46OEEUR6rrGy5cvURQFvv3tb6MoCti2zVFPi8UC8/mcH9u2bX7/g8EA7733Hvc8nJ2dod1uo65rNoyurq6g6zrOz8/hOA5s20aSJBgOh/jxj3+MPM/ZhBoOh/A8DwC41+CDDz5AWZbodDrcr0FF3WRANU2DZ8+ewfd9jEYj/MVf/AWyLEOSJPjnf/7nb+yzRSAQCAQCgeA3jTACBAKBQCAQCASC/yFkCNzmW9/6Fp48eYJ79+7BMAyOXiGxmgTHOI4hSRImkwnHuex2OwBgoTvPc+4VeOONN1DXNQ6HA+I4hizLLNiapsldAUdHR1wiu1qtOLbGdV0Ar+NiZrMZZ+ePRiOUZQlZlnnqnsTYOI4BAGEYsgBPJb60zRCGISzLwv3791EUBVarFQzD4C2B0WgE13VhGAaCIEBVVSxkX15eQlEU7Pd7FtHX6zXOzs54Qp5igCjGx3EcKIrCRcYU+VPXNQaDAfcHtNttzGYzFEWB4+NjmKbJWfK0oaHrOkajEZ+7wWCA3W6Huq65DJfiZkj4p46B0WiEuq4BgH+e5zmL8nme4/j4mOOgACDPczYvbscPLRYL9Ho9bLdb3mSgbQhVVeG6LlRVhWmaWC6X6Pf7/D0MQ3ieh8PhgH6/D13XudCaDKjBYMCmSK/Xw36/h67rCMMQjuOgaRpMJhP4vo9Op4M4jvlYbDYbRFGE0WjEPQRxHGM4HOLq6gpxHKPVasF1Xd5YOD8/R5Zl+Oijj1h0J4Og2+3CNE08ffoU8/kc6/UaFxcXvLlAk/nvvfceWq0WHMfB2dkZx0kNBgN88MEHAID5fI6Liwu4rgvTNPnv69GjR2x4ybKMe/fucbyQZVkIwxD9fh//+q//yv0VwOstirOzM9R1Ddu2EQQB0jTFaDTC3/zN33ztnx8CgUAgEAgEvw2EESAQCAQCgUAgEHwNUHTKbrfDeDyGbdtcwAoAf//3+WabhAAAIABJREFUf492uw1d1zmvnKJKaBKehHYSuamYNs9zTKdTFlTDMITrujx5b9s2+v0+C+BlWWI4HKLf70NVVaxWKxa6T09PuVw4jmOs12u4rot79+7xBHwYhthsNiymkzmwXq85Y951XVxcXGC5XKIoCqiqiul0ik6ng4uLC6RpCl3XeTNit9thMBig3W7zBD7wWtSVJAkPHz7kolwyBgDg/PwcsixzfwJtYZBYLkkSFosFmxXUQXB+fg5JkjgOiSJ+qqpCkiRskBC9Xo8Feoppou2KsixRliWOj49ZyKe4oaIo2CCoqgqTyYQn/4uigO/70DQNg8GAc+3JlKAeADIN6DXS+6TNkna7jU6nA1VV+RoCgCAI4Hke/04Yhuh0Oiz873Y76LoOy7IQBAGKouCSXnrPh8MB4/EYNzc30HUdq9WKzZfJZML/TtMU0+kUQRBAkiQcHR3xFkocx0jTFB9//DE0TcPFxQVvlwCAbdv48MMP+T1eXl5yj0Gr1cJHH33Et92/fx8AeIq/KAq89957WC6XOD4+BgC0221+XZ999hkOhwNs2+atAeoXIDOkKAp89NFHvK3w1ltv8WbB4XBgU8TzPHz66ad8bjudDv7yL/8Sy+USURThpz/96W/mw0MgEAgEAoHgG0AYAQKBQCAQCAQCwdfIzc0Nbm5uAACO4+CP//iPebLb932OVyEBcrVaIUkSniY3DAOLxQK6ruPy8hJlWeLRo0eQZRntdhuDwQAAsF6vEYYher0elw1HUYT9fo92uw3LsgAAaZry5DoVDiuKgjiOuUT44uKCI4ym0ylHFj18+BBhGLI5QXnzJycn6PV6LBpvNhsuSz49PeU+A+o60DQN/X6fJ9h1XcdisUDTNOj3+2xYhGGI5XLJsTrHx8fY7/cwTZNFakVRcHZ2xoK9JEnwPA+r1YqNDkmSUFUV6rq+0yNwfHzMWwW0hSFJEovztAXQarWw2Wz4uJG4TyXFZAzkeY4syzhKqa5rNknotuFwiCiKvhI5RFsCtm1jPB6jKArOrq+qCqvVCt1ul6fbFUXhCKAoirisl4qA6bwXRcHdEf1+n0uh2+02Z+qPx2Pouo4gCJAkCQDwcwyHQ+6v2O122O/3iOMYo9EIw+EQALhvYjgc4v333+f4HZrAL8sSp6en0DQNP/nJTzgKyHVdKIqCoijQbrfx4YcfQlEULr2eTCac7f/+++9zyfL5+TnqusZwOOTtg48//phfy+0ybQD8usIw5M6Oi4sLjmM6Ojpik6qua7x48QKmaaKua3iex8fRcRxMp1NcXl4iz3NYloV/+Id/4C0OgUAgEAgEgt8lJADN7R9cXl7i6dOnv6WXIxAIBAKBQCAQ/P5B5akk7iuKcicGxnVdaJqG4XCI/X7Ppa6GYaAsSwRBAF3XMRgMuDh4NpuhrmvOPo/jGIvFgsteKeKEugLOz895Cj1NU6xWKxRFgXfeeQcAuBNgu91yETFF82w2GyyXS3ieh/Pzc2y3W55uXy6XaJoGo9EI7Xab44Zoop9EX8qvr+sam80GAHDv3j0ulPV9H6vVCrIs4/z8nLsQAPDvS5KEk5MTpGkKVVUxm834ONZ1jePjYwRBwFsUNJlfVRU8z+POBTIaaMr8cDhwJNHtc0OFwxQpVBQFC/hUqgy87ipI0xRxHCNJEjiOgyRJMB6PWfQ3TRNpmiJJEsRxDMMwWHSnUt4kSWBZFndETCYTZFmGPM9h2zabBUVRcDRPURQs9Ou6zn0VVDacpilH6FAB8G63Q5qmsG0blmUhTVOYpontdoterwdVVfH06VO0220kSYKiKHibo2kaJEmCm5sb5HnOxhSVYTuOg48++ogNJ+
qBoI4CwzDwk5/8BO12m6OcyrLEaDSCZVn4t3/7N44LotvJMNput3j+/Dn3ZnS7XSiKwuXJy+US+/0ekiRhNBpxQbWiKHwNFEWBX/3qVxzPZRgGnzN6HZvNhk2Tm5sb3lqhGC7DMHBzc4N/+Zd/+UY/RwQCgUAgEAj+p/zwhz/Ej370IwBf2gig3ESBQCAQCAQCgUDw9UEbAgDwySef4N1334WmaRzpE4YhVFWF53lcqluWJaIogmEYXChLwjDF3dAkeRzHLMC3Wi1YlsVxLbPZDMPhEEEQwHVdRFGE7XZ7Jw6oLEv4vs8T0p1OBwC4DDbLMs5PB15vOux2O1xfX6PT6eD09JS3COI4xmazgSzLLB4HQYDNZoO6rrFarTAajdDtdqHrOqIo4tcEvDYHDocDPM/Der2+U9hLk+ObzQa6rnP5rKZpLJAbhoHlcsmFtSTWr9drFoUp2ocm3DudDjabDRsz9DsUL5OmKccA5Xl+J/6HIoBubwBQgS3dRueh3W4jyzKMx2PeEmiaBtPpFI7jIMsy2LaNwWDA2weyLN8xO+g8kmHgui5c12URf7/fsyifpim63S622y1vpJCx0W634fs+lwnv93uMRiN8/vnn8DwPo9EIWZZxJwIVFl9dXUFVVfT7fT6OFIv0xRdfQJZlaJqG4+Njfn+DwQAffvghNpsN+v0+Tk9P0el02GixbRu/+MUvWNTv9/vI8xxHR0coyxLPnj3DcrmEqqo4PT3lIul2uw3TNNlYoM6KJEm424CukU8//RSLxQK2baPT6XDp9Hg8xvHxMZbLJTRNg6qqODk5YaNtMplw50Mcx2zKPXz4EHme43A4YL/fYzqdfmOfJwKBQCAQCAT/N0iXl5e8ERCGIfb7PbIs+22+JoFAIBAIBAKB4A+G09NTXFxcwHEc1HXNU866rvMUfVVV6HQ6eOutt/Dq1SvOsD8+PoYsy8iyDPv9Hp7n4d69e8jzHLPZDFEUod/vQ1EUFotp2vnBgwcsNj979oxz9B88eMAC73a7xWq1gmmaHL1CojCV815eXsK2bWRZxkbCarXC+fk5x9jIssxGgKZpOD8/Z/NhsVhAVVVMJhN4nsclyLdNgOPjY94C2O/3qKoKu90Ok8kElmVxt0BRFJyzT6/P8zwAr02NxWIBAHjw4AE0TeNJeFVVWSCP45gNAwA8kU9xOXScbncH0LYACdc0uZ9lGeI4RhRFLLjT79NEfpqmHBPV6/X4XB4dHfH5oW2N8XjM2wH08yiK4HkeqqrC4XDgCXsqSfZ9nzcBHMfh5w2CAN1uF6qq4vnz57BtG2EY8nR9r9eD4ziIoghXV1d8DRVFgeFwCFVVeYr+gw8+wHA45Mn7wWAATdPgui5++ctfomkaKIrCBgd1B6xWK1xdXaHT6UBRFHiex+ei0+ng/fffh6ZpaLfbHI1FZsEvf/lL1HUNVVXR7XZ5a4GMs/V6jcePH/P1Q9FKJycnXJi93W6RpilHNDVNg+fPn+P73/8+TNPEarXizRhd13F1dYU0TREEAcIwhGEYmE6nME0T77//Pn7xi1/8Fj5BBAKBQCAQCP5rbm8EfCUaSCAQCAQCgUAgEHzzkIg5Go3Q6/WgKAosy+JIGzIH8jznbQLKkF+tVsiyDBcXF+j1enj16hWWyyVc18V4PIbjOAiCgAuDR6MRHMfBcrlEkiQ82f7GG29wxvtut8NqtYLnebi8vORp8iAIOD7o8vKSY3HIAFAUBQ8ePEAYhlxKu16v2WTQNA1ZlmG9XmO/30NVVRwfH8N1XRb7t9stZ+ofHR0hyzKsVivOde/3+7wRcDt/fzabodvtcuwMFQkD4On0k5MTZFkGTdN4g4Gy+YHX5cW0+UCbGcPhkA2AqqqQZRmX/JZlyVsHTdMgz3OeHO90OojjmCOAbNvmaKAkSdBqtXiL4HbkT5Zl6Ha7qOuaX0OWZSzyk3BNz58kCTzPQ1mWWK1WXOBLRlAYhgjDEN1uF6ZpIgxD2LaNw+GAPM/vGARRFHFHwnK5RKvVguM4LPJ3Oh02JT7++GO0223O8qfrlmKeFEXhzRVJknjr5Wc/+xl3NbTbbVRVheFwyH0NP/3pT++cRzIXqqrC1dUVb0jQ8wHg6KBXr15B0zQ4jsNbEOPxGABgmiaapsEnn3zCpdMA2BBptVqIogiHw4ENLno/VVUhiiIArzse0jTlLoFHjx7h6dOn8H0fZVmirms8fvwYs9nsm/joEAgEAoFAIPgv+S+jgQQCgUAgEAgEAsFvB8rUf/XqFYDXETm6rnNMi+M4bAgAryfVW60Wi/mGYXAfQBRFqOuaS1YpfocmvdvtNp48ecIic6vVwr1791jgJhOg3+/zc9BGAU2r0wQ4TZ1vt1uMRiPYto39fg/LshAEAVarFcerUH79arWCpmkYDAYwTZMnz2VZ5giYo6MjpGmKoig41mez2WAymXD5chAEkCQJ6/Uamqbh8vISkiRhuVyysG6aJvcZGIaBpmk4VkeWZbiuiyRJoOs6mqbh4mUyDchAoHJgigiiyfi6rjkaiL7iOIbneVzkTD+XZRlJkvDkfqfTYbF/Npuh1+vxRgEZD8BrE4OKbkm89n0frVYLAGDbNkcDDQYDrNdrDAYDSJKEMAy5b2Kz2bA5Abw2Lm7HAoVhyOe/3W6zoUBxU0VR4Pnz53y9jsdjfgx6b1dXV2yy0DVCpsYvf/lLyLLM/QiGYXDPxePHj9msOD4+5o6AVquFsizx85//HIPBAI7jcNyQqqpwXRdVVeHly5eo65qNI8/z+HkoKoqMmk6nA13XoWka6rrGYDBAEAQYDAZI0xSdToeNAoriarfbfG1TRNXLly/ZzHrrrbcgSRIOhwNUVcXl5SX+9m//FpIkwXVdvHr1io0EgUAgEAgEgt8GYiNAIBAIBAKBQCD4X87p6Skcx0Gn08FgMIBlWbBtG9/+9rdxc3ODKIowGAw4R55KZ0kY3+122O126Ha73AkgyzK22y1s2+ZJ/aZp8OrVK6zXa9i2zQW0FMVTVRXG4zHnxkdRhOVyCQAcudJutxGGIRf81nWNb3/72yiKArvdDofDgTP4u90uXNdFGIaQJAmbzQaWZeHs7IwLjakwebvd4uTkhDcHJEkCACyXSxiGgZOTEyRJwrE92+0WRVHg+PiYc//DMISmaTBNE4qiIE1TGIbB75cEfyoTpi0BmrynHgASwOk2mvCnSX4qCabXQiYMTdu7rotOp4OmaXizIM9z9Ho97HY7vj+ZBPRFhbd0DouigG3bvCXg+z7yPOeoHNd1kec5wjBEkiS8HUA/tywLvu+j1+vhs88+Q7/fZ4OpKAo2GgzDwM9//nN4nsfRPJIkIU1TjEYjlGXJBgCVA8uyjLqu0ev18Mknn3DJLk3+k9D/8ccf8+8BgCzL/G9N0/i+tJXR6/WgaRpHQZG5QQXCANBqtbDdbrmvgjY+6rrGeDzm68R1XZRlievraziOA9u2IUkSbNvGf/zHf+DP//zPUdc1sixjY8uyLCyXSziOg+12y39zdO6n0ylevHiBJEl424Y2SX784x/zhopAI
BAIBALBN4HYCBAIBAKBQCAQCH6HuF02DAB/+qd/Ctu28fLlS4zHY858b7VaPPlsGAZPze/3exRFgbqu4bou4jjGzc0NdF3H5eUlVFVFHMccJ6OqKs7OziBJEkcKVVWFyWTCguzLly+hKAoURcHJyQmapoGu61gsFqiqCkEQ4OjoCIPBAMvlEmmaQtd1ZFmG8/NzzpunkuDdbofhcIher4ckSXhrYDabYTQasRkSxzEkSeIC3Pv372OxWCDPc2w2G0iSxJ1nDx48gKIoiOMYsizDsiye4CaDpNvtwvM87HY7AMDx8TGapmGhmQwARVG4CLgsS44IStOUp+ZPTk4gyzJvDJA5QBsCFxcXbCaQeEzbAr1eD0VRAHg9qX+7rJgMiaZpsFgsOOqmrmvsdjs2Fej867rOxkcYhhgMBpBlGUEQcLyQ4zgIwxB5nuPZs2eYTCYIw5DfY6vVwuFwwHw+56LqKIr4/Luui+vr669E9ZAZUNc1Pv30U1xfX2M0GnHvQp7nGA6HbABQNNRtA+DJkyd8DCzLQqfTQbvdBgDuoqAuAYpdov4B27bx/PlzVFXFhstwOESSJCjLksuVv/jiCwRBwAJ+mqZctqxpGn7wgx9gOp1yMfRms0G73eb4LtpOKcsSmqYhz3M+Fm+++SZHVkVRBEVRkCQJm0H0/5/97Ge/4U8OgUAgEAgEgl8jNgIEAoFAIBAIBILfQWzbhuM4+O53vwvHcWBZFhsApmny5Hqe5wDA0/0kjK9WKxwdHbE4PZ/P4fs+LMvC/fv3WTynuKFut4uLiwsAr42Jw+GAuq7xzjvv8Gs6HA44HA4AwJnuVLRKAvzFxQVH/tB9NE2Doijo9/vY7XaIogiapqGqKi6dpYls2mQwTROnp6dI0xSKorBgfbtgOEkSjlM6HA6QJAm9Xg+6riNJEu4jAMC58vQYZVliPp+zSE9luVVVIUkSpGnKX51OB0dHR9y1cNsgSNMU3/3ud9E0DccI+b4P3/fR7Xa5I4Bua5qGJ9vpPvSdvlzX5e2FoiiQpilvClRVhTzPcTgcMBwOkaYpi+LU/TAcDjnyxnEcVFWFOI7R7/ehaRpubm7Y7LBtG6qqIk1TDIdDaJrG3QBkUlCPRRAEGI/HmM1mXAytqipkWWah/uOPP0bTNBwbpGka+v0+mqbBL37xC57ap8JgEtP7/T4ePXqE0WjE1zQVYdd1jcPhgOVyiV6vx5sTAOC6LjabDcbjMR49enTHYKBiYYpVWiwWuLi4wHK5RFmW+OSTT/Bnf/ZnbOpMJhNkWQbHcdhwybIMSZIgz3NIksSdGa7rIooiTKdT1HXNJdcAEIYhZFnG1dUV/u7v/u43/VEhEAgEAoHgDxhRFiwQCAQCgUAgEPwecXJygpOTExiGAcMwuES4rms4jgNN03BxcQFN07BcLlkUf/DgAcqy5Il9y7Lw1ltvIcsyyLKM6XSK7XaLwWDAJbFUnkomgCzLvDmw2Ww4d38wGCAMQ0RRhP1+j16vh36/j6qq2BigaJVWq4Ver4fD4QBd13kb4OzsjMt8d7sdFEXBZrPB5eUl1us1er0e1us1FEVhEf3i4oKNBAD8XikXnu5Hz02lsrIsc9lrWZYstE8mE57OXy6XbAQkSYJOp8O3k5hOt1GZLBUJ347zCcMQpmmyWUKGCX0vyxLdbhcA7nQF0O15nrMQX1UVP2a/37+zrUCT/dTNEMcxT/M3TcMmABUPP378mHsS6DzS75VliWfPnqFpGrTbbciyjFarBVmWkWUZXNdl40RVVRRFgZOTE6iqiqZpeKuFzChZltkA+PTTT3mjpN/vo9frcT6/ZVm4ublBmqYcbUXfqUB5Pp9DkiTeeun3+wBem2X0vn3fh6qqME2Tz1e320VZllBVFdPpFJ7nYbFY4Pz8HC9evEAYhnj77bfZ5KHSZuoToH4Misq6HRO02Wy4c8M0TaiqijAMeevg0aNHHNlFUVk//elPOWpLIBAIBAKB4OtAGAECgUAgEAgEAsHvMX/yJ3+CdrsN0zThui50Xeci2dVqdSc+hkqG8zzH5eUl2u02FEXBcrnEbrdDr9djMRgAXr58iVarxbE7FKNDgmar1WLxWpZl7Pd76LqO+/fvs4hOU/iWZXFhK8XvbDYbDIdDjg7SdR273Y6FYirz7Xa7HAVEz3VycoI0TaGqKpsWJPSTAWAYBvb7PRcOr9drOI7DE/l0fO7fvw9Jklg0zrKM+wOoKJgm9fM8ZwOAYoCOjo4gSRKXC5PBQD0LhmGgKIo7JsDtyX8yA2iC/cu3lWXJU+/02qMoQlmWfM5IuKZtACqRpun+PM9ZLKfeBYpCmkwmbNq8ePGCzYhOp8PT8XmeY7fbsRkgSRJfP8BrE2a73cIwDD531NfQNA0++ugjKIqC8XjMBcNlWWI0GuGzzz6DpmlsZLTbbWRZxmaHYRi4ublBURS8GUDHiQydqqrYQHAcB77v4+joCIfDAUVRYDabcQ+CqqpIkgSe52E2m+H09JSP13e+8x2+dmnbYzgc8rGizgMAbOzQpkZd13y9yrKMJ0+e8CZEmqYIwxCGYSDPc2y3W/z1X/819308ffr0G/3cEAgEAoFA8PvHbSNAAfD//HZfjkAgEAgEAoFAIPg6efXqFebzOeeiA6+z1W9ubjhvXtd12LbNArau63jw4AHnzvu+zwaA67os4FPMC5UI+76PzWYDVVU5ygUA568bhsERRWQaHA4HDAYDAECv10Oe54jjGGVZwjRNtFotmKaJPM95Cp/ig6gLYblcsuFA2fy3zQd6PYZhIMsyeJ7HAjH9mybEN5sNxxXJsox79+6xMEy9CWEYIggCjEYjtFotNgdo6yGKIti2jXv37sG2bTYIqBMgyzKMRiM2VA6HA0zTZHGfjhshyzJvU9DkPUGifJIkfLskSdB1HZqmcZ49xSLRpkKr1YKu69B1ncuQkyRho6LdbsOyLDiOg/1+j5ubG1RVxfn/JMr3+308fvyYo6TqusZgMODInFarhdVqBUmSYBgGX0MUv/To0SNEUYROpwPXdWEYBvr9PkzT5CgqTdO4c6DT6fB5e/LkCWRZxm6349dbVRUbPsvlko/9YDCAbdu8QdBut7FerzGfz3k7RdM02LYNXdcBvI4SkiQJjuOgaRpcXV3h5OSEz2GSJFyQTKI/FTXTeaAtGTI3bNuG7/vY7XYYjUaQZRmqqmIwGKDVakHTNEynU1RVhTfeeAOTyQT379+Hbdtf6QcRCAQCgUAg+J/wR3/0R/jggw8ACCNAIBAIBAKBQCD4vaQoCux2O1xfX6NpGqRpCtu2WbhM0xTj8ZizyzVN4/z91WqFIAhwenrKMS9JkrDYeXR0xNP7VCTsui5PelPkSbfb5WLbOI6xWCxQliUePnyIsiyhKAr2+z3yPEcQBDg7O+MpeCo4TpIE/X4fdV1zoe56vUZRFFAUBWdnZ7wFsNvtWMzvdrtYr9fQdf2OAUAT8J7nYb/fcwwSAHiexwJwVVVYrVaI4xi2bePk5ASj0QiGYSAIAiRJwgZAq9XCvXv3uEOgKAouAs6yDIPB
2azSbtdlt93/d9/vzP/5yFhQU1YXG0QugoTNhxnE/02QohhBDi0yWNACGEEEIIIYQQDwXHcfB9n52dHYrFIolEgm63Szwep1KpsLCwgOu6mKZJGIYUCgV6vR7xeJydnR2WlpYIw1A1BUzTRNd1fN8HYH5+nnK5jOd5tFotKpUK8Xicu3fvfqw6B4MB+Xxe1ew4DlEUsbu7S7VaxbZt4IOwXsdxaLfbhGHIo48+yoULFxgOh4zHY8bjMf1+X4UJHz9+nN3dXQCKxSJBEBCPx0kkEmolUafTUXXcvn2ber3OaDRSmQFPPfUUpVKJXC7HcDhUWQT7+/sMBgOefPJJbty4geM4dLtdbt++Tb/f5+7duzSbTX7yk59IE0AIIYR4CEkjQAghhBBCCCHEQ6Pb7ZJMJpmfnyeZTDKdTllcXCQej2PbNpqmEYYhpmniui65XI7Tp08zGAzwPA/bttUN+/F4TKFQ4PTp02QyGVKpFOPxGF3XmZubI4oistks+Xyee/fufeQax+OxWgk0mUzIZrNsbm6SzWYJw1Ad2FuWheM4bG9v8+ijjwIfNBGObuaPx2O63S5LS0ucPHmShYUFZrMZYRgyGo3odru4rqsaHHNzczx48IDJZKJqeeutt6hWq+RyOZ577jmCIEDTNO7du0ej0eDg4IB+v8/S0hJ/+qd/SqFQwHEcHjx4wOHhIZ1Oh3a7DcBPf/rTD2URCCGEEOLhIY0AIYQQQgghhBAPlTt37lAsFlUOwGw2Yzqd4rouo9EITdOIooidnR3i8ThhGKJpGt1uVzUCTNPkwoULpNNpEokEtm3T6XTQdZ1UKoXv+8RiMTY3N4nFYgRBwOHh4UeusdVqkclkuHjxIidOnMB1XbXaxzAMRqMRyWSSy5cvs7CwQLPZZDQaqSZAt9tlNpvx8ssvUygUCIIA3/exbZtWq8VgMCCdTnPmzBnG4zHz8/OMx2Py+Txvv/32h2rZ29vj8uXL2LaNZVn0+32GwyH9fp8gCPj+97/P5uYm/X4fXdd58OAB/X6ffr9PFEWsr68DqIwBIYQQQjx8frMRkPicaxFCCCGEEEIIIT6Sf/zHf2Rzc5O3336bubk50uk0QRAQBAHz8/Nomkan06FUKtFut4nFYgCEYcjOzg5RFKmDcV3Xicfj+L5PPB5XIb+NRoNUKkWpVOLmzZu88847Hysk99VXX6VUKhGGIfPz84RhyP3799E0jSeeeAKAZDJJs9nEdV1arRamaVIsFrl58yb1eh3DMNB1XU0ZHB3eb21tkUqlGA6HpFIp+v0+nudRrVbZ2dnh3Xff/VAtf/d3f8eLL76oXqfjODz//PMkk0neeecdyuUyhmGoIGDXdbl06RKTyYTBYIBpmtTr9U/uAQohhBDicyMTAUIIIYQQQgghHhqGYXD69GkcxyEIAsIwpFarMZlM0HUdTdMYj8fEYjGGwyGVSoWdnR2azSae56k9/IZhEEURpVKJ3d1dhsMho9GIdDpNq9VidXWVVCpFJpNhb2/vY63HuXfvHtVqVU0YLC0tsbW1RTab/dDh/1Fo8OXLl/nqV7+KbduYpsloNGI0GjEcDnFdl3q9zmOPPYbv+0RRpDIPDg4OAKjVamxubvLGG298qE7f91UzwfM8rl69SiKRwDRNJpMJ9+7d4+DgQK0aWl5exjRNut0uvV6PXq+nMgWEEEII8fCR1UBCCCGEEEIIIR5K/X6f6XTK8vIya2trpNNp0uk0AEEQMBwOmc1mZDIZ6vU6lUoFx3HQNI1Wq6UaAZlMhkuXLpHJZOj1emSzWabTKVEUqb+VTqfZ2NggHo9z69atj1Xn7du3yeVyXLlyhXg8TiqVQtd1dQP/8PCQzc1Nvva1r1GtVjEMA8/zaDabDAYDDMNgYWGBJ554gs3NTWzbZjAYMJlM1JqgQqHA8ePH1eRDNpv9T1MB4/GYg4MDHjx4QDqdJooier0erVaLVqvFbDajWq3ieR6TyYR+v0+73abf79Nqtf7TyiEhhBBCPDxkNZAQQgghhBBCiIfWz3/+c3XT/2gCIIoiXNfFcRxWV1fRNI1UKqVCb8MwJAxDYrEYTz/99IdChF3XJYoiut0u5XKZdDpNuVxG13WSySRbW1tcu3aNX/ziFx+rzn//939Xt/m73S6+79Nut8lkMjz//PPkcjna7baq/ShIOJvN8uSTTxKPx3Fdl4ODA2KxGJPJhP39fUqlEhsbG2SzWUzTZDqdMplMqFQq5HI5bNv+L+v58Y9/rNYTGYbB22+/zcLCAteuXSORSOA4Do7joOs6r7/++u/2kIQQQgjxhSITAUIIIYQQQgghHjrvv/8+9XqdIAjIZrMMh0Oq1SrHjx8nHo8TBIFaFdTtdsnlcty4cYONjQ16vR6maWKappoQmE6n7O7uUiwWCcMQ0zTJZrOMx2OWlpbwfZ/XXnvtY9d569YtyuUyqVQKy7J45JFHOHfuHNPplMFggOM4tNttRqMRs9mMF198ke3tbbLZLJ1Oh+FwyGAwwLZt+v0+Fy5cYHFxUa37OVoxZNs24/GYzc3N/zQV8JsajQaNRkMFAFuWhe/7eJ7HaDTi1VdfVc0TIYQQQjzcZDWQEEIIIYQQQoiHXq/XY2lpiXK5zOLiIqlUilQqheM42LaN67oMBgPW1tY4efIknudhWRaNRgPLslQo79GaoCiKGAwGTKdTZrMZhmGQTCaJxWIUCgWKxeLHXhEEsL+/T7Va5erVq0RRxGQyodPpqKwAx3HY3t7m/Pnz+L6PZVmMRiM8z6PRaKjf88gjjxAEAb7v0+/3GY1GPHjwgPF4jGmaXLx4kWq1ShRFH/q5/42u67RaLbrd7sd+bUIIIYT44pJGgBBCCCGEEEKIh954PFa35NPpNKlUilarhWEYjEYjNE3j0qVL+L7PbDZTq4BisRitVotsNsuNGzdYWlpSTYKjNUOdTodUKkU2m6VSqRAEAbVajTfeeAPHcT5WnUEQEEURCwsLFItFNY1gmiaJRIKbN29Sr9cZDoeqOTEajVQOwOXLl6lUKpimyWAwYG9vj8lkQqPRIJlMcvXqVTY2NhgMBqTTaYIg4K233vqU3nUhhBBCPCykESCEEEIIIYQQ4vfCeDwmCALW1tYIgoBkMkkqlWJ1dZV8Po9t2xSLRbrdLpPJBMdxME2T69evs7i4iO/7OI7DZDLBsiwmkwme53HmzBksy2Jubg7DMEin0/R6PZaXl3+r/fmj0YhEIkEikSAMQzzP4/z585w9exZN0+j1eti2TavVot/vk0wmeeSRR6hWq2pCoNfr0ev1GA6HxONxzp07x9bWFoZh4Lqumgw4mhgwTfNTeMeFEEII8bCQRoAQQgghhBBCiN8bjUaDXC5HpVKhWCxSq9VIpVIAuK5Lr9cjHo+jaRonTpxgeXmZMAxxHEcd/h9NC5w/f554PE4mk8H3fYbDIYlEAsMwiMVi5PN5HMeh2Wx+7Dr39/dJJBJcuXKFpaUlUqkUg8GAVqtFGIa0221s2+aZZ54hmUwSBIFqABxlBbiuyze/+U0WFxcJgkCtMmq321iWRT6fZ
2lpCcMw2Nvb+6TfaiGEEEI8RH6zEZD4nGsRQgghhBBCCCF+Zz/4wQ9IJpOXjiJNAAAV60lEQVRUq1Wy2Sy6ruP7Pq7rMplMWFhY4NixY+rwfTqdEoahagjMz8+zs7ODbdskk0n1s7PZjMFgQCKRoFwuMz8/z/nz57l3754K3P04fvnLX7KyssJTTz2lbvLbtk2z2eT06dNsbW3R6/XQNI3Dw8MP3fRfXV3l3Llz+L7PZDJRt/51Xce2bXZ2dlhbW+Pg4IALFy7wk5/85FN4p4UQQgjxMJKJACGEEEIIIYQQvxfeffddTp06hWVZBEGAbdtEUcSZM2ewbZtCocB4PAag3W7jui7FYpGdnR1KpRJBEKgQ4aMDeNu2WV5eRtM0yuUyh4eHlMtlxuMxDx48+K3qvHXrFsvLywAMh0MArl27Ri6XYzQa4TiOChI+ODggl8vxwgsvkEwmCcMQ0zTpdDp0Oh2GwyGFQoEnn3wSgFgsxmAwUGHDnU7nE3hnhRBCCPEwktVAQgghhBBCCCF+L/V6PVZXV4miiNOnTzObzfA8j1wuR7fbZTgc4rouABsbG5TLZaIoUmuCxuMxhmHg+z47OzvEYjFSqRSz2Yx+v088HmcymZBOpzk4OMCyrN+qzoODA+r1Oru7u6TTaQqFAoZhMBwOabfbTCYTxuMx3/rWt5ibm1NZBvv7+4xGIzqdDrZt82d/9mcsLS3R6XQoFov0ej3VDNB1nf39/U/y7RVCCCHEQ0RWAwkhhBBCCCGE+L20t7fHz372M77//e8DsLi4SKfT4eDggHQ6TRAEqgGQyWTodrtEUUQYhmqK4OzZsxiGwWw2A8CyLA4PD4nH44xGI9LpNGtrazz22GP88z//829VZ7/f55133uHixYvk83nee+89lVcwnU55/vnnCYIAXdfxPI9+v6+mFQzD4LnnnmNxcZFGo0EsFsP3ffUaR6MRGxsbatpACCGEEEImAoQQQgghhBBC/F7pdDrkcjmWl5fV7f/BYADAhQsXSKVSjMdjLMtiNpvRbrcZjUZUq1WOHz+O53k4jkMYhui6zsHBAa7rEovF2N7eZjqdUqlU0HWd4XCIYRi/VZ3tdptUKkUikcB1XRzHoVQq8eyzzwJg2zaHh4d0u13VAMjn83z9618nkUgwHo8Zj8c4jsNgMMCyLLa2tshkMmQyGfr9vpoyEEIIIcT/PTIRIIQQQgghhBDi99o//MM/kEgk2N7eplgssrq6ysrKCrPZDMuysCwLXdeZTqccO3aMzc1NZrMZvV4Px3GYTCYYhoGmaRQKBR555BF6vR6maZLP57l37x6Li4usrq7+1lkBAP/yL/+CYRhsb29z8eJFstksh4eHmKZJPB5nMBjQ6/UoFAo89dRTaJpGt9vFdV01QZBMJtXrGw6H9Ho9tUqoVqtx586dT/CdFUIIIcTDSBoBQgghhBBCCCF+L/3t3/4tf/EXf0G5XCYej9NqtfA8jzAM1U7+69ev0+v16Pf7TKdTms0mURRh2zaZTIYzZ86on43FYgB0u11isRjj8Zjd3V1+/vOf/051/uxnPyORSHDx4kUsy8L3/Q81Ir761a8Si8UIw5BOp4Pruti2jeM4LCws8KUvfQnP82g0Gnieh2EYNBoN+v2+CkcWQgghxP9t0ggQQgghhBBCCPF764c//CHf+973MAyDWCxGEAR4nsfi4iLHjh2j2WySTqd58OAB8XicKIrUlMDS0hKO43BwcEAsFlP/HI/HAdjZ2WEymXDp0iXeeOON36nOn/70p9RqNdbW1oiiCE3T2NnZoVarEY/H6XQ6WJaFbdv0+32q1Srf+MY30DSNKIpot9v0ej0mkwmtVotut8u//uu/fhJvoRBCCCF+D0hGgBBCCCGEEEKI31tH2QCVSgXLsigUCqyvr6NpGoZhEEWRumXf6/VIp9M8/vjj6uDfMAx0XceyLJrNJplMRn2/XC4zHo8xTZN33333d6711q1bFItFtra22NzcJB6P0+/30XWd/f19ut0utm1z48YNzp07h6ZptNttut0uw+GQvb09hsMhQRCQzWb51a9+9TvXJIQQQoiHl2QECCGEEEIIIYT4P+MnP/kJqVSKGzdusLKyQhRFuK7LZDLBNE00TcP3fW7evMlsNmM0GuH7Pu12G9M00XWd2WzGyy+/zGg0wrIscrkcnU4Hx3Go1+vMzc19IqG8P/7xj6nVamQyGWKxGLZtq0ZEJpPh0qVLZDIZ3n//fRKJBO12G9/30XUdx3E4d+6cWgt04sQJyQcQQgghBCCNACGEEEIIIYQQ/wf827/9G8vLy9i2TS6XU4fspmly7tw5arUajUYDx3EoFAqEYcjh4SGFQoFz586xuLgIQBRFJBIJptMppmkyNzfHbDZjdXX1E2kEAPzgBz8gDENyuRxRFJHJZHjqqaeIx+OEYch7770HwGg0wnEcEokE586dw7IsRqMRtm1jGAblcvkTqUcIIYQQDz9ZDSSEEOL/t3c/vW1U7x7Anxm70JRQaOktrSgLblVaVBYICekihMRdsOAt8GruS+INsAapoIpFqX5dIAhRVRM7f9zEcRLP+C6cccZjpy00FHry+Wwae8Yz55w5z8zY3zQGADgVHj58GFeuXIn9/f3Y2dmJS5cuxXvvvRdZlsXm5mZERPT7/eh0OrGxsRHXrl2L27dvx7lz52J7eztWVlaiKIp4+PBhbG9vx4ULF+L8+fOxsbERZ86cObE/xbO/vx9ra2txcHAQFy9ejCtXrsTBwUE8fvw4VldXY3NzMx49ehS7u7tx/fr1eOONN2Jrayt6vV50Op3odDrR7Xbjzp07J9IeAODl5E8DAQAAcOqsra3F999/Hzdu3Igvv/wylpaWIsuyGAwG0e/3Y2trK8bjcbRarfj888/j7NmzkWVZdDqdGAwGMRwOY39/P86ePRsff/xx7OzsxG+//RaDwSAiIt56663pdxI8r263G91uN3788cf4+uuvo9/vT7+zYHd3Ny5fvhxXrlyJlZWVaLfb0ev1Ym9vL3q9XvR6vfjpp59OpB0AQBoEAQAAAJwaDx48iNFoFBcvXozbt2/HwcFB7O/vx9bWVpRlGTdv3oy33357+uXBWZbFaDSKTqcTy8vL8dFHH0Wv15sGBNUH9N1u90SDgLpvv/02bt68GaPRKDY3N+Pnn3+OTz/9dPq9ATs7O7GzsxP9ft//AgAAFhIEAAAAcKr88ssvsbS0FMPhMJaXlyPP87h161YsLy9HWZaxsrISg8Egdnd3o9VqxcWLF+P69evx/vvvR6fTiYODg3j48GFsbm7GxsZGbGxsxNraWjx48OBvaW/1537qvvvuu/jkk0+i3W7HcDiMu3fv/i37BgDSIAgAAADg1Ll3716UZRlXr16Nzz77LMqyjFarFdvb29Hv96Pf70eWZXH16tX48MMP4/z587G2thbD4TBWVlYiy7JYX1+Pfr8fy8vL8dprr73wPvjtfwDgWQkCAAAAOJXu378f9+/fjzfffDNu3LgRq6urcebMmRgMBrG/vx9ffPFFLC0txePHj6PT6cR4
PI5Hjx7FcDiMfr8fN2/ejG63G48ePYp229trAODfy50KAAAAp9oPP/wQm5ubsbS0FKPRKD744IO4fv16/P7777G3txcHBwdx9uzZ2N3djdFoFO+8805cuHAh1tfXY319PdbW1mI8Hse1a9didXX1n+4OAMAcQQAAAACn2srKSuzs7Ey/6HdpaSn29vbi1Vdfja2trdjf3488z+OVV16Jd999N4bDYWxtbcX29nb0er34448/Ym1tTQgAAPxrCQIAAAA49aoQICLim2++ia+++ioiIvb29mJraysuX74cV69ejU6nE0VRxO7ubqyvr8fm5mbcvXs3BoPBP9V0AICnEgQAAABAw507d2J5eTl+/fXXiIi4dOlS3Lp1K15//fUYjUYxHA5jfX097t279w+3FADg6QQBAAAA0NDtdqPb7c48Xl1djXPnzsVgMJgGBAAALwNBAAAAADwDH/4DAC+r/J9uAAAAAAAA8PcRBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJEwQAAAAAAEDCBAEAAAAAAJAwQQAAAAAAACRMEAAAAAAAAAkTBAAAAAAAQMIEAQAAAAAAkDBBAAAAAAAAJEwQAAAAAAAACRMEAAAAAABAwgQBAAAAAACQMEEAAAAAAAAkTBAAAAAAAAAJy/77f/53HBGR53mMx+MYj8fzK2XZ9OfxeDx9nGVZjMfjKMsysiybPl+WZbRarSjLcrp+td1qveZ+qsdVO5rr1vfbfG2e59M2VK8ry3KuH82219ta/dx8bZZlked5FEUxXZbn+cx6VZurZc391fe1aCyr56p+1PtY9b85Znmex2g0mtlffd3m9pv7rrenfhxbrdZM35rL62PdbFu9/c3X1ftZbb96TbN91Tar56r1qv7X+9Vcr35sF415tZ0sy6bHtNp+dZyr9YqimM7j5nPN+VSfM815tmjs68ua49VqtebGrd7H+ljX29fsf/21zeer19b7q/5Prv4/vPX+n6//LIuyNsfrYzxX/1keRTGK7FnqP7Iox0+u/1//83Ocv/hW5Nmkbyt/9OaOgfp/meu/3qY8yrKIVqsdRTE67FvrOeq/iOxw3kzGYrLtyTGePzaL6r+a960Tqv/i33j9L1z/q2Xq/znqvxxH3sqjLE7P9b/8N17/i5f5/n88V8dH9T+5Pky2mR8uL198/RdlZHk2ne/T+i9PR/0Xh6/LD69n9Wvhsdf/cZr1f6L3/+MTqv8/e/0/gfqvet7K8zj4q/V/uJFTcf0vXlT9F/PzLI6r//qyYvrzUf23p+fb6vn5+m/N3GdP9luNfT6zzeo6Ndum5v3/k+p/fk4fzbdy7vhO5lE7yrJYUP/5zONqMh5f/3mMx9V5JJ9Zt35M5+q/KKLpr1//W5P32s9V/9n0fVK9z7P135rOh2e5/h8cjKLVOq7+J8emGu+yKCLLs2nt1/tbva4oy2i32tP5dnQMmvVfzBzHah7U218/dvP1n0/nRp4fnW+Or/9y+v50svyoX8fWf+McVx+3l+X6/9T3/0UZrXZrWt/Pc//funDtvf9b1Jmqs/UXVpO8WSjNi0J9UlYDWL/YNZdX+1u0TrWv+s1Ftbw5YeoHovq3fuCrddvt9sy+6yfAqq3Ni/PkhJDPbLM+Idrt9sIxWbTN5vL6Aa6PY3Osmn1fNL71camW128u6se6edwWPa7WzfN8Zvybba+fAJs3p4v6U/+3OW8WFU19/Wr7zfXq7arfhDa3Wy+yanvN5+onh0XzqTnG9b5Vfa1OGs2bteNqIMuyaXHWn69OWIvqozku9WPRrMnquaqWmicO9X9y9f/2f1062kaWRxZ/of6z2bEa1/se44hnrf/x0+u/3+vF0rnXptt4vDtU/0nV/1GtF4dvAib7OLrBntzETZ6br/8i8rxVW29+LtdvQo/6Vt3s5XNtH4/Hh3uLaD+l/ovqxqV+Hqpu6BpzoVx
wvF50/Te3MX08dv1X/67/zb4/y/W/3aj/f8X1f3xY/2fa0zefT6z/BctfXP2XtXP9k+q//sa9Ovfn0w+dXlj9V10aT35Ouf7HMflFkFarFaPnqf/D0KS6zqRU/ydy/1+U0T7zcr7/L8py8osSx7S53p+5+j88T2X5Kbv+H/Y3y7PI4u+o/zIisifUf/tw2eQePMvyxnGdraXqw/n6eWFx/WdR/8D/qB/l7PbLRfVf3f/PfkA4W//tWpsX3f/HtA1ZNn+umXwIXM3P6v6/Oadn91c188n1Xyyo/+Jwv5P3xCd7/W9Nt199GP3n67/VqPdqH/l0fCdtKGfG8bj6r7c5y46r/3w6vvUPiuvH+rj6L2sftFfr5nke7WPrv5zOjepYPlv953PtWlz/44ja5yfVPKgHVPVjV7X5Zbr+V4//1P1/dW6L57///39P3R3xqSw4gAAAAABJRU5ErkJggg==) **Task 2**: Load the data using pydicom as a 3D volume and then reslice it! [35 Points]
###Code
# TODO: Please upload ct.zip using the file panel on the left.
# Then use the following snippet to extract the data.
import zipfile
with zipfile.ZipFile('ct.zip', 'r') as zip_ref:
    zip_ref.extractall('.')
# 1) Now loop through all the DICOM files and store them in a 3D numpy array.
# Hint: You can either store them in a list first or read the dimensions of a
# single image slice to properly create the 3D numpy array.
# Hint 2: os.listdir(DIR) gives a list of filenames in a directory.
# Hint 2b: This list is not sorted - make sure you sort it.
# Hint 3: The dcmread function loads a single DICOM file.
# Hint 4: You can then use .pixel_array to access the image data.
from pydicom import dcmread
# TODO: YOUR CODE FOR LOADING THE VOLUME AS A 3D NUMPY ARRAY
# imports used in this and the following cells
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from os import listdir
from os.path import join
lstImages = listdir("ct")
lstImages.sort()  # sort the filenames so the slices are stacked in order
lstSlices = [dcmread(join("ct", image)) for image in lstImages]
# Stack every slice's pixel data into one (slices, rows, columns) volume.
imageData = np.array([s.pixel_array for s in lstSlices])
print(imageData.shape)
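# Optional sketch: alphabetical sorting of the filenames only matches the
# anatomical order when the files are numbered with zero padding. A more
# robust alternative, assuming every slice carries the standard
# InstanceNumber tag, is to sort the loaded datasets by that tag.
# New variable names are used so the result above stays untouched.
lstSlicesSorted = sorted(lstSlices, key=lambda s: int(s.InstanceNumber))
imageDataSorted = np.array([s.pixel_array for s in lstSlicesSorted])
print(imageDataSorted.shape)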
# 2) Now create and show axial, sagittal, and coronal slices from the 3D volume.
# Hint: Please use imshow(XX, cmap='gray') to show the image.
# TODO: YOUR CODE FOR AXIAL
plt.figure()
imshow(imageData[100, :, :], cmap='gray')
# TODO: YOUR CODE FOR SAGITTAL
plt.figure()
imshow(imageData[:, :, 100], cmap='gray')
# TODO: YOUR CODE FOR CORONAL
plt.figure()
imshow(imageData[:, 100, :], cmap='gray')
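# Optional sketch: the reslices above assume isotropic voxels. If the slice
# thickness differs from the in-plane pixel spacing (both standard DICOM tags,
# assumed to be present in these files), the sagittal/coronal aspect ratio
# can be corrected via the imshow 'aspect' argument.
pixel_spacing = float(lstSlices[0].PixelSpacing[0])
slice_thickness = float(lstSlices[0].SliceThickness)
plt.figure()
imshow(imageData[:, :, 100], cmap='gray', aspect=slice_thickness / pixel_spacing)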
###Output
_____no_output_____
###Markdown
**Task 3**: Use the Window/Level-technique to visualize the data! [45 Points]
###Code
# We will now enhance the visualization from above by performing
# Window/Level adjustment.
# Here is one way of doing that:
# vmin = level - window/2
# vmax = level + window/2
# plt.imshow(hu_pixels + rescale, cmap='gray', vmin=vmin, vmax=vmax)
# plt.show()
# 1) Please load the Window/Level values from the DICOM file,
# print these values, and then visualize one slice with window/level adjustment.
# Hint: The DICOM header has the following tags.
# (0028, 1050) Window Center
# (0028, 1051) Window Width
# Hint 2: You can use slice[key].value to access DICOM tag values.
# Hint 3: (0028, 1052) Rescale Intercept might be important.
# TODO: YOUR CODE
window_center = lstSlices[200].WindowCenter
window_width = lstSlices[200].WindowWidth
rescale_intercept = lstSlices[200].RescaleIntercept
print("Window center: ",window_center)
print("Window width: ",window_width)
print("Rescale intercept: ",rescale_intercept)
vmin = window_center - window_width/2
vmax = window_center + window_width/2
plt.imshow(lstSlices[200].pixel_array + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# 2) Play around with different Window/Level values that enhance
# the visualization.
# TODO: YOUR CODE
vmin = (window_center-15) - (window_width-70)/2
vmax = (window_center-15) + (window_width-70)/2
plt.imshow(lstSlices[200].pixel_array + (rescale_intercept-60), cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# Which values make sense and why?
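# Optional sketch: the same windowing logic wrapped in a small helper so that
# different window/level settings can be compared quickly. Slice index 200 is
# simply the example slice used above.
def show_windowed(ds, center, width):
    # Convert stored values to HU with the rescale intercept, then clip the
    # displayed range to [center - width/2, center + width/2].
    hu = ds.pixel_array + ds.RescaleIntercept
    plt.imshow(hu, cmap='gray', vmin=center - width / 2, vmax=center + width / 2)
    plt.show()

show_windowed(lstSlices[200], window_center, window_width)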
###Output
_____no_output_____
###Markdown
Slightly shifting the window centre, adjusting the window width, and adjusting the rescale intercept to change the contrast produces an image that takes the focus off the skin and surrounding tissues and places it on the bone, giving a more clearly defined image. **Bonus**: Create segmentations (label maps) for the volume using thresholding HU! [33 Points]
###Code
# Similar to Window/Level adjustment for visualization, we can threshold
# the volume to highlight the following components using the Hounsfield Units:
# 1) Fat
# 2) Soft Tissue
# 3) Bones
#
# Please create 3 segmentation masks for these structures.
# Then, please visualize each 3 slices per structure to showcase the segmentation.
# Hint: As a reminder, the following code allows thresholding of a numpy array.
# new_mask = imagevolume.copy()
# new_mask[new_mask < XXX] = 0
# Hint2: You might need to cast new_mask to int16 not uint16.
# TODO: YOUR CODE TO SEGMENT FAT
vmin = (window_center-110) - (window_width-390)/2
vmax = (window_center-110) + (window_width-390)/2
plt.imshow(lstSlices[200].pixel_array + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# TODO: YOUR CODE TO SEGMENT SOFT TISSUE
vmin = (window_center+20) - (window_width-390)/2
vmax = (window_center+20) + (window_width-390)/2
plt.imshow(lstSlices[200].pixel_array + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# TODO: YOUR CODE TO SEGMENT BONES
vmin = (window_center+700) - (window_width)/2
vmax = (window_center+700) + (window_width)/2
plt.imshow(lstSlices[200].pixel_array + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# Are the segmentations good?
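# Optional sketch: the cells above only display different HU windows; an
# actual label map can be built by thresholding the HU volume as the hint
# suggests. The HU ranges below for fat, soft tissue and bone are common
# textbook values and are assumptions, not derived from this data set.
hu_volume = imageData.astype(np.int16) + int(rescale_intercept)
fat_mask = ((hu_volume >= -120) & (hu_volume <= -60)).astype(np.int16)
soft_tissue_mask = ((hu_volume >= 20) & (hu_volume <= 80)).astype(np.int16)
bone_mask = (hu_volume >= 300).astype(np.int16)
# Show one axial slice of each label map as a quick sanity check.
for name, mask in [('fat', fat_mask), ('soft tissue', soft_tissue_mask), ('bone', bone_mask)]:
    plt.figure()
    plt.title(name)
    plt.imshow(mask[200], cmap='gray')
    plt.show()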
###Output
_____no_output_____
###Markdown
The bone, soft tissue, and fat segments look very good. I experimented with various window centre and window width values to obtain these images; a different combination of values might achieve even better results.
###Code
#
# Thank you and Great job!!
#
# _.---._
# .' `.
# :) (:
# \ (@) (@) /
# \ A /
# ) (
# \"""""/
# `._.'
# .=.
# .---._.-.=.-._.---.
# / ':-(_.-: :-._)-:` \
# / /' (__.-: :-.__) `\ \
# / / (___.-` '-.___) \ \
# / / (___.-'^`-.___) \ \
# / / (___.-'=`-.___) \ \
# / / (____.'=`.____) \ \
# / / (___.'=`.___) \ \
# (_.; `---'.=.`---' ;._)
# ;|| __ _.=._ __ ||;
# ;|| ( `.-.=.-.' ) ||;
# ;|| \ `.=.' / ||;
# ;|| \ .=. / ||;
# ;|| .-`.`-._.-'.'-. ||;
# .:::\ ( ,): O O :(, ) /:::.
# |||| ` / /'`--'--'`\ \ ' ||||
# '''' / / \ \ ''''
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# /.' `.\
# (_)' `(_)
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# jgs \\. .//
# ///) (\\\
# ,///' `\\\,
# ///' `\\\
# ""' '""
###Output
_____no_output_____
###Markdown
![CS480_w.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAADtCAYAAAAvOMSOAAAf83pUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjarZtpklu5lYX/YxW9BFzMWA7GCO+gl9/fASmVVC7b5YhWhnJgknx4dzjDBdKd//3Hdf/Dv5pCcynXVnopnn+ppx4G3zT/+dffZ/PpfX7/+FX4Pvrb4+7s74sCD0W+xs+PdXy+2uDx/NsbfR6fvz/u2vc3oX3f6PuLH28YdWWtYf+6SB4Pn8ctfd+on883pbf661Ln5wb8+j7xLeX738JnYeH7rvrZ/fpAqkRpZy4UQzjRon+f22cFUf8tjvdVnyvPs1j53mJw70v7roSA/HZ7P756/2uAfgvyj+/cn6Ofz18HP4zvM+KfYlm+MeKbv/yF5b8O/gvxLxeOP1cUfv/F2iT2z7fz/X/vbveez92NVIho+VaUdz+io9fwRN4kxfeywkflf+b7+j46H80Pv0j59stPPpZ1C2TlOku2bdi1874uWywxhRPISQhhkSg91shRDysqT0kfdkONPe7YyOEKx8XIw+HnWuxdt7/rLWtceRtPDcabkeR//eH+3S//mw9371KITMEk9fZJcFBdswxlTp95Fgmx+81bfgH+8fFNv/+lsChVMphfmBs3OPz8vMXM9kdtxZfnyPMyXz8tZK7u7xsQIq6dWYxFMuCLxWzFfA2hmhHHRoIGKw8xhUkGLOewWWRIMZbgamhB1+Y11d5zQw4l6GGwiUTkWOinRoYGyUopUz81NWpo5JhTzrnkmpvLPY8SSyq5lFKLQG7UWFPNtdRaW+11tNhSy6202lrrbfTQIxiYe+m1t977GMENLjR4r8HzB4/MMONMM88y62yzz7Eon5VWXmXV1VZfY4cdNzCxy6677b7HMXdAipNOPuXU004/41JrN9508y233nb7HT+z9s3qP338F1mzb9bCy5SeV39mjUddrT/ewgQnWTkjYyEZGa/KAAUdlDPfLKWgzClnvgeaIgcWmZUbt00ZI4XpWMjXfubuj8z9rby53P5W3sJ/ypxT6v4/MudI3T/n7S+ytsVz62Xs04WKqY90H88ZMC3/vefTODWuYifWbXVBFtXPsfOowfaNJyff1+J+7LR4ByTHvcPS6+5wzwaPwKR1ez5gT9mTHOZzrj+pXl7f1uZua7N7QjotnzgJaVnG+uZqu8JfpcfiY3N2BvdKKFj6WWXMzS3tUXovNeSTb16pllvI7pqL6HmyZDePafmEfSf11GftztAQN3UDKFnN3lyXmzrc2fB3nhhK83GDr+22Bh7nzduuTCy58z3SrbxbLdH52/guLa5B01AXeYKuZeVzaw5ztkJeejxcLEzW1cnC2JXbauTGH5upEa7pNqVZ+im2Ey9JviTwZ/GNAdxpGvmst+9ErdZB7eR9jTeJccU92lpFpdKCuVHhluZzg//vIVz+k5MxSuPOxvWLEhxnKW518GWrnGibQM6ogbPi3OU4wrKXUdmVi+21AEDeP4Ub4iS/PlKhNA50VVfgVne8OVMhOV1qnlA2qGtTkKzOSiWdhHFOf9eqffFZC6s5Xpszbyqfq3MLbQajfsEHyj5nKpUADx+Td7bXaVz09HpYRChz7VC0njHptmidNidFlC2JI1Y5Uj6LsNq6fqfTfRl3TJdnHmtR0GGVcy+NvqudnlM9RpgaQb2XNkgshiUXSJTrpntrv6fEu85cfS547djdca46L5RKFHlZF7lXUIDFrAq3i76bP5m+uIVf2qFayhm9xEFNpVo3Wcust+YyQyVmJZS9Rpm+0Mkh3hbPFiDteOquh247hIak9R5bppRDHejieV3zJOiglFk/qLokMfpJjUqzNkqdSIWFfLG5Ew0NnlbeirKOasGU5rxNxeJuo6nARJKau+3Dy6bd7sMcA+kctwdSuiprJ95+Uj+8CVkrVGwyyiX0MpvxRuFyIavEZtzUWDsBRW3SioB3OaQf9PQjPuK/i1o6Q3Ja2PPHV/fnB/7jV8q93DbjOBT47EJ5UtzdSBD4nqlf7jAnyiPnqsKk0m9Ju65OVQ7KOlPInlYtANZdvcZicHDt29Ly1S0a/O6TQ4VkCEdTODq3Q2cC21QigVq5Kts8SPfUpkzvFjfXlYa7Db3hGh2QL41HiPYhkaqlAiCU6wkPBRbGmlWNcXLj6sBBuhPgqfOlYGSbFxYxmhQgpXsmt0zSWHYroDYci1ZG6Ojr9qRmdyqVrI12GokLvCAeAJwind7BWxFuA/wy3AMieJafjT41gdg6QQjZKcgTAPrZ4JUGiO2z1Q5llnEmoOlKHrBSp6DPMVFo3I1yAZciZcVVLxcmNy0Q+A4W7lhhJZgT6WSdtgmgOuB/fFswR6rjxn52A7d0Scjuwv2g08pQDNXUJZkjKhuYm8QS3xLoAnpW8Bddp8RYPElk6USD1K3Qd73p6Nrcb4ZoMlRDxecEqlYSv9AbgTiOw03gsk51kCDdMsA83qCAhAQ67ENCS9gFoD636J6rTagjT6EDhVNJ4iWy17LWPIqDWvzLP9B+W4G8KuawZwqiIxfI7sX2XLkRIT/PaUIqmDyTbIq2NuAoTkeoW55Af1gVyNwo1As/0w0NxRAQtTKhiIDG1TJygpdCHbvq1dwvYYhhde9ufwIrqv72pRm4XBDCV0H+KtAQtUX3CplzQWwNAGyIz1k2igsaL4CwQ5ycCUsZ/VTbalxuzsFyKViS1NATq+wxUqfr4X2Ye+QVLvwNY66KskMN0WuN2lC2qEsiO2HYuhLckyLgxJVCAuwM4UC50yaXknl5DqSIMgKegUAQEeWvmoVdNUgIAPqG++n6fbhZgB+PBCbOjrJCpOiSq491uDeExWujVNoYAJsYgqqictS5xjv6abN0lU6CpHvZzTJsh8aZGw14STh5b5vmRhRE0deKVHaFDY4ZcBlRfwdKG9xPiWJcYAIK3iBMo7Ey3T6RYf6tpAfkREFH1Uyno7NvKsAPFE4Etj8AUQfn9kbUIq9IOwV5wB9UASE4IHHuUb3HXSEWBqk6frtCO1CYKueNMQLmOvdDzYCBJSpCRCdGiAYfJJlBm8DeUBqsMQc6IKM623GdBMG4+YxEhp+eJksgTcdJ0FjQuqmeCWE1JADQQLHtshblhMBsbV3FxTXTKAKb0Ez6DLGwaHTe5iCbAREwYJKXmmba1AwkWNRy6GzBLiIN1VZ7305GOEMD3PKGgi7hhmAn2h3wKggzgHoAViuOQZzoM+gY/cblNMhBD6iCLbvM4uCphmSrIlGgvKguuW30iS0gblZkMt3bseVVvoLajgUgg4wAqAydtuzwK7wKiqrjPN0J69PW5cXSVgz4j3qrNK2wkIx6FC3LC1pH3gTldvjAGUVOt031OW7nqMa6vwUxBgmjV/AEdAxYnPESu1BDFakhrckT0Z5ZXzNZQ0NiiVO2v/M1Bvo20ku8GRImQXpGOHlIxo/Md+GMuI92pr+oVTgxDslulGRH56HlDekBrgvkCuQ15kE5reIBoII8xqa9Sq
bJ+Ibkz45lxznhJ+CHgLW6NJekuCQWPX1F34cGN0QuOv9ACMHRoRurAg1fqXSQjDeLr6vXABgAQXB5TKEVhqlhu0KnGKjHASdQoAVhRrBHoloCvi/es84Wc17FdAviOhVN/1A/hiCa9cpnqHaNoPXVomQXVpTX4kUgKV/vEc/Ygzj6mozR6tl31lmFtKlXlMFIlPTKME3k8YwhGOBZoP2Kk2Y5Ha7wSPCBl1r6odGlwECiWmH/UzIduMFowAhhjQrmPogy3A0MAeQDTzspoui5EAZINYt2v8NA9PEihUnbR+Oa4fterNwHtNRRMcC9M0p2YA+CW5Sx6YKJ7sUBI2fCQjKDG7pjxFVUfUs/8I/bTLgaowlNXU9GAQ+wZznuAUtEcUXafdKVtwl6LsC/QRJ0Al8gzgZUBr5c6ofK91QQOpkgtqIenw4PcAkqJr3LC1NSBx5E2ybIBfNDiT4lIsNE4+TjdcMAaUOPYKfT3IZp2C7QYiSlDUFhw8kAmVfDAEwkJoOcY/wA/631k/0wWL/U3lhGSVc1rd+3uUV/4JOsoNQO/GezY0x0W82zVD4v3FohhkiddvE4KPOXBmCEItF3dHxxe2MFuL1ZyRIfdDOOK0rViKS4IwCGJpTPmqgjxM6jqtypyLHe3UC/w+Ght/QbbLQNQKOJPD2hSgIxMPSGqQib6kUaoBoOxkXjCLipJWkPeYU7sqsVzbsKZEOPIpLRYdUQ1Ej9Qu2UjsAdcqBjz9NfvC5aDNhUh364Fwt33DAYEy6iDJsWhQyBlhYqirbDkcek8kaWoCoG+hhfjRmH0ODPasSXMriBXoOAYXk8TkS/nQ5QgO68ACu8H9zSrdzqFMhHnNxKAwVI8fuxCPwyDTG4oKNaplpPnq6HG1EQqFIgPZA4rBEBxFGi+lA3l5tflPbSzWPkk4YapGDQgk7rIyD+7ShouLVkXGKR9jjcSYFoz5hEiubsoyUsOiQwqJ+IZUGkA2GUhBtAFMwJ3Im/unRByAc3XEf3WBFk40QVooMB1It+xX9YfJKCxFCgvZ09QnFTLjp3j5akb5GE5DbKTw2wDBnI3dGmvGq3KU4H3eD3epDTWRKzU8B0s0SEACaAHLBbU17IMjXHjyNBiHRLoESJB4uRBwclduL3ZrGK3/Dq2FtzWbIRnAU7rzqZAAbcBatAHaGWsvjd1mqatyB8d4c3AIj2mAAj1ijzg2IzrZvUYue485VgdDi4AwieRZDNjiELmhb1K8mnkRatgnOBb3UNRIRHVDveFm2J8uv9I+Su0cebCEH3tDGMC+thwuh73CZSmBbmJzzmiCA31TVAzOz2s1GkdiyVNFg8kWfYFhAkvTEAUH/x9zRaxw0USS4AGwAkmARyUvvUgqbHGnICShK9OPmcIOSGfUAzQI/cBuIWQsBTRrlt9B/Rhao1gfM+YgIPOOXwgtlLYjeBiz38wyZDvDi7LnbD6cYlRYsqjajJTAtpF0uC4I+vrhIUmuTp1NVSlEa+VC588QHWfH484eevaYNfnqDs0f2aHOI9aDAeO0PSqNcFX4FOGDB8ikgYRRCL2gvi920/XPQ0RuHHKOT0DqjnlZAkuQGKW08dkEEJ1p4hGL1FjWgUNaomRZKlUAyNu6i8iz56nJdcRmWwlCmTia4wVH/AwWJCEZO4Lg26FmsoCI+CTM3vPyiKtTzVXtKB5OgmIoA0b01Gcnt2o6hnJbnnaY9vUAqJmFKA8SEicuSC6C3JMRoxASpcPRAU4CzZlIgMCE7NU3iwXJG6b5d2ReQQ8qx5nmamsusT3gYUp6aT9Q4nJY77wBGapnKUCr4okCQMbiIulOk+kg9dlw+AjQwFZY8lQRQggWhhTIirBee8KdyMq0URNbgaS4epoc5QBzIy0fAYTZJdYyfsB4SI4wdOqYuyaLuA78eA0CoeGQ2kp1o1MJgNK4/8eeVJiuTs7quf+30Mr6kp3VB/FYkl1+7V1P15/k0wSSkdH6ZmNJrM0aLoJUNC+ZddT7fkF3LQJGCcgPeOt3BiDzxjHYgylApQiPk6KvervZ8PJXsIkWoBrDVakj8nPlTB0cCUyEHnjshBkfj0jiyQE5O2RxVpSK7dCRZWR9WEndeMBboSJQ2mPNhoGs8SHDSI09BlbfRsNhCgor9SlAIGkqRZ+33DoIv+wvKhZgtGsOPsm2+jlATgWsNhRGwWmAa2yjFkxap5HCq9CakALxutPFrQoB1/plbFs/gOoIIRhrHoaBleWtwdZwPWyOC61dz8DK1i94emDLgC00y203zg6KtpRJRqItAq2gTlXWSjoGykSK8RXRdhFVoL8F6jd2JGp0Ju4sAJ1vHshB19F81S1dzqTECqIHk5uIqSn5bKWKJ3cE/qtVNHNDpFz/OkK6OkEFqO68LubcesCfmA5w4rzNERTHKfCxlESh30931zoSNeHzsj+gNYj2KAj4LOKOTsIUCI7cZNBSCfyehy1PymIqrEDZGUPkN78Q4AFeookiC0PnpSU78kw4stlWRZW24AiKJmWbtgpHN1GZD92A03R7ASJowKbUFD6KjZjKcygM6KR/8YDSqfip1EDPRZ7tAMmBeijQKRm4OB8Gg45REBY0g14QseLVAILFR9Coq8rYRDRiYiBeHkpPS5IxQluo+Sh9yvXAP3oxFpLxoVzPHGnyAWAoGkkIGmSYKmtJXyhFgd9Z/LGVrwLhdIuev1FxIBTMaxD+6M6HGn5F+0FQJaI1AfafYs0bW1t+cuWZiqKMApAsOmERZM/VI2ROV81QzebmmX92jYHkgdTI8XzCNxpkmB80pu1ZCis6g3J6XWsJWy+tDBwqkSfpzM6lYpR7RGAGdk7dHFVg4mIJXGrWmn5CZWjVLogh/ellIGbbAY2GCxNtgCiaPXrJ6J2GOFiG8oKS5tDJY6HXZSEnCV0MpGLqJr4DTDT0h61Pm22D6vIRjUStWksWFboECNxkHjmEbB+GmHKQ9B0VoYqoVN0BaBJADusqlPkjahkG9kEKKCBMFbND7Uw1egC6HpVsEEYXOBaopckp/U4VWXfo+AB4XWuPhhSMe8uGHBLEQb/1y7tgc+I05XM+9NCyBngV7Ci8URtChAQB7QQdqo0I2Eabqz8YobywpJocAN+Y3Ub67jxydPEQCi/Kge1KynEEte+o56832dMNFEIYL1B5MgRSczrD0YhIq2PRyxycTUNFPlUqtRrHxCDk3KVDmmjqh+7Td+BgtWKJzeUY81o8VyMO7Xo/w9ziLxIm0IaoaI5ntTXXQdXmf3tGvYfmn3C5/ew+Iz7++jmGGjIFkuqhbNSJ66trkurIDSrXJUfHATr56BoIaqpfRAtP06/5BzeIKqAtqTuiq6bZjrq4tGbhFGQ6NLneToEUvLayyuIc6nlD2GAnCbQk8CjTWFFLTZHR2e+8hX0cUo8jCM9QeRztICREYaTPQ1NfUR6TeQCnCv5VH4UGFbSEBtKT6RgQ7uiV/82zm4eZwhL0ipNk2UjvZyCJSoGpcBGK6aDSxAfj34x0Eu4Jwn4gV1TWxmRHePI+BjeX1InlO1F76VpsyzNfwb2
qQBoOdzLsywovXJh6qNWaTflsR4eiMAulZr9dp31+ZgNCCYSk+roTG6th8mulF7o+QK7g+aYGhcywMikKXRpXQuLiyFlKZppyPF3aQZsQewX3lb8mPSC40Ig1hYUe4Rj+kBUBQ6nK9hS9BeMy+Tv/5DlP87Te7+tihXQ7xB1UWxmLbVEJe0wQXZLOMgCS4ClbRqhgza+amNooX9hAM0bjxWABJ4Bei/UdOehozG4byxDpIiJtbhuvafr584PLTCBJia9hMyrkxDi/jhpaGzOWQSQUcrBC1atIg7xSNRs2RNzdlQcIjoBl5I+i7YCTb7qKuLTICTpg5MABnAukQkq8RLgpRTw2T6ZGFFgdAOrHnNq3RoDtyChmglBM7VWGfnugx0DFSW4F3bniyYUsGrweFmcw0H5mU0JCIE3YMCGJQIr9b0e0h0XWz60PwYv5q0dO36csNIz03AsSoTqwpBxltZCs0M5eLdNCbiaUjjgiVBqZUR0Hc0Y6k5mk5paLaHqYifUxQoRXRxRLE9Vv1U93twaJMW3T8Olfl+O5LcqHYkyy/5oIDe/iQYAFZ4B8rz8x8ZaTppIntDE0T8FLLtsxtJ/TxvmwauRxOmBV9py5TF7lhd0imRZ77BAm1yaudYOU2IDEFR4DHCoxHpV9xHleLv0r7qIBMlQU9WORFkZX8WMslUor4BemIkjoUVNPcvp2jTd8Q3P0Crwn0l+Ellc/cIbTBCGwUV9DgYId14LNom96iG25OOAoCiYNXoMh80hdeuI6wiRK7Dpf1G/ryNdl9OWHuT5M3iC/izNBw+2nt+jVp1woJvazmfTZ+tkXqIqCdX4KalM1KIL50LaVdnXAj41s3nO8dnw26hC/CCkfhxTeI5Iq8MmkVPQBw6QiTZnppZYBW0pam5NME5q2AFCWgfMjW4tKMBhzYOI8HBJAJ9epNWtavkEB1oIiA8xEBln3/1fixR7sw+UnRlXIoGj/A0WEF/ulgJMRyRwLCuEYumrUhpdHuBudB7lOU2HO/2OnqjCZSAyutQEOhY8Lt8gzs6YvnUEhSRZIGxiwDVkuB6jpqyb1oS7f7WhQ4Ruzfsc4vokK5HNc8WhGYcEHQ55PhgekBMmz06dDSoVegmICNbx6IhAN8h3+d8kfFzq8h5KzcDtZ4h8F4RBwAuin4gPA7GnHaGlpEkmuXAnzKa+Ca7474u1zEPar5qLOs2GUmII5k8AAelgxrVjn3FKaUA9TY4ovqF74aljygfkofpgKy09fKiYDgdaSPUkosqFa/DN10nSgG7CAXmt3cpxDioCvSMpO4jAXFG7trKMQrPAZTNuqQwDgKshzwQpJiArsNBOrCUKSPNxLkU1LZRuZSzUVJ+8ib9M8xrMK0GOxk7l5pP9nYj0QgdMugRM5d1MK5RMsmifK28OOXmU9VUk46H3oHU4Lgo3S2ITxpxIbL0QdfBtKwMm4BP2xgs7brYm93YG1MfCjBqBoK2lxfh1iQqACcEaJG19BJ1Z3NBjVpEB8Ch13FrQF+YJGVgGkOh7DXan8Y9OnERMlbHJNAf7TM31dmaFnWmq2lgWqUFkaLrA73gBj21S9HZH+0g4iCOyynhMOghndTBcyC1dc5Fh+OQyFHHE6pm2nCDgd1TWDZLfDM+DbaztTd5dpJEVBJgjSCRhFcfFVpqAYOIrByW3L3w62LZMSr9db+CbZDNQFohh5p7942OfOOwKjBqB4EJOuoMxMZF1cRtdQ3dAEXCizdCjJ4gFTzim5b4W7EQmGOZBcReDtzNngnXT2N0QkqhahqFErwaXjUV0W0BW+dJro5MRJ1BUWUvZTFy6xbwQAdU2dp97p9DcYhUpO/UTsXNKi4a50j5Y6W15RyW7CAiFum35dHpWRZ6NdrT0Wt0tX+nELXjDQ/LT7HwBeIjQVOMUTynMwQyYJrBX1ZENWkbg1ZAWQIKWafOLsBRtywc6aeyAbf9jhXRr5oyalNLvlsKx2C+66A0fM2iOlfqkp7YMNsAQqALpHKE7iOs0exGFQqe4bMT9DUaMQJKY7iMzJSIR1zjrDCWwAZgoAMY8KHmBvHClYt6aAJ/FnmPzshC7+AUMYzam+iOiE1YFcc8pLz6G+4/Jq6laQhAYYd+oPUi6fGxwzDubdiCZHQJkaBvERHkVNQS6cJ3jhMZ/dmMA8VNgiTzXLxgEQHjWsj4pGaR+Aq8RjHUNA5yvkNBOLxKMor2e0j7FSoJzTWfaOgb2JK0a7MGJB8sgyd0qAKPr7mN366mhLLioVVQQvLQrzEzPbXf1LcCSiiWCwvQhrzD7BgRnU4F0zrNRgE02+9PGC66SWOXraEtt/6ZM9Zh4lnK/tDBAHwAvxDDEtYzSl8LgZF7pkMSmBoN4nW2RRNRIPveiVqoBdiEJa/OR0DNsjZbG4s1UElD0wAEW40K3cA3XUeHQnrveGT9zM2jobAWLFzQmiu+I5IBH4YWvtpzRldoiL0gxwi6wQFwW3Xh4Ajq0HbDpoqG79g4fITXZmEbHUQHD5YOp0UNnkGHiyLAGGqCCqPWjxZwtGH/OpbnCYEVRFV99Jw0q/8cU9aBS+pQs7e3bmT+85lj6RQ1XOkG/BMwpohS7YMg5Bcij06emkCajqBunSCoOuBDJ38kiA4iaFL6Dj8W1EZxucH4KSUdXmksXGc6n3qXuGm9T7TrE3tAkE4weERyScSfoKbhaXxb9Nh0OgwlY/cQMkuE1bd47QiPq3TyPiizob9DAMtBR5KnmaN2B0mrdlKlIW/JFCb+IIywdSIOTfz+FqGDvpVQJEgYs8xrMKoZe/gcs0cbLR27MslhDLPDHRBsgD7g8LSnUSp2vki1d2rqm6f6F3nKHwXwcjOvu5/UqJJfamSLxzyrfYoFDyMZTCFb0AFsgFcDL1ngonMB2j3zAeZ2mgfwL4vRDHJFbuKQgC48nfYUqKalDRR4AeOtv7gAy1Cx71gMCscbTIzTc/5dGch826kT04Iv1y5k0k5hvHhRAAzo1TSA29bRBB30kFbB++eJAdtRbxQo8CDfL4/SSdNB0dIhKIfzLhKOwDIP3h61qqVDHOU5DC4Wtf8BtLtMtBOv1581HB2g29ql1Vng7uHZi681GlVbkpvk42/vRNikohHvmw4PXS64FrUhDHdh+OjMWV5jzxmwJDrV2jX3pHWBfkABzvUKn44Gag8Tzeznw1OHNEKiDhk+HIIsRJcJ35/YBY0p4PT6djC1s6kN4NMLkiFglQwxRMkBcQ5aRUkdzbU0qoQcIVKNWXFMMIemrdI0ptm6/p4haTqDHjhNO5FAxWDZgKQTcGmnyPj11q8pJB2PfBtMmjjprDxcj1pGicHc6m80kTZQjYqJgAx3yq0diPEUep2XaRMDZxHkTWB8ygNzGezpoZDQ6yjwtwFPBb397tF1hAcgSe7Kz3dcPxSeCjGRj+f2gMmI+gHvTtRBfB2Dh0GvTs12RB6dR7XQnnctnaV1ZIQVTh3/PcC/zn++dDbgagQdyDjJC4jnkBO6Oi0wtT1ZkFkeNU792MrJoUAFNyDueWcQlvaN6bT6TgxxEW3NXumY
t0dbyrulXBA1YvCM35o60emS9sSRT7ED9KCQprw0Q5DxBIKwKAOQuIOSTFofKmlTIqNG7LeOPAgasBhOk0/79BtikiIznZqjU27gx6n4I4Bb1NC1FZ/hAsLq9ccWKOmIYkF9lZ0drYzlx0V1noYx1p/yDFkamKNI+GovmybWJYgLfJMQrKsvHYk/UCqLQjYcJ2GBgChIthcm8u83xQiMqRJ0ZoEGRF2WfAMCiiJIhS5UCyS1bEAomP7y4GiegRmc79RsmUbc21YFavRJILBvKIWa6UMdAl1Uc3zgwv0hBJYGIXu6zB3SNheQ1V+RafQE4VOp/wdi1canzzj4UAAAAYNpQ0NQSUNDIHByb2ZpbGUAAHicfZE9SMNAHMVfU6VSKg4WEXHIUJ0siIo4ahWKUCHUCq06mFz6BU0akhQXR8G14ODHYtXBxVlXB1dBEPwAcXNzUnSREv+XFFrEeHDcj3f3HnfvAKFRYZrVNQ5oum2mkwkxm1sVQ68QEMIAIgjLzDLmJCkF3/F1jwBf7+I8y//cn6NXzVsMCIjEs8wwbeIN4ulN2+C8TxxlJVklPiceM+mCxI9cVzx+41x0WeCZUTOTnieOEovFDlY6mJVMjXiKOKZqOuULWY9VzluctUqNte7JXxjJ6yvLXKc5jCQWsQQJIhTUUEYFNuK06qRYSNN+wsc/5PolcinkKoORYwFVaJBdP/gf/O7WKkxOeEmRBND94jgfI0BoF2jWHef72HGaJ0DwGbjS2/5qA5j5JL3e1mJHQN82cHHd1pQ94HIHGHwyZFN2pSBNoVAA3s/om3JA/y0QXvN6a+3j9AHIUFepG+DgEBgtUva6z7t7Onv790yrvx/xInJz/ZaLfwAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAATrwAAE68BY+aOwwAAAAd0SU1FB+UCBxYME13qmlgAACAASURBVHja7J13eBRV+/e/W7PpBQLplFCSQCAgHUMRCFWkCYIYioCCBRF5kEceiCAiRYooiBKKIEhTeqSFAAm9EwhFSiCV9Lqbbef9wzf5EXZ2dzbZTb0/1zUX4cyZM2fOnNn5zjnnvm8BY4yBIAiCIAiCqDUIqQkIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBEECkCAIgiAIgiABSBAEQRAEQZAAJAiCIAiCIEgAEgRBEARBECQACYIgCIIgCBKABEEQBEEQBAlAgiAIgiAIggQgQRAEQRBErUVMTUBUNtnZ2Th+/DgiIyORmpqKlJQUJCcnQyAQwM3NDfXr14enpydCQkLwxhtvwM7OrsLqlpCQgD179uD+/ftIS0tDcnIykpKSIBQK4enpCXd3d7i5uWHAgAHo0aMHrKysKrTdIiMjeeXt0KEDvLy8LFIPrVaLq1evYvfu3Xj+/DkSExPx7NkzWFlZoUGDBvDw8IC/vz9Gjx4NHx8fi7ZJUVERzp07h8OHD+Off/5BcnIykpOTIZVK4eHhATc3N7Rt2xaDBg1CQEAAhEL6BiYIopbCCKKSOHnyJOvTpw8TiUQMAK9NIpGwt956i924ccNi9VIoFGzlypWsS5cuvOsFgDk4OLCxY8ey2NjYCmm/devW8a7boUOHzH7+wsJCtnDhQtaoUSNedRAIBOz1119nf/zxB9NqtWatS0pKCvv888+Zra0t7zZp2LAhW7t2LSsqKqKHkSCIWgcJQKLCuXbtGuvdu7dJ4opLTIwZM4alpaWZtW7Hjx9nfn5+5aqbUChk77//PktKSrJoOw4bNoxXfaytrVlhYaFZz33s2DHWpEmTMrdRv3792JMnT8pdD7VazRYtWsSsra3LXBdvb2925MgRejAJgiABSBCWYtu2bUwmk5VLYL28NW7cmN2+fbvc9VKpVOyDDz4wW70AMGdnZ3b06FGLtKNCoeA92vXWW2+Z9dwLFiwwS/vY2tqyU6dOlbkemZmZrH///mapi1AoZAsXLmQajYYeUoIgSAAShDmZPXu2WQVW8ebo6MiioqLKJf7GjBljkbqJRCL2ww8/mL0tT548ybsO69atM9t5Fy9ebNb2sbOzY9HR0SbXIy8vj7Vu3drs9+uTTz6hB5UgCBKABGEu1q5daxGBVby5urqyZ8+emVwvrVZrMfH38vbjjz+atT1nzJjB+9yJiYlmOefGjRst0jYODg7s3r17Jgn2AQMGWOxerVmzhh5YgiBqPALGGCNTGMKSREVFoU+fPlCr1RY9T/v27REdHQ2pVMr7mC1btmD8+PGWN7cXi3H8+HH06NHDLOUFBAQgLi7OaL527drh8uXL5T7fs2fP0LJlS+Tl5VmkfTp27Ijo6GiIxcYdEyxduhSzZ8+22L0SiUS4cuUKgoKC6OElCKLGQj4QCIuiVqvx4YcfWlz8AcDly5exevVq3vkTEhIwffr0CmuHUaNGITs7u9xlxcfH8xJ/ADBgwACz1H/KlCkWE38AcPHiRaxYscJovhcvXmDRokUWvVcajQYzZ86kh5cgCBKABFFWtm7divv371fY+ZYtW4aCggJeeWfNmoWcnJwKq5u5xMvBgwd55x06dKhZhPXRo0ct3j6LFy82eu8WLlyI3Nxci9clMjISERER9AATBEECkCBMRaPRYMGCBRV6zrS0NKxZs8ZovqSkJOzZs6fC2+THH3/E8+fPy1XG8ePHeeXz8PBAq1atyl1nPu1pDrKzsxEeHq53v0qlwvbt2yvsXv3+++/0EBMEQQKQIEzlxo0bePr0Ke/8Li4umDlzJo4dO4Zbt27h0qVLWL16NVq0aGHSebdt22Y0T3h4uEnT0l5eXggLC8OJEydw+/ZtnD9/HuvWrUNISIhJdVMoFNi0aVOZ21Qul/MWgIMGDSp3pIucnBzs3LnTpHZatmwZzp49i8OHD2PmzJmQSCS8j//ll1/07jt//jwyMzN5l9W8eXOsWbMGMTEx+PvvvzFv3jyTosgcPHgQSqWSHmSCIGomZAdDWIqwsDDelpetW7dmycnJnOUUFRWxqVOnmmTJacgiWKvVMh8fH95ljRo1iuXn5+stb//+/SY5Iu7YsWOZ2/To0aMVGv3j8OHDJjl3zs7O1inj1q1bzNvbm3c5+vrBrFmzeJcxevRoTufXjx8/ZgH+AbzLOXv2LD3IBEHUSGgEkLAYfOPUAsAff/wBNzc3zn1SqRRr1qzBm2++ybu8Q4cO6d337NkzPHv2jFc5/fr1w++//w5bW1u9eQYPHozNmzfzrtulS5fw4sWLMrWpoet6GWtra7zxxhvlvoeXLl3iPfK3Y8cOODo66uwLDAzEpk2bIBAIeJV1584dzvSkpCRexzdq1AgbNmyAtbU1576NmzbyHhl98uQJPcgEQdRISAASFuPRo0e88gUHB8PPz89gHpFIhLVr1/KeTtQnIgDg3LlzvMoQi8VYt24dRCKR0bwjRoxA69at+Y66Iz4+vkxtytcwISQkhFMAmQrfen744YdwcnLSu79Xr17o3r07r7L++ecfzvSUlBRex0+ePBk2NjZ693fs2BE9e/b
kVVZ512sSBEFUVcTUBOWHMQaVSgW1Wo2CggLcuXMHt27dwv379xEXF4fnz58jJycHOTk5EEAAewd7ODs7w8vLC/7+/vDz80Pr1q3h5+cHOzs7iMViiMXicq/fqkw0Gg3vF/bIkSN55fPy8sKwYcN4rUlLT08vtwAcOHAgGjZsyO9LSijEZ599hgkTJvDKz3c061VhpE8cvUq/fv3Mch/5jlR26tTJaJ7WrVsjKiqqzOfk25/atWtnNE9AQABOnjxpNF9GRgb9wBEEQQKQKI1KpYJKpUJGRgaioqJw6NAhnD93Hrl5uVCpVNBqtVCr1WD/RlxBsc/tzIxMZGVl4enTpzh37lyJ4HNwcECXLl0waNAg9OjRA3Xq1IFEIuHlHLeqkZubC41GwytvcHAw73J79OjBSwCmpqbq3cd3ZJLviFUxo0aNwvvvvw+tVms0b3Jyssltaor7l8GDB5vlPmZlZfHKp2/6/mVcnF14laXPFYxCoeB1vIeHh9E8devW5f2MEwRBkAAkSl4KRUVFuHv3LjZt2oRDhw4hOzsbSqUSKpUKxoKrMDBAgxKBVFRUVCKa/vzzTxw+fBhOTk4YPHgwJkyYAH9/f1hZWVUrIWiK9WS9evV45zU2VVyMoRFAvqM6zZo1M+mara2t4e3tzWvatCyOsU+cOMErX7t27XiJoPKIMUs/XwRBEAQJwCqDVquFQqFAbGwsfvjhBxw9ehR5eXlQKpVGRR8fiqeSVSoVCgoKEB4ejl27dqF///749NNP4e/vD2tr62o9NcyFi4sL77wODg6875U+0tLSeJVRv359k6+ladOmZV7fZ0yI8ZmyBMwX/cNUIW/O54wgCIIgAVglUCqVSE1Nxbp167Bx40ZkZWVZ9OXIGENRURGUSiV27tyJY8eOYfLkyfjggw9Qv359k3yrVWVkMhmsrKx45+drSWoIvlOJrq6uJpdtrpG3Vzl9+nTJSLExhgwZYrbzVsbHBt+lAwRBEEQ5ft+pCYwLsYKCAkRHR2PIkCFYvXo1UlNTK2xkhDEGpVKJFy9e4Pvvv8eQIUMQExODwsJCujllhG9MW1OEaTF8LIbLAl/3Lx4eHrytkflQGdOx5hD5BEEQhGFoBNCI+MrNzcXGjRuxZMkSpKenV9roBGMMcrkcN27cwOjRozFnzhyMGzcODg4OteqFWa9ePXz55Ze88unD3t4e+fn5Rssoi8jv1q0br9FZU6KbMMZ4u38xR/SPUl+IlTACWNOWOBAEQZAArEZotVpkZGRg0aJF2LJlC3Kyc/413qhkNBoNUlNTMX/+fMTHx+O///0vnJ2da81L09PTE4sXLy5XGfb29ryscHNyckwue/z48Rg/frxZr/nBgwe8Q+qZ4iybD5Ya0SQBSBAEUbnQL60e8ffixQvMmjULGzZsQHZ2dpUQf8UwxpCTk4NffvkFs2bNQlpamlmMUGoLfK2O+bqLsTR83b+YK/rHy8hksor/KhXTdylBEAQJwEoQf1lZWfjqq6+wZ8+eSnGDwVcEFhQUYNeuXZg3b96/IpVEIC+aNGnCK58poewsybFjx3jl69Onj8EIGNVFAEqlUuqkBEEQJAArVlTl5+fju+++w+7du6us+Hu5voWFhdi+fTuWLFnCa10bAfj4+PDKt23btkqPBJGXl8cregZgvugfL8PXRQ+ftbEarcas5yQIgiBIAJoFuVyOPXv2IDw8nLelaFUQgQUFBfj111/x559/Qi6X0400QteuXXnly8rKwpgxYyq1TSMjI3lb4por+sfL8PWFePfuXaN5+PpH9Pb2pk5KEARBArBiUKlUiI2NRVhYGLKzs6tV3RljyMrKwvz583Hnzp0yRZmoTXTp0oW3H8Vjx44hODgYV69erZS6Hj58mFe+1157DZ6enmY/v6+vLz+hetLwdLlSqcTZs2d5lWWKhTRBEARBArBcAio7OxtffvklkpOTq+VaOsYYEhMTMWfOHOTk5NB6QAPY2dmhZ8+evPNfvXoVHTp0wPDhwxEdHV1hbavVanm7fxk4cKBF6tCjRw9e+cI3hhsUeN988w0eP35stBxHR0eTQ/ARBEEQJADLhEKhwM6dO3HlypVqPXqm0Whw8eJF7Nmzh3fUiNrKxIkTTRZjf/75J4KDg9GqVSusWrUKmZmZFq1jXFwcEhISeOU1Z/SPl2nfvj0aNGjAq32GDRuGP/74Q+fZmj17NhYtWsTrfKNGjaoU1zMEQRAkAGsZGo0GiYmJWLlyZbU3oig2Ylm+fDmSk5MppqoBhg4dWuawbbGxsZgxYwY8PT0RGhpqsVHBAwcO8Mrn7u5u1ugfLyMWi/HFF1/wypueno7Ro0ejVatWmDBhAkaOHAlfX18sXbqUV1+USCSYMWMGdU6CIAgSgJZHpVJh69at1Xbql0sEJiUmYceOHRUWrq46IpVKsXTp0nKVoVAosHXrVgQHByMgIAArVqxAVlaW2ep4/PhxXvlejf4xZswYNGvWzOh2/vx5XuV/+OGH6Ny5M+963759G5s3b8bu3buRlJTE+7j//e9/8PPzo85JEARBAtCyFEfV+O2336BQKGrMdckVcmzcuBFpaWk0CmiA0aNHY+jQoWYp6969e5g5cya8vb0xffp03hav+sjOzsaZM2d45X3V+vf58+d4+PCh0Y2vdbNYLMb+/ft5+08sC++88w6++uor6pQEQRAkAC2PWq3GgQMHalwkDcYYUlJScOjQId4uRGpl5xcK8fvvv6NLly5mK7OgoAA//PADfH198dlnn5XZhcyJEyd4+daTyWRmj/7BhaurK06dOmURC92xY8fit99+oxBwBEEQJAArhqKiIvzxxx81avSvmGLDFpoGNoy1tTWOHz+Ot956y6zlajQarF69GkFBQbh48aLJx/O1/g0JCTF79A99eHl5YdiwYWYvd+rUqbzd8hAEQRAkAMuFSqXCvXv3cO/ePV4jLdUNrVaL27dv48GDB+QX0Ag2NjbYu3cv1v601uxi6sGDB+jduzdvH3jF4vHIkSO88loi+oc+vvzySyxcuNDs5fbu3Rt///03dUSCIAgSgBUjkM6ePVtjI2cwxqBQKHD27NkaKXDNjUgkwtRpU/HgwQNMmDDBrK5I8vPzMXDgQN7OpGNjY5GSksIrryWif3Dxww8/YMmSJRYpWy6X4+233640Z9sEQRAkAGsRGo0GZ06fqdFr5FQqFc6cOUOGICbg6emJjRs34vHjx5gxYwacnJzMUm5eXh7Gjx/Pa0r+4MGDvMq0VPSPV3nw4AFvVzDlEckTJ06k0WqCIAgSgJajOH7u5SuXa/QLR61W49KlS8jPz6fIICbi4+ODFStWIDExEb/++ivatWtX7jJjY2N5jaIdO3aMV3kDBgyokLaYP38+7w+l1q1bY+/evYiPj8edO3ewZs0aODs78zr21q1b2Lp1K3U+giCICkBcGy+62PlzYWFhrRC6ycnJcHZ2hlgsph5vIjY2Npg0aRImTZqEK1euYO3atdixY0eZDYfWrVuHOXPm6L0XGRkZiImJ4VWWpaJ/vExOTg7++usvXnnHjRuHX3
75BVKptCQtICAAI0aMQO/evXHnzh2jZezYsQMTJkyosf0pPj4eW7ZswYULF5CUlAShUAgvLy907doVkyZNQp06dYyWcffuXWzbtg3Xr19HSkoKRCIRvL290b17d0yYMAGOjo4m1enZs2f47rvvSqUJBAKsXr2as5/m5ORg27ZtOHnyJJ49ewYAaNCgAfr27Yt3330Xtra2Osds3LgRV65cMale3bp1wzvvvEM/QgRhQZFQ6ygqKmL79u1jdnZ2DECN3uzt7dmBAweYUqms0DZOSUnhVT+ZTFbt+k9mZib7/vvvWaNGjcp0T44fP6637B07dvAqw93dnanVas4yXn/9dV5lnDx50ui1HjlyhFdZ/v7+rLCwUG85169fZxKJxGg5YrGYKRQKzjJ8fX151SU2NtbodX399de8yvrkk0/M0mfUajWbN2+ewTZwcHBg27dvN/i7NXXqVCYQCPSW4eXlxSIiIkyq27x583TK6dmzJ2feY8eOMTc3N73n9/HxYWfPntU5bvTo0SY/J2PGjGEEQViOWjsFHB8fXyuMI7QaLeLj42kdoBlxdnbG559/jvv372Pbtm1o3LixSccbWuPH1/3LoEGDKiRm7uXLl3nlmzhxIqytrfXuDwoKQq9evYyWo1arcf/+/Rr3ezN58mQsWLDA4FR6bm4uxowZwzkNrtFo8Pbbb2PdunUGl3MkJCSgf//+CAsL4z0bwnW+8ePH66SdP38egwYNMmig9OzZM/Tr1w83btygHwqCqOLUWgGYlZVVK9bFaZkWmZmZ1NMtgEQiwbvvvovbt29j1KhRvI+7efOm3pcxXwH45ptvVsg1Pn/+nFe+jh07Gs0TEBDAq6x//vmnRvWTXbt2YdOmTbzzf/DBB0hISCiVtnbtWt6xoQHg66+/5pU/OjoaT548KZXm6OiIt99+u1SaSqVCaGgoLyOmgoIChIaGkvcBgiABWDXJy8urFQKQMUZGIBbGxsamJCYwHy5fvsz5crxx4wbS0tKMHi+TyXiNppmDjIwMXvlcXFyM5nF04Lc2ja8LnOrCt99+y5nu5uYGe3t7nXS5XI7vv/++5P9KpRKLFi3iLKN+/fp6fVd++eWXRp/7zZs366SNHj1aZzT39OnTnMK8Tp06nOsWb9++jaioKL3n9fb2RqdOnUq2Vq1a0Q8JQZAArBhRJJfLa40ArOnGLlUBiUSCuXPn8spbWFjIKaz4jvD06dOnwqJ/VMboMR8RXF14/vw5bt26VSrN1tYWkZGRSE5ORmZmJj777DOd4/bv31/y96lTp5Camlpqv4eHB65cuYKUlBTk5ORg1apVOmXExcXh3r17euuWn5+P3bt366RPmTJFJ+3cuXM6aZMmTUJycjKSkpLw/vvv6+w/efKk3nN/+OGHOH/+fMm2cuVKnTweHh70w0IQJADNj1QqhUAgqDXXSliebt268V6XxyVy+Lp/qcjoH5URJrGoqKjG9InHjx/rpI0aNQo9e/YEAIjFYixbtgzNmzcvlefJkyfIz88HAJw5c0anjKVLl+K1114rKWP69OkYNGgQpwjUx549e1BQUFAqrU2bNmjTpo1O3hcvXuikLVmyBBKJBFKplNO9UU5ODu924loXW1HLHAiCBGAtQiAQwM7OrlYIQIFAAHt7+1ojdisTmUwGPz8/XnlfneZMS0vjHTPY3HGLq5oYq0lrx7KysnTS2rdvX+r/YrEYb7zxhl4BlZiYyEsccfULQyKMy/hDnwuevLw8nbSXp/25lgCYMvPw6tpXOzs7dOrUiX5UCMKC1FrHcA4ODrVKABIVg5ubGy9/d9nZ2aX+//fff/NaktC2bdsKif5RDJ9F/+ampkcDqV+/Pufv0asUW+7n5uaWSpdIJJz5GzZsyLsOT58+xanIU6XSrKysMHbsWLPdk5eP+fzzz0sZSnXo0KHk78ePH+tYfvft25dmLgiCBKBlRJGnp2eFuNGobIqdxNIIYMXAN+rFq1OrR48e5XVcRUX/eFWE1PRzViRCoe7Ey8CBA1GvXr1SacVhCO3s7EqlazQaMMZ0nukmTZpg/vz5pdKCgoI467BlyxYwlP7gGDFihN7+W5b10i/fx3bt2umNpsM1/VtRRk4EQQKwliEUCtGwYcNaIwB9fHw4XzqE+eE7avGyAFSr1Thy5Aiv44YOHVrjxVhttFgPDg7Wa0Vet25dnXvyzz//oGnTpqXSGzZsyMv/n0ajwW+//aaTPmnSJIMfzeYQulxwrX3lWs9IEAQJQLMIQC8vL0gkkhp/rRKJBF5eXjVGAGZmZmLdunVG83l6enI6s3327BkvP3MeHh681/OVeqB4htt7eW3drVu3ONeKcYn5mzdvIjY21mC+Vy1G9b54jx7T8TcXGhpqOQHIU0OQy6LSNGjQQCdt+vTp2LdvX5mmSc+dO6djnOLr64tu3boZ7Htl+e0xRkFBgY61cIsWLeDt7U03niBIAFpGALq4uKBJkybIzMyssVNOIpEIzZs3h7Ozc40RgPHx8bzcrQQEBHAKwF27dmHWrFlGj58yZQrWr19vcv34ipeX/ay9usZLHxqNBhMnTjRbWy5Zqmu5+aoANCuk68rEsGHDdFzFREREoFmzZpg9ezbee+89nWliQ3A5pR43bhyUSiUkEgmn2OMSmkqlsiSda60onzpFRUXpGBqFhITQTSeIitBCtfGiBQIBxGIxunXrVqNHASUSCbp37857VIooP3wXyzs6OlaL66mMvkPLFUrj7e3NaSUcHx+PadOmwc3NDZMnT8bVq1eNllVQUMDp+2/hwoWwtraGtbU13njjDZw6VdpAxNbWVueYv//+u5QgfRVXV1ej9Tl8+LBOGk3/EgQJQIu/ZLp16wYrK6sae41SqRSvv/56lV7rWFWn+8paL76uL6qLAKyMvlMb1uaayoYNG0qMQrhE3YYNG9CuXTsEBwcjOjpabzl79uwp8S/4MsUxilUqFU6dOoXevXtjx44dJfu5nDKPHj0aM2bMwIwZMzBmzBid/a+//rrRZ+xVAWhjY4OuXbvSDScIEoCWQyKRoF27dqhXr16NtJAVCoVwc3NDu3btqvQIYFFREecLSR9c/shMbRc+JCUllal8vs5vudx4VEVkMlmFn5NGrHVp1KgRoqOjjcZTjo6ORvfu3bFlyxbO/du2beN1Pq1Wi6lTp5YsT+DyyVdYWIhVq1Zh1apVkMvlpfbZ29ujS5cuBs8RFxeHZ8+elUrr06dPjf4oJwgSgFUAgUAAR0dHDB06tEb6m5JKpRg6dGilObw2pU1Nif3KN68+gcXlg40LPr78uDAUeutlKtKXX3moDB+Sr8ahJf6lRYsWuHHjBjZu3Ah/f3+D4m3KlCmIiYkplZ6cnGwwPBvXx0zxNG/Xrl3RokUL3seOGDHCqJAj9y8EQQKw0hCJRBgxYoRJC6irC3Z2dhgxYkSlrXE0ZeTo7t27vPOePXuWVz53d3fOdC8vL17Hx8fH4/nz5yZdc3JyMi+Bam9vDzc3t2rRj/iOVPIZMU3PSOdVlj5fdHyF4auWzVzwjTdc1T4OJRIJJkyYgNjYW
Jw6dQojR47kHDFVKpVYvHhxqbQTJ07oLG0ICgpCTEwMYmJiOH0GFq8rFIvF2LNnD6dFMhf6Ioq8DJfvSwr/RhAkACsEsViMgIAAhISE1ChjEKlUin79+sHPz6/SptNkMhlv8RAeHs4rn1wux59//skrr76RPn3C8FUYY1i4cKFJ18xlXclFUFBQtTF04LOQHwAuXLhgNM+NGzd4laVvdJTvvbty5YrRPMZc6RRTt07dqvnDLRSiR48e2LlzJx49eoR33nlHJ09kZGQpf5Nc17xjxw506dIFXbp0wfbt23X2v7w8w8/PD7du3cKKFSswevRoDB48GN27d9c5pnHjxkbX8WVnZ+vEOG7evLlJ0UwIgiABWGYEAgGsrKzw4YcfVptF+Xyuyd7eHh9++GGlrN96uR58/egdOnQIDx8+NJpv8eLFvNfm6Zsia9iwIe9oHZs2beItFB49eoTly5eXq27VWQD+9NNPSE/XP8IXERHBe/RWnw84vtP3P//8s8G1omfOnMHpqNO8yvLy9qry98jHxwe///47Bg4cqPPB9PLoOtf61GbNmnH+XcyrEWscHBwwY8YMbN++Hfv370eTJk10jgkNDTX6gXP8+HGdmM/k/oUgSABWKGKxGG3btq0xawGlUinefvtttGrVqtIX0+sLQ/UqWq0Ww4cPR3JyMud+tVqNJUuWmDQiN2zYML3tM3jwYF5lqNVq9OvXD5cvXzaY79q1axgwYAAvZ85V+UXHNQrOV6ympqZixIgRnFOrFy9eNMl/YePGjTnT+a6bTEhIQGhoKKcIjIuLw/vvv68TBk0fvr6+1eOHXCjknD7NzMws+ZvLQv1locYl2oqtg7nIz8/Hzp07dT78uPxvcn0QvMqrApYgCAvrn9reAAKBANbW1pg5cyZOnjyJJ0+eVNtIBMURTj7//HPY2NhUen169OiBX375hVfe27dvw8/PD6GhoXjjjTfg5uaGxMRE3LhxA7t27eI1QlhMu3btDK71Gzp0qF4ryVdJTExEx44d8eabb6JPnz7w9/eHvb09MjMz8ejRIxw9ehQRERG8/f/Z2Nigf//+pdI6d+6sV/yWhcGDBxsVrcC/8WBfFqNcArB9+/a8z3v69Gm0aNECH330Edq3b4+8vDxERkZiw4YNvJ2t+/j46IQ+e/m6lixZwqucffv2ITAwEFOmTEHbtm2Rl5eH8+fPY+3atTqOh/Xh8Hyl7gAAIABJREFU7OysN35tVYRr+jQ7O7vk71dH3Ph+nOnjjz/+0LHg79mzp9F1ghqNRsf9i0wmMxiJhCAIEoAWE04NGjTAnDlzMGvWrFI/mtVJyDo4OGDu3Lnw8fGpEr7U3nzzTdjZ2fF285Kbm4sff/wRP/74Y7nOO3XqVIP7+/fvjyZNmvAKCQf8ux7wwIEDOHDggFna5FVxbmVlZVajEL5uNLy8vIyet1mzZnB3d+ctUNPS0njFo9VHnz599O7r0KEDXF1deRtwxMfH46uvviqXkK6otcG//fabzvrWTZs2wdnZGdOnT0dBQUFJeseOHTF58mSdMrjq+vKoX1k+bA0ds3nzZp20sWPHGi3z5s2bePHiRam0Xr16kfU3QVS09qEm+L+1gCNHjsTIkSMrde1cWZHJZHjnnXcwbNiwKuNHy87Ozqyhy/jg7+9vNJyZVCrVsZCsKN5///1q1a9EIhGmTJlSYeczdC6xWIz33nuvwupiqXNxjardvXsX+/fvL7UVfzht3boV4eHhJdv+/fs5y311vR5Q2nCmLIZH+j4k4+LidNzM2NraYuTIkUbLJPcvBEECsMqJQDs7O8ydOxcdOnSoVlbBEokEHTt2xNy5c6ucS5uvv/6a9+J9c7B48WJeax+HDRtmNFKBuSmeRq5uzJw5k7cFbnl4++230aFDB4N55s6dy9uIpzz079/fLKKE63eEy5CJa4S1WHy9+kH39OlTznM9fvxYJ+3lpRBcdXlZjHIJU32/g7/++ivn/eMKGfcqL4eQe/nZIAiCBGDlNcb/j56xbt06tGzZslpEJBCJRAgMDMTatWtRv379KudexMnJCd9//32FnOujjz7CW2+9xfte79ixo8L88UmlUqxYsaJaPhf29vZYtmyZRc9ha2vLq584OzuXa4qZD2KxmLdFtzHq1aunk3b8+PFS/8/Pz0dkZKTOB2mx0H3VKvrOnTu4ePFiqTSNRoN9+/bpnOvl/s0lnO/fv1/yN5cTcy5H4EqlElu3btVJ5+P778WLFzp1b9y4Mac1MUEQFoYRpdBqtayoqIhdvnyZBQQEMJFIxABUyU0sFrMWLVqwK1eusKKioirbphqNho0dO9aibREcHMyUSqXJdYuJiWFWVlYWrZtAIGDh4eEV1t6vv/46r3qdPHnSpOdiypQpFmufv/76i3dd1Go1Gz58uMXu18aNG812L9LS0phQKNQ5x6xZs9iNGzdYVFQU6969u87+zp07l5Tx+eef6+x3dXVlmzZtYnfv3mVXr15l77zzjk6eBg0aMK1WW1LOqlWrdPK0atWKnT59mp0+fZq1atVKZ/93332nc0179uzRydeoUSOm0WiMtseWLVt0jp06dSq9eAiiEiABqOdlp1Ao2PXr11n79u2ZVCqtcuJPKpWyjh07suvXrzOFQlHl21SlUlnspd2tWzeWlZVV5rpFREQwJycni92rtWvXVmhbW0IAMsaYUqlkvXv3Nnv7LF++3ORrLCwsZB06dDB7XebMmWP2+9GrVy+T6/Hrr7+WHB8XF1emD9FZs2aVqsedO3dMLiM2NlbnegYMGKCTb968ebzaYvTo0TrHHjp0iF46BEECsGqJQKVSyeLi4lhISAizsbGpEsJPIBAwW1tb1r9/f/bgwYMqPfL3KkVFRWzixIlmbY8xY8aYpQ3u3bvH/Pz8zFo3Gxsbtn79+gpvZ0sJQMYYy8vLY6NGjTJL+0gkErZq1aoyX2deXh4bMWKE2UbTV61aVWrEzFycO3eOCQQC3nVp3749U6lUpcqYPXu2Sdfj5OTE0tPTderCNVKob3vrrbd0jn/+/DnntTx+/JjXB4Sjo6POh2x+fj69cAiCBGDVE4EqlYqlpKSw//znP6xOnTqc0zkVtQmFQla3Tl02Z84clpqaytRqdbVs1yNHjjAvL69ytYW3tzfbvn07r2knUwRFWFgYs7e3L/e9GjhwIHvy5EmltK8lBWDxc/HLL78wmUxW5vZp3Lgxu3TpUrmvVaPRsNWrV+sIC1O25s2bs9OnT1v0nvzwww+865KQkMA5gs5XvMlkMnbixAnOeuTm5rLAwECjZTRp0oRlZmbqHL9gwQKdvD179uTVBjExMTrH9u3bl140BEECsOqi0WhYbm4u27dvH3vttdcqfDRQIBAwGxsb1q5dO3bw4EGWl5dnVuFTGRQUFLBNmzZxrn8ytDVt0pQtWrSIFRYWWqxuGRkZbM6cOczX19fkUZf33nuPRUREVGrbWloAFvPo0SM2ceJEZiXlv4ayfv367LvvvjP7qE9WVhYLCwtjderU4V2Xli1bsq1bt+qMtlmKAwcOsCZNmuj9uJs4
caLBpQwajYb9/PPPzN3d3eBa2Bs3bhisR25uLhs/fjzntLJQKGTvvPMOp/hTq9WsUaNGOsfwXd/KNYq5dOlSesEQRCUhYKyahr0om8ELNBoNGGPQarUlTk6L/xUIBBCLxZzWv4wxqFQqpKenIzw8HL/++ivS09Mhl8stVl+BQACZTAbXuq6Y8sEUTJw4ES4uLnr9/KnV6lKhmwQCQYnFa/G/IpGoJL2qkJSUhMuXL+PChQt49OgRMjMzkZGRARsbGzg4OMDd3R0+Pj4YNmwYAgMDK6z+jDE8fPgQBw4cwMOHD5GWloaUlBTk5OSgbt26qFevHlxdXeHi4oIePXqgW7duVSKc4M2bN3k5S+7YsSOnlaep5Ofn4++//8b58+fx8OFDpKenIycnB2KxGA4ODvD09ETz5s3Ru3dvdO7c2aLW9Wq1Gjdu3MDhw4fx8OFDpKamIjU1FVKpFPXr10f9+vXRtm1bDBo0iDNyhqXRaDSIjY1FXFwc7t+/D5VKhUaNGmHgwIG8LdKVSiUuXbqECxcuIC0tDTY2NvD29kbPnj3RqFEj3nVJTk7G0aNHS6IfNWrUCCEhIXpD7mk0GkRFRek4h+7atSsvJ85xcXFITEwslda2bVu4uLiQNSZBVAI1WgAyxqBWq6HVaqHRaJCXl4eEhAQ8f/4cCQkJSEpKQl5eHgoKCiCXy2FlZYWpU6eiVatWen/QtFotioqKkJiYiG3btmHz5s3ISM+AokjBOxyYMSQSCaysrFC3bl1MmDABY8eOhbu7O6ysrDjdvDDGUFRUhLt37+Knn35CYWEhZDIZ7OzsYGdnBw8PD/j4+MDT0xM+Pj6wtbWFSCSCSCSqVv4OCYIgCIIgAagXlUoFtVqNgoIC3Lp1C9HR0Th//jxu3rwJuVwOjUYDjUZTMgpYvAkEAnh5emHpsqXo27cvbG1tOUebikcQVSoVMjIyEBkZid27d+PChQuQy+VQq9Ulm9EbIBBAKBRCLBZDKpVCJpOha9euGDFiBLp37446depAKpXq9cjPGENhYSFOnjyJmTNn4vnz59BqtRAIBCVb8cifUCiEtbU1AgMD0bVrV3Tp0gVBQUGws7ODRCKpFn4PCYIgCIIgAVhKCKnVaigUCty8eRN//fUXDh06hNTUVBQVFZkkyFxcXPDxxx/jo48+gpOTk8FRMo1GUzL1+uLFC9y8eRPnzp3DlStX8PDBQxTKC0sJzeJzFAszW1tbNGnSBO3atUOXLl3Qpk0buLq6lggyQ46d1Wo1srOz8csvv2DlypXIzMw0GLz9ZcEplUohkUhQr149DBgwAEOHDkWbNm1gbW1dJaYxCYIgCIIgAWhQ+KlUKuTm5uLEiRMIDw/H1atXIZfLoVQqjQoifSLJxsYGXbt2xaJFi+Dv7w9ra2uDYqx4VPDl0UWlUon09HRkZWWhsLAQBQUFYIzB1tYWtra2cHFxQd26dSGVSiEUCktGAo1F82CMQS6X4/79+/jf//6H06dPl8QNLcu1SqVS2NjYoHXr1nj//fcREhICJycnEoIEQRAEQQKw6lE8zXvy5EksX74cd+7cQWFhoVnW4gkEAohEIri6umLatGmYMGEC6tatq9cAw5AoLBahL48AAv9nlGGq2M3IyMCWLVvw048/IfVFainDj/Jcb/GIpL+/P2bOnImQkBDY2trS1DBBEARBkACsfF4eAfv6669x+vRp5OXlQaPRmCTuii1iBQIBBBAAAu681tbW8PX1xaeffooBAwbA0dGxwi1ptVptibXl6lWr8eDhAygUCr0jnMVTzsUC9GUhagyRSAQ7Ozt07doVX3/9NVq0aMHLyo8gCIIgCBKAFkGj0SAnJwe///47li1bhpSUFKMjYMXTnMXr3lxdXeHr64tGjRqhTp06sLW1hUwmMyjqioWgt7c3OnfubBb3GaYI3oKCAly8eBHx8fGQy+UGxVyxpXJhYSEyMjLw9OlTPHr0CC9evChZD2lserxYJNerVw8zZ87EuHHj4OTkZNKIJUEQBEEQJADLjUqlQkJCAv475784EnEE+fn5ekXMy2vbfHx8MGDAAHTu3BkBAQGoV69eqdG/4vy8Guz/+wo0tk7PEiJQpVLBlNv1soWzRqNBWloa4uLicOHCBRw+fBhPnjyBXC5HUVGRweu1s7NDSEgIlixZAm9vb1obSBAEQRAkACsGhUKB2NhYfPLJJ7h+/bpe0VI8UlenTh0MHz4cI0eOhL+/P2QyGW8ji5rIy9PBxSOAcXFx2Lt3L3bv2o209DTI5XJOgVkspgMDA/HDDz+gbdu2Jq2FJAiCIAiCBGCZxN/ly5cxefJkPHr0iNPIQyAQwMrKCu7u7vjwww8xatQo1KtXD2KxmJwd6xGExe5r0tPTsXv3bqxduxZJSUlQKBScx4hEIjRs2BDr169Hly5daF0gQRAEQZAAtJz4O3/+PCZPnoynT59yGnqIRCI4Oztj3LhxmDZtmsGoGYQuGo0GKpUKKSkpWL9+PcLDw5GZmcnZ1kKhED4+Pvj555/RvXt3yGQyakCCIAiCIAFoPpRKJa5fv46xY8fi8ePHnOv9rKysEBQUhEWLFqFjx46wtrYmQ4VyCEG5XI4rV67gq6++wrVr1zhHA4tF4NatW9GhQwdaE0gQBEEQJADNJ0b++ecfjBkzBjdv3tQZjSo2ThgyZAgWLlwINzc3WpdmBoqNTVJTUzF//nzs3bsXubm5OvlEIhFatGiBHTt2oFmzZuQrkCAIgiBIAJYPrVaLjIwMjB8/HidOnIBSqdQRf46Ojvjss8/w6aefwsHBgUb9LCDAc3NzsW7dOixfvhxZWVk6eSRiCbr36I6tW7eifv36Fe4bkSAIgiCIsiEKCwsLq2qVKiwsxJo1a7B9+3bI5XId8efk6IS5c+fio48+gqOjI631swBCoRAymQxt2rSBs7MzLl68qHMvGGNITU2FUChEx44daSqYIAiCIKrLe76qVUilUuHixYtYs2aNTnxbgUAABwcHzPrPLEyZMgX29vY06mRBiqfZx48fjzlz5sDR0VFHABYWFmL9+vWIiYkxS0g6giAIgiBqmQBkjCEnJwcLFixAZmamzn5ra2uMHz8e06ZNg52dHYm/ChSBU6ZMwaRJk2BjY6Nzz7KysrBgwQJkZWWhGoeWJgiCIAgSgJWBUqnEn3/+iWvXrukYfYjFYnTq1AlffvklHBwcSPxVggicNWsWgoODdQw+tFotbt26hV27dhmMKkIQBEEQRBV5t1cVIxCtVouUlBT06tULDx48KOXyRSAQwNPTE/v370dgYCA5dq4k1Go17ty5g7feegvPnj0rNdonEAjg6+uLyMhIeHp60rpMgiAIgqjCVJm3tEqlwqFDh5CQkKDj78/GxgbTp0+Hn58fib9KRCQSoVmzZvjss89ga2tbah9jDMnJyfjrr79oLSBBEARBkAA0DmMMBQUF+PXXX1FYWKgjOvz9/fHee+9R6LFKRiAQQCaTYcyYMQgMDNQZ5ZPL5diwYQNyc3NpLSBBEARBkAA0jFqtxuXLl/Hw4UPO0b9PP/0UTk5OtO6
viohAJycnfPrppzqjgFqtFs+ePcP58+c54zUTBEEQBFE1qBLhG9RqNQ4ePKgTdkwoFKJxo8bo168f+ZirQkgkEvTu3RvNmzfHtWvXSol2uVyOgwcPonfv3lVuul6lUmH+/Pk6jsW5sLW1hbu7O7y9vdG5c2e4uLiYdK6oqCgcOnSIc9/YsWMRFBREHamKcunSJezatYtz33vvvYfWrVvX2nu9evVqPH/+XHckQSjE0qVLLXbenJwcHDx4EH///Tfi4+ORlJSE3Nzckme0S5cuGDFiBJo3b07PZy3rG0Q5YJWMVqtlqamprHHjxkwgEDAAJZuNjQ1bsmQJk8vljKhaKBQKtmLFCmZtbV3qngkEAubt7c2Sk5OZVqutUnXOzs4uVVe+m0gkYj169GDbtm1jGo2G17mWLVumt7ydO3dSB6rCLFmyRO+92717d62+161ateK8TqlUapHzyeVyFhYWxqysrHg9qwMHDmR37tyh57MW9A2i/FT6FLBarcb9+/eRmZGps27M2toaAwYMoDizVXHoWCxGv379OI1BcnJyEBsbW2OmgTUaDaKiojB27Fh06tQJt2/fpg5AEBYmISEBbdu2RVhYGG/3UocPH0ZQUBA2bNhADUgQRqh0AajVanH16lUoVaWn5UQiEQICAtCgQQMSgFWx4wiF8Pb2RqtWrXTiMKtUKly5ckVnPWdN4PLly+jevTvOnDlDnYAgLERiYiK6d++OuLg4k49VqVSYPHkyFixYQA1JEFVdAF66dEnHdYhEIkFwcDCt/auiCAQCiMVidOvWTWetn1qtxoULF3ScedcUsrKy8Oabb+LJkyfUEQjCAu+E8ePH4/Hjx+UqJywsDHv27KEGJYiqLABv376tM10okUjQoUMHcihclTuPUIgOHTroCECNRoM7d+7UWAEIALm5uQgNDSV3NwRhZjZv3owTJ06UuxzGGKZOnYrc3FxqVIKwlABkjJVp02q1KCwsREpKis6LVCQSoWHDhuUqnzbLbkKhEA0aNNCZAtZqtcjKykJubi40Gk2Zy69IbGxs0KxZMzRr1gwNGjTQiXnMRXR0NGJiYuhXhCDMBGMMK1euNJrP0dERrq6uRvOlp6djxYoV1LAEwYG4rA+pUqmEVquFUqmEQqEo0wtbq9Xi6dOnUCl1I0cUTzFmZGTQXarKXxBCIecorVqtxoMHDyAUCsvkv1EoFEImk0EsFpdslhwN7t69O44cOVKqbz5+/BgbN27E8uXL9UY3Wb9+PV5//XWd9HHjxqFr166cx7Ro0YI6Tg2C7rX5uHDhAmJjY/Xuf+2117B582a0bNkSAJCSkoKlS5caFI07duxAWFgY3TOCKI8AZIxBpVIhLS0NR48exYEDB3D16lXI5fIyf+1pNBrk5ukO0WdnZyM4OJimgKs4Wq0W2dnZOum5ubkYMmRIuXwB2traok2bNhgwYAAGDBgAV1dXWFlZVZiwbdKkCb799lu89tprGDFiBGe+kydPcqa7urryGqEgqj90r83H33//rXdfmzZtEBkZCQcHh5I0Nzc3rFixAmq1GmvWrOE87sGDB3j+/Dm8vb3pnhFEWQQgYwyFhYWIiIhAWFgY4uPjoVAoSqb4zA1jDFlZWXSHqimMsXKvvcnMzERycjJOnjyJ1atX4+uvv0b//v11XM9YmqFDh6J9+/a4fPmyzr7k5GQkJSXBw8ODbjpBlJMrV67o3Tdz5sxS4u9l5syZg59//lnvSH1CQkIpAUgQBM81gMXiLzw8HNOmTUNcXBzy8/OhVqtpETxhURGpUqmQn5+Pe/fuYerUqdi4cSMKCgoq9iERCjFp0iS9+5OSkuhmEYQZuHv3Lme6RCLBsGHD9B7n7u6Ovn376t2flpZGjUsQZRGAKpUKUVFRWLRoEdLT0mukfzeiaqPVapGRkYFvFn6DU6dO6f3StxR+fn5692VmZtINIggz8OLFC850Dw8PWFtbGzy22GhQ38ckQRAmCsDiqbwFCxYgPT0dDPQgEZUDYwwZmRn4+uuvkZ2dXaE/6l5eXnr3lXUNLEEQ/4dSqURhYSHnPjc3N6PH05o+gjANo2sAVSoVzp8/j/v379PIX1kUtlAIe3t7HUtYxhjy8/NrtK88S6DRaPDw4UOcP38e/fv3L5eRiSmYGtbu2rVrOHv2LOe+AQMGoGnTpkbLKCoqwr59+3D+/HmkpKQgISEBVlZW8Pb2hpubG4YPH4727dubtT7Xr1/Hpk2b8OzZM6SkpMDDwwNNmzbFtGnT0KBBA51jHz16hE2bNuH+/ftISEiAUCiEp6cnOnbsiNDQ0DK9lLmuWywWo379+mjevDmGDBmCoKAgkw3E1Go19u3bh4iICDx9+hT5+flwc3ND48aNMX78eLRu3bpMfcMc9xoAFAoF9uzZgytXriA5ObnkftevXx9+fn4YOnQoAgMDTbaqz83NxdGjRxEZGYnExESkpaVBJpPB1dUVAQEB6NevHzp27Fgma31zIpFIIJPJoFAoOH9HjdG0aVMEBgZy7rO3tzfbPXu1HxUUFKB+/frw8fHB2LFj0bFjRwD/Gp9ERERwljFw4EA0adKk3M9ocnIyXFxc0KJFC4SGhqJVq1Ymt3t16R+EZUZVjAbjnjVrFu9g3LT93yYUCpm/vz+7ePEiu3XrVqnt4sWLzN/fnwmFQmorEzeZTMa++OILJpfLTQp8nZ2drbfM/v37Gzw2KipK77ERERFmDTavUqnYwoULmaurq9G2aNOmDTt06JDRazdWn8LCQjZq1Ci9eSQSCfvmm2+YVqtljDGmVqvZ7NmzmUAg0HuMtbU1+/HHH0uOMYYp192hQwcWFRXF+94nJiayLl26GCxzzJgxLD8/ny1ZskRvnt27d5v1XjPGWFFREZs7dy5zcXExet1dunRhMTExvK65sLCQLViwgDk4OBgtNygoiB08eNBoma1ateI8XiqVmiU4va+vL2f5TZs2ZeakrPeMTz8aMmQIy8vLM3s/MvaMAmCTJk1iRUVFldI/LN03CPNjVAAWFBSwIUOGMLFYTOLDxE0kErH27duznJwcplKpSm05OTmsXbt2JADLsEkkEjZ06FBWWFhYYQJw3bp1eo+Njo422wsmLS2N9ejRw6T2EAgELCwsjGk0mjK98LZs2WL0pVa8TZ48mRUUFLBhw4bxrt93331n9N6U9bq//fZbowIzNzeXBQQE8Cqzffv2LCwsrMIEYFJSEuvatavJH5YrV640WO6zZ89YmzZtTH62Zs6cabAfWfol361bN733OikpqVIFoCn9KCgoiH311Vdm60emPKPvvvuu0eu3RP8gAVhDBWDfvn1JAJZDAObn5+u0a35+Pmvfvj0JwDJsYrGY9e3blxUUFFSYAOzUqZPeYxMSEszygikqKmIdOnQoc7t88803ZXrheXl5mXSeOnXqmCzUrl+/bnAErDzXvXz5coP37j//+Y9J5VlbW1eIAJTL5ax169Zlvu61a9dylpuVlcWaNm1qkX5k6Ze8oRGuWbNmVaoANLUfiUQis/UjU5/R/fv36712S/UPEoDVD95uYMiKiq
itnDhxAhcuXODc16xZM7i7u5vlPPPmzcOlS5fKfPz8+fP1riEyREJCgkn5TY3OwxjDwoULLXbdc+bMwY0bN/Su11q/fr1J5VWUUc8XX3yBmzdvlvn4zz//HA8ePNBJHz9+PB4+fFiufnjt2rVKedZ69Oihd9+yZcuwYMGCCvcAUNZ+ZM713aY+o6tWrdK7rzr3D8K8UJgNgtBDUlISli5diqFDhxr8MTVHtJqkpCR8//335SpDo9Fg7ty5VbItIyIiOBf3m+O6VSqVXoH54MED5OTkVLn2ePLkCdauXVuuMhQKBRYvXlwqLSoqCvv37y9XuVqtFvPmzauUdhk+fLjB52n+/Pnw8/PDqlWrKjRQQFXtR/o4c+YMpyP+6t4/CBKAOggEAshkMtjY2EAqlZa5HIlEAmtr63KVUVnXb21tDVtbW8hksjJZawkEAlhZWcHGxgYymazWheCLiIhAs2bN0KxZMzRu3BgODg7w9PTE7NmzkZ+fz3mMl5cXpkyZYpbzb9u2zWRLY30//E+fPq1y7SuXy3H//n2LXfdff/2F5ORknfRHjx5Vyf62ZcsWs8yqbN++vZQw+fHHH81SvyNHjuj1yWdJXF1d0adPH4N5Hj9+jBkzZsDDwwOhoaGIjo62+AxVVe1Hhj4GuUaHq3v/IEgAlkIkEqFJkyb4z3/+g5UrV2LSpElwdXU1SQQJBAI4OTlh6NChmDt3LiZOnAgPD49qIQRFIhEaN26MOXPm4JdffsHs2bPRoEEDkwScQCBA3bp1MWnSJKxcuRKzZs1C48aNIRKJatXD8PDhQzx8+BBPnjxBXl6ewbw2Njb466+/UKdOHbOc++LFiwZfijt27EBGRgaeP3+O2bNnG+zfZ86cMfn8ISEhiI2NRXZ2Ni5evIg2bdoYPaZv376IjY1FTk4OLl68aNSNClc0BnNdN2MMkZGROunGnHQPHjwYd+/eRXZ2NiIjI9G8efMK6WuGrtvd3R179uxBVlYW4uPjMX36dL15lUplyfIEuVyOI0eO6M3brFkz7Nu3D9nZ2YiPj0dYWJjevIwxHDt2rFKew//+97+88ikUCmzduhXBwcFo2bIlVq1apfdjrbwY60ft2rXDtWvXkJmZiaNHj1ok7Jypz2h6errOR1hN6B+EGeFjBBISEmJwQSsq0ciiVatW7M6dO6ywsJApFAqWm5vLDh8+bNJCdWdnZ7Zz506Wk5PD5HI5y8/PZ7du3WLvvvsuc3Z2LrMBjCWNQAQCAZPJZOy1115jN2/eZAUFBayoqIjl5+ezS5cuMR8fH4MuOl4up27duuzIkSMsLy+PKRQKVlBQwGJjY1lgYGCVNFKxhBGIKVtAQAC7ceOGWReZt23b1qQF3aGhoXrz/+9//zOpPu7u7jquI9LS0pidnZ3eYzw9PZlSqSx1zIsXL5itra3eY/bu3WvR654xY4ZO/p9++klvfl9fX6ZSqUrlT0hIYPb29hY3AmnevLneY47j7ijwAAAgAElEQVQdO6aT35DV9dy5cxljjF2/ft2gEQGXFe3EiRMNWnxW1kL/RYsWlenZdHZ2ZgsXLtTpm+W9Z4b6kYODA8vOzi6VPy4uzuBvp6n9qCzP6KvnsHT/ICOQGmoEUlWRyWT4+OOP0ahRI1hbW8PKygp2dnYIDg7GoEGDeDkJlkqlGDhwIPr37w8HB4eSqWR/f3+sW7cO27dvR8+ePeHo6AixWFzp1ywQCCCRSFCnTh2MGzcOu3btgp+fX8n0t42NDQICAvDuu+/yGsGUSCQYNGgQXn/9ddjZ2cHKygrW1tbw9fXFxx9/DJlMRl9JrzBw4EAEBASYtcxXv9aLcXJywuDBg3XSDcUmNjZ6+SojRozQ6St169bFiBEj9B4zfPhwnefL1dUVw4cPr7Tr5loTplQqDV73q8+0p6cnRo4cafE+pC82raenJ+cU6Pvvv6+3rOzs7JIRbH188sknnMZKoaGhRsutDIpnNKysrEw6LisrC//73//QoUMHxMfHm60+hvrRqFGj4OjoWCrNz88PQ4YMMdv5y/KMcs1w1JT+QZgHcbWuvFiMgICAUi8igUAAqVSKwMBAiMVioxZjYrEYQUFBpR4ugUAAsVgMOzs79OrVC507d0Z0dDQ2bNiA6Oho5OfnQ6lUVmhkFIlEAqlECnsHe/Tr1w+TJ09Gq1atYG1tXWqqtrjuTZo0gVgsRlFRkdHrDwwM1Ll+kUgEf3//KiF6qxrLli1DVFQUTp06BVtbW4sKgmbNmnGmG4p7aupi9UaNGnGm+/j46D2mcePGJh9j6evmWvRuyBJT7zkaNLRo/1Gr1XoNGPRFoGjevLneKT8nJycAQL9+/XDu3DnOPC1btjS5PSvzBS8QCDB58mS0a9cOX375pcnTjTdu3EDPnj0RHR0NDw+PctenLP2IbwQYSz2jr1KT+gdBAhCA/hBBUqmU1zpAjUaDpKQkqNVqna/N4tE2BwcHhISEoHv37nj06BEOHDiAvXv3Ij4+HkqlEkql0iwL2YsRCUUQioQQi8UQi8WwtbFFx04dMXDgQLzxxhtwc3ODVCrVO8JZHC6NrxsCfeVIJBIK/6OHy5cvY/To0di3b1+5DWaKior0uh6xtrbmTDc0MsslhAyhL86qIXGr7xgbG5tKu24uDBkH2NnZcf92WFl27W9hYaHeeum7bl9fX6OuN+zt7dG5c2eT6mLoflUF119t2rTB0aNHERMTg2XLluHgwYO8P7yfPHmCiRMnIiIioty/Y4bawtPTkzPdxcXFbO1Qlme0NvQPopYLwPI+2EqlEnv37sW4cePQrFkzzhdMsRAsHnH08/PDp59+igcPHiAmJgbR0dG4evUqcnJyoNFooFarodVqS0bj9NWxeMqaaRmEQiGEIiEkEgm8vb3RokULtG7dGq1bt4a/vz8cHR0hkUggEokMGmcolUrcu3cP27dvNzr6R/wfwcHBCA8PLxmhycnJwc2bN7Fnzx6cOHGC85iDBw8iIiICAwcOtNjLpSLKK4uxjzkMhGrrC4RenKbTtWtXdO3aFc+fP8f69esRHh6OlJQUo8cdPXoUZ86cQffu3S1Wt4owlqttBnkECcAK+zFOSEjA2LFj8e2336JHjx4606qvCkHg3xHGoKAgtGrVCh988AGUSiWSk5Px7NkzJCYmIjU1FVlZWXBxceEsSywWY9q0aVAqlXB0dETdunVLNplMBpFIVDIVKxKJjI4yMcYgl8sRGxuLDz74AImJifSiMQE7OzudKZtOnTrhgw8+wA8//KDXEnP37t3lFoAEYWlUKhXS09ORl5eHnJwcqNVqODk5mdVZcUXg7e2Nb775BvPmzcNff/2F1atX4/z58waP2bZ1m0UFIPUPggRgNUaj0eDu3bsYN24chg0bhs8++wyNGjWClZWV3i+vl8Ug8O/UlJ2dHXx9fUumKIoFGJcxhlQqxbBhw0rKKt6KhZ+pD29+fj7+/PNPLFiwAImJifTgmpFPPvkEv/76K2JjY3X27du3DyqVipfBEcGfM2fOcK6tMudSi5pObm4ufv75Zxw/fhwxMTEVFuGkI
pBKpRg1ahRGjhyJAwcOYOzYsXpdwJw5e4Y6Qy3rHwQJQJNFYFZWFn777TccOXIEoaGhGDduHHx8fCCVSo0aQ7w8WseHYsfLZYUxBrVaDYVCgWvXrmHZsmU4e/Ys8vPzK9Q4pTYgEAgwevRofPXVVzr7cnJykJ6ebrZwcMT/9e/yhKuq7Wzfvh3Tpk2rVtEryvpsvvXWWzhy5Ai6devGmefhw4fIy8uDvb09dYxa1j8Iw1AouFdeOsVTuatWrULPnj3xxRdf4MKFC8jJyTG7sUdZ6qdSqaBQKJCRkYGIiAiEhoZi2LBhOHr0KHJzc0n8WYhevXrp3afPlQlBVAbr16/Hu+++W6te7sHBwejatave302KWlG7+wfBDY0A6vnBUCgUSE1NxcaNG/HHH38gwD8Aw0cMR69evdCoUaNSBhmWCptWPMqn1Wqh0WhQWFiI2NhYHDt2DPv37UdCYgLkcnmlBEevbdSvX1/vPnq5EFWFhw8f4rPPPquV1x4YGIiYmBi9z6ivry/1j1rcPwgSgCYLsKKion/DLV28gOs3ruObb75B06ZN0bNnT3Tt2hX+/v6oV69eibWvUCgstaav+P9caDSaUusFX940Gg0yMzPx6NEj3L59GxcuXEBMTAxycnJK6kSjfRWHIfcjlgo/RRCmsmzZMigUilp57a86Y36ZwsJC6hy1vH8QJADLLASLR+DkcjmuXLmCmzdv4qeffoJEIoG7uzsCAwPh7+8PHx8fuLu7w9XVFXZ2dnBwcEC9evV0RgnVajUePXqEtLQ05OTkICsrC8nJyUhISMDTp09x7949ZGRkQKVSQa1WQ61WQ6VSkWVvJUFiu2Lx9vbGmDFjTDrG3NFZqhsqlQp79+41mOe1117DpEmT0LJlSzg5OSEuLq5CIp9UBIbWVJPoof5BkAA0mxjUaDQlPyrZ2dl48OABxGLxv/78hMKS6eGgoCAcOHBAx6FmUVERpk6diuvXr0Oj0YAxBq1WC61WWzLtS6Kj6kAjCBVL48aN8d1331FDmMCjR4+QmZmpd394eDgmTpxYKs3Q0oaKJjo6Gh988AHnvs8//9xgODzAsE9Y+nCu/v2DIAFYJSkWa6+uxROJRMjNzdX745Ofn0+GGzVAAJorHBxBlIf79+/r3TdkyBCdl3tVIyUlBXfv3uXcl5GRYfR4Q47vHRwcqH9U8/5BkAC0GCKRCDY2NhCLxSWje4YCgFclJBIJbGxsIBKJSqaqyTDE/F/P+nB2di5X2eY2IqouUQMsZTzFF33PiKVHiyx13Y8fP9a7b9CgQdX6+cvLyzOax9DvNQnAmt0/CBKA5RJQnTp1wuzZs+Hn54fk5GSEh4fjwIEDJeHdqiICgQC2trYYNmwYpk+fDk9PTzx9+hTLly/HwYMHKRScGdEXDs4cAlAqlcLOzo7TmERfXN+CggK95RlaDF+VMHTd+p65vLw87N69m3Ofv7+/TqxTQ9OC8fHxnOmWtuq2sbGBUCjkHPnXNxuQnZ2NP//8k3Nfy5Yt0aFDB4MiycPDgzPdXG6tzCGaDT1HT548MXp8YmKi3n3lnco01I+ysrI406vaspHK6h80/U4CsEqPQjRu3BgbN26Et7c3xGIxfHx80Lp1awwfPhyLFy/G7du3UVBQUKWmakUiEZycnPDee+/hq6++gpOTE0QiEVxcXLB69WokJyfjwoULFBHETF/Ov/32G+c+W1tb+Pj4lPscrq6unELowYMH0Gg0OqN6hpwkV6fRDn3X/c8//3DmP336tN61YKGhoToCkCsKTzF37tzhTL99+7bFn906deogLS1NZ5++keajR4/qve4pU6agQ4cOBgVHQkICZ/rz589N/r3kQqVSobCwUGetsykYcqZ+4MABZGRkoE6dOpz7FQoFjh8/rveDyM3NrdwfK/rQN21d1RyZW7p/WLJvEBbSP7W9AaRSKYYPHw5PT8+S0G8SiQR2dnYICQnBvn37sHr1agQFBcHBwcFoRBBLI5FI4OTohM6dO2Pz5s1YuHAhXFxcStzQSCQSuLi4YPjw4QZ/tAh+XL16FQMGDNDr6qVLly5m6ROurq6c6QUFBdi8ebPOF/Xq1av1llWdIh7ou+6UlBQcOXKkVJpWq8XatWtNGkEy5L5n7969OqOAZ8+e1SskKuK6Hz16hFOnTpVK02g0+Pnnn/WW5eTk9O/XvIF+ePLkSc70yMhIk+ptaHQ5Li6uXG3SqFEjvSIhLy8PH3/8Mec0L2MMc+bM0Wvg0LZtW5PDa5rSj3bu3Knz+xAbG6t3pLrSRnss3D8s2TcIC/WJWq+AhUI4OzvrfL0IBAJIpVLUrVsXY8aMwaBBgxAVFYXNmzfj0qVLKCgogFKptPgIm0AggFgsLpku69atG959910EBwfDzs6OU+QJBALY2NiU+0evNnHv3j3897//LXnhZmdn4/r167h8+bLB40JCQsxy/iZNmuDSpUuc+z766CMkJSVhwIAByMvLw+rVq3H48GG9ZbVo0aLatLuh6x49ejTm/b/2zjwsqvL9/+9ZmI19RxQREdTUEBJMrVzSBHJBMyvNrbS6KjOXxMqPueZWfV1aTDOz/GqJuRGamYiSmi1gfsR9AUQQFYad2Z/fH/1mviJzhplhZhjgfl3XXMpZnnPOc9/znPc8y30vWIBBgwahvLwcX331FQ4ePMhZVlRUVL1tPj4+nMcrlUr0798fy5cvR+fOnXHixAmDD9ibyMhIoz1HjDE888wz+OCDD9C/f3+Ulpbiiy++QEZGBmdZjzzyCADAz8+P85iUlBRMnToVgwcPrtNDtXr1aovu29Qw7RtvvIFt27ahU6dOVtWJVCrF8OHD8cMPPxjd//333+P69euYNm0aevbsCZlMhosXLzboF0OHDm20vUz5UXFxMfr3748FCxYgNDQUv//+O95//32nm4dtb/+wp28QJADtglqtxokTJzBt2jSjcaT0OXv9/PwwYsQIxMfHIzc3F4cOHUJaWhrOnTsHhUJhiNWnD+lijdC7P3yMUCiEUCiEv78/+vXrh0GDBqFv374IDAyEi4sLXFxcjAo8fRaTo0eP0kIQC7hx4waWL19u8QurodAU5tK3b19s376dU6gsWLAACxYsMMuPuHKiOiOmnruiogJz5swx+/tj7EUfEhJi8ry8vDyL4w3a6rn37t1rdJ9cLjc7W4NAIDCkKWzXrh3ncTqdDgkJCZgyZQq6dOmCvLw8fP311xYHMW/bti3nvtOnTyMiIgIdOnRAZmamyfvh4pVXXuEUgADwxx9/cP5g4MIWcewa8qOsrCwkJSU59XfN3v5hb98gSADaHI1Gg/T0dGzduhUTJ06Eu7u70VWUeiEoEonQtWtXREZG4rXXXkNxcTH++9//Ijs7G//88w/Onz+P0tJSaLVawyINY0KNx+PBzc3N8MvSz88P7dq1Q3h4OMLDwxEZGYmIiAgEBwdDJBIZBKGpFYSMMVRXV2PXrl04dOgQCUA789ZbbzV6AYie8ePHY+7cuY2eOD5ixIhmFbvLVs/97LPPGp3IHhkZCR6P53QT0SdNmoT58+c3
OtLAhAkTDPPiHnvssQbbuk2bNjXqegMGDMBnn31m8pjc3FzU1tZaVf6gQYPw9NNPm+zhtlT8hYWFNbocZ/UjS7C3f9jbNwgSgDaHMYbKykr85z//wR9//IH33nsPHTp0gEQiMSq29EOy+mHZDh06ICQkBPHx8YZ4gFVVVZDL5aioqDAc9yAikQibN2+GUCiETCaDWCyuk0ZOH1Da3PllGo0GlZWV2Lx5M1asWEGJvu1MdHQ0Fi9ebLPyvLy8sHz5csyYMcPqMlxdXZtd8GRbPLebmxuWLFnCWX5cXBxOnz7tVM8dEBCARYsW4d1337W6DG9vbyxcuNDwd1BQEGJjYxucttAYhgwZAk9PT7u2Lxs3bkRcXJzJVb3mIJPJLB7iNuWnzuhHlmBv/3CEbxC2hU9V8H8iMCUlBfHx8Vi7di1u3bqF2tpakyt/eTweBAIBRCIRJBIJZDIZ3NzcEBgYiMjISMTExODhhx82KuIEAgHat2+Ptm3bwsfHB25ubpDJZJBKpZBIJIZev4bQx/07d+4cpk6dikWLFqGkpISW3tuRqKgopKam2nyRzZtvvonhw4dbff5nn32GLl26NLv6bMxzCwQCbNu2DZGRkZzHvPbaaxYLSkfwzjvv1JlzZdEvd6EQO3bsQGhoaJ3t5kwTuJ/ExESLjvf09GyUaDWH4OBg7Nmzx+SctYYQi8XYs2ePTVboW+tHplY1NxX29A9H+AZBAtBuIlClUqGgoABLly7FwIEDsXbtWuTm5qKmpsaieEh6Yaifq2fqGH2vn6X3qlarUV1djatXr2Lx4sVITExEamqqxXN6CPMRiUR4/fXX8dtvv5mc72L1l5HPx86dOy2OyC+TybBt2zZMmjSpeTZCVj63u7s79u/fj5EjR5o8bsKECUhISDCrzGHDhiE5Odkhzy0QCLBv3z6L5yB6eXnh4MGDRuc8Dhs2DLNmzTKrnMFPDsbGjRstvu85c+bY3ddiY2Pxxx9/oGfPnhaf27ZtW+zfv99mC7Ss8aOkpCRMnz7d6b5r9vYPR/gGQQLQrkKwpqYG169fx5IlS/D4449jzpw5OH78OORyuWHBR1Pcl1qtRm1tLeRyOTIyMjBz5kwMGDDAEPeP5vzZ5yXdv39/LFq0CJcvX8Znn31m1x4iiUSCzZs3Y/v27YiNjW2wF2js2LH4/fffMX78+GZdz5Y8t0AgwKRJk5CTk2NWD4VAIMCuXbswZcoUk+I+OTkZe/bscWj4JL1437p1K6Kjoxu099SpU3H+/HmTPYerVq3CmjVrOEOq8Hg8TJkyBWkH0qx6VoFAgM2bN2PTxk1GV17birCwMPz555/YsmULIiIiGjw+ICAAc+bMwYULF2wu/sz1I7FYjEWLFmHXrl1Om5HHnv7hKN8gbAOPNTBWWFNTg1GjRuHIkSNOF1TY09MTP//8M3r16lVnuFSpVGLjxo2YN29eoyeX68PBSCQStG/fHsOHD8eTTz6J7t27w83NzdCLp//XFmi1Wuh0OjDGoNPpoNFoUFpain/++QdHjx7FwYMHUVRUhNra2kZPIpfJZFixYgVeeeWVOqug1Wo1/v77b8THxzvdnA6hUIgnn3wSu3fvtii4qFqtxrFjx8xqxNzd3REcHAx/f3/OXlxTFBUVcQYajomJMRlW4n6ys7Px22+/4ebNm7hz5w7EYjECAwPRsWNHJCQkmL3gw5r7yc3N5QzI3KNHD6PXNnXOww8/jICAAKueWx/fMioqCvHx8WbX34PcuHEDO3fuREFBAeRyOTw9PdGpUye88MILhmDBlj63rWzNGENWVhZOnjyJ/Px83L17FyKRCN7e3oiJiUF8fLxFWV7KysqQlpaGnJwc3Lp1CxKJBB06dEBSUhK6du1qaGsyMjKMThnx9/c36yV+7tw5nDhxAleuXEFlZSVUKhU++eQTmy2Q0tfNpUuXcPDgQRQUFKC0tBR8Ph8+Pj7w9/fHY489ht69e5stuhprs/v9qLy8HL6+voiIiMDzzz9vOHfVqlWcvckpKSkYM2ZMk3xHHekfjvANggSg3QTgg8LAxcUFYrEYHh4e6N27N2JjY9GzZ0+EhYUhICDAEJ7l/o+xX1H6IWfGmOHLpdPpoFarUVJSgps3b+L69ev4559/8Pfff+PSpUtQKpVQq9VQqVQ2y0rSmgQgQRCEI3vaLBGABOHwdylVgWU9c1qtFgqFAhUVFbh9+zbS0tIMq4I9PDwQFhZmWNzh7e2Njh07Yvjw4fVEoEqlwnfffYfLly+jpKQEJSUlyM/PR1FREVQqFTQajaH3T/8hCIIgCIIgAdiE6Ofk3T/vrqysDAUFBXWGhWNiYpCQkFBPAGo0GmzduhVZWVkGsaf/EARBEM6DXC5Hfn6+0X0hISFGh2dpTjZBArAZoV+Za60QY4wZegmBf4eMFQoFZ0gWpVIJhUJhE9GnzyCiF5MEQRCEbUhLS8OECROM7nvvvfewbNmyetuLioo4y6M87QQJQCcSfhKJBG3atEFERAQKCwuRl5uHquoqpxdTPB4PHh4eiI6ORnh4OC5evIizZ8+isrKSDEsA+Dd91s6dO43umzBhgk1W6y1fvhwlJSX1tkulUs4gzUTLR61W44MPPjBrsZpIJIKPjw+Cg4PRp08fdOjQwWnymevzLRtj8+bNmDNnTp2FDUqlEnv27OE8x9zFUETjyMjIwE8//WR034svvmhVmCESgC0MmUyG6dOn46233oKnpyeUSiV++eUXLFu2DNeuXbPpQhJbwufzERAQgE8++QSJiYkQiURQKpVISUnB3LlzUVZWRsYlkJGRgY8//tjovkcffdQmAnDz5s24du1ave0+Pj4kAFsxNTU1FufY1tOuXTu89dZbeP311+Hq6tqkzxEeHg6pVGo0jVlxcTEef/xxrF+/Hj169MCdO3cwe/ZsFBYWcv5o79y5MzmHA/jrr7842764uLhWLwBbfRxAoVCIPn364J133kFgYCBkMhm8vLyQlJSEAwcO4N1330X79u0hlUqd5p71OYajoqKwZcsWjBw5Ep6enpBKpfD09MRzzz2HkSNHWhW+hCAIwhkoKCjA3LlzERkZ2eQp2EQiEcaOHcu5PycnB4MGDYK/vz+6deuGn3/+mfPYmJgYCoNCkAB0BlxcXDB48GC4uroa4vjxeDyIxWIEBwdj9uzZOHLkCJKTkxEWFgZXV1ez8/Pa417d3NzQqVMnfPDBB0hNTcXAgQPrhELR33u/fv1onglBEM2ewsJCDBo0CAcPHmzS+3j11VdtUs60adPIqAQJQGeBK+cvn8+HRCJBWFgYkpOTkZmZifXr12PgwIHw9fWFTCazqxgUCAQQi8Vwd3dHYGAgRo4ciS+//BLHjx/Hm2++iTZt2tSJ3aeHMYaysjJaDEIQRIugpqYG48aNQ15eXpPdQ58+fSxO2/cgXbt2pVRphNPQ6ucAqlQqpKamYtq0aQgKCqoXSV6/MlggEKBNmzYYN24cxowZg5s3b+LYsWM4cuQITp8+jaqqKkO8Pv0q4vuDQXMJPJFIBMYY+Hy+4SMUCiGVSvHQQw8hLi4
OvXv3RnR0NHx8fODi4gKhUMiZdUSn06GkpAQpKSlQKpXk4QRBtAjKysowYcIEHD9+vMnu4dNPP0VWVhYuXrxo8bkeHh7YsWMHJBIJGZMgAegMaLVa5OTkYMaMGVi2bBlCQ0M5v6B8Ph9isRgikQidO3dGeHg4Jk+ejKqqKuTn5+Py5cu4evUq8vLyUFhYiNLSUoSGhhoVgHw+H7169UJYWBi8vb3Rpk0btGnTBu3atUO7du0QFBQEqVRaRxQ2lGpOo9Hgzp07mDt3Ls6ePUs9gARBOC0ymQzt2rUz/F1aWoqSkhKYSk6VmZmJv/76C7169WqSe/b29saxY8eQkJCArKwss8/z9/dHWloa5cclSAA6G/pewH/++Qdz587F6NGj4e7uzjmH7v5eQeDfRPZeXl7o3r27IbWb/qMPMfMgEokEq1atMhxz/0cfRNrcEAiMMSgUCly8eBHz5s1DZmYm9f4RBOHU9O/fHwcOHKizrba2FqmpqZg1axZu3bpl9LwNGzbgq6++arL7DggIwMmTJ7F48WKsW7cOVVVVnMfy+Xy89NJL+PDDD+Hv709GdzCTJk1Cv379jO7r1q0bCUBykf/Ly3vt2jXMnj0b3333HWbOnIkBAwYYFn2Y6n3j8XgWr7jl8XiNXlnMGINSqYRcLsf27duxZs0aFN8uhlrT8iPQN9QbShBE80MqlWLs2LGIi4vDww8/bDSe6aFDh5r8PsViMZYtW4bk5GTs2bMHp06dQkFBASorK+Hr6wt/f3/06tULSUlJJPyaEH9/f6p/EoDmC6qqqiqcPHkSZ8+eRY8ePfDKK69gyJAh8PT0NMy/a+p71Gg0UKvVKCkpQWpqKjZs2IDr169zLmYhCIJoTnTo0AFvvPEGVqxYUW9fQUEBSktLjaZfczQeHh6YNGkSLewgSAC2FBGo1WpRUVGBU6dO4cyZMwgMDMTo0aMxfPhwdO/e3TA3z5x5eba6H51OB61Wi6qqKmRnZ2Pfvn1ITU1FaWkpCT+CIFocI0aMMCoAAeDmzZtOIQAJggRgCxaCVVVVqK6uxvr167Fp0ya0bdsW8fHxeOKJJxAVFQVfX1+DELx/Dp8lwpAxBp1OZ5g3qP+/TqdDbW0t8vLycObMGWRmZuLYsWMoKSmBUqmEWqWGjpHwIwii5dGhQwfOfXfu3KEKIggSgI4RgwqFAgqFAhUVFbh69So2btwIkUiEjh07IjY2Fg899BA6deqE4OBg+Pn5wc3NzbCYQywW11vQoS9Tq9WipqYGlZWVuHfvHoqLi3Hz5k1cuXIFFy5cQE5ODqqrq6HRaKBSqQxhZgiCIFoypuZIy+VyqiCCIAHoWHQ6HZRKJZRKJXg8HsrKynD27FlDL6D+X5lMBl9fX8TGxmLt2rX1GrPa2lpMnjwZJ0+ehEKhqDfUq9PpoNFooNVqTYZFIBpHRUUFDh06hPT0dNy6dQt3796FRCKBv78/HnroIcTHx6N3795mrcjOyspCZmam0X2JiYmIiIgAAGRnZ2PLli3Iz89HUVERfHx80K1bN0ycOBEPP/yw2feu0Wiwd+9eHDx4ELm5uaiqqkJQUBA6duyIyZMnO3XICWvq6vbt2wgODkZERARef/11hIaG1jv32rVr2LJlCy5duoSCggLw+Xy0bdsWvXv3xsSJE62aEG5LH+GyXXV1NQIDA9G+fXu8+OKL6N27NwDg8uXLnBkwnn76aXTq1KnBaymVSuzduxenTp3C7du3UVBQAKFQiMDAQHTu3BlJSdhyo+IAACAASURBVEno2bOn0y2sMjXf+sEoB+b6EwAoFAr89NNPSE9Px507d3D79m1kZmYatZ+xuhOLxQgJCUFQUBCeeeYZxMbGNuo5FQoFdu3ahb/++gtFRUWGawQGBqJLly4YNWoUevToYZF/OcL2xcXF2L9/P06ePImioiKUlZXB19cXbdu2xZAhQ5CQkAA3Nzer6sRWZVviF87QhtuzHeDq3TJJdXU1e+qpp5hAIGAAnOrj6enJTp06xdRqdZ17VigUbN26dUwmkzXZvfF4PObi4sJ69+7Nqqqq6tVrVVUVi4uLa/J6lclkbN26dUyhUNS5P5VKxU6dOsU8PT2dzu5CoZAlJCSw6upqZi01NTVs8eLFzMPDo8Hr9ezZk6WmpjZY5urVqznL+OGHH1hNTQ177rnnTF5r6tSpTKlUNnitW7dusb59+5osa9y4cayqqoqtXLmS85iUlBRmC8LDw42W7+PjY5e6cnFxYUuXLmU6nY4xxphGo2HJycmMx+NxniOVStmnn35qOKcpfMRc2yUlJbHKyspG2U6tVrMlS5Ywf3//Bu8/Li6OZWRkMFtTVlbGec2EhIQG3z1c53799dcW+RNjjCmVSrZs2TKj9tRqtVbXXXR0NPvpp58srhulUsnmz5/PfHx8GrxG37592YkTJ8wu2562l8vl7M0332Qikchkub6+vuyTTz5hGo2myco2xy+cuQ23RTtgChKADwgLqVTKJBIJ4/P5jb4/gUDAYmNjOQVgbGysTa7D4/GYVCplMpmswS8OCUDG8vPzWXR0tMXXnT17dr0XhbmNx9atWxv8sus/48ePN3n/FRUV7KGHHjKrrNjYWLZw4cJmJQAtqatp06ax6upqNnr0aLPtuGLFiibzEUts17NnT/b+++9bZbu7d++yAQMGWNyOfPjhh2YLZHsLwPLycs5zt2/fbtGL++zZs6xbt26cx9xvM2vrbuHChSZtfz+FhYWsX79+Fl2Dz+ez//mf/2mwbHva/sKFCywyMtKisocNG2ZWW22Psm0pAJuqDW9MO0AC0Ezn9/DwYMOGDWPLli1js2bNYuHh4UwsFju9AHRxcWHdunVjixYtYp9//jmbOHEi8/LyIgFo4hdmRESE1ddeunSpVY1Nu3btLLrOvn37OK8zd+5ci8qSSqXNSgBaWle+vr4Wv+yys7ObxEcstZ2pdpfLdkqlksXFxVl9/x999JFTCMC7d+9ynrt7926z/WnUqFHMzc3N5DPrhVtj686U7fXU1tayqKgoq6/x+eefm+xVtJftCwsLWUhIiFXljhkzxqS4tFfZthSATdmGW9MOkAA0UwC6u7uzdevWsbKyMlZbW8uqq6vZlStX2PTp05mfn5/FvWqOEIA8Ho+5urqy+Ph4dvXqVVZTU8MUCgUrLy9n33zzjVnDVq1RAI4cObJR1+bz+ezvv/+2uLGx9DNw4EDOoR1b2sQZBaAjPqNHj3a4jzjKdsnJyY3+UWlKIDtKAObl5XGe+8svv9jUn/QCsLF1JxAI2PHjx00+1xtvvNGoa0gkEnbp0iWH2l6n01ncq/jg55tvvjF6z/Ys25YCsCW24a0+nYJAIEB0dDRefPFFeHh4QCKRQCqVIiwsDB9++CEOHDiAiRMnIjAw0BD/rynh8XiGHJrvvvsutmzZgtDQUEilUojFYri7uyMpKQmPPvpokwetdjYyMjKwb9++Ri8CWrBggd3v9fjx46ioqKi3/fLlyygvLydjNpKDBw9CoVA41EccYbvCwkJ8/PHHjSpDrVZjyZIlTW6ja9euce
5r27atU9adVqvF/PnzOfffuHEDn3/+eaMXjSxfvtyhtt+3bx8yMjIaVfbixYuhVqsdWnZT0hza8FYvAIVCIXr27AmpVGpYZaXP9evq6oqePXtizZo1OHr0KObPn48ePXrAy8sLEonEYWJQIBBAKpHC09MT3bp1w/z585Geno5Zs2YhICCgjtDj8XgQiUTo0aOHIVcx8S+ffvqpTco5cOCA3eOQabVaXL582aKXImE+tbW1uHTpkkN9xBG227ZtGzQaTaPL2bNnD4qKiprURrt37+bcFxwc7LR1d/z4ceTm5hrdt3XrVptEddi+fXs9EWFP25vKvSwUCjF48GA8++yzCAoK4jzu+vXrRlfY2rPspqQ5tOGtXgDqdDoUFhYa/eLoc/zKZDJERkZi9uzZOHr0KFJTUzFv3jz06dPHEPNPIpFAJBI1ShTqrycWi+Hq6goPdw/4+fnhscceQ/K8ZOzfvx/Hjx/HzJkzERYWxtkjqdFocP36dWi1WnrT3/fCfzDx/P1ERkZi7969KCsrQ15eHhYuXMh5LGMMv/zyi8X38NRTT+HcuXMoKyvD6dOnER0dbfL4e/fu1dtWWlpq8pwRI0bg/PnzKCsrQ3p6Ojp37tws7WVpXQHA0KFDce7cOZSXl+P06dMNhsG5e/euQ32kIdv16tULWVlZKC0txaFDhxASEmJxvZ0+fZpzn7+/P3bs2IGSkhLcvHkTycnJnKFFGGNIT09vMvtnZWVh06ZNRvd17doVXl5eVpXbvn17rFy5En/++Sfkcjny8/Oxf/9+8Pl8m9WdXgRaap82bdpg165dkMvlyMvLw4wZMziPValU+P333x1i+6qqKhw+fNjosd7e3sjMzMThw4exc+dOXL16Fc899xznffz00091/rZn2c7QLlnThtuiHTCb1j4HkMfjMV9fX5aRkWHWnDKdTsdUKhVTKBSssrKSXb9+naWlpbGVK1eyiRMnsri4OBYUFMR8fHyYr68vGzBggNFyq6ur2cCBA5mPjw/z9vZmQUFBLCoqio1KGsVmz57NvvzyS3bs2DFWUFDAqqqqWG1tLVOpVCYn0up0OlZTU8MOHz7MvL29aQ7gfWRnZ5uc3FtYWFjvnJdeesnkak9L5o+0adOmXmiAu3fvmpycbmxux2effcZ5fHh4eL3vQkFBAXN3d29WcwCtqau2bdsylUpV55w7d+4wV1dXznN+/PFHh/qIKdt5eHiwsrKyeqsiTc0RNma7mJgYiyalT5w4kfP4mTNnOnQOoEajYXl5eWzVqlUmbT19+nSr5m6NHj2aVVRUcN6rLevuP//5j9FrdO7c2ex5jYwxk6vb58+f7xDbX7p0ifO41atXG323BQQEmDX31p5l23oOoCPacFu1AzQH0HwBDLlcjpemvIQff/wR5eXlJrvRH+yla9++PYYMGYIZM2Zgw4YNSE9Px4ULF5CVlYWMjAysW7cOIpGoXjkikQjr16/HiRMncPbsWVy8eBEnTpzAtv/dhqVLl2LSpEno06cP2rRpA1dXV0gkEri4uJj81VZTU4OjR4/i1VdfRVlZGXX73ceVK1c4902fPh1t2rSpt33ixImc51hav2PGjKnnB35+fhgzZoxF5ahUKpPXeHDeZ9u2bTF27NhmZStr6uqZZ56Bi4tLvV6PZ555xml8xJTtnnvuOXh6etbZ1qVLFyQlJVlUd8Z6HADAy8sLI0aMqLd96tSpnGXZO9vGr7/+Cn9/f/j7+8PX1xcuLi4IDQ3F3LlzUVVVxdn+vv766xZfa/Dgwfj+++/h7u7ukLqrrKw0q9f5/u/pkCFD6m1/+eWXzfYve9m+uLiY87gnn3yy3jaZTIahQ4fCx8en3ufBUSl7lu0M7ZKlbbit2gFzoVUC+HcY+EbuDUyfPh07d+7E7Nmz8cgjjxhElykxKBAI6s21Y4zB3d3dMNfD2GIMgUBgGJ6zNHfwg9dSqVS4d+8eNmzYgA0bNqC0tJSyhzxAfHw8Tp48aXRf9+7djW43lYvUUgEYFhbGOSRlCaYaucjISOPPEdqhWdnKmrrq2LFjo+vX3j5ije0ezFTQEFwCg9M3TNy/sQnstkStVnOKFi6ef/55dOnSxbKXnFCIL7/80mRbbuu6MzbJX6PRcIpqLjt37tyZc5jxwWFwe9neVL1x2e/bb781yzb2LNsZ2iVbteGWtgMkAK0QUvqUTydPnsTAgQMxbdo0PProo5BKpYYUb+agF4YNHdOYVbparRZqtRrl5eVITU3F2rVrcf36ddTU1JAxjeDu7o4+ffpYdI5MJjPpL5bANYHZ1dXVYj/lgis1kkgsala2sqauuM4xZUNH+4gp23GtavXx8TH7XpRKJWpra43u48qrK5FImtUPA2sW6QwePJjzB4K96s6YeK6pqeH0Aa5rhIeHIysrq0ltHxAQwLlvxYoVGDBgQIPiuinKdoZ2ydI23BbtgCW0+iHgBw2jVqshl8uxf/9+PP/880hMTMTGjRtx7do1VFdXQ6VS2WSllbX3plQqUVlZibNnz2LVqlXo378/Zs6cifPnz5P4c2JoRbZ966q5168t7r8l9/q7u7sjJSXFqhehPp+qI+vOWHn2tI89yw4NDeVcdJOeno4BAwbg8OHDVt2DPctujm24o9sx6gE0Ibb0qwnPnDmDxYsXo2/fvkhMTES/fv0QEhJiWPV7/8cWaLVa6HQ6/P9A3dBqtaisrMTFixeRmZmJQ4cO4fz586itrYVSqYROpyOjNQL9UFRlZaVhDqiXlxetoibIR5yA6OhofP/995zDYw3Rrl07qsRGipIxY8Zwhms5efIknnrqKXTv3h1Tp07FpEmTzF6lbc+yCRKAjRaCWq0WNTU1hhARv/76K8RiMYKDg/H4448jOjoaXbt2Rfv27eHt7Q2BQGAQgjwezxCXz1jZKpXKIPKAf+ciarValJSU4NatW7hx4wZycnJw5swZnD17FtXV1VCr1VCr1U3SC9mSqKiowIYNG3D48GGcOHGCc/iEIB9pCT5y/PhxowLKmduR2NhYTJ48GS+//DLEYrHV5Xh4eLRqP7aF7efMmYNvv/3W5AKGc+fO4e2338a8efPw3HPPYc6cOZxzZx1VNkEC0GZiUC++qqurUV5ejitXrkAoFBo+Pj4+CAsLQ7t27RAYGAhvb2+EhYVhxIgR9USgSqXC1q1bcfnyZZSWluLevXsoKChAUVGRYZhZp9NBo9EYPoRt2L59O15//XXKqEG0Gh9hjJlc5dwUdOrUCS+88ILhb4FAgLZt2yIiIgKdOnWyS7aP1vruaqztO3fujKVLl2Lu3LkNHqtQKLB161Z8++23GDlyJNavX2+yF9aeZRMkAO2CVquFVquFUqk0bCstLcWNGzcMw8ECgQAxMTFITEysJwA1Gg2+++47ZGVlGcSe/kPYjy+//BKvvfYaVQRBPtLEREREYPHixVQRzYTZs2dDpVKZTHX3oPDcu3cvMjMzsWPHDqNhbhxRNsENLQKx8S8tjUYDlUoFhUJh+HBNYFUqlVAoFHV6/Aj7ceXKFbz99ttUEQT5CEFYKhb4fLz//
vv44Ycf4Ovra/Z5JSUlGDNmDC5cuNAkZRMtWAByiSvG2L9xsolG1WFLWlm4evVqKBQKMjhBPkIQVjJ27Fjk5uZi+fLl8PPzM+uciooKjB8/vsH3iT3LJurTrIeAmY6hvLzcaLytkpIS6Bj1qJkj/kpLS432Psrl8hbzpVKr1fjxxx9NHvPII49g6tSp6N69O7y8vHDhwoVml0WDIB8xRkhICMaNG2fROQ899BA5RQvAHrZ3c3PDvHnzMHPmTBw4cADbtm1Damoq1Go15znZ2dk4deoU+vbt22RlEy1IACqUCuzevRuPPvooPDw8wOfzodFoUFJSgtTUVJOrioh/UalU+Omnn/DKK69AKBTCxcUFOp0ONTU12L17d505js2Za9eumUzCvXnzZrz00kt1tgUGBpKDtCJaso907NgRK1asICO3Quxpe7FYjFGjRmHUqFG4ffs2PvroI6xbt45TrG3YsMFskWbPsol/adZDwGq1Gj/88AM++eQT5Ofno7y8HDk5OYbAyDSnrmG0Wi1ycnIwa9YsXLx4EeXl5cjPz8dHH32ElJSUFiOiL126xLkvKSmp3oudaH2QjxCE9QQFBeGjjz7CgQMHOLN3/P77705XdmumeQ8BM4bKykqsXr0aX3zxBaRSKaqqqlBVVWWyu9iR6EO5PHg/Go3GaYZXlUol9uzZg0OHDsHd3R3V1dWora1tUbHxrl+/zrlv2LBhLeIZuXye5sa0Dh+xVSD6VtkTYuO6M5bRwZ72cSbbDx48GJMnT8amTZvq7btx4wY0Go3VaVDtWTYJwGYqAvWrbXk8nlO97PRzEdPS0urlY1QoFLh3955T3K8+KLVKpTI6p7IlUFlZybkvODjY6HZnjL3I4/E49+Xl5RndfufOHWrpnMBHTNlOLpcb3W5JekeRSAQ3NzdUVVXV28eVsaSyshIpKSlG93Xt2tXi3MjNFVN1ZyyvLwBUV1dzlufp6Vlvm0wmA5/PNzoyxTVaVVZWht27dxvd1717d8TFxdnd9qdOnTK6ytbd3R3PPvus0fOfeOIJoyJNP0VLP3XCnmU7K/ZuB1qVAHTmng6dToebN2/i1VdfrWd0xpjJ5OBUh7bF1BeooKDA6PabN2865YuKi5ycHKPb//vf/5K6cwIfMWW78+fPG91uaQBff39/oyLg6tWrRo8/duwYXn75ZaP7Jk6c2GoEoKm6u3z5MrRabb1ePVO2MZZ9RCAQwNfXF3fv3q2379q1a0bLOXToEKd9XnnlFYMAtKft9+7di1WrVhl9nqFDhxp91nv37nHWzf1ZXexZtjP/2LB3O2AuNGZgZ7RaLaqqqlBZWVnnU1VVRXMUHYipYYEjR44Y3Z6enu50zyGRSDj3/fjjj/V6ATMzM3H48GFyACfwEVO2++GHH+q9vM+dO8fZQ2NKxBjj9u3bOHDgQL0fqJ9//jlnWd7e3q3K/lx1V11djW+++abeD+W1a9dyluXu7m7RNa5du4ajR4/We3ds2LCB8xoP5sS1l+3bt2/P+W77+uuv621Xq9XYuXOn0XNcXFzqiDp7lu2sOKIdIAFIEPdhKqZUSkoKfv3113q/uFavXu10z+Hj48O5T6lUon///tixYweysrKwfv16JCYmkvGdxEdM2a64uBj9+/fHvn37cObMGWzYsAH9+/e3eC5zp06dOPe98MIL+Pjjj5GdnY2MjAxMnDgRBw8e5Dw+KiqqVdnfVN298cYbWLJkCf7++29kZGRg9OjRSEtL4zy+W7duRrcby8mrF5TPPPMM1q5dizNnziA9PR3PP/88MjIyOK/xyCOPOMT2vXr14jxu4cKF+OWXXwx/37lzBy+++CJOnTpl9Pj+/fvXma9oz7KdFUe0A2b/6KVmn2gNmMoXqdPpkJCQgClTpqBLly7Iy8vD119/bXQ4pakJCQkxuT8vL8/imF+EY3ykIdtlZWUhKSmpUc/Qt29fbN++3ei+iooKzJkzx6xyeDwehg4d2qrsb6rulEolFixYgAULFphVd0888QTnNfbu3Wt0n1wuNzsLjUAgwJNPPukQ2/fq1Qtt2rRBUVFRvWPLy8sxdOhQBAcHw8/PDxcuXDApVp5++uk6f9uzbGfFEe2AuVAPoAXweDxDnt+m/BCW89hjj5ncr9FosGnTJsyePRvr1q1zSvGn70EwNYmYcF4fcYTtxo8fD5lM1uhynn32Wc6FLy0VW9XdiBEjOBciTJo0yeQcMHOZMGFCvZRp9rK9QCDAG2+8YfKcwsJCnD171qRACwwMxLRp0+oJWXuV7aw4UxtOPYAWiD93N3eEdwpvsmXmjDHI5XLcuHGD5g9aSFBQEGJjY/Hnn3826+fw8vJCXFwcTp8+TUZtZj7iCNt5eXlh+fLlmDFjhtVluLm5YcmSJa3O/raoO1dXV5NBlwMCArBo0SK8++67Vl/D29sbCxcudKjtZ82aha+++gq5ublWl71y5Uq4uro6tGxqw0kA2gxvH2/87//+L9q1a9ckPXFKpRIbNmzA0qVLW1SMPkexYMECDB8+3OzjExMT602edgZee+01ixoPrvAQhON9xFLbcQ2PmeLNN9/Er7/+itTUVIufXyAQYNu2bZxz1Vo6jak7APjss8/QpUsXk8e88847OHLkSL05pWa9sIVC7NixA6GhoQ61vVQqxd69e/HEE09whsVpyO8nTZpkdJ89y3ZWHNEOmAONJ5oJYwzFxcXYunUr+Hw+XF1dHfqRSqVQKBTYtm1bi0nP5miGDRuGWbNmmXXs4CcHY+PGjU75HBMmTEBCQoLZz5ycnEzGdxIfscR2SUlJmD59uuWNOp+PnTt3Wpy5xN3dHfv378fIkSNbrf2trTuZTIZt27aZJUQEAgH27dtn8VxdLy8vHDx40OTcTHvaPioqCr/88gvnyl0u5s2bh08//dTkMfYsu7m34da2AyQAbYxSqcSWLVtw7tw5hwcJVqlU+PHHH5Gfn0/Dv41g1apVWLNmDedcGR6PhylTpiDtQJpN5urYA4FAgF27dmHKlCmcx4hEIiQnJ2PPnj1O+xyt0UfMsZ1YLMaiRYuwa9cuoxklzEEikWDz5s3Yvn07YmNjG7ynSZMmIScnh1aNW1h3QqEQY8eOxe+//47x48dbLBi3bt2K6OjoBq8xdepUnD9/HoMHD25S2/fu3RvZ2dl4++236yU3eJA+ffrg+PHjWL58uVl+bM+ym2Mbbot2oCF4rIHIvzU1NRg1ahSOHDnCGVG8NeHi4oL4+Hh8++239eIw2QudTofbt29j8ODBuHTpUqsXgEKhEEOGDMGuXbusnvRcVlaGtLQ05OTk4NatW5BIJOjQoQOSkpLQtWtXAP/GosrIyDAaHNvf379emIyioiLOYMwxMTFGl//n5uZyBmrt0aNHg1Htb9y4gZ07d6KgoAByuRyenp7o1KkTXnjhBQQFBdnkGubw559/ory83OiLzliCdkfVlalzHn74YQQEBDjUR7hsV15eDl9fX0REROD55583PP+qVas4e3BTUlIwZswYs+yTnZ2N3377DTdv3sSdO3fg4uICHx8fREVFIT4+
3mRoisagVqtx7Ngxo/t8fX0bFD/mYo0/mcuDdScWixEYGIiOHTsiISGh0d8fxhiysrJw8uRJ5Ofn4+7duxCJRPD29kZMTAzi4+ONZhax9v5tZfuysjIcO3YMJ0+eRFlZGfh8PsRiMdq3b4+RI0ciPDzc6nu2VdmW+kVTt+H2bgeMOZ9Jqqur2VNPPcUEAgED0Oo/PB6PeXp6sm3btjGFQsEcQW1tLVu3bh1zdXUlGwBMKBSyhIQEVl1dzQiiJbNy5UrO70FKSgpVEEFQO2B1uTQEbMWvtcrKSqxYvgLFxcV2743T6XQoLS3FF198QQs/CIIgCIKwzWgaVYF1ouza9Wv48ssv8d5779l1+TnN/SOIloFcLkd+fr7RfSEhIUaHl+yVAYAgCGoHSABaiUKhwObNm5GUlITo6Gi7xAak3j+CaDmkpaVhwoQJRve99957WLZsWb3tpkI/0OIegqB2oDHtgNlDwJR9oC6MMZSWluLDDz+0W4w16v0z4bh8Pvkk0ax4MHfr/WzevBlyubzONqVSiT179nCeY2oBC0EQ1A7YRABKJBKymhE0Gg0yMjLw888/o7a2FhqNBlqt1iYfjUaDkpIS6v3j+DHSXKK+E4Se8PBwzvAWxcXFePzxx3H06FHcu3cP58+fR1JSEgoLCzm/A507d6ZKJQhqB6y+lwbHLXk8HsLCwiAQCBwe+87ZYYyhoqICS5cuRfv27eHh4WGzstVqNdLS0pCXl0e9fw8gEAgQGRlJeZGJZoVIJMLYsWOxdetWo/tzcnIwaNAgs8qKiYmBt7c3VSpBUDtgPwEoEAjwxBNPYMuWLZSBwgg6nQ4XL15EYmKizYcklUol9f4ZQSKRoF+/fiQAiWbHq6++ytnwW0JzSXxPEITztgMNBoJmjOHu3buIj4/HP//8Q71RRJMiEAjw6KOPIjU1lXpAiGbJ+PHjsX37dqvP79q1K7KysmhqDkFQO9CodqDBLhQejwcvLy/Mnz/fbtHiCcKsXys8Hnx9ffHBBx/QHECi2fLpp5+iS5cuVp3r4eGBHTt2kPgjCGoHGt0OmDWG5uLigqFDh+Ldd9+Fn58fDb0RDofP58PPzw+LFy9G3759KQQG0Wzx9vbGsWPHEBMTY9F5/v7++PXXX02mlyMIgtoBcxEsXLhwYUMH8Xg8uLi4ICoqCl26dMH5nPOoqamBTqejIWHCbuj9TiaTISIiAmvWrEFSUhL1/hHNHldXV0yePBk6nQ7Z2dlQqVQmf/y8/PLL2L17NyIiIqjyCILaAdu8YxuaA3g/jDGo1WqUlJTg0KFDSEtLw99//43KykpYUAxH4Y09nTWJARljKC8vryeE9aFKGuqpUqlURuMI8vl8eHp6ttpYdzweD54enugZ3ROJiYlISEiAr68vxGIxtRpEi6KiogJ79uzBqVOnUFBQgMrKSvj6+sLf3x+9evVCUlIS/P39qaIIgtoB275nmRXKjTEGlUoFnU4HrVYLhULReAFoAyHWFOfW1tZi0KBByM3NrVOOm5sbvvrqKzz22GOcWUK0Wi1+++03vPzyy3VEII/HQ2hoKNLT0yGTyVqtAJTJZODz+RAKhRAKhTT1gCAIgiBshFX5y3g8Xp2emNY8JFddXW1UmPD5fPj4+MDf3x8uLi5Gz1Wr1fD29jbayycUCuHv79+q65YyfRAEQRCEEwlAelGbXy/6jzX1ZupcgiAIgiCIJhWALQGtVmvVULBWqzW5T6vVcoo4rVbLuYiGMQaNRmN19hWuYWeCIAiCIIhWrxK0Wi2qq6uRl5eHmpoai0VgTU2N0WwdGo0G586dg5ubGwQCgdFz9ccYE5G1tbXIysrizBloCjc3N4SEhJi8NkEQBEEQrRerFoG0FDQaDa5evYoZM2bgr7/+MtmbxwVjDFVVVUZXAUulUs75f3rUajVqa2rrrWLm8/lwc3OzaghYKBQiNjYWa9asQadOnUgEEgRBEARBAlBPTU0NJkyYgAMHDkChULQco/7/RTrDhg3DN998Q3HzCIIgCIKoQ6uNq6HT6SCXUJCkKQAAAORJREFUy/Hbb7+1KPEH/NsrqVQqcfz4cVRUVKAVa3yCIAiCIEgA/h88Hg8ikahFL5YQiUQ0/EsQBEEQBAnA+wWgu7s7xo0bBw8PjxYVZJjH48HDwwPjxo2Du7s7hZIhCIIgCKIOrXoVsEgkwty5c8Hj8fDtt9/izp07zX64lMfjISAgAC+99BJmzZoFiURCXk4QBEEQRF29wGiCGEEQBEEQRKuCkqsSBEEQBEGQACQIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBNGk/D88LwuWCBiSZgAAAABJRU5ErkJggg==)Assignment 5
###Code
# In this assignment, we will visualize and explore a CT scan!
# load numpy and matplotlib
%pylab inline
# we are using pydicom, so let's install it!
!pip install pydicom
###Output
Defaulting to user installation because normal site-packages is not writeable
Requirement already satisfied: pydicom in /home/paulm/.local/lib/python3.9/site-packages (2.1.2)
###Markdown
**Task 1**: Download and visualize data with SliceDrop! [20 Points]
###Code
# Please download https://cs480.org/data/ct.zip and extract it on your computer!
# This is a CT scan of an arm in DICOM format.
# 1) Let's explore the data without loading it.
# TODO: Without loading the data, how many slices are there?
# There are 220 slices in the data (one .dcm file per slice; see the quick check below).
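# (Quick check, a sketch rather than part of the graded answer: count the .dcm files
#  and peek at one header without reading any pixel data. The path assumes the
#  ct.zip archive was extracted to a local ./ct/ folder.)
import glob
import pydicom
files = sorted(glob.glob('ct/*.dcm'))
print(len(files))                                    # one file per slice -> 220
header = pydicom.dcmread(files[0], stop_before_pixels=True)
print(header.Modality, header.Rows, header.Columns)  # header-only read, no pixel data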
# 2) Let's visualize the data with SliceDrop!
# Go to https://slicedrop.com and drag'n'drop all .dcm files into the browser.
# Please use the 2D sliders to show axial, sagittal, and coronal slices in 3D.
# TODO: Please post a screenshot of SliceDrop's 3D View in the text box below by
# using the Upload image button after double-clicking it.
###Output
_____no_output_____
###Markdown
![Screenshot_20210414_162236.png](SliceDrop 3D view showing the axial, sagittal, and coronal slice planes)
+7zuOg7t2w4YN73jHO84666zly5dffPHFa9eujeOYMgghlFItLS2e5+GvGH75y1++7W1vW7duXRRFZhjDMAzDMAzDMAzDMAzzikcpNTY2JoT4p3/6pzPPPHPevHmrVq065phjyuVyU1NToVBI0xSLf2Fy0zS1bVtlDhqd0KMkVUnIogcZaAkzZCtWQ1OwlBJh2ISWpSQYguEAATg6FDk8MtYLIwDDaapoUADNH/mhyzGE8lNbSolIaGv0UAaEYZOmh880TdNsHTeFATMSDayYFkKoTEnr7L2FGMvMNKYT0DfffPOxxx77pS99aXBw8DOf+QyeUiHEscce+61vfatarT7zzDPXXHPN5EFCCHHHHXf4vn/xxRe//e1v37Fjx+WXX75161baOzQ09KUvfen555//13/9VyHE3r17f/SjH1122WUPPvjgxMTESSeddOutt95xxx0rV6781Kc+VavV/t//+3+NRuMP2YUQQtx///3//M//fNFFF918881CiHvvvfctb3nLunXrcmEMwzAMwzAMwzAMwzDMjOL+++8XQsybN6+7u/s973nPvHnz2traisVikiSoVgxzSvpVGUuMSarCmeJTZRUqsJyZYqCbLcuiRc0karGJzIjE3NCgzJRKZAIa/aj4gbFpmtIKbqQl9SyldByHUmECGKWUooEYYmVnipg4jm3bxmsbkQ3T0FrTzBEM6YxNIQQcNw4HSKxrw3eTzScHHcfxL37xCxyFmVFM+tXlpWbJkiU//OEP3//+9z/zzDP5fS8Cy7LOP//8T3ziE2vXrg2CIL+b+SNy3XXXffKTn8z3MgzDMAzDMAzDMAzzp+PDH/7wf/3Xf+V7ZxJXXXXVkiVLWltbHceBkyVTDGcKtQrgUuFM4VJJzqJNu6SUsKtwuAimIWgAsrHYhTBsYhd9yux9hlDGmCSqZ2jDWdOb/SgbHYuSK6XSbDk2RqEfYNpUVISS0CcayJBmpTwswzsDIYRt25i2nnwxHcfxPM+yrCiKKpXK3r17v/jFL2ZzZGYQ062APuIoper1eu6RePGcd955n/jEJ2666Sa2zwzDMAzDMAzDMAzDMEyOr33ta8uXL7/gggsWLlzY2trqeZ7WGrWhSUMLo9oGKVToXcJc6oseNKjiM3VC+EJMowd7abjOXuUHO4y9qVHcGQJaZxKc8qChlEqShFZzwyDTLvNEzE0cneYACx9FEc0T/dSm5c+YKs0T80dOXBN84kA0HJH4jOP4V7/6lbmLmTn8UQX0jh073vjGN+Z7XzQ//vGPf/zjH+d7GYZhGIZhGIZhGIZhGEaIkZGRRx999NFHHz3nnHPOPvvs+fPnt7S0oCgHnCk0q8pUb5oVsoCiFca6YCurv5yzwxCyMitzDDmLHhqIHoC0iEQDYdSmAGFU8CC01kmSYBq0vJpSIQ/5YhqijRmiR2SWeb+HQACNwiZOhBpoCyFoobQwJoy9SqkgCDZu3IhOZqbxRxXQDMMwDMMwDMMwDMMwDPOn4uc///m999779re//bWvfe2CBQuam5s9z7Nt23VdU7DCvUJJx3Ecx7HIJLLMSjOTn4WcNbUsBeATPdC+WCmMXcB0uGgjODcZxJDYRSfqclAP4vGJghjYZWJ6bUpOBaaFMUMgJivy3CaNgrLXWWUSIYTjOI7jWJaVpmmj0Xj00Uf7+/tpIDOjYAHNMAzDMAzDMAzDMAzDzBSSJLn99tsffPBB3/fPOeecpUuXlkoleheflNJxnEKh0NTU5Pt+oVBQSkVRlCQJ6nWYK5dp6TElNxcXU781uUSGuYtWUovM/MJ6m8FisqRGJ1QvpcJerbXMMINFZq5VVkxjKmmaymyZM4LNkxVT5gC01pgzdtHRpZSO4+CSxnE8Pj5erVZzY5mZw59MQJ977rnvfe97p964U/nBD35w55135nsZhmEYhmEYhmEYhmEY5gUxMjIihPj2t7+d3yGEEOLCCy9sbm5uaWk58cQTOzo6IKPTNA3DMIqiaWpWmJsIgPuCvYXeJV2LIRhF/ftNkouhvZSW2iKT4FDDtJfykN22shLVNAfswiTRDyWNPLmJAXRSAIbgnYSWZUkp0zSt1WrDw8N33313fjAzY/iTCeiLLrqop6enUCjkd0wmCIKLLrqIBTTDMAzDMAzDMAzDMAzzx+GWW25B48QTT3zzm9+8cuXKWbNmlUqlYrHoum4cx0mGmGJ+AQysyBRtmqYQsiAngsn/UirEkAgGZgB2YSx1IgmsMXlnMzligDkHYUQKo4Q0+hGMA1E8gX6MRbCVnakQIoqiSqVy++2350YxM4oXLqA/9KEPrVy5MgzD/I6DgVtWCHFQ+yyymC9/+cvms8QcIr7vb9q06cYbb8zvYBiGYRiGYRiGYRiGYQ7G008//fTTT3d2dn7wgx9ctmzZrFmzisWibduoyAHVS+WPIa8IGF7yYBCyInPH5GrpkwJoYXJOcNMQa/KqZ7TJOEsp0Y8DIZuppM0hiMRxRWalsRflQdBvGVVEkJb6cSCdFe6QUqKmtpQyiqLx8fHBwcHf/e53NISZgbxwAb1y5cqTTjqpubk5v+Ng4FZ+7rnn8jsOzJlnnonHI7+DmZaJiYl8F8MwDMMwDMMwDMMwDHM4DA8Pf+UrX3nd6163YsWKNWvWdHV1FYvFpqYmz/OiKIqiKI5jaFkIWahY0/miB6KWIvXkNcUUg1EUKTKJbFkWZDfaCCPMQ2AOKtPf2MQREUOjsJcGIg+9wJAiEUNH3K+jsywL6hnVn5MkqdfrfX193/ve9/KhzAzjhQvoMAxfgH02wc84zEtHc3PzC1iizjAMwzAMwzAMwzAMw+R45JFHnnzyydtvv/0jH/nI4sWL58yZAwcthJBSwkEDvLuPTDSAj4ahJhesjFrS0Ls5rQxBjIHCML+khoWxqBnHhXdGWhqFhumvKRWgfvpEHtqkk0I8NTBhJLRtG+umkyRpNBrDw8PXX3/9+Pg4IpkZywsX0C+e3BPFMAzDMAzDMAzDMAzDMC9bsN752muvfe1rX3vyySevWbOms7PT933XdW3bjqIoSRLIYkhhK3PHsM8iE7U6K+hM9hk91BaZVobRtibLYsuyHMfBXhqCbEopDEQYhiul4IVpWTRGUa0PbQhrBCCYIhFMkzePi4ZlWbgIOHQcx2NjYxs2bGD7zIgXKaBzd//hgl9ODhHc4vlehmEYhmEYhmEYhmEYhvnjsn79+qeeeqq3t/ess86aO3duqVTyPA/e1vRd8M6QWiR/TRONfnRiuLmLNnOdruuiyDJehIhD2LaNwyEPjmIaZIhjBGMTbdMmY5QQwjIKiZg9Kqt5ba6wtm0bZTegxeM4Hh8f//Wvf/2DH/wAY5kZzgsX0I7jTExMvJgqHIcloJkXwMTEhO/7+V6GYRiGYRiGYRiGYRjmxZGm6R133OH7/hve8Ib58+eXy+VCoYCXEyql4IVFVpqZ1iYLQyiLyc7XjJwahvLKruvCNSdJUq1W6/U69LHjOL7vQ4JrrdM0peFkonEsna1uxkHpiAigI1KDJpCmKQlrma3vBlj7DPscBM
H4+Hhvb+9///d/0yGYGc4LF9DPPvusbdsvoMQwbvdjjjmGHrCDsm7dOtzZ+R3MtPi+v2nTpnwvwzAMwzAMwzAMwzAMcyS49dZbd+3adfbZZ69ataqlpcX3faxETtMUDhoezLIsO3uzH1YQoy2y6hmwXvt1X1JKx3E8z/N933GcOI4nJib6+/tvu+22OI5PP/309vb2lpYWx3GEEJ7nwXRTuWeQ8844olIKc0MAdsHXSSkxSfLXSimU7KAkdobjOIgPw3B8fLyvr+/uu+9GDMOIFyOgb7zxxnzX4fDd73730FdAf/rTn853MQzDMAzDMAzDMAzDMEeIr371q8uWLUM7TdOBgYFbb731nnvumRzF7If169evX7/+M5/5TE9PT2dnZ6FQgKhN0zSOYywcJo1LNmzqukxSxlJKKaWd1Vz2fb9QKLiuq7WemJjYs2fPr371K6pu8fDDD59wwgkXX3yx53nkgjGQamWQYs65OChmxGBTTF7yTDGYD8IQIKV0XRernuHcgyCYmJjYu3fvunXrHn74YfNAzCuJ5cuXX3vtte985zvzOw7MCxfQLxLLshqNBl4VyjAMwzAMwzAMwzAMw/xp+fa3v71u3TohhOd5p59++t/+7d9u2bJlx44d+Thmf1x99dUrV6487rjj1q5d29XV1dzcjMIUQgiV1U1OM8QUHayzAtBkn6WUjuNQ2Y1qtdrX13fHHXfce++9QRDQQCHEhg0bPM97xzveEQRBW1tbqVTCoaGhscYZKlwIYRmvQCTXDMWcW/uMYMRDPWMvOk3ZnSRJEARjY2Pr16+/6aab/jAzhhFC/AkF9J133nnGGWfgbp4e13XzXQzDMAzDMAzDMAzDMMwRBStY0f7pT396/vnnL1++nAX0obNp06ZNmzY9/PDDrutedtlls2bN6ujoKJfLeEEfmd8kSUj4KqWmyjHoXXhnIUQQBMPDw7fddttjjz3W19eXCwZPPvnk7t273/CGN6xevXru3Lnt7e2u65IdhnrGHGDDxZRSGzQNWppNehrGGZ10Llj7LISIogj2+dlnn+W3DjL75U8moL///e9///vfz/cyDMMwDMMwDMMwDMMwLw9GR0fzXczB6O/vF1k52fe9733t7e3FYvH4449vbm72fR9rk6GGhRBw0GLyG/+EEFrrOI4bjUZvb+/mzZs3b96MxenTMDAwcNttt/30pz99z3vec+qpp86aNcv3fcuyoJihnsl3k26eqr9FZsAty0rTFPYZS7mxNBvqGQmjKPr/7N1rrCR5fd//X92ruruq+nouM7Mzszt71cJiTDZWWNDuGkWWYynC+gcZ848UKdgjbWIrEtgPDCZ5EhNhVsESxigEOWQNAcESW3FMLCyUOHGiOGaJ/yyE62Zn5z6nu09fqqu67vV/8M2ezDTL+sByZm/v1wPU51fV1dXdw7J8znc+vyiKZrPZ17/+9U984hObFwKUUi9iAA0AAAAAAICXDsdx2u22UsqyrAceeCDP8//1v/7X5kn4QXzqU5+SB3fddZdt2/1+/y1veYtt29KeLA0bBzXKsougpLpZli0Wi89+9rPXrl27dOnSDRd9XkVRfOpTn2q32/fdd58UUkt7hgw+SwAtMfdGEq2eLdxQz9aAKKWk3FluTzJo+c+6rsuyTJJktVqNx+NPf/rTTMrjeTxfAP3v/t2/O3bs2MGPWZY98MADf/Nv/s1/9s/+2UMPPbRara4794fR6/W++MUvvvvd7/7P//k/bx77Ho8//vh/+2//7Z//839+/eKHPvShqqp+5Vd+5fpFAAAAAAAA/KDe+c53vvOd7zz48QMf+EAcx9cdxw/vW9/6ljz40z/90xuP/F+PPPKIUurixYt/+Id/uHnsB/Sxj33sPe95j+d5tm0ftDabpnlQtXHwQF3XxaGenX2WdU3TdF2Xalxd1+XpSilpfF6tVovF4tq1ax/5yEfyPJenAM/p+QLo97znPbZt/+Iv/uItt9zyj//xP5Y/jj9C8qdffrUCAAAAAACAF9Hv/M7v/PEf/7FSyrbtn/qpn3rXu9711a9+9aAVGkftox/96ObSC/D444+/853vNE3T8zyZXzYMQ9d1aaBWSjXPbkUok9fq2Q0G5XHTNAeFG3KmbJ+YZVmSJFEU/fmf//n//t//+4knnjh4ReD7+T9bWD6nr33ta1/5ylf29/fTNP3KV77yl3/5lweH/uE//Idf+tKXHn30Udd1lVI7Oztf/vKX/8E/+Aef/exn5YRf+IVf+OIXv/j444+/9a1vlZW77rrrt37rt/70T//0scceu/322w8u9fDDD//xH//xJz7xiTNnzsjK7bff/q/+1b/6L//lv3ziE5+48847D8488Hf+zt/5kz/5k09+8pOj0WjzGAAAAAAAAF6APM+/8IUvVFV1/PjxzWN4mfjud7/7F3/xF+PxOEmSsiwPxpkdx3Fd176O4zjXt4JI/4bUVeu63jSN5M5RFO3v7+/t7V26dOl//I//8fnPf570GYf0fAH082ia5uMf//hDDz30pje96WDxoYceevzxx5VSf/2v//WzZ8++733v+/jHP/6e97zn3nvvVUo98sgjvV7v7//9v7+/v//zP//zB8/a3d394Ac/ePr06be97W2y8v73v78sy7Nnz6Zp+v73v//gTHHixIlf/dVf/YM/+INPfvKTd99998ZRAAAAAAAAvEBVVcVxLJXQeDmq6/rxxx//7//9v0+n0ziO0zSVHQVN0zQMw7Ksg8RZxpzlx4NZablCmqbL5XI6nV69evXKlStf/OIXv/SlL/3mb/7mZz/72R95UwJeXtrXcRxn8/CNnq+C43n8y3/5L+fz+bvf/e4gCA4Wf/d3f1f+psZ9992nlPqN3/gNpZSu66997Wu//vWvX7hw4U1vetP73ve+L33pS5JTi8997nNf+tKX3v72t/u+r5Tqdru33Xbb+973vm984xt/8Ad/8E//6T/t9/v7+/sH57/hDW/Qdf2xxx6LougXf/EXD9YBAAAAAADwo3Lp0qU3velNTLm+rP3+7/9+Xdf3339/t9vtdDqu6xqGcVCKK90aB80bUgxdVVVZlmVZStvGer1OkuSxxx6r6/r8+fObL4BXJcMwPv3pTx/8+F//63/9wAc+cN3xTT9kAF0UxUFV+YHpdCoP4jieTqc//dM/ff3R3/qt3/rKV77yUz/1U4888sgb3/hGKVZXShVFoZSqqkp+vL4S+voS9AOWZcl/H5RSaZpefwgAAAAAAAA/hHe/+90bK7/+67++sYKXnaqq/u2//beGYfT7/RMnTvT7fcdxTNPUNE2SaDlNkui6rmWDwTzP0zR95plnkiT5vd/7vRsviVe7b3/723/7b//tzdXn9UMG0M/vf/7P//nud7/7wQcftCzrn/yTf/KOd7zjwoULH/7wh8+dO/eRj3ykaZq77rpr8znPms1mTz/99Fvf+tannnrqZ3/2Z5955pnJZHL9Cd/85jd1Xf+7f/fvXrly5c4777xy5cr1RwEAAAAAAAAc+NznPqeUOnPmjO/7r3vd606ePCnlG+azG
wxqmpbneVEUf/Znfzafz6uqKorim9/85uaFgB/KkQTQ3/zmN3/7t3/713/916fT6Qc+8IELFy4opT7zmc/8o3/0j372Z3/26tWrH/zgBzefc533vOc9733ve3/3d3/3qaee+rVf+7WNo1/72tf+zb/5N29729uuXbv2ne98Z+MoAAAAAAAAgA1PPfWUUuov//IvNw8AwMvIo48+urkEAAAAAABeVGfPnt1cAoCb5YZ6ZQAAAAAAAAAAflQIoAEAAAAAAAAAR4IAGgAAAAAAAABwJAigAQAAAAAAAABHwtxceC4/8zM/83M/93Oapm0e+B6f+cxn/uiP/mhzFUeP7wgAAAAAAADAS82hAui3v/3tt956q+u6mwdulKbp29/+dsLNFwXfEQAAAAAAAICXmkMF0EqpvzLZVM+e8/73v1/TtMOM4uJHommapmnUD/IdAQAAAAAAAMBNcNgA+vDeO7td6ToB9E3TNI2q68duyTYPAAAAAAAAAMCL6rABdFmWm0vfR7Oula4UAfTNIgG0+kG+IwAAAAAAAAC4CQ4bQNd1vbn0/aS10gigb6KmUU2tfqDvCAAAAAAAAACO3mED6KqqNpe+n3WtNNUoAuib5v8E0D/AdwQAAAAAAAAAR+8oAuhKaYr8+eZplGoqpfQf4DsCAAAAAAAAgKN32AD68PUOv/GGizqbEN5ETdPUda3UycN/RwAAAAAAAABwExw2gD78dO173/vezSUcvd/7vd87/HcEAAAAAAAAADfBoQJoTdPW67Vt25sH8JLBdwQAAAAAAADgpeZQAfQf/dEfvfnNb26aZvPA97Asa3MJNwXfEQAAAAAAAADglezRRx/dXAIAAAAAAC+qs2fPbi4BwM2iby4AAAAAAAAAAPCjQAANAAAAAAAAADgSBNAAAAAAAAAAgCNBAA0AAAAAAAAAOBIE0AAAAAAAAACAI0EADQAAAAAAAAA4EgTQAAAAAAAAAIAjQQANAAAAAAAAADgSBNAAAAAAAAAAgCNBAA0AAAAAAAAAOBIE0AAAAAAAAACAI0EADQAAAAAAAAA4EgTQAAAAAAAAAIAjQQANAAAAAAAAADgSBNAAAAAAAAAAgCNBAA0AAAAAAAAAOBIE0PiROX78+HQ63VwFAAAAAAAA8GplPvroo5trwA/l8uXLv//7v7+5CgAAAAAAAAAAAAAAAAB45Tl79uzmEgDcLFRwAAAAAAAAAACOBAE0AAAAAAAAAOBIEEADAAAAAAAAAI4EATQAAAAAAAAA4EgQQAMAAAAAAAAAjgQBNAAAAAAAAADgSBBAAwAAAAAAAACOBAE0AAAAAAAAAOBIEEADAAAAAAAAAI4EATQAAAAAAAAA4EgQQAMAAAAAAAAAjgQBNAAAAAAAAADgSBBAAwAAAAAAAACOBAE0AAAAAAAAAOBIEEADAAAAAAAAAI4EATQAAAAAAAAA4EgQQAMAAAAAAAAAjgQBNAAAAAAAAADgSJhnzpyRRvA4uAAAIABJREFUR3meT6fTJEluPAEAAAAAAAAAgB+G+dRTT8kj27Z3dnbOnz9/4wkAAAAAAAAAAPww/m8FR57nlmVddwgAAAAAAAAAgB8eHdAAAAAAAAAAgCNBAA0AAAAAAAAAOBK6Uuqee+7ZXAYAAAAAAAAA4IUx3/CGN2yuAQAAAAAAAADwglHBAQAAAAAAAAA4EgTQAAAAAAAAAIAjQQANAAAAAAAAADgSBNAAAAAAAAAAgCOhP/HEE0mSbC4DAAAAAAAAAPDC6Eqpb3zjG5vLAAAAAAAAAAC8MFRwAAAAAAAAAACOBAE0AAAAAAAAAOBIEEADAAAAAAAAAI4EATQAAAAAAAAA4EgQQAMAAAAAAAAAjgQBNAAAAAAAAADgSBBAAwAAAAAAAACOBAE0AAAAAAAAAOBIEEADAAAAAAAAAI4EATQAAAAAAAAA4EgQQAMAAAAAAAAAjgQBNAAAAAAAAADgSBBAAwAAAAAAAACOBAE0AAAAAAAAAOBImJsLz8XzvM0lpZRS6/V6cwkAAAAAAAAAAKXUIQPokydPbqy4rmua5hNPPLGxDgAAAAAAAOAV5sMf/vCpU6fk8Wq1+upXv/ov/sW/mM1mN54FPIdDBdDf+ta35IHrur1er9frxXHMnzAAAAAAAADgVeLjH//4f/yP/1HTtK2trXe9611nz579wAc+sHkS8D0OFUAf5M55ns9ms29961tVVW2eBAAAAAAAAOAVar1eR1GklFoul1/4whfe8Y53bJ4BPJdDBdD33nvvfD7/9re/XZbl5jEAAAAAAAAAryZN0+i6vrkKPJdDBdDf/OY3+/3+nXfemSTJbDZbLpdN02yeBAAAAAAAAOAVynGcdrutlNra2vrpn/7pr33ta5tnAM/lUAF0HMdxHCul2u12r9c7fvz4er2ezWbz+XzzVAAAAAAAAACvOGfPnj179qxSKs/zr3/96x/96Ec3zwCey6EC6DvuuOP6H4uiCIKg3+8/8cQT168DAAAAAAAAeEX68Ic//Cd/8iebq8Bf5VAB9OXLlzeXAAAAAAAAAAB4XocKoKV/AwAAAAAAAACAwztUAK1p2uaSUkoptiIEAAAAgJc7wzCqqtpcBQAA+FE4VAD94z/+4wdZs6Zp8ljTNDqgAQAAAODlot1u9/v90WhkWZZpmqZpNk2TZVmaplVVrdfrvb29KIo2nwYAgFK//Mu/vLkEHM6hAug8z5988kl5/NrXvlYe33fffTecBAAAAAD4Yfm+3+l00jR1HMf3/VarZdt2HMdFUSRJImNAs9lsvV7btp3n+ebz/yp33XXXYDBwXdcwDMuyNE2T6aIwDPM8L4qiaZqdnZ3VajWfz69evZpl2eYlAAAAfnCHCqABAAAAAEfhtttu29nZ8TzPtu2qqlzXrapK13UpQtQ0TcoxDMOIokh+HI/Hy+UyTVOJp4ui2LzojW655Zbd3d0gCOSyjuNI+lzXtWVZVVV5ntfpdLIscxzH87zt7e3jx49PJpNLly5JNr15RQAAgEMjgAYAAACAmyoIgtOnTwdB0O12i6Jot9tFUTiO02q1kiRxXTfLMsMwXNddLpee55mmGUXRYDCo67osy3a73TRNnudlWV69enWxWMzn8/V6LVfe2tqyLKvdbmuaZpqmPF3ibNd1dV1XSjVN4ziOPNA0Tdf1uq49zyuKQtO0PM+DIPB9f3d3dzqdnjt3brVa3fgOAAAADosAGgAAAABuhna7vbu7OxqNwjDUdb3T6VRVJUHzcDg0TTPPc9/3LctK07Tb7aZpqmlap9PJ87zVai0WC9M0O53Oer2WvDjLsttvvz2O4zzPpdNZ0zTbtnVdb5pGsmaJnpVSMmGtlJIJ6Lqum6axLKssyzzP5VnyFF3XDcOQayqlXNe9ePHidDplFBoAAPwQDhVAz+fzg8dPP/20PLh8+fLBIgAAAADgepZljUajbrc7GAyqqmq1WrquW5allJIWZsuygiDodDplWbqu6zhOnueGYdi2bZqm1HE4jlNV1WAwGI/Hp0+fLstyuVxKLrxaraSvw3VdSZNljFrXdSnT
MAyjLEtJmauqKstSpp7X67XrulIDnSSJZVmtVquu6zzPLcuSmg6ZnpY7abVavu8nSfLMM89cvXp1420CAAA8v0MF0BcuXDh4fPB3ryaTycEiAAAAAGBnZ2d3d7fX63U6Hdu2y7I8aNVIkkTTNMdxHMcZDAa9Xk8pFcexNG+sVqskSba2tnRdr6rKcRw53zRNORQEgVJquVz2ej0JmsMwnE6nhmG0Wq35fO44ju/7Ek/3er2yLOU0SZlt27ZtWymladr29rbcj2EYvV5PJp2TJJHHy+XSMAzDMCQEt21brqCUOnHiRLvdnkwmi8XixvcNAADwfR0qgFZKbW9vK6WuXbsm/45SFAX/zgEAAAAASinf9++7777d3V1Jk6U9Q1oy4jiu69owjO3tbdkG0HVd13U9z4uiqNfrmaYZRZFMGTdN0+l0JpNJr9dzHKcoiqqq+v1+HMdN05im6fu+hMJ1XQdBIPsHZlmmlOp0Omma6rru+/56vW6aRp5YFIX8PzgZc7Zt2zCMLMtkIlvTtKZp6rru9/tZlsnr1nU9n89lFlsGq13X3dnZ8X1fBqvruo6iaPNTAAAAeC6HCqDlXzWkc+O2226TDSs8z+OvXwEAAAB4NXv9619/6tSpMAwHg4FsD3jhwoU0TU3TlLR3OBy22+1jx45lWWaa5nA4nM1mTdP0er2qqiT51TRtsVjIjn8yy7xer8fj8WAw0HU9CALDMNI0tW1btiWUjmZN03RdlzFnz/M0TQuCQKaVO52OYRhJkvi+b5pmHMftdtuyrPV6LbGyzFxLJYjneU3TSKAs+bhlWTI6feXKFU3TpDxa8vTd3V0pFbl8+fJkMonjePMTAQAAuNGhAujBYPCNb3yjrmvHcVzXffLJJ03TvOeeewigAQAAALwK3XHHHadOnRqNRjK2XJaljAzXdX3rrbdK4/POzs5yuVwsFmfOnFFKmaZpGEYURcPhMM9zqTS0bfvChQtbW1utVmu9Xu/s7GRZVhSFYRgy7CxPkVw4y7IgCJqmCYJgsVg0TTMajVarlaZpW1tbcRxLxYcUcdi27fu+7Fsop+m63uv1LMuSrLnX6xmGsVwuNU0zDEPCazlkmqYMaB87diyOY5nmlhi6KAqJ0W3bNk3z0qVLaZre+NkAAADc4FABtPyLlFLK933ZkLAsy82TAAAAAOCVbmdn5/bbb/d9v91uh2Eog8yLxeLOO+9USmVZ5nme7/uS21qWtbu7a5rm/v6+bD84GAwmk0nTNNvb21euXOl2u77vX716dWtrq91uX7x48WDqOc/zuq6ll7mu69lsJnmxaZqLxSIIgjRN2+12XddSDO15ntQkyrNWq1VZlgcPfN+3bTuKIk3TfN9frVZ1XUunRxRFVVXJ9oNhGCqlFotFp9Npt9uaptV17ft+kiSr1UrKqYuikAaPY8eOGYZx8eJFaYgGAAB4TocKoOu6tiyrKIp+v7+3t6eU8n1fisYAAAAA4NXg3nvv3dnZkWnlkydPNk2T53lRFLLfYJZleZ63Wq3FYpHnuVJK2pZ9359Op7Ztj0ajyWQymUykQGO9Xvf7fdllZ3d3N01T3/c7nU6n06mqKk3TTqejlIrj2Pf9K1eubG1tlWVZVVXTNLPZbDab6bpelqVlWYPBQPo6fN8visKyrMViIUPTi8ViOBwqpdI01TQtz3Nd103TbLVaQRAsl8uqqqSvQ2Jo0zR1Xe92u03TSD20ZVlJkrRarXa7PZvNZENF13XzPDcMQ1qqr127drBZPQAAwIZDBdBXr16944475Bf48/m82+2eOnXqO9/5zuZ5AAAAAPDKcubMmbvvvjsIgq2tLSm4KMvS87z1eh2GYZIk0k2h67pSStO04XAYx7HrulEUWZY1Ho+lhfny5cvD4bAsSymDrus6TdN+vy+vIhPT29vbeZ4HQZAkSZIkdV3rur5er2VXw263K3l0p9O5du2aNDjLWHQQBDIKbZqmdHREUSSXkqQ4y7K6rrvd7mq1CsMwiqLZbNbtdtfrtexe6Pu+Umq5XBqGITG0BNyGYbTbbdM05/N5p9PRdT1JkqqqpO5je3u7LMu9vT3btiV2B4BXp7/39/7eL/3SL2matnngRk3T/PZv//a//tf/evMA8Ip2w38xzpw589RTT12/cqDdbruuO5/PpYZMKVVV1eZJAAAAAPBKce+99953333tdrvf729tbdV1LQXNURTdcsstaZquVqvd3V1N086dO3fq1CnLsqQoQ3otpFvZNE3btheLheM4URTZtm1ZlqZplmU1TdPtdvM8T9NUIl3piVZK9Xq9OI47nc56vU7TtGma/f19pdRoNJLh6KtXr/b7/bqui6JQSs3nc8uyTNOU1FteVB4bhiFN0IZhyHC03JVhGBKUSLW053lJkuR53jSNDFAbhqHretM0RVHUde04TlVVcRzv7+/LcHRd13EcV1W1XC4vXrz4zDPP3Pj5AXgJOXv27Mc+9rHNVfzo/MVf/MW9997red7mgRut1+uvf/3r999//+YB4BXtUBPQSqk4jg82OCZ6BgAAAPCK1Ol0brvttjvvvPP48eMyp9w0TdM0ly5dOn78uKZpZVmePn1a0zTZM1CC5rvuums2m+3t7e3u7i4Wi9Vqtb29LZFxt9vd29vb3t6WJNcwDMl/JQhO03QwGBiGMRgMsiyTnkPTNM+dOzcajebzuZQfBkHgOM54PJ7P57quz+dz13VXq1W325V+57Is67qu61r2J1RKSdfzcrm0LCsMQ8uylstlXde9Xk9eXUo2pKkjy7IkSXzfdxxnsVi0Wi0JoCWJlpy6aRrJtbe3t5fLZZ7nlmXZtr1areI49jxPaqmZgwZesv7fe+992yOP5Ftbmwfwwsj/TGia9lemz0opz/M0TfvMZz6jaZr80xV4ZbP39j730Y8eKoC2LGtzSSmllPyyHQAAAABe1obD4YkTJ3Z2dvr9vuu63W5X1/Wqqvr9vhRu5HmuaZokvE3TRFG0u7ub5/lyuRwOh1EU6bp+6623jsfjra2tqqrOnz+/s7OjlFqv10EQ7O3t6bo+HA6luCPLsoNxaXnutWvXut1uVVVhGJZlKbXOuq4vl8vBYDCdTn3fD4JgvV4rpXRdN01zOp0mSSLnhGEozdGapgVB0O125/P5dDrd3d1VSs1mM6WU7/sHuxT6vn8QQ+u6LimzrutS9JEkyXq9LorC931N02QCSeIVx3Emk0lRFE3TVFUlkXqv19N13XGcp556igAaeMl62yOPPPjoo91z5zYP4IVplKqU+v++/OXNA9/f//P2txsbpQTAK9T89Gn1K79yqAD6nnvuOXhsWdZB7vzVr371YB0AAAAAXnZ833/961+/vb3dbrcty+p0OlmWHT9+XFJm2VcwyzJd1+u6dl3XcZz5fL61tSWJraTMdV33+/3Lly9LTXOWZceOHSuKIgzDvb29siyl3LmqKinBCMOwKIqTJ09evny5rmtd1y3LktLnqqparVYURWEY7u/vm6Ypk9TS3TwcDmezmexG2O/3ZStCmaTWNE3+0qpt29/61re2trZ6vd5
qtZKB6263O5lMTpw4Udf13t5e0zQSQxuGMZ/PNU0zDGO1WsnNeJ6XZVmapmmaHrSClGUp2w+apinT3FJmned5URSu67bb7Z2dne9+97s3fsAAXiryrS3S5yNVluXmEvCq1z13Lt/aOlQAfX3Q/NrXvvbJJ5+87iAAAAAAvPzceuut99xzT7fblTRZ2jZkkleqkJumcV230+kkSRJF0Wg00nV9NpudOnVKepAHg8G1a9f6/X5RFIvF4sSJE2mahmG4Wq1arVae588888xoNGqaRoaUpWS5qqpr166NRqPFYrG7u5skSZqmdV1LUhzHsWEYMgotk9fT6VQOSZe0ZNxSdhEEwXw+t21bYmiZjDZNU5pADrJj0zSfeeaZwWCwv7/f7XZlmPrq1asSQ4dhuFgspIRaKbVardrtdhiG0kNdVZW0hSillsulbdvdbreu68lkUlVVXdeO48ht27ataZr0X2981ADwakBjLfD9HCqABgAAAIBXjAceeGB3d3cwGFiWVdd1q9VSStm2LdlBGIZpmrquW5albdvj8di27dOnT08mk+3t7cFgcPHixZ2dnaZpkiQJgkCGkdfrdavVkoFiabEIgmBnZ2e9XjdNYxjG/v5+r9cbjUZScLFarZRSk8mk2+1KbJ2maVVVuq7HcVyWpYwVF0XR7Xan06mMQjdNs1gser1eWZZhGEq/h2may+VSMmtpbZZbksJoqRkdDAZySLpElFLdbne9Xtd1ned5u92W5Ho6neq6btt2nuee50lrhxRxKKWapinLUtf1JEnkozvo4rBtW+7KNM3z588fbCAEAK8eBNDA90MADQAAAOBVodVqve51rzt58qQMC+d5btu27/u9Xq8oCgmd2+32arWybdtxnKIoBoPBcDicTCbL5dJ13TiOTdMcjUZ5nh8ExxcvXkzTdDAYrNdr3/frup5Op91uV6aVlVLtdjtN06IoiqIYj8dhGCqlwjCM41h6MzRNk2HkLMvkZgzDiON4MpnItLKmaWVZSuGGpmmz2SwMQ9M05V1IS7Vt21EU+b4v+w02TRMEgTR+aJpmWZbUdyyXS03TVquVFDfLzoFZlhVFIRG2DDvLBoNxHHe73dlsVhSFrutKKblJx3F6vd56vY6iaL1eW5ZVlmWapged0d/+9rdv/OwBvPjMJJmfPk0Lx9GhggP4XvPTp+3x+FABtPy6+3t/bJrm+nUAAAAAeGl66KGHer1ep9MJgiAIAkmQu91unufr9dp13V6vlyTJYrFwHMd13eVyubu7O5/PTdM8fvz4eDyuqsr3/TRNy7Ks63p/f38wGMzn893d3TRNpcF5vV5LQ4Vt20VRXLt2rdfrzefzTqcj+azMPgdBsL+/L5sKykyx1Fb0ej0ZxK7rWinVarVardZ8Pvd9P0mSXq+nlJpOp5JBT6fTfr/fNE0YhrJLoW3be3t7RVEc5NF1XQdBsFwulVJN0/T7fWkRMQxDXnFvb6/X6w2HwyzL1ut1VVUyhS0hted5MgG9Wq2yLKvrWtM0Kfqo67rT6SilgiCQZNy2bSnLXi6XJ0+e3N/flylvAC8Rn//gB/Vf/dV8NNo8gBdGfoN4xw8yAf34pz8t/6TdPAC84tjj8ed+53du+LN+5syZp5566voV8eM//uObS0oppb7yla9sLgEAAADAS4bv+294wxtOnDjRbrc9z5NZXcMwOp2O7DeYJInruq1WK0mSOI6PHTsmY86DwWBvb29ra6uqqrIspUajLMtut3vx4sXhcCglGHmeyxx0FEVBEByMEmdZFgSB7FiYpmmv1/M8T8afsyyrqqooik6n4zjObDaTzELTtLqukyQJwzAMw9VqJetlWcqZ+/v7ZVlqmiaFHpqmyeP5fD4cDquq6vV64/F4NBrVdT0ej3d2dqIoknMMw1gsFpZlWZZlmuZsNpOGaLmIzD4nSeL7fpZlWZbJCzVNUxRFu912HMcwjOVyee3atYN1z/OUUhK7yyi03G1VVZcuXXr66afH4/GN3waAF83Zs2c/9rGPba7iR+fLX/7yqVOnNlefyzPPPPPX/tpf21wFXtEONQFN0AwAAADgZecnf/Int7e3ZUL52LFjmqZNp9MzZ84URTGZTKbTaavVGgwGstVev9+XGV5JaWWWWXozdF0vy1LC2fl83mq1pMJif3/f9/2qqiSS3tvbC4JAwmil1Gw2831fKiykAFqiW9/3ZTZ5Pp8HQSAn5Hmu67qu667rOo4zHo+73W6SJFVVaZq2WCzCMKzrWoo7yrLs9XoyyKxp2mg0Wi6XhmFMp1Pbts+fP7+1tWUYxmQyGQ6HV69eHY1GZVnecssts9msrmuJzk3TlPluTdNM04zjuGkaub2yLCVHVkqZpum67nq9dhynruuTJ0/GcZxlmUz85XkuZdBZliml0jS1bVsC97quLcu6fPnyxpcCAK9UcRw7jrO5eiP55+fmKvBKd6gAGgAAAABeLtrt9k/8xE+MRqPd3d1OpxNFUbfbvXr1ahiGMuAsLRO+73ueJ8XNsjGgpmmWZWma1u12p9OpjBXLjLBt26vVKgzDqqqkOkNiVmnSkIxYxpyXy6WUPsudSIGGrusSN+/v73c6nXa7HUWRDCNLx8VisciyTNM0qcjwfd8wjKIoZF5bei3CMLRtW6JhmcWez+e2bR9k0BKdu66bpqnneaZpXrlyReadDcNIkkQqR4IgGI/HQRB4nleW5XQ6NQxDkugoiiTUHg6HcRxL38hkMqnrWtf1IAiUUpZlXT8Vnue54zj9fn+1WmmaJrsvGoaRZVm32y3LUt7a5pcEAK8sn/rUp37mZ37mrwyXNU379//+32+uAq90BNAAAAAAXjkefvjhra2t0Wjkuq7ruhIou667WCxs216v13meu64rg89XrlzZ3d1VSt1yyy37+/tyBV3Xz58/v729LW0YSinZQlDTNGlMlsFnqW+WnQANw5AIWLYf3Nvbk7Lp5XIpIXIQBGVZyuaBrVbrYHR6NpsFQTCdTjudznA4vHDhgoTjuq7LPHW73b5w4YKEwrIZYBiG0vtcFIW0QsvAtUxeG4bR6/UWi4VcZDQaSfOGRMxVVc3n88ViMRgMpDOkLMuTJ09mWTaZTHRd1zRtuVzWdd00jaZpg8HAcRyJ7KuqyvM8SZJWqxXHcafTSZIkz3PDMCzLKorC933LsuTjsixrNBrZti0RPwE0gFe8D33oQx/60Ic2VwEopZQyrv+h3+/PZrPrVwAAAADgpa/dbj/wwAP333//yZMnT5w4kWVZXdfSViF9zdIssbu7K9sDzmaz7e3tXq8XRZGu63meV1UVhuF0Og3D0PM8aWper9dxHEtthe/7dV1LE4XsKyjzyKZpdjqd5XIpo9DS3dFutyWl1XV9tVoZhuH7fr/fL4pC1h3HKcuyLMs4joMgkM0GZVdDeV3f913Xnc/nvV7PsixJmTVNk7e2XC5d1x2Px9JkPRgMDMNYr9dN01iWlabpzs6OxL6u6yZJIim5TDFLxXNVVVEUdTqd2Wzmum5d18PhcLlcNk2j6/p6vZYR6SRJPM/TNK2ua6WUzG
V3u13HcfI8lznoPM/b7bZhGGVZttvtuq6lncNxHHndOI6Lotj4ygDcTG94wxueeOKJzVUAuCkIoAEAAAC8vN1///0/8RM/cfz4cdM0TdNUSqVp2m63JW+VamPXdcMwXC6Xs9lsMBiEYZhlmYzxDodD13WjKErT1HGcLMska+71ehLpKqUk3vV9f7VatVqtuq6lCbrT6Uj1hGTQnudJLfJsNnMcxzRNz/OqqpIk1zAMz/OKopBEWE4uy1IOSaLtOI5t2xIua5rWarUk5rZtW3YU1DRNxpnH47FhGFmWGYZx9erVOI51XZ/P557npWkaRZHv++PxWAaTt7e35/N5XddyTbkB0zQlF5ablDaPwWCwXC7lM5Rs2rIsx3HW63X1bCFJlmVSxBGGYRAEpmnKhyydHpLXa5pmmqbE1hLx53l+w9cG4CYigAbwIiKABgAAAPBydeLEiYceeujYsWMyddtuty3LKstSKjikfFkSYdu2q6oajUZFUUiQulwuoyiSlFliVsmm0zRN07RpmizL9vb2fN+XygullOd5B9PBi8VCqpwlxVZKtVotx3Ekbi6KwjRNwzCm06kk0YvFQnot4jj2fT+OYxmLlsYM+XE2m1mW1W63syyTnazSNDUMw3VdWXEcJ0kSGVKW2WfDMOQtH7zHuq7lzOl0appmkiS+7587d066O3q9XtM0juNIOcZBoFxVleM4dV3btm2a5nA4lBv2fT9JEsuyJH+XT08plWVZq9WS8epWq1WWZVEUBzcpWxE2TWOapm3bdV2Px2OZyAbwoiCABvAiIoAGAAAA8LJ055133nvvvbqut1qtbrfreZ5MHEvoKXv9lWXZ6XTyPK/rutvtXrlypd/vG4ZRVZXv+3meS5q8t7eXZZkM8A4GgyRJyrIMw7DdbktEKzGujD9XVSWNEzIK7ThOEASS0k6nU8/z5vN5u91erVZN07RaLcuylstlp9NZLBZlWTqOY5qm7/vz+dwwjMlk0u12R6ORxOKtVktCbWkF0XVdUuYsy4IgkDA9jmOllKZpstuhpmnyXuSyeZ7LdLM8llHlVqullNJ1fbFYeJ4XBIFSSj6og8HqPM+LorBtu91u7+/vj0Yjy7LqupZ7qKrKNE15xSzLJKCXD9wwDBl8rqrKsizTNKMokg9furPlfuSJG18igJuDABovXBAEv/ALv/DII4/83M/93Ote97osyy5cuLB5EvBcCKABAAAAvPxsbW3dcccdYRh2u93BYLCzs6OUsm1bwuhWqzUYDJRS8/l8OBxKrqppmuSkEig3TSMNy1mWDYfDOI6lbfnq1au9Xs+27fV63el0ZK+/OI7b7bZpmsvlUvqaXdeVWFmmp8uylBroTqdT17Xrut1ud7Va2bY9nU6lSaPb7XY6HWmdNgxD0mGpWpZLKaXSNJWoutPpNE0jFRZ5nvu+LzPX0+l0Z2fH8zzp3JBxadd1JeqV0ePt7e0syyShXq/X8nIyKx1Fkeu6kqRfu3at0+lYlrW1tSUFHXIDpmkWRWFZVpIkkiPL7Hbr2daRXq8nV1NKyfUdx1mtVkVRSIjv+74ckpFqpdTBG6GIA3hREEDjhfu1X/s113U//vGPf/KTn9zf3/+lX/qlS5cuXbp0afM84HsQQAMAAAB4OfmxH/uxBx988O6777711ltvv/32fr/farVarVbTNDIjLHnrtWvXkiTZ2toaj8dKKdu2V6tVkiS9Xi9JkiiKJA7u9/tZlqVpurW1tb+/v16vpYXD931N02TWOIrEM7V5AAAgAElEQVSiIAgkupWGDek7DsMwiiLLsvb3913XzfPc87zFYiFxs8S4cmOu6zqOs7+/r+u6VFFLnjufz8uyrKrKdd35fG7bdpZl0lnhuq5MYUurRp7n+/v7lmW5rnv58mXXddM0nU6nSqn1ei3DyBKO53m+Wq3CMCzL0jAM0zS3t7elJKRpGpluLstSBp/lXdjPdk87jhPHcRzHcsEgCBzHKYpCmjeSJOl0OvJJSuR90Phx/Yy2hM4Sheu6XpalUqrVahVFkee5FEwDuMkIoPEC2bb9y7/8y7/5m7/53e9+tyiKy5cvO47z+te//s/+7M82TwW+BwE0AAAAgJeB0Wj0pje96W/8jb9x6623bm1tua57+vTp5XLpuq6M967XawmjoyiSno08z59++ul+v19V1XK5HI1G3W53uVzatt3r9fb394MgKIpCTpbGDE3T0jTt9XpRFLVarfJZnufJxQ86oFerlaSujuNIghyGoed5dV0vl0vTNC3LkmniIAikbUNOlqnnOI4laJZIV2aoPc+TLmbf969vmq7rWrLj5XIpBSCLxUKuk6ap53mtVitJEtd1Jexer9dpmpZl2el05vO5XKTX62maJrfaarVkcjlNU9u2Z7OZbduSLzuOIxeUd6rruud5SZJI3Czv+qB2oyzLuq7jOJYsW95yURSSd7uuK5etqkrS8KIoJO7f/HYBHDECaLxAVVW95S1v6Xa73/jGN+Tvsnzta18jfcYhEUADAAAAeEm77bbb3vjGN77mNa85depUr9c7duyY4zjSMlEUhRQZx3Hsuu5kMsnzXPqXV6tVmqb9fj+OY8uybrnlliiKJpNJv9/XdT0IAsmXq6qSggtN04bDoW3buq4XRSHtFrJrn2VZ6/XasqzFYmHbdpZllmXleW7bdhiGrutK6Cxj16ZpdrvdJEkMw5DIWH92I0Tf913XjaLINM08z03TlLYNCaDlvXQ6HXmKvKK0SFdVlWWZruuWZWmaZllWp9NZr9eGYXQ6nTRN5bUOCjcOjsrrOo5TlqVMcMtwtwwpO46TJEkcx0VR2LZdFIVlWZ7nyQfbNE3TNFIY4vt+t9s1DMO27aZpJHGWcWlJuj3Pk48uSZI8z8MwrOtaZsB1XW+axjTNpmkks87zvCiKza8ZwFEigMYLd+HChb/1t/7WO97xjte85jW9Xu/q1av8QhGHRAANAAAA4CXK9/23vOUt9913X6/XO3nypPRj1HWdJEkYhpZlxXE8Go2yLJvP53EcSwmGlEXYti0DwseOHZMZXqVUp9PRdV3i3Xa7XRRFGIa6rl++fFmGiD3PK8syCAJd1y9duuR5ntRNBEGwXC5lXjgIAtd1lVKr1co0zfF4bFmW1HF0u912uy2bEEpHs8wUV1V18H+1PM9bLpdymrzHOI6vT6uLopDI2HGcKIpms9l6vW6axvf9gwno8Xg8HA7TNK2qStZlalspJcFxq9XyPG+9XssegIZhSOrdNI2k59Ixbdu2zDtPJhPf9z3Pi6Ko0+nUdV0URV3XUiQtNy/ZdFVVSqlWqyUXHAwGtm3LcLTMQUsmLnm6aZpVVUmnh/SKKKXW6/Vqtfo/3zGAm4IAGi/c1atX/8N/+A9f/epXLct66KGH3va2t00mk3Pnzm2eB3wPAmgAAAAALzm33HLLgw8++JrXvKbb7e7s7EgVcr/fz/PccZymaeq6lmaJOI5llz+phnBdd2dnx7IsGQdut9t1XXe73SiKyrL0fT/PcxnaXa1WEis7jiORdJ7nq9XK9/0oimQxSRL5U
df1IAhM05TqDOlrlsvKMPVsNmu323KmtGpUVbVYLHRdV0pJY7JEzEopSZ9932+1WhJGx3FsmqZEwHVdS/9GFEVhGEqCrOt6WZYSahdFEcfxer1WSum6Xtd1mqZxHIdhuFgspGpD7k0p1TSNRMYHSbQky8vlUiapZd7Z8zypH8nzXNd1Ca/zPJfR6U6n43neZDI5qCU5CM0Nw1iv10VRVFXleZ6u65qmyXNt21ZKxXEsQ9zyxLquy7LMsixnN0LgJiKAxgu0tbV15syZq1evTiaTJ5988gtf+ILnee94xzs+//nPb54KfA8CaAAAAAAvLW984xvvvffewWBw66237u7u2rad5/ktt9yilIqiKIoiz/MuXrwo6W2e59Ip0Wq1siwbDAZVVUnkahjGYDCQZgxN0yQwVUoNBgPLsqShQtO0KIra7Xav1zNNU9M0GfKVxoxer3ft2jUZrJZh4aqqZP661WrJExeLhVLK933btuu6ns/nmqZpmua6bp7n0rmhaZq0bXQ6nVarJQ9k3llGpCXnNQxjPB7btp2mqWVZMjJc13Wv1zMMY29vb3d3N01TTdMkSTdN07Isy7KkQ0M+hKZpZrOZvIQ8V9f1g89EKSUZsWmacifyFDlB0zT5kHVdb7Va8lzJkZMkCYLAtu3VapXnea/Xq6rKtm3DMNI0zbJMfkkgK6Zp1nVdVZX8wkApJVc2DEPTtKqqxuNxlmVyPwBuAgJovEB33XXXu971rj/8wz+s61pWiqJ4+OGHP//5zzdNc+O5wCYCaAAAAAAvFbfffvsDDzywvb3tOM7x48d1XU+SRNLb8+fPy/CshM5hGCqlgiAYDodlWR47dkw6JeQ6URQVRdHr9S5cuOC6rqZp/X5fgtqmaZIkkUleqdqwLKuqqiiKHMdpt9tST5xlmW3b+/v7Umcs7ROWZWVZJpUXcleTyUSiZE3TdF13HEemiVerla7rUl4h48PL5VJ2I1RKSeislFoul4ZhyOx2URSy15/9bMtHWZYXLlzwPO/8+fO2bUvnsmEYsn2i5MLylmXcW9d1XddN08yy7KBwQwpApMG5LMuqqqSjQ9O0g6IMz/PCMJzP52VZyiHP8w5mwKuqkrjBcRzTNNfrda/Xk0vJzo3ydJkrlwB6uVzK/UuSrut60zRpmiZJouu6UkrTtPF4LF8WgJuAABov0Hg8fvjhh1/3utdduXKlruuTJ0++853vfPLJJ//8z/9881TgexBAAwAAAHjx3X333ffff/99993X7XZPnTolXc8ysFxV1bVr15RSsuWdxNO+72ua5jjOfD4fDofT6VSC0aqqwjAsy1Ji5TRN1+u11GhIlfPe3l5ZlqvVyvM8y7IkJ51MJkEQSDwqo74SmIZh6Hle0zTL5TJ5dl/B/f39VqslUXIYhrZtF0XhOI48vdVqTadTz/MWi4WMKh+EzuPxWMo3lFK6rnueV5Zlq9WS/g2p3ajrOo5jx3Gm0+lBBGzbtqTAi8WiLEvHcdI07XQ6B28zCII0TSVZVkq1Wi25W03TpEZjsVjIh9btdqfT6UEMLbchKb/MOy+Xy6ZpWq2WZOhyVKbCpdOj1WrZtr1er6XW2fM8mRyXDm4Z95YYer1el2VpmqZSqq7rJEmkikTX9f39/SzLGIIGbhoCaLxAdV3/p//0n+655563vvWtP//zP/9jP/ZjTzzxxGOPPVY9+9drgOdhbi4AAAAAwM315je/+dSpUweFD2VZhmGYZdmVK1dM0zQMo9VqDYfD1WpVVdWxY8eSJJGUVo6uViuZtJ1MJqZppmna6/UkR261WpPJJI7jfr8vpRmSOCulHMfJsswwDGmuWCwWEqcOh0PLssqyrOt6Pp8XRRGGoSyuVqvlctntdmW+2HEcaeHodrvj8dj3fZl3Ho1Ge3t7nU6n3W6Px+NOp+O67nK57Pf7Mp7sOM5yuex0OmEYjsdjaXMOw1By8KZpoihqmmY8HmuaVhRFt9tNkqTb7Ur/hpC0dzQazedzx3FarVan08nzXOaaO52OZVmLxcI0TRmLlh9932+320EQXLlyRdM0wzCm02mv15tOp+12u9Pp9Pv9qqqKotA0TSk1mUza7Xa/379w4YLUm0iuPRwOz507V9d1Xde6ruu6PhgMnn766W63W1WVbdtN03S7XU3TZHK8qirpM5F4+vjx48vlcrlcbvxJAAC8ZCVJ8pGPfGRzFTgEJqABAAAAvGjuvvvuBx98cGdnx3GcIAg8z9M0rdVqpWla1/V0Oi3LUiLRJEnknOl0WhTFcDhcLpdhGLquu1gsdnd3Jei0bTuO49VqJc0YrusahtE0TZZlsqiU6vf7hmEkSSIZruu6YRj2+33Z1VC6lT3Pk7FfTdPa7bYM9kpRxnq9tiyr1Wrt7+97nic1GtIBXVWV3I/UbiwWiyAIHMeRqmXpVm63247jyECxaZpxHPd6PdnbUCnV6XQk8tZ1fT6fN02jaZqUfkRR1Ov1BoOBdIP0ej2JvCXerapqtVq5rislIUopmct2HEcCbgmmZV7bsixJw23bTpKk1WpJyi/3k2WZdHHEcSxtIYZhGIZRFIVSSq45n8/lTsqylA9ZVoqikOtrmibj21mW6bquaVpd13mey9Sz3AxD0MBNwwQ0gBcRATQAAACAF8eb3/zmO++8czAYDIdDmSPWNE3aIS5fvpym6fHjx7vdrszzKqU8z5OMVfofHMfZ29tLkqTf78uMs1KqaZokSWSEWSklGbSUciilyrLUNG2xWEgdR1EUdV13Op0sy0zTHI1G0pssrReyLtm37/uO40RR1Gq1ZELZsixZlAR2uVxKQh1FUefZnQbb7fZqtbIsq91uy1BwFEW2bU8mE9u2pfRDnh6G4WKxSJJkPp/7vn/u3DnbtnVdHw6HMtNtWZak8xcvXnRd13GcdrtdluX29rYUMSulNE1L01RaL8qyDIJA07SyLG3blg9NKbVerzudjm3b8l7yPLcsq2mapmlc153NZu12OwzD2WwmM+DyWhJMF0UhMbdSSkJnSasXi0XTNHLDsmLbtlx2tVq1Wi3J8eWbretaKWWaZlEU165dI4AGbg4CaAAvIgJoAAAAADfbiRMnHn744ZMnT54+fToMwyiKTp069Z3vfEfX9bIsZdRXxpC3trak5KHb7Uq/RL/fl8pjwzAkvVVKSRGHJKpbW1ur1cowDJnw7XQ6Msmr67qkn5IIS1JcFEUcx/KjUupgSlqi5CAIZBxYLthuty3LWq/XdV1fPxa9XC5lrjmKIgmdlVKdZ7coXCwWhmGMx2PLsuTNSjeIjEUrpSQxD8PQNM2madI09TxPPqgoihzHybIsDMO6rjVNy/NcgmB5rf39/W63OxqNZrOZjEtrmlYURfls73On05GneJ7neZ4E6FmWybxzWZZSEl3XtYxC27ZtGIZSyjRNuT15X51Op9frGYYhU8+maWqa1ul0NE2zLKsoCtM0dV1vtVq6rssUuWEY3rMV0nLZpmnW63Ucx0opudvlclnRHwocPQJoAC8ifXMBAAAAAI7SAw888OCDD25tbR0/frwsy729PcMw
zp8/n2XZ/v6+UqrdbrdaLQlDr1y5IoO08/lcioaTJJGENAzDY8eOyfCv5Ji6rruua1mWtEPIyyVJ0jTNdDrVNE2qLZqmiaJI5pc9zzMMYzabJUlSFEW325VRZdlI0DRN13XlKUopObPdbkug7DjOQa46nU4l5pbp5vl8niTJaDTyPM+yrNls5vu+53nz+VyS6/Pnz+u6ruu6UmowGJRlOZlMLMsyDKPf77uuq2maaZrr9VqGiy3LUkr5vr+zs3MQo0dR1O122+32fD4/ceLEaDSSKWNN0wzDcF3Xtm3pfXZdt6qqLMvyPJdz5vN5HMeu625tbWVZlqZpnufyolItout6lmVZlskXpJSq6/rgTPlAlFJlWUogLtm3/AqhLEs5QdO0gzMP3m8YhpJNN03jOI4cBQAAr1RMQAMAAAC4SVqt1k/+5E/u7u76vh+GYVEU0iMxnU7rug6C4Pjx447j9Pv9uq6rqrJtu3i2tti27TiOm6YxDGMymciOfK7rZlnW7/cdx5E6i7qu0zRttVp1XR/Ezfv7+2EYdjodGVVumqZpmqIooihyXVeqNuq69jxPmiUkQpWkW6aqB4NBXdfr9Vq6nuUECZR1Xfd9P8uy5XKp67qUPpdl6TjOarXSdd3zvDzP4zi2LCsIAtu2ZZZZpoZlRNrzvPF4LPH0crm0bVteK4oiGcFeLpdBEMh+g0qpqqrkIlmWOY5T17Vt22maDodDeV2Jg6VjxDTNdrvd7Xb39/flI6rrumkaSagnk0mv17MsS64sr2tZlu/74/FYzpdYX9f1g/uR5yqlVqtVlmVFUUgcr2laHMf/P3tv9iPHkZ9rx5qRe2ZVV1OkxB6tXsaLxoAHNmAPxobt//9yLrzMaKEodtealfsSEd/FO5Wg5xycY/uIHwHp91wQ3dm5RETWDZ968Ubf95DmnHO4b2SiYcbrul6WRQihtUa39R98TgiC+MGhBDRBEO8REtAEQRAEQRAEQfz/wZdffvmrX/3q2bNnn3zySZ7nQojz+YxOZ+SO0zSFdIbA7bput9t57+u6fvHiRd/3cRzP84zCB+ccY2wcx7Is0aERx3Fd10oprTVCvlrrMAyHYTDGHI9HpZS1Fs0eyPByztM07bquKIppmtCzAcmbZRkMtZQyTVNIcGtt3/dwymjn4JxHUXQ6ndDmEYZh27awrsfjMQzDqqpgpde7nc9nTHae52EYEKk2xqz9G8uyoNoC6en7+/u6rpFH9t7HcWyt3Ww2nHPY4a7rUOWcpilsr9a6bds1cZwkSdd1QojdbielxF+ttV3XYQWwSpDazjljjJQS12K5cBATgTjGU4QQaZoyxiDloZiTJOGcwzizWyv3MAycc8YYZDQaVNq2naZpHMdpmt7+qBAE8YNDApogiPcIVXAQBEEQBEEQBPHO+ed//udf/vKXH3300eeffx6G4bfffvv69WtjDEQwnOYwDEqppmnqur5er1EUff3119frdbfbvX79Ok1TpRRSupvNBsUOVVV99913aI1wziHdDFMshGjbFoo2TdP7+/v9fo/7T9OEDfqcc+fzOY7jy+WS5/nHH38shECSFzUUSincZ5qmJEkYY845KSVS2BCssM9BELx48QKjUkpBxUJVh2F4OBxwt8PhEIZhGIZoVTbGhGHYdd3r16+llJzz7XYbx3FVVX3fv3nzRim13+/XRhHv/fl8HsfRWssY2+12Dw8Pzjlr7el06vv++++/d85N0/Tw8LDdbqdpstaiFURrLaV0zi3Lkuf5ZrPZbrfee7jmZ8+eDcOAvmZrLeccK+C9H8dxfSImuCwLbDjnnHOOzPg4js45zrn3Hop5WRZI53mesyzDmVrrKIpg2DebDdLWt48JQRAEQRA/QigBTRAEQRAEQRDEO+Szzz77h3/4hxcvXiRJkiTJ4+Nj3/dRFLVtm2VZWZYI4bKbVrbWGmPwr/ceNRdIPadpWtc13DHMZhzH6H9wzgVBkCRJURSXywUm9HK5KKXyPEdiFzdEBYT3PkmSaZqWZen7vixLBJmjKELMGQ3RXdehtQMZZFyCUHOSJGssGn0UkObzPOMEWOlxHJGwDsMQJ7dti4Lp0+mE2DL6KOZ59t7jkjzPl2VhjEH7RlG0LMtms8ER7/0wDGjkYIw55+7v76uqgj7WWqMnBGlxY0wcx977dVR932PFrLVYFiGEEIJzjsw4BhmGoVIqy7LD4YDODWyuGIZh3/fTNBlj6rrGynddV5blOI7Q6HEcc84xGKzVMAz4ggHvrus63MRai2y4c+6tTw1BED8wlIAmCOI9QgKaIAiCIAiCIIh3xS9/+cu/+qu/Uko9f/4cvcwoEeacY1O+KIq01thqTynV9/2LFy+MMeM4wpZO01QUhbU2CILj8SiEGMcxDEO0SF8ulzAMITqnaUJyGX0X1to4jpVS6IJA2tpa65xTSqVpejqd4jhelsVa23UdXDC8rXMOgjjLMuwQyBgLwxDyepqmMAyHYWCMYahwu1EUIekMXXu9XqFi53lu2xYjyfN8u93Wdd00DfYPxDCcc5Dj8zzneX4+nyGmoWUxHa11cqtyds7BIxtjYNillFEUxXHMGOv7nnMOgb7OWggRhiES39vtVghxuVxgz7XWl8sFQ8VDnXMQ0DDIQRDgu4EgCKSUWZZJKaHvcUKWZXhWEAT4bqDrunme118RPJdSMsamW+GG975t26+++qrrut9/YgiCeDeQgCYI4j1CApogCIIgCIIgiHfCv/zLv3zxxRe73e7jjz82xnz//ffX6xWqVEoJC4zqBqSD+a06GZHkqqrSNA3DEDnoYRiyLJumCWloYwy2FmSMRVHUdR0Mb1mWyPDCI8PqQn3Gcdx1nbWWMQYXjOdCuTZNg3wujHbTNIgtn8/nMAwhnbETIKotIKmVUuhTRkQahRLDMFRVBcOOfQWnaWqaJo7j0+nkvd9ut1rrx8dHftuKMIqicRxhuqGbgyDo+x5LtCwL5xwWWwihlII3Z4ylaYq5d12XJAlOxprUdY1I+Ol0wj27rsOSCiG6rguCIIoi733XdXEcw2Ufj0cI6yiKoJvbtp3neR0VMs5N08zzvNlspmlCqLlt22EYtNZQ0k3TJEkyz7N+aytCVD/DZVdV5b1njAkhHh8f3/7kEATxg0MCmiCI9wgJaIIgCIIgCIIgfmD+8i//8u/+7u8++eSTsizv7++//vrr169fz/MchmEcxx988EEURTDC3vuyLK21kJtofhiGAVnaeZ4hVfM8N8ZAcSKWa61FyBf1DlrrruvgmrXWaZoOwwBnCj0KS5umKSo7vPfoi4jjGElqlHLUdc05hw5GhDlJEq01Ys6othBC9H0Pn4uYdlmWWmtIW6017PD1ehVCIIXddd2yLG3bFkVhjIHU1loj942HJknivYcNl1LO81yWpTEGd8PBcRyVUnEcT9O02+0ulwvWBDUXqMLAHdq2xaOFEIwx/GnVytj5EEuB+xtjlFKQy0opqHl8W5BlGWNMa40UOe6JTudpmrDgnPMsy5RSuAqL2fc9Fhmfir7vwzDknM/zfD6fsVCc8+P
xOAzDOI5vf4QIgvhhIQFNEMR7hAQ0QRAEQRAEQRA/JL/85S//4i/+4qOPPoIwbdu2qqrdbnd3d5ckyf39PQKwRVGM45hlGeQjfCgEcZZl8zzDmbZtm+d5Xddwl8aYpmk458hQ932vlBqGIU1TGGp+qx6e57koCghW5KDjOEah8zzPcNAwyIyxzWajlHLOcc7DMETsd1mWOI6RF0ay2BhzvV4ZY1DAzrk4jhH4he9u21YIEUVRURRKKWstUs946LIsyFmj6wPa3XuvtUZGe+0JYYwJIaZpgqrGWmEia9ZbKbXb7bTWWBkYbWvtNE1wylEUrQ4da4K5oOEafn8cR2vtsixQ5yjoGMcRbtoYI6VEnHm9LULQuA8OwlNj0cZxhMhmjNV1je8GOOfOOa01EtCY3fV6hYCWUl6v17qu/9PHiCCIHxQS0ARBvEdIQBMEQRAEQRAE8cOQJMk//uM/fvTRR6iYQAextfaDDz5IkoRzDiMMg9z3/bNnz6SUULHDMERRhNzuNE1JkkzTlKaptXaeZ8YY7Cpitlrrvu+FEHVdG2O892iTkFJio7xhGBCaRsL6er0i8oyD1tq7uzsYzziOYVSHYVhu5RVt26ZputlsoIyllGEYtm2LPo31QdM0YY6MMQS3oX2DIHh6ekKIGALdGDOO42azuV6vMLyQ6UhSo4kCchbB4XmeoaGllHEcI7B8d3eH/RW7rsuyLAzDtbgZC8g5j6KoLEullDEGPt0Yg5j24XBAJwljrG1bzH2z2eAca23TNGEYKqXyPD8cDvM8Q2pzznE5LHwQBAhBN00zDMNms4GJnucZOn6aJiGEECJJEnhqCGvO+brZoLUWizCO4/F4PJ/PGBJBEO8IEtAEQbxHSEATBEEQBEEQBPED8PHHH//N3/xNHMeffPJJURT7/Z5zvtls8jyXUnrvm6bBDn4wj1rrruuapkH9xd3dHWQ0NGjf91EUwUpDrcJvQuDO84zgMxQnTDT0q1IKdczjOEIcXy4XmN81PgzNyhgLggAH2c0gL8vSdV2SJEopzvkwDGuxxjiO2+3WWmuMqes6y7Ltdgv5i9A06i/QvJGmKS6BwoaN1VoHQYAgMwacpinsLWMM44dcxk6DEOJ93+d5HkXR+Xy+u7vDEsVxjOkjJ45h4EHee8wFGe3wth0iYwwG2d4qnpFchkfGQWOM1lrcWjuW2x6DuC2WDuFrznmWZUKIaZrKsszzPI5jJKDneeacIxzdti1auYUQ+/0+DMNlWTjn3vtlWcZxFEJIKS+Xy+VyefvjRBDEDwsJaIIg3iMkoAmCIAiCIAiC+H/l888///zzz1++fPmzn/3scrksy7LdbtM0lVKWZfmb3/zGWpumKTxyGIbjOKKnmDE2jiNjrG1bFFDsdru6ruFV33bQcJ3Y5Q8iFQobA1BKIZjcti1csBACmWK4YMRvOedxHBtjDodDFEVQtEIIdE0kSXI6nSCUkfOFU37bbo/jWNf1ek9EvJHpTpKkLEuUZmAAeIS1FoFuIQTmbq1FtPlyuWRZtiwLRoJyDKjYu7u7VaajaQQPnecZ3tx7jyk75/AUVIIg8rxa6a7r8GgMD+o/iiJrbRAEMMXTNOHkKIpwedM0sOdRFJ1OJ9x5HEecuYag27YdhiEIAiEE5/x6vQ7DUBRFlmUYCWNsGAbo/iiK+r7vuo5zjg8AVq/runEcB6qBJoh3CQlogiDeIySgCYIgCIIgCIL4f2K323355ZfPnz+Posg5VxQFgsnobv7qq6/KspRSwmYyxoZhgGs2xkBfDsPQdR0UMKxoXdfwlauD7vuecw4TjRzusixBEOA4BHHTNGmawk0jBZwkCeLJ0zRprY0x3vthGCCyh2FYlgV2VWu93+/R1xHHMQxyGIZIKIdhaIzhnCPka4ypqso5lyQJXGp82/FPCOGcW4V1lmXn87lpmiiKqqpijOGSZVkQdkb9xd3dXVVV3vu+78uyhArHqIwxbdtGUdS2LW4LLQ6Bi0EOw7DdbsMwRNJ81c3H4/EPxDR0s7XWe7/eEC8II5uT3pAAACAASURBVMFpeZ4HQRAEAS4PgkBKicgz3peUEkeklMMwaK0ZY2maMsZgqDnnjDEYaiml9x5fOSAJzhjDImCtxnF8enqapuntzxVBED8gJKAJgniPkIAmCIIgCIIgCOJ/zsPDwy9+8Yv7+/sgCBhjcRxLKYuigJdEOcPxeKzrWillrV2WJY5j/AuxO45jGIbwlYjWIjML+7w6aLhgxhgcdBzHiBvDYkNiYnM/1DsgzwtnzRhDKnmeZ+fc+qBlWeZ5XpalbVtx680QQlhrOec4J0kSpIyRksYl2MwwDEMhRNu2kOkIdEOOW2urqpJSMsY2m40QwnsPLwwrfTqd0P4hpYzjGMY2uO27iMFAr8/zbIxxznnvza39YxxHjHyt8jDGIOyMZDQC2tDKENPr8KDUIX9xprhtM4hhO+eccwiYI/XMOcfjcNo8z4yxVYKP44ibcM4RndZaY+5YqL7vpZSYSNM0+NN67TiOl8ulrmu8KYIg3gUkoAmCeI+QgCYIgiAIgiAI4n/IBx988Md//Mf39/ebzQalEHd3d1rraZr6vofrDMOwaZplWSBhsyxzzuV57pzLssxamyTJsizwtsaYIAjyPD+fz9CvcRy/fauu6xhjxhgIVu/9mpXmnPd9j0QwttFbloVz7pwLw1BrvW7EhzM55zhTCHG9XqMoCoIAKnYcR3sr4ljdK7ywMaYoCkjqMAyllEmSHI/HMAxxQ6UU7sAYg96VUvZ9vyyL9z6OY+SgsU8jnov74CnGmCiKnHNN0+AOw63NmTHWdR3umWUZYywIgiiKvPcYeV3XkMiIS6+Xo14D6yalzPOcMaa1jqJoWRasZJqm+/1+nmcUW8MmQwqjyBsZ6sfHx2maNptNlmVJksRxjFvhO4BlWSD98Zqcc3Ect22bpini4ZfLBSuPyeJTtCzLsixv3rzBohEE8S4gAU0QxHuEBDRBEARBEARBEP9DvvzyyxcvXmy3WyGEcy4IAufcNE2wkH3fPz4+Ho9HrXUYhpvNBifkeQ7/iF+NMfM8a63neU6SBD8rpSCOoVyRg+66DobaOQehaa2F8IXGHYYBJ3DOGWNKqa7rOOeQy6fTCcXKkLMouJBSdl2ntUa+WNxKKuZ5xmmc8zWJDJGNNPE0TRieEGK73cL8QiV3XbfZbKqqiuN4LZ5GZhklIRDBbdsaYxhjeJAxBmlxqGocUUohTn69Xp1zWJMgCHAVMshYQyTH4bXRsIGnJEmy3+9xJhbnbclurQ2CAJFkxpjW2jmHVyCEQMMGkstSynVVoa0hqVGyAUMdBMHlcmnbFh3QUsrj8YjIMz4huD9Gu9/vp2nC+mDKh8MBwyAI4geHBDRBEO8REtAEQRAEQRAEQfxP+Oyzzx4eHtI03e12nHOYUKUUY2wYBrT65nmOCovtduu9d85lWQb7nKbpNE34N8/zZVnyPIeKHccRwd48z8MwbJpmNdFrtBZFHJDOEM34FXHgpmlQ8aG1btt21dA4Uym1nimEuF6v8zw753
DzKIoQiw6CwHsP09reqp8hstM0haGObrv2dV0HnSpu7RlSymVZEIvWWmdZdjwenXNxHIdhWFVVmqaw0v5WmrHf77fb7eVywVrBvDPGwjDknMPjO+cul4sQIo7jNd3c3HZBhIbGQcyOMSal1FobY+DrMcLD4YDTYLSllHVdj+OIPDKOrKoaph55cySyV21d13We58Mw4NckSYQQ0zQppTAkzjmizXhc13VSSoykbdswDOd5PhwOX3/9NVVwEMS7gwQ0QRDvERLQBEEQBEEQBEH8t/n888+/+OKLPM/RBSGEiKJonud5npVS4zj2fW+MMcZst1tjzOl0goq9Xq9a6+fPn/d9/8EHH6CVAk5TKYXsbRAEzjnYT2PMbre7XC6wsSiCgJZFkBa7/BljoI+99yjrkFKmaXo6ncIwhM6GM8X+fgj/Itib5znnvK5rdyvrQLB3VcN4Cud8nueu66DUcZ8gCIIg4JyjZgR3wIP+4A5SSqUUpgZzrZTCCGGQsRTGGLhmjNnfctlN00y34mZ267DGqKqqur+/t9ZCE69nRrciDqzYqpvlLcsspYTRxpTTNF01d1EUWAp8qYCbCyFWNZ/nOdbfWtv3/TRN3vt5np1zcNbw0dba6/Wapiny5kKIZVnGccQPUkrYdnyonp6efv/xIgjih4YENEEQ7xH1hwcIgiAIgiAIgiD+b+x2u2fPnvHbFn/GGMYYFKS1dhgGOEchxDfffGOMQVlw13WIPO/3+zAMm6aBcRZCvHz58unpKcsyFGJIKeu63m63SZLUdf3y5cs3b96kaYo0dJZlVVVZa6Fo8zw/nU5RFIVhiAwy/mSthZ4Ow1ApxRhDIhhh5CAIrLWn0ylJkiiKyrL03qMLgnN+d3f37bffwpNidvM8r38VQgghNpvNPM+Pj49pmhZFobXuus5au57gnHPOLcsCJx5FESLG3nucwDnfbDbOOYxWKYUxTNPEOcevMM7Pnj2r63q/32utYWw551hwBJBxE8aYUmqz2aASBKNljEH7MsastU9PT3EcJ0lSVRUOxnGM9cFXCDgNR6y10zQxxjAMnINvFzDNYRhQOQIvL6X03o/jGMcxHp0kCXqfMV9rbRzHeMr1ejXGNE3jnBvHMQxDCkETBEEQxI8P8YcHCIIgCIIgCIIg/o88PDwURbHZbD755JP7+3vG2DAMMJXX67XrOkSAh2Hw3mdZhp0JOedlWUKbKqWapun7/nq9CiGMMcfjcbPZaK2LolijvnVdv379Wil1OBzKshRCZFk2DMOyLFmWLcsCx7osS5IkXdf1fZ8kCf4EIbssSxzHfd+v5pcx5pxDNwg0KDTxPM/jOCLJK4RgjBVFsQaHz+cz4sbLsizLssrf9XwEfqdpgl6v6zpN0xcvXuR5Pk2Tc+5tEYwjh8OhaZokSfD01RdLKe/v798eD2MMfyrLMo7jPM9xPuaIM5dl2e/3VVWFYcg5Xw/i8ufPn5dliZWRUgohvPe73W4cR8TP4Zfv7++HYUCieT0yTVOWZX3fwzUnSQKnPI7jPM93d3dFUeAmeCNYlnmevfd4BfM8Y5GttUVRzPMM/45XgGw4XPbtI0YQBEEQxI8HSkATBEEQBEEQBPHfIE3Th4eH1Tvf3d1BODrn9vv9Bx98gJINlD5771GXDOc7juM4jug+Rm53u93WdY1CCfQ5IDp9OBzSNMU2fXVdF0URBMGyLH3f53mutcbmdeDZs2dN01hrjTFt2yqlOOeQ4Agyh2FojBnHsWmaNE3TNL1cLpCknHPUUOx2u6+//poxNs9zEAT4IU1TiFfY5/v7+7qup2lCpPrZs2f/8R//4b1PkgSLA7/Mbr6Ycw4nyxhDOhijhVmGhF0NsvceD4qiSAhRFIX3Xik138qmgyBo2xZemDHWtm2aplEUbTabruuenp7u7u6maYLMhX12ziVJwm/uG1IYC4UfyrK01o7jiESz9x5HhmHQWmMd8jzvuq4sS5SNHA4H3FxrzRiz1mKJ8Kv3fpqmPM/btvXeM8bKstRaI0WObQZRw+1vHdxCiHmelVJZltV1jdkRBEEQBPGjgTqgCYIgCIIgCIL4b/DZZ5+9fPnyww8/zPM8CIJpmr799lvG2Pfff18URZZln376aVmWjDHvPeqVrbVZlgkh2K2QYe19llJOtx3t0NeMjemklNbaMAz7voeEvV6vURRltz0MwzAcxxFmGRdKKVHQ0fe9vW3B5713ztV17b2HcmWMLcsSRRH+7+O911ojoM0YQy8HbGyapofDAdFd9B0zxvq+X5YFvz49PW23W1yCKaRpyjm/Xq9ocxZCQFhba2HMUXbRdR2kPK7K85wxVlXV3d2dtVZrjXOQKcawMZ0syw6HA8yy9x7DEEJA6UIx43IsOJYOajhNU8bY5XLZbrfIHUspMbxpmowxUkrGGBqcx3FUSuFI27bDMOBLAsYY2lSUUmma4lnOOUTXUWw9TVPXdVmW4ZLz+dw0DT4GWuumaVCWgjVZPwZVVeFbAUYQxDuAOqAJgniPUAKaIAiCIAiCIIj/KlmWbTabPM+99+M4bjab8/kMVfrFF18IIZIkgTbdbDbX6/VyuSDsfL1elVJ3d3fH41FrLaWM4ziKorquUZERx7G1tqoqiNQkSaBTkySBkg6CAMHnNE2Rdy6KgnPeNE1RFEqpYRjwJ+SR4ziGAH18fIQOrut6t9ulaSqlPB6P+AGKFtlemGIhBAQ3Y6woCmstpCokNXTt69evN5tNmqZKqbZtYYTFreMYtRveeyFEFEXb7fbbb7/d7XbQ3+wWRsbPUkokhTebzdv3ub+//+1vf8vfaoKGQ0dCGWp4WRZr7bIs4zgizc1u3dDOOXSVOOdQyuGcm6YJHSZIHAshdrtd0zTe+2EYpJRa6/v7eyzvPM8Y5N3dHZpVsFxCiHEcEXmGZMcA+r733nPOh2FIkqSuazwXKhwJaPRxt22LBY/j+HK5BEFQVZW7tZQQBEEQBPEjgxLQBEEQBEEQBEH8l4jj+IsvvkCvcVEU4zj+67/+KzT0p59+aowxxizLEsex915KyTlfliXP82VZoiiCDB2GAUleIQR2rrter4gtoxFYKTWOo9Z6mqayLJdlCYKg73sp5TiOWZYZY2Bd4WQ55yjEgDPFMJqmQTgXTlNKiRYL55xSSkqZpul+v4fAhWKWUqJno67rKIq01lVVRVGEiK59K+MMnbosC0x6kiTH49EYA5Eax/E4jtM0IXaN5DKWwlqLpHaWZd57ZLq7rsMU4IuttVEUnU6nZVl2u51SKggCTBkDQEJ5FcSQwnmeY1RhGE7ThIlnWYb7QN8LIZA4nucZKx8EweFwQM/1Oh3OOR6Bvmkp5X6/b9u2KIo1HH29XlHVDa8N0RzHsdb66enJGNN1XVEU+A4AD9VaY4RN0+DMeZ7rusZaSSmrqjocDquXJwjih4US0ARBvEdIQBMEQRAEQRAE8V9is9m8fPkSO861bWutTdM0z/M/+qM/GoYBPRX4K5ox+r7/6KOPllthxTzPwa3g4nK5JEkyjiNUL8xs13Vpmo7jiJPhmr33uBAJ3
HEcESvOsux4PK6RWyllGIZFUZxOJ9wNWlkIEcfx+Xw2xmDM0LgQ01rrtm3RobzZbC6XC1qnMcgoiqSUy7JUVcUYC8MQFnU1tsYYyGXOubUWBSCQ1IfDYbPZWGtxSdu20zTB1SqlGGNQ5HDoGFKe54+Pj7gtY8wYI6VcL8TIhRBo0pBSXq9XhKa999batm3nebbWLsuCCQoh4HyxFFDSnHO8BdxtPRKG4TzPEMpRFIlbLzNjDL0i0zQh9I1S6dVQw9QPw4C3jOxzlmV93wshsJLee5yDN9W27fqhQtq6aZrr9QrBvf6JIIgfEBLQBEG8R0hAEwRBEARBEATxX+KTTz559uxZGIZZln388ce73S7Pc2MMVCljDE4Zvriua4jO7Xbbdd2yLPf399DBiO5aaxljfd+nabosS1EUxhikj5GShgA1xuB8Y0zXdbDPxpjz+YxqCMYY5xwi+3g8ovoDHRQYGGzsmmtelgU2Nk1TiGnn3Gaz4Zwju11V1eqpGWPoi0B6GtIWkWdUYcAvr33NOAG8nZtGK0hVVQgjn8/nJEnWyPO6gN57WGncGRei9BmnrTobuWbnXJZlKBhxzl0uFyz4dCsMgZXGG8E6NE2DoeL+3nsUUi/Lsgrl6/W6vkfOOa5CihmX4FfExhljyLMPw8A5x8egaZokSaCkz+dz27ZZliFzjT/hWwcMxjmnte667vHxcaIOaIJ4N5CAJgjiPUICmiAIgiAIgiCI/zu//vWvP//884eHh91u9/DwEEVRVVVJkkRRdD6ftdbYjg8tGfDLxpi+76dpQsD21atXURSN44j+h6ZppJRBEDjnOOdVVQVBwBhDTBgOGjsQJkmS5/nlcoGDRooWNhO0bYt8NA6itiKKolXj4j5N00BMw8ZCyF6vV5ymlIKNhfWGC5ZSosjCWovTIGTZrX/5bU2MyDNi103TLMsCTSylhKTWWnvvUQailMrzHHI5CAKtNWMMQtZa670Pw1Dc4tVvT0cpld22IsT94dPbttVaQ5TP84xnIbwMiY/5JkkihNBa4whjDIMfx1FrLYTw3uMIwuyMsXmeMRgo5vW2+CtWpmmaLMswWVR2CCGwShhJFEWn0wlPwX6MzjnGWNu2zrmqqpqmOZ/PlIAmiHcECWiCIN4jJKAJgiAIgiAIgvg/8fOf//zXv/71F1988fz5c0jP4/HonFNKjePYNA2Sxfv9HuHlsiyjKEKxb5IkENBd12VZhsxsXdfGGLhO7/3lclmWxXtvjEnTtCiKqqqMMatoHseRc16WJQLLSZJwzlFAjNO01ggvI0qMPDL8Jnx0kiSn08kYE4YhLDYyv03TBLeGZWjcJEkwO39rxmCMreFi3A229+3I86qkcQTamnOutV5z02va192i2VgBpRTufDgc4jjebDbn8xmJbxhhFCUvb2Wl8TilFHozoMXR+4ExbDabOI7x3cAwDAhBY9GstevgvfdYB855EARvH8EPiKUHQXC9XodhyPMcrdAINRdFkaZpEARBEHjv0dPNGEvTFJLaGKOUulwuqONAvBpRdCGEtRZRazzufD6/efNmWZb/9OEjCOIHggQ0QRDvERLQBEEQBEEQBEH870mS5J/+6Z/+7M/+TGuNaGpZlkEQoPwhiiLGGLTv2xvZHQ6Htm3RUJHneRiGa4ED2i045wjqYo9B5J211sMwwN5KKWFy+74fhgF72UHXIsUMAwsdDJ2Kg0gW29sWhfa26R8ccVVVzrllWbIsQ3MFYwwlHtZaKaX33lqLp6yaGLoZttfeOjdQxIwc8dtKGuIYHhb1F8utKhpatqqq7Xa7LItSijEGuYwEMYophBBCiCAIoPiFEEmS4MK3DfiqbnErIQRemZQSVhpjgJe/XC6bzSbLMgj3cRzHcSyKAosQRdHT09M8z6h4DsMwiqLD4VCWJdQ/7iOEmKZpHRJjrOs65xyG0d12HZRS4gOApfPeR1HkvcdWhMMwKKXeTkBjIn3fIwRNCWiCeEeQgCYI4j1CApogCIIgCIIgiP8Nf/u3f/uLX/yiKAqlVFEUL1++RPGC974oCmR7rbVZlkH45nkOa+y9D8OQcw5puyzL5XJ5/vx50zRxHE/TpLWGm66qKssyOEo4aJhTY8yqbruui+MYDRu4bV3XGCHaJ6ZpKsuyrmvvPUqiwzAsikIIcb1ejTHwv2ioQJ4Xuplz3ve91joIAmNMlmXGmDAMD4cDfrBvxaIPhwOmCQPLOUeyGFKYcw4lbf6XMPU8zxgnYwytI9baVS7jquv1CiuttZZSwlyvF3LOm6bRWmMxcQQJa6VUEATzLQRd1zWizTDFjDHMcX0p6OzG9DFOrCTnfF389cgwDIhsz/Psvcd88XRrbV3XeZ4j5ny5XFBdwjnHxDnnXdd57zGetm2zLENSvqqqOI5x/7qucfN5ntu2pQQ0Qbw7SEATBPEeIQFNEARBEARBEMR/Yrfb/f3f//0XX3yBkPJHH32EEK61dpqmLMtWaZim6TRNazw2TdN5nqGY67pGPNZaC58LZ41cs9Z6WRbI4mVZ0jTVWqM4eBxHKWUcx0VRXC6X1WUbYxATVkrBQQ/DIG7FDoyxpmkYY1EUwTh3XbeKV3XrTYb8LYoiiqIgCCDHvff+VrjBOWeMrVlpRJJxUCkVhiHcsbolr8uyROOHtbbrumVZoKTXW62+G8Hh0+m0LAvcOmwv1K1zznuPx2VZtl6II2ma7vf7ZVmwgFC9GAC+BlBKCSHyPIdKNrcCEyklYsVwx0IIdotdI4wsbrnyYRjyPEdOeZomuOyyLJGSrqoKGWdkzLXW3vtxHJVSjDFodNh/LOb1ekWY2hhzPB6jKKqqqu97vPqmabA41lo8ehiGy+VCCWiCeHeQgCYI4j3y+2+8CYIgCIIgCIIgGGNffvnlRx99FIYhY+yTTz4JwxCuOc/z77//nnOOfz/88EPG2Ol0evHiBYK9m81GCIGY8ziOWZaN4yiEqOsa6jaKImvtPM9JkrRtG0VRXddSyjRNz+dzmqZoeQ6CoKqqsiwZYx9++OF3330HOwynCXOdJEkQBEg9QxBba2GiYWmllAgCc85xzvV6fXh4aNv28fFxnucgCBhjcOhSSq113/eMMaXUOI5JksDeoi1aKdV1HWMMyjgIAiHE3d1dXdd93282GxhwIcR+v7+/v8fEoWjtbUdBuNo8zzE8jIFzvtvtvv76a8xibTWZ5xlZYNQle+9RV6KUwgSFELgQwhfHkSNmjHHON5sNRDxMN+ccRcxKqe12i60XYfM555hR13VlWcJKYwH7vsez8A0E6kr0bV/HoihwRCmFV4YKDsZYHMdt2y7L4r1P0/R6vSKyba09HA5JkjjnmqZBiYcQAu+C7DNBEARB/CihBDRBEARBEARBEL/n008/fXh4CMNwu92i7tk5l6bpb3/72/mtfoY8z9u2hUQ+n8/wleM4Ih7rvY/jGLFWIcQ8z1prlDYkSTIMQ5ZlWuuu65IkuVwuSqk8z4MggJPt+14pFUVR0zRCiM1mU1WVv1U8Q1XP8zzPMyw55Klzbp5nBJzDMKzrOssyXIu4
9OpYMRjI2TUWba1FBYdSap5nnLksC/bZi6II1rhpmqIo4G0Ph0Pf97gV5HLTNEopRJLhr9HCgbyz1ppzjpAvxgk1DDscBEF4627mnKdp+r9eiJw1QtB4F9575J3Rv4yANjZ1xHy992sTCLYQXEeOoDoEPTLRRVEgAT0Mw5qATpIkDMPz+Qw9je8AsIZ4lUmSwJvjdUOI411j8KfTCbF0KeXxeFS3ALv33lrbti2e2DTN+Xy21t4+jARB/JBQApogiPcICWiCIAiCIAiCIH7PX//1XxtjgiAoy/LZs2evXr0ax3GaJpQ+Q1YWReGcc84hzOtvmxAiJ4vMLBLExhhYZrjOIAistUVRGGPWWHGSJEKIaZqklOj30Fq3bTuOY5qmQRDA7UJZokrYGJNl2fF4xNORTQ6CIMuy0+nknOv7HipcSimE0FpD1EopUVthrUUFBzQuHC6sMb81KcMIL8sihOCcx3F8Op1g5JERzrIMN7e3MuXkrYaNIAggWLE+3ntclSQJY2zdUVC81YkxTRMyznC4nHNjDMQ3LkQLxx8o72EYoPgx+LZtlVLrCuOVee+x+DiCpo4gCFAJLaWEc4ekTpIkjmMsC1w5XD/nHK8J82qaJkmSpmngspumwbUYibUW6eZpmowxbdtKKZ1zeLnogO77HoN3zo3jeDweL5cLCWiCeEeQgCYI4j1CFRwEQRAEQRAEQTDG2M9//nMhRJ7neZ5P0/TNN9+gXAKxVudcmqb2VgM9TdM0TfOtBrooir7voSPhH733CAvv9/soioZhCMNQa933/el0yrJsnuf7+/vj8cgYq6qK3QRulmVlWeL4+XxOkiRNU+fc8XgMgoBzLqW8Xq8vX77ErSBeoYmLojgcDkopeEwI0GVZGGNpmgohnj9/fr1eHx8fcQLnHPaTMQZH7L3f7XZ1XZ9OJ6XUZrMJw1AIcTqdjDHoDMERzjn8LGMMUV/GWJZlzjkhRJqmsL37/R4Pwgla62maIOjXpo7tdtt13dPT0263Q0hZCPH4+CiEwOPgo8uytNYKIeDHhRD39/dN0zjn5rcKPX73u98xxnArIcR+vx+GAeIYY7DWDsOAm+Aq6G9MnDEmhMAPd3d3kMUITW82G1SCiNuXDXme41d26+DG4IdhQAKaMfbmzRuIacbY8XjE9wHee3wfgBQ5Fs17zwiCIAiC+NFBCWiCIAiCIAiCINjDwwN2HcyyrG3bJEmyLINCXZYF9naaJujIcRzHcYTfHMcxSZJxHFEfgTQu9p3rug5ZXYR20a2BPo0gCNI0hbfF46SU0zQh9dx1HcSoUipJksvlAoMZhuHxeBRCIOCMPo0oirz3SPK2bau1RuI4CAIpZZIk5/PZOReGoTFGCNE0DeLASPWmaeq9r6pqs9ngKUqp/X4fBEEYhtZaeSs1hoNGUYb3flmWMAxPp5O1FrFf7z2KMrAI0LKMMUSSkW5mjKVpiqvWogzOOUZubxsYXi4X9IeghQMvAkFpay3WExf2fT/PM27FOeecCyGCIMDjhBBZlmElseZaa601XhZWW0q5LAv+RYoZlRpKqXmerbXzPBtj8N4xKXsrVKnrGgnoqqqyLIM9l1LCcUPuh2F4vV6dc23bYhZYh2EY+K2h+3K5/Pa3v0WYnSCIdwEloAmCeI+QgCYIgiAIgiAIgn3++eebzSZNUymluAV48zyHxJzneZqmMAzR+XB/fy+EEEKEYcgYG8cxiiIIyjiOEYuu6xpmNo7jtTgCwVichspg9As752BOoyiCh4XkxaPRFLEsC0woYywMw1UuIzYbBAFsNe6GczB4xhhSt5DCa3Y7CAKtNeSvUmpZFpzAOWeMXa9XWGbYUu895xzVGShTNsZUVQWxO88zFiRNU5wGt85v3c1YAchZ3H912ZDLWZZh5FiBKIqu16sQwrxVppFlGeYSBAFcMMT66XQqyxJH8ERUW2Dk1tqqqpBxxiXLsqCKevXmaB3J83y9tmma1UeHYXi5XLquK4oC6l8p5ZxDqh0rwDlHuYqU8nQ6oVwFczmfz1EU4QsApRQ+FYyxZVlQLY03sizL6XT6/ceRIIgfGhLQBEG8R0hAEwRBEARBEMRPnSzLHh4e4ji21gohHh4e0MW8LMt33313uVwYY957pHTRa5Gmad/3iNBqrdu27bouiiLIZcSikcyFUC6KYpompVTXdXEcV1WVpilKG3a73eVycc5JKcdxlFKGYThNk721ZIzjyDlHpHpZFu89/CxjbLPZoPcZt3LOlWUphKiqKgxDCOWmaZDkbZpmDV9zzudbTjmO48PhACUNQQwlDT8Lx6q1z7cfwAAAIABJREFUfnp6Wq0xvHaapsfj0VqLzfqCIDgej33fh2GIiTPGkiQRQiiljDE4B7ey1lprx3GEEcbK40KI4yiK9vv9PM/OOa017rZ2WJdlGYZhEARVVUErY5WWZcG3AsYY9GUHQYA7BEEAmxwEAXT8NE2cc+89gu1w/W8noDE8vFP2VmbZWlvXdZqmEPGXywXVK9DZcRx777uus9ZO0xQEQdM01tq1LZpzjl+997D8VVV99dVX4zjiA0kQxA8OCWiCIN4jJKAJgiAIgiAI4qfOxx9//PLlS8Rv8zyHwcyy7De/+U0YhtvtFonXNE2dc845pdT1eoUvDsPQ3rbaa9s2DMNhGOI4nqYJ1cPzPCNP7b13zq056MvlspY2wIeiogFXxXFcFMX635M4jqWUWZZB+HZdhzYJzjnnHDIaoWMhxCrKEd1dG6uXtyovIENX48wYC261FZzzJEmOx+M8z/M8rycwxhDrLooCSeFlWTjnMLywxkmSYDzWWgwGV61Px4AZY1rrpmm22+2aMj4cDtM0rcllXBvc9kJEdjiKIqwARDZjrKoqZJY3m02WZZgy8s7rAJB3Xme33BLQSDQjs4xCZ1RqDMOA4PY6NkSV8zxHqFlr7b0fhgGKOQxD732WZVje4/HYNE2appDdSEBHUWSMwdNhzK21l8sF7RxYavrfKEG8O0hAEwTxHqFNCAmCIAiCIAjiJ02SJAjwYjs4xFoPh8PhcHj+/DnUM+wzgrppmiKW672Hh91ut4wxbDl4PB7DMOy6DiHovu8ZY8hQ53kOxYksM/brg67Nsux6vSZJ0jQNLC0kbFEU2LbOWmuMOR6Pz58/H4YBHcpCCHlrLobThN7d7XavXr0SQjw9PeV57r2/u7tr2xa2FDOa5xlSOAzDNUGMpyDGm+f5siyMMShapVTTNNCvwzAURaGUUkp98803MNTGGHbrTUbCelXkYRju93v4es659x46GOuAfLSUMkkS772UEoNENHieZ8bY3d0dUsmHwwFpZRRqa63v7++hxYdhwH12ux36Q4ZhcM4JIZIkwXpC9wsh8AbxdqSU2+22aRqUbEgpMaR5ntE6zTlPkgRLAYl8PB7HcSzLEi/r6ekJdRzr3aSUiKtba5MkqapKaz3Ps/ce31JgdnEcO+eqqpqm6XA44ANJEARBEMSPDEpAEwRBEARBEMRPmpcvX7548QIxW0RoUVXhvYeZRX55WRYYQ4RV8UOSJJCMnHO
tdRiGnPOmaaApx3GMomgYBiGEEAJiOssymM1Vtq4h6HmejTF93yNAzW5dz03T9H0vbs3Ufd8j/FvXNdwo3LFzDvoY80IMGZlc8VYRB85J05QxtiyLMUYIgchwWZbLssDAtm07juM8z+sdvPfQ0Mj2KqXO5/M68WmapJTuVsq83NLW0PSQxdM0CSHgpjnn2GMQRhjrBi88z7MQwloLUWuMmaaJc84YS5Lk6elpWZayLNcR4kJciyNt287zjG0VwzBEB/Q8z1g3xljbtsMwINGslEJzSFEUdV2jlBmXpGmKEo/z+ZznOUoznHPw0XD6eMuc8zzPkbk+nU5t28ZxjIA5vpMIwxArjyljOlVVra58nmd8UUEQxLuAEtAEQbxHSEATBEEQBEEQxE+ajz/++OHhQUoppUSK+XK5TNOEPeW01sjhJkkihJjnGQlciONpmsbb3nfjOCqlpmna7Xargx6GIYqivu/xM4RykiTDMMA1M8ZwhyAI0jS9XC7GmPP5HMcxfOUwDFprdG5A+yK5jLwz/Gmapvv9HnIZDrRpGq31Wo4Biz1N0yqgOedd183zjAqIMAzLshyGYbo1V6RpKoRAblcp5ZyD5zXGrAoVUXFr7bIsRVGEYai1rqpKShmG4TzPsOFJkuz3+2VZ8jxP0zQIgqqquq5DvcbaXLEsC5R0ftvkEC9inuf1ifC2eClFUWRZhmQ0JovXF4YhYyy4bY3ovYfv1lqjNGOe5zAMhRBIN2OEeKfo3IBQhqaPoigIAvtW7YlSCooZehq/IsrtvbfWRlGEJYVbt9b2fe+cg9pG0Qoi2/gEDsOw3++/++47fOtAEMS7gAQ0QRDvEargIAiCIAiCIIifNPFtfzkp5TiOx+MRxtZa673P89w555zr+x4Ouus6/ND3Pewn8rlt20opsyzDv5fLBVpzGIY4jmGEEZe2t1qGJEm6rlNKee855+fz+f7+fr/fJ0myLMv5fM6yLE3TqqrQ44x7pmmKwDLkKSTvdrvFzTnnQohnz5797ne/E0IIIcIwdM6hqgI5a60153ye52maEOl1znnvx3EcxxHmlDEG877ZbCCIIYKrquKcB0GA1SuKwjknpey6jjHGOU/T9LvvvoMvhhfmnG82m2VZpmmC4C7Lsmma4/GIc6CDl2WB9cY5jLG7uzuk0XH/ruu01k3TcM4557gKRn6eZ6h22HOIZsyLc66UatvWOQdLLoQ4n8/TNJVlCdeM+unNZoMMslKqaZrtdgtljGfhPkKIdfpBEGCh8DUA/uWco1Hae6+19t4nSYKflVLLsgRBAK2PbDiy2HgXWFKCIAiCIH5kkIAmCIIgCIIgiJ8uH374IWK5y7K0bSuE0Fo752A2lVJQmVmWreqZc46GhziO+76fpgkZ5zAMoYAPh0NRFGVZXq/XYRiSJIG8hoCG6l2WJY7jtm2TJIGKLYoC2eokSS6Xi78VgDDGttvtd999B8HKOZdSCiHu7++///77ZVkYYxDNzjkMQCnlvb+7u3t8fGRvFR9P04TzUQ3x/Pnzuq6XZUF3BGMM6nkcR0Sk53mGkpZSwlxjskIIyFYppdb6dDpJKbfbLdo8jsdjkiRYPTx6v993XQfJjrHhKXEcQxYLIaSURVG8evUKJ3DOMSl4bSllWZbIbiuloJXjOMaw4cehiTFyYwxugu8DsGgIHS/LIoSI43iV5kopFHp0XbfZbKSUWOGmaZIkweVo5ICMllK2bbvZbCC7OefX63W73Z5Op48++ujx8RG2Gmrbe488O85Ewh2GGrFoPAvDGMdx/WQSBEEQBPGjgSo4CIIgCIIgCOKny/39/cuXL+/u7rz3l8tF3rojkPZdvSFywfhh/Rk1Gt77vu+NMcMweO+DIECR9OFwQPgXpnUYBs45YwwJXLRtBEHQ930cxwgpQxMj5ws3GoYh6ju2221VVc65ruuklNDZQohlWXAmCj3mea6qijHmnMMJSqllWWBRobbneY6iCOoWm+xBJTPG0KqMlK4QAnsbvv0rY6yqqqIocAeIVGNMFEV4Ck7DHonTNCml8FzGmFJKa50kCSLAzrnL5WKtRREzCjeUUvhrmqZxHCNgXlXVZrPpug4NIdfrdVkW3Bwvsa5rvDIsIGOs6zr4YqzYNE1oIMGrMcbUdT281QF9Op3w6/V6xbXYGRLTxFoJIVC4gV/HceSc4+sE7z1y7liNuq6TJDHGcM4Ph0MYhl3XQd9LKa/XqzFGay2l7Pt+HMdhGKqqOh6P1lrMiCCIHxyq4CAI4j1CApogCIIgCIIgfrp89tlnZVn2fY+YKiovxnG01iLRjLIFWOZxHPHrMAxRFOFgEATWWhhVZGPDMLxcLjDUCDXjB4hImEd40qZpoigahoExBgEtpYSbNsZ0Xdd1HXoe4MGbpmGMRVEEtQ2t6ZyD1IbnDYJgWRbY57Ztp2mapgnRZiEELOoqi5MkwRZ5UMz4FdOEO8YdgiCAoW6aRkq5yt8kSYQQa7MHY2yaJlSXaK2DIJimaZ0p1PbqiNu2hczFYLz3WZYdDgfIZQyPc973Pda2KAr4Yu+9ECIIAihgditRMcagTlpKOc/z5XIpyxIiG35fSok8+zzP5rb1IvQx7jCOY5ZlSZIkSQJvHoahlPJ8PjdNk2VZHMfGmOv12vf9Kq+11o+Pj/gr7DY+GPjYGGOapkFYG849juPr9co5P51OiHKfz+d///d/b9uWEQTxziABTRDEe4QqOAiCIAiCIAjip4sQAq5TSpllWZ7n3333XRzHz58/Z4w556ZpgkT23iPuigsvl0sURVLKpmnCMEQnA0zuNE3jOM7znKZp13WQm03TGGOMMeM4aq2ttdbaJEn6vsflUMPws2mank4nxG+n266AsNjw3d57zvnd3d3XX3/tvYfYhQWe5xlSlb3VoYxorfd+miacEIYhTivLchzHKIqEEM65PM+XZZnnGUoaPchrS8Z2u/3qq6+stYj3spuLV0rBg0sptdb7/V4IIYQoyxLCHXfDLNbVfvXqlRACQxVCKKWMMfg5jmMEk1ddq7X23kMiT9PEGMOCM8bO5zOKNSB/hRDLsmRZBsctpZRSQsQXRSGl9N5jt8PNZoOJQLtvt1us//F4RNO0915rjeNw8ZxzFFVj1pzzw+FgjKmqCl8kBEFQ13VRFFprxtjT01Mcx/JWDBKGYdM0Wmsk2ZHO1lq/ePEC9dMEQRAEQfz4IAFNEARBEARBED9dhmEYx3EcxziO29s+dYgSe++zLIONnaYpSRJITMRpoV+hpK21EI5BEKDjIkkSWFfn3PV6hd6FxMRxpRSanY0xXdchoTzPM1zzOh5r7WazieMYlx+PR1hp+FzOeZZlzjn4Ytzz22+/xZAgQJdlWZaFMQY7fH9/37at9x7H+W0rQmhQnD+O42azgYENggBVxUEQYMplWcKtY6hhGO73eymlc85aC1MMky6lNMZIKYUQ5/OZcy6lLMsSgz+fz1gW2HB4aucczgyCIIoiFI9EUcQ5xzmwzEIIIQSGhCdqrZVSmCNjzBhzuVzu7+/xCNzw7WsxfSw7LDNj7Hq9Il
WNpyPvLKU8HA5d16110ijshqp+8+ZNeNufEOPRWsPFCyGcc8aYNdo8jmPf91EUWWuHYZBSaq0559M0vX79GucQBEEQBPHjgwQ0QRAEQRAEQfx0ubu7Q+o2jmMkguETrbV5nkOqOudgXb33RVEMw5Cm6TAMfd/vdrssy4wxfd9fLpe2baGe+76HQYZtrOs6DEMUa0gpV48ZBEHbtnEcd12nlIKcVUpprZGADsNwnmfkl6dpiqIoCAIUdwghpJTTNEH7ojWCMQYlDemMiczzzBjDRBhjuKFzDh4WAx6GQSkFhY26jM1mA8d6Pp+dc4jxCiHmeUbqWQjhvUd6mt/CwjCwx+MR12qtMaOiKOC7u67DmVmWvX79GpYWQw2CoOs63Bn5YsyR3/b9w8GiKL755huMH1Z6nmfMV0oZBAGmk6bp9XrF3LXWXdetXpgx1rbtPM+rNUaqGlNmjJ1Op7IsER7He2G3HRqx5l3XYXZ3d3eXy2UcR3wD4b0/nU7jOEopMf5pmtI0VUpB4gdBgMYSa+2yLMMwYBjPnz9HeTdBEARBED8+SEATBEEQBEEQxE+RNE1/9atfffDBB0EQvHr1qqqqjz/+2Hv/s5/9zDmHWLRzriiKcRzneZ7nGfUU8KFCiM1mwzmHJA2CYLPZYGs7dG6gfAPpZmhQbGrXdd3T05OU0hgzDEOSJF3XGWO01n3fSylXqwtXu2rc7XZb13VVVeGtS5oxdnd3980338ALQ54uy4LHQbbe39/Xde1uKWnv/bIs0zSVZRnHMed8v9+P48gYg4pljGG++BXOfZ7nNE3P5zOcr7g1aSilkJU+HA5lWUZRJKWEOoerxRSstUmSfP/995zzzWYDxXw6nXCrVfrD0l6vV8aYMQayfpqmZVmw2lprXAjlndw2+tO3Wm0EnNcXFARBmqZwwUqpeZ5xT+99FEX6ltTmnOO4MQazHsexbduyLKWUnHMkoBljcMpo2AjDUEr55s0bhMGhpxljWATc2XtvjEEEHq8A30zgLUgpwzDE8Tdv3tw+mARBEARB/NggAU0QBEEQBEEQPy3+/M///E//9E+fPXu2LEuWZf/2b/9WlmVZlsaYoiicc69evXp4eGC3YmIp5WazqaqqaZokSYIgmKYJUWhozWmaUNYRBEEYhth0jnO+OmiI0TzPwzB89uzZ4+Mjuji01nVdx3GstW6aBl3DSiljDPwsLuy6DhK267ogCPDvPM/Qo3mew1nDxr5dDI1MLuy5EMI555y7v7+/Xq/DMARBoJTa7XbX69U5N922FoQnxaOllOM4TtM0juNut4OwHoYBoWzGmFIK2eRxHL338MLff/89ZDFywVLKy+UCYwtzjWG/fv2ac77dbiFtz+fzPM+w0kEQSClhcnEH5L6llGVZvnr1Cm4Xgh4VIowxLCmegvaS4/HIGAvDcBgGfHmAp8Mvh2HIGHt6emrbFt8fSCmFEFVVlWUJ3QwzHoYhhgRbXVUVrH0URdfrFflrIQTeUZ7nGNvT0xO+PICMZowFQYBg9TzPy7L0fW+tVUp98MEHlIAmCIIgiB8rJKAJgiAIgiAI4sdPmqZ/8id/8sUXX3z66afLsuR5/v+x9yY9shyH1XaMOWdW1tTdd+As0jIFUSYkEZJgQjBkLwwY8L/2zgtvDcjiHdndNeQ8RWZEfIvjKl9r+b2mSBHxLC66q7IiIyLriuKThyenafqv//ovrfWzZ88450KI9957r23bPM/TNL2/v7+7u4OZFUJUVbVarRAcPp1OqOAwxhBC6rr2ff90OuV5Ps9zlmVRFKHgGA7a9/2+76GqYTBhkPH4Qa0157xpGphlHJBlGRQwPDWEJudca10URRRFkOCcc0IIZsIYC8MQAV5o9GVZYIT3+/2LFy+01lDAyHcrpaB6GWMITS/LAkXreZ61dhxHSOTdbtd13TAMcLvb7baqKq11mqZQtOfzmRACj4xfUeuBoaB0pZR1XYtLzQjnHE9xZIwNw2Ct5ZzHcTyOY1EUu90ONpZzPgwDLPBqtcJlgssmhLw7OE4HW42tMMYMw5BlmZRSCCGEWJYF0WxKqdYa9w+EEJvNBuJ4tVpRSgkhYRh2XSeEgIUfhgE3HhhjDw8PeAgh1ogdmKYpCAJ5KXRGyzZjbLVa4ZvAOZ/n+VrSgjsBmE/f99M0uQS0w+FwOBw/YpyAdjgcDofD4XA4fsz8zd/8zc9+9rNrRfI333xjjLm5uaGUfvDBB5CGlFJjjNZ6t9uVZfnkyZPtdjsMw83NzTAM8zyfz2c0Po/jeHd39/j4KISAplytVpDFSBBTSpMkMcZUVTWOYxiGaF3gnCPR7HleEASofoaDhsKu63q73VJKGWNwrG3bQnp2XQfd3LYtrDTksjGGUhoEAZK20zRByGqttdaUUghoQshqtdJaK6VgYzebDcaUUhJCNpvNy5cv1+s11OrDw4NSyhgTRRHGgbD2fR/OelkWpRS8qjEmz3OEeRHyRfq7LMvtdgt3fDweUSrCOR/HEWuM4xgd0JvNJo5jxtjpdJrnOQzDtm2hmIUQuAHAGEOOmDEmpcQDBuGdkcimlO73+6qqnjx5glestdeCb/Qyz/MM042dRDqbUvr4+DgMw3a7vRp52GH4ekqp53moMeGcZ1nWNE2WZdDuvu8Pw7Ber33fx1AQ2Z7nEUIeHx+DIIC7t9auVitCCIZSSs3zXFWVujRKX7+xDofD4XA4fmQ4Ae1wOBwOh8PhcPwISdP066+/9jwvy7LVahWGIUK46/Xa87y6rj/55BNjzOPjo1IqTVOEmsuyTNP04eEhCII4jpumyfP8T3/609OnT2F1q6qq6xrSOcuyOI7xDDohRF3XNzc3TdOkaTrP8+3t7el06vs+TdO2beFSKaVN0zx9+hQtHHEcSymVUlEUwdWWZYlc7TAMYRg2TcMYy7IML2LmCNtyzuG1cWp8RErped5+v//mm28opfM8QzFDGaOvmVIqhIC0RV01IWS1WsERM8Zub29RK4GQ7+FwQP4agptzrpQaxxEJYs45Br965Pv7ewzVdR188Xa7bdv2dDpxztfrNTwvqpw55+M4EkIopWEYUko559ZanJpzXhQFpXSz2ZzPZ0opPHsURV3X4Vc8NVFr3bZtlmXjOOZ53jTNsixlWUopGWOww1eBzhiD98epsfl938OMU0oxFGZyPB7R13ENXOPeQHDprbbWYgJCiOu9h2EYCCG+7zdNgx7qeZ5PpxNuOUzThK9TkiRt2zLG9KXSxOFwOBwOx48PJ6AdDofD4XA4HI4fFb/+9a+fPXv25MmTPM+TJDkcDowxlPwiJNs0zfvvv/+nP/1Ja/38+fNhGGCNlVK3t7eEEKUUpbSu6zAMp2l69uzZarXq+/6bb765ubmZ51lr7XleURR5nsdxHARBVVU4FzTxarWCcU6SBKq6bduyLMklIJymqTFmGAatdRiGaZrudjvf91+8eAEBCvsMc4rYMue8aZogCFAJTQhBs7OUUkrZdZ2UEoLVWpumKWo0hBBXSzsMA6qNCSHouFBK4XhIZPxKKcXPMLa73e58Piulr
LVoloBoHsfR8zzf929vb9u2naYJWhyl0lprY8xV2jZNA93cdZ0xhjGWpunbt28555vNBib9eDzO80wpXZYFyWvP8+DEx3FEBQdW+vj4uNvtsFjOeVVVkNSw7UhAE0LW6zXnnFKKzhBUbNNLInu73UI9c87btlVKyUsc3vM8yGtCCDLUuFVAKUUWHs+fJISUZTnP87UwGmHqPM8550hMB0HgeR78sud5iGD7vo+rP8/zNE048r+/vg6Hw+FwOH50OAHtcDgcDofD4XD8SPj8888//vjj58+fp2na9/08z0opFEFAnhpj4jhGAXQcx0jyKqUIIUVReJ736tWr/X7v+/79/f2zZ88YY1VVBUHw8PBwd3f37NmzOI6HYXj58uV+v+ecI7SLuK61NgzDoiiEENM0UUrff//9ly9fRlE0jmOWZUjLQlxqrTGHIAiEEMuySCm11lmWGWOMMcjYQmvC4TLGfN/v+x4JaJwX9vnt27dhGAohEFWmlJZliQ9KKY0xhBCYXxhtSmmWZcuywEpDuVJKx3HknAshkFlWSkF8bzabFy9eoKMD4xdFobUOgoAQsixL0zRouIaDhiLP81xKyRg7n8+e51VVxTlHlJhdakYglDFDhNAZY/D7lFIhxPl8xgR838dQ4zhGUeT7/ul0opRKKRFGbpoG3hlimlKK3WCMoX8D9hn+VwhR1zVyyp7nQXBDEB+Px3EcN5sNxkFsGXvLGEuSBBODng6CAKvGGT3PI4T4vo93T6cTVsQ5N8bM8zyOYxAE4zheE9zYDZeAdjgcDofjR4wT0A6Hw+FwOBwOx4+Bf/iHf/jkk08QW46iaLVaMcYIIVDPhBDoXUR9l2WZ5xldGcjqWmvR2jzPs+/7YRhaa9GtAT2K9PHLly9vb2+fPn0KE11VlbUW+hJJ5yzL7u/vN5sNZG6e59M0rVYrdF9QSgkhVVWlaUoIOZ1OhBDYZMxzs9m8efMmCAKtNSzter1GQrYsS9jnIAjqus7z3BijlIKiRQJaSglPipy1ECKKIiEEIeT+/t5ai6wxYwzmHQluaNZ5no0xMKdY+zRNsKuo44A8hcNdrVbLsiRJAlOM+DAGJ4SEYQjZyhgTQqzX61evXiHLDDkrhAiCwFqLquhrl0hRFOSd5xmeTicsBKUWjDGcDq/AznueVxQFYwy5Y8YY5Ph2uy3LEse0bbssi+d5cMp932dZZq2FkpZSWmshggkh0NPIdBNCgiAwxgRBgINRJ43rRSlFHwveIoSUZZnnOVQ7hiKECCHglznnMNRSyjAM+77HLRCt9TzPxOFwOBwOx4+U//UfOm02G/w/HofD4XA4HA6Hw/HXwkcfffS73/3u9vY2y7Jnz56labosy+Pj47Isq9UKpvXm5oYQggf9EULgl/EWDOw8zwgyq8vj/tq29X1fKZVlmZSy73ukoQ+HQ5IkeIrgsizr9RpZ2jzPUZ0hhBjHEbngOI43mw2ixGgZHoYhiiIpZZIkSikkYaE4EaRljNV17XleXddobQ7DEP6aEBIEQdu2nHNkbyE3u64LwxAKGINordu2NcbA2FprjTFCCMwEkp0QggMYY1EUWWullF3XrVar8/lc13WWZWma+r7veR76MaSUnHNrbdu246UD2lrbdR1GhsPFr2maonbjfD7DFEspYXURvsY00EGBkDIhBHu+LMs4jtC1Qgj8KaWsqgrSNo5jIYQQomkaWPI4jjFVrTXnfBxHeO2r88VHOOfzPPd9j8vNOUcZNGaOtQzDgKy3EKIoiutCOOdaa0opRDb2WSl13WGtNc6LrcAXA+/O84xOFaj5vu+NMbj0bdtiXf/9hXY4HN8Bv/zlL//93//9z191OByOvwguAe1wOBwOh8PhcPwV88UXX3z++eebzebm5qYsy/P5nCSJ7/v7/Z4QUpal1hrtzFEUeZ5XVRWeaHdzc4MELoKo0MH4YZqmZVmEEMi3wiriaXJ4qODhcJBS+r5vjEG01hjTNI3neZ7nzfOc5/k4jtM0pWlalmWWZV3XMcaQyaWUnk6nNE2fPHlCCBFCtG1rrcWK1MWJP3nyJIqi29tb1HpAeXddB3fMGIMOhqH2Lm3LlFJK6XUyTdNIKSmlvu9fq5MppYfDYVkWenn0H730Pud5rrXebDacc6UU0t+EEESe4XAJIdDccNDy8phBNFpIKTebTdd1CFBzzne7HR6KaK3NsszzPMZYEARYO9LH0N9FUfDLgwqxS8uyGGOyLMPqkJsml3AxNoFe0sdQxmEY4lPYH875/f39breDFOacY6/elcjzPAdBcJ0YtgsZZ9h/jMM57/v+mvtG4zO6OzCZpmnyPMfIkM5CCKwOcx7HUUqptfY8r21bqHaXgHY4HA6H48eNE9AOh8PhcDgcDsdfK3/4wx8+++wzRGWNMah7ho5EHJUQAr9JKR2GAbXLr1+/vr29PRwO1lpEjN977z1UK1ztc5Zl6HwghByPxzzP9/v9+XwOw7DrunEcrbXH43G1Wj0+PmZZhshzXddFUex2u4eHhzRNV6sVAs5Qn3hK3uFwOJ/P6Ee21qJfOIoipRSM9mazefXqFVQpLO3t7S26pLuug9zSeoaFAAAgAElEQVREKFsI8c033wghoJtheK92taoqz/PW6zX2BOUkEKDi0vKMCcAyozUb3RfQskopvGitbZoG2tT3fUrpOI7zPF+LkrF7xhjOOawrNsT3fc651nq1WmEcSF44d845YwxPF6SUVlWFs7dti5qLOI6RCw4uddha64eHh+12i4/gYMaYMSZJEmxa0zTGGOwwpfR8PgdBgFXjaiql8N2glHLO0WcCFX5/fz8Mw3q9hmHHNUJSG6cLguAaaoYW77puvrSXxHEM7Y7v5ziOsNXzPB8OB8hrjDNfKrOFEEopKeU4jviUw+FwOByOHxlOQDscDofD4XA4HH99PH/+/Oc///l2u03TFL0Tvu8jZQyneTqdtNa73Q7Z0jzPm6aJoihJkqZplmWx1vq+vyzLfr9vmma/31trGWOwydM05XnedV1d14yxZVnCMFyv10VRRFFELlUY8zxnWUYpzfP822+/7fs+DMPz+bxer33fL8tyGAZ42M1mczgc4jjGiVC/gIlpra+BZUKIECJNU2SBtdbXwx4fHyFS2SXmjCA253xZFillfnm+X13XeMSfEIJfAr/zPJ/PZ2hcfLzve0IInu9HKYV5hwzFipqmQbb6+i5aRAghq9Wq7/trAhqRZ6UU2ic458fj8VruzDn/9ttvr1KYMVYUhe/74lLBjI+EYXg8HhljV69dliVkMebMGOu6Lo5j7AOltCiKOI4ppShixsgQzYwxXFDP86SUTdP0fc8Yuzpxz/NwgO/7VVV1XWetvX6RMALnvG1b6H5CCJ5PuFqtkI9+fHy8Wnh6CUTneR6GIb6lWuthGLTWxhjcCRiGAdcCX6ppmqZpwvTwEYfD4XA4HD8+nIB2OBwOh8PhcDj+yvjiiy9+9rOfcc7ppXsX6VrYT2vt27dvIXbLskySZBiGoiieP3/etm3TNLvd7vHx8enTp3iEIKVUKXU4HLbbbd/3
UMlIQMNBQ0QyxrTWaZo2TRPH8TAMcRzXdb0sy7IsnPMoirTW8JgITYdhuFqtXrx4cTgc2IX1el1VFcwypXS/3yPdjISylFJrPc8z1gJ3aYyZpgkyt2kaa61SCmrVWtt1HYLDmOT5fO773vd9aE3GGJwvujuEEDgLPLIxBnleWNFpmtbrNUo5jsfjNE0w2pTSd98lhOBdcunQMMb0fY9X8KLv+03T8Iv+xvMewzDEVYMUrqqKEMI5xwyh7xljdV33fc85hxmHF8YCkVxu2xZ6Pcuy169fo14DKyrL0lq72Wyw26fTCRnqNE0ppYyxqqqWZcnznDFGCEF2e7vd+r6P48dxXK/XuBmA+aPcg1x6P67qPI7jZVmu3zq823XdsiyEECwEpSXLspxOp77vocuXZTHGsIscV0olSdK2LXbP4XA4HA7Hjwz3EEKHw+FwOBwOh+OviV/96lcfffSR53nPnj1DPlcpNQwDpCr6GYIgGMfx+fPnnPP7+/unT59uNpuyLH3fH8dRKRWGoVJqu91CrUJSp2kqLy0K1lo4x2mahBBQsXEcI/M7DEMUReM4hmEIPco5R5kGPPgwDJ7n4VdrLR5Gl2VZGIaQksuy1HWNrC6C1Z7nocZBCJEkSVEUxhhrLdK1SZIcj0fkdq9WFw7X931ksY0xaNgghHie17atEAJR4uuLeH0YhnEc27bVWkOGXlsmlmVhjF116nQplEDuWykFg3xV3kmSCCEYY8YYiOYgCBAQFkLEcYzNub+/x6P8hBBQsVpr3/c551DJUkprbVVVWZYRQoQQQoi6rqdpwgellEVRIDKcpim2F2o4SRJcfQSoPc+7zkopBb0eRRFy05gnvLAQYlkWbDgc9zzPMMv49Xw+48LFcYybBDDjhJB5nnFZIbK11kEQGGMIIbhkhBBrrVLKWot3rbXjOBpjlFK4qQCt3/f969ev8fV2OBzfEe4hhA6H43vEJaAdDofD4XA4HI6/Gj744APUbux2u6qqjDHLsoRhiGrdZVnSNEVpgxDixYsXWZZ99NFHVVXd3t76vn9zc2OMub+/h2D95ptv7u7uPM9brVbjOPq+f39/X5blZrNRSqGLI47jsiyllL7vD8Ow2+0Q1IUbJYTAvRpjNpvN69evYXhhwIUQ1tppmuI4btv24eFhs9mgMAQfFEKgGOS99957+fKl1tpaC3e5Wq2Ox6MQAmFnxlie55CnxhhEiYuiCMMQCWhrLeccH5dSIgRtrcVnCSGohGaMRVFEKaWUGmOEEJCz0LVlWV4T0IfDYRiGzWaDXx8eHqBN2aUNAxXSUkrI32EYYG+xtL7vrbWe5yFZjES553lSyu12W5YlhkIZBbt0N2M5aHNmjGFd14w2AukYlnPOGJNSQkMLIQghvu8XRbHf7yGmr5NE0ghOvO/77XaL9TLG8FDK0+mEX+GOsVGEkKvoZ4w9Pj76vg+Lja3GdfQ8jxBijEEqHJXWnPPT6QRXDuv9bgf0PM+Ih1/viLgEtMPhcDgcP2KcgHY4HA6Hw+FwOP46+PTTTz/99NPVarXZbN68ebPZbFDOC+lpjFmtVoSQq8OFB+y6zhgDufn27VtKqe/7cLhBEFRVxTkfx3G/33dd9957743jOM8z57woCjw6LwxDjMAYO51OSZLEccw5L8syDENUOkgpl2WJ47iqqiiKuq4TQkBfbrfbV69eBUEQBMGyLNbaZVmUUsuyHA4HtD1wztfrNd7FYpVS6NxQSkGALssCXau1RoNHGIaHwwEyOk1TzjkhpG3bvu+xLZDdnPM3b954nofUMIQp1uJ5njFmnmdCCGQxsuTWWqhSDCWEgNWdL90glFIsREqJc2EVYRjiXVyIruswZ2hoiF3GWF3XhBBKKZ7jxxg7n88IRF+1uBAC1SV5nuPjx+NxWZbNZgPzezgccA8Ajduc8/P5DNcfxzHWCAsvpby5uaGUFkWBwDVm+Pj4KKVEJh1fmLqujTGUUpyi6zrczyCESCmrqkKNNT5eVdV6vT6fz/iI7/uUUlSd4AtGKa3rervdHg4Hzrnv+33fU0qxjW3bYnDP85x9djgcDofjR4wT0A6Hw+FwOBwOx18BURTd3t6GYeh5XlmWeZ7neY5+CWut7/uPj48vXrz4yU9+sizLMAzPnj07n89SSnQgEEKqqoLVZYx1XffmzZv9fj+O42azefv27cPDw93dXdu2aZo+Pj7udjtIVa01Us/jOB6Px91ulyRJVVVwzUqpNE2ttYwxzrm1No7jvu9hfjFbPPoPTz4UQojLU/tevHhBCLHWcs6bpkG7NIwqpfT29vZPf/oT55xSGgQB5xwPSzwej/v9Hgb8eDwmSQLvjAlAgEI0XzU3uj48z+u6brPZYJ54DCBktJQSdhjxYc/zIFgR2YZvtdbWdY2VUkoZY03TIGlujIFrxgJhouHx8bw+xlhVVdD6eDdJkmVZjDFa667rsASEgt99UCEcLj4Cm8wveWRCiBACewWvfTgcfN8nhEBh4wCo4aZpYIfDMMTIjDEsmTFmrfUuDyT0PO9qwK+nhlDGBLquY4xh/CRJcK2NMYSQtm1xF0RKuSxLURTI0Rtj1uv18XgchgHhbqwatxY45y4B7XA4HA7HjxvXAe1wOBwOh8PhcPwV8NVXXz1//ny32+G5gkmSHI/H+VJ8jKriIAhgRY0xSqntdns8Hjnnt7e3sJxJkszz3LZtFEVoxmCMRVHk+z6MLRKyYRjCG9Z1nSRJ13UQpk+ePDkcDlJKhGpRJUEp5ZzDO6MAREo5DAO0Jt6y1iqlvEsvM3yrMcb3fehpCOW2bZdlCYKgbVvG2H6/55xXVSWlFEJYa5umwTwhha+bkGUZJHVRFJTS6zhCCAhWIQTKi5FQhgNFPUWSJNCgCINDEyullFLGGDhlQgiUtBBitVqFYQgni5lLKaWUeDdJElhdfHAcx2VZ5nmGGr52fWCrsyyLoigMwzAMcbdASoljhBDzPOMH3GbgnH/77bdKKeye1rppmmEYpmnCd0AIUdc1ZO51N6ZpQlU3zltVFWox0OmM+wpolKaUFkWBxmfY6tPp1HVdHMeo3SjLEpcYg3POzaVO2vf9IAiwA3Dc2GEhRBRF0PfY2GmalFK4zaC1nqZpGAbcDvmf77rD4fgOcB3QDofje8QloB0Oh8PhcDgcjh86P/3pT+/u7oQQ0zR99NFHvu+/ePFit9uhpIIxhsNQTDHPM0Twy5cvoygahuHbb7/d7/eHwwGy9XQ6WWuvwdjj8YjS5zRNu66DIz6dTnmeL8vCORdCHI9HJHPv7u6apsmyDCUbeZ5P06S1LoriegDO63keQr6EEKVUGIZd1wVBgEZjY0ySJIfDASo8CAJK6X6/f/HiBfLXkNdI1EKJwmifTicpZRzHyOrmeT7PcxAEMMJSyqqqrjIUJlRK2bYtHCs8Mue8aRqIXcaYMYZSyhijlF4/xRg7HA7b7RZ6GtuF5RBCKKVd11lrMTEkoNEmIaVkjEFhoxCZEFJVldZaSokU8ziO0eWpgJTSsiyxXkIIY8xaSwip65oxhsg2Y+x0OgVBAPX
PGCOE4LN4hVLKOcfC8SulVEophEAkmRBSFIW8gDHFJSiNn2GWr7Fo/Nw0jVKKUhqGISEkDEOMdjqdlFLLsizLgoOxAxjQGINSbIxsjJnnue/7OI6hy9HogrXgXkjXddhbh8PhcDgcPzKcgHY4HA6Hw+FwOH7Q/OY3v3nvvfeeP3/OOT8ej/DIT548gT1ED6+1FnHdLMvwM/6E/pum6Xw+R1H0+vXru7u7KIpQuFwUxfF4DMMQD6ajlK7Xaxwspez7frfbwU4SQuBJsyxTSolLifBut2OMvXz5EudqmiaKog8++ODly5fTNDHG4DR3u92rV6/CMGyaBqUWnHM8Oq/rOt/3EQHGul6/fo1oLTQoFCcyuZ7n5XnOGJvnGQHheZ4RqsVuaK0R675qWcZY3/fQu1mWeZ53Pp/HcYScxUeufhk+1FpLKRVChGGISmgcGYYhvDlG5pxrraMoYowxxmBjYW+B53me52ED53m21l5vGIRhuFy6QbAuZMmTJIGgR2L9+nF2kcU4mDEGdy+lhHYnF6sOYY2rA2MOMc0vrSDr9bqqKuwPlDFUNXYAz368vb09HA54d7PZeJ6H8TEmMs5ZlvV9j3A3DL7necYYTA8uXgjh+77WGoYaQex5nqWUQRBgfPzq7LPD4XA4HD9iXAWHw+FwOBwOh8Pxw+Uf//Ef//Zv/xZCGeUMSZLEcQw/GEVRkiRlWXLO8QOe6VdV1d3dnZQSOVxowWsDL+Sv53mU0iAIEEC21qKaQym13+8hbZdlQWwZnnGe57Is0dgwTRMM4/l8Xq/XjDHkW8MwxHPnYD9934fctNbWde37fpqmvu9DRNZ1jUw0XsdoELuIJ8P2zvPcti0UJ3K1SimIziRJOOdXJx5FEUo5UIWBlV5Nbl3XYRiO42itxSvo7pBS3t/fSynxEYzMGENLBuytEOLx8TFJEtRkn8/ntm0hi7XWSqmmadBWwTk/Ho9932NWEOhd103TxC9WvWmaaZpQwREEAeLAMMXQwcYY9Gngs8Mw1HUN244Esed5XdcppeI4xtrR7Iwd45yfz2dCiJQSB0AKU0qvSl1Kaa0VQlwLOrTW4zgmSWKtDcOQUorvG96t6/ra14GNHYYhTVNkovFrkiRIYaPNI47juq5R9NG27TRNQgitdd/30P14/Y9//CNKORwOx3eHq+BwOBzfI05AOxwOh8PhcDgcP0R++tOffv3119vtNkmSm5ubLMtQmAuhnKZpFEWU0nme+aW5mDFWVVUYhtbacRyzLGvb9unTp4QQpF8RNUVmFmUIOMZaW5alMQYR5qudPJ/PWmvf9/HAQ0IIY2yeZ8hWONx5nmGx8zwfhgFBV8/zPM+DSIWAbpoGgpUQgjhz13VCCNhnpRSlFAli/CsJnCwCvNM0zfOM08VxDMUMi41xYDYppYQQay1jLE1TKaWUclmWtm2RXF6tVpRSiOC2beM4lpduCkIIvC3qPoIg+Pbbbz3PW5aFc26tRYx3HMcgCFAnfd1GLBBhXiEEISSOY7jdKIp83/c8zxjDGEMCGpvGGIPCHoYByltKGQSBEIJzfjqd3rXhOAVWhH3AzHEKzjl2j3N+/RWnwPGMsePxCKcPrYw/cUFxAGMMW+f7/rvHQ6ljgTgdZojTjeM4TROEOKVUKYVL4HmetXae5ziOsQOUUixHa70sCz5FCFmWxVp7Op3wtXc4HN8RTkA7HI7vEVfB4XA4HA6Hw+Fw/OD46quvPvnkE8/zdrsd0rVBEPi+H4YhIYRSaq01xhhjCCF5nmutjTFCCGutUkprPQxDGIZ5ngshVqvVPM8olICGrqpqGIbNZnM8HrfbLeLMCDsPw9C2bZZlSZJQSk+nE1LJfd/D4X7zzTe73S5JkrquIcHtpe9iu90eDgdMEmFqYwzMI4oaoHFhY29ubl68eBEEwdVOcs7R/oFejmmaKKXGGKXUPM/4LKUU5dTXR+fhT0hhzHO1Wo3jGIYhsskI88KiUkqFEIhFE0Levn1LKYVjbdt2u91i/4/Ho+/7nPM8z+FS67qmlIZhGF06N4qi2Gw20LWU0sfHR8TPcZa6rnF1YHubpsG1YIw9PDwghY1AOmOsaRpCyGq1uo6MOWut2aXgG89IRNicUloUhdYaHSkQuEopXAjMAdd6vV6fz2dKqZQSU8qyDFvRNI3WGifFgNjkeZ4JIZ7nNU2z2Www/+PxqJTabDZlWd7c3CDibYzBx40xmM96vfZ931pbFIVSCvXTxpjz+dx1XRzHlFJ7sfme50FDf/vtt1ijw+FwOByOHyUuAe1wOBwOh8PhcPyAiKLo97///aeffoqfPc+b5xk1CMiKbrdbzvnpdHr69CmytPv9Hm0MnuddZTQkL6X0cDj4vi+lXK/XcRwvy6KUSpIEowkhoIaTJEEPb13XeZ63bcsYQ4UCIQQB5yiK8DC9YRiKokD4F8dcH0uYpil0KqUUzrppGkppFEVZlkFTXsPLELVa6yRJ8KLWGr0cvu9jfM75six4EaFd9FfAtBJC2rZVSuV5vlqtoihC8Ha1WiF4uywLY0wIEYYh55wxdjgcMKzneVEUxXGste66Tl4CyJRSJLKllAhKn06ntm2XZYnjuGkadB8TQhB5nud5WRZjDF6P4/h8PjPGOOdQrpxzYwyywJTSaZoYY1LKa5rYWss5h6+f5xlan3OOApAwDB8eHqSUjLHr1mmthRCQztZaSH/GWBAE2FgsYRgGnEIIgSnBtgshcNJrLHpZFhwjhBBCQKxj96y1vu8TQpRSCHfjdPwSuGaM4bKiAIQxNs8zVjTPc9d1lFJsFxaotZ6mqe/7vu+LoqiqylVwOBzfNS4B7XA4vkecgHY4HA6Hw+FwOH4ofPTRR7/61a+ePHkSBEEQBOM4wu4xxrquS9N0u90qpeAK27YlhFBKz+czHiHILo+GE0KkaTrPc9/3QRBUVRXHcdd1kLAIO8PhQqeGYVhVVZ7nUI2o4wjDkDGW5znahOd5LooiCILVagXvGUVR13WMsTiOq6rq+x456CiK2rallKLu2RgDbXo6nbbbre/7WmvP8xhjbdtiPhCdhJAwDBHHRmUHpdQYAzsM20sIiaIIQWlEfaMoOh6PnufBmbZtO44jrCshpG3baZrQTSyEOB6PlNJ3nSyltCxLKGkppdb6eDy2bQtJyjmvqopSisQurgiOZIzhpDjR8XjM8zzLMkzJWkspxZT4pVID44RheDwe0zSFYfd9H/niOI49z5OXfHp8KXe+6mx6eVogY+zPqq455w8PDxgBpxuGAQvH0mDtcUaMidsDMMvn8xntLjgp5xwN1NcDTqdT3/cYnzGG8a990FLK0+mE7bLW4obBOI7RpYFESnk+n5F211o3TaOUwr6VZYm49PVvgcPh+C5wAtrhcHyP/I+A9jwPiYZ33nU4HA6Hw+FwOBx/IX73u999/PHHWZbBckopP/74Y9/30dH80UcfIesax/EwDM+ePZNSXgO26KxQSo3juNvtqqparVZBEESXHgwEouGgp2na7XbzPI/jqJSCdcXxxphlWaCwkyS5GnBjjLVWSpllWV
VVSZJAI+IZdEhDa62rqoKghJKGNg3DEIYXp0AqFt42TdPT6aS1hpJGjthaW9e153mwpUKIeZ7hxOFSsV3LsgRBcD6fjTGbzWaaJpjTOI4JIcMwWGuVUkEQEEJQCU0v6WMIerjRx8dHeXHZENwQqVLKPM/jOA7DEHlevAtn3TTN1f9SSouiEEJggbDDaJzwfR/vYmI4GEJZKaW17vseFwgbBSe+LAu/ZJPxK+aMWLfv+2/evBFCiHdi3UVRYNuLohiGAeJbSomEshBCa41TcM4550opxljf99OlwRnH464D59xeOqwxPqaEWo93G58ppfMlAC6E+LM5Q4Ijnk8pDcNwWRbcF5mmyRhTVVXTNIfDQWv9P38THA7Hd4AT0A6H43tEfPLJJ/hJKXU8Hv/3uw6Hw+FwOBwOh+O7JYqiv//7v8/zHEnh/X5PCPE8TwgBi/rZZ59prcdxNMYMw7AsC2LO2+324eHh5uam6zr4RyGEUqosSxRi5Hn+9u3b/X5PKS3LsqqqIAjgkQkh0KOIQnPOm6YpimK1Wq3Xa875+XxGVQWkMzTxsixFUeR5LqUchoEQ8vj4uNls0GUM/yilRDf0fr/HOEmS3NzcYARo6HmeYXsZY9vtdp5nxhgGsdaez2eoWCkluySUPc/zfR8nhdk0xrx582a1WsGBItDNL8/fQ2EIuxQu932PXotlWaZpujY7n04n3/fLskSrNWx+Xdew1eTySMO6rjnnaPngnJ9OJ3HJUFtr+aXhGjqVEAKTCxvLOQ+CYFmW65hYuNaaUooFYqVSSnEpjEbXM86Fwmh8B/DdwNfDGAN9DM0dBMF15vgTc6OU4l0YfAzbtu1ms8HP9FKEgsZnQsjhcJjnmRCCyTPGpmmapinLMky4rut5nq/HE0Latl2v1xjfWoslYA74iLV2GAYsnBBCKTXGEELqup5c/4bD4XA4HD9qeHEB/x/iz993OBwOh8PhcDgc3w2fffbZl19++cUXX+z3e+jg3W73/Plz3/e11mVZ3t7ecs6ttUEQTNOE1LNSSgjRtu0wDAjn5nm+LMv5fL65uRnHUUqJNgxrbRiGcRwvy0IpjaIIsVNCSNu2YRgqpay1vu9P0ySE8H0/juOmaeI4DoKg6zrOOeKuaZquVquiKCDBGWNBEPR9vywLDoM/RdwV4eKiKCilaMPA2X3fP5/P1tosy6CYm6ZJkkQphQkwxgghMLB5nkN9nk4nay06Ma4h4nmepZRBECCDLKU0xiCnjHP1fY9ZJUlircX4Wusoigghfd/DSgshYLev6WMEt4UQeZ77vi+EQE58t9uhdKIoCq21EAJOHHN+eHhA2poxxhhDfwXeWpalbdskSXDJlFJoBYkuBdBVVSFM7fs+5xz7liRJWZa4moQQWF1rLSEEnxJCXAPjGGEcx6vy7roOY0opOecYCqdgjJ3PZ0rpNE3XCcNNJ0mCi4WdhH3GD0hAXwPRuJcQX/o6yrIUQiBmTggpioIQopTCPQBCSNM0Wmvf9+d5Vkph/6Gkx3HEu5e/Fg6H4zvBJaAdDsf3yP/qgHY4HA6Hw+FwOBx/AX7961//9re//fzzz3e7Hed8u91uNhtEidHGEARBlmVoaUDaN0mS4/G4Wq3CMESo2ff9tm1ReSylTNM0TVOIPGjBtm0JIcMwJEmyLMtqter7HnqUEBIEQRzHeZ6XZQkHDbForRWXboeu64QQcRzDMq9Wq7IsrbXXjo4sy8qyhK0WQkCUN02Dd6WUWuu6ruF2CSFJkoRhCA1NCIFyTZKEEAKxjpNyzqdpQuYXfhOxX/hQSimsLvbKWquUQumwEAKiFvOBTm3bNk3TqwHvui5JEnRrdF0HYyulnOfZGLMsC6pFCCHQ+lhs3/fGGKwFk4ROnabpeDyyd/orsI3QtfC/WutpmvCuuLRh4N13D8bS5nnGnHEiKF1sfhzHaZoej0e4+DRNgyDA/QAp5W63S9M0SZLr7YR39bEQom3bvu/7vofEp5fkNa4OhD6Oh2HnnPu+Ty8PUWSM+b5vreWcW2uxXswZS5imyVprjPF9H1/C64mMMcYYcUlt13WNlXZd9/r1a8TGHQ7Hd4oT0A6H43vkv/+DKYfD4XA4HA6Hw/EX4Msvv/zss8/CMPQ8b71en06n58+fSymVUsje3tzcKKXQsTDP89OnT4dhaJpmWRZrbV3XQRDgkXGQjFJKKWXXdThsv99P05QkiVKq6zpCSN/3aZpCBeZ5Po7jdrvFuZRSlNL1eo1kLmOsqqq7uzu0PEPCVlV1Op1QtlDX9VWYcs7LsoyiyPM8HMA5r6oqiiJjzMPDw2az8X0fOWspZV3XUKLW2v1+zxijlL59+1YIEYYhJkwIoZTe3NzUdb0sSxzHnHNCCDSx1jpJEmttWZZlWWIPoVDJpQICCV9Kqed5wzBIKYUQu92uLEvOuRDi5uYGn5VSWms9zyOEQLtDwhZFAeuK64WVYpfwytu3b3F1IFs55/iVUsoYg58NgoBSihfpxfNinpzzq5/FK3Vd4z6EtfZ4PCKlnqYpPoJ0cJ7nOL4oCoS4r6N5noeT4gBKKS7uer3GBDAmnDWOROHGarXCJpRlaYzZbDb4FU9EJITAZRNCmqahlK5WK0IIpbQoimVZsBtSSkopsvB5nuN6lWWplFqWBX8aY7qui+OYUjoMA+5hKKWGYej7HqdwOBwOh8PxI8YloB0Oh8PhcDgcjr8EH3/88W9+85tPPvkkCAIhBLzbarWCNISX9DxPXZ7Rh86EoiiQej4ej3d3d8gp13XNOd9ut13XXZt8p2nyPO98PmdZNgwDGh4IIcMwLMuite66LgiCNE2VUr7vV1VljAmCIAgCNFr0fR9FUVVVCN70qPQAACAASURBVNvCL1dVRQhBqDYIgnEcocLRR+H7PlQpkq2I8fZ9r7W+JnCnaUKtByLAnPOqquA6kyTBbsCrep4H6wovCWlurb3aYXhhpJvbtl2WhV4egmetXZZFXp5SCKWOsyulmqaBouWXhDVGrusagWhIZBzGGPN9n3POL8/iQ2e053nXp/+hYzoMw2maOOd1XV8rNdC/gbYQQkhRFH3fIw/OGENdBm4JQCgzxrDbWZZB7DLGEC4WF81NKV2v13VdYz5aa621Uspai8cwJkmCgxHHxphYC3aJUor9xKI459fLcY1gE0KghimlQgiobcaYtVZKiSuFs+MH6H7GGNLNSI6fTidcKUqplNIYg2HHcTTGWGsJIX3fY5mHw+F4PF51tsPh+O5wCWiHw/E94gS0w+FwOBwOh8PxnfPVV1/94he/kFJCuX7yySdICkPaXoss8jyvquqDDz6glEKnQjLCLSqlNpsNdKHneXgU3jzPWZYVRXF3d+f7PuQv5xzNG6j1QJ+D7/ur1WqapjRNKaXwsJDdGEQIgaQq8sJpmkoppZTwnn3fQ1PidIQQGNiqqsIwXJZlWRYYyTiOz+dzFEVt21JKgyC4hpFRvgFX2zQN7Kq1FnFmzvnpdJrnOY5jdWnkYIz1fY9G4zAMKaVKqaqqoIxhiimlxhgpJforjDFQ3lEU+b4vp
SSEzPMshMC08VnOuVKKMQY7zDlfloUxttlsgiDwPG8YhvP5nKapuESJYWbX6zXcetM0p9MJn8L8r4NEUQQ5uywL5/xdHcw5x0Kw/1ggvxSeEEJwFnycXp7jNwyD7/tFUUyX7mYpZdd1mDnOC4QQeHGe54eHByyWXIpZkO/mnMOAY11xHFdV1XXdOI5t2+IS4FM4IzbKGLMsCzqs4ziWUjLGiqIYhgEN1H3fM8batlWXRmxrbVEUeBffoqqqlmUhhJzP5+PxiDj2O39XHA7Hd4IT0A6H43vECWiHw+FwOBwOh+M75NmzZ7///e+fPXs2jmMYhkII1C4jRur7PrLJq9XKWtu27Wq1qqpKa73f70+n093dnVKKEDKOo+d5SDdrreGLy7IkhHz77bebzWYcR0hDSmlRFNbaJEmapsFJrbWU0sfHxzAMy7L0fX+326GrYVmWLMtgaT3P6/se4yAJC1+JEgb4XM65uBQKs0tWt+s6pJ5R77vZbKDOkbrtuu7qrKWUj4+PWmv0YHDOT6cTpZQQghd93xdCpGmKbmt2eZAg1C2ltO97ONbNZgNz3fd90zRoroAInudZKYVFWWsRc8ZUj8ejMQbuNYoirBF+XGvNGMOcGWOHw0EI4XkexC6cuJQSCe7D4dC2LT6IGTLG0BbCGCOEKKW01g8PD1D5WOPDwwPSypRSXBF8MRhjnPO2beM4Xq/XYRjWdd00zfROeXTbtpxzQsj1glprsSdSSmutEOJ0OimlILi11uM4TtMEZy2EwCB/po8ZY8MwwEdj7Z7nQbvj64E1YvfYpWbkqv7h0CmlQghCyHWGvu8TQrB1+M7M86y1HoZhmiZr7TRNb968cQXQDsdfBiegHQ7H94gT0A6Hw+FwOBwOx3fF3/3d33355ZdKKd/3nz179uzZs2VZVqvVbrcjhHRd9+GHHxJCOOdPnjyBEISkQ8CWEKKU2m63p9Pp9vY2CILj8Qi1l6bp+XxG6nmaJrQ0dF0XRdFqtUqSBKlhxlgYhlDMjDEIzTzP4Te11lCxWuu2bYMgiKIINtlaSwiBlER+eRzHKIqQhIWVvjrr68jomoDExASgvxExXt6JSA/DIIQ4Ho/WWljp0+lkjIGdhKCE8YQLHsdRKYVzIcGNpUGJHg4HKaXneUjdEkIeHx+zLOu6Lsuy4/HYNA30K06HxDGOREwbnrRtW0h2djGzWCyUq7UWHhmtI/M8SynxCmwvZts0DT5ijCnLEuoWh51OJ8YYtginuC6EMSaEoJRe7yJgq6F66eVJgPzyNMIwDJVSZVmO44gDCCGIrm+3W+wMluZ53m63w7q6riOEMMYopfM8W2uxKH5psvZ9Hx0sENbY29VqhUZvKeWrV6+mabpe5ePx2Pc9PD72Ew0k+CKhf+N8Pvu+j8kcj0dEucdxfPXq1fF4nOcZf1kcDsd3ihPQDofje8QJaIfD4XA4HA6H4zvhq6++evLkCSHkvffeE0Isy2KMub29RV+BMcb3/aZp2rbFk/HmeYane/LkCVLPML/GGAhlaMrz+ay1fvv2LZ4oCMNLCEEaOk1TqGT46/P5jHpiKaXv+0IIeGEpZZIk0zRprSE3pZT4FKo5+r6nlA7DQCkNw3CaJuhm+k4mep5nY0wYhmVZJkmitV6WJQgCHHDNRA/DYK31PA+DMMastcgso0Wac344HNCYEYZhEARhGELRYrGQ0XDWkLNd103TBJFqjNFai3f6JRhjOHuWZfgsIWQcRyEESiRQoq21xp7DvPu+r7WGSYcktdbSy7MEGWNlWS7LEl26nuH38zzPsixJEmMM6jjQ2Q3byxjzfR8fYZf+DTRd4BSUUvjiIAjGcaSU1nWdpunxeMS75J3nDR4OBwwFk4vx0QeC02G2QRBgwvM8c84ZY+KdCDxuJGDriqJQSk2XkDVjDJluRLYJIWiUhv1v23YcR0KIlLLrOlxBXNB5nq8nxb2QMAwppdZaeHzE4aG/l2VRSuHjb9++xV8Wh8PxXeMEtMPh+B5xAtrhcDgcDofD4fg/5ubm5re//S0sapZlaZpmWZbneZIkRVGg74Jfsr1CCOhU2FVCiFIqz/OiKPb7ve/7dV0TQqCMi6JAFPrqjpF6Vkrd3NwMwzDPM/Sf1hoPyhvHMU3TeZ49zxvHESISoWyl1Hq9hsAlhERR1HUdui9QjMAYg5VOkgQPu6OUQhmP4wgBPQwDfGWSJIwxxLebponjeJqmuq5934cARe64LEsMgiV7nkcI6fteSgnpzBg7n8+EEGttEARQnJxz1I9wzjEfeqkEEUIcDod5npGzZpfwsud5CAITQhBPhoiHJL3K7qqqYGmttVEULcvCGJNSVlWVJAmywL7vo0wZslhK2TQN4sO4XsaY4/EopczzHBcRV833fUKIMQaZ7uPxmGUZ9o1SWtd1kiTYTyHEw8NDlmVaa3SM1HUdxzFKRcqybJqGUnrd+WVZhBDGGFwC3N6QUkL7zvOstT4ej0qpIAig3c/nM0YIggBbisVeDTiMtpQyCAJ8k+d5hjrHvmF8zvl6vcYNA6h/+k5ptbVWCCGlNMZYa5dlsdYaYzAa7kYwxk6n0x//+MdpmnAih8PxXeMEtMPh+B4Rf/6Cw+FwOBwOh8Ph+H/g+fPnn332GWMsjuOf/OQnTdPA051Opw8++GC73X777bcffvjhMAxN0zx79gxh0tVqhQqIeZ7jOH758uXt7W3btnmea63HcdRa/+d//uft7S0cMUqfi6JATtb3fSnlarVSShVF0TTNbreDZjXG4HGFRVHkeb7f79FAXVXVer2WUiqllmWBoERTRxiG6P2AOuScU0rxCiHkcDjkeY45vHr1ihCitZZSWmuVUlrrx8fHNE0JIZvNpus6uG/G2DzP9/f3eZ7DfkJQwmDudrumaaDUCSGwzDCb1lpKKV7Hn9C+y7LgvISQ1WqFLDPkL2OsLEuUaN/c3JzPZ4hOxhiG6roORpUxtt1ukSbGeSG1CSHGGM/z4K/P57PneVggvDkhBP6XEEIIKYoCJh2LgoFljBVFgWy4lBLV25zz/X5vjMG1wxoppafTyff90+nkeZ4xBltEKdVas0uEWWuN3eCcXxeOjw/DgITyarXCSqH+CSGUUuwY1msvEEIgmuGOCSHoBN/tdvjI4XDAvuFbwRirqsoYs9lssPbj8Qi/jFIUSmlZlsaYLMtwfYui6PveGBMEQd/3sOTYz6qqsHUOh8Ph+DO+/vpr/BdC+EcJpRT/cMF/oNO27b/927/9+Wccjh82TkA7HA6Hw+FwOBz/l3z22Wee5yVJ8sUXXzDGPM+rqurjjz9elqXrOqXUs2fP0DW8Wq3u7+/v7u6WZVmtVijJrarKWuv7PuKicRzneQ5DPY4jGhJQy2ut/eCDDx4fH+u6FkKg9XhZlvfeew9Pz8PHN5sN+jR83w/DsKoq1HTAQa9Wq9VqNU3TPM/W2qIo8C+9YRiO44gca13XSMimaVqWJQwp/n04SRJrLfLUhBCt9TzP+IFzbq1FVheR25ubm7quoSyttbvdrq5r7A+Szlrrpmk8z/N9H//KTSk9
HA7XV+I4thcfjY/DWW+3267rqqq6vb19eHhAnrqqqjAMT6cT5DiuBSGEUhqGIao2rLVSyiRJUBlBKYU9f3h4QPAZFhi2V2uttcYIUPxIWB+PR8/zdrsdfO7hcBjHkXNeFIXWGtaVXSSyMWaeZ/yKM14tNqV0v98zxowxXdddvTAmiR8wAaxuvV5jXafTSWuN64JLc51zcnngIeoyCCFxHHPOGWNv3rxhjK3Xa0KIMQYGHBPDlDjn/jv91+zS/Y0B+aU2Ghcd/SqYM77D8zxzznFllVKMsWmacLfDWlvXNULiDofD4QD431XG2B/+8IcnT56kaYp/yuCfC/h/Eefz+eHh4T/+4z+stfhvof58FIfjB4mr4HA4HA6Hw+FwOP5veP78+e9///unT5/udrsPP/wQTRRpmj59+vTaHRHH8X6/r+saEV1CSFmWUsplWbbbbd/3T548gfxtmsb3fdQrCyGQzLXWooohiiJ6acMghCBPGkVRnuee55VliX8vresa4hg1EdM05Xnu+7651G5AdKIuA2YwiiIhRFVVqHGw1kKAUkrRoXGNzVJKodStteHlYYPoQQ6CAP8WjUw3Ylw4fp5neWkgGYYB9hm/IkeMXznnRVFYayGChRCQm+wSDcZo5JJr7roOHSBxHGN6UkqcCMvJsgzbVRQF1DzG9DxPKSUudjgIAkopnvJ3Op1Wq9XhcMCLeZ7HcZwkCZ55GAQBNmFZFlzH8/kMzYp5Yhqcc2w+pRRdJUKIruvsxaRP0/Tw8NC2LSpECCFIZIdhaIxRSh0Oh7ZtwzCM49j3/a7rhmHY7/dYAqpRGGNaayHEdrvFpa+qyvd9qGFjTFEUy7J4nrcsyzzPp9MJO+N5Hm4SIKguhCjLsq5rfDE458YYccmVA2hoxhgS0FEU4XV8Z+I4xoCMsWsd+bIsfd/jW7osy/39/f39vVIKf3EcDsdfAFfB8cPn/fff/5d/+Zevv/76888/vz5OAP80oZRyzn3fT5Ikz/MnT558+umnx+MR/7vqcPzwcQLa4XA4HA6Hw+H4f+UXv/jFP/3TP3355Zfb7TZN0w8++EAp1ff98+fPlVLr9ZoxhlTsNE3H4/H29hbSeb/fT9N0d3dnjDkej+v1uizLOI6llFVVbbdb6F3G2OvXr4MgUErt93tKKeQdahbQk8AYg/uDV4WUlFKmaQoPDpWMtLJSKk1ThHOjKEIZMbxh13WMsTAM0zSF3CSEeJ7HOY/jGKleeGG8gkIPCGi0c0gpjTGe58HkQitLKRljyB1fE83w19dfsyyDC2aMUUqHYYADhai9AnXLGENXMg6OoghBb0IIyiiuUd+6rrHzUPCe51lr0zSNoigIgr7vIXYZY4wx5NAJIVgUpTRJkrZtcYyU8ng8EkI2mw3nXGuN0glCSBzHnufh+3A16biXwBjDgFrrYRgOh0Pf99M0weRaa7XW/FLNTAjBxfV9H5ceNydwH6IsS2stNIRSCuIYnh2Xr+/7pmnmeaaUCiGWZeGcIxLOGBNCaK2xjbhAQghrrRAC4fQ4jrMsi6JoGIZpmuI4RiK+aRrcS/A8b5omKGxcHe/SGUIIgZfHFxhfJPTP4L8ZJ4SgwLrrupcvX2KvHA7HXwYnoH/gfPjhh1988cU///M///znP0/TFHcEhRDkUjyF/+WPoihJkvfff//29vZwOODWY9/3fz6cw/EDwwloh8PhcDgcDofj/yc3Nze//OUv//Vf//WLL76AAfzoo4+CILi/v3/69KnneUVR3N7eNk1TluXt7W3f93d3d0KI4/F4c3MzjuP9/f3t7S0k9TiOkNQo00iSJMsyOM2qqhB53mw2CMYaY+AiYQxhrpG0bZomCIIkSSCv53m21sIM4melVJIk5/MZchD9Hp7nLcuyLIu1Fqkr/Ltu0zR4RV4ecAeT6Ps+5matbZoGYeQoiqSU4zjC80JK4oye53HOoygihCzL4vu+ECKOYwhWqExCSNu2+AEn4pxzzqGYrzDGEI7GIIQQvI7sthACAheZ8asehSqFhGWMITGNLDPMtbUWKw2CADllSFtc2WveGfliIYQxhlK6LAulFMrV9/23b9+O43jV/YirI0RsreWcG2Pmed7v99ZaNF/DUFtrjTHjOOLi0osgZv8fe+/WK8tx13/Xqc+HOc9aax+87Tg+xomJk9gYgkQIkRBwRxJuOEgoF1zwUngbiItEEQgJhJQgJJBQQEEExYlNsr3Pe6015+npY3VX1XPxZZplJ8/zB/2f2Ju4PhdbPdPV1dXVs6dWffo3v2JMax0EAWMMfYXewCGccyHEbrdDfHQQBE3ToDMRPRcEAZ5JcM7jOPY8z/f9rusOh0MvlxHvjIuFnSeEtG2L+4XT4SV6gxDCjvHd2KaU4n20p+s62HZK6W6345xvNhu8SSnNsuzu3bv4FFkslg8MK6CfcP74j//4jTfemM/nGC+UUviSZ4wppTA68+PaAEII3/evX7/+8Y9/XAjxzjvvvL86i+UJwwpoi8VisVgsFovlf8wrr7zy+c9//hOf+MSbb74ppaSUep53dna2Wq0Oh8OtW7fyPDfGpGm6WCzG4zG0I2KKESVdlmUQBF3XDYfDoijKspzNZpvNZjwex3GMmSeCc6FT4fi01siqgZNyzhHIzDnHXijCOI5hGAkhm80mjuOiKPA+JDUkqeu6VVV1XVdVleM4QRCkabrb7XoLDOnsOI4xBoISu8qyhGumlCLQ1fd9TIkppUEQbLdbOFwUaI9ZODjnVVUhkBb6Ms9zRDrDqJZliUswxkRRVNc1dkG5cs5Xq5VSyjsmiSaE4H0cjrsThqExJs/zpmmklNiI4xgn3e12VVV5nleWJec8z/M4joMg4Jwvl0v0odYaHUUp1Vr32ZzRchhzSimlFL1X17WUEik40Kq2bVEPIQRdDRec5zljTAiRZRljDBHfnPP2mPAEvYRMKRDKlNLdbrff7/M8h5WGOPZ9f7FYtG07mUzwhACZPRhjg8EAdr6qKnSd53lozOPHj2GxcS8QRo2bNRqNoOyrqmKM4VrwGAMNQ+WU0v1+3zRNXdd4rkAIybKsaRrf940xWmtkEamqyhiz2WzQP4yx5XJ5fn6+XC7btsXNslgsHwxWQD+xvPLKKy+++OJv/dZvnZ6e4hu1H84w8uLBJL6WGWPquBjAcDjEYsJVVeFL+Gq1FssThV2E0GKxWCwWi8Vi+e9ydnb28ssvT6fT8Xh8enratu3FxQUWGFwul0mSBEGwWCyqqtJa49/xeHx5eTmfz5VS8L/37t07OztrmiZN0+l0ihQWhJDtdjsajYIggNBEuCilNAiC8XgMjbvb7aB9CSHj8fhwOGy3W8bYbrdLkmQ4HOZ5vlqt2DFlZBzHxpjlcjmfz4MgQNQztCBMouM4XddBTQohlsslIrJhkxljxhioQ0Qrn5yc3L17NwzDpmmCIKCUjsfju3fvMsY4595xKcIwDKWUiHeeTCb3799v2xYz6vF4fP/+/aZpMK9u27Zt267rEE+NoOC+eUopTLwJIYvFIggC3/fpceE+zjk9xt7ieqFKoU2
RswJ7e+VNKUUIcB8Z3Zcxx4Divtr9fo9icRyjJGNstVohGtoYs1qtwjBEYBrUMCEkTVOcCIHVOB0EAVYORG4QpRQC3rGdpiljzHGc8/PzOI53ux1jLM9zrfVgMMB6g/v9XghRlqXneavVCsHOWussy7TWnufhrnVdV5alMcb3/c1mQykdDAYQvlmWBUEAf4EHG5xz2GelVFVVhJD9fq+1xlHozP1+jz6P45hSSimFzkabCSEIcyaEQDR3XQdHH4ZhWZau6zZNY4zBB2+/3yNe3mKxWCyEkN/4jd/49Kc/je95DP0YUrXW/BgBjQ0Mc4QQKSX+GAjDEOm//vRP/9Su7Gp5krER0BaLxWKxWCwWy/+Z55577pd/+Zc/8YlPTCaTZ555hnPetq3WejabYY24GzduXFxcIOdGVVXz+Rw2c7fbQRemaVqW5XA4DMOwbVsEn6ZpSgiZTCaO42w2G7yPCOjhcMg5f/ToURiG8IaweJRSKSWSZmitlVJwo0mSIOrZdd31ep0kSZZlmJ26rrtYLDjncLJFUcA2IiwXshtJGBCriywT0KyIzNVa+74PR2yM6Y0n5xzOVwjh+z4ic3EtKAAVS45pN/ryMMKcc3jttm3hoymlVVX1M22EV8Nywu+7rus4Down5xyVY0KOmnG/4jhGg1erled5lFL/uLYhZvJoCWPMGHM10URd19iQx+TUjuM4joOQ9rZtESOM66qqSggRRZHv++hGz/O6riOE1HVtjJFSQq/z45qKjLHe0sJoV1WF0+33ezy0QHcRQrTW0+kU9xRevne4/JgKA90ShiE+Cbgc3/eDIFitVkKIwWCArqiqijHmOA460/O8MAyxkGBd147joPeUUo7jjMfjMAy9Y5y44zh9fDT0sTEGnyKttdYaHdJ/ICmlSinUVtd1Xdda6/1+/9Zbb11cXOAeWSyWDxIbAf1k8qUvfelXf/VXT05O8KWKIYAef/MkjjmgjTEYtowxGCAYY1prlMQAFwTBvXv33le/xfKEYAW0xWKxWCwWi8Xy/8XLL7/8S7/0Sy+++OILL7xw8+ZNQgjsMJyj53lweefn54hsRQpmqMOyLK9fvx5FEWaJjuM8fvwYAnoymXDOLy4uBoNBURS+78O0Ip8vY8zzPESt1nVNCEFM8XA4RLytlBIrECZJ0rbter12HCdNU8dxlFKHw6GqqtFoFIbhfr+HtzXG+L5PKUWAqjEGbjEMQ/hB3/eLooCrbZoGySKCIEjTdLPZQD4yxg6HA5pnjIFCLYpCSon62THvc57nOC8KNE0Db0sIyfMcJhcvsRcCGnsZY0IIY0wURWiSMaaflhtj4jiGp8bLHnoF1BbHcd8Szvl6vUbyE7xzOBwQ343bejgc4OtxeJ7nhJCmaaDLcQiUNGOMUpokie/7nufleY5eBYwxrXUcx2maDgYDtEcIgRqaptFac86R6RsFsixDb2ANwKupV+q6xmMAQggyO+MDxjnHcwjHccqy1Fq3bau17rpuu93i5iZJopRSSiHjhziuTtm2Led8tVqhW4bDIaqt65oxxjlHYyilxhjOedd1Usq6rruuQ0oNqHZjzOFw6LoOt6PrOkLI4XBAR9V1fXFxgbZVVbVcLm34s8XyYWEF9BOI7/t/8id/8uyzzzLG8Oi3/9ZlxwUSUBLf+VprfKVjDNJa46+LIAieffbZMAz/6Z/+CYOFxfKkYQW0xWKxWCwWi8Xy05nP52+++eYLL7zged7zzz+/2+2Q+9h13e12i8QaWutHjx6NRiPO+Xw+p5TCmXLOt9vtbDZDlgZxDCWu6zqOY8dxgiBo2xYqkBCCqOfBYBBF0Xa77boOAarqGA/Vti1sIyEEEhD6so96hkyEJ4XzreuaH6Oeq6rqug7veJ7XT3SjKHIcJ45jKMte1EI19jYWDXBdd7PZoOVd16ljLuYoijabjVLKuZL32XEcrTXibcPj2oP9y81m4x7jiIMgwJue53HOlVJSyn6m3ctiZKXAtNwYgwQXaBs2sH3VR4Moig6HAybqvu8TQpDPBDHghBD0GGNMSgktboxBuhLf94UQrutCMTdNg55pmgZi2hiDfkiSZDKZjEajruu01lC0y+USVQVBgAMppVprY0zbtlLKsizRmU3TdF3HGKvr2hjTNA16TCkFB0EpxQMG13WhiXEJRVF0Xbder4MgiKLI8zy4YH7U0xDBQogsyxAEjY9cfczgXNd1GIa4a5RSxlhVVVVV1XW92WzQSCEEpAa2J5MJPhv9uRA7zzlHJa7rlmWJS27btq5rKeVisbh//z6xWCwfBlZAP4F85StfeeONN/AFC8WMb3v8S44rvhpjMHD07/Bj5igUg4b2fT8Mw+9973soabE8UfBnn312PB5jqRP8cfD+IhaLxWKxWCwWy0ePN99889VXX71169ZsNpvP5/v9fjKZSCmhR13XHQwGm83G933f96uq8jyPHGOUDofDeDwOgmC5XAohELncti3il5VSxpiiKOI4llImSbLdbg+HQ5IkSDpMCKGUSil934d3htPsui7LMtd1YY0R9Qzxp7UuiiIMQymlEAJGNcsyZIfgnHdd13Udzus4ThiGfZ6Nuq4ppcPhcLvd4nKgEbXWnue5rrtarYIg6LoOAcJCiN44Q9dCqjLGYJyhpLfbbdu2QgjMlquqapoGihkTZkQ9YxuGFLvKsoyiCH6zl86EEJh92GdCyOFwUEpB40Kmk+MsHRtAaw2t7B2TaaAzUW2WZXEco1XoJSh19D/sMEQzTDSllFIKC+w4Di5fCOE4ztXriuMYqwhSSo0xSqkkSfr8J23b4io8z0MLu66DKU6SpGkaSqlSSghxdnaGTxSltOs6KaWUUmu9Xq9938fHAE8a+nUIsyyDkgjDEMICj0OyLMNyVWmaSimNMfDFxpg4jrXWQojtdts0DWoOggA3iHOOeH/P86SUaIwQAncBzcPHGJ8QNH6z2RhjNptN0zS4xrt37967d8/ONy2WDwsroJ80bty48Qd/8AfXrl1r21Zrja9WIQQ5rnCAMQVfthjR6PHHPb2Yxga+iuM4nkwm3//+933fPxwOV89lsXzo8O2Ruq7xh/X7i1gsFovFYrFYLB8lJpPJr/3ar52enp6dnXHOD4fDbDbT94L7HgAAIABJREFUWtd13bYtVGyWZScnJ1B+g8FAKcU5F0LA0A2Hw+FwaIzZ7Xbj8TjPcyRzgO6E04T8pZT2DrdtW2NMEATQeYSQpmmSJFmv1wiF3u12CJGGCEZAq+u6QghoU2SaDoIA6lkIoZRqmoZz7nkewlE552h2H/UMj4lJrFIKGTaiKKrrGjYziiLXdYuikFJCvGJWDPGNxodhuNvttNZoCXpSCJGmKS7w8vLyqrAuiqJpGky2GWOHw6GfeEPoc84ZY2EYIiMHzphlGTlGfkFSQ5WizbC66LeqqgaDAW6K67r7/d4YA/t/OBx834dE1loHQeB5HiEEQdmO4xBCLi8vXdfF3B7/uq5LKcU9JYR0XQcxrbVumibP8ziOoQagzh3HQW6Ktm09z2vbllK6XC7TNB2NRvjMdF
2XJAlaCCe+3W5xF6qqCsMQmbt3ux3CiuHZy7KEboZWVkoZY5RSUNtd16EkvLY4amWllJQSyU8IITDLcRzHcRxFkVJqvV4TQoIgmEwmh8MBAea4cCh+5PSglLquq5QihDx+/Liu6yAIyrJEbhMghMB/nKqqcODt27cfPHhQ2OQbFsuHhxXQTxp/+Id/+Nprr3HOm6ZhjOFrGQNcP+r10J8GSkJM46XjOHhwbu+15Unjv1JwKKWm0yn+4rFYLBaLxWKxWD6a/OIv/uKbb745Ho9HoxHMIIwwYwxWFMo1CIK6rn3fh9pDJAcS6UKYXl5ewtsiZHi/38MwhmGYJMlwOIQuNMYcDgfXdeM4vri4gOEdDAaO4yilEEBtjPF9P47j3W5XVZXjOLCWvu/neR6GIZQfjOTl5aUQIooizjl0pNYabajrGl4SypUxxjnP89zzPMTqFkUBxRkEgRAiSRLGGPwpYyyOY0KI1tp1Xc55WZZN0/Qhw8BxHDjutm2Rs8LzPNd1caDjOO0xDwksNvQ0ztK/SQipqkocQ6cRwIs5eRzHVVWhjNYaraWUxsclBx3HQc9EUYQ5OSHEHNflQxgvXgZB4Lpu13W+7zPGKKVN0/Streva87w0TRFHJoRA2xhj6E80yXVdtF8IoY+JNbquM8YYYzzPi6IIsc8IXqaUKqW6rlutVlEUxXHsui56EodnWcY5xy1AADLqHI/HKNNfeJ7nUkrY5CRJfN8PwxBC3D0+k6CUbjabyREkJaeUQrKjSU3TrFYr9Cp0+W63Q+czxrquk8f1Evf7vZQSn0yt9X6/F8c80UIIeGpjTBzHkM5lWbZtW1XVfr+/uLhYrVb4L2axWD4UrIB+onjhhRd+53d+Bz9zkVIKIfDFi2ELG9jGxvvex8t+mLta4OTkJAzD1WpFKcVTW4vlSeA9OaDH47EV0BaLxWKxWCyWjyavvfbal770pWeffbbruhdeeAHGLY7jJEnKsqzrmjE2n8/bto2iaDQaGWOEELCEjLHhcHh+fh4EAXQq1CEML9QwpZRzDrMJfam13u12w+EwjuM8z6fTKRLvOo6z2Wwcx9FaV1UVxzHinafTaZZlRVFAAUN9rlarJEmCINjv92EYwg+itZ7nQTrD50ZRlKbpfr/3fb+ua0IIHKVSCqktwjBEec/zPM8jx9UCITQZY1VVwSkLIeI4Nsagkbg6FIYM9TxPKQUtC3fs+/5ms0mSBAXgT9F1xpg8z5umIYS0bQt5LYTA3jiO4aAZY6jH8zzHcfgx3TBsrz6Clz1XbzHeybKs67qqqqSUnudlWZamaZ9BG61t29b3fZy0qiqckRCyWq1w+agZvpsQgtuBXuKcQyX052WMoYdv3ryplErTdDKZ5Hm+Wq2wROTl5SWeZFBKkyThnCN6mhDSdV0URfgkMMaUUniw4ft+27ZKKcdxxuNxXdd1XWutm6aRUuJ9BGvjEpqm2W63+OAhdptSihvBOR8dMcbgojjnUORJkmw2m7ZtOecw5pDmjDHc3OFwmOc5mtc0zW632+/3TdOUZamUUko9evTo0aNHyq6LZbF8qFgB/UTxu7/7u5/5zGc8zzPG4G8GdrTP5hjRzBjDwNe/RAFs9wMc9mJbKeX7/mAweOqpp7Is+4//+I//PJ/F8mHzX38VWSwWi8VisVgsH01u3Ljx8ssvX7t2TUo5nU6Hw+Hl5WVd15PJpKoqRCfBP242G2Rkruv67Ozs/Pz85s2bu92ubdvtdhsEAVzzZDJpmubi4sJxHKhSxth4PC6KYrPZRFGU53kcx4PBIMuyzWbDOaeU6mOM8Ha7nUwmruvCk0IjJklCKZ3P54vFAhY1juM0TQkhy+WSEBLHMWOs6zpowR4E5FJK0Qyt9eXl5WQy6V/CDEKezmYzJOpVSmGuC6vLGNvv91EUIZQbc12llJSSEIKZ82QyefDggZTSdV3MqKWUjDHP89q2Nca4rlsUhRDCdV1kJoHHNMbEccyPC9lhLg0RD40bx7E45l/OsgxqmFIqpaTHqGTOOfQxLpZcyZvZv8T2ZDIhhOBNOHEp5XA45Jwvl8uTkxNcEU5BryTlxEtjjFLKHLNzoDcYY+gunAJNIoT0DcCu8/NzIUSe53DN6LfD4YAmoTas+yelHAwGYRgikYUQArqcc66UQiCzOa5VWJYlgqzPzs4YY8jLsV6vkyQhhAghoL/Rh23b4jHJ4XDIsoxS2nVdWZZFUUCFOI6DHNMI/ZZS4vJ93++6DlHS6BxsHw4HhJNXVeW6LqQz8plst9t333338ePH+JxYLBaLBTz33HP4gjXGYPylR4ncb7zvJYabnveVpJRiYMKAeHJy8sYbb7Rt+7d/+7d9GYvlQ4QTQl566SX8HspGQFssFovFYrFYPmq8/PLLr7766sc+9rEgCJC4mXMO0/f48WPO+Ww2C4Lg6aefjqKIMVbXNVTger0OgqCu6/F4DF3LOUcW5qIofN+HtsNscDAY7Ha7MAwR+TscDrENKy2lRBB0mqZaa4hF13XDMISkRpINIYTneUjEcTXXs+M4SqkgCPrVApVSdV1zzoMgaI6JMiArYVqNMWghgprVlezMxhgYSUIIrCKks+/7lNKyLDnnQgicS0rZdV0fLEwIadvWdV3GWBRFm80mTVO0UAixXq/REs651jrP8yiK4C73+31VVb1xzvMcSTPEMZswptnGGEQ9w317nue6ru/7iORljLVt2x6TYMCAQwRLKdFpjuMsl8umafI8r+va932EjaN5nPOiKND5uApKaR+bRghBNDoajDtijIFixseJMYaSvSNAJUVRnJyc4HDP89Dm0WiUJMlgMMB6kpRSIQRjDBellNrtdmVZLpdLuOC2bcMw3O12Sil0VBiGQgg8t3AcpyzLy8tL3/fX6zVEsJQSDcBNx21yXXez2VBK4SnQSGMMISTLMtd10TDcXFwjwq7RBlTleR6euCB6HRcupewTeqzX6zt37ty+fbu1Cw9aLE8ANgL6yYEx9rWvfc33fTyc6wdQ0A8o2Oi/onsBjTff56P7GvBlzhi7fv366enpt7/97a7r/rNqi+XDg3/mM59xHOf8/JxYAW2xWCwWi8Vi+SgRhuEXvvCF119/fTgcEkLg7yilbdsWRVHXdRRFQog8z+fzeZ7n6/V6MpnUdX3t2jVE9SI0FdG74/FYCIG8tzC/SqnBYMAY22w2nudRSoNjLo66rne7HeqHJIWV9jwPOtgYA+lMKUWEVBRFcMqTyQReUggB/1gURdd1dV2naeq6Ll7iAh3HSZJku93mee55HkQqpDMuNgiCJEl2u53jOK7rEkIOhwOaBP/bB1ZDHEdR1DRNURTQylVVQV5jLy4fVVFKCSFSSue90cRJkkB2QxbjpGEY4koxzfY8D0G7hBCoavhNY0wQBPDOjuO8731YY+hdbAC0JwxDTMujKArDMI5jSH+00xgThmEYhtgL2UopRWoO2GF2JSlnURRKqSRJkGcTF2iMuby8hD0/Pz+vqqooijzPq6rinOfHhQq11m3blmWJZN/b7fbWrVvD4RCPMYQQvu8zxqDdO
edwwb7vX15eoscopUmSHA4HpVTbtlprHGuM8X2/bVsI7sFgALuhlEIZfAC22y3uKa40DEOYaITnIyR/s9lorVEMn0Y0e7/fe55HKW2aZrVa5XlujMH/hcVisd/v8bIoit1u9/jx46qqjv/hLBbLh4kV0E8O169f//KXv0wIMcZg+Oh3YSjp38EGRsMefNtjREMZ0JfHk2ytted5cRzb+255ErApOCwWi8VisVgsHznCMHzttdeef/55zjnnvK5raNAgCBAG+8wzz1BKq6oaj8fQiFCZUsrpdPr48ePZbAZLyBh78ODB6enpbreL43g0Gkkpt9vt4XCYTqfQmoSQ7XbrOI7v+8PhcLVaQQU6jnM4HBDj3HXdZrMZjUZxHNd1TQhhjDmOs9/voRo3m814PPY8b7/fP/XUUw8ePIAfXC6XEIhKKUIIpdQ9pkFQSmGFw2vXrt27dw/l2ZXEGvQYM5UkSdu2xhjGmOu6XdehAMqjcuzFy67rcOzp6enhcGjbFg2ez+e4Fhw7Go3u37/fNA3nnDGmlJJSwjJTSruua47r/jHGpJSQy5TS5XJZliVjDHG+URT1Vp0fl/6jlI5Go81mA8XJGENVaBhm45jGY6O/+7hwXE6apmgbaqDHOf9ut0Or8HACBbAXVWFuf3l5yY85MbTWjuPo4zqBQgghBMwvDr/ahqvbkNFd1+GMcP1nZ2dBEBhj7t69q5Sq6xq15Xm+Wq2iKFqv1+gHSHPcBcjo6XQK779erymlTdOEYYh+k1JuNht8LIUQ6HalFKUU/wuQeSbLMs651tr3/TAM2TGHNYKv67ruPzxd10H9Q5dTSrMsy/P8/Pzc5h61WCyWn8oLL7zg+35d1/japJRi7AAYIOh7TXT/Tl8Gu66WYcdAaXMkSZIvfvGL3/rWtzabDdJ5WSwfFvzatWuEEBsBbbFYLBaLxWL5iPD5z3/+9ddf//jHP+66LhIglGVZVVVd11BpCPKFX6aUwrGORqMgCLIsQ67nNE0ZY+PxmHOOgGIhBEJf0zRFnookSZDMgR8TE5dl6ThOkiR1XSulmqZJksR13bZtYXiLonAcB/l8YfoGgwHaud1uEUgbRRECpiBw4zh2XRcGExHZQRDEcbzb7YqiCMMQQa+EEHhSxlgQBFVVIZbWcRxCCF4iJtrzvDRNEbUNJVoUhZTSdV3XdSmlcMG+7+PYPuoZraqqCleNkxJCpJRwnb7va62llFrrpmkcx6GUIrcDpbRtW6hqcgyIDsMQShqxw3Ec+76/3+8ppf3EOwxDFMPdaZqmqiqE/aIAzohWdV3XdV1ZlvDOnPPNZgNTj12Yt2ut0TOMMcYYSuLqlstlHMfoMdSPxsAdMMbyPG/bNssyxhhOrZTinEO+wykrpdq2HY/HOJ3rurjRlFLGmFIKIns8Hvu+Px6PHceZTCZt24Zh2NeP2621zvO8rmspZZIkWmvOOezwZrNBz3POXdfFNWqt27YdjUboDWMMIQTNc12XEIIeJoTgIw2jXdc1YwzKG/U0TYMPrRBis9lst9uyLPEBq6rq0aNH1j5bLE8aNgL6yeFrX/vajRs3uq6jlGLU60cTfC0DvOzfxHc7/jIBhBB1fPZM3vsYlRzrFELcunUrz/O7d+/2uyyWDx4roC0Wi8VisVgsHxWef/75N954Yz6fj0Yjz/N8318ul3VdF0WRJMnTTz+NOd6tW7ccx4HHdF13tVqdnJxcXl4iFBRGDyu8VVWF2FJjzG63o5TGcbzf70ejEWMMjs/3fSllmqaYaiJYNY7jruuUUr0yrqrKGMMYQ4EkSSBGwzCEgZ1Op1mWwbSu1+swDPsCnHMpJfQivDDnXAhRFAUELj8mxzDGBEHAOY+iiBCitcbeKIq2223XdUEQQBazK2k3oFyVUpDycRwjMQW6Is9zGGcI5aIoEK7LOWeMlWWZJAmqXa/XZVlGURRFked5h8OhaRqUNMbgQPhTpdThcEANaHNRFIQQY0wURb7v4+yr1QoTcuwKwzCO4yiKoKodx+n1q+u6eAcbSiljjNa67x/nCCbwnHP8yzlfLBaww4vFApd/eXmJhpGjIICWRVM9zwuCAOrfdV1so8F5niMP9Wq1YozVdd22rZQSyTTCMESrOOfb7RY+nTHm+35RFOPxeLfbjUYjxOA/fPjwnXfewZMDSmnfaZ7nYRFCGOTRaIQHJPD+bdvGcUwphUDHQwghBD6llFIYeWOMMUYcl14khPi+H8ex4zhIsoGuw+3GA4m6ruu6Xq/X3/3udzG7tFgsTxRWQD8hnJyc/N7v/Z7nefiW7pUxNq6+fN875kqwMwYgcsVKY/DqC6CMEEIIMZvNpJQXFxfW+Fk+RGwKDovFYrFYLBbLR4IvfOEL4/F4NpshMLlt28vLS7jCPn3zcDhs2/bx48enp6dCiOFwuNvtuq57+PAhvPBoNFJKwWxCDsJE18ckHlmWQXqORiPkK0D07na7jeO4ruvdbjcYDIQQjuMopQgh2+12MBggFkQptd1uR6OR4zjz+fz+/fvL5XI6nSLlwsnJycXFhTFmOBzC7SqllstlmqZxHKdp+uDBg8ViMZlMkJlhPB53XccYg2BFyGo/WTXGwJkyxtBsKSVeUkqhLNu2dRyHEAKhqbV2HIdSOh6Pm6YJgoAQMp/P9/t9c4wZn0wmjuNEUYQDXdctyxJ9MpvNdrtd0zSe5wkhJpNJURRw0IQQRCWHYYgJedd1iLwmhAghwjD0fZ8QslqtfN/3fZ9zPh6PCSFo0mq18jzP8zxcGi6EvDeszBiDkGRKKUy3Oa7Lh2LoK601IYRzTgjRWiOknRASRZGUcrVaoYXGmMlkYoxB/5CjL+jPyDlHM4wxQojlcjkcDvf7PWMsTdM8zxEETSnFhwH5WCilZVmi88MwRPKW/X6/WCxWq9UPf/jDf/3XfyVH7t27RwiZTqcnJyenp6d4SoGgft/3tdYPHz7EYwY8seCc40GI4zjBMXUGIt+NMfhICyHathVCKKUYY3Vd414YY9q2ZYx1Xde2LQ5EhbDPFxcXP/zhD5um6ZtnsVgslvfx8ssvDwaDfvzFm/TomjGmkPfq5n5kwfvGGK01Bpe+AKrqSzLG8H1OKVVKvfnmm4vF4vbt2yhvsXzwiO9+97svvfTS+9+2WCwWi8VisVh+Xrh58+Yv/MIv3LhxI0mS/X6/XC4ppZTSyWQyn88JIXmeD4fDpmnW6zXMaZZlYRhuNhvIR0SVhmG42+0YYwjITZLk8vKSUgqbjFDT1Wo1m82QuLlt2yRJLi4uKKWj0YhzjoQG+/0+juM4jqWUUkrOee9nHz58CO/JOT8cDpCAy+VyNBpBW08mE/hTxth0Or137x7mnzCnvQaFVEVU7Gq16g31vXv32raFOIZiPj8/H41GQoj5fH44HKSUnuehc+7fv6+UQv3qmL7ZdV2IyL7xhBBsSynREuyFfpVSwjgjvLevB1PlpmkQHUwpXa/XkNGYPx8OhzRNwzBEycPhgG7Buo78mBAD14KewW3FTQd45+qbQRDA+ZJjBBkm831JvOzL91N6/Ou6LiS+
lNIYg2s0xnRdV1XVdDo9Pz/vpYDjOPyouYUQcRwfDofxeEwpheGt6zpJEsYYpTTLsrquEZgshBiPxxcXF1rr3W6XZdl6vf7Wt74FD/6TrFar1Wr11ltvhWE4n89v3LgxnU7jOB4MBmma4l7gY+/7PhbSpJQuFgvcPnKU+GEYaq0PhwNaqJSC0MdtyvNca805b5rmcDggdu9wOBRFcX5+/tZbb723URaLxWJ5P7/5m7/5+uuvY3TGlz85umYMHIyxq080+wMxpqNkmqbD4RB7kfXoamGMQYwxDMfYiOP41Vdf/fKXv/yNb3yjr9Ni+SARhJAf/vCH73/bYrFYLBaLxWL5ueD1119/6aWXGGNSyq7rsizzPG8wGERRBAOYpmmSJJvN5uTkxPf9wWCA7MlQilEUDYfDsiyfeuqp1Wq12+2CIIiiaDQabbdbaNA+WcRgMNBar9drIQQCqyH4KKVFUYRhGMdxlmVt2/KjdKaUQmju93vkqei6brlcjsfjIAjiOH733XfZcYk8Qkjbtl3XKaWEEFmWcc7btoXfXC6X169fb5qm6zpMOGez2f379wkhWmtCiDFmMBigBsdxptPpnTt3jDGQoVpriGMEMlNKEdmNWGNYdSkl5szj8fhwODTHXM/j8fj+/ftSSiEEIaTrur6RJycnWZah8xljk8kEmbJx7YiPxumiKKKU+r7fV+J5HsKoV6tV76khnYMg4JxPp9PtdssY67oOWhnOF32OD0A/LYcp7uf5uFgUYIzhcHQyijHGUAzKmB6DpnE4iqHf8JIQgm2oW/QnY0xKOZvNcAqtNRaTxLlWq9V6vb558yZjzHXdzWZDCKmqCjk3tNYPHz48HA5f//rXUf//kbIs7969i0Sfs9nsmWeeSdN0MBi4rnt2doYGV1WVZVmSJMaYuq6VUnhYwhhDs83xOUFZlmiq4zhN0yB2G5/V/oP68OHDH/3oR+9vh8VisVh+Gl/96ldv3bqVZRm+Wsl7o5vxEhsYGbHRv4+xaTgcPvfccyi23+8fPXrU6+x+YMKYVRSFEAKPEl966aXnnnvur/7qr+zvVCwfCv/5xBvgd39X37FYLBaLxWKxWP6X8vLLL7/xxhs3btxI01QpBdfm+/6zzz6LXArXr1+nlEopMVVTSjmOU1UVop611r7vb7fbJEmEEEgNURQFLCQSGiDX83a7xSFCCMzrKKVRFDVNwzlHKGsURXmec86jKOq6DrkLoihK03S/35dlGUWRd1z9zxiDl5h2Hg4HJJ1Aa3E4YywIgjRNt9stak6SBO1v29b3fcdxsAxd13Wu68IUI+WF4ziY9xpjENKLAFjsdY8rExZF0R5zBPd7e8OL/BjOMWky2oZ6giCo69pxHBxYVVVVVVpryHHMhxljxpiyLJFbg3O+3+/ruuac9/Vjm1IahmFVVZxzXHWe5zgcktrzPLQ5yzJCSNd1kMtQwBDuXdc1TVNVVRzHjDHOuRACvYrDARrGGBNCoMcwk0fji6KAJRdCoOThcEBkN84VBIHv++5x2cn5fB7Hse/7TdM0TaO1RvMcx9FaSymh+JHLQik1m80Oh0Mcx4vF4nA4XFxc/Nmf/dkPfvCD//pM/08oy/LRo0fvvvvugwcPpJR4DIDOR3gdbj0+tLi6ruvgLxhjTdN4nlfXNXJ87/d79GqWZXDQ+/1+tVq988477z+xxWJ5IrE5oD90GGN/9Ed/xDm/OkTS4y9syJXf5eD9q7vw5ayUevrpp+fzue/7fZ2u665WK3x19wf2VTHGMOjjQaPjOFfzOFksHxg2B7TFYrFYLBaL5eeNj3/84y+++OLNmzerqkJ2WsbY2dkZDGNZljdu3AjD8PHjxycnJ1VVzefz7XZbVZXjOE3T1HU9Ho+LokC8cFmWvYkej8dSyt1u5/u+OIYwh2G43+8ppXEcI+r5/v372+0WuZjzPA+CAMvHIaEEJpOMMezCtJBSutlswjC8devW/fv327YlhGCxwfF4rJTabDZxHMNrI+CXMbZcLk9OTqSUUIqEECjXi4uL4XCIYO08z1E/IWQymdy9e7dtW8QUx3GMuGacbjKZHA6Htm2hpJHiAy+NMW3bojNhmbuuk1LKYxaOtm3jOA7DkFK6WCz62TVjTEo5GAyCIMCsGHNvSiljDGYZPTmZTJDnRAhBCIERdhyn7wfkBiGEBEHgeR6KbTabfh4exzHkL1QvCvcbxhg4a8zh+6tmjBFCpJSnp6eEECEE5u3kmAYaLgAyuq5rdIWUEjc0z/M8z5Mk0VrjFKg8iiIE9xhjpJT9z6Xbtu26Lk1T13Xh5Y0xcRxXVXXv3j2l1Ha7Xa1Wf/M3f7Ner9GM/0vyPP/+979PCImiiHOOsOjpdEop9TzP931kNodwh2iGc8czAHwACCHQ0JeXl1io8P8tH4jFYrFYfip4Io5xpP9jgLxXMWMA6nf14H3G2OnpaZIk/fuu6964cePhw4dlWWK0QuX4I+HqiKa1FkJ86Utf+sY3vrHb7foaLJYPBiugLRaLxWKxWCw/V3z+859/5plngiBYLBbT6XQ4HLque3Z2huXvTk9Py7Jcr9dd1xFCdrvd6enpo0ePZrNZGIbr9ToIAkjVKIp8359MJufn54gGRQzRcDgcDAb9kndpmsLMwq5WVQU9KqWEQAzDsK5rxJZSSne7HXIBbzab4XAohLh27dqdO3ew2CBigbEW4mq1QoJgpVTXdeyYWXgymTx8+HCxWIxGozRNOeeIetZac85PTk7effddaFBCCGOs6zooUc/zjDGDwUBKiTkq4nbhrx3HwVEQjojMQmpsxFNjgbu2bdu2FULAViNS2xjjum5ZllD8/WKDcNCj0QjZGxzHYYxBVXueRwjZbDZVVfUTZoQwB0HAGGuahjHGGOOcp2mKHyxDLpdlSSmFPg7DEPcFtcGV6+Ovj2GiMQmHcZ5MJoQQcQxw7q2xMUZKaYxp2xb9LIRomqZt29lsRinFNF5r3XUdHjyQ96rtnrZtR6NRnufT6RQ9X1UVegn9vN1usyxzHCcIgtPTUyws+eDBA9/3F4vFX//1X69WK/IzoCgKQsj3vvc9vEzT9OzsTAgxHA45567rIlgbe9EPRVFcXFzs93tjTJ7nfVUWi8Vi+Z/y5S9/2XGcqqrY0T7jX2CuLD+APxjI8REpdhljoigKgqAvBjjnzz333A9+8AMcghGQUmqM4ZzjbwA8ujbGDAaD11577e/+7u/eV4nF8rPGCmiLxWKxWCwWy88JURR98pOfHA6HWZZB7QVBgLjai4uL69evV1WVZdlwOKzr2nVdQkjTNMjJ+/jx4+l0GkXRaDRaLpdlWXqeh6jn0Wgkr0Q9QwT7vq+13m634/GYEDIej/M8R5jzer2O4zjP867rYJnTNB2NRogmTpIEYhTqExWOx+No7tiyAAAgAElEQVTFYgEnDk0Ju0oIYYxNp9P79+9TShGmSilN03S1WqEGcox6btsWEnkwGHRd17at53kQpk3TBEGAc+ElOQZbhWHIOZdSokOSJKmqqmkaONa2beu69n0fLhjHwh0jNrZtWyHE1TZ
wzgkhXdfB4DPGLi8vEYSOKXGSJFCZmEvjX5wO7cE1IjgXrUJsON6nlPq+DydOCMGTADwAQDg5upceEzrjEEIIeqNtW0qpUgrva62llFLK6XSKDkcfonk4V9u2xhgpZV3Xk8kEF9JX3h8VhuFqtRoOh+hzKWVRFHiEQAjZ7/eDwYAco6oppU3TIMo+TdO33357v9//+Z//+f9fUc//HbIsg/EHURTBUFssFovlZ8GnP/1pDEC9gAb0mDGjf9nv7ccjY4zWOk1TjLmEkMPhYIxJ0xR/RaAMRhw8fEVVV99RSnme99nPfvb27dv37t1DGYvlg8EKaIvFYrFYLBbLzwmvvPLK008/DZM4m80mk8lqtTocDlhjbb1eJ0kipbxz587p6Wnbtqenp+v1+nA4QOxCXzqOMxqNLi8vEeCMUFlI5PV67TiOECKOY5hZYwzEIoo1TbPb7RBzXde1Oi7pBrs6m83atmWMcc6DIIAk3Ww2URQlSWKMaduWEJJlGRZCbNtWa42pI3JDI4wah0wmEyklIqGw2KBSarFYRFGEpQ57nY3FBuVxdUEsGIgrYoytVqu2bYfDYRzHhJDlcimlHA6HYRgaYyB5m6bBZBi2Oo5jhAO7rosEGkII5C8uy1JKSQjxPA8JHIQQCJ1GDg1ytMme51FK4frRTs45kjsTQvgx6pkQwhhLkiTLMnNMi5nnudaac26M8X3f8zz08HK5hN5FDTDRqJkx1rYtPhu4FszVtdZCCIS9Q0/3MppSitqMMQisNscfTRtjoNoR0ez7/mq1GgwGqFkppZSaTCZFUUBG13WdZdlkMjHGlGU5mUwWi0WapnmeI/j929/+9sOHD9HyDwtrny0Wi+Vnymw2w6BwVUBjXMMAZ34i/wbewQDUdV2apv0u/HoG7ziOo44/e+qPwgbecRwHT0allG+88cadO3esgLZ8wFgBbbFYLBaLxWL5X89nP/vZp5566tq1a8gh8Oyzz+73+8VicXZ2hhChKIp2u52UEgkfOOfQx0mSwKW2bVsUxdWo57Zt9/t9WZbz+RzTQs/zjDH7/X40GlFKkSd6tVoxxqIoCsNQSmmM2e12aZoiTUfXdavVajwe+74P6YwVC8MwTJLk7t27w+GwD9pVSm02G8TMIupZSoll7qIoWq/Xl5eXiCzmnHdd13UdpqyUUqSrhoftRSqyZzDG0jSVUkJ/U0rTNIXdhjiGnkZMFpJESykhiJVSUsrRaBRFEWNsvV43TQMfjclw0zSu67ZtSyktyxJpoAkh6/UaYeaop6oqGGFK6Wg0OhwOjDHGGFJmIzq772REPSNLNQx4nwMaNUgpfd/HBPtqDmjXdR3HQZwXwr0xIUclQoh+CT48Pzg9PWWMGWOMMTjKcRzIaGMMJuqj0QjH4ofPeZ6XZTmbzfBRgXQG+qfRtm2WZVEUdV2XZRnnfL/fn5ycMMYeP36cZdk//MM/9DkxLBaLxfLzymg08n0foy2ebr4PDEbkSkB0v6G1xpiL4QyFHz9+3HXd2dlZkiQYg3AItnEsPSb6wAaejzqO8+lPf/ov//IvpZTHk1ssP3OsgLZYLBaLxWKx/G9lOp2++uqrzzzzzHg87roOWjDLsvV6jYncgwcPEPUspZxOp48fP57NZtvttigKKeV+v59MJlrrNE0fP36MyGLf9xHSm6YpEjXu93vOeRRFbdsiZQf2CiG6rkMN+/0+jmME7SK4qSiK4XC43W6llJRS+NB+eomY3MlkAnXLGDs5Obl37x4KgDRNLy8vp9MpCg8GA0wdCSGUUlhUKaXjOIyxruvatlVKeZ4HsZvnOewwIQRxT23bIouFMQb+Gnshr+UxC0eapn3KDgQvSykRDD4ejw+HQ2+ToeBREnXWdS2E4JyPRiMhRG+WgyBApLPWGl1a1zUhRAjhOI7rupiN9+HMxhghRJZlmEUPBoMsy/rpNLw2Y4wQ0jRNH/nluq7v+5hmo0KUx4H9SxSLoqhpGq01lPp8Pl8sFoSQ2WyGdqJwWZbo3qZpJpMJzrvdbpE8pG8S6j85OcEyUMYYrTXuFzo/CILFYjEej7Msq+u6aZrvfOc7b7/99vn5ObFYLBbLzzsnJydCiMPhAFPcgzELYCjpx5R+A080u667WvLi4qJpmizLkiRBGRRmxxUIlVKonFLKGKNXnk9/6lOfwlPzvkKL5WfNe566jMdjrNRssVgsFovFYrE84XzhC1/45Cc/+cILL0Bu3rp1K8/zLMvm8zligoQQWuuyLHvxCkcMWek4TlEUZVliibzRaOR5XlmWh8OhKIooioQQrutiOtd1ned5bdumaeo4zmazcV1XCOF5npRytVoh8lcIUZalUqqqqjiOHcdJkmS/3/u+j+QSCNB2Xdd13d1uF4Zh13Xb7RYutaoqWGCEOOV5LoQghMCJF0WBsGXXdRljURRRStEw+HGsrNib2bquceHYSynt9XQYhpvNBpHXlNLD4VDXteM4juMYY2Dnsbd/CY9MCEGSDRhnpRTybGBXnucIE+acr1arsizhqZVSfRx3H+AcxzF0f1EUmGMbY2CZCSFw+n1tqMTzPMyisyxDJ8M+Y17NOd/tdkqpsiybpkGMc1VV6IemabBUY9d1ZVlmWRbHMaUUBwZBgL4ihCCCm3POGMOpMY1XSgVBgHYqpRzHkVIi+Fop1TQNZLqUcjgcIjNJmqZ1XUdRhLD6oiiWy6Xrurdv3759+/a3vvUtu6yfxWL5wPjMZz7z3e9+9/3vWj4ofvu3f/vVV1/FQ8p+/MWQh6Gtf/CJQa3fiwIYfSilZ2dneLOu6+12++yzzyKD1t27d/t6tNbO8fdA5Epejr6eOI593//Od76DAhbLB4CNgLZYLBaLxWKx/G/i5s2bn/zkJ09PT+fzuVIK1g+L/jHGmqZZLpdJkpyfn89ms6ZpOOeTyeTOnTvGmLZt8zyfTqdVVSENheM4ZVk6jgM1DAdNCMnzHNUqpaSUfdQzY8zzPEKI1vpwOMRxjJXluq7bbDZxHA+HQ6RvNsbsdrsoim7evHn//v3RaIQJJyEEphiO2xzzMxJCTk5O9vs9joX97LoO0tkYM5/PcRXquH6RUqrruq7rUE+SJF3XBUEAkfr48WNCCLR4X7htW5wuSZKqqiCdJ5NJHy7NOR+NRnme13WNSSxiin3fx2w5yzJk5MDEuG3bw+GA63KPKaHR5zDIaEwQBEiIrLVGHyITxXK59Dyv7xnHcYQQaLDv+7D8xpj1eo1ganWMdMb7lFIhhOd52MbVoXP6XuWcO46DfqOU8uMShdiLdxhjKMA532w26P+u6/plnSilfZ34t//U5Xkex/F0OsUFdl2ntUZv13W92+1OT09d110sFovF4nA4rNfrb37zmzi7xWKxWD4ivPLKKxgdMKD0w1A/xBhjII77XeSonlEAIxQeVFNKn3rqqdVqhUGWXLHMqIdzrpTS74225pxjJBVCfPGLX/z6179uf4Vj+cCwEdAWi8VisVgslv8d3Lhx41d+5Vc+97nPzedz3/eNMZPJpKoq3/eTJEFK3zAMKaWLxQLpesfjseu6QR
AwxoQQWuu6ruu6HgwG8J7GmP1+r5Q6HA5RFA0GgziOi6JgjGmtEeCcJAlihKFWYWmhm8MwxIFd1xFCEK5bFEXXdVVVJUnieZ4QAjYT23EcI20xlG6e5wgoxpwT4cZlWVJKPc/ro5ixF1NTrTUmn9DEqLY/FtdFCDHG4JJxLApj2xzjmnEt9Er6ZiHEZrNB4+M49jwPKTg453DExpimaSilbdtKKaFfB4NBGIZFUYRhiOtar9dIXoHykNFxHAdBANePAGTf97Msw/3VWvfb6K48z7XW6AHf93Glu90uCAJcI+f8cDi0bdtHQ6ORmGNTSnHfcaPxknOutdZax3GMvb2wxrydEIJixhh8nHDVkPvGGKVU27ae58EyJ0mCDsHTjqqqKKXb7RZR8HDQQRDcuXNntVp985vf/Ld/+zdco8VisXyQ2AjoD5evfOUro9GoaRpCCEYrSimGGEIIY6xtW6UURreru/qX+MsEf6gQQoQQ4/E4TVMMfLdv30YZFMaj2d53kyv5PQghXddhBPyXf/mX/iwWy88UGwFtsVgsFovFYnnSefHFFz/1qU9FUTQej5VSvu9XVUUIQfKKqqr2+/3169cXi8V8PocHHA6HRVEsFosoiqSUo9GoKApCSNd1ruuWZVnXNdImuq7LGCuKIk1T5PYdDAZd1202G855kiTr9frs7Ozy8lJr3Xvq9Xq9XC4JIWEYMsbgIqGzh8NhlmWYZGJaqLVWSsGBLhaLk5MT7DXGzGaz27dvI2yWUtp1HUKWMFGcTqdYEhBWVyklpaTHUN+rCZoJIVg/EIHMaJKUcjweh2FojMGctpfCcRxzzrGyH/YieNlxnMlkkmVZVVWYnSI+GtHilFJ4VVw1pVRrjewTjLE0TRENjVnxbrdDFDMhpGmaoiiUUugT3/cdx2GMEUKgpDEVd13XdV3P87TW/cKD6IrdboeqcKzneZTSy8tL3/cJIYwxtNYYg22I5n4XzsWOyTqQtgVaWUqJvM+4I4wx/NtP2vE+moFb2XUdlL0QommaqqqMMUmS7Ha7+Xx+OBzm8/lqtbp79y7nfL1ev/POO2+//fa///u/5zbnhsVisXwkiaIIgztGk358IVfCllEAu+iVTND9v0qp5XI5HA7xUHY+n1+tBCMpXmIERA1XNzA+Nk3DGHvttdfwpsXyAWAFtMVisVgsFovliebXf/3Xb9686fs+zO92u728vHz++ecJIcvlcjAYaK0RGKuUunPnzunpKUKhhRBKqaIooihar9ewonEcSyl3u53jOGVZDodDJOqt6zrLMs65MSZNU+Rh2G63nPPJZAJ9udlsZrOZ67pZlt26devBgweILdpsNmEYlmUJ++y67nw+v3v3LvbmeY5M0KvVajweIxdH0zRKqd1u53nejRs3sizDnPPk5OTdd99VSrVti5nkcDhEVJQQYjabIQtH13VCCH7M0YEQb611v34gmn3nzp26rhHmPJlM7t27NxqNoJ4Xi0XTNJiCMsa6rkMwNTnGYUVR5Ps+pXS1WrVtOxgMEP8Ldes4DqKwpZTIs4GSiN1GxDGMv5SSEAJNjKBjSulut8NdY4xFUdTPmT3Pgz2nlMKGwyOvVqswDPkxunm/3zvHSHD4YjSbc66UwgZuJbny++Wrs3qUx/ycEKKvJOXALnQyjDmkAG4KOjyO46IohBB5niMhyX6/D8MQUeGw/MvlErfpxz/+8Y9//OO33367r99isVgsHzV831dKKaWMMb0O7jE/IaCv7iJXNPT5+Tl+B3a1DDk+YcW2MaZt2377fWfEGCel7NNJWywfADYFh8VisVgsFovlCeUTn/jEZz/72ZOTk7Ztz87OmqbJskwIcf369fV6PZ1Ofd/P83w2mymlXNeFfMyybDgcXl5eQllyzrMsY4xB+/q+DyUqhKiqKoqiPM8RcmuM4ZzXdc05T5KEc16WJQKoOefj8TjLMrjXKIoYY4SQ1WrFOU/T1HXduq67rkPMb5Zlk8nk4uKCUgoVXte11hp7KaVFUUBZwiP36ZhhRaHO8bLPlSGEgEV1HAfXy35iOUEUhrrFbLNtWyEEjsVLxA5HUWSMQfCyMQbqGVocMdTI14FTYKbKGEMcNCEkjmPHcdbrteu6iJU2xvi+b4yJoggX1bZtGIaIYs6yDHkzGGOox/M8x3GEEJDRWmtULoRAGVyj4zjYBatOCNlsNp7nYZsfU23ghqIwpRT33RiDbUop7i+6t23bpmn6xwxaa/ycWWvddV2app7nQYXro3eu6zoIAnyiGGNVVWVZNp1OhRAw0RD92+2WUur7/oMHD4QQP/rRj/75n//5H//xH1er1fFDbbFYLB8ONgXHh8vv//7vu66LH0v1o1iPMQa7nPfmgO7BHwDmSlYNjFw97777rhACJQH+DMB2XwxvYij0PG+5XN6+fbvfa7H87PjPpTksFovFYrFYLJYnis997nPPPffcxz72sclkcnJygpzLp6enlFIEwJ6fn3ddFwTB4XAYj8d1XZ+cnEDUPnr0CApSCDGdTm/cuEEIQVpe6N3ZbAZxudvt4jj2PA8umBDiuq7v+1mWITJ3u93CbBZFcfPmTRhMxth2uw3DcDab4SU/5oZer9dFUcCWjsfjfu/p6SniZ3F1SikE2PZTUEhYzC37vU3TIBK5rmulFGaMiNdGGa018l/j8MlkkqaplLJtW8xmq6qSUqI2iNe6rtESCGL8jHe/30PHD4fD0WjUtq2UsmkaVFVVFYSs7/tFUVRVhUrG47GUEorZ87w+5wZehmEIz77dbh3H2Ww26/WaMbbZbGCWoYYZY9jmnO92u6qqmqbBGXGPUCGkNgpDUvfvoIfft4EpNzbIcQaOiTc5RoRhL7aNMdDN6Mmu6/DAA1eNbM54kiGE8H3f9/31em2MadtWKVVVVVEUnPPz8/PLy8u33nrr7//+7//iL/7i+9//Ps5osVgslo8yGLwwfl01wkBrjTGrf6mPv9G5WphSioHm4cOH+pitq6cf+zjn+HPifWNfD54ld1331a9+9X27LJafEf+tFBw/+X8D/OSH2GKxWCwWi8Vi+b/ni1/84nQ6ffrppyE6B4PBYDBo2xZutCiKa9euVVVVVRUkbNu28/m8LMskScIwxK/6Hj58OJ/Pt9ttkiRJkkgpjTFFUYxGo8PhEMdxlmWEkP1+r7VOkgTvI6AYEvPpp59+8OABPCOiX7uuWy6Xxpg0TRljRVFordfrdZqmQRBAcEMrs2MaB601LPN0OpVSYkLoOE7TNIQQrTUs+X6/v7i4SJIEkblJkkRRhL/D9/s9OS4ZhETPo9HI931Um+e5MSYIAs/zCCFt20ZRFAQBIUQIgVahV33fp5RCJRNCHMdBQDcagIUWIViRxtr3fSGEMcZ13bIs4YhHo1Ge5zgKF4IGUEr7LmWM6WO+bEx92dEyr1Yrx3FwuDFmvV4jMprz/4e9M+uV3LjPflWxuDS7SfbeZ5kzZ0aa0cixJXhPjCRALt4bXeQi9wGCfJ98lQQIENgJAiibAQNG4sC2LNmyNDNnztoL2SubWy3vxRMyrfHrF+N4l
DhJ/S4O2GRVsVjsg+L/4b+fsu7u7rzaG5pSmiQJrEXQVZQnhKAbpPZrZozJ2nYDl4kOoEv42AjKg8FgPp+j/dlsxhiDKI8kbtw7rTXKQ3yHvpznOfR0zvlms+n1epxzKeV6vWaMFUVxcnJye3tLKb26urq6unr//febzhgMBoPBwOv05JcP1P4bWmvLstRnLaEa2Q2TGg4xxna73ccff3x0dNTtdlEAUxsKHBZ+CTRIKeWcV1XVVDcYPm9eSYD+yle+0mzTgzR+8/MNg8FgMBgMBsPr5fd+7/dOTk5OTk4opZAXYQFRVRWl9Pb2Fj7CnPN2u51lGQTW/X6/WCw8z2u1WkVRPHz4cDablWUJew1EZWgHtsto+fz8/OrqSkqJjF1kPW82m8brmTHW6/XiOKaUWpa1Wq1OT0+vr68RH1qW5TgObCvwcTweP3v2DB8ZY1pr5CkvFosmkXk6nQZB4Lou5FrEnAg+ES5SSkej0Xa7raoKSnFZlpRSeGtQSmH0XBSF53mEECyZmKZpGIaLxWK73TLGIOMOh8MXL150u10IwYvFoiiKZkCQ44zLJIRsNpsoipzazTnPc1yg1jrP87IscYMsy4LRMyGEMZZlGTKgcRTZypZlUUqr2mdDa43VHaGAYzyhy0dRZFkWxGVeg6Zs28blIyZntdCMzkMpxoBgD6nXXKJ1wIIhxQgrpZRSd3d3uC94Y0EpRSY4KpZlmWUZeo6/vu87jqOUYoxtaovwyWSy2Wy63W4cx1VV7Xa7VquV5/mnn3662+0++OCDjz76CJdgMBgMBgOp1ysmn3VqbsAkpetVcDGFNYdQHkebukVRXF1d2baNORcPHk2tw4q/CPbjJXS73caDzcuFDIbXzSt5QN8eMBwOf/jDH97e3o5Go+l0+nJRg8FgMBgMBoPhP8RXvvKV3//9359MJpzzXq8XRZHW+sGDB+QgoZUxFsex67qr1arVanmeNxwOoa4qpSDLRlGExQDTNN1ut1preGJg+cE0TauqCsMQQi2aXS6XruvCyhnZvr7vbzYbSqnv++v1Gu4KcDeGaolkYdd1syyDUgzpllIqpYQxSBAERVFst9tWqwVdFYUhyzLGsgPrZ9g3O7XXM3KTbdumdQYxnDSKorBtG2YXnHPLstI0LYqi2+1yziGyl2WJlGEMC5RrQkhzlDGmlHJdl1IKBxLXdZVSeZ43JZVSkF8551B+Pc9jjKFAEARQrsuy9DwPxZIk2e/3WuuiKPI8t217s9kURZFlmeM4yHRGlWYEsFKfUkpK6TgOhGZK6Ww2QzY0bjrnnNU/TG632+hwlmW4RoTrTQFSa9BCiCzLwjBcLpdSSmjWuEFIqMdooAxGzLbtoihGoxESzHe7HWRoQohSyrIsLIe42+2UUvP5vNVqtVqtZ8+erVar9Xr97W9/++bmpumGwWAw/OZgPKD/C3n77bffe+89zE1SSkzf5MCRGRvNpEYPtGM8qzQf8aSBuSzLMvxY5+7uDg8tmBDRZtPIYWsNlFK8Wh4MBt/97ndfPmwwvG5eKQPaYDAYDAaDwWD4/Gi327/zO78TBMFwOKSUtlotpdRqter3+1jlLwiCJEkmk0mapoivHMdZLBZHR0eXl5eDwaDRcOGtATFxOBwWRbFer13XdRwny7Jut2tZ1mq1SpIEYV6v11utVgjYoFP7vt/v9+fz+Wg0QmJRt9udzWZSSlKbSzQ5zpTS8Xh8cXGBSG+9XiMve7lcwqNjMpk8ffq0qioImkop6K24cKUUwk5Ekr1eD/oypRRnwYlgu4FEZoi27Xa7KIpWqyWl7Pf7u90OCcuWZcE7AlGlUipNU4jFlmXNZjMI9LC2WCwWWZbpOuVqt9t1u12ozLPZbL/fM8Zs28ahdruNBC4ApZgQYts2VH6tNWRrx3Hwcb1eI+UZ7SyXy+PjY1J7aCAexjaa0nWmM6XUqU2isR8FSB1F4+9gMKCU4vvQ7J/NZlrr8XgMhw2lFJaChOiMMxJCsizr9/u6zqFeLpdKKcaYEAJvC5RSnueVZdl0ldSZa1VVBUFwd3cXhuGHH34YhqEQ4tvf/vbV1RWKGQwGg8FwSL/fJ4RIKS3Lwix/eBRzGaUUM1RZlpjXtNbksy4cTXlMcHgoqqoKZmJ1e/+WUo3Wmj2o2OyxLItzLoT46le/alkWHnIMhs8PI0AbDAaDwWAwGP4reeutt95+++1ut+v7PgTo09PT5XJZFEWapgiiIJtOp9PRaGTbthACLhA3Nze+7ydJ0u12q6qybbssy/V63Wq15vN5EATwMhZCrNfrXq9n27bneVprKSVEauQpM8YWi8VwOIQAeij+YpVCdAPaa6vVIoRAR4YwKoSYz+eEkCAILMtK0xRHEU/2ej0Ek4SQ0Wi0Xq+RAswYGwwG2+0W6vlkMlkul2VZIrMYrtCccyiksN1wXRfC+uXlZVnnNeOMWmsovzC18DwPBh1VVeV57rouIQRtQqomdUJ0u91G7jZjrCgKnDEMQ601cpAJIXmep2kKrZxznmUZqbOrkC1u2zZCX1RHSdu20SVCCDLWZ7MZpRSyPvqADYTEGC4wGo2g9s7nc5Q/LEAIaTqgtZ7P55PJhNRpYqoGieqDwQBjKKVEx5BIjpuCG11V1Wg00lrjSiE9Q3ZfLpd5nkdRVJal7/tSyhcvXnieh/sYBMFHH330wQcfGPXZYDAYDL8MzNeY/RsBWtdvTzHfUUo555ZlNQI06mIabUBJTJ2Y4rXWRVGgHXLwbrtpBNtNXczXlFI8Nfm+bxkB2vD58ysL0Lvd7uVdBoPBYDAYDAbDf4j/83/+z8nJCbQ/x3Gqqur3+3Ecbzab4+Pj5XI5mUx2u12aphCCb25uJpOJ1vro6Oj29lYIsVqt2u02IaTf7zuOc3Nzk+f5fr8/Pj72PG+73Z6ens5msziOOedVVXU6nQcPHlxdXaEuvBeklJ1OR0qZJEkYhkEQaK2n06mUEvK0UqqqqjiOoyjyPG+/3wshhBCU0uVyeXZ2lmUZgjdKqZRSCIGPkEHLsoReTCnFRwSHqla6ERmiTSklZF98hOrdNEIpZYx1Oh0hBJRfSmmSJL1eDzEkY2w6nUIIbqJT13URc2ZZ1ul0Wq2WZVnz+RzmGGgzz3M0aFmWUsq27d1uhxAX1SGvI3xFSjXCV/hvSCnjOEYLjLHZbOb7PvKpMSYYRpwLQTKlFB3GNr4SKDObzY6OjnAuWmeBgWYbG4wxKeV+v1dK4RZHUbTb7YIgoJRWVYU2pZRFUfR6PVL/xhlO30mSCCHa7bZlWVprXP5+v0e+OTxSCCHQmuM4nkwm19fXRVHc3Nwsl8vpdPr973+/6ZvBYDAYDL8IBGjM6eSXe2JgGnIcBw8Y5JdYOWNmZIxhvhZCEEIwtzZT5GGtRn1+6RD0bqXUH/7hH/75n/95U95g+Dz4lQXoZ8+eYePp06efPWIwGAwGg8FgMLwqv/3bv/3w4cPhcPjgwQOs+3d6emrbdpIkZVkSQpbLZafT2e12SD51XRdZz4yxVqvlOM5oNEKWtBDCtu2qqjjnURTleQ4puRE3
HceJogjqoed5m83m5ORkPp9DFU2SJAgCKJioglQgLO6H3g4Gg6urK6jGlNLJZPLpp5/O53OlFIyhEQFCPJ1MJp988klZltB5h8Phs2fPhBCoC8W5qiqoulLK5pBt2zjzGJYAACAASURBVEhlkrXzxmazQdSKXGkI0Frr3W4npYR2bNs2EsBRixASRRHymgkhnHM4JqNwnud5nmNYWq2W1trzPPTEqdFaL5fL/X4fhiFC081mEwQBxO75fO77PgRujC0iWGzvdjtcDtKWMeaLxcKplzrEgN+7d49SOpvNOOewjeacw+KDkH/7cTGlFBdblmWWZaz2A0EAD8kbt6zX6+FOKaXCMMQY4kKKohgMBhhSQkgcx+PxOEkSKWUYhpvNxvf9sixXqxWsnymllmXBQQXtowN5no9GIynl7e3tdrt9/vz5arX627/9W3w9DAaDwWD4/4BJTUpZliUeM7AT0jCglOLtb6vVgrcG+eUCNP5yzjHnMsZwiqYAwEec5fAoNjjneHz6kz/5k7/4i7847IzB8Nr5lQXoBpMKbTAYDAaDwWD4D/DNb37zyZMnYRhiNbmbm5ter/fo0aPlcnl0dMQ5Pzk5gT2xUqosy/l83u/3r6+vR6MRhFHP85C8LISAoLzZbBzHSdO0SXCGEfNgMICQDZUzSRLGWBAEiNkIIavVqtvtoq6sF6nbbDbdbne1WkGChExp27YQoqoqz/Mopb1eD1IyQj4I0EIImF30+/2qqoQQUGkhECulOOfj8fjp06dNxDgcDrfbbRzHrVbLtu3JZIKQUmsdxzGMmB3HUUoVRdFut2Gskec5zqhri5JOp7PdbpGVvF6vSS3j9vv958+fQ/9VtSU0FGRCCIRabNu2jYiXUgrXEejRlNKqqrbbbbfb1VpblrXZbHBUa73ZbKSUkIZd18U1QhdeLpeu68LF23EcyNyMsaIoYHiCc1FKtdacc8TPhJD5fI7gfDqdQvXGN0drjfBYa41bg1smhIDQjFqoglqMsSRJhsMhY2w2m0F0dl1XSonlBJF97/s+NH1Zp7Hf3d35vt9qteC4cnd3d3NzI6W8vLzM8/zi4sIkPhsMBoPhFcHbXCGElBJzJallYmxorWm9/AOrX4Q3BQ55aQ9K4tVsM0WSWvLGR7SP8ofbhBDbtouisCwLPyxr9hsMr51XEqBHo9HLuwghhMDqzmAwGAwGg8FgeBW+9a1vnZ2dBUHw9ttvp2laFMXR0VGapmmaLhYLKeVsNjs+Pr65uRmPx3A8KMuy0+nked7v933fJ4RADG21WpRS27bDMOx0OkmSJEnSJDjbtr3f76WU6/W62+26riuE6PV6WC4PWudkMpnP59Bw0RQ0TfhsIA4UQsDdwnGc8Xj84sULKJ6r1SoIgt1u18jKo9Ho2bNnZVnu9/t2u91ut1erlRBCKbVcLlutFvqDc3W73bIshRCWZUFLhS0GIWSxWHie5/s+pVQIgQUDIRAXRYFEYMSxVVX1ej14XLiui1Rx27YZY1JKeEdAiu10Or7vu66rtW5sJSCjw6ADdyfLMpwLKnZVVbvdjlLKGNtsNkgtp5RCw8WJtNa2baOK1hpaP4Jey7Js28Z+FMBoNKHvbDZrChBCcL1FUVBKy7J0HOf29pZSikFDGSllk+jNOYelBkRnzvl6vR6PxxhDbBBCNptNGIbL5ZIx5vt+kiSdTgeic6vVyrJsOp3iS2VZFl484PLn83mapsfHx2maMsbW6zUi/MvLy7/5m79B4waDwWAwvAp4qNBaQ3rG30YjxsyIv5CSMWlSSlHgl9FUR4OH+w8rYg79f9K8sn306JERoA2fK68kQONp2GAwGAwGg8Fg+I/xta997dGjR0EQhGHoui6UPt/3b29vj46OHMcpyzJN07Is4zju9XpBEMxmMyQFbzYbIYTneXEch2FYVZXjOKvVarfbjUaj+Xze6XQ8z8uyrFm1bzQaoQznHEbGUCGrqorjmDHmeZ7neRA34ziG4W8URVdXV6x2gZhMJs+fPye1KzFjrNvtCiEamRUqcFVVnHNKaRRFVVUhvIR4PZ1OwzCEVi6EQM8hEKOi4ziEECEEtG9aL5QnhLBt++joCA7FiFGhz2I8fd8vigL50YQQuGBvNhv0BLnGnudZloWLhUW11vrwEKWUcw4VmNQZ0M3lb7dbeGJorR3HgeJMKV0sFo1OrWvTZIwGNiilhJAkSRpLDZwLsXccx5DCMaQYDVIvnYQgWWuNVw5xHGutpZT9ft+yLM45BGgU5pxvt9vxeEwpbU6K68X7DM6567royWAwUEr5vp9lGaRtIYTrur1eDwO+2WzyPE+SBBp0u92+u7vDjVgul4SQ6XT6/vvvLxYL3AKDwWAwGF4drTWmSErpS9uYE1GmKArHcTAvvwQKv7z3QG62LEvVyyOTA+eNpmJTEo8c2MM5F0K89957//RP/9S0aTC8dl5JgH7x4sXLuwwGg8FgMBgMhlfgq1/96v3793u93htvvMEY2+12Z2dnaZpOp1OtNTTf4+NjSunp6Wkcx2maKqWWy+VoNKqqajAYdLvdJEnW6zWU3H6/zzm/vb3d7/dpmmLtQeT+KKV2u12/31+v1+12e7PZQAuG0EkI6Xa72+0WCiZjbDgczmYzBGaIxIQQhBApJTTTfr8/n8/hQM0YQw5sE9odmjtrrYUQcK9GVvJgMNjtdmVZIuoTQjSdGY1GT58+bbRUIYSo5enRaATrD9u2tdY4I2RlKWW324UoTyl99uxZGIZQtOM4rqoKRwkhy+UyyzLOOQRuOJMgfzxJkv1+X1UVEsDhTILLybLM930MJqXUdd0m+Ro6LOJhSM+UUqXUfD5vZG5KqW3b6LbW2rKsJn7G2DbRr1JKSkk/uwIhammtKaXD4RBjNRwObdvGCENAL4oCdswoSQiJ45hSSiktiqLb7WIbp4D7c7vd3u/319fXlFIpped5/X6fUiqE2O12+/0ea076vr9areDcjYs9PT198eIFY+z6+vqDDz744IMPcDkGg8FgMPxKNNMcJnRMYUDXkyN24jHjcAJFgZe28ZHUDhsAHzGx4nTN0ZdaaE5KCLEsq6qqt99+u27YYPhceCUB2mAwGAwGg8Fg+FX5xje+8fjxY9/3T05O8jxP0/TevXue593c3Ewmk3a7DVMLyIvIHe50OlA8pZRZljmOM5/P4VmM9fdWq1W/32eMYa05Qsh2uw3DUCn18OHD29tbpdR6vQ7D0Lbt8XiMTNj5fN7r9brdLhaXE0JAzvZ9v9/vL5dLVWfvRlG0WCyEEPDN8H2/WeCOUnp0dPT8+XMhBLJrlVIw00AgB3NnrTWOaq2hLEPOHo1G2+1W1baPURQVReF5HkRnODygHVkDiRx6NOd8PB7D1gNqbxAEeZ4jlA3DEMOFj8j8xUetteM4jQGI4zhSyiAIyrLs9XqkXkVQa805h4JMKZ3NZhBktdZWbaaBIBaBMaJWxhjnHLcMVdCUUopzblkWrhcdRpXxeGxZlhBisViwA02fMQbVGNtCCNwUQkhZlmVZTiYTVTt4LBYL9KQoCviloEt
JkozHY601ZGuo2LPZzPM8XKzWerfbXV5edjodDPLd3V2v12u1WshkR5o8VrupqipN05/97Gf/+I//SAwGg8Fg+I+CGVBK6bouIQQzHXY286PWmjGGJyJsNHsOm9L1+9fDv5gEMY1i6kStpvBh9WYDhzjnRVHgLbXB8PnxSgL0cDh8eRchhBDzAzSDwWAwGAwGwyHtdvuNN9744he/iMTkXq9n23aWZQ8fPtRaI8MUsdZgMNhut+fn56vVCgvZwXyDMQZ5FBIwdMxut8sY22w2iJG22y1sN5RSSZJwzjudzmq1goiptUZaaxAEUsr1eu26rmVZEKbTNJVSEkKgOCPSK8sSTbXb7eFw2Ii8hBB1sGoQY6zb7c5mMyEEMoUhMZdlCbE1DENkTDuOo7UWQlRVJYRAXjA+2raNZptDlFIpJQ4xxgaDwXq9RjF2INFSSiGSott5niOv2bKs2WyW5zn2U0qbXGZEpLe3t1EUcc6llHCvhqDMGHNdFyellLquu9/v0Tff9zFu6APnHNoupRSKM2OMEGLbNuccN9SyrEZ0ppSiM9jGTUHfaJ0FNhqNMCxSSkqpbdsohs4cbqNZLD9jWZasU5sRZm82m8lkorW2bRvDTsi/5Vkjv7vb7aZp+vTp01arJaXknGdZFoZhv98viqLdbidJgiUWHcc5Ojq6urrinG82mw8//PDv/u7vmu+2wWAwGAy/Ps1sSD8rDTcfoSM30+VhmQYcaqpgomSMRVGEX5KRWtpGSTyZNHtQERuYai3L+oM/+IO///u/PziJwfA6eSUB+uzsLEkSbPf7fWz3+30jQBsMBoPBYDAYwJMnTx49evTkyRPGmG3bEH+FEK7rwsgCtgnHx8er1Wo+n08mE2Q9Q2Xe7Xa73a7T6SBAQoKz1ppz7rpulmWIqbTW6/V6NBp5nldVVZZlhBB4PcMm+PLyMo7j4+NjLDcXRRHU7dVqFYYh5xyqMZRZBF3IcY7jOAgC6JtCCFobJY9Go6urKwipWmshRBAESF6mlA6Hw4uLi7IsIelKKTudTlVVKD8cDmFsrZTCUcA5n0wmq9UKVo9KKSyQ2AScUsomAQqiMxocjUboOSEkyzLkNaOrUspGVuacw83ZsixCiOd5nHPf99Ga53lQvReLBSRamDLv93tkoCNwhZ4rpWSMLZdL13WRd9xqtZrRQxleJ1BblqW11loTQnA7hBBJkjTiMi6N1ksLNiI1BqcpQwhRSmGNQUJIkiRa68FgQAi5u7srikIpNRqNKKWu62JklFKsduVGO0KIzWaDq261WjhLFEW4R+v1erVanZ+fK6U6nU4cx1dXV1rroijm8/n19fX3vve9f/9yGwwGg8Hwa4CJlVKKWR7zOGi2sYEZTQiBnZgTD4s1+5t2dD0F27Y9Go2w6gOqoDXMbpxzPBigM+RgiQvwR3/0R0aANnx+vJIALYS4uLjAdhiG2I6i6DOFDAaDwWAwGAz/+wiC4Jvf/OZoNILHhW3bVVXdv38/SZKTkxNkJSdJIqWM4xiC7GAwkFLe3Nz4vr9cLqH/uq5bluVms3Fd13VdZCIvl8skSXa7HbKSj4+Pf/rTnwoh0Gy73YYDQ5qmhBAsmnfv3r27uzsI3FCQoyhC/gRCtdFodHFxIaWcz+dRFIVhmCQJ1F4AYxBCCDRNSmkURWVZQjmF7zNjDMqsrO0s4E8NPRrqJztYpVAppZRqUpvRbFVVZVkWRQE5uCxLzjkhhDEm6mUJCSHj8ThJEsYYkseh+bI6URrJ3ZRSrTW0YEKIbduEEEj5EF43mw3Eetu20zSllHLOu92u1tq2bSjI6BVE5MVigYUWsd+yLM65lBICt2VZlmXN5/NWq4Va6Dar3bFJHfdip9ZaSglBGb2N47hJEi/LEqbM0+mUUjoajQghyHcWQiilMEqoCEEZY4jLwWsMXSc+397ePnz4sKqqIAi2221RFM3Clev1ervdnp2dua4bhuFqtfrpT3/KGMMp0jS9urp69uzZRx991FyFwWAwGAy/Po0A3Wy8XIIQciA3E0IwPR3ub6ZUfSA9NwW01mdnZ+12+8GDB9fX13g0AnioQBWIzk1d/MWjxcnJSVPFYHjtvJIAbTAYDAaDwWAwvMTv/u7vPnr0CKaBb775ZlmWR0dHaZrudru7u7t2u71arXq93nQ6hYSaZZnWutVqua4LDXq326E61hW8vr7O8xxGEEEQUEqhShNCGGNZlsE2erlcIq+22+32+33YIidJMhqNHMfZ7/e2bXe7XYRYq9Wq3W5jjb4m6IqiaLlcNhcyGo2w1pyUElpwFEXz+RxZurq2zqiqCuIsFE8kEzVHhRAQf6Mowh67tt0Iw7DdbnPOp9Ppfr9HmhKlFCJpk1C82+1YLfVCfEekyhjzfR8LG+KKKKU4V57nSK9mjCHU9H2fMZbnOc5IKZ3P51mWQU0mhDSZ4GgNp0OUi44RQtA+9mutkUuOli3LiuN4v99PJhNWQwiZzWau62Kb1iCsxc3VWh9G3VCcKaVKKaR+E0Iwbni7gBcV6/WaEIJkc8/z0Ijruuv1WimFBne7XZIkDx48QPdwjZ1Ox7Isz/OWy+V2u7137x7Ou16vP/30U8uyoEHf3d1xzler1XK5/Pjjjz/++GNiMBgMBsPrBtPc/x88cmC7mSIPCzQTq/4FARqT+Hg8JoRgEebtdosqhBC80m6qoPGmKUII3isPh8Ojo6O7uzu0aTC8Xl5JgMbDnJQSS0K7rks++2bGYDAYDAaDwfC/gfv375+fn9+/f//s7KwJZggh8/n83r170+kUxr5FUWy327IssywbDoebzWYwGMxms6urq9PT02fPng2Hw3a7DbV3s9lwzpMk6fV6d3d3y+XS9/00TaMoGg6HSFxdrVbD4ZBz7vs+pXS32zmOY9v2druNoogxtlqt0ILrup1O5+rqKkkSpRTE1l6vN5/Pq6pCYm+n00mSBNm1CMOiKIJI7XkeIQQVhRB44h0MBpeXl7DLoJT2ej3k5CL7GEeRY0sphVTd6/XQVdd10zRFSvJgMOCcox3O+WAwWK1WkIkppd1uF4sKFkXBGPN9v8luXq1WUIoppciDRgAJy2xUtywLvs+UUtu2saohIaQxPsa1QFmmlOKu+b7PawEa8SdoTs0Y45yjw/ho1asbNac+jGYRLR/uYXW6Fr4qTYFmD1KVtdadTkcpJaWUUsKtOwxDFNNa393dBUGA6ricfr+vlMLF3t7e4r5UVZXnOURn6OPL5fL58+e+76Nwu92ezWY//vGPz87OLMu6vLz88MMPjfRsMBgMhs8PpRQmOFIvzEsOBGWAnYAdvNDFlIr9h7On1hrFyMHyhuD4+Pjy8pIQYlkWTo05valIDhon9U+vOOd//Md//Gd/9mdNOwbDa+SVBOjlcvn222/j+f7FixdPnjzhnDemHAaDwWAwGAyG/8E8fvz47Ozs3r177Xbb87wwDG3bFkLcu3dvtVqNRqPtdrvZbBaLBUTDbre7Wq0mk0mSJEjLbbKehRCXl5fwbYAae319vdvtIEZzznu93mw2k1J6nodE106ns9
lstNbr9brb7bbb7ap2tMCigowxKSVkWUKIZVnL5ZIxFkURxEpK6Xg8zvMcudiIwZAiLaWk9XJ5YRiWZYkwbzKZwKZD1a7ERVFUVSWltG17NBo9f/5cH/hswBUanhL9fh96tJTSsqyqqoqiyPMc522ynhlj0+k0y7JGEa6qqtPptNtthKabzcY6WCaxkYP7/T5jDDHnYrFwHAepx2EYojziTMdx0B9KKcrgqJTScRyIyJZlYUMpNZ/P4ShNCNFaQ5VGdc45IQQJ7Ov1GmYj8/kcYndTBX8BuoE7orVGGdwOlIRSjBNVVdW4i1RVFUURyhBCVqsVVGbOueu6tm1LKZsW1us1GpFSZlnW7/erqsKSg4vF4vr6WghxcnISRdF6vb67u6OUCiHgLX5+fn51dfWTn/zEGG4YDAaD4fMG8yD+YnIE+rN+0IBSSn/BKvrwKObBZq4khEgpgyBoPgZB0Ol0ttstnpfw/NA0C/AgATA/2rb9ta99rdlpMLxeXkmAfvHiRRRFruviF3xxHNNf+C2AwWAwGAwGg+F/DG+++eaTJ08Gg4Hv+61WazKZKKVs2261WsjGrarqk08+OTk52W633W63qqpWq7Xb7YqiWK/Xo9FovV5DOry+vj49PV2v10EQDIfD2WzGOYfX82w26/V6yFd1HAdKrhBitVrFcWxZFsRuaNla681mEwQBsmLX67XWGmFVq9UaDoer1WqxWPT7/Xa7HUXR5eUlQiyEW5B3hRAIw5RSTYKzZVnHx8dPnz6ltZyqlIL1R1n7MsMGuvHZCMNwOp2WZWlZFtTPqqogBONonudNmvNmsynL0nEc27bxEaLzcDhcr9fQprXW7XZ7t9txziEQQ3RGxNjtdmktBMdx7HkeMrUbIwtKaRzHrusiAaosS6Q24/Ity1oul5PJZDabwcoDdxlCNq2F+8lk0mq1CCFa691uh3EjdajcNPWL4jKlVCmV53kYhpTSxWKBW2PbNlRsVedeYcyhehdFgYUEy9pBm1KK7w++ORgW6MU4XVmWeK+ArGf4O6MzSZLc3NxIKe/du+e6brfbLcsyz/Mf/vCHhJBOp+N5Hl4weJ633+//6q/+6pNPPsE4GAwGg8HwuYI31rqG1OtMNBMcwCHMv9Dcmgn3cBvFsIE5XSmFpXobTk9Pf/azn2mt8TocFfFXKYX2m9MRQqqqUkp1Op3DRgyG18grCdCEkPV63Ww3/zAGg8FgMBgMhv9JfOtb37p//77v+2+99VaWZWVZnp2dpWm63W6R5JskycnJiZTy6OhosVjEcez7flEUvV4vTdNWq4UMUyklsp6hOOMQ5zzPczQYx3Ge58h67vf7RVFYloXf2xFCXNeFwLrf79vttu/7cRwjeINMKeuUZzgau6673+8hGZP6x61hGK5Wq6qq0jRtt9vn5+dNYrLjOJPJ5Pnz50opKSV0WNh0iHrdeSiYjV2GlLKqKqjMhBAhBLKVoecWRSGlhAMGIQQp247jtFotxH6dTqdJHM6yjDEGX+OqTojGoTzPKaUnJyfT6RQ54PDugOiMYr1eb71eN3EsYww6b6/XWy6XiCeRUY7OILXZdd3FYmFZ1mEUioFqBg0opRaLRbvdxlA349lUwXkppVrroiiwLaXUWgshcBaUAThEKRVC4B7Ztq2UiqIoz3OMXlEUg8EAynKWZUVRIMzmnM9ms4cPH0opbduGqWUURZTSTqcTx3Gapvfv3yeEFEVBKV0sFjA84ZzDBWW/3/f7feTQOI6T5/lf/uVfLhYLXLXBYDAYDP8J4PU2pmBs46VyMwvrz4psSikI0C/tbKbmZiJGSdu2fd8nhAghcBZkke73e0zcePULmpNio2kQr2nfe++973znO01hg+F18UoCdBiGL+8ihBCy2Wxe3mUwGAwGg8Fg+G+F7/vvvvvuF77whSiKiqJ49OiRlDLLsl6vBz3x+PhYSjkYDBaLRVVVtm1jfxiG+/0eOcVIPV4sFlhv8Obm5vj4GOqwZVlVVRFChBBYVg4/C91ut5ZleZ4HY98kSRA+dbvd4XB4fX2NpF3OuWVZvV4vSRLEWvv9vtvtEkKQWYxgDCYbu90OOjWE1LIsZ7PZaDRq0m8ppVCcLcsaDAZYaZAQQilVSgVBUNZr+g0GgxcvXmitfd+3LGs8Hj979qwsSymlZVnD4fD58+fIayaESCmLosDgIOoryzLLMpzRtu00TSH+YgSKokBACIXUsizkSnPO2+22lHI4HCIHHNdSliWqo30kROP2weoagWijgGutl8slcoc9z0PHpJScc8ZYHMeTyWQ6nbbq9Q9pndqMAJgdCNOLxaLJh8JOiPjoM61/I1xVVb/fT9O0KArbtquqQlTMGMvzfDgcWvX6jZRSznlVVZvNZjgc6ho0zhizbXu5XCIJGntub29hryGllFIKIZAF7zjOYrH49NNPO50Oku4fPHhwfX1t2/Z+v8flW5Z1d3e33W4ppf/wD/9gvJ4NBoPB8J8Ppj9M4kIIpRTeZzezLebfBsjKzXSM2fZwD+ZNTKBKKbz1J4SUZYkfM+H1Nn5hVlUV9O6mqea8zU7M10qpP/3TPzUCtOHz4JUE6DfffHO322G70+lgOwiCH/zgB58pZzAYDAaDwWD478M777xzenr65S9/2XGc/X6vtf7CF76w3W6Pjo7W63WSJNAu5/P5ZDK5vb3FyoFYuS7P8yAIer1elmVSyt1u53necDj0fZ8xtlgsiqLI83y73SI5OgiCxWKBlF7LsrBeHCEEudVRFCF7l1Ka5zmSfznnSqn1et1ut3FeQshyuUQCrNa6KApCCMTQ9XrNOYePs1IqSZJ2uw0ZV9ZGz4c5zrPZLAzD4XDYyKnj8RguHFVtuAwVFYI1qXOiEUNSSnEupBr1+/2Liwvo0ZZlIXU6iiIYOsdxjEOu61qWJYRA6jStE6Jx4ZZl7XY7VtPtdjebTdPz1WrVdAlKNIJJiNGISJMkweUTQjzPQ6DLGKuqCgUYY6vVCvutGlLHsbhMUsfDkOYxFKpOFe/1eii/Wq1wC6BrDwYDlO/1eowxvBhwHEdrzRjzPM+yLK21bdu6jrHRoFIKfbi9ve12u+iDlBLyNHLYtdb4Mti2vdls8NU6OjrCOEgp5/P5er3GVysIgv1+HwQBY+yjjz6yLGu9Xu92uw8//PDq6gqnNhgMBoPhP5OXFpYQtQkYwCyMvweV/v0oIYRSiqOY5XXt0YHp+8GDByj8ox/9iHP+9a9/nRByfHwcxzEhBFMwQBW0cHhGzL9CCDzOmZ8KGV47ryRACyF+/vOfY/udd97B9rvvvvuZQgaDwWAwGAyG/w688847T548gf3ucDjknBdF8eDBg+122263tdY3NzdHR0daayHE+fn5YrG4uLg4Ojq6u7sbjUZhGCJ5Z7lcDodDQkin0+Gcz+fzo6Oj6+vr0WjU7XaLooArNCFkNBrZth3HMVRC27bb7bYQYj6fD4dDqIr37t27ubmJ43g4HC6XyyAI0jSdz+foJ6W02+0mSQKx0rKs8XhcFMVut0MWs+d5vu9fXV1prZGdzRgTQnQ6H
QEGXKskzT1HVdmG5j0GBaXZblaDQCjO50OmhDTWYfPHgghEgrm+ayLIMgQGE8YJianZ0dQghjLIoi13XhBB2GoWVZcGT2fT8MQ03TTNNsNBogyLCf9n0/juPVaoWRB4nGrxACU4y+NBoNDF2SJKZp4oFcr9eEEGS+A2RrmtZqtfBeN5/Py7K8ffv2cDhEmj9j7OnTp9/73vckfZaSkpKSkpL6VQnBMAJg8lHojODt4hFSAej6VwiB7fpgvV3vIgL8WBnTNOvAknP+zjvvYFtK6ueVzICWkpKSkpL6tdCnP/3p69ev93o9y7La7fbJycne3t7+/n6SJLZtU0rn8/nu7m6j0Tg7O2u1Ws1mc71eF0XRaDRgxQBjAUqpoiibzcZxHFVVAS5rQkcI4ZzDyQFUEVmitPJ8yPMcPstZlhVFgYO4SghRZxkD89m2TQgpimK5XB4fH1++fBn3evz48cHBASGk1Wptt1v4M+jVOnu9Xs8wDMMw0jSFC3Mcx4SQ4+PjVqtVFAUGpCiKXq8Xx/F2u4VhNKV0tVplWWaaJhpMCInjuCzLLMviOOac9/t9wzAQZ2uaRqrsYCBgSqlaZSsDTYZheOnSJWwDyJqmmec5WDznPMuyKIoGg4FpmkVRmKapaRpGjzEG6AzRis7DZRt3RMiO+6qqimieUnp+ft5qtdBT3MX3fU3TRGWFPBqNagZdlmVRFEEQoDxeMNB9nM3z3LZtQHN0E6eEEEdHR5cvX+acz+fz27dvM8ZWqxWYMnKZOedRFHmeJ4RYr9c4gudqu93Wu81mczQa2bZdFEVRFFeuXFmv1/fv37969WpZlowxy7KyLEuSBFnGq9Wq2+1iGAeDwXq9vvhWA95tWVaz2dxut1EUgWXHcdzpdFAJMtzjOG42m3iSMaGqqgoh8JzXo6EoCrsgzjkmNEkS13Uty0Kvfd9XVRWGHpvNRlGUPM+FEHmer1Yr13V7vd7R0VGv17t06dIHH3yQ5/nx8fG///u/o9lSUlJSUlJSUr8S/ehHP/qDP/iDshK54LNRx1c4+J/XXMhoxkZdvq6BfNR8oz5YC7FunueEEHEhGUJK6ueVzICWkpKSkpL6Fevu3btf+9rXPvvZz16+fFnTNN/31+v17du3kcs8GAxs2zYMo9vtFkXx5MkTIURRFCcnJ0DAmqaNRiNS/ZecYRiapsE5utVqgRcjEbjZbAIRhmFomqbjOKh5tVoBoZZlCRjKGAM8FUJ0Op1Op4Pk4hqnIkgdj8eLxQL5rcvlst1uN5tNz/McxwFrLopCUZQHDx6QKvliNpsVRQE4yBg7PT3N8xynGGONRqPZbNq2bdv269evoyhCvAu8yDn3PM80TUVR0H7XdbfbbZ1dmyQJkqaxoaoqRsMwjNlspus6UrkNw9B1HWnayL0FZcYCgEiVxSJ1ruui/HQ6xVW6rj969Ag167o+n8+1KkX69PTU9/1Op0MpBd6FiQraj7lGTI8RgJ8G1Ol0ROXUDC/pxWKBQc7zvNFo9Ho9Sum9e/cIIZzzZrMZx7Hruo7jIFEX3Ll+Z8jz3HVdGGvUw4tr0zT1PA8PBoYRJuOA10IIIUSj0RiPx7hqvV77vk8IWS6XqK1+AAghjDFMgRCCVybUUF1SCIGSSZJYloVZaDQawL7A0DhlWdZyuXRdN47j+XyOiQ7DkFKK6VutVrTKRp/NZmrlrI0nqigKfD4pioIxhj4yxsD3gyDAXWazGcpjwJMkiaJIVdXVaoXPPC9fvnz27FmWZd/5znckfZaSkpKSkpL6lesHP/gB4knEdUIIBFrlBeEsjuOqiwfJhbzp+ggO1tsfO0UIQdBbR3SNRsOyrPqslNTPLgmgpaSkpKSkfmW6fv36H/7hH37qU5+6evWqqqogvIZhgJHt7+/3+/3pdFqWZbPZPDs7K4pid3e32+2C1QL16rquqmqn0/F9H/nRw+Hw9PQ0DMPXr1+fnp4WRXF8fLzZbJAW7bou3AyA9mr6BtIahuFyuVQUpc7GPTk5qSkwpbTdbsP9A2nU/X6/3+8DcMOjTEkTcAAAIABJREFUAMXG43G32+12u61WCw0mhLRaLc/zXrx4YVmW4zhxHAMI2raNhtX3QijcarWCIHCrDNbtdot/PxwOhzVA3G63hmG4rgtqjDRw0zRt23716hUuQRr1arUKw3C73cIiwzAMkOv5fA5+bVnW48ePQZl1XV8sFmq19iBAJ4ZI0zS9suAAegZuFtXifhhwWllkcM7zPIdNiuM4zWYTid6woaCUYtzwIoFKsOs4Dkgx5xwsOEkSNDtN0yAIAHkXi0VZmUeX1RKOSZLgVQHTgXnB60otXF6/zJRlWRTFq1evms0mIWQymdi27brucrms++U4ju/7mB0hBFg2mLsQglKK1uK9pSiKNE3xcQJPted5gO95nud5nmUZGDHmhVd52SjfaDSiKMLspGm6Wq0w0XEcwxam2WwiZRvoGdAZ7DsIgizLUGGe5/jGkOf5fD5njLmuWxRFEAT4VrG/v7/ZbAzDiOM4DEMYqb948eLb3/72cDj8v3+rUlJSUlJSUlK/OnHOVVVFwIYorj6FuAvx3sUNHMcRhGrYRiU4dbH8xe26TFmWhmHg1ogGv/GNb9TlpaR+dkkLDikpKSkpqf9pua77xhtv7O7uGoZx+fLl1Wpl27aiKFEU7e/vr1arVqsFo9vtdru7u+u67uvXr2sUi2ReQkgQBI8fPzZNU9O02WymaZqiKDC3JYQoitLpdAghjLE8z7fbLdJsi6JQFGW73eq6bllWURSEkGfPngEFGoax2WyQb1unl5ZlWced9+7du3nzphCi5r+cc0VRTNNcLBagmaDDMMcANt3Z2dF1HXWapglQiDTe2Ww2GAwIIYvFAnnHWZYRQiilqIEQUpbleDw+ODhI0xQrFq5WKwBH3AL9yrLs+fPnd+/eJYToug48yhgDMm42m5zzOI6FEJPJBAsq6rqOAqZpZlkGmq/rOkaJVq4ay+USyzkCSaMjAKmNRkOtfJ8x7CiDccvzfDKZ3L17F6h0OBwC3CP6xy0Q69cHMbboGh4YmDujmzhIKRXV+jO4HONgWRYy6OfzeaPRwCnGmOd5aZpaluW6LmpGF8bjseu6ZVneu3fv7t27nufVLhyoH7eIosi2bc/zcHchRBiGtm3XELwoCtM0cXyz2QCR27bNOV8ul3h4CCH379+/cuUK5/z09BSP37Nnzw4ODoIgWC6XWPFyuVx2Oh08/K7rYjAxsGgMNjDaeNLqByCOYxi8bLdbIOk6nR9DhIPtdnu1WgkhkC9vmqau65vNJgiC8Xj88uXL0Wj0/e9/H4MvJSUlJSUlJfUrF+dc07Q8zxENimoJEPJj/51Wb9TCESEEoruyLHHJxbM/rroMwmPcuiiKr3zlK//0T/90dHR0sQ1SUv9PSQAtJSUlJSX1P6e33nprf38fHgvdbjeOY8uydnd3t9vtlStX4EiAPFlQaWw/f/7ctm1VVRljZ2dnpmmCyi0Wi93dXXphUTtSJSAjNsUpIYSmaVmW1eSUECKEqDkpIaQsS1VVaZ
UT8fTp09u3b6Mqzvnx8fHh4aGiKMhjre9CKZ1MJjdv3tQ0rayciMEHbdtGVYyxPM9/+MMf3rhxQwiR53kcx5qmgd4yxrbbLWwTfN8HFtc0DW1LkqTT6aiqKoTgnCP/QlVVwzCQcRzHcRzHL168ODw8BIvM8xy8sqxsoNFfSul8Pu/3+7jvarW6dOmSpmmaptVJzYZhGIYBuIkLIRRTqjUJNU3DUGMEyrLEJaqqYngxAmgAxgq/iqLMZrN2u41tnEVVmC9CyHA4dBwHCc5CCNQ/nU4ppfBWrmd2PB77vl+WJTJ5HccpimI8HjebTVrNO2MsjmPP85bLJS5EY87OzgaDQVEUi8VC0zSAabQE8+h5HsgyhrTVasHEmTEWRZFlWZ1OZ7vdlmWZJIlpmq7rimphyW63myRJWZb37t27cuUKupnnueM4eELiOF6v1/1+H18RoiiC7UaWZfhmMJvNms2mpmmO4+i6Dg8Q13WjKIqiyPd9DDhGA93knCdJ0mq1hBD4SoEp29vbg3MIPjlwzimlq9Wq2WwOh8MgCMIwHI/H7XZ7Mpngg8rr168//PBDIiUlJSUlJSX1a6OiKFRVrSNG6OI2ueD5Vm8jDKuLcc6VyrRNVDAaheuIFBdyzhHZEkIQDOMq1PDnf/7nf/EXf4GUESmpn1HSgkNKSkpKSuqXrrfeeutrX/van/7pn37pS1/a2dm5du2a4zic8/39fUJIo9G4ffs2AsT9/f3d3d35fA72lyTJ0dERnCXgF7Gzs9Pr9eBl4bquWVk5I4sTFNWqbHYNw9B1HWcNw1itVqCN2F2v15qmYVtV1el0CmMN1ByGYRzHeZ6DWiIMnc/ni8Xi5OTEcRzXdeERkaYpIQRk9j/+4z8QtoKV27YNW+dms+n7vu/7nucFQZDnebvdRk63bdswx4jjGFeBYyZJAnsN27Zt23Zd9+joKEmSPM/TNH3x4oVpmrZtW5ZlWdZ8Pl+v16ghyzKkvkZR9OTJk3oc6vFBnYvFYr1eR1G0Xq8VRakLfPjhh5qmqaqq6/pkMtE0jRAyGAxGoxEos6IoH3zwASCvoij3799XqjUeMVYI1hljtBIml3w0x2Q4HGKWgyDYbrfoJgBrv9+nlAKpJ0li23av18PtEPo3Go0syzD1WZYFQYAbLRYLXIKBdV233W6jVYqiTCYT4FpMK+c8yzIYbhBCgOYNw2g2m+gFCjcajbqwbdsA6OPxmHMexzF4N6UU3SnLcjgcisrPGrMTRVGj0Sgqi4yiKBzH0TRN13VVVWHrURtwbzYb3/ezLHNdF9nTnudFUbRYLFBeueAZwhgTQgwGA3bB8TmKIjxmAMrwksaqib1eL01TfLdYLBZCiH6/H8fx69evNU0Lw/Db3/62pM9SUlJSUlJSv27abrekin8QpGH7YhmEZB87iPI/8dSP6yeWoZQahoEoVAghhDg8PPz85z//sWJSUv+1JICWkpKSkpL6pcjzvDt37nz1q1/9+te//vbbb9+9e7fRaOzs7Ozv76dpeuXKlU6ns1qtGo2G7/vD4TBNU0VROOfn5+cgtp7nxXFs27bjOJZlUUrhB12Wpaqqk8lkPB5PJhPGWFmWk8lkOp1OJpPRaDQcDvF7dHSEFNflcglvBEqpoihgbZvNBtu0Sn1NkgSgWdO01WqFs0B+o9GoLEtQY0opYwzdFEKcnp42m014UnPOdV3fbrd5noOfIlRljL1+/TpNU845XKd1Xbdt2/M80MaaRyN3FXDZ87wkSQAuGWNIj62hs6ZpYKy+7wdB0Gq1ms2mbdvPnj0DccYvpRS9UFUVAbTruoDsrusahqFp2ng8Rq9BRRVFUVW1LMs8z4GDMQgYPWyQKqm5PqIoyocffgjWTClFgC6EQFouLiGEoCONRiNJkprgI5cZxRD3c85/+MMfOo7TarUwzgDEpmk2m02Q3LLi3XgqiqIwDKPRaARBgJHHHU9PTx3HaTQaq9WKc+55Xt0YCNdalgXXDtwryzLTNFutVj3deMwopZzz4XDoum6r1Xr48GHdYDyuSZJYluV5XpZlQoiyLFutFkw88LUDKd6Y02azWZs7J0myXq+NKs89DEMMznK51DTt0qVLqqpeunSpLMvBYIDZwbMhhCgqa+miKNI0RRI0PuRgFoqiSJIkSRLTNKfTaRRFd+7c2Ww2r169IoTEcfzd7373W9/61mKxuDgyUlJSUlJSUlK/DprP5wirKKWqqtaRXh3U4VQdFuJsfbwOX7Fb1fqRy3Gq3iBVAjWlFOFZWdlPF0Xx9a9/va5ESupnkbTgkJKSkpKS+m/Wm2++ubOzMxgMFEVptVr9fn+73SqKcvPmTaxrt7Ozg/9Z29/fd133+Pg4CALTNMGFXdc1TROYTFVVznmz2VRV9enTp4qijMfjskpoBSs8OTnpdruu6+LuiBrLsgRKDsNQVVXTNFEeXs/AxGVZPn/+/Pbt2wC1cRy/fPny9u3bqL8sSzj8xnGsKEqSJGEY1shvuVz2+30hBFZ1w38FappmGMZ777339ttvx3EcRdGrV69QISEkiiKYMICcrtdrQkhRFCi5s7NjGAYylyeTSVmWyIOez+d37tyxLKu+i2manHPO+cuXLzudTp7nURSNRiN0BJUoioJiZVkiLRr51Iyx2sp5Npt1u10QZ3QHnBrMGmE3Rrje5pzjVH283sbgK4rCGEPy76tXr27duqUoShzHaZq6rosuOI5TliVYcP3yMBwOW60Whr0oCiREN5tNvGlwzjebjWVZzWZzNBrV+c6cc0KIEOL4+Hhvb8/zvDAMy+r1A68HURShWlaBb2RJg+/jIDg+WlKWJe7eaDQmk4nrukKI+/fvv/HGG5h327YJIcibBtgFVl6tVteuXYO3Mue8/lqQpulqter1eugyspLxtCuKsl6vu91uGIbr9brdbruuu91uNU3bbreu6y4Wi9oyu75dURR4AIqiaLfb2+0Wjw2ltNVqbTYb0zRRGA9Av9+HyYau64vFotfr3bhx4+TkJI7jIAhevHix3W6fPHlydHSESZSSkpKSkpKS+nUTIh9SucxBauWSgeP1dr3x46oDRQSZKEkvJD7XR1BYURQhhFItv4GSjLHd3d1OpzOfz/+zaimp/1ISQEtJSUlJSf03yHGct99+u9vtWpaFpf/KsvR9nxCSpmmn0ymKAva+juNgAbdms+m67mq12t3dtSzLNM0nT57oug5IijRegObFYkEp7Xa7uFcdFyISRVasUvkSIB4lhICWEkI0TUOIibMXS4LQgR1j5Tdci0o++OCDg4MDURFY8GjwwcVi0el0KKWtViuOY8uyVFXNsixN0yzLoihK0xQ2GoZhlGU5Ho/jOFZV1TAMVBjHMWpgjHHOAS6RM8sY63a7aZqiPbgKGc2TyWQwGIAwqqrqOI6iKEVRYKwAoPM8xzZ6N51Oe70e+jifz3d2dlRVRRhNK8tmwFAcVD7qpFELA1iWJZqHISXVROA4YK4QIgxDzjmp8p2xDiEuAXTGtZiCPM8JIa7rYttxHGTHo3AURZZlBUEQxzEhZD6fA0zjWthiw
EWEUorGCCGKonj16hVW/MPYlmXJLwjjnKZps9kMwxAXpmlqmmae58i4Rwax67poIdqD/OWiKJDpDHKNytfrNWC0bdtBECwWC3QTFwI9G4bhed5yuWQVjl8ul8gEB2THJOJjAOaFVknZjLHT01PLsniV7KzrOujzZrNxHAfZ66Zp4gPD1atXz87OPM/b398/PT3d2dlZr9ej0chxnJ2dnQcPHjDGptPp9773vQvzLCUlJSUlJSX16yjkrxBCEMuVF/DxR8pdwMcfO46DuLDeID8GoC9eiF1Ey3UNnHPbtvf29iSAlvrZJQG0lJSUlJTUL6j9/f1ut7u7u6uqKlgzpKoq0l0ZY7Zt27YNhri/v28Yxmq1CoLAsizLsmBknGWZYRjI3ARpXSwWg8FAq5b7IITQCpXW3FNRFEJInueqqoZhCNfgsnLFJYQgTBRCLJfLIAiAoVVVnU6nt2/fZowh9fj169dBEOR5DqZp2zaiTF3XLctCYApY+eGHH964cQPxblEUwJp5njPGHjx4cHBwAGKoaVqr1YLPBqUU7QmCQNf1H/3oR4eHh2CX2+221+tZ1ZKGcRwjH5kQst1uQZmLojBN8/Hjx4eHh5vNJs/z9XqN1qLxg8GgttpAm3Vd1zTt8ePH7XY7z/Msy5IkAYBG8zCSGExKaQ2dz8/PUUxRlA8++ODw8BBnf/SjH73xxhtAui9evLh7966qqnEcD4dD+Gb4vp+m6Xa7hQMJugNhIiiltILUhJAPP/zw1q1bqNCyLAz7er3GawAhBCMGOmyaZhAE6/Uat8OEJkkCWo1kZNBn3AhXIaMZCBs3Mk3TdV0MHR5OnGo0GsPh0LKsvFoAc71eLxYL13Vx9263i0X8iqKAjUbdNt/3V6sVqcw3TNNst9ubzQZH0F/GWJZljuOs12t8dInjGPOlaZplWcg9X6/Xq9Vqb28Pz49hGK7rrtfrq1ev4huGaZphGM5ms93dXQDxPM83m02z2dQqwxYcjOOYUtpsNjnnRVGkaaooSlmW6/Uajwrn/Pj4WFXVzWbzzW9+k0hJSUlJSUlJfRIUhmGz2URkjlhLVJklF1W/PtRCPI/jOIXfn3j5ReESXIsoGvEtpTTLst///d+/d+/ex6+RkvopkgBaSkpKSkrq/63Lly/3ej3P80zT9DwPLhaGYSiKAk7qOI5t26qqrtdrSqnruoZhRFEUxzHSSH3fT5IkjmNd15MkURRF0zTP8+pkT8dxAEYJIVhvrSxLXdfB+xDtaZrGGBuNRoyxw8NDlEeeaZqmuq5zzpGd6vs+wlPGGKUUqDTLsjiOFUVBLm1Zlkr1/3SqqiK4/P73v//5z39eUZTlcpkkiaqqvV4P4aaiKL7v53muaZqiKLPZ7ODgAOyVUmqaZqPRSNM0SZL333//c5/7HNDqs2fP7ty5wxjjnKMjuq6bplmWJZoBRhzHMVw4GGOMsfV6LYTI87yovH2DINhut4QQRMAQIQTQ2TTNV69etVqtoigYY8CsqqoKIZAtgkHWNA1hNLozGo263S76PpvN6p7SKtpGMVIRVfSorJKd9/b2hBDr9XoymcBQWFGUs7OzTqeDsB6XgNUWRYHpQM64bds4gmGnF9KoT09PDcOwbRur/6HLi8XC87w8z5FcHEVRWZYPHjy4ceMGBsqyrDRNHceJ49jzPDy3SDp2HKcsSwwjEo2LojAMw/O8+Xw+nU4PDw89z8MtMHqWZTWbzcViEYYhvovg1Gq1YowJIRqNhqhSsC3LAiXH+CAf2TRNXOi67nK53NvbWy6Xm80GiwHiLrUXh6ZphmE0Go04jouiAKS2LCuKImRVj8djx3EwC3meA4U3m82iKODgEQQBgDvmOgzD8XhsWdbe3t7JyUmWZdevX3/06FGj0cBT9ODBg/feew+jJCUlJSUlJSX166+XL1++88475KP/yPjxQh+lxti+eBy7F38vnsU2Lv/YQfWjC0Hnef7OO+8gd6GqQ0rqv5IE0FJSUlJSUj9BBwcHvV6v0+mYpgkf5/l8HgSB4zgAmjDQODk50apF/JDm3Gw2FUVBWGbbtuM4pArjVFUNgoAQUoNIEGRK6Xw+N03zYmBXFEW73VYUZTwe13EkQkCUfPr06ac+9Sl48pZlud1u+/2+oihAe+fn58g1VlV1uVwOh0P4XeCO8/kcu7hdnue9Xq9OTWWMlWXZarV0XV8ul3AUAVs8Ojrq9XqEENxls9lgI8/zp0+fHhwcxHHMOd9ut3Ec53meZVmapovFIs9zsGmAYELIfD6nlNYuHGiqURl0JEnS6XSSJCGEcM6VC9nNk8kEfs2apj1+/Pju3bvA2YqiwB2CMWYYhqqquETXdTBl3Pr+/ftvvvkmjtQCaMYgY/firxACBSilYKDA3Ij7cQlicfTL932Q2fl83mq1gM7X67XrurZtr9fr3d1dUHtkjpdlCbbruq5lWShQ22Lg8wZjDBnWcHkuy1IIgUxzy7I8z8O4UUrhdwz4C0wMNIy7jMdjZAHDEIYx5rouHjnf9wF2kV6N6Wu323ivAMs2TRNkfD6fbyuraDwbeZ5jgtAeOIbHcQyEvVgsHMeB3Yeu65hrz/OWyyVu5DjOYrFoNBr4i6CUYuoZY6qq1pQZD2dRFLquw/EZwJoQsrOzA3Obs7Mz13Xfeeedo6Mj/KE9e/asKIq9vb0f/vCHYRi+fv0aaw9KSUlJSUlJSX1S9A//8A9f+MIXGGOIPDnn9SkEq4hFSWXBQQhRFKWOV3EWMWFdDFEldmsh6EVsiWKUUl3XUR43YoxZlvXWW29997vf/djlUlI/URJAS0lJSUlJEUKI4zhXrly5cuWK7/ue5+3u7i4Wi3a73Ww2J5OJaZp3795N03S5XIL/LhaLNE2xshnoJIinaZqj0QjJmL7vK4qyXC4RqIEYLpdLhIxnZ2f7+/uapq1WK4R37XZ7tVoBwBFCXr9+TSo/B8R5WJAN9yKETCYTvUqRLopis9mAzXHOKaWMMUVRAGrDMAQ4RkDJOXddF6EqIQRomFe+wB9++OHVq1fzPGeMMcaQo0oIWS6XlFLP87CrKApIK3hiDQHn8znqRzFKKfJ/gVzv379/69atLMtc191sNkDJwMTj8RjGI2COGM88z9FZFNM0DXxW13XLsiilSZIAQUZR1O12TdPM81zTtOl02ul00P0nT54cHh5qVRI0wuh6GLFRh9T1gKMMjmNGCCF1EE+qCB67jDEUCIIAUNhxnPF43Gw2AVix8mR9Le5CLiQpw4MbFYrKG7q2xajvK4SI4ximGYvFotfrwfVCCIFnwLIs7OIXaBiZyEEQjMfjO3fu5Hm+XC5VVXUcZzabGYZhGMZqtbpy5UoYhoQQPFEwm2aMxXHsOE673V4ul5zzNE0ty8qyTNf1siyTJFmv11a1UKRt26vVyrIsVVUBwWGvMZ/PNU1D9jdjLIoi3FfXdcMwlsvl5cuX8QwzxvAYoEJN03AQ4wYqzRhTFCXP87IsCSGGYSRJAoPs/f39s7MzsG9Udf369WfPngE9f/DBB5gpKSkpKSkpKalPkF6+fIkYFcEP
wjwEq+SnOD4j6BUXUDIux/H6krLCytiu4966krIsEZwjIi3LEhuf+9znJICW+hmlXtzBe8XFI1JSUlJSUr/Zcl3385///LvvvvulL33p+vXrV65cuXnzJjDlpUuXQMG63W6n08myTAjR7XZbrdZ0OjVNc3d31/f9s7MzpPfGcYxfVVWRyoqIEHmvlmUBxvm+77qu4zi2badp6nkedh3HURSlPgWCaVmWbdvIOKCU2rZNKQUtJYREUeS6LraFELXZAiGkLMvxeAzMhyOj0ajT6QBrZlkGDghqWRRFv983TVOpDJGxLiKlVFGU8/PzdrtdFAV4H5JtUcPx8fHVq1frWJZzHscxxmE8Hl++fLksS8bYaDQC/hZCcM6B0UE2gyBQVbUsS3BSTdNqmhwEAa0yjofDYa/XQ2uFEO12G+wYM4ja0DU0j3MehmG3261hJZYNxDYsMjjnRVGYplkURRRF+DbgOE6e53meJ0niOE6WZWgPeGue5xi3NE3zPIfRMAojNZgxtt1ud3Z2UEme561Wq64QlWCOHMdBe3Z2diilSZIcHR21Wi3k0QshTNMkhCyXy3a7naZplmWwj0DoD96KRgL+np6eDgYDSmmWZb7vY4JM08QnijiOKaWwdvE8b7vdlmXZ6/WiKIqiqNfr1fnphmEURYHM9CiKKKWO4xBCttutpmm4sNlsJkkCq+jVaqUoSrPZ3Gw2nHPHcaIo4pyrqhpF0bry7IZ5i6Io6/W61WplWeZ5HlLXKaXr9dowDN/30zQVlYN5lmWoOU1TXdfBpgeDAb5exHHMGJvNZkEQaJo2m81s2w7D0LKs1Wo1m80URUH69mg0Go1G4/H4H//xH8fjMR4bKSkpKSmp/2165513fvCDH3z8qNQnR6qq/tEf/VGe5wi8kQKC1IqyLEmV+Ixt7OJIfQoqq9W2EYYpikIIUVUVMRgOfuwSKE1ThNA4Synd3d3927/9248Vk5L6ifqv7MalpKSkpKR+U3X16tXf+Z3f+ZM/+ZM//uM//u3f/u1Pf/rTtm1fvnw5CIL5fI5c0eFwWBSF53mO47x69SoMw3a7HQTBaDRqtVpgfCcnJ41GIwgC3/d93280Gq7rNioBLjcajWazaVmWpmlIIjZNE2nFmqYJISilgLBpmsKvwLZtFEDOr2EYIInIj0a+MC6nlbMEVFbpCYqiaJpW/1JKOecwTEDYikRXhJ5CiPv374Owg94eHR0B8HHOsyxbLpeu62ZZxhg7OjoCIsfvZrMBNCzLcjgcttttEPYoimoIm2VZGIZxHGeVIwfMhaMoev78uaIohmHUjL7u2mw2Q2cRW6Mv2J3NZvP5fL1ebzab+XyO8TEMY71e4xKUJISgKlVVhRCoR62ynsuyLIrixYsXQRD0+/06WFcURVEUOHdTSjEmcRx7nhcEAa5K0xRPghAiSRLbtnEKNSB8R9QuhMBTlKap4ziu66Zp6vs+hr2sFo1EnRgHFKiDfiBvjA9SkkejEZ4BwzA8z2u324yxslJRFIZhGIZhWVaj0cCRzWbjOA4sLCzLArUHyQVGT9PUNM1Wq4XGA8q7rssYQy/G47Fpmp7nzWYzYH1c0mg0JpMJWj6bzeI4NgwjiiJMynq9xsQBEAPiJ0kShmGj0WCMxXGsVx4pGG1KKaj6dDoVQtTjkySJ7/tCiDzP4zhG4vaNGzcWiwXn/ODgQFGUfr9/cnIShuGdO3em0+nR0dF8Pi+K4uXLl//8z/+M8ZSSkpKSkpKS+iSKf9RMA7kUiFRJFdVjAyIXvJ6xUZ+tj+AXldRlLupinXjXqOsXQjQajY9fICX1UyQtOKSkpKSk/nfpM5/5zM7OTq/Xu3HjxmQy2dvb22w2Qohr167leU4IuX79uud5H374oeM4g8HAdV2sbqfrepZls9nM8zzYC6iq6nkeNhDkjcfjnZ0dgM7z83NAzMPDQ0VRjo6OCCHIDN3Z2dE0De632+32xo0blFLOeZ2Bq6oqq7yVkS9MPvp/drquCyE45/P5vNfraZrGGFMUZbFYdDodNKAoCvgtZFkWRZGmaQ8fPgyCADUIIRzH4ZzXiNbzvKIocBYZwUmSoHdInUZ+7na7LYoCTSrLcrvddrvdKIoYY2COYKxJkui6blkWYmVUTikty3I6nd64cSOOY1VVkS5dVFnPRVH0ej3DMPI8D8Ow9npWVZUQggoppVmWIWk3jmM4cqCYVnmD1JOCDU3TJpNJr9dTVVVRlAcPHrzxxht1vF5H0tguy7IoiiAIwjB0XTfP881mc+nSpTiO8zyPoujSpUuqqm632zRNd3d3W63Wer1+8OCuk4vkAAAgAElEQVTBrVu3KKWKoqA7juMURWHbthAC2cGEEBQYDodBEHDOOeegsZZleZ6HApgdvFQYhuH7fhiGKJxlGZyX0U5CCPisECIIgizLAJprHg1GjDxl1ImPCkVR6Lru+/5yuQSS9n2/LhNFURiGnU5nuVzO53OQbtyUc45kZzS4KIrtdmtZFh5XVVVxL/Bi9EsIURQFrsV3FMuy8OnCMAzMSD3ylFJN07DdarWAv8MwbLVazWYTd8GDce3atfl8Dt9z3Aj/l9But1+8eKGqarfb/cEPfrDdbh89enR2dkakpKSkpKSkpD7JYowxxhCyEkI453meI2eljqbqmBaqY936l37U9xnbiABxOY4jHEVh7JILWdK4S1mWiNL5BTdqKamfJpkBLSUlJSX1v0KDweDLX/7yn/3Zn7399tvvvvtuEARJkty8eXOxWPi+3263x+Mx59xxHMdxTk5OdnZ2YNpwdnbm+36z2Ww2m6CKSP41TXM+n89ms8lkUud4KooymUwmkwlWDoR3x3Q6nU6ng8Gg0+m0220hRBRFaZo2m80gCBqNxunpKWim53mMsZOTE+Tzbjab7Xa7XC5Rv6Zpuq7DurfmreoFF2NFUYQQaKrrusi33W63eZ6Xlbtxr9fr9XpI5X748CG5kKh77949UZkaM8ZwIaDn+++/v16vkyTJ8xzsEgzRtu3xeBzHcVEUaZoCDmKXc/6v//qvhmEgV/f58+e6roM/otmmaWJXvZDTHYYhdlGGUope67r++PFjTdNwFarFhqZphBBN0zAIw+FQVVWMxqNHj9BrRVGQe17vcs4ROiuKgl4jiLdt2/M8pLdj9UWlyh/HyCAcxxBxzusonFSvATW9xdmPxf1CCGQBwyUZecrwu0Cr8jz3PO/4+NhxHM/z0Dwcf/bsmeu6KIzKQbpRCdAz3EUAaqMoiqIIbSurVTFt2waSRiVQWZZZljmOE4ah4zhBEIzH4/V6jRF2qzUJDcMwDCNJkiAIgJhBrrMsM02zRu3Aypgpx3EwWVEUNZvNJEkGgwHgted5m81muVxqmmbbNmzTVVWdz+cYTFFZpqA7MO5I0zSKoiRJkiTB5wd0Af1dLpez2ezWrVvT6fTBgwej0ejb3/62pM9SUlJSUlJSvxlCAgeiU1It2oGYEMcRc5KPZj1jl1QhK34hWhlxIF0AgWtd/mO/uq4rilLXUJYl5/yLX/xiXZuU1H8hCaClpKSkpH7Ddev
Wrd/7vd/74he/+O677/Z6vbfffjvP82vXrvX7/e12e+3atUuXLp2fn9u2rWkapXQymfi+HwQBzGc9zwMKDMMwiqLVajUajeI4Pj8/RzQmhDg+Pj46OgKRBCxLkgS7vFqxDTgYgDtNU6T9ogz4nVXJNE34S4DcbbdbpVrhMAzDMAxJRZxBXRGDInzk/P+w92Y9lhzn3WdERu7L2U/VqaV39samSEuiRAuWlwt5gS8M6AP4yp/NgAHfGLqQN9mABQGSSIlusslm9VrdtXTVqbPlvkZkzsXfmS5KHs07g8G8Iyr+F4U8mZGREU9Edj/5yyefEA8fPgRxJpdiHHCoLMsoitA20D3TNBGwDP8V+NV1Xc/zQN49z4M1RqPRcDh0HMeyrI8++kjTNMuygKQBHx3HcRzn5OQEKTg6Oqy18bNxHIO95nmuqmrHpp8+faq06TUwBOgatnEIwn6cBRiKU87OzlBSUZTNZtM0DewDLxz7FUWB3bDRgWPawmVwXnEJTOPczraYTl15/MRZeZ47jhOGoeu6g8EASSFQgHNeFAWsul6vYeEsy6bTKeo5PT1NksRqM5A4jlOWZVmWpH1yyLIM5u2uyDnHhLFtO01TDC7QNmKTfd93HAdJQmBqXddd10UHu/mQJElH813XraqqLEvOeRzHhmGAoQsh0jQFSq5b8h6GoWVZnufhipZlcc6zLNM0DXdNkiSmaSLfC6p1HAfTgHMOSI03CpqmrddrSqmmaVEULRaLrpsdgMb6n4Dpqqru7+8DUuOJi1J67dq1NE0nk4nv+5vN5vnz52EY/vznP//Zz36G2qSkpKSkpKSkvgIKwxDeKTxVoGf4S5RS1i6RAl12XyGciz2Xi3X+bXcKtn/lJ5zz7nRKaV3X9+7da2uSkvpNkgBaSkpKSuorq3v37n3/+9//1re+9Y1vfGNnZ0dRlP39/fPz893d3aZpgiDwPM+27devX29vb2N1wbIsXdd1HMd13aOjozzP0zSNosj3fcMwtre3p9PpbDbbbDYdAEWg7mQyGbUaDofD4RDRzeDOIM6cc7hxQoher9fv913XBXlEhR1XjaII3BB88Pnz58vlcr1eg8OCvsGDbJrm+PgYWLPX69V1PR6PQeVw9Pz8HEmoEXA9n88R/gyyjKhn7On1emVZgqhWVVUUBYghmDhiWk3TBHksyxK9ME2zy9LQ2QS8kjGGXnPOy7IEXkeBDz/8UFEUMGUIXQMdxh5FUQ4ODnBI13XTNBFenaZpkiQXFxcwPmMM0BmnMMbgXqN+1Ib9pPW/lUsR0JTSs7MzjAu2O+udn5837ZeM5+fnKNA0DeKO8S5B0zTLsvr9flVVk8kE3jmgLWy12Wz6/T7nvKqqra0tMNymDaDO8zyO48FggJ+d69+Ngq7rrut2O5GCwzRNzCWwXUzOyy3BmpmA0V1tiHH2PA/bhmFgwtR1jV5zzk9PT03THAwGaGFZlqqqIpE09oBZ404BN8e00XXdcZyqqnzfx04waDQMI4vhXq/XaNJgMBBCwCCkDQ/HJwKEEDxNjcdjNAPxzqPRKAxD3/evXLkSx/HVq1fPz8+zLIui6Pz8fLPZJEmi6/pyufz7v//7w8PD7p8CKSkpKSkpKamvgC4uLuCO4i8hpKoqvJInhMDXQkl6iRTDMe5EW37d1QN3F0e7n6hHuZTcA0715UNN02iahp9SUr9ZEkBLSUlJSX0F9e1vf/sv/uIv/uiP/mg8Hn/ta18TQty+fRtgdzAYgIoiycbZ2ZnXCtl1gVxXq5VpmvCohBCIDCXtonbb29uj0Wg8HoPbgsHxS6ulhWGo6zpCp3u9HqC24zi2bRuGYRgGMmmALxuGkWWZqqrgsx2z01rhijs7O6PRqN/vl2W52Wxs206SBMi4qqrOU1wul2CjkOu6aZqibWB5YRhmWQZvNcsy3/eTJIFNXr58CWxqWZbjOFEU5XlelmWe5x9//LHv+2maIoqZMYamWpZ1cXGR5zlqS9NUURTUYBjGw4cPuy6s12tN0wCgtTbzBn42TQPDqqp6cHCgtBHfoMmdJT3PQyAt3GsUgx8Mn5gxprbQGYfgLqPM2dlZN0MAly+LfPkzQ0IIUng3TSOEAHu1bduyrMPDQ8BTUGOcUtd1WZZVu5Zgv98H9l2tVkVRZG3aCkBnz/MQRNzr9SileGZA+bIs8f7DMAyg508++QToH5AXmZF939c0zbZtRP6C5GLiGYbx8uVLlKyqChPJMAxw5KqqTNN0Xbeu64cPH1qW5bruxcVFGIamaXqeh3HERLUsq65rQGeo1+tVLWX2PA/A17KsqqpAnCeTSVmWeIPCGGvaaPQoivABQWe37e3tuq7RHRhZCIGNqqo457BYWZZFUXRvccIwVFUVZdI0xXD7vr+1tfXixYskSR4/fvyDH/ygG2gpKSkpKSkpqa+MHj161PFfuKxwiuq6rtsFt+EYQyj569v4CUe3+Y0AulNXpjsXf5GzTkrq/1ISQEtJSUlJfaVk2/af/dmf3b17d3d3t6qqq1evrlar3d3do6MjTdP6/f7u7q6iKMPhsN/v27a9t7c3m80AdsEBO3aJoGbkce7imj3PA4U0TdM0TcdxPM9DqgpwQ8uygiAIwxAuoK7rjLH1eo06OxqrqqqiKPolKYqiqiros67rINSMMUopWGEURfAvCSG+78/nc7iJjLGDg4O6rimliGX+/PPPOefwEYUQJycnYN+O4/R6PWTJ6PV6aPlwOJxMJoPBALgTqJoQIoR49eoVglhxbgdGVVV99uyZpmmmaeZ5XhQFeorG//znP+86hf5CsAm24RzDDpqmPXnyRFVVHIVlsKG1nBolF4sFrArPGPAX3QTohM7Pz+EiU0rn8zmY5mAwAOElbXqHNE17vR7GvSzLNE37/T4Kl2XpeV6v17MsC33P87xL64wy2Dg/Pwd0tm07jmPbtsGUUaCqKpDf8XhcVRUgr+u6WZb1+30gV8/z0jQFsvd93/M8vCfA40SSJPv7++3TQdOlvMBMKIrC87wuHt+27dFohEsXRQHGjXY2TZOmqa7rjuO8efMmz3PMXiEEiPlwOCyKAi1xXbeD6Zqm4SdahZc0o9EILUFrUQPmSTdwjuNkWYZivu9j1IQQ4/HYMIwgCAghiqJsNhvYAUeFEJxzzjlCudEG3/eVNhc2ugamHwQBY6zf779+/frw8JBS+vr165/85CeX/0GQkpKSkpKSkvrK6N///d+xAW+2abNwNE3DOVcUBT42a7/8ay6tKwhfCx4y9neVkPbbSuVSvDP2w41EDYQQPJt0+5umuXr1andUSuo3SAJoKSkpKamviK5du/a9733vr/7qr+7fv++67v7+PmOMcz6dTpfL5dWrV3d3d7uQZ+DUw8PDMAzX6/V6vX769Kmu64qicM5fv35dlmX55XwURVGAypVleXx8jJ9CiJOTk6Ojo8PDQ03TgOEsy3Ic5+TkBGWOj4/zPH/x4oXSBvb6vh9F0Wq1opQahgF6CIIGeAca23FY7MTp3SHSsnJ4k77vu64LBxS0jhAColfXdZZlXV+qqsICg9
j+5S9/eTkmmjFmWRbYcZZll9ugKIqu66Zp2rZd1zWinhEqu1wu0UhQyCRJqqqq6/oydNY0jVKKzhqG8dFHH4VhmCRJmqZxHJP2s0G4v50d5vN5ZxDOeRRF/X5/Op3WdQ1WDp8Yqz7iXCR5IIQ0TXNxcWFZFugzTrdt27ZthCQDd2ZZ5jgO+HKe55ZlIc4X44tLUEo556hzOBzmeQ4QjLBf13U55+fn5yhcVRXq8TwP7jvnHFMIGNd13SiKYAeAb94uaVgURRzHjuNsNhvHcUaj0Wq1qltEDsRcVdXp6Slocp7nsAZGOQiCLlYaCxJWbUi+ZVnooKZpaHBRFIh6xvwBes7zHDmgdV23bRs97d6LGIaBQ47j9Hq9OI4xOoZhuK5bFEVVVah2sVig73VdIzkJZkKSJIwxXdfTNMUKhJ7nRVGEpgoh7t69m+c55zzP8yRJ6rpWVRUJN5bLZb/fJ4QsFovd3d31ep0kyXw+9zzv8PDwn//5nz/88MNL/yRISUlJSUlJSX2lFIYhpRQImBACzxN+IBxO0maf+9Jpl4QnBTxEKG0iadF+0gd3GhVevhCOEkJUVb28RwgxGAx+w+WkpDr99yzRdb2qqkuHpKSkpKSkfmv0ne985+7du3t7e5PJJAzDnZ0d3/evX7++s7Nj2/Z0OkXI86effloUheM4URQ9f/4cYacAdltbWwhzHgwGXYAwUHWapmmauq4LdhnHseu6SMULAclhMb31em2apmVZhmEAbevtWnyr1Uppl9RjjCEItCOt8Oc6d1Bt4xc6ertcLrFHVVVFUYQQ4/EY/RoMBuv1ej6fwxpCCCTTcBwHENyyLLvNB4J+ITGI1wZBD4dDoOSPPvoITTIMwzTNyysHfvzxx6qqAkGiDYCSmqadnJxomoar2LYNBI+SIPswArrTFcMVsbOzzK9sI/qVEMI555xvbW2BqnPO0zR1HGcwGIzHY7BOOMpISZHnuW3bnufBqwZQnkwmpE2EV9c1aZ1peO2oWbRrEgKJ4nTP88CaDcPABEB6YhRGedjZMAzAWVRe13VRFEjfkSQJup9lWa/XwyXqukYiC8MwwjB0HKff75dliVUKi6LwPC8IAsdxhsNhWZZg6OgXJjPmGKDwaDRCq8CRLcvSdR2mTpJEVVXLspbLJa5o2zbnHNuIgMasdhwHmJgxhmt1VNrzPM55VVVpmhqXskiDcWua1j2owCzY7n52O7tijDFCCNqMxoOMx3E8m81UVQXLbto4naZpLMvCFwC9Xq8syy+++OLhw4c//OEPl8tldzkpKSkpKSkpqa+2OteoaR1XRM/gUeJXXDI4YPgJTxueHmkDVroySkulcYlOOKqq6uWqhBCu67I2LFpK6jdIudVqe3tbOu5SUlJSUr+N+pM/+ZP9/f3xeAyUtre3h8QFk8kE2QYMw9A07cmTJ8hQgZBbMDvGWN2uH40AzLIs+/0+oj6NFsLGcfzs2TNd158+fYqfcRyvVqvFYoF6oCdPnkRR1BXokCsUBMHFxcVyuYS3V9f14eEhgliFEJTSZ8+eUUq1NkMFgrUBiMfjMege3DshxHw+T5KkLMum/bZuPB5PJhPQc9pG7IJQf/bZZ4jnLcuyLMtXr16laYouZ236aWBKz/MQLl3XtRAiiiJQV9iQtrHJmqbBILquW5bleV4X3pvn+Wq1MtrI7ouLi44mP3nyRPlyHLfWIuzOUKqqPn78mFLalSQtr+xGqiiKPM+Pj49bj/q/8ma4ros4XNd1J5MJLgEHGtAZtupsCGedEAI7ANBXVVVVVZ7neFEBXN4lzcCUw7nY5pxjjjmOgwJN0xwdHSHoGG8j+v0+ooPBwT3POzs7w1W6E1FnURSIdMabDIQkA3bHcWzbNrazLAMpxtG6rjnnR0dHyA2taZplWXhNAhtalhXHsRCiKArGmOd53dVd163atM6u64KSm6YJO5umGQQB3iikaer7PobVMAzLssIw1HVdURQ0JooiPNIkSQJkTyk1DCMMQ9/3KaWMMbwDwIMNWo7G4+4TQmRZhpQdCJHe2tqazWYXFxdlWRZFgU8W7ty5E4bhcrlcLBbr9fpf//VfMRZSUlJSUlJSUl95waftPFvSRlTAFYeDrbUPDp0IIXBf4X3BGYMXjQ1UQr6cM7oTdnbYGmqapizLv/mbvxkMBpfKSkn9D1JfvHjxq/ukpKSkpKR+G7S3t3fnzh3btqfTqaqqw+Fwa2sLfhUg7CeffOI4jqIoQog8z/f29oA7QT8B0S5DSaC6uq6/+OILTdPAdi8uLuBRNU1zfHyMDABCCM657/uTyYRSqqqqruvw2+DMCSEA9YbDYdM0jDEhhKIohJDZbIaQ2DRNsyw7Pz8He23avGzsUhBBVVUgp0obyICmKorCGIObiBOLovB9v24zY5RlCZhb1/V6vS6KQm9pOyEEAbmMsaqqDMP46KOPbty4URQFwC66A52enm5vb+u6rmmaYRi+7zdNg5anaQryCHoIUgwLn5ycjMdjkOuuCzAvpRQ+saZpGAWMyOPHj+/du9f1Dl1Df9Fm6OTkBPkcukN1u/Dj/v5+nufdEMBiiqKcn5+Px2OUPzs7Q2IKIcSbN28AiKuqevHixd27d9M0Lcvy8PDw9u3bQogwDDebjeu6aMDp6elwOLyMiU3TLMtS1/V+vx8EAaotyzJNU1VVr169GgRBXddlWSZJ4jgOCnueF4Zhnufb29uYJAC7QPyWZfV6vTAMZ7NZnudFUTiOE4ahbdsIUgbX7vV6AN+2bW82G8uyMDo3b970fZ9znqYp0nFwzgGsGWOj0ci+lFXDtu2iKDpIrba5m9frdZ7naMxyuRyNRpgStm0PBoPFYrFYLGzbXiwWvV5PVVXHcYIgUFXVMIw8z+M41nUdg6u2i09aloWQfwxN96iD4auqCqg9CILpdGpZFmZjHMfL5dJxnPv37yPK3nGcZ8+eFUXR6/U+/PDD4+PjR48eddNDSkpKSkpKSuorL/pryZ3hVgkhCCHwujVN45x3HnX31NC5yqgBzx2cc2x39XendKdjJ2MMjly3v6qqP/3TP/3oo48eP36cZVl3ipTUr+i/s7dISUlJSUn9Vsh13dlsdv/+fcMwrly5AlY1mUyGw+HBwYHjOIhXXa1Ws9lMa1/+g1TCczo9PdU0DRBZ07TT01NCCKV0NpvVdd0R5zAMGWOGYaCGzgmjlAoh4K7BUQMwretaCNE0DXbWdc0Y45yDM4LYpmkahiFKUkpVVVUUpSvMGJvP5+Dp0NOnT+/du9cVe/Pmze3bt0H3VFU9OzvrfoK03rlzB9iOUvrZZ5/dvXtXCNHr9RRFyfOctCtlg0ejI3VdE0I8z0NrdV3/xS9+8c1vflPXdcDZJEnQr7qu37x5c+vWLfiauq4vl0tg0DRNnz9/fuvWLcMwsiwDXEbwL2g1IqPTNGVtpLOu60+fPr158ya8ZN/3YROMETAlzI6rQzAdRuHyoCiKgvGFzs/PR6MRDvm+j6X5PM9DQDcwcRiG+/v7cRzjRFQO4Vqkdejrui7LkhCCgOiyLC3LUlU1CAIYs
GrXIQyCAMDa931cqygKwzB6vV4QBFVVCSGyLIuiyHEc3uaasG0bqWPqNl+HZVnPnj27deuW4ziIAg6CANQYWaGzLDMMw3Ec3/e3trbiOC6KwnXdzWZjtLHn4L+r1crzPNd1fd/HPFRVtdfrrdfrNE21Ntx+NBoFQVC2yww6jlNVVRzHhmEMBgPUD7IMC1RVpWlanud1XW82m36/j0kIm3fWa9pkgsPhsG5XEezGsWkazB/DMJDoI8sydD8MQ86567qU0tVqpWkaGHdZlvv7+wcHB3EcHx0dffbZZ7iilJSUlJSUlNTvguBlwXftnkHgZOq6jp+0XesbftevnN7JMAzDMOCHE0J0XUfNdeuH4xR419jGAwvnHF4fCgshvv/97ydJcnBw8F+XkZL6NUkALSUlJSX1W6DZbHblypXJZNLr9cD+xuOxoihAcv1+X9O05XJ55coV0zQ1TfN93/M8kLi6zcAAhwnuFDggqJlt23CqwjCklML36rwu4DNyiabBpUOBOI5Jm8dWCFEUBeA1PDPO+fn5ueu68AXRhq5OVVWFEKvVajgcwntrmub8/PzatWu6riPWGFyVtN/K1XVdFAVaIoQAIoTTWZZlURTAuEVRAImqqmpZ1mKxKMvy0aNH77//Pgpzzl++fDkajXBWmqaEEHDhTpZlcc6RTANgWtf1MAxVVTUMw7Ksuq7LshyPx3EcN00DPoij2NB1HUHQDx8+fOedd1DtYrGYTCaKosCAMCkhBOsEwjjYDymK8ubNG2QHHg6HaZo2rdsNs3Tb8/kcAemKomw2m9FopChK0zRlWQI6R1FUliXsjGI4HfVUVZWmqed5qqpyzquqAkpG98uyjKJof38/DMOmac7OzhzHgYnW6zUio8MwRB6YLMuQKwPQ+ejoyLZtGBDzDebSdR2cFyso4hkARxHFjFlq27bv+6ZpInFH0zSGYSCeOkkSwzBWq5XruoZhBEFw/fr19XodRdHNmzcHg8Fms0GbQXUxcCjZDZbneUhkYVmWZVkYJl3XDcPo9XrL5dL3fcuyTNNcr9c7OzuYwxi+5XI5nU4ppY7jrFYrWAAW7kZnOp2uVqvt7e2zszNCCHK+Ic+1EAIdhxknkwny5xRFcfPmzePjY0qpZVnz+ZwxduXKlU8++aRpmqqq1uv1j3/84yRJMHxSUlJSUlJSUr8j4pzDWyaX/OG6rjnnpmmiDBxdxhicavzFfjjYQgh82QZX0DAMPD7A9+4cbJyLq3TbcAUv19Y0zZ07d775zW9KAC31GyQBtJSUlJTU/+/kOA6WE/Q8z3EcVVV7vd50OiWExHHsui5inF3X3draMgxD07QwDIfDIXDnxcVFnudVVfV6PU3T5vN5XddCCM450GeWZXCYnj9/3vlnhBDOOa4C7wplLqNSqDtF07RuD2hsXddAn4CYYHn0EshmjK1Wq9Fo1F2CEDKZTIQQSZIkSaJp2ps3b7a2tpqm6VzJ7qKKorx+/bpLVUEp9Tyvrmv4l5TSwWDQNA0CjRVF4ZzXdT0YDBhjdV2jMfApkcmBtNz5xYsX/X4/yzLOedM0MGBVVaZpPnz48L333kMx1ACWCvy9tbWlqipr02sYhoG42g8//PDrX/86TqnrGriTMbbZbDAKhJC6rpMkAaAH2EWzVVWdz+fD4RAUGMvuMcZAn4UQOHcwGICe4w1EURT1pXgN0FvLsqqqwikQug/zpmmKJBVVVVmWpShKHMdZls1mM8/zEBEshMD8QeoMOOhBEMxmsyiKOOdKm+PFtu0wDJEqerPZBEGgaZqu60CrCOkVQoBlAz0DIpumiQKwPOKskQQD9Hlvby+KIgQg9/v99XqdJAn4NRBzkiR5niMg2rIsRBMHQWCa5pMnT27cuOE4znK5RHuWyyWGAFkvwjBEA1arFe61MAzLssyyzPd9wzAAsoMgmEwmaF6v1zMMA/VYloVmUEqbptE0zbKs9XqN2T6fz5EYRwiBe7DbxrjcvXv36OgojuNbt25dXFzAhqqqBkFwcXGxvb29Xq9v3bqF7Oq2ba9Wq4ODg48//rgbUCkpKSkpKSmp3x1VVaVpGnxp5VI4BedcVVU8LxBCKKWq+l/ET7mU3Q5ecdM04/H42rVreDQQQqxWq8ePHwsh4O2jPJ5BcCK2KaWsTQNIWr+aEMIY+/M///O//du/xU8pqV+XBNBSUlJSUv875TjOZDLZ3d0FYdQ0TVVV0zSHw6Gqqmma2rZttIHMjuNgPxCYaZqWZamq+vz5c0VR7t69C9iX57mu603TvHnzBk4YbeFvEATYIK37hUjquq4555zzLqL5sj8Huto0TVVVnHOcAuYrhIAHpqoqKmmaBnmfkVjAsqzFYoFTcFbnFIKBlmUZBAEhBEEHlFKUn0wmgKGff/75vXv3CCHwDpumKcuyaRrgvEePHl2/fr0sS9DMw8PD0WgEdlxV1eHh4Z07d7oTj4+Pp9NpVVVFUeR5zhjTdd22bSFEmqbg9ZRSzvmzZ89u374NfAxwrOs6toEdcQhGANbUNG25XI7HY4yXaZpxHAMybjYbQgjnfDAYhGFY13VHIdFaRVE6FxkNWK1WV69eVRQlju6JFnsAACAASURBVGOU7M5CU+u6jqJotVq9/fbbRVHAnlVVua4LywCvh2EIxxo1Y8iEEFVVGe36e0II2BP7waOrdh1C3/dd1zVNMwzD2WyWpqkQYjAYAMWaphkEwfb2NvD0zs5OlmVpmuq6DpKLXqdtRubVaoUk1EEQhGHoOI7neZTSIAjwKgXNRiR7VVVIteH7vm3btm3jENBzlmVxHCMw2fM8DEc3FpZlDYfD9Xo9GAxAq23brqoqTVNVVT3PW6/XOL3f7y8WC03T0BfwXww6Ng4ODpA/PYoihFrneY6N4XC4WCwURWmaRlVVWElRFF3X4zhGdD8Grq7rqqqwgRCb4XB4fn6e53mWZR3vdhxH13XEPnPOl8tlnudFUViW9ezZsyAIfvrTnyJgX0pKSkpKSkrqd1BFUWiahqcG0sa4wBmD80Yupeno/l6WEMJ13clkYhhGt7Ou6+vXrxdFkSQJ/GpUggt1lyOEAEB3P7HNOfc87y//8i9/+MMfdnVKSV2WBNBSUlJSUv9fyLbt4XC4tbU1nU7BmoUQmqbVdc0YGw6HjDEENeu6rqoq59y27d3dXU3TEKEJGYYB3FwUha7rVVUtFgsgvNPTU3BM0zThJxFC6KU1/QAWkZkBdA80E34baZcWnEwmlFKcgkoAjquqwp7lcrm9vY1r0TZ1Bippmqau64cPH967dw9eIM5FJbgKIaQsy62trTzPQQNBCbsGlGXp+/50Ol2v1yChpmlqmoa+MMYcxxFtHmRFUbAeXZIkjLE0TRE4HEVR0zRhGOq6Tgi5uLgoioJzvre3Z5pmWZamaYJWl2UJ/AdfFoYFqQfN1HWdtpQZcBDJtYE78Rd6/fr1dDoF6xRCmKZZtSsQvn79+vr161EUKYpSt9lIGGMXFxeg85RSpISGMWkbft6NHee8KIqiKAghvV6vMylsXpZlmqZ7e3tZljWtUA+qapqmqirP
89I0RfRxFEUtGq1xNE1T61JWDcdxwjDc3t4uikII0ev1QJZN0/R9H1Hq4OlCiKIoHMc5PDy8efNmVVXIR6HruqZp6/V6Op3WdQ0aXlVVGIad0QzDCIJgb28vCALUgwmfZZmqqla7DCDs3E0MBMsDE6O1lmX5vr+zs+P7flEUURRpbfz1lStXmqYB4MZf8G7btuM4Xq/XrGXNuHcwS6G6rsfjMU6cz+d7e3uwJ4aPXfoGE/sJIYwxSmld14PBACH5uN2Gw+HZ2VlVVWVZYp6DPiNiOk3TOI4Rpv3gwYNnz57NZjMhxHQ6ffr0aZ7nL1++fPjw4eW2SUlJSUlJSUn9rglPTJ2j2zSN0j6VqKqq6zrnHK4vyl927bATsLhbLgXyPO/evXvz+fzw8PDyflyIXKoH7ne3UwihqioerP76r//6n/7pn+pL64dLSXWSAFpKSkpK6jfJdV2EOhJCwP4URXEcx7KspmniOIaPYrdplLusrK7r3rx5czQajcdjXdeBm/M8r+t6e3s7DMN+vw/Eqaoq8BYwKz69f+uttwzDMAzj8PBQUZQsy7IsY4zpuo4l9Zo2ENh1XaXN50Db3Mr4CYcM3hgKg1SiWMfFAECFEIgjXq/Xs9kMdXacl1Kq6zqcOVQLytYh49lsxjnXdd0wDMdxmpbQAaYj8TEawNvE0KicMdY0DTLnJkkCxNmhN2RdePPmzWw2oy3sPjk5QaoQIYQQ4ujoaDweYxtkWdM0IUTTNJRSIOxer9c0DaJTTdPknAshEMDbNA0iKdBIAEpN0xhjGALTND/77LMbN24AHcKAGDXG2JMnT27fvg0Sqqoq7I++YxsCe4Xl0WuUf/36NV4JYKRgzMFgoCgKOts0DXra+bL0y0EZONT97Q51pwshXr16dfPmTdd1weUdx0GB09NT27YBnTG1oiiqqmo2m6GzYNBIjRdF0XQ6LYoCNSNCxLKsqqpQA87lnGMQXddNkoRzXtd1lmVRFBmGEYbh1taW53mAxWma4hkAyS5wI/T7fd/3YWpMALweQHt838ddYxiG2y4tWLX0fLFYWJYF2y4WC8MwFEUxDGO1WgVBcO3aNa0NVMcIYnQYY2DWiqJYlrXZbHzf393dRW6N3d3d5XIJy2PmN+0zT2fnra2t9XoNgzftGoOY7RhWqKqqoijSNLVtW9M0XdeDIGCMua7LOTdNM01TxhilFJ8ywICbzeZHP/qRDHyWkpKSkpKSkjo8POyS9cExI4Q4jmMYRudpkzYxdFVVeITpXGvOuRBib2/vS5W26vV6169ff/nyZVmWdbt+OwT3jxACD18IgcvhipTSqqoMw/jDP/zD//iP/+jOkpLqJAG0lJSU1G+rQDkBZWzbVhQFX+jXdQ1a9L8inGiaJiFkNBo5jtPv9y3LopRqmoYgUDAg2qab8DwPDgeY14MHDzzPU9v33mmadnQJECpNU9M0Pc9DBoAgCIqiePvtt03T1HX99evXTdNUVeU4jqIovu8rioLsunEcq6oK3IzWwt0Bn+r2AGXCCWuahjGmtXkkzs7Oer3e1tYWY6yqqjzPHz9+bFnWdDpVFKUsy6IoGGO6rlNKdV1XFIVSCl9K0zTU37SR1LAAnDk0gFLatKkwiqJA29SWOG9vb7M24YYQAqmBQagxUqTlp5zzV69eTSYT4EvO+YsXLx48eMAYW6/XnHOYFFUpioJMCOgmYyxJkn6/n6YpLPn8+XMkR86yLE3Tg4MDJCfBddM0resaQD9JEvQUOj09VVU1z3N4nBhQdLksS71dUVDX9fV6LYRAJXEcYxSAMskluPz8+fOrV68ahsEYy/NcafX06dO33noLxoGFMVXKsuxQL6W0rmsYtt/vn5+fj0YjFIM9cVa/30+SBCHwmqZVVYWoEEVRzs/Pe70efoZhiHGs6/ri4kJVVcuyTNOMogjplYUQ5+fnnufFcWxZ1osXL9566y3btqMowkjholmWITxZ0zSMCCKg0VTXdeF5O46zXC7jOIa5dF23LGswGARB0N0mQMmu63bx1MhYcnR0hFtD13XGmOM4QLGj0cgwjPV6vb29raqq4zgd1NZ1XdM02BN/uyv6vt/v9xFwbZpmnudxHGua1uv11ut1EASz2QzjhYGjlK5WK03TLMsKwxCHMPlVVb1y5Uqapr7vj0ajNE3DMJxOp03TXH4gwYTBNMbcVhSlal9dWJaFnNeWZUVRNBwO0aT9/f2XL1/u7e1hIObz+fb29mKxiOP49PT0Jz/5STcKUlJSUlJSUlK/y3rz5g2eQZo2JkZRFMdx8EQDl6yua3hlnHNd13EijgohKKWTyeRLlbayLMuyrIuLi9Vq1Tl4hBBKKZ6AaBvuwznHftpGYQshVFX94IMPJICW+h8lAbSUlJTU/x7Ztu26LqXUMIwsy/DftqIovV4PMHez2ZimORwO8f07IQR+BsgXqJAQoqoq8Cb4FojKzLJMVVXDMHAivnMnhKiqapomeFBRFIqiuK5L2vBSy7J0XR+NRqB+iqL0+33DMKbT6Xw+z7KMMcY59zwP1zo7O7MsS1XV6XSqqiql9Pz8nDFmGMbW1tZoNIqiKAiCKIrW6zXwq2EYk8mkKIoXL15g8TrHcSilnPMoikajUa/XAwur65oQoigKyFoHuVjLl+s28QX2NE3z+PFjQsidO3dUVWWMXVxcMMaCIFgul2+//baiKM+fP2eMHR4e9vt99KUsy9PT081mc+/ePVwXxgSEZYyBmlVVtbW1BWdLCIHKQe6UloAjO4Gqqrg6TIqOCCGEEHmekzaGlBByenr64MEDjJdhGEmSTCaTso14BfRvmmY4HOq6nqYpAm/hR9Z1nec5Rh/xpLquN20scJIkvV4vz/OmaYqiQOixruuGYVRVBbSqtYHn6BSYadM0GNymaTjnL168uHHjhq7rKMkYA9asqurs7Ay5MoDdMbU0TdM0bT6fYz6gm1mWIXPFfD7HYGGed6ZQFKVpA9Xpl7H+ixcvbt++TSmN4xjsklKqqmqSJHEcI2Y2juPt7e26roGem6aZTCZpmnLO8zzf29tTFCUIAkJI0ybZsG0b+bVxFfDQqqp830fgeRzHogXcZVlqmiaEKMsySZIoiiaTSRAEMBE4cl3Xtm3Xdb1cLqMowhzQdd227TzPwWHzdmnBsiwNw3BdNwiC6XSa53me57ZtIydGNyK6rjuOU5ZlFEWapmECYG70+/3NZpMkCWtDzhlj+GcBQDkMQ1BdXddxq+Iv6sf4rlYrWFvTNMdxFovFzs6OpmnL5RKfJmBQuhmOUV6tVl0yaMMwCCGMMWD6JEk2m01d18izcfXq1aqqOOfT6XS5XCKSejQa7e7uvnr1yjRN13XfvHnT7/e71x5xHCOa2zRNMOgkSR49evTy5UsiJSUlJSUlJSVFCLm0bEzTpj4zTdNxHNbmQCPtgoScc/hjqqrWdc0Yg4OH2KPfoGvXri2XSzjDhBC47hAurWlanueKotR1TSkVQsB75Jy/9dZb/12RlNQlSQAtJSUl9f+yPM9zXdeyLLAewzBM0wT21TSNECKEADYC/wLoxCl
gT6qqAtXhXPyEh0FbkggW2fkEjDHGWJZlaZpmWSaEGAwGOBFhqkVROI4DRAiSFcdxkiTD4RBoCWwuiiIgM0C3+XxeluX169e3trbA+EDTqqrSdX02mxVFgU/7kRT42rVrAH9FUeR5bhjGeDz2PC/Lsvl8/uDBA0rparVC44+Pj7/zne8oisI5Pz8/F0LYtt1ZBt4M/BvGWF3X8/nc9/179+7puk4p/eKLL2CT0Wikqup6vTYMg3N+cHBw48YNRVHEpQQXi8WCEAL2nWXZo0ePPvjgA03TFosFMB+QPboGLoYo6brV+fn57u4ueBxtM3igfkVRsFMIQSklhDRNU5Yl4D5jTNM00zSR4KKu684XjOMYVi3LMs9zpADO87wsSxh2e3sbjc+yzDRN2K1pmqZpgC855+Djvu/D8nmed3HNaptjAZ4iJgkajJmgadpHH330rW99S9d1kFba5tbQNA3R6F0lOAThEGujnrsZyBg7Ojra3t6mlMJu29vb6/UaFWICQ3VdK4pCCOn1emC7iGJ+/PjxW2+9hZ5uNpubN2925sJO2Gp3dxfON64ihOCco0z3EwPEOfc8r65rGJAQAjtzzouiMAyj1+t1uB/2BGTXNA0jGEURaHKv16uqKggC3IN4DxSGIVqYJEkQBKZp4nYriuLVq1eGYRiGgfK4neu6Ltq3DpvNptfrwTKdATebze3bt4MgiON4OBziCQF3E/6JCIIA9tR1HSC7LMuyLNM0NU1ztVrhPlJVlTGmKMpyuey+ssQgLhaLwWCgaZphGMjKous6ZuDlYpjMmNvYiaOEkMlkwhjTNA2vN8ilWH7R5kIp20TPoNXXr18/PT3Fy4koimBMzvlgMDg/P9d1fXd39/z8nHOeJImu6+fn5//yL//SXVFKSkpKSkpKSooQwjmHO909HsIzxB48GnDODcOAi9g9TtI2DsZuv8WEiqIoy5JSCteXENLv95EgDj/ppfBnuNOd41pVVXeUEFLXNfIESkn9uiSAlpKSkvp/Ltd19/b2PM8zTVPXddM0e70ekiHgf2JFUaqqQt4DIUSH8+q6xndSSruGA2MMlAonAuKMRiOtBYggj6QFmpRS+BZVm8JYVVWQX5SklOJE4LCqqqqqQtBxnudRFLmuC5DHGKOU4oP6KIqyLOtyVoCpHR8fg8GlaVpVVZZldV1/73vf22w2WZbBuUFfGGPz+Zxznuf53bt3GWNCiDRNnz59ats251xRFIAnTdOA2Bhjq9VKCFFV1ePHj999911VVc/OzhBNadv2/fv3NU17/PgxpdQ0TaSEVlUVYK5pGljbMIymDaSNooi2KZurqqrrOs/zDpDVdY0GUEpt266qijF2eHj49ttvo/2qqiIlAqyNfmFEsK0oiqIoZ2dnOzs7l+cD3MEOw/3nf/4n0mhQStGSXq9X17Wu67qua5o2Go2EEIZhABcWRbG9vZ0kCXzEs7MzZNJAvPZHH310//59AHTEw+7s7BiGYdu2EOLo6OjOnTuEkKqqVFX94osv7ty5gxllWVae5wCCeZ7DXGgAJm0QBBiyLMueP3/+1ltvoT0YU8xAVVU//vjj3/u931PbNCOEEGwzxnA5bLM2mQNjTFGUuq5RXlXVjz766J133iGECCEQwlwUxWazOTk5uX37dhzH8FzR1F6vRy7lce5ugbqu+/1+lmVg1oqinJyc9Ho9x3FUVcVsxyHMH9ISZ0IIRgd3qOM4QoggCIqiiKLINM2iKHRddxwniqKqqtAMRDSv12t8HxAEAVJ2EEJOT09d13Ucp6oqGMowDEVRFosFELCu68PhcL1ex3FsGAZSYURRNJ1ONU1TFAVfBmiattlsEJRdVZXneUEQdHbGPxeoH5MWI2LbdpZlHYxWVRXTkjGGMt20VNq0zjDaZrOBKVAYg4VtRVF0XR8MBuv1er1eTyYTRVFI++XExcXFO++8s16v67rGsGJDtOroc1VV4/GYEKLrelmWRVFMp9MkSZIkCcMwCILd3d0wDPFU0zTN1atXnzx5sr29zTnHeH3xxRfPnj3ruiAlJSUlJSUlJQXhkQrbTdMQQuAHNk1DKYW3L4SglIIsd26w0qaBdhzncoVRFK3Xa8ZYB6AJIfv7+1EUoX6IUgpfnbaProqigFyjGNx1PKNJSf26JICWkpKS+l/V9vZ2v9/v9/uIhVRVdTgcDgaDqqoQYAsAtNls8H+wruuUUlAVROkqikIpxfZlQkQuMSDgG7A2OArwMMB6qqra2dnBHgQY1nVdliVjDBdVFIVzzjkfj8c4vXv7zTkfDoeMMbwJX61WiPREeyzLgrMym836/T6lVAiht+kXEA7cNE2e53Ecr9frH/zgB9euXUO1QoiTk5P3339fUZTxeAyD/OIXv7h27Rql1Pd9wzDyPH/x4sV3v/tdQkiWZScnJ0KI58+f7+7u6roOCwgh5vM5OgJgXRTFy5cvGWOmacK5adpcJZ1ZOOcgZdjmbegrbdN0MMY2m82NGzeapsmyLMsyIcSPfvSjr3/960EQ4KyiKCiloH64ysHBwf3797sL4S8GpXOzOOfYyTkvyzIMw52dHVisQ8M4yjmvqipN06ZpgOSKonj27BnSbnSeIkYHjUzTFB0H2uOcgwOCkwZBQFuO3E0nTdMYY6qqhmF4cXExGAziOIblb926pWmaruu6ri8Wi+l0iprrugaFR/0du0e1y+USb0EgpY3QZ226km5W4ycEK2FKf/HFF9evX0cL4cUKIYQQZVmiMGOsA5FCiKIoEAqNaFzeznDXdcGsOedBEEynU9u20bs0Ta9cuZLneZ7nSZLgBkmSBLdMXddFUfR6vTiOPc/DMM1msyzLMCKWZRFCwjCEncuyTJLEtu3VauV5nm3bAKaocDgcpmmKyRkEwWQyubi40DTt7OzMdV3DMJbL5Ww203W91+uBsdq2jSEGVob1DMMIgiDPc8w9XMUwDMYY5iEMiJKr1QqvanRd931/Mpms1+swDPGOBFNRURTTNH3fv3bt2mq12mw2u7u7eFHhuq7v+6gNg6Wqaq/XW6/XuE0wCvhLCFksFqqqwsLI5hGGISGEtPlkhsNh06bthjjneZ4jvgajABY/n8/xwmO5XLquyxhbrVZIu+G67v37958+fVqWpaqqnPNXr15xzk9PT3/84x/jclJSUlJSUlJSUr+iqqou/6zrmlxaIbCqqiRJCCGqqhZFAV8awjMOIeQyaG6a5ujoKI5jPCkgOIkQ0uv1dF2vqgqndIWxAb+UUloUBSH/lUCPUlrXNWPMcRy0QUrqsiSAlpKSkvqfhdjG6XTa7/cHg4FpmrZtG4YBngUI2DTNyclJURSWZY3HYwBHUBhUAooELAheU5YlpVS02SHw/3Rd1wiiVC5BVc75ZDIBHhLton/kEubD/s6TYIyhWvwEGURLFEXRNK1pGrAepeVNqqqCSOq6rigK5xyY7/r162hwkiR5njdNk6bp3t4eYyzPc71dyAKR15xz3/ebpvnZz352/fp1zjmlNE3TKIrm8/l3v/vdyWSS5/l8PmeMYdGMIAg457quh2H43nvvoZ
I0TRVFWa/Xu7u7SkuTi6LI83w0GnW24pwLIcbjMWMMVsJOYDVw2KqqqqrC919ZlimKQggBtaSUwv6U0rIs+/0+4GBd148fP/7a1762v79/fHxctwkiYH/Y9vz8fDab0UtRurZtY4gxymVZlmWJEWmapqoq13WbpmGMATu+fv36wYMHsLaiKHEcIwHF5Z9CCEVRhBBamxBDVVXf94HgcSE4iAgSB6oGOMaAWpZV1zVqRpxvFEWMMQy0YRgXFxeAzkVRpGn67NmzLuoZ0wMbmqYdHx8j2QL2Y5rh58HBwf7+fq/Xa5omjmO4m7AVimG7S9YBYX72+/0gCLp3JKgZhyilVVXhZqmqKgiCfr+Pabm9vS3aRSDPzs7G4zEmw3A4zLIsiiIERGPaOI5T1/VmsxkMBv1+3/f9nZ0dTOYOBzuOw1vibJpmWZaapsFiYRju7+9nWQYbBkFg27ZpmlEU7e/vh2HYvR5wXXe9XiNIJAgCy7JGo1GSJPDjDcPQ2hByDJxt23meZ1lmGAaIM2NsuVxi2iuKgokahmFZllVVxXGMMeWcZ1lmmuZyuaSUNk0zn89N09R13XVdBBfrun5xcYGdURRpmtbZFpMZk6QzO2sXOQzDcG9vzzTNxWJx586d9Xqt6/p6vdY0rWlDWuo2pSCGQFEUjFHRZhRZr9eDwaAsyzRN0zTN81wIgSFjjAVBcPv27RcvXiiKcu3atdevXyuK4jjOlStXDg4Oer1eEARPnjz58MMPuwZLSUlJSUlJSUn9ijoADYdQtM+DSpsSME1TVVXTNC3LEm4bXDK44pRSz/O62pqmefnypRAC0Uj4EJYQgq914cjBG0RhbCiKYhgGHnOEEJTSuq5VVRVCKIrS6/UkgJb6dUkALSUlJfVfsm3b87zRaDQajcA3Xdd1XVfTtJOTE03Tqqoqy5IxZtv2zs5OkiSMseFwCMQDsgPehP+DgSNv3boFb4BzDjBatUkz4DR0IoQoLXjFHkVRuqMoT1oShHPxF+1HGcYYAFMURfjZXErfgVPgVQyHQzgiYIubzQZglFL6+eefX79+XVEU5B8ghERR5DgOY0zTtDdv3lRVRSl98+bN1atXVVXt9/vga0dHR++8807TNI7j2Lbt+z6iLNM0JYRwzh8+fPj+++8Ph8OqqtI0rev63/7t3771rW81TbNYLMBDb9++DZOWZamqalmWuq4zxmBD9IVzDnPB0UG/YC4hRFEUWZbt7u7CYnC5oihCaCocLM75er0ejUad0RhjZ2dns9kM9qzr+uXLl++++64Q4ujoCKTStu2maRhjq9WKc358fIzy3XUdx0Fj0J7Dw8OtrS1AujzPEUzK23wXWZat12shBCByHMcXFxfb29uGYZimyTn/+OOP7927l+e54zhhGH722WcPHjwA1rRt+/Dw8M6dO5xzGP/JkydI2ZHnOWi+1oY8a5rGGDMMo6oq+JGGYZB25UA0FcUwebAfZkGxbi5haOq63mw27777bpIk6Cx2cs4Hg8HBwQGmPUTady1ZliHgAhG1naH6/X5VVXmee54HS+Jm4ZwjoL5pmrquj4+Pbdvu9XqU0iRJ0jS1LEsIgYhmXdeTJMmyDNM4juOiKPb29sqy5Jxzzg8PD2/cuGGapuM4nufpur5ardI03Ww2o9EIEdC4dJZltm1vNhvP8yzLCoJgb28P0xXpO/A6arPZJEmCyGWES7uuqygKop51XQ+CYHt7G8UQSx5FURzH4/FYVdXlcgnDappmWZbneUmScM49zwM7xrRHd2BnRVGEEEr7T8H+/n6e577v+76PrxYopTs7O77vowD2/MoG/nb/UGw2G8yNKIqWyyWOdkODbdgfMwdWms1mq9UKLznKssTUcl13s9n0ej3cIK7rXr9+/ejoSFGUXq93enqKwp7n4Y0FIrvxdHR8fHxwcCDXG5SSkpKSkpKS+s16+vQpHGx45nCeSevC4UGPEFIUBaBw5zrCtdN1/TKAPj4+xoNVWZZ1XT979gwZ8wghw+GwLEva5n1u2sdVXJcxJoRQVZVzDtcRBTjnf/zHf/x3f/d33SWkpCAJoKWkpH6n5Xne3t7eaDTq9/sdmQKBSpKkqqooiuq6nk6nwECEEARCLpdLhEWTNhi2LMsbN25QShVFAXDMskxVVbBX1ubWKIqiqiqgH1QIbDcajfB/drefXHrJDKcBrAopXEH0cG5VVdPpFP/xo+buREVRFEV5/Pgxsh/8/u//vqIoz58/f/PmTRiGRVEIIRD9muc58FOaprZtn5+fM8Zs2xZCIBzy8PCw3+8TQvC6u2maMAzB3xEPTinlbTaMLMuCIFAU5dNPP/3ggw9M0/R9P8uypmk++eQTrCzXNSCOY0qpZVlwmH7605/ev39/Nps9f/4ctkXuDk3TiqJYrVbr9VpRlO3tbU3TOOd1Xc/n816vBxYMA4Zh+Pr1a6ydWJYlsCzsA8yqqmoYhvfu3SvLEvw3yzKYS1EUSiml1Pf9umX9QoimaY6PjxGpSggRQrx58+bGjRu4KKWUMfbixYudnR1wTNSMMFuMBaXUdV0wRHhySZIgvQnaeXh4OJvNDMM4OzvDJKTtuiK6rqdp2vHrJEmSdrFB8PQkSXq9XpZldV1rmvbxxx9/85vfxLmapi0Wi62tLU3TGGPsy+sQBkEAHKlpmqqqH3744fvvv489GBGcwhj75S9/+d5778HCaDM6Xtd1GIaIlY7jGPaB3QaDweWpeH5+Dpe3rmvMtKqqgHpBbGn7mqGqqn6///jx41u3buGWZIxdvXo1TVMQ9p2dnSzLhBC4C8CjKaWYM7DG0dHRcDhMksQ0zSAIusKvX7+2bRvYd7lc2rYdx7Fpmuv1ejgcuq4Ljt2BVAAAIABJREFUdoy7tdfrxXFs23ZZlqDVy+USdFhpPyaAJTHPt7a28K+H67qISrYsqyzLMAxRHlJVFXUWRQHMbbZLxGB+MsYwIhcXF7R9+QRL4twkSXzfxz9NGBQMBwzebfz6Nm0TpGAEMV6EEGQUoZQ2TVO3HwHUrXAXwIZ1mymoLMuqqvb39zEu165de/XqVa/XU1UV4edVVY1GIzyZnJ2dqao6nU4xLi9fvkySBP8WffbZZ0hkJCUlJSUlJSUl9Rt0cnJy2bXrXDX8hENI2sVp1PYrRuzEHvvSIoRwwJqmwUPl8fFxB6D7/T6O4lw8DWEbjj0hRNO0PM+7/ajn29/+9j/8wz/ked5eREqKEAmgpaSkfgd19erV8Xg8mUzwCX+/3wceUlWVc356eup5nuM4s9msKArTNBEpjG/wDcPQdV1V1S5ytmPNuq7jf1nGGPAZECSCOimlVZtkQ1VVxMkSQkAqKaWLxWIymdQt7kTJy0lagYRwFmNsuVzGcYz4wbquUaFt25zzs7OzNE17vd5kMhFCFEWBSzRN84//+I9KG+va7/eFEHmen56eWpalqqrneUIIVVWTJEEorqIoSZKAka1Wq6997WuMMcB6RVGqqmqaZjweF0WBZK9CiCdPnty4cWOz2biuq6oqIQQosN/vg8RtNhuYUdd1MPpHjx790R/9E
SEkz/PValUUxaNHjxB8jY6XZXl0dIQuIHj25cuXXS7sZ8+eweAILFUUZbVa1XWN4avrGgOhadqHH374wQcfMMbgIdV1HQQB/KSmaWBVbHSIEGOxu7t7dHRECFmtVsjCMZ1OkRshz/NudKqqyvMc1L5pGgxWGIZN0wAZp2n6xRdfPHjwwDCMsiw550EQzGYzxDsLIUib2rvX62Eb44UCdV2DNmqahkHENoS+67puGAZeCXRHTdOcz+eO43S0/enTp2+99VbXTUwqbKuXEhCrqrpYLAaDAX6yNvUzYwy9JoQQQpqmWa/X+GSPUlrXdRzHjuPUdR2GIe4Rx3Fc13316tXbb79dVVVVVYPBAG8FcApsCAp/69YtIQTORd8x9LithBCI9U6SBPAdd2WSJEVRbDabyWTSNA1ukJ2dHc/z1uu167qr1Qrc2ff9vb29zWaDiYGbNAgCQghuCoSc93o9UOnBYACD67pu23av11ssFr7v37x507KsMAyFEJZlFW02Z8uytDaDCsyI0GCQa0qpaZq9Xi8MQyxIuFwukT2cMYbLYQgopbjoxcXF3t5ecym4HkOmtNAZe/5HwbAoQNtnA5zi+z5WC8R0reu6G4imacSlkGfRJiKvqoq0r83yPN/b2zs7OxuNRkdHR3hlkiTJzZs30zRdLper1QrPJMhS8vTpU9gWUeF40XJwcPD06dMvN1lKSkpKSkpKSur/VHDCO79OCIGHWUVR4KjDh8SzgNLGHMCv+3Wn0XEcxAMJIZJLqTNs267rmhDC2jVycF3SYm7GmK7rjDHOeXdWVVW7u7tXr16VDp7Ur0gCaCkpqd8JeZ535cqVK1euAB5NJpMwDG3bRtQegviQYmJnZ2c8Hvu+n+e5ZVm2bSuKUlXV9vY26JtoMy1cRj+UUtQAPwCMBlwMiBP7QUKzLEP2W9qyZtT8+vXrDv+BBKFyVVVPT0/zPK+qSlVVTdPqurYsyzTN0WhkGAbn3HEcx3Esyzo5Odnf3yfte2lN0wzDwIXgMaBC1lLyoijKsuz3+0qLegHR0jRFw8Dg0jRN2kXYCCFPnz6FiX72s599/etfRy8cxyGErNfrGzdu4IstnPL8+fM/+IM/MNrkJJzzTz/99L333huPx8+fP8/zXAiBiHLf92HYKIq2trZQOMsypDjY29ujlGqaVpZlHMc/+clPZrNZ0zSaphFCgiDwfX93d5cQAhN1XTAMA1fHfnqJrj59+vT27dswCyGEUoo6QfbTNP3000/fffddSunW1tbJycl4PBZCEEJQmBDy6NGj+/fvd5YsigLB5mVZdj8Rn15VFWyyXC6rqgKSTpIE2TzyPM/zPE1TuIxos+M4r1692t7eRlVpmuLFAHirpmnz+Xxra0tVVeBReIfoaed6AuByzlVVreta0zRVVRFrjPmmquqTJ086Ho1KsJ8xhmBVWJJSWhQFgrg55xh3HMKLgaZpgCZHo1GWZZzzoiiOjo6uX79+cXGhtEtvE0I6xgpLfv7553t7e1EU2bYNngtbOY4j2rBlxHeDOIdhiIDuKIo453VdF0WBOGXc0UmSaJrWGQpcGJcQQgwGgyRJLMtyHCcIgu5am81mZ2cHG7hPDcPAPxppmmIUgiBA9gzHcQDBcZtPJhO1zVgCM65Wq8lkEgRBkiRbW1uWZSG2uizLNE1938dYoDyGDD/xngY3Hb54wGRTVRWgHzNW+XLIM+zfqduztbXl+z5ucMYYKk+SxPf97kT8g9NJCIE5DxrOOceI4y+ltKoqvMngnFdVlSQJvfThBTj7N77xjVevXnmeZ5rmYrHQdR3vGxCM/8tf/nK9Xsv1BqWkpKSkpKSk/u+q+XImRrjccCybptHbBd7xLICS8BhxSt2GJhBCkAUOvlxd1/mlsGU8Znbu+uUasE3bYAvGWNcYIYRt29/4xjckgJb6FUkALSUl9ZXVdDqdTCbT6XQ4HI5GI5Ag5HTOsizLsuVySSkdDoee56mqWhTFYDAA7bJtG+iKMRYEga7r4Cyc8yzLiv+DvTdrlty4zkUzE4l5qHHXsKcmm93NQU21KFGWFEdShCjK04NDetDfsP+N/4IU9oPDYSpkazAlkaYoiSIZavbuQezeu7t3771rLhRQKAwJ4Dx8Qt4idW/Evef4yvQhvocKIJGZyFy5ULnw5cLKKuIEpvM8z+M43mw20oUZdJJhGIqigJallAoh0jQty7IoCnw5pSiKZJHgTqtpmq7rZVk+fPhQUZSdnR1d1+/du2dZVqvVAjkFDg4EtKqqq9VK13UUNAwjCAK0ilRcs1ptRFaWJYwPpBdVUDBFUeDRzBiThgul1HVdVIX+cs6fPHmSZVmr1TJNkxCi67rv+4ZhlGXZ6XTiOFZVtSzLdrtNKUWpsixhEu3s7Gw2G9CFjLGiKJbLped5YAlv3bp148YNrdqdryzLX/ziF9/61rc0TZvP54vFIkmSg4MDtLYsS7QctwYjnKbpu+++u7+/bxgGHJY553fu3HnuuefUCug1JA9hYnmgrGjTLMvQtfF4nKYpjDOoRFmW8NgFzY12QphweUYKIcT3/X6/TynN8zzP8+VyORwODcPIsgxN9TxPCMGrMCwQDvwOhBBwkZb5wzDknEP+mqY9evRoOBxCVTAinPN0y6/56tWr6KxlWY8fP+52u0mSgOyWHccvIQRCUBQF0SFwiktSORVFQTshJc/zkiRB5nfeeeell16SmozeQXnyKuw4lB/ayBgLgsBxnLIsgyA4OzvDGk+SJNPpFLy27/tJkui67roulCQIAmg7/IXTNAXjnOd5FEWmaWZZNp/Pu90uISQIgiAIdnd3QWRj2SbP8yRJHMdZLpeWZem6Ds/f6XRqmiYu4V7QATxiy+Uyr0KphGEIz31IQ63IYjzpkACOl8sl/mRM02w0GqvVCs8XOG6sK8iy2zLnnKOU67qLxQLiguQZY5qmxXEcBAFjDPz4arUaDoeQMMQOLcUx5I9TVn0gads22oMU5EEpHEjk1WYyWbUhZFmWQgghRLvdnkwmULnLly9vNpvlconFALl0p2kaIQQLKtgBEi13HIcQMpvNjo+PHz58+N5778n216hRo0aNGjVq1Ph/D7Zle8NJgnOuaVpRfTEpTXpYejiQbyiwBgkhu7u7SZKMx2NpFspbcM6zLKOUwtYlhMA+RAoOYAMriiKEgM0P0/HP//zP6zDQNT6GmoCuUaPG/1GwbdvzvOFw2O12d3d3TdPM87zdbmuaNhqNTNOcTCZglAzDME1T0zRKKfz14MRqWZZhGGA2kRPRNjjneZ5joiWEFEVBCKGUYsoHAb1cLjGvk8oUiOOYV2EckAg2CqQSWCfGGNwMQU4ZhvHee+8ZhqHrOuyD3d1dwzDATymKEoYhsmmapihKmqYgHFVVZYyB/EXbKKW88nWFfIqiQAbGGNilNE3B6CEdvBLnHPWjFIoLIfI8N00zjmPTNGF2CCFu3bp1/fp1XdfRZV3X//3f//3VV1/VdR1xQtI0ff/997/+9a9DeiCh7t69+8ILL0AmRVGkaXpycvLyyy9nWRaGIWNMCJEk
Cdrsuq6u63fv3v3Sl75EqiAneZ6fnJw899xzMKFA6d68efPGjRtq5Vi6Xq+RH9LjnL/zzjuf//znMUwYhTzP+/3+8fExuvPb3/72c5/73HA4fPDgAbhIXdeRGfJMkmSz2eRV4O8oinRdL4oCxlaWZcvlcn9/vyzLLMuyLNN1fT6fF0UBJYmi6Ojo6Pr163m11Z7v+8PhUNf1LMuEEOv1erPZSKVaV4GewTirqkq3/JoJIY7jpGlaFEWSJKvVilKqVSE7wjDc2dnhlWMCwm5IUZAqmAbkA0EBf+wQDVnh0jPPPKNU4Y/RcXDKRUVluq77wQcfDIdDUPNBEKD7aBWKQHp7e3ugfaE8EKZt20IIIQSI4EajEccxxJUkSRiGpmkKIXRdZ4wFQZBlmaIoQgg4UEON0zTdbDaO4yAajGEYkHMQBFEUcc4ty2q327g7bm3bdpZlpmkiWIfrupLohwzxTElloJQqimIYBlhXVKKqquu6GDhli6TGM46/GlSOakejEZa7LMsKw3C5XOKhKCvvEl5tVIh6OOez2Wy7McgM4eO0KAoZFL4sS1VVHceZz+e0+h9AEZkTPvhCCDxW0ENKKWSepqnjOJxzx3GSJEnTVCrkbDYzTZMxtlgsKKWdTuf4+PjFF19cLpfwffZ9HxT2pUuXjo6OyrKMoigIgqOjo4cPH6IxNWrUqFGjRo0aNf4/IU1TWL84zfMcDiIw5/C+oFSUsbQJy8r9eTKZ4LNRQohpmjs7O0+ePNlsNpRSXdermxBKKQw/Wr03EUIODg7wUSYMUUII5zxNU1kKVmWv1/vGN77x+uuvy/QaNWoCukaNGv8nwHGcw8PD4XDY6/Vc17UsqygKbAXWbDbn8/mtW7fyPPc8LwxDz/M8z+OcZ1mGwMSg0ljlL8w5B0EGIgbELqZwOfVKSCqnLMs8z+VMDD5IURS4/oFF2s6P4kgESwW6Cp/tg5xSVVVVVZgRgKIo21c551EUaZomM2iatlwuZYWEEM/zlIqxzbIsCIKyLGFJgGCK4xjhaFm1jTIowizLut0upTSpPulSVRURY4UQ6/Ua7fF93/M8Sul6vYbckiSJ4xjZLMsqy3K1WiG8MtsKUtxqtcqyBN+d5/l0OsVVTdOSJImi6K233rp69WpZcfpFUbiuyxjTdZ1zPh6PsyzTKpYcXP8HH3wAT2pFUcIwVFUVxdF+zjljrNFoWJalVJGgYajt7OyA/QSTmOd5u92G9/HJyclwOBRCZFkGWtM0zaIoID0hBHZNFEJsKiAINWjQJElc14VuCCFAE6PxcBEFUwlyFuCcE0LksCqKsj2+qBxlQdajawDd8kRAH6G9ShUvGMec89///vfgo3m1OzbyKIri+z6UEymkIvEVRRmNRleuXIGqgF53XTcMw6IowjAEGz6fz9M07Xa7QRDAIAbbOx6PN5sNPJqFEEEQ4FTXdXTW8zxQlvhYwXXd1Wq1Xq+xLJQkCSRvGAYhBLINwxDOzru7u47jBEEAB2SjQqvViqIoyzIoua7rpmn6vr/ZbFarFarVdR1KBeU0DEPXdUVRQLBCqugyRGFZ1nK5RAOgD1mWqapqmuZsNsNdFEWZz+eu685mM1k5RlMOga7riDwTBMFsNlNVFbeAZsoD6DDAGMNg4WpRFIyxTqeDZ5ZU/zx41jjnlmX5vs85Rz3bedI09TwPzzsi3RdFAQ0EVx7HcRAEruu22+3pdHr16tUnT56kafrMM8+cnZ2hC77v7+/vB0Hg+36r1RJCjEajNE1ffvnlR48e7e/vLxaL8Xgcx3Gz2Tw7O3v48OGvfvUr9K5GjRo1atSoUaPG/wJgFcNKh4GXJAkhJE1T2PbSdJS2JexDQkhRFE+ePJEENOd8MBg0Gg0hBKu+V5NI0xQmX1EUiqIgEbXh7pRS2JnydnhFUlX1u9/9bk1A19hGTUDXqFHjvzdc133xxRcR3HkwGBBCNE0LgmAwGKxWK9M07969++jRI8/zDMNgjIGhdhwnyzKwWqz6fAmzJlgb0LXgdMDBKYpSFAWu6rqepimp6GPwkmmaYt0Y7FJeORLKWLeKosA+AIUEQkpRlOl0miSJEKLVaum6HlWRi5GNENJqtVCcMUYpRTRVUGaqqjYaDVBXmqbled7pdCaTSV4FQEAlSkW2ol8IVkApzbIMl6IoknuRJUmCjuu6Do9a9AXdfPLkied56HVZ7dfHOWeMWZYFIRiG8dZbb73yyiswjGCI3L59+ytf+YoQAlISQrzxxhuvvPIKjpMksW07jmPZDNu2wzBEUJQkSRhjWZb98z//87e//W10Fre+d+/eyy+/rKpqURRpmsZxHIZhWZau6yLQ7a9//euXX34Zg1sURZ7nvu8jMwbo9PT00qVLiqJMJhPwiZqmSRONUorICZxzmRIEAal2CxRC2LYN4hXjSykF47zZbPI81zTt9u3bzz//vK7rWZYhA+LqrlYrtAT6AIoZwoR64PTo6AjFTdPM8/zx48fXrl0jhCRJoijK3bt3r127BlsTo4kRB+jWPnhBENAtehpLDrjEOR+Px3D2RyKUpyxLz/N+//vfP/3006jZ87yi2qeOVFQm1COOY9u20UHGGJQQQsMvMiM9yzIwy1giwtBAPUzTRLQTIUQURa7rxnEsh4ZXmw3med5ut+M4hj5wzg3DKIrC9308d1hwWq1WcDGez+eHh4fz+dw0TRRxXXc+n69WK865ZVmLxSIIgvF4DI/pxWLR7/en0+lyuWSMgSnOsixJkvV6bRhGHMeQ3mw2QwO2+WUAI4hjXu0Mg1EwDMP3/UajASlJWS0WCxm3B5jP50r1+CN9u0hZlv1+f7VaoTjyKIoCXh4eLnjwB4MBnJ2x1uK6blEURfUlRJZl7XYbmpxWby9g8JEBDtqtVksuGxiGMR6PPc/DGsN0Or24uOh2u1mWzWaz09NTz/NQ7Xg8Ho/HP/3pT0mNGjVq1KhRo0aN/z3gPRc2obQMYb+pqkoqM5IQAsscpSilMODDMFyv1zA4gaeffno4HMItQyYSQsqyzLJsu05afQuIFPyiWpmnKAp4OH3nO9/5p3/6p636anyqURPQNWrU+G8J13WHw+H+/n632x0Oh/1+f7PZdLvd8XjcbDY3m83JyYkQ4smTJ3me9/v9g4ODNE0ty9KrcMlBEIDEJITQKhgWHH4VRcnznHOOqRS0DqlCKnPOCSGY8jG/AriK4owxUJNpmq5Wq06nA+4JtaEgaCkwVt1uV602iNjZ2aFb87qiKNKDEkBMVdgZeZ5TSpMkwSnql8YE6kF7yrIUQuAAN0LlRVGgp0hEw3Aax3G73SbVV12oU1VVUGZgP1Hqgw8++NznPidp8aIirw3DQHEhBIhCVG6a5mazQbwCznmapmBXf/nLX3772982TbPVammaRik9OTm5ceNGkiSwe8qyvLi46Pf7hBDXdSmlvu+Dy7MsC9w3SGpFUdDxIAgQ0gEkqRDiN7/5zY0bN7IsAxNKCMFxu91er9ec8wcPHgwGA1CNSZKkaRpFUVEUqCFJkiAIer0eRjnLsjAMQaGiU0EQHB0dfeY
zn1FVVdf1NE3RTVJ5W2dZBlUxDCNNU9M0Hzx4AB2OoiiKort378rQ1dDVxWKRZRlcnqG0vGKooSpSPT7GR0+n03a7DTXD6EuVUKrYGtCTxWIBAppSmud5EARwjw3DkBASRZHjOHmeLxYLdMF1XXQHMqSUHh8ft1otqASYx81mA0oa2fI8RywIwzDyPIfkkySBPiyXS8/zEDcZ3LRkosMq7Mb5+Xmj0fB9H+ztYDAAzQq/9SRJQDe3Wi3QxLwihXFgGEaz2VytVsjs+z5YVNwC1XY6Hd/3oyiyLGs+n2uaZppmmqa+72NEAATTQLUQ9cXFhW3bGJHxeGwYhqIouq47jjOZTPD4QOyO4/jVTobQakop3SKXCSHT6VRVVZSSZZGnLMtyK2Qz8uOgqHYqVxTl+Pj48PAQus2qkDtY1LFtO89zDEqSJFEU2bZtGEaWZVeuXDk9PbUsC+tJSZJsNpuiKGazWavV2t3dPT8/73a7ShUcf7PZJEniOE6r1To+Pr5+/frp6akQYjqdXr58+b333sMi1htvvLHe2lS9Ro0aNWrUqFGjxv8yxuMxtpzZtgOzLINVSSpqGMYhr17ukJ9SGscx3tTwWkEp3d5ZRAI2Z57neItEJWSL3QboFgEtT+M4Nk3zlVdeefPNN6UlXONTjpqArlGjxn8zXL16dXd3d39/33Ec27Y5591utyzLZrPpuu5kMnnzzTfTyj15OBwqijIYDHZ2drIsA/sMsCqsKngZIQQhRFVVTKiMMc45Jl3clzEGHkdV1TRNXdclFVukaRoInbIKlUuq7RfyPOeV2yP4PjlzyzzSH1nSgru7u5PJBByiqqrNZnP7lFdhN+Tp/v7+fD5HdxRF2d/fL4oCM/1gMFAUZTQagbMGYYpmw+AoikIIkSSJEAL8u+w7Y0zmlHeHrzR2+dM0DRZJEATgmlF/r9dDGBBZP2R++/btF1980TAMIYRhGIZhQFZatSTAOQ+CAHWiAQhVUVT8PmPsvffe+5u/+RswfaDMwJAiGwRLKVVVFTE0CCH4xVUIH/socs6Lolgul5qmYWiQAf6tahW+g1K6WCygSzDClsvl3t4epVQIURRFWZZhGPZ6PUpplmWqqoZhOBqNhBBxFfd5NBoNBoP5fB7HcRRFYKg1TYNTcxiGe3t7uq6Dagdnp6qqpmnoo2majDH4NQRBAD0BVquVUnna8irkt7zq+36n04EGtlqtyWTS6XSUyhsXQlYUhTHm+z56B0fmdrudVxxxlmVg5yElvQocwRh75513PvvZz1JK8zx3XReiM01zsVhgfH3fz7Ls5ORkf3+/1WqNRqPFYuF5HkhnTdMcx4FbbrvdTpIkjmNN0zzPS9M0y7Ioio6Pjy9dusQ5Bws8HA7zPPd9H/RokiSWZeV5DoYUfwvL5dJ1XVVV5/N5r9fbbDboxWazWa1Wuq5DXGihaZrT6dTzPMuyOOfj8diqIrRsyxl/GkjhlbM5RIerzWZzsVhAVRhjarXp32QywXMEdVIqqppUCyqGYUDN6EdteuSHSuMUQB78vRRFAb0ty7Lb7U6n06Io4jimlLZaraIosmpxRVVVy7LgwI5YGVmWdTodIUSapgcHB+fn50KIfr+PJxTe35zzxWKBBSE4jyuKgvAjGEHGWBiGRVEoioJlkjRNB4PBeDx+/PjxZrO5e/fuycmJ7EWNGjVq1KhRo0aN/0389re/vXHjBixMsuUvJa1HHGybl0jhnAshGGMwR/Fh6/8T8jxHcRicMERlVcgj71IUBd3ahxyvZr1e77vf/e7f//3fb1db41OLj0QyrVGjRo1POL72ta9du3bt2rVrzz777N7e3uHhoWmahmFMJpOiKG7evPnLX/4yTVPLsp555pmDg4Onnnrq4OCg1+u1Wq2nn34a/rmgbvf29mS1jDFKKeiq+Xy+WCx831+tVsvlcjqd0sofGTQTeKv1er1er6MoiuNYCAE2x7ZtuG2apgkXQtd1pVmAmRukZBiG2laEXxBb0nTAMXgrpGC+xzGldDgcIgXTP/LDLMApKDNd18MwjKIIPo+QlWmatm0jDolt23rlWx0EwXQ6JYTM53MEpV2v19PpdDgcglBDnZzz+XzOGDs8PERBVVVN00ySJMsyeXfO+e9//3uytQGgruu+7zPGMAqGYei6/sMf/hCSBxeZZdnPf/7zXq/Xbrc9z0MLLctyHMeyLBRBHzVN29vbA0V769YtzrmmaZqmgVuU91UUhVL64x//2LZt27ZRA7psGIZhGJZl2bb929/+No7jLMtAgCZJEgQBBnez2azXa9/3Oecoq+u6aZqQEsS7Xq9BuOOS4ziu6zabzVar5bqubduO44AqbTablmXpuh6GIWMM6gT5MMagD2g2ugD14JxDSsgMhcFVQPYXgK4iGyEkiiK0hzHmfzS482QyEUIkSYJ2+r7faDTg3/rrX/8a+obMUv0IIXfu3CEVlV8URRAEUKo0TWWADijkx0hSKL9hGO12WwiRV0G08cy+//77hmHgkXn8+PFqtcKArtdrxIXIskw+PqZpYqx1XYdvMoSpadpiscDDZZpms9lcr9dIQW0YfZyaprlarVAJUjAWEKOUEoTJq93/xuOxFO9oNEI9pmnigYXMWbV4A4lBGhACQClFZhxIeZZlic8OkILasKOghJQbTlEzRL2zswP5Iw8UGEH9PpYtyzLHcRBJw7KsNE2llkZRFAQB2gMFWy6X165dUxQlCIJnn302TdNGo1EUxXQ6ffbZZ4MgKMtyOp1eu3ZtPp9nWXb37t3pdHp0dPTaa6/V7HONGjVq1KhRo8Z/Lu7evcu3PqQrq/dNVn3timzSmNwGjNsoilarFay4j2WQwAsL6pfApe1byF+ZoaxsUcMwvvjFLzYajT/UWOPTjY/rYo0aNWp8MtHr9f7sz/5sMBjA/XkwGDDGVFWN4/j+/ftgfz744INut7u7u/v0008fHBwcHBwoitJut13XNQxDq3arw7xIt2hcSik4KUlQgtSTkOQRpRRcFXKCqwIVhQleHkhWS9d10zRBb8mccs4GW0QIubi4mM1moDVXq5Vt27u7u3LKhxM3uDzk7/f7aDzaj76AYCKEcM5RHDUoigKPXfBovu/v7e3t7u4uFovVarXZbOLxKmXfAAAgAElEQVQ4Bl0OH1LTNNE18FCMseFwiIbBECGEqKq6v7/POYfLKkhAMHFg4Qkhrus2Gg3wyLZtS25UMoC6rlNKVVXtdrvgQNfrNUI0JEmCvvzbv/1bFEVpmqLvZVnmeY5KOp0OJNloNDDQrut6nvfaa68xxjjn8IunlC4WizAMwZInScI5B/sM6i1JEtCI4KORjr7oum4YRhRF6P56vY7jGByopMU1TYOgoBjoGq+oeagKyHe14kl5Fej5Y3Qn+sU5H41GSOGcK4oCchOnqqreuXMH6Rjii4sLDBMADZFso/SAZowpipLneRzHjuM4joOIFu12G7JF+BfcSKnCQCMFaoZKRqMRIQQOxZZloRSejjRNEbzC87zbt2/n1Q6ESZKA6PQ8D2sbkMyHH35oGIbjOOCji6
LIsiyKIqyagGZtNBpxHMO3GtLTdR0u/5PJRFEUKJLneXJpx3VdEKlQS32LdNZ1HasFk8lEPuyQua7rq9WKV1sOQlw4plWQEyRC+DhgjKmqilUZPNSKoliW5fv+bDaD9OS/ByClKoGCEKy8ikRSeTdjNJMkAeMPChijXJYlBAjA67nX6+V5DvZZZpDZ4jjGykSSJHEco+OapuEB55wnSbLZbIIgQAShMAwnkwkew6IoEBfI933f923bhvf3gwcPPM8Lw/D4+Pj73//++++/L7tQo0aNGjVq1KhR4z8Lp6en/I8iuZVlqapqXrkt4xcGKvKw6vNHWLCLxeLs7CzP849UvYXxeIziuAsMWmnW4r0MNRdFgcpxSWbIsmxnZ+fw8PAj9db4tKImoGvUqPGJhm3bly9f/vKXv/y1r33tueeeu3LliuM44KFUVX3nnXcuLi7Ardy6dcuyrE6nA29fXdd3d3evXbsGvgkezUgHswbGrdfr9Xo9yb7t7Oz0ej3QXmB2iqLYJm7KssQkDRYGBWVr4YhdVsEcQCSRajUYORljmPWltzVYXbQN5JesAQfyLrJJZVlSSvv9PrikoigopYPBoGrUHxwtkV/eHb7Mu7u7h4eH6/V6s9lcuXLlqaeegjBVVUXDDg4OfN8Pw3Cz2ZimGYbher1OkuTw8FDXdU3TQPOFYQhHyH6/r2kaXGs553rl3axpWhAEoH3RDMbYnTt3TNOUrs2GYbz99tuWZTmOA2tJUZQ0TXd2dhASwTRNxpi2Rexqmvbaa6/leQ5JIiA1eGFSRb+FTJCBMUYp9TzP8zzbtsEyv/766yCj0zS1bTvLMtSQpimcRu/cuaOqqmEYtm3Do9l1XTDO6Jp0eQanrKrqaDRSVXU2m/m+HwTB7du3lcpJWVKWMj8ISggZDqdHR0eQP7own8+ltjDGJH+tVmsekKeiKIyx+XwOlUANR0dHUhSMMRxDFeH32uv1pDaiHlQ1Go1gREr9IZUReffuXSEEFirAHXuehwUAqXVpmhqGISM/ILPnefBcxh3zPNc0zXXdNE23OVPw1IvFAkXyPI+iCDfCgeM4cDYHT4qHBceu68Zx7Pu+pmmmaSLOhtyZE+MlRWdZluu66/UaKaZpguyGnztGCkJAjJHpdMoYWywWuq6bpomAJ4CmaeDTF4uFFD4eme0vJyDDjwGJCNuCx5MQ0uv1IHlKKSILlVuO5GmartdrXdebzSbSi6LodDpl5WOS5zlUWgiB2BoQ+7achRAYjiiKbNuGg39ZlpqmwZFfVVXLsmzbRi9arVZZbTJp2zaldLFYXFxccM5930dMlTRNz87Oms3mdDq9efPm9773vbfeegsdqVGjRo0aNWrUqPGfDrzI8I86QVNKFUVRVVXalvjdBi4hP9waTk9Pnzx5sv6jvTrwKTCsWbxQbFut+JU2rYTMhuM8zxlj/+N//I/tmmt8alET0DVq1PiEwrbtK1eufOUrX/n6179+/fr1559/Hk6yoPAQnBRzm+/7jx8/bjab3W630+kcHh62223btlVVXS6XRrW7AubLbZoGTA3Imk6nIzmafr/f7XYlayOEAE8N4qaoKGkQQMhTVM65ZVnKlDzPcV9cxSlYKlVV0RFJUcniqBYpZIvpJoQMBgO0B80ghIxGo/F4PBqNLi4uxuMx5ns0j1Iq82dZVhQF3QoNjJuiPb1eD4FfPc8DD3v58mXJ8sPyANk3GAx6vR5IfLq1j6KqqmmaBkEQBEGSJEIIVD6ZTOBpCybRMIz5fB5FkRACjVFVdT6fCyE45yCILcv60Y9+BP/WNE0hSbRT3gs1yL5wzn/961+LKiIBqRzVTdM0TVPXdV3X/+Vf/gVO0HEcQyBqRbRJSGpV1/XNZrNYLIIggDduEAQ3b95EAyAW2Xdd1w3DcBzn7OwsiiK4kGuVJykajCLogmEYuMuDBw9QDwDDTvYoCALZa1yVAmeMBVufyzHGUBbZGGNhGMp0zvndu3dpRYYiJw6AsiKgGWOLxQJ6QghpNptwIoZY4DQNchlaKvUWfrIg6y8uLsCTgjKGrmZZBs7XNE3btuM4Bj1tmiY8o5E5TdN2u401AEh1tVrJS6CeOeeTyURVVRDTzWqLQgyEUnlDa5oGmePU87wkSZIkefjw4Xq9BpvcarXAaOu6DiWHbEFJr1YriBT3cl03CAJQ0pCeZVnr9RpE88eAQaQfDcEhNxdFDaTi9/F0y5yEEPllRlEU8nmHGPM873a7yI9HA3nkY44D+eAURdFut0EfZ1kGGUInDcMIw3A4HILB32w2m83mxo0bknbPssyyLMaY7/s47Xa7GIhut7tYLBaLhW3bJycnjx49evLkyQcffPD666+//vrrsiM1atSoUaNGjRo1/n9CkiSc/2FTN1ZF3lAURdd1WJIwL6WRybZiccCShEF+//79Dz/8EL5E2xiNRviYDxYy6pe1wY5FnTCAcSqP8Yu7/MVf/IWstsanGTUBXaNGjU8onn/++aeffnp/fx+hnAeDAaYxsH5RFJ2cnMxmM/BHnPODg4O9vT14zoIAYowJIYqiAHf8x0jTNMsyMDWSwREVudztdtvtNrZiy/OcEDIcDmUcDJC2/X5fOlCXZdntdpHe6/XyyvWYVHO8pISQKI+RXm45O28foBIUL8tyOByOx2ME6/B9//Lly/v7+4yxonKOxi8YK7QZncqyDNVKICeryGVVVefzOeTGOR8MBtitDt61hBBJoVJKLy4u8jynWxvlScoPVCDS4zimlKJyUIqUUrCQpmlalgU2lnNuWdZwOIScSRVSA/Sxpmk/+tGPysrhF7eANGjF6K3Xa8/zGo2GJJHff//9LMtIxcNSStvtNiJyGIYhhOBbMZ11XX/77bc1TQMlDX9nOE3LNmiadnFxMZlM5vP5arVarVZnZ2eoBD7sauXprKoq+hWGIfjrMAxXq9XNmzfRBV65OUM4KBKGoZSVtPNkTs657Auuksq/oNFoIDP96AIDcqJOPDjAnTt3ZE7GWLEVmHi1WoFxtiwLLskyOge0EeSmbdtQISGEruuWZXU6HWjU6empbdvNZrMoiizLwEp7nieEkPFS8jw3TdN1XTyGeOKyLLNtOwgCUMZBEJim2Wq1wEdDzqC5QVKjefBDVxRlsVhwzufzOQQoh9WyLDhEQzl55T2t6/pkMoFiSFlpmhaG4WKxkKLWNA3UM8RFKVVVFes0yIZRQD0oggMpUohu+zjLsvV67TgOZCuB9EajkVfxMWT3LcuCQFzXReY8z+VfE57uzWZj27aqqo7jeJ4XRRHyJ0ni+z7nnFKq6zq4ZrD26/U6TdPNZmNZFqol1Y4xzz33XFmWu7u7FxcXSZJ86UtfyvN8NBodHh6uVqssyzzPS9P09PS01WoFQfAf//EfP/zhDyf1Fuc1atSoUaNGjRp/Esxms6Io5EsBfuVLH9lyb8IlHMNSRQZSvSAURfH48ePbt2/fu3cPvgX379+fzWbIj1cGQkhROaDI4qi83NrcHjeS2QDDMJC/xqccf1gwqVGjRo1PFNrtNtwtQQGvVquyLBljT
548gVvl7373uzRNW62WaZqDwUBRlFarFcexpmmKogyHw8Viked5r9cDM9XpdDRNw6QInJ+fK9Xn9qSKVjEYDORULSqPWnmMzNt5MBkPBoO0cqZGbYSQXq93enqKbEVRDIdDSmlRFOfn57gdsqExcraGQYBSuMQYQ2wHyakdHh4iPgParChKt9ullXuspmnn5+dlZQ2oqjoYDBCoBIlIx61xF5mCpuKSZNMopeCaKaXg4Bhj8/kcG6OxragRs9ms1WrBRuGcc86jKBoMBoQQ3/ePj49BI4K0yrJM9vH111//2te+xjnf3d199OhRkiSMMV3X+/0+yEd0Fk3CqP3sZz/76le/ii5AYvP5HGwamPf79++/+OKLWZZBaEEQcM5BlOu6bhgG1vnTNAWbGcfxfD4viiKOY4TFmEwmg8HAMIw0TeEV22w2syyLogj3BUsI1+CiKFarFZqdVY6okBK4eNDKhBBIBsB4gRiFbCFJVVWRmOe5zIx4CEVRIOpIFEWNRoNSCrdWjAtuAfHGcdxoNBRFgbu0HDt0HIPLGINilGWZZRk43yiKoCd37tx55plnoBhCCFVVwRovl8v1em0YBnjqO3fu7O7uQqpxHIOkjqLItm0McVEU8EFerVagpPH02badJMlyucQYgc6ez+eGYSA/+GhIAysWnHM43kI+WhVrW1VVXdfTNNU0LcsyVVU1TZvNZp7njUajRqMxnU51XYd4ISVaeWrgWAoQv5qmrdfrIAgQygO6h4PJZGKapiwly0KkECb+naC3WZZhpIQQSZLQKrwGLpmmica7rjuZTMBBW5Y1mUyEEI7j6LoO3cPzXpZlp9OZz+e9Xm80GrVaLag0qPZLly7NZjPLsnRdj+N4tVoNBgPHcZIkee6556bTaZ7njUYDTuu7u7toMFqSVmHWx+Mx+GVVVZ9++ml8ZoFhms1mZ2dneBzwaNy/f/8Xv/gFFKZGjRo1atSoUaPGnwaPHj3q9/uw5UjFAuMdBCkArHG6tSUJYyyvvtNVFIVznqYpnGw457C3kyTBJ5WwjWn1vqwoyvHxsUyRt8CtkUG+OcLILIpC0zTHcVBhjU8zag/oGjVqfOLQ7/evXLnS7/fhiWkYBud8uVymaUopdRznzTffDMOw2WxiQ8Jer/fUU09pmjYcDknFpWZZlmWZEKLdbnue5ziOYRhBEKzX6yRJhBC9Xg8Bo13Xtaod5xC9d71eg0fL8xxTaVlFWUX9SMEB2oxZVl4lhDDGBoNBt9vtdruEEFmk1+vhI3rpQ93fCuUshNjZ2cGtMZdrmobdFBljaEZRFJ1OB4S75IxwCzQSgTKkBIqiGAwG8lQIkVXsZKPRgJ9vs9lstVrdaqMzVCgNC9wXKZKem81miyqAdRRF6/V6uVyWlYkDa4ZXYVJ0Xe/3++AKHz16BMpM0zRN08BaKooCBhPj9Q//8A9I4ZyjC5AqGgA2Vo4a9ATLFSDvdF3P85xXPs64ESq0qm0G33zzTcdxcAqn0Waz2awCiKuqev/+fXCOq9UKXCQ6blSAY6lWMaRgJ9FrdH+xWDDGtGrXQdSAnBAFcuKAc350dKRUUFX16OgI9SuKQgi5uLjwPK/dbkPH7t27RynlVXSO8/PzzWYD3+0sy2zbhuJtY5skJYQUReG67tHRERxyQZIW1SINY4wQIoTYbDZgjTEKWZZpmhbHMU6FEFEUua6LCoUQhmE0Go1Op0MIgVuubdtoGz4XyPP89PTUMAy4jUM+qqqCs26320mSbDYb6AOEjM0GDcNYrVZSeghYPJvNMC5aFRgaGVAthIkDqOXHfhljs9kMnSWEIH06nSrKHzZ4pFtPAQ7kKXy6kY6FFjxZlmU1Gg2w8JZlNZtN5MfAQQIw613XxfMohJALIagEaom/AhTBw4uy4H9t207TFA7mWZbleS4fc1S12WySJEE0FSwAaJo2nU7Lsux2u7quI7wGpdQwjPV6jVUcLAxEURRF0WazQQPa7fZyuZzP54eHh4iCcufOne9///s1+1yjRo0aNWrUqPGnx09+8hNFUWBhksrUxAEsW5zioKheRQkheNcDRBUXUdM0QogQIgzDOI5Z9S0m0mVtsh7YtPI0r0hniaLyryqKghBy48YN1FPj04zaA7pGjRqfFDiOs7u7u7e31+v1NE3b3d1N09R1XdM0NU27efOmZVmKosAxsNFotFqtVqvFGANdiJmyKApwMYPBgDE2Go3ATIGTAikGH+T9/X1CCKUUp6RaH0aiJJ4wceIAPCCmWCGE5LtxU8zBIGqRB/MxgKuytn6/j1NFUYqi6FWbwlFKhRC9Xo9SOplM0AZVVXd2diRfBiEYVRwJIQRMBAB355zv7OyMRqM8z8HEdbtdxlheQQixXC4JIbJTspFoBg6QYTabycQ0TeM4juMYXSjLMs9zRVEURcGytqwB/N1Pf/rTb37zm5xzyJ9zjtgFyIBxgSjUKkyHYRic8yRJyrIEuVaW5c9+9jO5fwXkhlAAskc/+MEP/vqv/xqMM2jN6XSapikYNELIz372s5dffhl9z7IM2UzTzLIMbN14PB4MBuAxTdMsy3K9XgshwL1mWXbr1q3r16+D4lSq0My4F1r+q1/96rnnnsNN1+t1GIZKxTgDk8mkLMvNZgMX1A8++OCFF16QPCmMReTknK9WK0opdAYji+Fg7A9bV0M90LbZbHbp0qUwDMuypJT+8pe//PKXv4w8qAE5HcfJ8zwIAvDU+FCg2+2u12sINs9zUKhJkiwWC/hEwzc8DEPTNB3HWS6Xx8fHV69elRyoLIVVorLyp7Zt2zTNJEmyLHv48CHEparqbDbTNM0wjMVi4XleGIagVuM4BsUMCRiG4TjOZDK5fPlyGIZhGEL4EvLplnIejUae552dnSGADCQGUEonk4nruoyx6XS6t7cH4UCko9FI13UoLQYCEEJ0u13odlmWjUZDuv8XRZEkCb4/kI9PXq0kUUqzLEM6ftFHxlie59t8MaqFWuKqEALjJY/R1CzLwjD0PA+Pf1YFmQFlv7e3B+K41WqNx+O9vT3btn3fbzabkLymaUEQPPPMM0+ePKGUjkajTqfzwgsvPHz4UFXVGzdunJ6eWpZ16dKl8Xh8eHh4fHycJMmlS5fu3bsXhmG73Z5MJsfHx8vl8h//8R+liGrUqFGjRo0aNWr8iYFdXiilMDWlOSrfGnBAKx8pZJDFywowkhVFQfGiKLIsQ7Vq9aa2XefH6pFVoTGkumNRvUuizoODg+0iNT6dqAnoGjVq/NcDlMfe3t7+/r6maf1+HxwK/PXAtcVxDEdLxtjOzo5hGPBfbjabURRhjsy3Ym4QQlRVxVfqmPaKomCMlWXZbrcJISCsFUXZ2dkpiuLs7Kwsy729PczBlNInT56geXIqxXRbVBy3DGXAtuJmbB+Ab0LD8iq6cVkFdM7zvN/vy0RCyPn5OeccdCTnvNPpoHJa8c5g4U3TNAxDVdV+vz8ejwkhqqoWRYFssnLOOXxg0WxWrYTLfu3u7jLGLi4uID1cBZBCKVVVlRBiGAbSRbX1H+qULacViyeEGAwG
SZI8ePAAiYQQGDHoFIrjdkiEZYMWgk8En1tWDuAgzsqy7Ha7lmXhLvDuRBRdeMsGQSBrMAzDNE1kyPM8TVNd1xH/Ic9zkNTr9RqLGXDfjqLoww8/HAwGqqr6vg/3T8aYWsV2ADdKt6I2q9UekoZhgE8sikLXdUIIrzxwGWM4NgyjKIr1eg3uMk1TiIIQAslwzn3fx0BDUBgyDCsgR1mWRQMASAyglOZ5XpYl1O/k5OTq1atCiOVyCUYVTDFuAaUlhAghjo+Pr1y54vs+Rhy6lCSJYRjNZjOO47IssyzTdR0usdBzy7JwSgiJogisNCJg4HuC9XpdFAU0FpqJmh3HiaLI933DMDAuuq5TSkFbg061LGu1Wum6rqpqWZa6rs9mM9M0ITTIilZLNaZpTqdTwzAgJUgGQoYM0WscnJ+fQ6MkCCHQaiHEZrNxHAcqtNlsut2u4zinp6cHBwdQKkIIHNKRP45jfGwB9S6KAt3EKODZEUIgQBAhJE1TIUSapmVZDgaD+XwOd35IBtWqqpplGdafsCJl23Ze7TQYhmG32+33+6vVyrKs5XLZ6/UIIdA3xN+Yz+fg7n3fn81mjUYjSZL1eq0oyuXLl8fjcZqmURRpmoaDfr+/2WzCMEzTFOP45MmT8/NzrNCUZXl8fPzOO+9AUDVq1KhRo0aNGjX+SxBFUVmWjDEhRFmWiqKU1ceyjDFFUfI8J1W4ubIijlEExn+e54qi4P2CVK8keImAk4SmaZRSVVWTJEEeUjms4BR1FkWBm8o8SIQdjmOjDgNdoyaga9So8UnA008/vbu72+/3sQ0dfDNBLamq+t57711cXFiWpeu667qdTgdkUJ7nYCcx0e7s7Pi+r6pqt9sFv1MUhaZpcATGvIiJmVRzMOZmxlhZlqAFsywTQmD67HQ6yLaNJ0+egB4CAUS2iGncUQixu7uLJonq43ohBKhSJAJFUUgKG6Rht9u9uLhQVRUWA5pBKWWMzedzy7Keeuop0G2KooxGI7DwkoBDXyil4/FYcqOyC7gLreLPEkLgbgnna/DvADy7z87OJAtWlqUQghCCfpVlSSmdTCbtdrvf75+cnEAOiqLMZrNer6coys7OjqZpYRjSalmebPmi/u53v3v++ecppeg+5/zHP/7xN77xDUopr4Ja/Ou//uu3vvUt9A7KoGkaIQSM5HQ6/clPfvLqq6+C34QTLq02RQRZ+e677/7lX/4lmE1N08BQg+I0DMOyLHCFlNI8z9M0DYIgiqIsyxzHKcsyy7Lbt2/DQ1lVVV3XbdsOgoAQstlsQFvfu3cP0U7g8ixJcF3XkyRRVfX8/HwwGKD9iqLM5/Nms5llWafT0XUdtckOKhUfjV9W7aKJIUA7pcpBsPIU6ocDSBVamiSJ4zhhGEpLkTF269atK1euIGez2RRCbDYbeD0jjooQAu7Mq9XKcZxms7lYLI6OjobDIeIzgMjebDbgKBljRVEkSWLbdqfTCcMwy7I0Te/fv4/FElVVhRBYO4HmYyAQmlnXdagB59w0zdlspuv6fD53HEfTtKIoMKyapk2nUxzIp4YxhmFF8Bxw0+gpJIMHgTE2mUzgfIFTAKeoCgIhlaeGfNKFECCakVlmA7IsW6/XzWbTdV0MFspigEQVHGOz2Xie57puHMdBEOzv70OAmqYpiqJpmmVZ8/kcd4zjuNVqTadThJSJoqjZbOq6zjkPggBPPVZZ1ut1mqa+7yuKous6NBMrJZqmrVYrhA73PA+jk1ShOQaDQRzHcRxDvaG9ZVmuVqs0TS9dunRyclKWJYK0GIaxXq9PT0/feuutj3W/Ro0aNWrUqFGjxp8eRVHga0VWfQsrL9Hq5RSX/q8ylSkrM8gDQgjnHC8XMGU557I4Dopqz0MAl3BrvIYURaFUfiESMI+feuqp7cQan07UBHSNGjX+i7G7u7uzs2PbdqvVajQaqqqC7Xr06JHjOB9++OHJyQn8ChEU1fM8xAEIwxCz3WAwQBzYtPIq7XQ6s9kMLJKmad1udzwef2xGBAkFnpdUUyOtsD2RowhjjFLa7XbBKed5niTJ/v4+pu3Hjx9jqgbtuLe3Rwh5+PBhUe3upVQ7M8hq8QsoFQXZ7XZxI7BjiqKMx2PDMLCXHXo3mUx0Xe92uyjFGBuNRigOYq7ZbKLNqIdSenZ2hquc/+FvH51ljEEsOzs7eRX6WQhBKe33+3EcZxX6/X5ZlnmeP378GDUTQuA8PhgMHj58iMTVaoXiaH+r1Voul+fn591uF4JFF+C5maYp57wsyyzLEOSBVHQ85zzLsrIsGWOy47/61a9eeuklRVEYY7ZtbzabZrMJZjOOY03TEFAiTVMMUJ7niqJI7lLTNNxls9mkaZqm6Y9//ONXX31VXlVVlVYUNjjT1WqlKIqu62Ahi2r1HqpFCFmv13DFFUKkaarrOpqH4VBV9cGDB4gGAz3JsgxBrhEPWqnCqkhAPWjF2stBgfRwFSNICEFn4zhuNBqyrBDCdd08z8MwRIya1Wq13WboPHj8NE2xtqEoinRnlnE28jzvdrubzaYoijiOCSG9Xm8+n6MjruumaUoIEUKAp06SJM/zIAj0Ku627/uNRgP6iZDNtm2D4sTQICdYac75dDptNpu2bWOTPWQYjUb7+/tqFSkbBxBdWZaKokwmE6wZSLVcr9eEEFyybRtCQ5GLiwvDMMqyhMCl/FEWMs+yDLw8CiJeBwSIlKIoIEakQDcAKF6WZd1uNwiCNE2jKHJdV9O0JEnCMBwOh57nbTab5XJ5eHgITcjzXAjRbDbn8zn+BsMwdF13s9n4vj8cDtM0DcPw2rVr0+k0iiLHcdbrte/7iEMNjR2NRnhOIfzZbNZut/H8Yh2uLEuwz6qqRlHEOT84OLh//36z2Xz22WdHoxH+KsuyPDs7+/DDDy3LKopC1/VHjx699tprld7VqFGjRo0aNWrU+K/HeDy+dOkSXhyKygdZmsSwcoUQpHqTRSJOy7JEzqKKlcE5T9M0r77fNQyDV5Q0jFVYv7J+1FBWtrGiKLINErhUluXzzz+Pu9T4NKMmoGvUqPFfg52dnXa7fXBwsLu7WxSF53nYne/4+FhRFMyUpmmenJwYhkEpdV13OByWZWmaZp7nhJB+v+/7fpqmjDHwWZ1OZ7lcggHsdDrj8RgzKOccdO1kMiFbPoy04p622kXIR/dqw5wqc5YVPwXCKI5j5Gy1WuA9dV3HLmpFUTSbzTRNQQDJHd7QJDnTozGoH/M0q6I8g5uDQ/F2MGv0ZTwe45RzjqAiKIv2U0rPz8/BonLOd3Z2WBURW6ncwGVPkQLSVgiBr/gZY5qmoafoNaVUUZRer3fr1q00TUEaUkpR/82bN/Fxlqj2suCco/I7d+585Stf2b5pWZawadIKkFhZ0eKgGrGkH8cxnKK4dl4AACAASURBVEAfPHhw/fp1KSI0XlVVo8IvfvGLb33rW3mer9drVVUxfJCbruuGYdy6desLX/hCnudw/AQzi3txzkHhIQqHqqpaFaSYEALabr1e3759+/r167iapqmqqoqiUEohZ8750dH
RCy+8gBq2hYBsjDFSeYKj/efn5zs7O3Lob968efXqVbS81WodHR1duXIFVwkht27deuqppxDjIsuy995778tf/rKqqvBHPjk5efbZZ/M8930/z/Nut4sxzfMcoTDAOMN/udFohGGIBw3jvtlsQD3DdiyKwnXdJEksy8qyLEkSQggoVClwx3Fms9lms4GuqqoKV9zJZCK7bJqm53mz2UwONKhP5FQUhXNuWRYytFqtzWYThiEEiEFBzVhkknLDtwLQQFIFD7l27Rqussq1eftXPssf+5WamWUZfLqbzeZ4PFYUBTqJpxWDgk8ZINWy2pwTT7QUo67rYOfX6zVWzoQQaBKoaiEExlcIgZQsy6Io6na74PHjOG6326PR6NKlS7ZtYy3BMIw4jlerVafTybKMVmHT1+v1YrFoNBrNZlNRlIODg/F4jD8fNIkxtlqtoNs7Ozunp6d7e3uIyAGVyLJsNpudnp42Go1Go+F53ocffmgYxmazOTk5ef311yGfGjVq1KhRo0aNGp8c/OAHP/jbv/3bPM+lscoY45zD7qVb5LI0g2HZcs5xClM2yzJp1m5bzqhBqaJ5yFMUxCmtPsOVFrW8HWMMBrMQwrIsebXGpxY1AV2jRo0/NbrdLnjnS5cuqaqapqlt277vg6h1Xff09FRV1fF4DJKl0+lomtZoNGzbhmPjzs5OEARCiG63CxprZ2dnPp9TSlutFlLKsgQHjVlWVVWkYJrE9EwrIJH+EV0lE2U6ZlAALZfzK/qiqir8JXEL8GuKosxms8FgUPzffbiEu1xcXIBo03WdMdbr9RDyFRn+OJg1YgJs94JSCldTkHftdhtXWbXBWqvVktYGII/zPM+yrCiKsixPTk7A+iEdiaSiTdM03d/fPz8/Nwzj3XffffbZZxVFEULs7e0FQRAEwfHxMeScJAnGFETYdlWMsTiOIWFWIUkScOvgAXVdf/vtt59//vmiKKQYwdbJZr/zzjsvvfQS+quqap7nrAoDDUoUFLOouHWEyJAiMgwDQTAQiyCKopOTk93dXVVVsc5RliWGuKiiQKAGTdOEELquZ1kGsRiGkSSJpmm+77Nqqz0Ap6CnVVVVFKUoCpxCMTqdDiGkKArP83zfD8PQcRxCCBoveUwhxHK5dF2Xc45AGYyxolrSQCMhUqjWyckJPhoQQvi+3+v1MBC4EYQsiWxszzifz5HZtm1EhFitVnmeG4aByBgIUlwUhWEYYJODIFBV1XGcIAiiKAqCYDwew6XaMAzf9yFYENZq5fKcpqmmaYwx1BPHcRiGmqbdv38ffwt5FWAHgmKMKdVumbKbZVmen583Gg1oLxZOCCHn5+ee50lNQ85i6xlHDUBRFDiFhkBVcAmfBRRFAX2GDpNq63AhxP7+/nQ6FUJAnlilAN0M9XYcB49Ar9dbLBaU0izLhBB41obD4Ww2g0qoqmqaJvydbdvG6ZUrVxaLhaqqmqat1+vlcoko+avVand3dzweI6Z2v9+HHtq2Da/nNE3TNL18+fJ0OnUcZ7FY9Pt9RVE2mw30inPe6XTOzs7wv/T73/++2+3u7+/funXLtm0Ehr5169Ybb7wBkdaoUaNGjRo1atT4pOGNN974u7/7O/ZRbypla7ee4o8IaNi9MnOe50rFLyMzrGXUSSnN85xXO8ajOOovigLpiqLAUEe15Zb/NZoBs9lxnGaziQ1janxqURPQNWrU+FPj6tWrnU5nf39/f38/TdPz83MhhGmapmkul8uzs7P1eq1pGufc8zxFUdrtdqvVwow4GAwQKAC0jqqqaZpiFmy328vlkjEGr2RCCOe81+vRj4Y4QBswL8rjs7MzZCNb0zOruF0kyikZPBTYUhk4oigKIQSotHa7DZIR07lSucEuFgs538u7YM5WVbXX643HYyRSSlVV3dnZQeSQsiyRgRAyHo9lUwFFURRFAY2FUAzbjYejtOd5yEApvbi4gChkY3CKejjnoNVkijRcZGvLshwMBojMgDpBLKZpmmWZjAMg2UOwXYSQMAxBqjLGfvjDH37zm9+EjQK8/fbb3/nOdyzLsiwrz3PG2Pn5+bVr1yilpPJTtiyLUhpFEShmOL1m1QaAKIXGAL/5zW+++tWvJkmiqqqqqkEQUEo1TdM0LUkSvdrXDim6rgdBgGECmRiG4dHR0fXr19E71DOZTPIq9kUURbdv3+50OtgzE667ytbeiWoVGAENUFX15z//+de//nWlijhcFAUYZ/DLWZa1221RhXSI49jzPMMwYK7Ral0E4wu1xG+z2QyCAKVAhsZx/MUvfjGKIhCmWZZFUWSaZpIkvu9rmtZoNBaLRZZlrutCe6Mo0nW92WxuNhvJC3/xi1+cz+foLOfccZzpdJokyWQyATE9Go2Wy+V8PoceLpfLXq+3XC5RRFVVXdfH47GqqvP5XFVVznme567rLhaLOI7v3bv31FNPYSDKaufJ8Xh8eHg4m82gnOiv7CzbepBhMfd6vfV6LdWJbH0VWJYl1kukJkvgbwT8Mlhj0NkANAoHhBC4kPu+X5alEAJMuuM4WOVqtVqWZSG2TJZlaZoiiPNgMHAcB5Lc29vDXdRqWULXdSh2mqbr9RrBiJIkgbt9p9NBhouLC8TlgJzhhZ1lGVpCCFFVVVLPWZYlSZKmaVEUCNaxv78/mUwGg8GDBw96vd5nP/vZk5OT6XS6u7t7//59zvmlS5fiOH706FGSJOv1+t69e++///7HZFWjRo0aNWrUqFHjkwYhBF7HcArTVx7jfYFVUeOQTVEUHMOoppQqiiKq0HMA51zamXjDKqrvDsuy3L5jURR4BYjjuKwMddyXVJQ3Ev/qr/7qe9/7nrxFjU8hagK6Ro0af1Jcv3692Wy6rttoNLIsg/+j53l5nlNK79+/P5/PHcfxPI9zjqjQZVnqug66E86DIHfgMtnr9cBSUUqTJFEUZWdnB86GaZqCqMK0qigK55x81O8YiSBMMa3SrWgbRVE8efKkLMvhcCinzzRNUbP0jS2KIssyUlkAiqJgT7+i4qbLsuz1ejIFUz7uAoIYwhkMBpIdJoSoqtrr9c7OzrJqS0ZKqeu6aD+yocGoTf6enZ2BUdU0DX7QoP9AmcGvExKQlZydnYEZxC88ytEMWpka6AvdIs3zPAelK8UC3hPdYZWvMef8Rz/60auvvsoYQ8ha5E+SBAQf7gI2EPUjAm8cx/nWRoiMsddeew37CgJBEOi6nue5WgENk92fTqeKouAYReI4Looiz3PbtpfL5c9//vNXXnlF13XDMLIsE0JIr/Ysy1AnatB1Hd+ONZvNsiyjKILtZdt2s9lMkgR70K1Wq81mk+d5HMebzQaRQ+BVjaoURdlWmzRNO51OkiRSaOgsZI5TOQq0MuayLANnHYZhs9kkhARBkOd5GIa2bYOFXCwWQgg8MkmSLJfLbre7XC6Rgs7GcWxZ1nK5XCwWlmV5nrdYLMIwDIJA0zTTNMuyxLKBqqqmaYJThhs41ECpFj90XSeE6LouhJBhNBRF4dWukmVZapqW57lhGLPZLAzDZRUwB3nyPJ9Op5xzDC
IOIBb0Gqsv5+fn8BDP8zyKon6/D7FIqUJQZ2dnrusiWxzHWL8piiJJEs/z4jh2HAd3jKKo0+kgLjPyF0WBPIjdPJ1ODw8PJ5PJZrNRVdWyrMlkUhSFEEJSvUKINE0NwwjD0LIszvlqtfI8L03TMAyHwyFEt1gsdnd3J5PJ3t5ekiRRFDUaDcR9NgxjvV7jMWeMGYax2WwQcAPdR++azWZReXDPZrODg4PZbNZoNEajEZ6pJEkODw9Ho1Ecx0EQlGXJOR+Px51O5zOf+czZ2dloNJrNZp7nPXnyZGdn5+joaDQaKYqyXq/v3r377rvvQuY1atSoUaNGjRo1PuG4uLjY29uTrwwwEXGJUgojGe8RSMEvEuUlpXKCRgpACCmKQtYsiyOPtNIJIXgjkA1AIlqCuyDx85//fE1Af8pRE9A1atT4E2E4HD7zzDOO4+zu7hqG0Ww2JR/HGAMDuFwuVVV1HKfRaFBK8zxHTFhVVRGdQFXVdru9Wq0URUmShHOuKEqv14PHbqfTgRN0u92ezWaYGvGLeVFUkVgxFzLGwNAplZ8vfpGBUiqEAM3HOZezqTxGTFUkoiOaphFCVqsVwlUXVVhYIcR4PEYiIWQymagVSbqzs4NQG2jncDhkjI3HY9kw+A6j4Pasj5uiICCr7XQ6qMQwDHB/oKEl0GYEieacc8673e5oNAJLiJZ0u12EzMYd8zyX7TcMwzRNMM4yJO5qtWKMyRYCqIpzvlwu0zQllYGCX1GFO5Dp4PJQCillWYKz9n2fUgoP5TAMQV4jJySpaZqu62+++eZLL72EJoFJnM/nIOaEEGVZvv/++1/4whfAxWuaBh4ZDs5pmsZxjJGSdcqYG8vlEozhnTt3XnjhBTCteZ77vg81MAwD9N/du3efe+45yFbTtO0eMcYopeg1lI1SKoTAAa7KsUZ+ZMvz3PO8oiiCIMDCzHK5LIqi0+lkWVYURZqmlmU1Go2kiqbdbDazLIuiCB7lRcV3W5aVZZnv+81mE0Eb8DxCquv12vd9eJrruj6bzXRdl+QyonBomlaWpaIo4ItVVf3www8PDg4QuBxhJdBxVVXhKK1pWp7nmqZlWaZUO0OqqgoF+5/svcmTJMd15+/h7uGxZuS+VNbSG7p6AdgkCIIQF8lIQaKZzERKMi1mPMg0V9kcJJPNHPQ3aA48zVx+B8l0II2UKJmJEEFRC0EDFxCAQCyNBhvoRnVXVWblnpFL7BH+O3wVMSVwpOGMqN/PRMb3kBYZ6eHx/PmLWj7+8jnOLJdLzvlsNsNzjd0C8dfwcDg0TZPm254kSRLHcZqm/X4fE414w8ALYfhY+4miyPf9TqcTxzEiP8uyLMvgfPgHEyqE0DQNZS7ScxsSxnEspdzZ2VksFoZhYMUiiiJd14vnyDAMVVXB91GkHmYEQYAqKLdu3cIiwWq1qlQqjDFd1y3L8jzPdd1WqyWl1DTt4sWL4/EYufAyr8/+6KOPnp2doZrHarXCxHHOC0uCILh8+fLx8fF2u0Uqd6vV2mw2vu9TSpFVvbe3d3R0hDE+fPhwOBzO5/O7d+/+4Ac/wFyUKlWqVKlSpUqV+o+iN954Y39/nzEWx7GiKMq5us+KopC8mIbMKzcW5/HXcvHXNWOM5OnMOEkpjaII/x7iz2b8n4K3OC46LO7C8q0I0zTlnMMkfNTtdmFYqZ9alQC6VKlS/76yLOvSpUv7+/uO47Tbbd/3TdO0bRt5plJK0zQNwxiPx3fu3KnX62Auqqo2m83tdquqaqVS2Ww2SZI0m83VaqUoCtAz3qqqCuIDGosDSilQDn4d4gyldDwek3M1rd7zKxMECpWaCxFCdnZ2wKeKX8YQusUwOefFR3Ec421hgKqqSG/EJY1GY5zX1tA0DcU3pJRpmoJk1ev1wjZ5bjEZfeIusF/Ji2wIIWzbRv/ggNVqVQiBxoWwdZuqqpTSer1OCDk7O9M0Tdd1bFRIKQVYBJSETwowRwgZDAZZlsV5/mwxUrRRFIUx9s4773zkIx+Jouj+/ftFn+cxK1StVrMs45ynaQo++Nprrz355JNpmgKbgvY++eSTiqJgQzzP82q1GuBmEASqqr7zzjuXL18GJNU0bTqdqqoKvgZQXq1WkyRRFCWOY9/3XdcFs9PyffOm06nMa24EQfDw4cPd3V3OOXKZcZWUEjUuijlCh3EcF29xX1VV8Qccy0uvuK6L4EGoEELiOEZoYVoHg0Gn0ykm9Pbt24eHh0mS2LadZdn3v//9p556Kk3TxWKRpulwOETKMyEky7IoirbbrWEYSZKsVqs4jj3Pw8DBSaWUoMar1arb7ZqmOZvNhBDr9brdbhuGUalUVqsVvk8ghDAMY71ed7tdlN3AFJumOZ/PPc/r9XqccyGE67pHR0cXL16czWZBEERRBP+TvFiKEGI8HrM8P1pV1eVyeeXKlclkAtaPOJRSTqfTS5cucc4Lv2F08MZkMnnPI1C81ut1KSXoM1yx2WwajYZpmuv1OgiCdruNxkV8Fm7H0w2BMiND+fr160mSRFGEeU+SxLIs13U9zxNCYFyWZSFm6vW6EGK5XNZqNQyqWq3OZrOdnR1sQgjoTCnVNM00Td/3oyjClwYopZxzTdO22+18Pt/d3UUIGYYxm81arVaapkmS4EGD8RjpYDDwfR9536Zp4sFJkqRWq6V56oqmaZVK5fT0lDHW7/eHw2GlUkGtGEVRTk9PEUvz+fzevXvD4RAOL1WqVKlSpUqVKvUfS9///vc/9alPKco/5TgXf98WZ/BnM/6uLv6QllLifxP5z5Og0QwX2raN/7nw9l8SYyyKoqLUpJJX9iB5AjWlFH9soyZkHMfv7aLUT41KAF2qVKl/R7Xb7WvXru3s7BwcHFBKu93uYDAAhqaU1uv1Bw8euK4Ljnz9+vUgCADdtHwjPlVVF4uF53kk//VGKW00Guv1WlXVRqOBtM3zSdDgX5xzRVGQ6ggxxhqNBgzDL138GgaNAuspTsZxHIYh0DOwDkRzBs0Yy7JsOBwiZznL93NAhywvs6DmOZ5FujGMabVa0+kU7YUQrVYLt8avfLyiK9hTvBZmKIpC890F0SellFJabGaoqqoQoiDOqqpyzqvVatE/pXQ8HluWBfLIOS86NE2TMcZycM85l1Ku12v4MEkSpPpqmhbHMVazwXaTJIF/4IROp8M5d12XUpokSZZl5535la985TOf+YxhGFiByLLs5OTkxo0bQRAUzZbLJaioruugxvAhbFZV9c033zw8PMRbSFEUIQR4qBDi+eef/7mf+zlcq+u653mr1QpQD7PmOA68kaapYRibzcbzvDRNK5UK/oSaTqfdbhe3U1UVflAUpXDs2dlZt9uFAznny+VSyQuV0DyPu5g+RVFee+21a9eu4ZhSulgsWq0WPoVJtm0rirJcLtN8g0FcrihKmqYo8oAQvX///s2bN4GewzAUQqCMRhRFQRDMZrNGo4F0ad/35/O54ziWZc3nc0IISCtYsK7rrusW5Bqp0HAjaGwQBJqmOY7jeR68B7PxrGGaNE0bj8ec88lkYuSJ0pgUT
dMsy0J6Nc2jFy0ZY0jeR9go+VMpz/0Vi2BAFPm+7ziObduTyQQZzbZtr1Yr/FgIw3C9XjcaDVyI5xdZ4UC0cRxHUYQLEcmdTgfZxLZtY3SqqjLGTNMEaMbxer2O4ziOY4TQ/v4+ngIgezwy6/UaKyX4GoRt277vh2G42WyQ4IyQ4JxjRWq5XOKHEmNMVdXFYoHnK8lrDV27dm0ymQBnY2jVahVFPC5evDgcDoUQ3W4X2eJ49mGhqqpYayGEdDqde/fuVSoVy7KGw+FqtRoMBi+88AK8WqpUqVKlSpUqVeo/qF566aXf+q3feu/ZH4deeuml69evG4bx3g/+uXzff/PNN3/913/9vR+UKvVDKgF0qVKl/l1kWVa/3798+XKtVtvf39/f3y8YDSHEMIyHDx/evXt3Op3WarVut8sYC8MQlNBxnEqlYpqmqqqTyWS73VYqFSCVer3uui5jLIoiTdOyLAM+i6IILIwxNp1OwbxUVUViKcmJLQSwVXC9gj5j9zDP88Iw7Pf75xEYhGOSryRDSF5GPwXtwl2klOBKnPN2u60oymQyybIsyzIhRKvVKhglzCt6Pt8/hE9JvkwNnWfNoJ+UUvhwNpuZppllGcpxnO8Bx0peKhqXI2laVVXHcYr+cWFhW3Eh/CyESNNUSonxpmmKhFDO+WazieMYPVBKa7WalPJb3/rWhz70IQwf5zGPGJHjOOChlUpF07Q0TYMg0HUdKw0YFIDyX/zFX/zCL/wCOmeMIUkZ5sFv4OA4n2UZ6uoCc4dh6Pu+YRgAkUEQCCGee+65T37yk7quAy9GUQRngr0C5O3u7mLW4DHOeZZlOGCMzedzxHAxoRgjjFQU5fj4GNm4eEty+AtvbzYbBGEQBJVKBRnTcHXRWOaVHKSU9Xp9u92CTRNCgiBAEnQURa+++urVq1d939d13TTNVqsFcOl5nmEYrVYLbD3LMinlarXCiBaLha7rSZJMJpMiJEzTBEdeLpeGYYDGep7nui6ugkNGoxHnXM3LQBcZ1pxzznmapkKI6XSq5bvtqaoaRRGGBl+Rc1nPGBHMK0IdfoPHhBD1eh3geLPZHB4eLpfLYuoRXYSQ4kn0PK/dblcqldls5vt+q9UCIPZ9PwgCLGyAyKdpul6v9/f3x+NxmqaO4/i+v16v9/b2NE1zXVfTNMYY59w0zUqlgp9LxcDjOMY0KYqSZRlusVwum80mvtAAIUiEELAZI0WJj3a7XdRxRhzGcYxRVKvVs7OzIAh838cEIWbSNKWUrtdrrJFcunRpMBhomtbpdFAC++7du/v7+yDOZ2dnp6end+7cgatLlSpVqlSpUqVKlfpX9L+lz+RHa1OqFFQC6FKlSv2Y1ev1dnd32+22pmm7u7uEEJSaFUIAInPO4zh+++23AZrr9brjOFEU1ev1MAyRpmdZFqAYvt3POQ/DsLiWc95sNtfrtRAC2+UBPGmaRghBmiESoknOpPANd1CtgvsA8XQ6HSllQXziOC4wFpoBG9FzOzBQSjnnJycn4NToGVAsjmNVVT3P293dlVKenp4CxuEVOLgglehqOBzyvP4AwChqCqNnQv5nIQ4lh8KKokynU8uywAoppUUlDexphjoVRbc/rOFwaBiGqqrz+dwwDMuyOOfnb0QIkVLi8gKfjcdjNS/vC2ofxzGqnaiqipYYYJIkaIPb4TxOSinTvFhHHMeFV3FHmIGpR7PVahVFEbqilKZpipFCmqbh1rquG4bhed53v/vdp59+mhACDO15HqgryYMBOdHA2UIIlEbRNC0MQ9A9zA4+1TRttVoBGmqaFgSBqqqvvvrqrVu30AyewWQVb994443Dw0NKKbxx7949VDhBy9lsduvWLc75er0mhAB9ImnXdV20gUOyLKtWq77vr1YrlCNfLpdxHAOjB0Ewn881TXMcB0nQhmHg7Ww2syxrs9lgLSdJktVqBU6taZqmaVmWGYYxn8/xWE2n02azGYbhbDbzPA8Rq+t6lmVwFOb9/v37+FoAY8z3fXSVJIkQAsh1s9mwfLdPxthyucSFjDGcVxQliiLLshBLlFJUFEFgEEIURZnNZvAn5suyLN/3m82mbdtINsdjgoUNODDLK2l4noefJID1juPg0fY8r9Vq4SFFKeT5fE4pjaLI933YZprmdrtFrRIpJUDzZrOxbRsu2tvbQ1Ah9qIoiqKo1WqtVispZRRFjUZjNBphsUHJH6J+vz+dTpHpjJMYWhzHeCKyLAO8xg8Q27ZhcxiGsJxSenBwAMp8fHxsGMbly5dPTk6ApxFXlNLVajWdTuHJVqt17969JEnm8/lsNvvOd75zenqKoCpVqlSpUqVK/dTq0acf/d3f+d2O3nnvB6VK5ZL5XjU/uj7/+c/jL9L3flCqFCGEkHEw/h9/8j9KAF2qVKkfm/b39/v9/u7ubqPRaDQas9msVqtxzqvVqmmaDx48qNfroMbz+VxKyRirVqtAXa1Wa7vdtlotNEiSpNVqoYDAcrlMkqRWq4FfN5tNTdOWyyW4jKIo7XYbYIsxNhqNwA1RyqPAoMWvQyllkiRxHIdhmCTJzs4OeE2B/BhjiqJMJhOSg1dcJXMSzRhDJ6hsgE5wIWMMOElKud1uKaXNZpOxf9pzDweU0rOzM56LUlqpVAghIGi46enpab/fhz24e8GtAJo1TbMsCx1O83rN0+k0SRJN08DLituhh+IVNaNrtRo+xXDO3wU3wqe3b9/W850MGWPj8Rg4UlGUJEmQkrlarUDc4O00TZMkOTo6un79Ouc8SRJwtCAI6vV6mqbgxWBnt2/ffuKJJ9I0LYDys88++4lPfEJKidrNvu+/8sorTz31FKirqqqKonieF4ahzMthv/LKKx/84AeRzapp2maz4ZyjN8MwwGrTNEW+M8KPc75er8MwhM89zyOEAPmFYfi9733vox/9qJQSZzB2klc35pwj67bwkqIomHScgefBEFHzZLlcIoQQHiRH4QjFNE07nU6apuhKURSQXNM0oyiaz+fICl8sFkmSRFGERAPEXpqmURTFcbzdbtFmsViYpgm4fHx8fHh4iGgXQui67rquqqpqvvBQrVYXi0Ucx3EcB0EA16mqOpvNsD4xm8329/fhMThB5FnPjDH4GXnTaKOqapZlnHNU1UDYFzO+Wq3wJQb4ttFoRFGEyAmCwHGc+XyOlvjSg+M4uq4Ph8OrV68irjBeVM2mlCK6sizDkKWUruuuVqtqtYqsZ8/zQK7DMPR93/d9x3Ecx8EPk2JCx+MxvqMA58BRRWxzzg3DAIzGTzM8a5ZlYc1juVyiKDOmHs8RzZesaP4MFkPAMSHk8PBwPB43m83lcpllWRRFURS12+3hcIhVqM1ms1qtfN83TROsGQMxTdP3fc55p9NBEefVahWGoW3bb731VhAE4/HY87yTk5MvfOELpFSpUqVKlSpVihBCyO/+zu/+t8F/OwqO3vtBqVKFJCGSvHT1pSTPJfrf6rN3PksUQkr+XOpf0EX94n/5nf9SAuhSpUr9eHThwoX9/X2Ue5ZSArw2m02UowXpi+MYdWCn0ymA
DvAcAA2QXKvVQulSRVGCICCEOI6DtNNarabruhBiMpl4nodcaWR0gnOBH4EBEUKklHEcgw4D/0FpmoL+nD9ZwCNyjjufnZ2df4sDcCWgwCiKWq3WcrlEA/ScpmmRAon2zLDUYgAAIABJREFUjDHUYgbJbTQa8hwRhkk4KAAlhAaFMUBjIq/XzBhDyWYwx3q9Dr8V3iisRSdItdZ1HZdDxX1pXjgCfrh3757jOJcvX0YeJRxbq9XQLYyEYaijIqUEaAZ4jaIImem+74OxSikXi0W73U7TFNUPPM+bTqeu64ZhWPx9s1wuGWPAx4Dd8/mcMQZ2DPjuOI6qqkmSeJ6nadrdu3dRdyJJEswmpRTXQl/72tc+/OEPR3m5D0VR4jhutVqqqkop4zhGJWXg4CiKlssl55wQgvkSQrz++uuPPfZY4Rx2bpsORVEopShuDodIKaMoQrgul0v4GR7LssxxHMx4Mfv1eh0HOB+G4Xa77XQ6i8UCzYqTQNJJkmRZFoahZVlhGLqua9s26kiEYdhoNLbbLUAnIQQrOkKI+XwuhAA81XUdlSiCINhut7quA9RqeRFnPFMFogVBRnTBJ4QQuALENk1TNa9CDv/grZI/VqPRSOa10eHzIh85iqKLFy8iBgzDME3Ttm08WXgcsiyL49jPq0/oug7cjIzgOI6RuQyKbdv2ZrMxDAMeazabURQhr/ng4MB1XRz3ej1AXkVRNE0zDANZzKqqWpa1Xq8xCqDnKIo8z1utVnt7e0EQFJ9iaHiaMIPFwaOPPrpYLBDSWZYlSdJsNsfjcZIkCHUs0eHHCCYCgVTE1Wq1unDhwng83tvbOz4+DoLgwoULDx8+7Ha76/Wac54kCed8NBrN5/PpdNrpdGzbvnPnjud5f/7nf45blypVqlSpUqVKnVdH75T0udSPKPw3UarUv11HwVFH75QAulSpUj8GXbp06cqVK47j7O7u9vt9cFvHcXzfB+6UUjqO8+677xJCkOp4cHCgKEqtVkM11TRNm80m6jvXajXkSIKonp2dmaYphNA0zTTN+XyO7F2gZ03TgLrAtopSGzAsO5fdjAMcAxSCFuEYOGkwGIA6cc4ZY81mczgcogEhBFnJUso0TaMo4pwLIdbrNb6wD8yUpmmapq7ron9KKahZs9lEP6BLsC3LMhDG4nIQ88LOwjxcBdoFC/F2Pp+bpqnrumEYQggQsfOjOz9edFIc46YQmsEJL774ImMMhWsJIZVKZblchmFICEnTtNVq4do0r0EMb6RpCpoG3EwpBWcXQhBCFotFlmWvv/76Rz/6USA2WBjHca1WAxqOoggkdL1eF/yUEJIkCcsrYMCZf/3Xf/1Lv/RLhmEkSUIpzbLMcRzGWJKXR/B9H4OCl8IwrFQqnHNwTF3XVVVledlxXdcfPnz4/ve/H6gxCAJd1+fzeZqmvu97nrfdbtEV/APBNvSvKMpsNisANCEENBMBAHch+TdJEsSG7/tBEFQqFUVRNpvN6emp4zjgy3Ec3717FynAOPPOO+8cHh5GUYS6K67rOvlegmEY1mo10Fv4Ybvduq6LcaVpauR1NoQQk8mk3W7XarXFYuH7/jzPrZZSLhYLoGTMlxBiNpsBPeN1MplQSpFALYTIsozl9dYZY1JKPOaUUqRRF29JHleICkzZdrvNsgxUutVqkXOUFk8Epg9w1vd9bDMYx/FmswGADoKgXq/DXRhyu922bXu9Xm+32263iyUupA/btu04DsqDYB41TbNte7vdBkHgeZ7ruvV6XVVV3/dd14VJQggsmFUqFRyvViu4a2dnZzqd4mnCLOOJUPJceMRtmqZxHMdxjOMoivCME0IwruDcdwhGoxHKdCyXS9Rkh21ZluGhEELgObp48eLx8TFWIAaDgZRyuVzev3//H/7hHxB+pUqVKlWqVKlSpUr9G5X8yBnQpUr9KCoBdKlSpf7v1el0QJwty1JVdWdnp91uF1/er1arqHKwXC5/8IMfADB1Op1+v58kCUgZgB2+gU4pRRogIcRxHJBW1IvQdR3VchljjUZDCBFFEVgk6A+4UoH/CoH9Fa/FSZKX2gBFQiec80ajgUTjQq1WK01T0NXJZNLv9xVFwS/js7OzOI7r9TpwpMwTPGEJBACH18Ieco6MZ+e+d4+PcB7ti6uwHRxM4pxTSsH+bNs28tIT8BhGV+h8V8UdKaVxHAMfR1FECKGUvvzyy7Zt12q1vb09z/MAGWFYrVYbDAZZlmVZhtIHWZb5vv/w4cMkr13b6/XSNF2v1w8ePMiyTFEUMGsY4DhOlmVIh4czC5NAKnVdB2LWNM2yLJC77XYrhBBCKHniLdpgBzZKabVajaJI0zTGmK7rhmEAW3/zm9/8xV/8xTRNgflAnAkhAJdBEPz93//9z/7sz5731Wq1SpIkiiIM88GDBzdu3FAUJY5jVVWxIsI5R1WHzWZzcnKys7NThB9igDFWrBCQPNLQ4XA4RO4ziCRYPEB5HMfj8fijH/3oer2Gc8BYsywDYk7TVNM00zQrlcp8Pn/w4MG1a9fQDzg1sph1XZ/NZrquR1GkKIqqquv1WtM0nDcMI47jKE8H5pyDHWuaNp1OOeeLxQKsGTBaCIEhjMfjSqWiqqrMkbqa82gME2EvpRwOh4DC+Gg8HiuKMhqNdF0vXA1vICqyLMN5jLpSqUgpC24LOCuEMAwjCILtdmuaZq/XC4IgDEOU6KnX64icLMuQ8rxcLg3DGI/HjuNgxQLHeHAmk8ne3p5pmuv1Gvn4xawJISaTCVYyTNM0TXO5XK5Wq93d3SAINpsNxk4IkfmPlGJoWZaleR1qjAWxdOXKlfF4jARnwzAcxwnDsFqtnp6eYk7X6/V8PgfUllLO5/NLly6Nx+N+v39ycrJer3d2do6OjgzDwFRSSrfbLfD0/fv3kyQJgsB13W9/+9twb6lSpUqVKlWqVKlSPxbhH6tSpX5cKgF0qVKl/m/U7/cvXLjQ7XYbjUalUkmSpNFo1Ov1IAiiKFJVNYqi9Xqt63ocxy+88EK9XkclVrTfbDaqqnY6Hdd14zjmnIdhSClFEiIQHqV0uVzWajXkb9ZqNcuyQLpVVQUzonl6KXgWOUd4C+CFg6JZlmXov2gGRgbQqapqt9sdjUb/NM48vRc4CbuNFSd1XSeEoLfz9yqOYR4h5PT0FJQKOdQAcKBUYRhGUdTtdnESdhb9DIdDGGZZFsY7mUxAWmu12mw2q1arqqqCwHLOT05OKKUYab/fxxiB+aSUnU5nMBjABmg0Gs1mMyRRdrvdVqul6zoQMHAbbsoY63a7URRFUXRyctLr9SilWZY1Gg3f933fBzGEM5vN5mazWa/Xb7755iOPPEIIgR/gEFR2llJGUQQ0/Dd/8zcf+9jHilFLKZ977rlPfOITmqaFYQjj//Zv//aDH/xgHMdopigKuCSk6/qXv/zlT3/605RSx3GSJAEmxt9MaA/aGEURJpdzniSJkic148I0TaWUcRwLIc7Ozt73vvcJIQBwoyhCh2gWBAG248O1EEYKh9dqtddff/3atWuFwSjyW/wZV0D
bLMuyLKvX63Ech2HoeZ5lWajvnCSJ7/uGYQDc40wcx4QQ13WFECDLSIjGggSQKwgsaDJjzLKsMAwZY6vViuXlTbIsE0IgbFRVxVspZXFG07TRaMQYKx63LMsYY+PxGE6rVqsYRZIk0+n0+vXrgLCFK2he/qV4xXjhBBwAoKNk82g0ajabuq6rqjqfz3u93mQyqdVqhc1CCDgTDx0M03V9u92u1+tGowGzTdNcrVZAzLu7u6qqKopCCIFzzvcAOznnel4sRVVVFPEwTZNzbhjGcrnEVxx6vd50OsVAGGPAwZPJZHd3F2O5fv36fD7HHIVhGIahpmlY8AA0931/u92CeuPxOTs729nZQQMsbMCBANOO4wghVqsVWPa7776LkTLGjo6OZrPZt771LYRTqVKlSpUqVarUjyIv9S7qF8sqHKV+FJUAutSPSxf1i5NwUgLoUqVK/R/riSee6HQ6vV6v2+3W6/XJZNJoNJCKC8oMkguQ9J3vfKfT6eAj1P9VVbXf7y+XSwAvwFwU5EW+c6EoioQQtVrNNE3LsnRd1zRNVVXOOfIEQZFAlArz8BaS56qyZnnZYsbYycmJqqpCCPQwHA6NPL8Y1KwoNJEkiaqqoOTL5bLT6YAQZVnGORdCuK6raVpxR9wLNwLbUhTFtm1gqZOTE3BhEKs4jsHswOYgjAKJ2KZpgrKxPBkcRCxJEsMwkK0J+wHXNE2Lomh3dxe3QM8YeJZliqJ0Oh2Q4vF4/Nprr61Wq2azeXh42Gg0BoMBfBVFES4khFBKwemQxQk1Gg0lLzWA1/l8juoBGDhekySx860go7yWwosvvvjUU08ZhhHmdZ+RJp+eq8sxmUzAmpExCjCH9NggCBAGvu8nSQLKianJsgzegM2NRgPd4r7f/OY3f+3Xfk1RFNg5n88tyxJC4FNd1//u7/7u6aefRuAVkaDrehiGhmFEUXTnzp0bN25g0jVNc113uVymaQom6Lqu7/v1el1KqSjKYrHAp0VIUErhvUqlkqbparXKsgwU3rbt5XKp57m0gJiu61YqFSzAJEmyWq0wcLBjwzCQOavr+nq9RsUJRVFUVZ1Op8gLRl4zVikQNmq+u6AQYjKZ7O/vI+uZc56mqRBiNBrBpYqi8HM7ClJKVVU9OzsDKK9UKtVqdTQadbvdwWBQr9fDMITbgWiL1GBEEUIRhj18+BAPHUhxEASIySAI4IrT09Msy7bbLexESBBCGGPz+bxaraJPdDIej1utFvywWCzq9Tqcj6AlhOCRWa/XRbo02O7+/j5jTNf1SqWCCh61Wo1zjuN2uz0ej23bLuJfyYtZc851Xd9sNoh5PL9pnvUchuHly5eLrGc93wbz4ODg5OQEfkazVqsVhiGW4vb29qbTaaPR8DxvOp2iRgqcPBgMkiQZj8d41kaj0TPPPIOhlSpVqlSpUqVK/Z/qj/6fP/qv/+m/trX2ez8oVSpX8f/jjw6gP3/j8/gP4r0flCpFCCFkEk7++x//9xJAlypV6kdSv9/vdrt7e3vNZrPf74dh2Gw2HccxTXNvbw+lUXVdF0I4jjObzR48eDCfzw3DqNVqmqbVarUCUt+/f980TQDfnZ0dfJdfURRFUTqdznQ6BfTJsqzf78/n8yItlzEmhKCUImFZP7eZnpIXu8CvPUVRBoOBmmc+ApLiPBBSq9W6c+eOkevw8HA0GimKkuRf/5/NZiT/7YszQRDUarXiRiBoMAlsVFGULMswENwXJ4tOYACOgeTwEQ6gYggYF8TydG+41LIsgGmeV4Le398vhi/zDe5wI6gwgBDyjW98I4qiRqORZRnQZBzHm82m1Wqt1+s0r2OLC4v6vMWIpJQAxzC7OI+b4hUHruuuVispJVgzTJ1Op1EUSSnBapMkCYIAw4miyDAM4GZgPkgIgdRdIUQURbDwzp07t27dSpLENM3tdguamWUZOKbned/4xjdu3rwZBAEGLqVUVRU0UNM0wzBeeeWVS5cuxXGMKZhOp7iFEAIVSBRFUVVVCMEYU1V1sVgoigLQL/JcY9B8jPfk5MRxHDgZCvOdA4MgmM/nWJsBK4/jeD6fIxl2uVxiKQKkHlnPyC6PoigIAk3TNE0rKmMAIpumOZlMOOfz+VwIoev6dDotDMaaDZgsHLJerxVFqdfriCghBOccw2SMLZdLDDNJEsbYZDJBCjAMW6/XhmFUKpUwDHd3d1HkPY7jer2OGaGUyrxAByGE5psuAstiggzDQC3motoJUGwQBMDrtm3j2LZtQgjsoZS2Wq0iAnEjfAqzO50O/LNcLhuNBs0rhOzu7hqGsV6vl8vl7u4uUq1d18UKBDwAl8IPRl6/pbBfUZR+vw+knuWZ3aPRSNM0KWUURdvt1nEcLK5gIL7vM8YMw9hut9vtlhACH2KkWZY98sgjyFWfTqcoGu66LsrXTKdTlKKezWbHx8ee53HOCSHHx8fD4bDcV7BUqVKlSpUq9W/X7b+7/Z//7j+/92ypUj+kl19+GZuvvPeDf64gCAghn/3sZ9/7QalSP6QSQJcqVepfE9Jj9/f39/f3CSEgQbVazbZtz/MMw9A0TVVVfDk9SRIhxFe/+lXGmOd5KNBRrVaPj48JIUBphJCDg4P5fE4ISdOUc97tdgGd0zRVVXV3d5dSOpvNsrxmNOccGb7gRJxzx3EA0Uhe3oEQcnp6yvM90zjngMUkT4gmhJycnCDPFLpy5cp8PgfhAmmK4ziOY3CxarVKCJFSxnEMcsQYUxQF/EtRFCEELlQUhXMOKDYej0GpCCFJkuzu7hJCQOIgdJidQ8NpmvZ6vfNtlH9eEgQjGo1GlmXFcVz4nOf0ufCAzHE2+oflURQBIkspv/nNb7711lv1en1nZwe1dIfDIRCtEIIQYpqmoijAcOgnSZLzZkspAVthMNrAbJwszsAzmEHDMFzXxXngVEVRCrgshHBdN0kS3/fjvMgGMKKmacV8AfWigofv+2+//falS5d838+yTFEUxthmszl/+WKxaDabqqpmWRaGoa7rq9UKDoeRy+XStm2QRPDcIAgQA0C3L7744o0bN0AVt9stHI7oAv0sholoRMozZgH+t22bcz6fzxEeaZomOXCPoqjT6cCeKIocx1mv147jYJbn87nv+8W6DuquIBva8zygSVBXQoiaV6IwTXOxWACJBkGAca3Xa03TGGOqqqJIxWw2g/3oBJWaWZ7eOx6PUVsD8zubza5cueK6LoaJloyxJEmUnAgXWc+IIhTmrlQqhmGsVqtKpYJpSvLVne12C49xzrfb7e7u7mw2w3cOEPBSSsdxpJSUUmQ949parYZoxCyjMZ44wzCiKHJdF4sZOM/yFQVki7P82xK4BMNR8qI3lFJMH6VU13XcutfrFXeEQ+Dbg4MDfK0Ba2aqqqLGixAC9WdqtdqVK1dOTk4wv7qubzabZrO5Wq2m06miKNVqFZEwHo89z7NtezKZLBaLfr9/fHwcRdHJycmXvvQlUqpUqVKlSpUqVarU/+f60z/901/+5V/G38D/ihRF+au/+qv3ni1V6n+lEkCXKlXqvbIsq9PpdL
vdnZ0d0zR7vV6n02k0GkEQWJa12WwAQIGGGWNICazX6//4j//44MGDSqXSarU8z6tUKqDGN27cQJJmQXy63S4oWJqmjLF+vz+ZTGRONhljvV5vPB4TQqSUQohOpwNCBAEkMcYGgwGAIOAa+i+aEUJeeeUVwzBAMGu12nA4xBiTJAFxi+MYeYhSSpA1xlgYhtPpFNU2cC90OJ1O0zTd2dlBY9ggpcQGZYyxRqMBUgZuC/uLA5zHK1Imu90uPlJygjyZTIQQSJWllKKUgWmazWZzPB5XKhUANYya5OnS2bld3eI4jqIImKzT6ZycnDz77LPT6XS9XsM/YRjW63VVVW3bLnYRnEwmn/rUpxhjhJDj42MgWvQGBnd0dBTnBUOiKNrZ2SGEeJ6H7FpVVQHjpJT37t2DPZTSd955p9lsUkobjYaUcrFYaJqG6hO4Lxx77969xx9/XFVVdO553rPPPvvhD3/Y9/3Ck6+//vqtW7cwlYZhGHlyfZZlvu/ruv7MM8988pOfLNoHQUAp1XUdoFbX9VdfffXJJ5+MokjTNCEEmDLCAw3eeuut69ev41jX9SAIEOF4Xa/XJC8ZrKoqY+zOnTtXr17FECilRQBjLpQc5UNpmiL9FgUWsiwDSjZNMwxDkGXTNBEbnufN5/NKpTIejzVNQxFnz/Nc1wVdHY/HjuMYhrFYLIIgCIIAiw2GYdy/f98wDFiIEEXCcr1eXy6XmHqE9GQyqVQqiqIkSeI4ztHR0d7eXhRFSZIU0UUpVVWVc46xuK578eJF5VwBFhzXarUgCKrVqmVZg8Hg0UcfPTs7w2OFOXUcBwhY07QoiqSU4LCEEPQPH+IBVBSl1WohQuDMJEniON7Z2ZnNZqhFoyiKEELXdeRTJ0niuu52u/U8D8U0AJ3xpNC8WLyiKLgpOVelGrfAWyHEpUuXZrNZHMecc1wFbp4kCcY4Go3CMCzWS7Is6/V6w+HQdd3xeGxZlmVZnucVtn3gAx+YTqfg8tvtttvtYqPOvb29+XweBEGapmdnZ6vV6uzs7Itf/GIeMqVKlSpVqlSpUqVK/f+jz33uc5/73Ofee7ZUqX+DSgBdqlSp/6lLly71+/16vd7v93Vd73Q61Wo1juNqtQo0RintdDpAYJxz4NHJZLLZbLbbLchms9ms1+uXLl3abDaGYQghVFXd3d1FGVMQZ0JIp9NBZijoT6/XQxEMmpeS6HQ6jDEUJgY2AiEC/gOEbTQap6enBcAieT6myPeOM01zs9kUd7l+/TrwE0AhuJUQAg1gHm5UUCoYg1srefqnlBKj+GFRSuM4zvIv7KNPQGegYUBbVIKGFEUZDAZwlG3buMV4PNZ1HdgO3BZ5qYUKC7Msy/KyHkmSAI01Go1XX331q1/9qmVZ7Xb7+vXrb7/9NhpTSoUQsO3GjRs/+MEPYNsbb7zxvve9jzGG5OjT09N6vQ6bFUVpt9tgzZ7nbbdbXdcxKYwxzrkQIoqiOI4JIe12GxUzcAtAOtiMVNa33nrr4sWLSZKkaYrhn56ePvnkk5qmIb9b1/XpdFqpVCilYHaapp2enh4eHmKCIFVVsywDLNZ1fbPZOI6jKAo4rGEYQRAkOY8mhBwfH1+9ehXcEJPC80IfoK5HR0ePPvookKWqqovFghCCAeKVUoqxIB6AF8MwRLouamuAt6LQ82AwaDabaGOa5nA4xAMVx7FhGLid67rww9nZmX4Olz98+PBDH/qQZVlSSiHEfD4HcZ5MJpqm1Wo1z/OwjGGapuu6gK14pgC1K5XKbDZzHEdV1el0inFRSjnnw+GwXq8nSWLbdhiGeDaxOyVjDNUhTk5O2u12GIZSSkoplojq9XqWZZhW0zTv3bvX7/crlUoQBPv7+whCQkiapghCIYRhGI7jDAaDLMt83w+CwPf9nZ2d9XpdrVYxj2rOxIvOKaWYI5wZDodpXjM9SZJut4swFkIsFotGo1FME81zqxEkGBEeXtgGIfBwMs3L4yBo8eS2Wq3JZIJHo3h4oygKw/DKlSuz2QyWr9fr1Wrl+z6K2BwcHDx8+DCOYyFEGIZBEGRZ5nneer3GgkcYhpvN5uTkpFarMcaWy+XZ2dmf/MmfFIaVKlWqVKlSpUqVKlWq1E+eSgBdqlQpQghBkY29vb2DgwPLshqNBr44b1mW67qEkCzL2u02EA+yMt999118ndw0zVarZdu2ZVmO41QqFdM0dV33fR+dg/X0+/233npL0zTwJuAwdAX2KoQAdcJVnPMkSZrNJjgRwBlA0nA4BHvinLfb7du3bxfdqqq63W6TJMFNO51Ov99n+VfvlRzahvkOeFLK09NToDdQRbSRUhavEGxgeT4p3qJNmqZ7e3s4zvIy0ISQ4+PjbrcbxzFQlO/7tVotDMN+vw8WBiNRrhoOUVWVMTafz23bXiwWSZIkSVKpVDD2An0OBoP79+97nqcoCopzIY/ScRwANTC4a9euAbBqmvb+978f+Nj3/ZOTk6tXr2IU+/v76/V6s9mcnp5ev34dMFRK2Ww2fd9HDBBCOOeFQwDUKKW4F/AcWkopGWNZrtVqFeclR+AWSunJycnjjz+u67qUEhcicRVTjynAMEFjNU1DD1G+OyJc8cwzz3ziE58oJoIQoqqqaZpoxhh77rnnnnrqKTRA6FarVU3TFEUBT3zttdcODw+zLANCnUwmGGnhapIDaEzQ888//5GPfAQWEkIwNbqur1YrTL3rus1mU0qJHOfRaNTv923bjuN4Npstl8vNZoMiG8vlUlEUgEuwcimlaZqTyQRhvFqthBCWZc1mM9/3Hzx48PjjjxuGASevVitd12FYlmVqXliDEBJFUafTAS9WFGU8HgMEI1BREwNovtvtFsVtkiShlEopEXIAzVmWKYpydnaG9SfHcVAMBMg7juNut4sZQWCcnZ2laYpIqFarYRgC2oLb2rY9m83Q8vr168fHx1LKRqOB2JBSRlGEGdxut7hvlmWPPPLIcDj0fd80TU3TsCwxmUyQ5iyEiOOYcx7HMWNMVVVFUbIsQ/xgporZRGxjWgubcXDz5k3YhkgLw3C9Xtu2HUWR7/uUUsMw1uv1er1OkgSVQDjn+MIHanGoqjqZTFqtFp7ES5cunZ2dVavV8Xi8Wq0Mwzg9PV0ul7du3RqPx0EQvPzyyy+//DLitlSpUqVKlSpVqlSpUqV+slUC6FKlftrV6/UuXLiA3QV3dna63W4YhpZlaZoWBAEhpNvtAvXSvCJEGIZvvfXWbDar1+u2bTcajXq9Du587969IAiAkwBfQNOAEa9cuQK+xjkH9rJtGwVkC4x1Hl8WqEhRFCXPPmaMnZ6eFqmyuq4jzxq30HW91WqNRiOgqDQv+JumaRzHu7u7WZYRQjjnBaXCLQoaVbyFSQVgLa6FMYSQJEnSNAU+K/rBK65K0zQIAqDnRqOh6zpobNEM/RTjms1mYPcgzr7vw1ec8zfffPPtt99OksT3fdu2CSG6rgshGGMAxLZta5oGP6BbjLrVagHAsXMUXtf1KIoIIXALEDljLMl3FwSfBTdfrVY4GUUR6GSSJGEYE
kIIIUUzMLvC1YXnq9VqmqZAnEEQGIax3W5FXnLa931kK4PiRVFUzCOYLHxr23YQBKhunGVZGIaaps1mM1VVMWR0kiSJlLLRaGB2ZrMZzmDslFJN0yilcRyPx+M0TU9OTm7evKnrOvpRVRXTwTlnOc0s3nL+T78x4V50iKnEnEop7969+9RTT8EJRQxkWYahRVFUr9c3m02SJHARcDlqSqBMjWmaxR6bcKznebBkNpvByOl0ure3Nx6PCSGNRmO5XAohhBAIRcYYpVQIMR6POee4VlEUIGmMglLquu7u7i7nHJaTfDEGzzJGenZ2VqvVfN9vtVqVSmU0GlWr1dPTUwzccZwwDOM4dhxnOp3GcRwEQaVSsSwLoeJ5nmmacJ2qqpRSbHWI29F8oz94ZrP3aQvAAAAgAElEQVTZID08SZL6uQ0qFUURQoRh6DgOYh4OGY1G3W5X07QkSXgOoIvHkFLK8wIaGP54PO71esVgsyzrdDqz2QyTVcRtFEW4EUJ6d3eXMbZcLi9fvjydTg8ODk5PT9fr9XQ6tW0bvem6/uDBgyiKLly4MJlMbNu+f/8+cs/v3bu3XC6TJDk9PV2tVsfHx+VegqVKlSpVqlSpUqVKlfopVAmgS5X6KVWr1UKV536/7zgOMhkBlLvd7mKxkFJiTy1gnfF4fHR0tNlsTNPEdn/dbrfVaoGoKvk32a9evTrPd/bjnO/t7eG7/zRPGt3Z2cH3/dM0BRjtdDqj0QjMCCQIZApncNVoNOKcAzXqun54eIhCBECuly9fppQifRUX7uzsFCgKXAkWKvnX/As/gEbJPPPx/GsYhmEYdrtdx3HQc9GenMPQUkpwrqIB7ggYt1wuO50OSGsYhkXljfM9UEpnsxkgLEbEGPv2t7+9Xq8B0TRNOzo6EkJQSpEBijboAXe389odQIqFDfP5vN1uw5mcc6BAxli73c6yLIqiwlEoQYtRgJCGYYjKKlmWZVmGM0CEQRCgNrTv+zzHtb7v93q9NE0BGUFdX3jhhY997GNSSs/zimgJggAHSl7w5Otf//rjjz+ONnDOCy+88PGPfxwzDiALDwA0E0IYY57nFYiZUvrMM8/8yq/8CjAiuH+1WhVCJEni+34QBF/84hd/9Vd/VUpZqVSiKPI8j1Ja0GdVVW/fvn14eOi6rud5QRB897vf/fCHPwyvUko9z4Mn4XxK6WAwQA1xvAV0RhCmaYpRr9drwzAqlcpsNovjGMzd8zxUVrFtOwxDpDlPJhPLsmzbnkwmpmkuFgshhBDCdd0oijRNw40IIdVqVVXV+XyOSVcUhXM+mUyq1ep8Pldybi5z4owzeOgQgegqTVNgYuQUR1EUx3Ecx3fv3r1w4UK1Wo2iqN1uY+LSNG00GuPxuFar6brOOW80GqPRKIoiXddt27YsC/VS8LROp9PLly/fvXu32Wwyxrrd7tHRUZinRWdZVq1Wh8Mh51zTtFarNZvNRqNRlmXNZnM0Gp2dnSGh2PM8z/NAwFECyLIsFCHxfd91Xd/3HcfBFMxmM6zQKIoynU4Nw5D5M15w/yzfERHD73a7URRhXggh2+12sVjouo72cRxjLYRSut1uV6tVo9E4ODgYDAaMsTAM8bNiPp8jkX+1WnmexzmfzWau6w4Ggz/7sz8jpUqVKlWqVKlSpUqVKvVTrBJAlyr106gPfvCD/X5/b2+vVqs1Go04jpHniPxQKSVKrE4mkziOhRCvvPLKbDbbbrftdlvX9VarVa1WpZSGYXQ6neVyCWxKKaWUdjqdt99+G1ml4FCUUpIn5IJDjcdjeq7SAlAmakD3ej3G2NnZ2WQyKWipZVmgnAXr3N/fH41GQEhFt+Qc1cUxUGBxBiqAFLgqhH7ATMMw9H1f13WU5kBjxlhB7nCSUgqoR/LRpWka5lmrlmVtNptarQYGres6muHa0Wg0m81Wq9Vms+Gcn52dWZYFj2E4iqLYtq2qqq7r8PnDhw9h+dWrVznnSCbFfQHUGo0GXApwlmUZbAauTZIEbBQMrqgaEccxxsI5X6/X169fl1ICpaESwunp6aOPPiqlxLjQJzajw/ShpAA49f7+vqqqMGA0GsVxTCkNw1BK2Ww2YUYQBC+88MKtW7fAjgubkSROCInj2PO8yWSy2WzivIQ0pfTLX/7yZz7zGcZYrVaDq1HnAX0ahjGdTqMownAQCaiZkCRJcRe4FBGrqiohBFEqhNA07e7duwcHB7Ztx3HMOfc8j+RFhDHdMBi9KYry7rvvYlxKvuUgeK7v+6ieYVlWmqbj8Rg52rquVyqV8XhsmmaQy/d9hIGWV5IRQqRpqmlaUbs5yzIhBLKe4fN6vS6EmEwmjLGHDx9evHgxTVO8nUwmQgjGWBzHjLHxeMwYg5FSyuPj41ar5fu+aZpSStTNQCAlSTIcDm3bbrfbSZKkaYrHwfM8rAQ4jgMmmyTJer0+ODgIw9C2baBwQHNMBGMMA+F5krWUMo7j1Wql5knZuq6jIAkiM01T3/crlQq+J6Fp2na7xXg7nQ5mCqnr4LyVSiXNq2rgyZJSEkLSNE3TlHOOeEOEB0EA58dx7Ps+bIOHl8tlr9dTFGWxWNi2nWVZGIaKouDTRqOxXq+Pj49xl81m43merutY0thut4jP9Xp9enqaJMloNJpOp3/4h39ISpUqVapUqVKlSpX6yZLjOL/927/9xBNP6Lr+zjvvPPvss9/+9rff26hUqf+VSgBdqtRPl2zbfvTRR3d3d/f29vr9Puo7t1oty7J6vd50OgWvURRFVdVWq/W9733vnXfeMU2z2WxalgX0bFlWGIaWZRmGIYTY3d1FcWc9r4Zx8eLF6XSq5PwX2I7kycJCCOCe2WwGq8ABAZKm06kQolKpYIMyJEeneZVYRVGAkzRN29nZAcYqaCN6A+oq2hfn8RFYm8zpM1gVGCW4Z61WM00TCBKdEEIopTCP5BQPB+gzO7f7n+d59XqdELLdbrfbLed8Z2cHd7x9+3aR/Yo6BrquK4qCk0IIwzA0TSP5HomEEMdxgEcVRbl69SoMDsOwXq/XajUpZZIkd+7cgQGPPfYYOOn3v//9NE3jOAY6PDw8RMv79+8HQZAkCec8iiJwf1S2XSwWQKJAwMXQpJSbzQYTVJwhhBQwOkkSrF6cZ9ZJkqRpWq1WKaWr1QpZoriQ5BnfjuMAVSMTeT6fgzUr+YRmWVav14MgkFIiQdX3/TQvb805Z4x96Utf+s3f/E3DMBAhQK64F6WUc/7cc8/9zM/8TGE2Y2yz2URRBA9nWfbMM8986EMf2m63uDsuBAXmnK/Xa4QQJr1arb7xxhvXrl3DGcwRSG4QBOCwlbzCchzHSA0mhCRJAji+WCyq1WqlUplOp5ZlrVYr0zSFELqux3EshJjNZkC3qAGt63qWZQDQjDEwaEVRMCMwI03T3d1dOBYzAq9Wq9XFYoEGAOLT6bRWqyVJgsWe4XBYqVSyLIPxwOVg+kEQWJalaRqY7NnZWbPZtG0b40KaM7LLMfaLFy8WMP3q1auDwaDRaGAKMClBECiKQildr9c3b97knGuaBvT8
8OFDrD2YpjmdToMgIISkabrZbC5cuIA88YODg+VyuVgsHnnkESklMpEZY1mWYSAf+MAHNE1DFGHlxnXdIAgQKoZhmKa53W739vZc14XNUkrXdRuNBlByEASe5wVB0O12e73ew4cPUWNEVVXP85IkuXz58snJiW3bp6enCJX5fH58fLy3t7darebz+V/+5V8iwkuVKlWqVKlSpUqV+onUH/zBH6Rp+kd/9EcPHz68cePG7/3e7yVJ8r3vfe+97UqV+iGVALpUqZ8W9Xq93d3dbrfbbDYdx2m327VabX9/33Vd8E1CSLfbRaHY0Wg0n88Xi0Ucx6Zpdjqder2O1EVKaaPRAOtBeVNd1y9evDibzTjn6IcQ0ul0ZA4TeV4/mnMODoi8SBTfIIRIKYUQ/X6fUorkaLQhhKCEMcgayKOiKIPBAIwS90J7nJFSZllWMF8Q2/e8Ao+meZGKdru93W6DIOj1eqZpAkSiZ/JDKdXok+QoFqTP930Ubeh0OkKI9Xq93W4bjcaDBw+Gw+Hdu3dVVX3w4IGu6yix7fu+oihCCLBOXdcxQFVVAevTvJIDbp1lGaUUBgMvImEWtHF3dzeKoiRJXNdFPwcHB2DBnud5nuf7PoxvNpvb7Xaz2Ww2G5T3VRQFSNGyLMBWz/NwEo6K4ziO481mg7EXboyiCPmzgIBwCCHEsixYWzg5SRJkAUspAShhG0bt+z6qMRBCDMOglILCQ2gTBEEcx2maKopSqVQ45+jHNE3P82BDrVYDkXz++eefeOIJuI5SGsdxq9UyDENRFEz3N77xjSeffBL2KIoShiFKrIRhyDlP88owAJeMMd/3wfoJIdhacLvdWpaF1OD5fF6tVsFMz4dEEASGYViWBT8gSLDGMJ1OsVQzm83wHIF3L5dLVVUNw0DCMu4uhAAsRiRQSimlWJYYj8eapjHG0jTFWwRJGIYoZwFLLMuSUo7H4yzLMKdSyizLHMfxfR8LS5vNBg5BcOq6Xq1WT05OpJQg17VabTAYKIoyn88Bsk3TRMmOJElAb1erlaIojDFd1yuVymazQURJKQeDwXq9xkRvt1shxNHRUa1WE0JYlrVcLtFPHMdRFNXrdaDwLMtUVd1utzs7O0XgwdWI5ziO8fAqivLgwQPDMBzHiaLo6OhI0zRUox4MBvgpt91uGWPVatXzPFVVx+Nxp9MBcXZdt16vI8AWi0WlUiGE4El57LHHptMpIeTs7KzRaKiqOp1Oh8Phs88+m6YpIeTk5AT2lCpVqlSpUqVKlSr1ky0hxAc+8IHf//3fPzo6IoS89NJLzzzzzM///M+XALrUj6ISQJcq9ROudrvd6/V6vV6z2ez3+0KIVquVZRnojKZpvV5vNptRSqMo0jTt/v37k8kkiqJWqwV0yBizbbtSqfR6PTDoyWRi27ZpmlEUATBRSnu9HmNsOp0Cb/G88iyoHOccDc7Ozsi5JOV2u43v1OMSzjmIM8lZG6VUUZThcKjm2/ExxiqVisxzkNGGEAK4BnKa/VCOc5rvCAduGARBs9lUVVUIgToPOzs7BTOFPXt7e7gLVIwF3WZZFsex7/ubzcZxnPV6Xa1Wp9Npp9NhjL377rtf//rXUcrAsixFUXq9nhBCCAG6d+nSpaOjI4wuy0Fzmid6YzhpvptfkiSU0izLwrx+LpKXMUxKKSjwdDpttVqFkTIH2ffv379586aUUtf1YnRIYsXJKIpwo81mE4YhegYuLEaKDuE9YMrbt29jmSGOY5zZbDZvvvlmu90mhAB8w9V3794FrAyCIEkSQoiu60EQpGkqpaxUKmEYep73/PPP37hxIwxDDEpRlOeff/7xxx9HG+BjLBXEedkQ4FfGGKIiiqLJZIJywGiv6zrnHMAXwBrwNE1TfAq2jujCK82XTDApKPdROHOxWNy6datg+oqiIOpAnA3DAM2Mogh1Nt59993Lly+bpmma5ng8dl232WyioM1ms0E8YG0GVBSrOxgUeqCUSikVRTk+Pu71enEcT6fTXq+XpinI/mKxQPAEQWDbdhiGeCIw14PBwHEcTKht22+//fb+/j5csbe3h6lP8jT2Wq1mGEaSM/RKpYLHPIoiz/Pq9fp6vXYcx7IsLCHous4YAyNGejVmFj8oiogdj8eHh4eIHKwHzOfzKIo0TbMsq1qtpmn69ttvN5tNni9iJUmiaRrnfLvdZlkmhKhWq5xzgGMMUEqJ5z1NU0IIBuL7fpZlvV5vs9lst9s4jvf39/GDAg8Iomg+n/d6PcMwfN8Hnq5UKvP5fD6f37x503XdOI5d110ul1LK1Wo1GAw+//nPk1KlSpUqVapUqVKlflqF/7Y+/elP//Ef//F6vSaEfOELX3hvo1Kl/gWVALpUqZ9kHR4e7u/v7+/vV6tV27ZrtRrg0WKx4JxnWZYkiRCi0+mMx2NCyFe+8hWgzFqtVs1lWRaSNzVNA+3t9Xqu60opu93ufD4HqpNSSimbzSZAT5ZvMQf0AxamqiqqS2OrsTRNC+JcJDUXlzDGQOUYY8jVVc6lPCs5qCV57i1oVBRFQMmwJ8uyIAiAvZIkWa1WQRD0+/0kScJ8P700TbvdLjAieNnOzg6uRf+4l/zn+b/oE59uNpvVajWdTjebzXe/+11VVSuVypUrVyilII9CiEaj4bqumotz/sgjj4zH4+VyWavV2u020Btcl+Upz4Zh4BaEkCiKGGNAb2le0QJ2YuxhGDqOU1iOk0By8/mcEIJjQGHP82azGQbl+34URXBFGIZpmvZ6vcViEYZhkiSO4wwGg8cee0xKef/+ffSQJMnZ2dm1a9cIIb7vHx0dBUEQRZGqqo1GI8tXINB/HMfNZjNJEvDiKIq22+0777xzeHhYZDoDSgIywiTDMN59992bN28WscQYe+aZZ37jN36jaON53te+9rWPf/zjYNCIT1BdAG5CyKuvvnrlypUiTgghq9XK9/04r/hBCOGcI9hUVX3xxRff//73oyvG2Fe/+tWnn34a18IMXIWQyLJssVg0m81qtYqk4zfeeOPpp5/Wdd2yrNlsNhgMms2mpmnm/8vemz1LctR335WZVVlrb9Xd1X22WQ9CGmlGGGFsJHiAQEQ4bN9w4yvf4CsifOMIR9h/CRGEb3xhImzAKBzGIITEIiFCGjGI0Uizb2fOnNP7Wt1d+/JcfF31HrD9vn7f98EGnN+LjjrZWbn8MmsU+tSvv2kYlmXdunXr4sWLSHbmnI9Go0ajgRcDsixzzlnhs4FVNgyjVqv1+/04jvGLAcy9VqspigLH5yiKLMvK8xzvchqNRpIkeZ4jaTcMQ865YRh4R4L4wHAjDMPDw8NWq6Wq6unTp6fT6cHBQaPRWCwW4PV4SzGZTLBGCBEhRJblzWbT7XYnk0kZc875yadyMBhwzhElQgil9OHDh0mSfPrTnx6Px2EYViqVfr+vqmoURa1Wa71e4zlyHGe5XFar1U996lMA/XBIx6OHmOd5jjliHX3fT5JE13XOuWmao9EIK4XFDcPQ8zxN06bTqed5y+USkH2z2WDXoQVZlvHWZLPZoMHJZPLVr37
15s2bmJGQkJCQkJCQkJDQ/3B9+ctf/tKXvvS3f/u3N27cuHr16g9/+ENkwwgJ/T9KAGghod9Otdvtc+fObW9vNxqNnZ2dra2tIAgMw0BS4fb29ng8TpKEFicBjsfjg4ODOI4bjUa326WUAj17nsc5D4IA3Kfb7c5msyzLWq2W67ppmiLhF430+31wKGBEqTjajlI6GAwURUnTFOy11WoNBgNQP1mWgZgxcmDEkrRKv2iCAQFyEULApOI4juO4pMZpmibFz/kdx1FVNSmOFgRcjqIIdaIoCoKg2WwCqiJbOc9zgMuSpqEjtBnHMWrqug5e5vv+z372M8MwKpXKfD6vVquVSqVSqaiqihxnTIRS2mg0yv88Z1nGGMP7gDRNl8slIgk43uv1lsslrAAcx8FIjo6O0sLAejAYPPXUU5IkAU1iskEQzGYz8D5Q5iRJsMqNRiPLMlQAKTZNE5nCWZaBe2JgURR1Op0oihqNBlwLPM9jjIVhKElSs9lEYikhhHPueR5uqVariqIARCqKkuc5GB+CBvJOKdWK0/845zdv3nQcByy7pIqKoiCPWNd1pCcjfzwMw7JEVdU4jnVdj+NYkiTP8+BNXN4Vx3Ge5+DgYRjeunXrwoUL+Ap1arWaqqr4VlGUV1555cUXX8TIZVl+9OjRs88+WxLt2WyG3QJmius0TaMoQmswM8F24kVCPf6EarUa3GyiKMIDpWkaNsBisWi1WpqmDYdDxhgQPDY8nrLxeEwIkWU5LhzJTdMMw7Df7yM1OwxDEGpsJMRksViAd69WK/zEgXOO0RqGcXR0RCldr9dnz54tX0fdunXr1KlTuq6XjyTQc61WW61WhmEgOIyx4XCIJ5RzjgoYIWNsPB7HcSwX/s6nTp06ODjYbDar1YpS6vs+53w+n5um2Wg0KKXD4bDT6WRZFkUR8sHLnYzZoRdd1/Fg4k/DMLIsMwxjNpu12+0sy2RZVhSlWq0uFgu8VgnDEO8wsOHxisX3fdM0Hcfp9/thGC6XS1VVV6vVarWC43Mcx6vV6u/+7u/ee+89PA5CQkJCQkJCQkJCQqWuXr3653/+50888cTv/M7vvPjii3/yJ3/yla985Uc/+tEv1xMS+jcSAFpI6LdKpml2Op3t7e2dnR1ZlrvdrqZptm1bltVqtUAnsyzLsqzb7cJ/+eHDh2EYTqdTTdP29vZs28apXIqilFDScZzFYhHHMWOs3W7PZjPGGHIwAekGg4GqqvV6HRyKMUYIGY1GiqIoisI5b7fblFKUoA4IFCmcbUGX0B0KgeH+dWInoDMuTnLAfxc9h2G4s7OTJEmWZSiJ4ziO4zAMAafa7baiKJqmeZ4XRVG32wUDhU52isbDE8Ydd+7cOTg4MAxjuVwmSQL41Ww2T58+fXh4iEEyxp5++mlZlh88eCDLcpIkjLFKpUIKX2xgO6mY/nK5ZIzJsgy4Brfo6XSKDHFCSLvd9ovT0gCXKaWO4/i+j3K5OFqQUup5HphvnufI6MREsGp5nq/X6zt37jz99NOSJCGS2Bjr9Xpvb6+cOOKwWq10Xc+yTJIkVENNTdMwfpQkSeL7/nK5lCQpjmOMc7PZAGFLkgT0DAy92WxqtRomAr4MSpgkiVSsPi3OMIyiKMsyxA2MlRBSq9XiOPY8T1VVXdc1TQNifuWVV/7X//pfeXFoJKVUKbg28Pdrr732uc99DsRZVdXxeIw6EHYvpRRhz/N8Nps1m80syxaLRZZlGHm9XscDNZ/PXdcFJA2CwLIsz/Nc1zUMw/d9WZbDMMRyQOCz4/FYUZTVajUajTCS2WzWaDQmkwnm7rouxo/sXeQvp2mKXw9EUWQYhm3bk8kEm7zZbFqWNRwOEVXLsjjny+UShfDMARbfbDaNRgODAaJFZjRjTNM013W3trZGoxHGiY2KgM/nc3xVrVYlSVJVVVGUPM9JcexkGIZlL0mSLJfL4+Nj/AiDc66qqu/7k8kE0DnP8zzPfd93XRdvCzabTavVStOUUqqq6qNHj5COnZ1wb/d9P03TVquFoGFsiqLgn6BarQbCPp1OLctyXdfzvDt37uzv78No6MqVK+fOnUvT1HXdt956a39/H21evnz56tWr2O1CQkJCQkJCQkJCQv9WjuN0u91r167dvHnz5s2bf//3f/+nf/qnX/rSl15//fXy/xyFhP4jCQAtJPTbowsXLuzu7p46dUrTNNCZZrOZpqmu66BpnU4HP06XJOn27dur1YoQAuij6zpsBNI0VVW12WzO5/M8z5vNpuu6nHPHcabTKe5tNBrAUoBonHPbtk+WQCCnSLVWFEWW5UajAbgmFScH4s9+vw/YJ0lSlmXb29u/MDFJAuTCt/hM0zQtLJLTwo8iKZKU4xOeznEcg88isRe/8TdNMy4O9AM8BRHDRdlplmVlgwDWP/rRj/I8D4JAURRWuGPHcQzcyRh78skn79y5A4iZ57mqqqdPny49ATCRPM9B2SRJQs1yjviE0OBsNsN1lmW+78MlgHMexzGqYcwgdJgsKYyJ0zTFRRRFmA5ChMIwDLEH4gLZA9MjP5cQUgZZ0zTA1jRNgyAAi/d9fz6fI/goAX9cLpedTgc0HDsBrxzyPMduURQF7FJRFEmSgJ5Bh1erVb1ej+PYMAzP83Rdv3r16gsvvKAoCvqSJOnBgwfnzp0DcQZQns/ncCtGBSBpfIuOxuMxEroRhzAMQVc1TcMwKKXyCeV5nud5FEUg2o7jYJ8gbtevX//4xz+OSCJiyMZF5U1xROFoNNI0bb1ey7Ks6/pisZBlOQzDyWSCJ3Q0GlmWhd4xAExkOBxisZD22+v1sLjYmUmSWJZl2/bjx48552EY4sXPdDpFYBGT+/fvt9ttzrlhGJVK5eHDhzhitFarcc7hIj0ejxElRVHyPMduRNL0YDDA87u7uzsej5GITQhRCkCPjUcImUwmlUqFc45k6kajce3aNcbYeDx+5pln1ut1EASVSiUMwziOz549iwvst81mU6lUkiTxfX+z2VBK9/f3j4+PX3jhhclk4nne8fGxLMtpmt67dw8bAzvWtu08z5MkuXPnzpkzZ7CpxuNxmqayLBNCbt26denSJdQZDAbdbhc79v79+++9995isWCMua778ssv42EXEhISEhISEhISEvq/1+7u7l/8xV/82Z/9WZIkkiTleX7lypU//uM/JieM+ISE/iMJAC0k9NugZrN59uzZbrcLAB0EQbPZjOO4Wq3iZ/hpmkZRpKoqbGTfeustoCLHccDCGo0GEN56vQZoa7VaQGZARYwxeEQgixnEx7ZtIGZZllFo2zaYFKiWXEBn8GUoz/PBYMAYY4wpikIIwSFpoKhpmqKadCIBGdMs/ywr4xoXeXFMXBiGrVYrCIIoioIg2N7e1nU9jmMQUhDDpMieDoKg3W7j3hLwJUUK5/b2dlYcvvfqq6+iEcdxOp3OcDhUFKXVaoVhaNs2sowxu/PnzzPGFosFRssY63a7ecHQgRoRkH6//0sTRB1JkhhjIKGcc4wBdBXV8sIDV5KkKIoweMwddsBxHCMymJTnea1WC9
dlxNI0bTQaiAMajOPYdd3xeLy9vc0Y6/V6IMsIVKfTSdN0tVrh3izLDg8Pn3766TRNfd9H4Wq1iqJoZ2eHEMI5x8aQZfmNN9745Cc/iU0CbKppGrJZMSOM7fXXX//sZz+LGRFCwDERFthchGH4zjvvPPnkkyCtgK1vvfUWbJotywLK3Gw28Ym3ApcvX37xxRdrtRpY52q18jyvDA4ky7Jc5O+7rmuaZr1eh8cxdgvqYy2Q/Q3iLEmSYRiSJE0mkyiKoigCjdV1PU1TRVHAplVVBffknIM4A+aCnGLPI+s5TdMgCEzTHA6HcCCZTCaWZT18+LDb7dbr9eFwCN8Yy7I0TVutVqdOnRoMBniWl8slvDXkwlRa13W8hZIkqVKpOI4DS5zxeNzpdBhj2JN4TYJrQojnefV6HQGcTqf7+/vj8Rh/SpIEU28MdWtr6+joKMsyAOLJZHLq1KkLFy6ggmmayBAnhFQqlfl8jskyxtI0xU8rsGSmaT569OjixYtZlmmaRgjJ81xRlCiKlsslfjOB5cArJc/zNptNEARBEKzX6/V6vbu7u1qtkiQZjUb9fl+W5SAIer3e+++///7775drLSQkJCQkJCQkJCT0/1bXrl1zXfev//qvv/a1r00mE8dxvvjFL/74xz8++X9VQkL/kQSAFhL6jZdpmo5D/uwAACAASURBVPv7+zs7O9Vqtd1ut1qtOI7DMKxWq7quy7Lcbrfhi5rnuaqqb7/9dhAEwFKWZem6XqJnQkin01ksFkmSyLLcbDaXyyVulyQpz3MAZWCg8XgMAlhSKkrpZDLhnNfrdYA8MC9Jkiil4EEQfGPLKZDCZBm99Pt9JEGjTl5wZ2BKfKYFQoWAJqMochxH13Xf98Mw7HQ6eZ6DgoGuxnEMzwpw1U6nU6lUUCcpfKLRdZ7ncRx7nhdFke/7SB5XFEXTNEVRDMO4ePHi4eEhkLQsy9VqVdO05XKJ0VJKHccBvizjUE4kTVNJkhRF2draOvlfa1p4PhweHp6sDKPbx48fx3GMewH7EA1MDZ/z+bzVam1tbcmyPBwOwY6jKDo6Ojp16lSapqCZCIUkSaqqokFN0waDQRAEyK4FGq7X6+v1Gt0tFoudnR1KqaZpZcyBmEESOedxHGN9S46pKIqiKGrhtIthJ0liWdZ8Pn/77bcvXryIdcemopQmSdJoNJBwLUlSHMfoERRVVVVCyHQ6Xa/XfnGQoO/7nHMQYUDYer0eBEGWZUEQaJo2mUxkWS6ZtaZpQMaqqnLOVVX97ne/+9nPfhbDJoRcu3btE5/4RJ7neZ7ruv7++++fPn3aMAywzjiOMf71eo3Nhs0TBAEahP+Drus4M1ApMqwVRYnjmFI6HA5t206SZDqdMsYIIYj8aDQyTbPZbA4GA6wRtpxpmqqqViqV8vRC2H0gzRlnBlqW9ejRo3q9TgjBXbCwuHHjxu7uLmNsNBo5jtPr9XRdB/smhFiWFYYhbDGm0+mZM2fW63W1Wg2C4OjoqNFoIMW41WoRQgghcRwTQuDCgZXNsiyKosePH9u2PZ/Pz58/3263ZVlGMnue561Wa7VaBUEQhiHCe3R0pKoqdkIQBJ7n9Xq93//935/P5/P5HGFUVRXxZ4zhhYTneVmWbTab1WplmmaSJJvNZjwenzt3TpIkjOrdd9/FArmu+zd/8zf4h0tISEhISEhISEhI6P+/kiT5q7/6qy9+8Yt/+Zd/CUrwk5/85Gtf+9ov1xMS+vckALSQ0G+wut3u3t7e9va2ZVk7OzvgYpRSOMPmBahljOE0s7feesv3fc/zHMdxHIdSappmtVp1XVcqEmOzLAOKAkEGM2o2mwCLQITj8ZhzXq1Wf6lQ07RKpSLLMpArfj4P7qYoSr1el37xZD/MAhdlOW4HMQfzAh1OkgTWsRDQVbvdTpJkvV6X4BjYLi6sOdI0jYvD39rtdhRFruuGYQjyC4qanEDPZeMoCcOw1+s9fvw4z/NmswkQTAhBs6Zpwod3sVhg8K1WCyPHjJAzPplMAPtoYUlMKS1nh4mXzeLbbrcLcBzHMexx8zzvdDreCcHiQJIkILzNZoOwY1J5ntu2vV6vGWOSJG02myiKMGW8MMjz3HVdwFNMGa8NVqvVrVu3wIVBCSEsYpZlqqpGhbeG67qyLGfFKXDQZrOBXUYcx2D9UeFlkSSJrusowWe1WgVexCsB3/fb7XYYhvBYCMNQ0zRVVdM0LfmyruswnUiSxPM8RVGWyyWlVFGUchjf+973XnzxRdQHYqaU4gIV3n333YsXL3LOeeG/TCnF3mOMLZfLMsfZ87xHjx4988wz8/kcJZvNBvs/juPpdJokyWKxAHFeLpeyLHPOgbw558vlknOO3GdK6Xq9bjQajLHRaJTneZ7n2EL9fj9JEsuyjo+PGWNJkoAyP3jwoNFoKIriuu7Zs2f7/T48NICATdPEdCiliAz+lCRJURRevB7A7loul9vb27PZTFGUKIoajUaWZbdu3Wo0Gr7vNxqN9Xrd7/exGaTiHQylNI5jPFnlugdB0Ol0fvazn+3t7WXF7wYopciyx5bGLdiKQRCYpsk5p0UC9Xw+p5QifzlJkv39/d3dXUVRut0uBpzneRRF+AfN87yoOJnQ9/3xeLxYLDRNS9N0uVy+9NJLSIiOoug73/mOJCQkJCQkJCQkJCT0q5HneV/+8pd/uVRI6D8hAaCFhH4j9aEPfWh3d3dnZ4cxtrOzE0WRbduNRiOOY8ZYnufgOEgAvH379u3bt2VZNk2z0+n4vm/bNsAfYJDjOK7rghnhc3t7ezQaUUqbzSYrjDIGgwEQHg6yo5SWJYqiIDMaP+2HbNuWCpdnUlBmjB+9QGWhoiiyLJffZlkGYIpvgcOAhtvttmEYIGL4BFDOT+REx3HseR4gtWEYSF8tK6dpmp9IfE4KCIvCIAgODg7gjwFw1mg0TNPMsqxSqazXa4D17e3tMrUc2JcQAufc6XSK8XDO2+02IaTf7xNCsF7D4TBN0yzLsFjllCFAZFQop4PWykhKhQdCeXtevG/AxLMi1zhJkjiOdV2HRTK4bTnf+/fvP/3001EUoT648Gq1kmWZEJKmKRBtGIbr9Xo6naJ9vMZYr9cgrWmaImhA9mVfQMxAh6vVCreDMiPmcRxrmsYYC8PQ8zzg3XffffejH/0orpFQ/PWvf/1Tn/qU53lxkfT92muvfeITn2CMYSFkWQY+LuW6rlqcTIhGcNQeL45AfPz48Uc/+lGlMJMhhfs2pZQQ4nlerVY7efydqqrVanU6nZabEOFC8rWqqgC7mqaBOCOBtxwhIWQ4HOq6bhbGGpRSrAIy6wkhSGSOoqharXLO+/3+s88+a1nWwcFBpVJBXrBhGIeHh7VabbPZNBoNTdMODg4ajcatW7e63a6iKMvl0nGcmzdv4uXHzs7OdDodj8eSJBFCZFnO8xy4OcsyQkiWZfinII5jrHi73e71etiZtm33ej1EqVqtep539+7dWq0mFU9rHMdBEHDODcPIskySpDAMkySp1+t4NDDHLMtQ7rrucrnUNO3w8PD4+Pjpp
59GR67r3rhxA7BbURTTNOM4Xq/Xq9Vqa2sLdT744APbtj3Pm06nN2/efPPNN8tHRkhISEhISEhISEhISOjXWQJACwn9hgnmD61WC+nPyBWN4xiGG3EcgwaCo7Xb7W984xtJkiiK0u12u93ucrnE6XyGYTiOc3BwEIahYRimaTYaDc75fD4PwxD3Msb6/T5Ik6IozWaTUjoajcDsVFVtt9uUUrghoxqyawHyMOASmILxSZKU5zm+xZ9QWQKQCjoGAWOBY4ZhuL29XVY7+ZkVGc2gqJvNRlXVbrcLgJicsPEFxyxvSdM0iqJWqwVg7TjOyy+/zBjzfb/T6bRarXPnzh0eHhJCkOVq27aiKHfu3DEMQ9d1WZa3trYIIbPZDO0rimLbtizLKOGcM8ZgYgDE1m63FUWhlGIMx8fHeZ7DNznP836/jymX02m32xgnggysmSQJnL6jKEIM8zwnhIRhWK1WkyTxfR8Tj+N4tVrNZjO0GQSB53lIGl2tVqPRqN1uz2YzYGXf9/M8BzPlnHPOV6uV7/uGYeCMyjiOKaV54fi8Xq87nQ62XJ7nURS5rjscDjudDjhsFEXg0YhnEASEkCRJgiBAKjEhRC3ODAR45ZyDHUOu6+L1RhzHm81GlmV4m4DtQogtrhVFyfMcWxRSFOXNN9/8whe+gC4UReGcU0pxDeV5jq1LCNF1HTONokhVVdgKQ3Ecm6YJw2JN03RdD4JA07Q4jhE3RVGm02mlUhkMBoQQSZIODg7OnDkTRRFeQsiyjA2/WCxqtVocx4DvhmFwzuGkYVkW1hp7DPsHA1ZVVZZlPC9obXd3dzKZYA8sFgv4d/u+j98cUEqn0+np06dhGGLb9tHRUbVavXfv3u7ubqPRAP1njCGDO4oiz/OazSbn3DRNmJlsNptut6uq6mazqVarefGKKAgCwzB830+SpFKpjMdj7FjsRs/zZFleLpebzaZer0dRdP/+/Xq9fv/+/eeeew6Pm6qqqBlFEaU0SZIyuzkIAtd1Dw4OsizbbDaDweAf/uEfME0hISEhISEhISEhISGh3yAJAC0k9Jukra2tD3/4w7u7u5zzra2tZrMZRRFychljhJDt7e3hcIjKeZ5/4xvfoJQ2m01JkiqViqZplFKkqQJvnT17Fr+FB1EihLTb7eFwCAAqy3Kr1WKMDQYDoD3OuW3bjDHG2HA4BA4rofNJSZKEDGJywnMDQmUQ1W63iwq4JcuysnKWZZhFFEWA7HEcowQTxHWe58Cs+ARdbbVahmEgrxOUGVwMDBFdR1EURREss6Mogo/HBx98kBen89VqNbDCLMueeOIJRVFms9l4PDZN0zTN06dPj8djYEpJkmRZtm17NBqhcc65JEmNRgOskDFWfhJCyjxxTLPRaEiSBDJLKYWvCNKHwzBcrVZnzpwBqh6NRli+MAyzLAP+ZowdHR0hs3i9XiuKAh4qFQcqpmmaZRn4bxzH6/W6jEmSJLZtB0GARcR0KKXwa0Y8TdPM8xyoGutVkmVVVZfL5c7OjiRJnPOosGiYz+elYTQWLo7j1Wq1u7tLCAmCAC1omvbqq68+99xzQRCcXJrZbOZ5nud5cRxj8LRw2IDyPEcJJMvya6+99slPfhLfopAxVlZQFMXzPEVROOdls2UF3HXlypW9vT1VVXVdX61WcNXAMFBimiZSmKMo0nXddV00uFgs7ty5c/bs2eFwiEfj2rVrn/nMZzjneJfgOE5eZENjtx8cHLTbbbRfq9V6vR6IM7i2aZqu61JKb9++7TiOoijr9frs2bO3b99ut9uyLMOZ/f3333/yySeTJJFlmVL6wQcfnDt3zjTNarUahuHx8XGj0bh///7e3l6lUoEf9GQy0TTNsizLssDHGWPo9N69e3gvYhjGfD5XFEXTtCiKfN/v9XqGYfR6PVVVCSFxHFerVSyZZVmj0QiPkiRJaZrO53M8UHmeL5fL5XKpKMr+/j4M04+Ojj75yU9+5jOfybIsjmNd1znnQRDkeS7LsiRJYRguFot+v3/lypXyMRcSEhISEhISEhISEhL6jZYA0EJCvwGyLGt/f7/T6TQaDcMwOp2OLMutVguHB65Wq05xcmCSJJ1OZzQaXb9+HcbErVar0+m4rss5B/ZtNBpgakj/3NnZgWF0HMdgi47j4Aw0gDm5OHgQeBQ8WlXVarUK3FZ+JUnSaDQCaKOUWpYFQIxvaWF8DKQI4jwYDLa3t3GdF2Q5P2GO0el0TNNMkkQqEDbaBJAFSI3jOAiCMAwdxwGAi6IIKDNNU2BNQDGkDMMQudvtgoLFcfzmm2/6vm9Z1mw2a7ValmUZhmHb9maz0TQN3BAm15hmkiSUUiSAI+tTlmVFUZCmiqDhAp/9fl+WZc55GU/pRNI3poa5Y45ZlimKgvcEYKb4qtPprNfr9XpNKd1sNrdv3+52u0mS2LYNI+84jsFV0eDJua9WK5BikF/QbYxKkqQ0TVGeJInrurPZzHEcQggGDLg8nU5PsmbsH+wBFCqKAjCNu/I8V1U1DEN8uq5b1tR13fd9VVXjONY0Lc9zz/M455xzTdPq9TrnPMuyIAgURSm3EJqVZRlIGjtNLpKOy9GW2xLXKMzzPAgCsO9arRZF0TvvvPPMM8+UlafT6bPPPjsajRA6PCBIxM7z3DTN1WqlqirnfLVa3b179+zZs6PRCL33+/0nnnhCVdX5fF52h4VIkuTevXvYbNjYOJoviqI4jmezmWmalUrl+PhY1/VOp4P4bDabZrMJ4MsYw3sFxhh26Wq1ajabYRgeHR21Wq35fG7bNhLVkWiMRTdNc7lcEkJUVZ1MJoZhHB4ePvHEE4wxYO47d+6AaGPkiCHnXFXV0WhkmiYe8+Vymef50dHRhz70IcMw4jherVa6rkdRhLcUIO++78dx7Pu+7/ubzWZra6vX6/V6vcPDw2q1ikde1/XLly+naYoge543Ho+xvhgAngIhISEhISEhISEhISGh3yYJAC0k9Gstx3FOnTq1s7Ozt7dHCDEMQ5bldrtdq9V83y9haJZl7XZbVdV33nkH/q2MsUaj4TgO59yyrJ2dHZxyVq/XDcNoNpuz2QxdUErhJY3E5yzL0AUhBNQPdfDncDhUVbVM7KWUAq1ChBDLslDz5CzwJ0gr/syy7OS3pMDKeZ6nRW6y4zjlXfgsiTOqJUmCXGC403Y6nZPEGQwuCIIoioIg6HQ6mqYFhQ00XCOyLHv55ZeTJJnP59vb2/V63bKsOI4rlQoYdKvV4pzfvn3bMAxN0xRFgb/BZDIBI+acAzojRKCiAJrj8RhAFmBakiTGGCaLgBBCjo+Py3sxx7ICJgJM6fs+lgbBwRSARM+ePRuGYRzH4L/AoyfdNjabDdJ4wzDknOd5jsVCZZwimBZez6qqgvnevXsXVicoB1NWCpNu3KsoiqIoq9UKhWUFzjmAKUJUtsk5L2/HGBhjWZaxIhMZfa1WK8SwHGcZN8QKQcZn2U4URZTScgyKonzzm9/8gz/4g7ImpXSz2TQajTiO8ZpkWpy0iW/DMCy3n6IoV65cuXTpEobkui4wNPya
FUXxPE9VVU3ThsMhxkAIQb9pmjLGZrMZKdixLMuO45SZ1FhKvMVRFGU0Gmmapmnacrk0TfPw8LDRaOCVjyzLN2/e3NvbMwr/k8ePHxuGATRcr9fX6zUykZGwf3x8XK1WgZVVVVVVVdf1W7duwcld0zTDMHDcoqIohmFMp1M4Ne/t7cmynOe5ruuAzkdHR3Ech2GIyNRqtfF4HMcx1gtOHbiWZRkbcjAYSJIURdFkMnn8+PEbb7xRPOJCQkJCQkJCQkJCQkJC/6MlALSQ0K+vzp8/v7+/3263W63W9va267qtViuKIsBQ27bBGdvtNud8NBq99NJLqqoyxjqdTrPZhHMukgoppefOnVssFowxkLLd3d3r168DgQGc0eJUtKSwkM5PHMsGTgcb6BK6KYqCZF7pBDklhf8GpZRSCtyG1E5UAx3O8xxkcD6fY4TgrWDKjUYj//dUMmhgWeR7xoVRAz4BzpDuul6vHcfBV+kJNo0WXn755eFwuLW1deHCBd/3GWO2bZumyTl/8OAByLVhGKdOnWKMTadTMEpJkgCdh8Oh7/uEkJ2dncFggBABpKqFQTbcSzjnZWQkSUJNxhjGhqmhglwcw4hQAGiu12ucspgkCQKILFTGGOc8yzL0GMcxJrharRzHiaIoz/M4jrHEjDFCCEZYlsiy/N577128eDHLMhRyzrEZQCTL7VHeRQjJsgxIutwGGDPnPAxDtNzr9eAVgx5xI6qV02cFgEa/6FpRFPhplD0qivLVr371C1/4AjYM2r9169aZM2fKMbAi5bn8Ey8JsB80TVNVtdVqeZ5HCpANbBqGYcl/VVUF3g3DcD6fW5aFNy6c8zt37iD/F84zyObGgKMoYowB7LKCOEuS5Ps+Wluv141GY7PZIOn7wYMH3W7XNE3Qf8/zFEWhlLqu+8wzz3iet9lsgKqbzeZms+n1epVK5b333kO2+2q1ajQaN27c2NnZQfb0cDgcDoeKooA4T6dTy7KQRQ5EbllWo9FAhDGvk8+4ruuyLFerVU3TBoNBo9EIw7BSqfi+PxwOn3rqKcMwFEUhhNy5c0eSpGq1ins9zxsOh/P5XFXV+/fvR1H01ltvYYcLCQkJCQkJCQkJCQkJCZUSAFpI6NdROzs7586dcxwHyY+GYVSrVUVRqtXqZrORC5bnOM5wOJQkqd/vv/LKK7VardlsapqGNEYATcuyVFUFldva2gKVGw6HhmGcPXuWncjJBcJDAjUtjjg7KbA/WZYbjcbJctTM8xwUDBiuWq1KklTS3tFohJP6QFrBCrMskwrf5yzLQLXwLSGk1+ttbW0B+EqSlOc5wKvv+0EQdLtdy7LCMARkzIpjBpMkKdm0YRjI08QYwjD0fR8Q33XdW7durdfrdrtt23a1+q9HOIZhyBiTJOns2bM42A2ojlLaarWGwyG6ABi1bRsDDoKg1WopRa4uTDk457IsA9mjmlQkQVNKj4+P0QghBFPDFE6dOoUpo5osy3FhjQJ0G8cxxlmy3TRNlSJ9uISMW1tbiqKgCwjAOk1TxhiWEt8Cg2ZZxgv+WxbmeS4X/huKosznc8YYqPRJXb169dlnn8UgOefg5uPxeHd3F5sBAWSMXbt27cKFCyjBGMp853LwiqIA75YjR6flNjBNc7PZ3Lx5ExsYNQ3DQJzLEkqp67p4dsCIr127dv78+bKpMAyR8I7UXUmSNpsNxkAIAbY2DAOHIvZ6vXq9jodrOBxiBR3HQU59lmWEkF6vV6vVQJxt2x4Oh71eD5tW07RKpQJvZcMwMEJsNkVRPvjgg62tLV3XLcuybfvg4AAvbGRZtixrsVhUKpWjoyN82+v1dnZ2FovFZDJhjIFxHx8f7+7uhmEIa475fK5p2t27d0+dOmXbdpZlnucxxur1+mazWSwWvu9vbW1ZlmVZ1vHxMVZZkqQ4jq9fv76zs7OzszOdTufz+f379znneDmUZdl6va7ValmWbTab4+Pjmzdv/vSnP0X0hISEhISEhISEhISEhIT+XQkALST066VLly6dPn1aUZTd3V3LshzHsW376OgoKdwzbNsG34zjmFK6vb1969atN954o1qtViqVRqPRbren02mWZYZhqKqKVGggWlKY5DqOA89o4GCw0TRNKaWdTgdIFO4QkiRtb2/TE2CaFlbOUgFJQdMIIbVa7Zemg4xdkEG0JhWOHFmWoamyEI2jMurkeY4KoHhISfY8b2trCyC45LatVisMQ1QAy4vjuATWYRi2Wi3kh7733ntAipIkITMUjL7ZbHLO5/M5xkkI6Xa7lNLRaASEzTmHLTLiUM6dFIcKIp1cUZRWq1XOGhWgXq8H0scYazab5RwxBSDg8XiM3vM8T5IkDMMgCGzbVhQlyzLGWBRFnHO1cFWezWZhGIKtbzab9XoNpIv6GE+Jca9evXrp0qU0TVFSlsuynGWZfMK/AuWoyQrJsnx0dIT9gLmzE3nHeZHSLhfnLmLtygXFBUrKfmVZfvPNN59//nmQXwwYlPxknTRNN5tNpVJBwrssy7CeiKLINE3M/Vvf+tbnPvc5jA292LYNpxr0fv369TNnzpTLQQhJkgRbERAfcBn9SoXTiK7rcLpQVRWNE0KyLOt2uzBNliRpsVjU6/UkSXDqINokhOSFh8ndu3ebzaZhGPDZePjwoWEYkiRFUQT/bhwvefny5SeeeMIwjPV6XalUbt68iZ8CNJvNJEnu3LmDsyJ1XTcMY7VagUfruk4pDcNQ13XGWKVSWSwWWZZ5nod9a1nWer2+ffs2kqZ1XT8+Pu52u5IkgV8/fvy42+3iTcyDBw9833/mmWeiKEqSZL1ex3GsKAo8nafT6d27d3/2s59JQkJCQkJCQkJCQkJCQkL/OQkALST0a6F2u33hwoVWq9XpdHZ2dkAPkyQBCNvf3wcYzbIsz3PYEL/11lur1cp1Xc65bdudTqdkbbquc87r9brneZIkdbvdxWIBitdut5HH2mw2FUUpgRohpHRzBjpsNpsYWxAElFK4QoOWDodDUEhJknq9HlqQCpa6tbWF63J2aZpKkgS3WUIIJlIC4tKdAzVbrRYyfNEI2ixxM5rFjVEUlRnNoMydTqdsGUzW931VVeFf8U//9E+yLHueZ1kWYH2lUlEUZbVaIT80SRJgQUII+pJlGdB5OBx6nkcp3d3dRaxY4bbBOe90OpTSXq/HOUcAERBMFpSWcw4naMQNMcccsyx7/PixJEm7u7tlCegz5j6ZTPb29rIsy7IMPXLOVVXVNM2yLHSXJAmWDwP4JaHQdV2Mp6TScnFMYkmly8pXrlz5yEc+8ovNMGSyS4WLCForJ4vWoNJkHFOGFosFJi6fsPXwfZ8xBnzPOcfYUP9kg+12e7PZ0BM4u1KpMMYmkwlqLpfLsi8MD/sHlVGI/YOUZE3TNpuNUjhXuK6LkMIbmhAyHA7hRwEUy4vXA4wxz/MQLpyhJ0lSlmW4C2j44OCg2Wyapnl0dATISwhhjAVBgDkOh0NN03Z2dsIwTNN0Op12Oh1d1zFg13WRcc85r1areZ5zzrF1G40G/mVwXTcMQ6S
Za5qGRwCOz7IsG4Yxn8+Pjo7QbLVaLZPuNU1TVfXWrVvPPvvsZrPJsiwMw9u3b3/sYx+TJElV1dls9sMf/vCpp55yHGcwGPR6vfv377/zzjvlggoJCQkJCQkJCQkJCQkJ/eclALSQ0H+nLMs6c+ZMu93udrvb29tJknS73UajEQSBZVmGYbiuC9zW6XSGwyFQ1/Xr1+/duwdWa1mWoiiMMVmWT506BQrW7XZd15UkKY7jOI6zLOt2u5PJJEkSSmmj0QCHQiIwOJosy+12W5IkeBmTgo1inEB4jDH8WavVShQIO460UJIkg8EA59dJkjQej7PCZ6Pb7WaF37EkSZTS8ivQZLhqxHGMCnnBYcuWbdsOgsD3/dVqFYbh9va2ZVlIiY3jOEmSpPCwBnpGdnMYht/5zndWq1WWZfV6fX9/H4cWqqqqqupqtTJN07ZtVMBkS2oJLEsIKd02QEsdxykZJVKSFUVxHAcxwb3l58kSKM/z4+NjxpiiKOilWq2maToajba3tzH3kp9i+vBYIIRg7YBuGWPHx8ftdrtcRAhjo0WicamSw578E9yWFOYVKGeMLRaLk7Mox489gFvQAg7cK0vKT6wgrstm8clOJDhLkqQURiKlynbKmuXqoFPGGJaprJamKeIGUUq///3vP//889hguq5vNpvSlCMMQ6zC7u4u9pskSd/97nd/93d/VylOVux0OqPRCI2DOKuq2u/30WO/39d1nRASxzGSiNvttmma/X5fVdVqtbpcLnGLbduGYRweHiqKkue5ruv1en2xWOAdycHBQaVSqVarTz311PHx8a1bt5BbbZpmrVYDyIZtNLwver1eEATb29tbW1txHN+8ebNardq2Xa/XB4PB48ePF4vF/v6+aZpRFLmu2+v19vb2sFsePHhw8eJFwzA6nc7h4eErr7zywgsvyLLcaDQODg6+/e1v/mXL9gAAIABJREFUX7hwQVEU0zR7vd53v/vdy5cvl8stJCQkJCQkJCQkJCQkJPT/TQJACwn99+jUqVOnT5/e2trC7/q73S4OGwR6hm+GJEmtVgtZq4SQTqcjy/LNmzfv378P0+FWq7VarQghlmWZpqmqqud5WZaV3NZxnOVyCT7rOI4sy4PBAMmznPNut0sKXwhgPkppozhUMMsygDapMM0oOd0vKc/zkgmCJ2LweZ4bhgEunKYpWgNUBUwEVvZ933Ec6YQZNOqkaYrk0zAMW60W5zwIgiAIwjCEzwZwM/hsesJtw3EcTdPCMPzggw/6/T4gNfJ26/V6tVo1TTMMQ8wX4crzvNlsDodDAM1y4oyxOI5BMxEEXNDCdwJuG6w43w9xwL0nm8IFbEyUwj8a0SOEIERJklBKh8Ph7u5ulmWU0vTEyYqj0ej06dMIdQlYGWOj0Qj51wh+Wf7uu+9eunQJvUOYLzmB1yHG2JUrV5599tnyz7L9csVxIyEEaezYGycbwZKRAkNDuP3kaHFBCisYQoht28jup5SC40Nf//rX//AP/xCVKaWyLP/jP/7jH/3RH5VNodOyfUKIrutZ8TJD13UYTei6XqlUgiBA8jJMOTBOSZIePHjgOI7v+wjder3G02FZVhAEiqKoqopXMrIsDwYDeFyEhXk0kuhXqxUsnh8+fAifaNd1LcuC4zOlFPCXUjoejznnV69e3dnZkWX56Ohof3//wYMHeZ4zxmazmWVZcHYmhMzn82q12ul0NE17/PgxsrPjOO52u8fHx0EQOI6DcU4mkzAMF4vFqVOn0jRdrVYPHz58+umnq9Xqer0+PDyM47jT6VSrVULIT37yk/Pnz+NfAELId77zHUIISiilb7zxhoDOQkJCQkJCQkJCQkJCQv9nJQC0kNB/tfb29vb392u1muM4W1tbvu83m80syyzL0nU9DEPw0DiOAcVs2watw5+3bt3Kssw0TUCx8+fPDwaDSqUSxzGldGtra7lcJknSbrdd182yrNvtzmYz0DrOOTB0mbcryzJ8IcDywA3LoR4dHUmStLOzg0K4QqMOsk1xCyqD5KZp2m63MQWIMYY/KaUAl/hEp6WyLEMJUDKSQzebjeM4oIpZYZQMIIuLOI6TJIEFR61Wq9frWZb9/Oc/H41GcRzbtm1ZVqvVqtVqlmVVKhXYI2CQcFSoVqswzKWUIgccKiknpbS0GSEniDwpUoZxja9wCwrzXzR/kIvDG9F+eSFJEu4qG4GPcF54QEdR5Hke7CYwzrIXNFt2gSFRSheLRWlsgjbL7spRnRxwWaG8LpUXbxdQnxas+WQ1SimsHk7WZIy9++67H/nIR3BNi3Ts4+Pjer0eBIFt24yx+XyOcnBnuXCglmW5bAfCMqFQkiRd11966aXPf/7zQRDouq5pGvyvq9VqkiSj0QhjS9O0HKeu60mSBEHgeZ6mabquj0YjzrlhGEj3juN4Mpl4ngdwPBgMNE1TFCWOY8uysiwzTRPp80mSID25Xq/rut7r9S5durTZbFarlaIoURRpmmaa5qNHjxB2RVEMw5hMJpxzXddv3Lhh2zaAu67rDx482N/ff/jw4ZNPPqnr+ng8brVaN27c2NvbY4wh8f/Ro0eVSsW2bV3XkV49mUw+9rGPmaaJR346nZ49e7bZbKZp2uv1Ll++fObMGcaYYRgPHjwYjUbw9jFN891334WpCF5f+b7/7W9/G+MUEhISEhISEhISEhISEvo/LgGghYT+S3X+/PkzZ87s7u5WKpWtrS1k3eq6HgSBJElZlnU6HXja2raNLEWkTw6HQ1VV33nnHdd18Yt7pGfKsiwVWbQgsyDOWZa1220cIOY4DuccTBYNNhoNSinsBWiRSQpIB+SHwVSrVUmS1us1qrXb7TInV/pFD400TYGDsywbDofdbhdMmTGW5zkaHAwGnU4HfZVqt9tRFB0dHZXnCiKDFYAMfBnlKGw0GshfjuMY5b7ve57XbDZxy0svvUQIQaJou932fd80TUC6rMh0BuIkhCwWC13XLcsCiQN2LEMB7inLsuM4oK74ijGGapIkIalZkqS9vT18hXJ6gtjirrIQFcrCkvDKspxlGUKK8iAI0CDeRpSjopQyxqbTqaqqsL8o+yov5vP5Ly0rpdR13ePjY+SboxxfoXK5NCjH2pV1yvpYLDQIEUKQiI17aTFlWpxXCWVZliTJ8fHxhz/8Ydu2wXwR4WvXrj311FOgzyhRFKWMDMQYAzvWdd00zc1mE8exaZpZluGRoZS+//77zz//PHpP01RV1TAMgZtVVV2tVq+99trnP//5KIqQEC1JEvKUOedJkkiS1Gg01uu1qqoYQKVSgZ1LlmWtVms8HuMRiOO4VqslSXJ0dBRFUZqmiqJUKhXwdFmWB4MBhuH7frVavXbtGkzVGWO2bY/H4/l8btv2w4cPq9UqEvPn8/nNmzfPnTs3HA7xvmQ0GkVRtLW1VavVPM+bz+e9Xu/5559vNpuEkF6v9/bbb3/84x/nnM9ms6Ojo9dff/38+fN5nler1aOjo9Fo9Mwzz5im6Xne8fHx9evX3377bcxaSEhISEhISEhISEhISOi/TAJACwn91+
nChQu7u7uO43Q6nZ2dHUmSXNcFWGy3257nxXFMKW21WsBwk8lE0zRN0zjnrVaLMQYDDcaYYRiO4wAEg/+2Wi20kCRJp9NZLBZpmtbr9eVySQjJ89y27X6/D9YGRlar1cqxgd/hGvWBDssK0gljYnDSLMtQkxZcNcsyQExakEcUUkrTNMWf4NRhGAIZR1GElgGaIcyo2WzCbSMMQwB3XdcB+0CW4bZRGibEcfz666+7rttoNGzbhtV1q9XSdX02myGqmDtjDEfAWZa1Xq8ZY8hjVRQFgy81HA5RH4PEfIFTGWOEENu2EYrxeLy3t4cYIiClpII1Hx8fl4WIHnpB44hMkiQ7OztoE4VJkiRJ4rrudDoFdvc8b7Vacc7hMozVQWtlm+gFq3NSo9Go3W6XwyiFFcfylWv3+PHj0tj6ZAs41PH/arTYOZgURClFs1jQKIrq9TohBF7b6KK8HYcTItT4jKLoZDuIKg6NnE6nKEfLqJZlmaZp4/EYhBq7Yr1ea5pGCAFuzvMc+0eSpDRNOeecc/haoN80TXHj8fEx59xxnMlkgqHC9DzPc1g/Izi4N8/zarV679690i6jWq3OZjO86nBdV9d1TdPu37+vaVqapsDZjx8/fu655+7duzefz03TtCxra2vr0aNH165dy/P8zJkzOzs7wMrf+973Ll68qCiKaZq+77/66qvPPPNMlmXtdrvf77/88ssXLlyArXO/3//JT36SpuliscByvP322/1+H6sjJCQkJCQkJCQkJCQkJPTfIgGghYR+5bIsq9vt7u3t6bre7XZN04QjRL1ef/DggW3bYRjmed5utxeLBYAdfviPw+UYY4yxb33rW8vlcr1et9ttuMGCrq5Wq3q9DuMOx3EqlQqgZKPRAISq1Wrz+TxJEkVRbNsG7yu5HpgaAJ9UcFJCSJ7n8N/A9UmwKBW8OEmSra2tPM/zPJckKSvyndM0pUVyNLrDt3Ec+74fRRFOI4zjOMuy7ISJc5qmIM71el1VVWSeuq4L3JmmaRRFSGhVFKXT6cRx7Hme53lpmhJCfvrTnzLGTp8+Dc+NWq3mum6SJFEUgV9TSkHwOee2bd+4ccOyrGazifRYVVUZY8fHx8oJIdV0MBiUIcqyDHbSZWQQgTzPSy9jxAFhQZAppSD+KCwjg2YRjSRJEGFFUdLC4SRN07KL8Xjc6XTCMGS/aKl8eHiIpGZ0V7afn3g3gBK0j5GXX9m2DbcWdAphNYfDYdkyhOmUU8Mncu3LqaEL27ZXq1UQBL7v27bNGHOLEzUxAFwgDvizvBeFly9f3t/f931f0zRd17HQ6CJNU1Bm5EQD4Pq+L0lSpVKZzWZYUE3TyhcbSZJomrZer0GlYcYiy7KiKOPxOAgCWZYppUdHR4Zh6LoOJk4IGQ6HhmFomnb37t3Tp0+v12vXdRVFWS6X7Xa73W4fHx/Lssw5Pzg4wKONdxvXr1/HCxJKqa7raZq2Wq3JZPLTn/4U758459Vq1bKsg4ODw8PDCxcu4FmeTqevvPJKt9vd2to6e/bsaDR69913VVV97rnnarXaeDx+7733PM9DHNI0/fGPf/zgwYNer4fgCAkJCQkJCQkJCQkJCQn9WkkAaCGhX6FM0zxz5sypU6e63a6maVEUgVgRQuQTic+wxI2LcwIppUphg8sY+8pXvgLcVqvVsiyr1Wq2bSdJIhXZxFmWNRoNtJAkSRAEAHzgcbgFzbIThsW9Xo9zDgoJ5igVjJIxJklSpVJBOepL0r/aGgA946uSRUqSxBjLsowQIheuICgvEXMcx1EUdTqdkjvjM0kSoOQgCADokcqN+t1uF2m/URQFQYAcWN/3l8ul7/uyLF+9ejVN00qlcubMGbBUAETGWKfTURRlMpnEcawoCuccZwbeunXLNM3t7W1kPcPoIAxDVVXBWymlo9HoJIbGOLMsS5JkMpns7u5idvDFxvXu7i6igZCiHfxZqvzzZLXyrrJNNEUpRbTjOMYKYhFZkSZMKV0sFpzzZrNZLkTZBeJc9PyvPcJtI0mSMAyRJI4c+cPDQ9u2oyjyfb/RaMiyvFqt8iLb/eSwoTiOgyAAvl+v14SQPM+xRrVajRDium4URY1Go1qtIs+9bAFLX7aG8jJiZbXxePzRj340TdPxeIzyH/zgBx/5yEcMw6hUKtjnlUpF0zQcjYgWVFWtVCrr9ToMQ87597///UuXLum6ruv6er1WFMU0zdlsxjlXFCXPc13X6/U6ADohpF6vHx4eYt3v3bt3+vTpzWZzdHSEZhljqqqGYShJ0vb29t27dyuVCiFkOp1eunRpvV4jmDgP0LbtR48eEUJee+21p556ilLabrejKBqNRkEQ5Hn++uuvnzp1Spbls2fPjsfjK1euEEKwxFmWHR0dHRwccM7jOJYkyfO8f/7nfxagWUhISEhISEhISEhISOg3TgJACwn9qtRutz/0oQ/Ztr21tYVf0xuGUa1WTdOsVqthGKZpGoYhMjRrtRqwqaIojLF+v885v3LlyoMHDyRJ6nQ6nU5nuVw6jkMIYYwhbzpJkna7fXBwkKapbdubzYZzjrzp0WhkGAayfcHaKKX9fl8uDnkrD8TLsgx0D8MGCszzXJZlXIDr4QJoDCQUQvnJkjRNy0bKGwExcZEXtgxAq1EUwdU6K3h0WgguHBDm5fu+53m1Ws33/cuXL8uyXK1W4zjWNA3prqCEWZZFUYSubdt+//33kTCLant7e9PpFBAWK8UYGwwGSIPlnMuyXKvVMB0EPE1TBARRgiEDFqIc7WAwOHXqFGaKSOLzpE5+JZ0wuyiFMSuKgmgoigL6zBiD9wUp3l5AlNLZbHYysCU+Pjo6ahcHQiZJUnLh9XrdbDYNwygTn5MkCYLAdd12u805B43FZLMsO9ky59x13dVq1el0arXaZrNBC1EUrdfrVqtlmqbrummRTP348WMkjGNquDg8PIQbOCm4syRJGEaSJFEUGYZhGAYawTTTNOWcL5fLZrPpuq5UBPb73//+Cy+8gNcVoMbw9dY0Tdd1QHDDMOCXrWka3k9EUYS0ZUrpq6+++uyzz0qSFARBlmUwX14sFthaiqJomlapVFzXNU3z4cOHpmmiF1VVOef9fl9V1SzLNE1DBc55tVpNkkSSJN/3P/3pT9++ffudd97Z2tr65je/yTlnjB0eHhJCJEl69OjRw4cPPc8LwxBnCeq6HkXRnTt3EBYhISEhISEhISEhISEhod90CQAtJPSr0pkzZ9rttqqqlmUZhhHHcaPRSJKEFEAzjuMsy2q1mmmauq7PZrM0TUG1HMf5wQ9+8OjRI03TOOdoYbPZAK1KkkQIAZKO47hWq4VhiPZBxxhjnHNN01RVHY/HuGCMVSqVEhOjEVyUzBQ6CR9xgRJUK1ug/8beobwdn2CXWZbFcVw6PoNXZlmGZFuYL0dRBO6cJInv+5vNpl6vy7LseR5jzHGcOI43m816vbZtm3MehuEPf/hDz/NarZaqqqdPnz48PEzTtBwVpXS9XnPOdV3nnDcaDcYYY4wWKdvdbheYPk1TxpiiKEgiLltgj
*(embedded output image data omitted)*

**Task 2**: Load the data using pydicom as a 3D volume and then reslice it! [35 Points]
###Code
# TODO: Please upload ct.zip using the file panel on the left.
# Then use the following snippet to extract the data.
import zipfile
with zipfile.ZipFile('ct.zip', 'r') as zip_ref:
zip_ref.extractall('.')
# 1) Now loop through all the DICOM files and store them in a 3D numpy array.
# Hint: You can either store them in a list first or read the dimensions of a
# single image slice to properly create the 3D numpy array.
# Hint 2: os.listdir(DIR) gives a list of filenames in a directory.
# Hint 2b: This list is not sorted - make sure you sort it.
# Hint 3: The dcmread function loads a single DICOM file.
# Hint 4: You can then use .pixel_array to access the image data.
from pydicom import dcmread
import os
import numpy as np               # used for np.empty below (may already be imported earlier in the notebook)
import matplotlib.pyplot as plt  # used for the slice plots below (may already be imported earlier)
files = os.listdir('ct')
files.sort()
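# Note (added comment): sorting by filename assumes the slice files are named so that
# lexicographic order matches slice order (e.g. zero-padded numbers). A more robust
# approach would be to sort the datasets by their InstanceNumber tag after reading them.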
first = dcmread('ct/' + files[0])
data = np.empty((len(files), first.Rows, first.Columns), dtype='uint16')
data[0] = first.pixel_array
for i, file in enumerate(files[1:]):
data[i+1] = dcmread('ct/' + file).pixel_array
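# Quick sanity check (added): the volume should have shape (number of slices, Rows, Columns).
print(data.shape)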
# 2) Now create and show axial, sagittal, and coronal slices from the 3D volume.
# Hint: Please use imshow(XX, cmap='gray') to show the image.
# setup aspect ratios
ax_space = float(first.SliceThickness)
cor_space, sag_space = map(float, first.PixelSpacing)
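# Added note: SliceThickness (mm) is used here as the distance between axial slices
# (the ImagePositionPatient tags would give the exact spacing), and PixelSpacing holds
# [row spacing, column spacing] in mm; the resliced views need these ratios so that
# they are displayed with correct proportions.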
# TODO: YOUR CODE FOR AXIAL
ax = 100
plot = plt.imshow(data[ax, :, :], cmap='gray')
plt.gca().set_aspect(cor_space / sag_space)
# TODO: YOUR CODE FOR SAGITTAL
sag = 100
plt.figure(figsize=(5, 10))
plt.imshow(data[:, :, sag], cmap='gray')
plt.gca().set_aspect(ax_space / cor_space)
# TODO: YOUR CODE FOR CORONAL
cor = 100
plt.figure(figsize=(5, 10))
plot = plt.imshow(data[:, cor, :], cmap='gray')
plt.gca().set_aspect(ax_space / sag_space)
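# The set_aspect() calls above only stretch the display; the voxels themselves
# stay anisotropic. An alternative (a sketch, assuming scipy is installed) is
# to resample the volume to isotropic voxels once, after which every reslice
# can be shown with the default aspect of 1:
from scipy import ndimage
iso = ndimage.zoom(data, (ax_space / sag_space, cor_space / sag_space, 1.0), order=1)
plt.figure(figsize=(5, 10))
plt.imshow(iso[:, cor, :], cmap='gray')  # coronal slice with square voxels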
###Output
_____no_output_____
###Markdown
**Task 3**: Use the Window/Level-technique to visualize the data! [45 Points]
###Code
# We will now enhance the visualization from above by performing
# Window/Level adjustment.
# Here is one way of doing that:
# vmin = level - window/2
# vmax = level + window/2
# plt.imshow(hu_pixels + rescale, cmap='gray', vmin=vmin, vmax=vmax)
# plt.show()
# 1) Please load the Window/Level values from the DICOM file,
# print these values, and then visualize one slice with window/level adjustment.
# Hint: The DICOM header has the following tags.
# (0028, 1050) Window Center
# (0028, 1051) Window Width
# Hint 2: You can use slice[key].value to access DICOM tag values.
# Hint 3: (0028, 1052) Rescale Intercept might be important.
level = float(first[0x0028, 0x1050].value)
window = float(first[0x0028, 0x1051].value)
rescale = float(first[0x0028, 0x1052].value)
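# Strictly, CT values in Hounsfield Units are pixel * RescaleSlope + RescaleIntercept.
# RescaleSlope (0028, 1053) is 1.0 on most scanners, which is why adding the
# intercept alone works below; a general conversion (a sketch, assuming the
# slope tag may or may not be present in this data) would be:
slope = float(first[0x0028, 0x1053].value) if (0x0028, 0x1053) in first else 1.0
def to_hu(px):
    # convert a raw pixel array to Hounsfield Units, e.g. to_hu(data[100])
    return px.astype('float32') * slope + rescale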
def window_level(window, level):
vmin = level - window/2
vmax = level + window/2
print(f"{vmin=}\n{vmax=}")
plt.figure(figsize=(5, 10))
plt.imshow(data[:, 100] + rescale, cmap='gray', vmin=vmin, vmax=vmax)
plt.gca().set_aspect(ax_space / sag_space)
window_level(window, level)
# 2) Play around with different Window/Level values that enhance
# the visualization.
# bones
window_level(600, 500)
# soft tissue
window_level(20, 60)
# Which values make sense and why?
# TODO: YOUR ANSWER
# A high level (window center) of roughly 200-1000 HU highlights dense tissue
# such as bone, while a level around -100 to +100 HU works better for soft
# tissue, since each tissue type occupies a characteristic Hounsfield range.
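# For reference, commonly quoted textbook presets (approximate values, not
# taken from this dataset) can be tried with the same helper:
window_level(1800, 400)   # typical bone window
window_level(400, 50)     # typical soft-tissue window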
###Output
_____no_output_____
###Markdown
**Bonus**: Create segmentations (label maps) for the volume using thresholding HU! [33 Points]
###Code
# Similar to Window/Level adjustment for visualization, we can threshold
# the volume to highlight the following components using the Hounsfield Units:
# 1) Fat
# 2) Soft Tissue
# 3) Bones
#
# Please create 3 segmentation masks for these structures.
# Then, please visualize each 3 slices per structure to showcase the segmentation.
# Hint: As a reminder, the following code allows thresholding of a numpy array.
# new_mask = imagevolume.copy()
# new_mask[new_mask < XXX] = 0
# Hint2: You might need to cast new_mask to int16 not uint16.
def threshold(img, lo, hi):
    # Keep raw pixel values whose Hounsfield Unit equivalent lies in [lo, hi];
    # the rescale intercept shifts the HU bounds back into raw-value space.
    new_mask = img.copy()
    new_mask[new_mask < lo - rescale] = 0
    new_mask[new_mask > hi - rescale] = 0
    return new_mask
# TODO: YOUR CODE TO SEGMENT FAT
min = -100
max = -60
_, axs = plt.subplots(1, 3, figsize=(20, 20))
axs[0].imshow(threshold(data[100], min, max), cmap='gray')
axs[1].imshow(threshold(data[:, 100], min, max), cmap='gray')
axs[2].imshow(threshold(data[:, :, 100], min, max), cmap='gray')
# TODO: YOUR CODE TO SEGMENT SOFT TISSUE
min = 40
max = 80
_, axs = plt.subplots(1, 3, figsize=(20, 20))
axs[0].imshow(threshold(data[100], min, max), cmap='gray')
axs[1].imshow(threshold(data[:, 100], min, max), cmap='gray')
axs[2].imshow(threshold(data[:, :, 100], min, max), cmap='gray')
# TODO: YOUR CODE TO SEGMENT BONES
min = 400
max = 1000
_, axs = plt.subplots(1, 3, figsize=(20, 20))
axs[0].imshow(threshold(data[100], min, max), cmap='gray')
axs[1].imshow(threshold(data[:, 100], min, max), cmap='gray')
axs[2].imshow(threshold(data[:, :, 100], min, max), cmap='gray')
# Are the segmentations good?
# TODO: YOUR ANSWER
# They are a reasonable first pass, but pure HU thresholding is imperfect: the
# fat, soft-tissue, and bone ranges overlap at their edges, and image noise
# shows up as scattered false-positive voxels, so some cleanup would help.
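# One way to tighten the masks (a sketch, assuming scipy is available) is a
# binary opening, which removes small isolated voxels from a thresholded slice:
from scipy import ndimage
bone_mask = threshold(data[100], 400, 1000) > 0       # boolean bone mask
bone_clean = ndimage.binary_opening(bone_mask, iterations=2)
plt.figure()
plt.imshow(bone_clean, cmap='gray')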
#
# Thank you and Great job!!
#
# _.---._
# .' `.
# :) (:
# \ (@) (@) /
# \ A /
# ) (
# \"""""/
# `._.'
# .=.
# .---._.-.=.-._.---.
# / ':-(_.-: :-._)-:` \
# / /' (__.-: :-.__) `\ \
# / / (___.-` '-.___) \ \
# / / (___.-'^`-.___) \ \
# / / (___.-'=`-.___) \ \
# / / (____.'=`.____) \ \
# / / (___.'=`.___) \ \
# (_.; `---'.=.`---' ;._)
# ;|| __ _.=._ __ ||;
# ;|| ( `.-.=.-.' ) ||;
# ;|| \ `.=.' / ||;
# ;|| \ .=. / ||;
# ;|| .-`.`-._.-'.'-. ||;
# .:::\ ( ,): O O :(, ) /:::.
# |||| ` / /'`--'--'`\ \ ' ||||
# '''' / / \ \ ''''
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# /.' `.\
# (_)' `(_)
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# jgs \\. .//
# ///) (\\\
# ,///' `\\\,
# ///' `\\\
# ""' '""
###Output
_____no_output_____
###Markdown
![CS480_w.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAADtCAYAAAAvOMSOAAAf83pUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjarZtpklu5lYX/YxW9BFzMWA7GCO+gl9/fASmVVC7b5YhWhnJgknx4dzjDBdKd//3Hdf/Dv5pCcynXVnopnn+ppx4G3zT/+dffZ/PpfX7/+FX4Pvrb4+7s74sCD0W+xs+PdXy+2uDx/NsbfR6fvz/u2vc3oX3f6PuLH28YdWWtYf+6SB4Pn8ctfd+on883pbf661Ln5wb8+j7xLeX738JnYeH7rvrZ/fpAqkRpZy4UQzjRon+f22cFUf8tjvdVnyvPs1j53mJw70v7roSA/HZ7P756/2uAfgvyj+/cn6Ofz18HP4zvM+KfYlm+MeKbv/yF5b8O/gvxLxeOP1cUfv/F2iT2z7fz/X/vbveez92NVIho+VaUdz+io9fwRN4kxfeywkflf+b7+j46H80Pv0j59stPPpZ1C2TlOku2bdi1874uWywxhRPISQhhkSg91shRDysqT0kfdkONPe7YyOEKx8XIw+HnWuxdt7/rLWtceRtPDcabkeR//eH+3S//mw9371KITMEk9fZJcFBdswxlTp95Fgmx+81bfgH+8fFNv/+lsChVMphfmBs3OPz8vMXM9kdtxZfnyPMyXz8tZK7u7xsQIq6dWYxFMuCLxWzFfA2hmhHHRoIGKw8xhUkGLOewWWRIMZbgamhB1+Y11d5zQw4l6GGwiUTkWOinRoYGyUopUz81NWpo5JhTzrnkmpvLPY8SSyq5lFKLQG7UWFPNtdRaW+11tNhSy6202lrrbfTQIxiYe+m1t977GMENLjR4r8HzB4/MMONMM88y62yzz7Eon5VWXmXV1VZfY4cdNzCxy6677b7HMXdAipNOPuXU004/41JrN9508y233nb7HT+z9s3qP338F1mzb9bCy5SeV39mjUddrT/ewgQnWTkjYyEZGa/KAAUdlDPfLKWgzClnvgeaIgcWmZUbt00ZI4XpWMjXfubuj8z9rby53P5W3sJ/ypxT6v4/MudI3T/n7S+ytsVz62Xs04WKqY90H88ZMC3/vefTODWuYifWbXVBFtXPsfOowfaNJyff1+J+7LR4ByTHvcPS6+5wzwaPwKR1ez5gT9mTHOZzrj+pXl7f1uZua7N7QjotnzgJaVnG+uZqu8JfpcfiY3N2BvdKKFj6WWXMzS3tUXovNeSTb16pllvI7pqL6HmyZDePafmEfSf11GftztAQN3UDKFnN3lyXmzrc2fB3nhhK83GDr+22Bh7nzduuTCy58z3SrbxbLdH52/guLa5B01AXeYKuZeVzaw5ztkJeejxcLEzW1cnC2JXbauTGH5upEa7pNqVZ+im2Ey9JviTwZ/GNAdxpGvmst+9ErdZB7eR9jTeJccU92lpFpdKCuVHhluZzg//vIVz+k5MxSuPOxvWLEhxnKW518GWrnGibQM6ogbPi3OU4wrKXUdmVi+21AEDeP4Ub4iS/PlKhNA50VVfgVne8OVMhOV1qnlA2qGtTkKzOSiWdhHFOf9eqffFZC6s5Xpszbyqfq3MLbQajfsEHyj5nKpUADx+Td7bXaVz09HpYRChz7VC0njHptmidNidFlC2JI1Y5Uj6LsNq6fqfTfRl3TJdnHmtR0GGVcy+NvqudnlM9RpgaQb2XNkgshiUXSJTrpntrv6fEu85cfS547djdca46L5RKFHlZF7lXUIDFrAq3i76bP5m+uIVf2qFayhm9xEFNpVo3Wcust+YyQyVmJZS9Rpm+0Mkh3hbPFiDteOquh247hIak9R5bppRDHejieV3zJOiglFk/qLokMfpJjUqzNkqdSIWFfLG5Ew0NnlbeirKOasGU5rxNxeJuo6nARJKau+3Dy6bd7sMcA+kctwdSuiprJ95+Uj+8CVkrVGwyyiX0MpvxRuFyIavEZtzUWDsBRW3SioB3OaQf9PQjPuK/i1o6Q3Ja2PPHV/fnB/7jV8q93DbjOBT47EJ5UtzdSBD4nqlf7jAnyiPnqsKk0m9Ju65OVQ7KOlPInlYtANZdvcZicHDt29Ly1S0a/O6TQ4VkCEdTODq3Q2cC21QigVq5Kts8SPfUpkzvFjfXlYa7Db3hGh2QL41HiPYhkaqlAiCU6wkPBRbGmlWNcXLj6sBBuhPgqfOlYGSbFxYxmhQgpXsmt0zSWHYroDYci1ZG6Ojr9qRmdyqVrI12GokLvCAeAJwind7BWxFuA/wy3AMieJafjT41gdg6QQjZKcgTAPrZ4JUGiO2z1Q5llnEmoOlKHrBSp6DPMVFo3I1yAZciZcVVLxcmNy0Q+A4W7lhhJZgT6WSdtgmgOuB/fFswR6rjxn52A7d0Scjuwv2g08pQDNXUJZkjKhuYm8QS3xLoAnpW8Bddp8RYPElk6USD1K3Qd73p6Nrcb4ZoMlRDxecEqlYSv9AbgTiOw03gsk51kCDdMsA83qCAhAQ67ENCS9gFoD636J6rTagjT6EDhVNJ4iWy17LWPIqDWvzLP9B+W4G8KuawZwqiIxfI7sX2XLkRIT/PaUIqmDyTbIq2NuAoTkeoW55Af1gVyNwo1As/0w0NxRAQtTKhiIDG1TJygpdCHbvq1dwvYYhhde9ufwIrqv72pRm4XBDCV0H+KtAQtUX3CplzQWwNAGyIz1k2igsaL4CwQ5ycCUsZ/VTbalxuzsFyKViS1NATq+wxUqfr4X2Ye+QVLvwNY66KskMN0WuN2lC2qEsiO2HYuhLckyLgxJVCAuwM4UC50yaXknl5DqSIMgKegUAQEeWvmoVdNUgIAPqG++n6fbhZgB+PBCbOjrJCpOiSq491uDeExWujVNoYAJsYgqqictS5xjv6abN0lU6CpHvZzTJsh8aZGw14STh5b5vmRhRE0deKVHaFDY4ZcBlRfwdKG9xPiWJcYAIK3iBMo7Ey3T6RYf6tpAfkREFH1Uyno7NvKsAPFE4Etj8AUQfn9kbUIq9IOwV5wB9UASE4IHHuUb3HXSEWBqk6frtCO1CYKueNMQLmOvdDzYCBJSpCRCdGiAYfJJlBm8DeUBqsMQc6IKM623GdBMG4+YxEhp+eJksgTcdJ0FjQuqmeCWE1JADQQLHtshblhMBsbV3FxTXTKAKb0Ez6DLGwaHTe5iCbAREwYJKXmmba1AwkWNRy6GzBLiIN1VZ7305GOEMD3PKGgi7hhmAn2h3wKggzgHoAViuOQZzoM+gY/cblNMhBD6iCLbvM4uCphmSrIlGgvKguuW30iS0gblZkMt3bseVVvoLajgUgg4wAqAydtuzwK7wKiqrjPN0J69PW5cXSVgz4j3qrNK2wkIx6FC3LC1pH3gTldvjAGUVOt031OW7nqMa6vwUxBgmjV/AEdAxYnPESu1BDFakhrckT0Z5ZXzNZQ0NiiVO2v/M1Bvo20ku8GRImQXpGOHlIxo/Md+GMuI92pr+oVTgxDslulGRH56HlDekBrgvkCuQ15kE5reIBoII8xqa9Sq
bJ+Ibkz45lxznhJ+CHgLW6NJekuCQWPX1F34cGN0QuOv9ACMHRoRurAg1fqXSQjDeLr6vXABgAQXB5TKEVhqlhu0KnGKjHASdQoAVhRrBHoloCvi/es84Wc17FdAviOhVN/1A/hiCa9cpnqHaNoPXVomQXVpTX4kUgKV/vEc/Ygzj6mozR6tl31lmFtKlXlMFIlPTKME3k8YwhGOBZoP2Kk2Y5Ha7wSPCBl1r6odGlwECiWmH/UzIduMFowAhhjQrmPogy3A0MAeQDTzspoui5EAZINYt2v8NA9PEihUnbR+Oa4fterNwHtNRRMcC9M0p2YA+CW5Sx6YKJ7sUBI2fCQjKDG7pjxFVUfUs/8I/bTLgaowlNXU9GAQ+wZznuAUtEcUXafdKVtwl6LsC/QRJ0Al8gzgZUBr5c6ofK91QQOpkgtqIenw4PcAkqJr3LC1NSBx5E2ybIBfNDiT4lIsNE4+TjdcMAaUOPYKfT3IZp2C7QYiSlDUFhw8kAmVfDAEwkJoOcY/wA/631k/0wWL/U3lhGSVc1rd+3uUV/4JOsoNQO/GezY0x0W82zVD4v3FohhkiddvE4KPOXBmCEItF3dHxxe2MFuL1ZyRIfdDOOK0rViKS4IwCGJpTPmqgjxM6jqtypyLHe3UC/w+Ght/QbbLQNQKOJPD2hSgIxMPSGqQib6kUaoBoOxkXjCLipJWkPeYU7sqsVzbsKZEOPIpLRYdUQ1Ej9Qu2UjsAdcqBjz9NfvC5aDNhUh364Fwt33DAYEy6iDJsWhQyBlhYqirbDkcek8kaWoCoG+hhfjRmH0ODPasSXMriBXoOAYXk8TkS/nQ5QgO68ACu8H9zSrdzqFMhHnNxKAwVI8fuxCPwyDTG4oKNaplpPnq6HG1EQqFIgPZA4rBEBxFGi+lA3l5tflPbSzWPkk4YapGDQgk7rIyD+7ShouLVkXGKR9jjcSYFoz5hEiubsoyUsOiQwqJ+IZUGkA2GUhBtAFMwJ3Im/unRByAc3XEf3WBFk40QVooMB1It+xX9YfJKCxFCgvZ09QnFTLjp3j5akb5GE5DbKTw2wDBnI3dGmvGq3KU4H3eD3epDTWRKzU8B0s0SEACaAHLBbU17IMjXHjyNBiHRLoESJB4uRBwclduL3ZrGK3/Dq2FtzWbIRnAU7rzqZAAbcBatAHaGWsvjd1mqatyB8d4c3AIj2mAAj1ijzg2IzrZvUYue485VgdDi4AwieRZDNjiELmhb1K8mnkRatgnOBb3UNRIRHVDveFm2J8uv9I+Su0cebCEH3tDGMC+thwuh73CZSmBbmJzzmiCA31TVAzOz2s1GkdiyVNFg8kWfYFhAkvTEAUH/x9zRaxw0USS4AGwAkmARyUvvUgqbHGnICShK9OPmcIOSGfUAzQI/cBuIWQsBTRrlt9B/Rhao1gfM+YgIPOOXwgtlLYjeBiz38wyZDvDi7LnbD6cYlRYsqjajJTAtpF0uC4I+vrhIUmuTp1NVSlEa+VC588QHWfH484eevaYNfnqDs0f2aHOI9aDAeO0PSqNcFX4FOGDB8ikgYRRCL2gvi920/XPQ0RuHHKOT0DqjnlZAkuQGKW08dkEEJ1p4hGL1FjWgUNaomRZKlUAyNu6i8iz56nJdcRmWwlCmTia4wVH/AwWJCEZO4Lg26FmsoCI+CTM3vPyiKtTzVXtKB5OgmIoA0b01Gcnt2o6hnJbnnaY9vUAqJmFKA8SEicuSC6C3JMRoxASpcPRAU4CzZlIgMCE7NU3iwXJG6b5d2ReQQ8qx5nmamsusT3gYUp6aT9Q4nJY77wBGapnKUCr4okCQMbiIulOk+kg9dlw+AjQwFZY8lQRQggWhhTIirBee8KdyMq0URNbgaS4epoc5QBzIy0fAYTZJdYyfsB4SI4wdOqYuyaLuA78eA0CoeGQ2kp1o1MJgNK4/8eeVJiuTs7quf+30Mr6kp3VB/FYkl1+7V1P15/k0wSSkdH6ZmNJrM0aLoJUNC+ZddT7fkF3LQJGCcgPeOt3BiDzxjHYgylApQiPk6KvervZ8PJXsIkWoBrDVakj8nPlTB0cCUyEHnjshBkfj0jiyQE5O2RxVpSK7dCRZWR9WEndeMBboSJQ2mPNhoGs8SHDSI09BlbfRsNhCgor9SlAIGkqRZ+33DoIv+wvKhZgtGsOPsm2+jlATgWsNhRGwWmAa2yjFkxap5HCq9CakALxutPFrQoB1/plbFs/gOoIIRhrHoaBleWtwdZwPWyOC61dz8DK1i94emDLgC00y203zg6KtpRJRqItAq2gTlXWSjoGykSK8RXRdhFVoL8F6jd2JGp0Ju4sAJ1vHshB19F81S1dzqTECqIHk5uIqSn5bKWKJ3cE/qtVNHNDpFz/OkK6OkEFqO68LubcesCfmA5w4rzNERTHKfCxlESh30931zoSNeHzsj+gNYj2KAj4LOKOTsIUCI7cZNBSCfyehy1PymIqrEDZGUPkN78Q4AFeookiC0PnpSU78kw4stlWRZW24AiKJmWbtgpHN1GZD92A03R7ASJowKbUFD6KjZjKcygM6KR/8YDSqfip1EDPRZ7tAMmBeijQKRm4OB8Gg45REBY0g14QseLVAILFR9Coq8rYRDRiYiBeHkpPS5IxQluo+Sh9yvXAP3oxFpLxoVzPHGnyAWAoGkkIGmSYKmtJXyhFgd9Z/LGVrwLhdIuev1FxIBTMaxD+6M6HGn5F+0FQJaI1AfafYs0bW1t+cuWZiqKMApAsOmERZM/VI2ROV81QzebmmX92jYHkgdTI8XzCNxpkmB80pu1ZCis6g3J6XWsJWy+tDBwqkSfpzM6lYpR7RGAGdk7dHFVg4mIJXGrWmn5CZWjVLogh/ellIGbbAY2GCxNtgCiaPXrJ6J2GOFiG8oKS5tDJY6HXZSEnCV0MpGLqJr4DTDT0h61Pm22D6vIRjUStWksWFboECNxkHjmEbB+GmHKQ9B0VoYqoVN0BaBJADusqlPkjahkG9kEKKCBMFbND7Uw1egC6HpVsEEYXOBaopckp/U4VWXfo+AB4XWuPhhSMe8uGHBLEQb/1y7tgc+I05XM+9NCyBngV7Ci8URtChAQB7QQdqo0I2Eabqz8YobywpJocAN+Y3Ub67jxydPEQCi/Kge1KynEEte+o56832dMNFEIYL1B5MgRSczrD0YhIq2PRyxycTUNFPlUqtRrHxCDk3KVDmmjqh+7Td+BgtWKJzeUY81o8VyMO7Xo/w9ziLxIm0IaoaI5ntTXXQdXmf3tGvYfmn3C5/ew+Iz7++jmGGjIFkuqhbNSJ66trkurIDSrXJUfHATr56BoIaqpfRAtP06/5BzeIKqAtqTuiq6bZjrq4tGbhFGQ6NLneToEUvLayyuIc6nlD2GAnCbQk8CjTWFFLTZHR2e+8hX0cUo8jCM9QeRztICREYaTPQ1NfUR6TeQCnCv5VH4UGFbSEBtKT6RgQ7uiV/82zm4eZwhL0ipNk2UjvZyCJSoGpcBGK6aDSxAfj34x0Eu4Jwn4gV1TWxmRHePI+BjeX1InlO1F76VpsyzNfwb2
qQBoOdzLsywovXJh6qNWaTflsR4eiMAulZr9dp31+ZgNCCYSk+roTG6th8mulF7o+QK7g+aYGhcywMikKXRpXQuLiyFlKZppyPF3aQZsQewX3lb8mPSC40Ig1hYUe4Rj+kBUBQ6nK9hS9BeMy+Tv/5DlP87Te7+tihXQ7xB1UWxmLbVEJe0wQXZLOMgCS4ClbRqhgza+amNooX9hAM0bjxWABJ4Bei/UdOehozG4byxDpIiJtbhuvafr584PLTCBJia9hMyrkxDi/jhpaGzOWQSQUcrBC1atIg7xSNRs2RNzdlQcIjoBl5I+i7YCTb7qKuLTICTpg5MABnAukQkq8RLgpRTw2T6ZGFFgdAOrHnNq3RoDtyChmglBM7VWGfnugx0DFSW4F3bniyYUsGrweFmcw0H5mU0JCIE3YMCGJQIr9b0e0h0XWz60PwYv5q0dO36csNIz03AsSoTqwpBxltZCs0M5eLdNCbiaUjjgiVBqZUR0Hc0Y6k5mk5paLaHqYifUxQoRXRxRLE9Vv1U93twaJMW3T8Olfl+O5LcqHYkyy/5oIDe/iQYAFZ4B8rz8x8ZaTppIntDE0T8FLLtsxtJ/TxvmwauRxOmBV9py5TF7lhd0imRZ77BAm1yaudYOU2IDEFR4DHCoxHpV9xHleLv0r7qIBMlQU9WORFkZX8WMslUor4BemIkjoUVNPcvp2jTd8Q3P0Crwn0l+Ellc/cIbTBCGwUV9DgYId14LNom96iG25OOAoCiYNXoMh80hdeuI6wiRK7Dpf1G/ryNdl9OWHuT5M3iC/izNBw+2nt+jVp1woJvazmfTZ+tkXqIqCdX4KalM1KIL50LaVdnXAj41s3nO8dnw26hC/CCkfhxTeI5Iq8MmkVPQBw6QiTZnppZYBW0pam5NME5q2AFCWgfMjW4tKMBhzYOI8HBJAJ9epNWtavkEB1oIiA8xEBln3/1fixR7sw+UnRlXIoGj/A0WEF/ulgJMRyRwLCuEYumrUhpdHuBudB7lOU2HO/2OnqjCZSAyutQEOhY8Lt8gzs6YvnUEhSRZIGxiwDVkuB6jpqyb1oS7f7WhQ4Ruzfsc4vokK5HNc8WhGYcEHQ55PhgekBMmz06dDSoVegmICNbx6IhAN8h3+d8kfFzq8h5KzcDtZ4h8F4RBwAuin4gPA7GnHaGlpEkmuXAnzKa+Ca7474u1zEPar5qLOs2GUmII5k8AAelgxrVjn3FKaUA9TY4ovqF74aljygfkofpgKy09fKiYDgdaSPUkosqFa/DN10nSgG7CAXmt3cpxDioCvSMpO4jAXFG7trKMQrPAZTNuqQwDgKshzwQpJiArsNBOrCUKSPNxLkU1LZRuZSzUVJ+8ib9M8xrMK0GOxk7l5pP9nYj0QgdMugRM5d1MK5RMsmifK28OOXmU9VUk46H3oHU4Lgo3S2ITxpxIbL0QdfBtKwMm4BP2xgs7brYm93YG1MfCjBqBoK2lxfh1iQqACcEaJG19BJ1Z3NBjVpEB8Ch13FrQF+YJGVgGkOh7DXan8Y9OnERMlbHJNAf7TM31dmaFnWmq2lgWqUFkaLrA73gBj21S9HZH+0g4iCOyynhMOghndTBcyC1dc5Fh+OQyFHHE6pm2nCDgd1TWDZLfDM+DbaztTd5dpJEVBJgjSCRhFcfFVpqAYOIrByW3L3w62LZMSr9db+CbZDNQFohh5p7942OfOOwKjBqB4EJOuoMxMZF1cRtdQ3dAEXCizdCjJ4gFTzim5b4W7EQmGOZBcReDtzNngnXT2N0QkqhahqFErwaXjUV0W0BW+dJro5MRJ1BUWUvZTFy6xbwQAdU2dp97p9DcYhUpO/UTsXNKi4a50j5Y6W15RyW7CAiFum35dHpWRZ6NdrT0Wt0tX+nELXjDQ/LT7HwBeIjQVOMUTynMwQyYJrBX1ZENWkbg1ZAWQIKWafOLsBRtywc6aeyAbf9jhXRr5oyalNLvlsKx2C+66A0fM2iOlfqkp7YMNsAQqALpHKE7iOs0exGFQqe4bMT9DUaMQJKY7iMzJSIR1zjrDCWwAZgoAMY8KHmBvHClYt6aAJ/FnmPzshC7+AUMYzam+iOiE1YFcc8pLz6G+4/Jq6laQhAYYd+oPUi6fGxwzDubdiCZHQJkaBvERHkVNQS6cJ3jhMZ/dmMA8VNgiTzXLxgEQHjWsj4pGaR+Aq8RjHUNA5yvkNBOLxKMor2e0j7FSoJzTWfaOgb2JK0a7MGJB8sgyd0qAKPr7mN366mhLLioVVQQvLQrzEzPbXf1LcCSiiWCwvQhrzD7BgRnU4F0zrNRgE02+9PGC66SWOXraEtt/6ZM9Zh4lnK/tDBAHwAvxDDEtYzSl8LgZF7pkMSmBoN4nW2RRNRIPveiVqoBdiEJa/OR0DNsjZbG4s1UElD0wAEW40K3cA3XUeHQnrveGT9zM2jobAWLFzQmiu+I5IBH4YWvtpzRldoiL0gxwi6wQFwW3Xh4Ajq0HbDpoqG79g4fITXZmEbHUQHD5YOp0UNnkGHiyLAGGqCCqPWjxZwtGH/OpbnCYEVRFV99Jw0q/8cU9aBS+pQs7e3bmT+85lj6RQ1XOkG/BMwpohS7YMg5Bcij06emkCajqBunSCoOuBDJ38kiA4iaFL6Dj8W1EZxucH4KSUdXmksXGc6n3qXuGm9T7TrE3tAkE4weERyScSfoKbhaXxb9Nh0OgwlY/cQMkuE1bd47QiPq3TyPiizob9DAMtBR5KnmaN2B0mrdlKlIW/JFCb+IIywdSIOTfz+FqGDvpVQJEgYs8xrMKoZe/gcs0cbLR27MslhDLPDHRBsgD7g8LSnUSp2vki1d2rqm6f6F3nKHwXwcjOvu5/UqJJfamSLxzyrfYoFDyMZTCFb0AFsgFcDL1ngonMB2j3zAeZ2mgfwL4vRDHJFbuKQgC48nfYUqKalDRR4AeOtv7gAy1Cx71gMCscbTIzTc/5dGch826kT04Iv1y5k0k5hvHhRAAzo1TSA29bRBB30kFbB++eJAdtRbxQo8CDfL4/SSdNB0dIhKIfzLhKOwDIP3h61qqVDHOU5DC4Wtf8BtLtMtBOv1581HB2g29ql1Vng7uHZi681GlVbkpvk42/vRNikohHvmw4PXS64FrUhDHdh+OjMWV5jzxmwJDrV2jX3pHWBfkABzvUKn44Gag8Tzeznw1OHNEKiDhk+HIIsRJcJ35/YBY0p4PT6djC1s6kN4NMLkiFglQwxRMkBcQ5aRUkdzbU0qoQcIVKNWXFMMIemrdI0ptm6/p4haTqDHjhNO5FAxWDZgKQTcGmnyPj11q8pJB2PfBtMmjjprDxcj1pGicHc6m80kTZQjYqJgAx3yq0diPEUep2XaRMDZxHkTWB8ygNzGezpoZDQ6yjwtwFPBb397tF1hAcgSe7Kz3dcPxSeCjGRj+f2gMmI+gHvTtRBfB2Dh0GvTs12RB6dR7XQnnctnaV1ZIQVTh3/PcC/zn++dDbgagQdyDjJC4jnkBO6Oi0wtT1ZkFkeNU792MrJoUAFNyDueWcQlvaN6bT6TgxxEW3NXumY
t0dbyrulXBA1YvCM35o60emS9sSRT7ED9KCQprw0Q5DxBIKwKAOQuIOSTFofKmlTIqNG7LeOPAgasBhOk0/79BtikiIznZqjU27gx6n4I4Bb1NC1FZ/hAsLq9ccWKOmIYkF9lZ0drYzlx0V1noYx1p/yDFkamKNI+GovmybWJYgLfJMQrKsvHYk/UCqLQjYcJ2GBgChIthcm8u83xQiMqRJ0ZoEGRF2WfAMCiiJIhS5UCyS1bEAomP7y4GiegRmc79RsmUbc21YFavRJILBvKIWa6UMdAl1Uc3zgwv0hBJYGIXu6zB3SNheQ1V+RafQE4VOp/wdi1canzzj4UAAAAYNpQ0NQSUNDIHByb2ZpbGUAAHicfZE9SMNAHMVfU6VSKg4WEXHIUJ0siIo4ahWKUCHUCq06mFz6BU0akhQXR8G14ODHYtXBxVlXB1dBEPwAcXNzUnSREv+XFFrEeHDcj3f3HnfvAKFRYZrVNQ5oum2mkwkxm1sVQ68QEMIAIgjLzDLmJCkF3/F1jwBf7+I8y//cn6NXzVsMCIjEs8wwbeIN4ulN2+C8TxxlJVklPiceM+mCxI9cVzx+41x0WeCZUTOTnieOEovFDlY6mJVMjXiKOKZqOuULWY9VzluctUqNte7JXxjJ6yvLXKc5jCQWsQQJIhTUUEYFNuK06qRYSNN+wsc/5PolcinkKoORYwFVaJBdP/gf/O7WKkxOeEmRBND94jgfI0BoF2jWHef72HGaJ0DwGbjS2/5qA5j5JL3e1mJHQN82cHHd1pQ94HIHGHwyZFN2pSBNoVAA3s/om3JA/y0QXvN6a+3j9AHIUFepG+DgEBgtUva6z7t7Onv790yrvx/xInJz/ZaLfwAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAATrwAAE68BY+aOwwAAAAd0SU1FB+UCBxYME13qmlgAACAASURBVHja7J13eBRV+/e/W7PpBQLplFCSQCAgHUMRCFWkCYIYioCCBRF5kEceiCAiRYooiBKKIEhTeqSFAAm9EwhFSiCV9Lqbbef9wzf5EXZ2dzbZTb0/1zUX4cyZM2fOnNn5zjnnvm8BY4yBIAiCIAiCqDUIqQkIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBEECkCAIgiAIgiABSBAEQRAEQZAAJAiCIAiCIEgAEgRBEARBECQACYIgCIIgCBKABEEQBEEQBAlAgiAIgiAIggQgQRAEQRBErUVMTUBUNtnZ2Th+/DgiIyORmpqKlJQUJCcnQyAQwM3NDfXr14enpydCQkLwxhtvwM7OrsLqlpCQgD179uD+/ftIS0tDcnIykpKSIBQK4enpCXd3d7i5uWHAgAHo0aMHrKysKrTdIiMjeeXt0KEDvLy8LFIPrVaLq1evYvfu3Xj+/DkSExPx7NkzWFlZoUGDBvDw8IC/vz9Gjx4NHx8fi7ZJUVERzp07h8OHD+Off/5BcnIykpOTIZVK4eHhATc3N7Rt2xaDBg1CQEAAhEL6BiYIopbCCKKSOHnyJOvTpw8TiUQMAK9NIpGwt956i924ccNi9VIoFGzlypWsS5cuvOsFgDk4OLCxY8ey2NjYCmm/devW8a7boUOHzH7+wsJCtnDhQtaoUSNedRAIBOz1119nf/zxB9NqtWatS0pKCvv888+Zra0t7zZp2LAhW7t2LSsqKqKHkSCIWgcJQKLCuXbtGuvdu7dJ4opLTIwZM4alpaWZtW7Hjx9nfn5+5aqbUChk77//PktKSrJoOw4bNoxXfaytrVlhYaFZz33s2DHWpEmTMrdRv3792JMnT8pdD7VazRYtWsSsra3LXBdvb2925MgRejAJgiABSBCWYtu2bUwmk5VLYL28NW7cmN2+fbvc9VKpVOyDDz4wW70AMGdnZ3b06FGLtKNCoeA92vXWW2+Z9dwLFiwwS/vY2tqyU6dOlbkemZmZrH///mapi1AoZAsXLmQajYYeUoIgSAAShDmZPXu2WQVW8ebo6MiioqLKJf7GjBljkbqJRCL2ww8/mL0tT548ybsO69atM9t5Fy9ebNb2sbOzY9HR0SbXIy8vj7Vu3drs9+uTTz6hB5UgCBKABGEu1q5daxGBVby5urqyZ8+emVwvrVZrMfH38vbjjz+atT1nzJjB+9yJiYlmOefGjRst0jYODg7s3r17Jgn2AQMGWOxerVmzhh5YgiBqPALGGCNTGMKSREVFoU+fPlCr1RY9T/v27REdHQ2pVMr7mC1btmD8+PGWN7cXi3H8+HH06NHDLOUFBAQgLi7OaL527drh8uXL5T7fs2fP0LJlS+Tl5VmkfTp27Ijo6GiIxcYdEyxduhSzZ8+22L0SiUS4cuUKgoKC6OElCKLGQj4QCIuiVqvx4YcfWlz8AcDly5exevVq3vkTEhIwffr0CmuHUaNGITs7u9xlxcfH8xJ/ADBgwACz1H/KlCkWE38AcPHiRaxYscJovhcvXmDRokUWvVcajQYzZ86kh5cgCBKABFFWtm7divv371fY+ZYtW4aCggJeeWfNmoWcnJwKq5u5xMvBgwd55x06dKhZhPXRo0ct3j6LFy82eu8WLlyI3Nxci9clMjISERER9AATBEECkCBMRaPRYMGCBRV6zrS0NKxZs8ZovqSkJOzZs6fC2+THH3/E8+fPy1XG8ePHeeXz8PBAq1atyl1nPu1pDrKzsxEeHq53v0qlwvbt2yvsXv3+++/0EBMEQQKQIEzlxo0bePr0Ke/8Li4umDlzJo4dO4Zbt27h0qVLWL16NVq0aGHSebdt22Y0T3h4uEnT0l5eXggLC8OJEydw+/ZtnD9/HuvWrUNISIhJdVMoFNi0aVOZ21Qul/MWgIMGDSp3pIucnBzs3LnTpHZatmwZzp49i8OHD2PmzJmQSCS8j//ll1/07jt//jwyMzN5l9W8eXOsWbMGMTEx+PvvvzFv3jyTosgcPHgQSqWSHmSCIGomZAdDWIqwsDDelpetW7dmycnJnOUUFRWxqVOnmmTJacgiWKvVMh8fH95ljRo1iuXn5+stb//+/SY5Iu7YsWOZ2/To0aMVGv3j8OHDJjl3zs7O1inj1q1bzNvbm3c5+vrBrFmzeJcxevRoTufXjx8/ZgH+AbzLOXv2LD3IBEHUSGgEkLAYfOPUAsAff/wBNzc3zn1SqRRr1qzBm2++ybu8Q4cO6d337NkzPHv2jFc5/fr1w++//w5bW1u9eQYPHozNmzfzrtulS5fw4sWLMrWpoet6GWtra7zxxhvlvoeXLl3iPfK3Y8cOODo66uwLDAzEpk2bIBAIeJV1584dzvSkpCRexzdq1AgbNmyAtbU1576NmzbyHhl98uQJPcgEQdRISAASFuPRo0e88gUHB8PPz89gHpFIhLVr1/KeTtQnIgDg3LlzvMoQi8VYt24dRCKR0bwjRoxA69at+Y66Iz4+vkxtytcwISQkhFMAmQrfen744YdwcnLSu79Xr17o3r07r7L++ecfzvSUlBRex0+ePBk2NjZ693fs2BE9e/b
kVVZ512sSBEFUVcTUBOWHMQaVSgW1Wo2CggLcuXMHt27dwv379xEXF4fnz58jJycHOTk5EEAAewd7ODs7w8vLC/7+/vDz80Pr1q3h5+cHOzs7iMViiMXicq/fqkw0Gg3vF/bIkSN55fPy8sKwYcN4rUlLT08vtwAcOHAgGjZsyO9LSijEZ599hgkTJvDKz3c061VhpE8cvUq/fv3Mch/5jlR26tTJaJ7WrVsjKiqqzOfk25/atWtnNE9AQABOnjxpNF9GRgb9wBEEQQKQKI1KpYJKpUJGRgaioqJw6NAhnD93Hrl5uVCpVNBqtVCr1WD/RlxBsc/tzIxMZGVl4enTpzh37lyJ4HNwcECXLl0waNAg9OjRA3Xq1IFEIuHlHLeqkZubC41GwytvcHAw73J79OjBSwCmpqbq3cd3ZJLviFUxo0aNwvvvvw+tVms0b3Jyssltaor7l8GDB5vlPmZlZfHKp2/6/mVcnF14laXPFYxCoeB1vIeHh9E8devW5f2MEwRBkAAkSl4KRUVFuHv3LjZt2oRDhw4hOzsbSqUSKpUKxoKrMDBAgxKBVFRUVCKa/vzzTxw+fBhOTk4YPHgwJkyYAH9/f1hZWVUrIWiK9WS9evV45zU2VVyMoRFAvqM6zZo1M+mara2t4e3tzWvatCyOsU+cOMErX7t27XiJoPKIMUs/XwRBEAQJwCqDVquFQqFAbGwsfvjhBxw9ehR5eXlQKpVGRR8fiqeSVSoVCgoKEB4ejl27dqF///749NNP4e/vD2tr62o9NcyFi4sL77wODg6875U+0tLSeJVRv359k6+ladOmZV7fZ0yI8ZmyBMwX/cNUIW/O54wgCIIgAVglUCqVSE1Nxbp167Bx40ZkZWVZ9OXIGENRURGUSiV27tyJY8eOYfLkyfjggw9Qv359k3yrVWVkMhmsrKx45+drSWoIvlOJrq6uJpdtrpG3Vzl9+nTJSLExhgwZYrbzVsbHBt+lAwRBEEQ5ft+pCYwLsYKCAkRHR2PIkCFYvXo1UlNTK2xkhDEGpVKJFy9e4Pvvv8eQIUMQExODwsJCujllhG9MW1OEaTF8LIbLAl/3Lx4eHrytkflQGdOx5hD5BEEQhGFoBNCI+MrNzcXGjRuxZMkSpKenV9roBGMMcrkcN27cwOjRozFnzhyMGzcODg4OteqFWa9ePXz55Ze88unD3t4e+fn5Rssoi8jv1q0br9FZU6KbMMZ4u38xR/SPUl+IlTACWNOWOBAEQZAArEZotVpkZGRg0aJF2LJlC3Kyc/413qhkNBoNUlNTMX/+fMTHx+O///0vnJ2da81L09PTE4sXLy5XGfb29ryscHNyckwue/z48Rg/frxZr/nBgwe8Q+qZ4iybD5Ya0SQBSBAEUbnQL60e8ffixQvMmjULGzZsQHZ2dpUQf8UwxpCTk4NffvkFs2bNQlpamlmMUGoLfK2O+bqLsTR83b+YK/rHy8hksor/KhXTdylBEAQJwEoQf1lZWfjqq6+wZ8+eSnGDwVcEFhQUYNeuXZg3b96/IpVEIC+aNGnCK58poewsybFjx3jl69Onj8EIGNVFAEqlUuqkBEEQJAArVlTl5+fju+++w+7du6us+Hu5voWFhdi+fTuWLFnCa10bAfj4+PDKt23btkqPBJGXl8cregZgvugfL8PXRQ+ftbEarcas5yQIgiBIAJoFuVyOPXv2IDw8nLelaFUQgQUFBfj111/x559/Qi6X0400QteuXXnly8rKwpgxYyq1TSMjI3lb4por+sfL8PWFePfuXaN5+PpH9Pb2pk5KEARBArBiUKlUiI2NRVhYGLKzs6tV3RljyMrKwvz583Hnzp0yRZmoTXTp0oW3H8Vjx44hODgYV69erZS6Hj58mFe+1157DZ6enmY/v6+vLz+hetLwdLlSqcTZs2d5lWWKhTRBEARBArBcAio7OxtffvklkpOTq+VaOsYYEhMTMWfOHOTk5NB6QAPY2dmhZ8+evPNfvXoVHTp0wPDhwxEdHV1hbavVanm7fxk4cKBF6tCjRw9e+cI3hhsUeN988w0eP35stBxHR0eTQ/ARBEEQJADLhEKhwM6dO3HlypVqPXqm0Whw8eJF7Nmzh3fUiNrKxIkTTRZjf/75J4KDg9GqVSusWrUKmZmZFq1jXFwcEhISeOU1Z/SPl2nfvj0aNGjAq32GDRuGP/74Q+fZmj17NhYtWsTrfKNGjaoU1zMEQRAkAGsZGo0GiYmJWLlyZbU3oig2Ylm+fDmSk5MppqoBhg4dWuawbbGxsZgxYwY8PT0RGhpqsVHBAwcO8Mrn7u5u1ugfLyMWi/HFF1/wypueno7Ro0ejVatWmDBhAkaOHAlfX18sXbqUV1+USCSYMWMGdU6CIAgSgJZHpVJh69at1Xbql0sEJiUmYceOHRUWrq46IpVKsXTp0nKVoVAosHXrVgQHByMgIAArVqxAVlaW2ep4/PhxXvlejf4xZswYNGvWzOh2/vx5XuV/+OGH6Ny5M+963759G5s3b8bu3buRlJTE+7j//e9/8PPzo85JEARBAtCyFEfV+O2336BQKGrMdckVcmzcuBFpaWk0CmiA0aNHY+jQoWYp6969e5g5cya8vb0xffp03hav+sjOzsaZM2d45X3V+vf58+d4+PCh0Y2vdbNYLMb+/ft5+08sC++88w6++uor6pQEQRAkAC2PWq3GgQMHalwkDcYYUlJScOjQId4uRGpl5xcK8fvvv6NLly5mK7OgoAA//PADfH198dlnn5XZhcyJEyd4+daTyWRmj/7BhaurK06dOmURC92xY8fit99+oxBwBEEQJAArhqKiIvzxxx81avSvmGLDFpoGNoy1tTWOHz+Ot956y6zlajQarF69GkFBQbh48aLJx/O1/g0JCTF79A99eHl5YdiwYWYvd+rUqbzd8hAEQRAkAMuFSqXCvXv3cO/ePV4jLdUNrVaL27dv48GDB+QX0Ag2NjbYu3cv1v601uxi6sGDB+jduzdvH3jF4vHIkSO88loi+oc+vvzySyxcuNDs5fbu3Rt///03dUSCIAgSgBUjkM6ePVtjI2cwxqBQKHD27NkaKXDNjUgkwtRpU/HgwQNMmDDBrK5I8vPzMXDgQN7OpGNjY5GSksIrryWif3Dxww8/YMmSJRYpWy6X4+233640Z9sEQRAkAGsRGo0GZ06fqdFr5FQqFc6cOUOGICbg6emJjRs34vHjx5gxYwacnJzMUm5eXh7Gjx/Pa0r+4MGDvMq0VPSPV3nw4AFvVzDlEckTJ06k0WqCIAgSgJajOH7u5SuXa/QLR61W49KlS8jPz6fIICbi4+ODFStWIDExEb/++ivatWtX7jJjY2N5jaIdO3aMV3kDBgyokLaYP38+7w+l1q1bY+/evYiPj8edO3ewZs0aODs78zr21q1b2Lp1K3U+giCICkBcGy+62PlzYWFhrRC6ycnJcHZ2hlgsph5vIjY2Npg0aRImTZqEK1euYO3atdixY0eZDYfWrVuHOXPm6L0XGRkZiImJ4VWWpaJ/vExOTg7++usvXnnHjRuHX3
75BVKptCQtICAAI0aMQO/evXHnzh2jZezYsQMTJkyosf0pPj4eW7ZswYULF5CUlAShUAgvLy907doVkyZNQp06dYyWcffuXWzbtg3Xr19HSkoKRCIRvL290b17d0yYMAGOjo4m1enZs2f47rvvSqUJBAKsXr2as5/m5ORg27ZtOHnyJJ49ewYAaNCgAfr27Yt3330Xtra2Osds3LgRV65cMale3bp1wzvvvEM/QgRhQZFQ6ygqKmL79u1jdnZ2DECN3uzt7dmBAweYUqms0DZOSUnhVT+ZTFbt+k9mZib7/vvvWaNGjcp0T44fP6637B07dvAqw93dnanVas4yXn/9dV5lnDx50ui1HjlyhFdZ/v7+rLCwUG85169fZxKJxGg5YrGYKRQKzjJ8fX151SU2NtbodX399de8yvrkk0/M0mfUajWbN2+ewTZwcHBg27dvN/i7NXXqVCYQCPSW4eXlxSIiIkyq27x583TK6dmzJ2feY8eOMTc3N73n9/HxYWfPntU5bvTo0SY/J2PGjGEEQViOWjsFHB8fXyuMI7QaLeLj42kdoBlxdnbG559/jvv372Pbtm1o3LixSccbWuPH1/3LoEGDKiRm7uXLl3nlmzhxIqytrfXuDwoKQq9evYyWo1arcf/+/Rr3ezN58mQsWLDA4FR6bm4uxowZwzkNrtFo8Pbbb2PdunUGl3MkJCSgf//+CAsL4z0bwnW+8ePH66SdP38egwYNMmig9OzZM/Tr1w83btygHwqCqOLUWgGYlZVVK9bFaZkWmZmZ1NMtgEQiwbvvvovbt29j1KhRvI+7efOm3pcxXwH45ptvVsg1Pn/+nFe+jh07Gs0TEBDAq6x//vmnRvWTXbt2YdOmTbzzf/DBB0hISCiVtnbtWt6xoQHg66+/5pU/OjoaT548KZXm6OiIt99+u1SaSqVCaGgoLyOmgoIChIaGkvcBgiABWDXJy8urFQKQMUZGIBbGxsamJCYwHy5fvsz5crxx4wbS0tKMHi+TyXiNppmDjIwMXvlcXFyM5nF04Lc2ja8LnOrCt99+y5nu5uYGe3t7nXS5XI7vv/++5P9KpRKLFi3iLKN+/fp6fVd++eWXRp/7zZs366SNHj1aZzT39OnTnMK8Tp06nOsWb9++jaioKL3n9fb2RqdOnUq2Vq1a0Q8JQZAArBhRJJfLa40ArOnGLlUBiUSCuXPn8spbWFjIKaz4jvD06dOnwqJ/VMboMR8RXF14/vw5bt26VSrN1tYWkZGRSE5ORmZmJj777DOd4/bv31/y96lTp5Camlpqv4eHB65cuYKUlBTk5ORg1apVOmXExcXh3r17euuWn5+P3bt366RPmTJFJ+3cuXM6aZMmTUJycjKSkpLw/vvv6+w/efKk3nN/+OGHOH/+fMm2cuVKnTweHh70w0IQJADNj1QqhUAgqDXXSliebt268V6XxyVy+Lp/qcjoH5URJrGoqKjG9InHjx/rpI0aNQo9e/YEAIjFYixbtgzNmzcvlefJkyfIz88HAJw5c0anjKVLl+K1114rKWP69OkYNGgQpwjUx549e1BQUFAqrU2bNmjTpo1O3hcvXuikLVmyBBKJBFKplNO9UU5ODu924loXW1HLHAiCBGAtQiAQwM7OrlYIQIFAAHt7+1ojdisTmUwGPz8/XnlfneZMS0vjHTPY3HGLq5oYq0lrx7KysnTS2rdvX+r/YrEYb7zxhl4BlZiYyEsccfULQyKMy/hDnwuevLw8nbSXp/25lgCYMvPw6tpXOzs7dOrUiX5UCMKC1FrHcA4ODrVKABIVg5ubGy9/d9nZ2aX+//fff/NaktC2bdsKif5RDJ9F/+ampkcDqV+/Pufv0asUW+7n5uaWSpdIJJz5GzZsyLsOT58+xanIU6XSrKysMHbsWLPdk5eP+fzzz0sZSnXo0KHk78ePH+tYfvft25dmLgiCBKBlRJGnp2eFuNGobIqdxNIIYMXAN+rFq1OrR48e5XVcRUX/eFWE1PRzViRCoe7Ey8CBA1GvXr1SacVhCO3s7EqlazQaMMZ0nukmTZpg/vz5pdKCgoI467BlyxYwlP7gGDFihN7+W5b10i/fx3bt2umNpsM1/VtRRk4EQQKwliEUCtGwYcNaIwB9fHw4XzqE+eE7avGyAFSr1Thy5Aiv44YOHVrjxVhttFgPDg7Wa0Vet25dnXvyzz//oGnTpqXSGzZsyMv/n0ajwW+//aaTPmnSJIMfzeYQulxwrX3lWs9IEAQJQLMIQC8vL0gkkhp/rRKJBF5eXjVGAGZmZmLdunVG83l6enI6s3327BkvP3MeHh681/OVeqB4htt7eW3drVu3ONeKcYn5mzdvIjY21mC+Vy1G9b54jx7T8TcXGhpqOQHIU0OQy6LSNGjQQCdt+vTp2LdvX5mmSc+dO6djnOLr64tu3boZ7Htl+e0xRkFBgY61cIsWLeDt7U03niBIAFpGALq4uKBJkybIzMyssVNOIpEIzZs3h7Ozc40RgPHx8bzcrQQEBHAKwF27dmHWrFlGj58yZQrWr19vcv34ipeX/ay9usZLHxqNBhMnTjRbWy5Zqmu5+aoANCuk68rEsGHDdFzFREREoFmzZpg9ezbee+89nWliQ3A5pR43bhyUSiUkEgmn2OMSmkqlsiSda60onzpFRUXpGBqFhITQTSeIitBCtfGiBQIBxGIxunXrVqNHASUSCbp37857VIooP3wXyzs6OlaL66mMvkPLFUrj7e3NaSUcHx+PadOmwc3NDZMnT8bVq1eNllVQUMDp+2/hwoWwtraGtbU13njjDZw6VdpAxNbWVueYv//+u5QgfRVXV1ej9Tl8+LBOGk3/EgQJQIu/ZLp16wYrK6sae41SqRSvv/56lV7rWFWn+8paL76uL6qLAKyMvlMb1uaayoYNG0qMQrhE3YYNG9CuXTsEBwcjOjpabzl79uwp8S/4MsUxilUqFU6dOoXevXtjx44dJfu5nDKPHj0aM2bMwIwZMzBmzBid/a+//rrRZ+xVAWhjY4OuXbvSDScIEoCWQyKRoF27dqhXr16NtJAVCoVwc3NDu3btqvQIYFFREecLSR9c/shMbRc+JCUllal8vs5vudx4VEVkMlmFn5NGrHVp1KgRoqOjjcZTjo6ORvfu3bFlyxbO/du2beN1Pq1Wi6lTp5YsT+DyyVdYWIhVq1Zh1apVkMvlpfbZ29ujS5cuBs8RFxeHZ8+elUrr06dPjf4oJwgSgFUAgUAAR0dHDB06tEb6m5JKpRg6dGilObw2pU1Nif3KN68+gcXlg40LPr78uDAUeutlKtKXX3moDB+Sr8ahJf6lRYsWuHHjBjZu3Ah/f3+D4m3KlCmIiYkplZ6cnGwwPBvXx0zxNG/Xrl3RokUL3seOGDHCqJAj9y8EQQKw0hCJRBgxYoRJC6irC3Z2dhgxYkSlrXE0ZeTo7t27vPOePXuWVz53d3fOdC8vL17Hx8fH4/nz5yZdc3JyMi+Bam9vDzc3t2rRj/iOVPIZMU3PSOdVlj5fdHyF4auWzVzwjTdc1T4OJRIJJkyYgNjYW
Jw6dQojR47kHDFVKpVYvHhxqbQTJ07oLG0ICgpCTEwMYmJiOH0GFq8rFIvF2LNnD6dFMhf6Ioq8DJfvSwr/RhAkACsEsViMgIAAhISE1ChjEKlUin79+sHPz6/SptNkMhlv8RAeHs4rn1wux59//skrr76RPn3C8FUYY1i4cKFJ18xlXclFUFBQtTF04LOQHwAuXLhgNM+NGzd4laVvdJTvvbty5YrRPMZc6RRTt07dqvnDLRSiR48e2LlzJx49eoR33nlHJ09kZGQpf5Nc17xjxw506dIFXbp0wfbt23X2v7w8w8/PD7du3cKKFSswevRoDB48GN27d9c5pnHjxkbX8WVnZ+vEOG7evLlJ0UwIgiABWGYEAgGsrKzw4YcfVptF+Xyuyd7eHh9++GGlrN96uR58/egdOnQIDx8+NJpv8eLFvNfm6Zsia9iwIe9oHZs2beItFB49eoTly5eXq27VWQD+9NNPSE/XP8IXERHBe/RWnw84vtP3P//8s8G1omfOnMHpqNO8yvLy9qry98jHxwe///47Bg4cqPPB9PLoOtf61GbNmnH+XcyrEWscHBwwY8YMbN++Hfv370eTJk10jgkNDTX6gXP8+HGdmM/k/oUgSABWKGKxGG3btq0xawGlUinefvtttGrVqtIX0+sLQ/UqWq0Ww4cPR3JyMud+tVqNJUuWmDQiN2zYML3tM3jwYF5lqNVq9OvXD5cvXzaY79q1axgwYAAvZ85V+UXHNQrOV6ympqZixIgRnFOrFy9eNMl/YePGjTnT+a6bTEhIQGhoKKcIjIuLw/vvv68TBk0fvr6+1eOHXCjknD7NzMws+ZvLQv1locYl2oqtg7nIz8/Hzp07dT78uPxvcn0QvMqrApYgCAvrn9reAAKBANbW1pg5cyZOnjyJJ0+eVNtIBMURTj7//HPY2NhUen169OiBX375hVfe27dvw8/PD6GhoXjjjTfg5uaGxMRE3LhxA7t27eI1QlhMu3btDK71Gzp0qF4ryVdJTExEx44d8eabb6JPnz7w9/eHvb09MjMz8ejRIxw9ehQRERG8/f/Z2Nigf//+pdI6d+6sV/yWhcGDBxsVrcC/8WBfFqNcArB9+/a8z3v69Gm0aNECH330Edq3b4+8vDxERkZiw4YNvJ2t+/j46IQ+e/m6lixZwqucffv2ITAwEFOmTEHbtm2Rl5eH8+fPY+3atTqOh/Xh8Hyl7gAAIABJREFU7OysN35tVYRr+jQ7O7vk71dH3Ph+nOnjjz/+0LHg79mzp9F1ghqNRsf9i0wmMxiJhCAIEoAWE04NGjTAnDlzMGvWrFI/mtVJyDo4OGDu3Lnw8fGpEr7U3nzzTdjZ2fF285Kbm4sff/wRP/74Y7nOO3XqVIP7+/fvjyZNmvAKCQf8ux7wwIEDOHDggFna5FVxbmVlZVajEL5uNLy8vIyet1mzZnB3d+ctUNPS0njFo9VHnz599O7r0KEDXF1deRtwxMfH46uvviqXkK6otcG//fabzvrWTZs2wdnZGdOnT0dBQUFJeseOHTF58mSdMrjq+vKoX1k+bA0ds3nzZp20sWPHGi3z5s2bePHiRam0Xr16kfU3QVS09qEm+L+1gCNHjsTIkSMrde1cWZHJZHjnnXcwbNiwKuNHy87Ozqyhy/jg7+9vNJyZVCrVsZCsKN5///1q1a9EIhGmTJlSYeczdC6xWIz33nuvwupiqXNxjardvXsX+/fvL7UVfzht3boV4eHhJdv+/fs5y311vR5Q2nCmLIZH+j4k4+LidNzM2NraYuTIkUbLJPcvBEECsMqJQDs7O8ydOxcdOnSoVlbBEokEHTt2xNy5c6ucS5uvv/6a9+J9c7B48WJeax+HDRtmNFKBuSmeRq5uzJw5k7cFbnl4++230aFDB4N55s6dy9uIpzz079/fLKKE63eEy5CJa4S1WHy9+kH39OlTznM9fvxYJ+3lpRBcdXlZjHIJU32/g7/++ivn/eMKGfcqL4eQe/nZIAiCBGDlNcb/j56xbt06tGzZslpEJBCJRAgMDMTatWtRv379KudexMnJCd9//32FnOujjz7CW2+9xfte79ixo8L88UmlUqxYsaJaPhf29vZYtmyZRc9ha2vLq584OzuXa4qZD2KxmLdFtzHq1aunk3b8+PFS/8/Pz0dkZKTOB2mx0H3VKvrOnTu4ePFiqTSNRoN9+/bpnOvl/s0lnO/fv1/yN5cTcy5H4EqlElu3btVJ5+P778WLFzp1b9y4Mac1MUEQFoYRpdBqtayoqIhdvnyZBQQEMJFIxABUyU0sFrMWLVqwK1eusKKioirbphqNho0dO9aibREcHMyUSqXJdYuJiWFWVlYWrZtAIGDh4eEV1t6vv/46r3qdPHnSpOdiypQpFmufv/76i3dd1Go1Gz58uMXu18aNG812L9LS0phQKNQ5x6xZs9iNGzdYVFQU6969u87+zp07l5Tx+eef6+x3dXVlmzZtYnfv3mVXr15l77zzjk6eBg0aMK1WW1LOqlWrdPK0atWKnT59mp0+fZq1atVKZ/93332nc0179uzRydeoUSOm0WiMtseWLVt0jp06dSq9eAiiEiABqOdlp1Ao2PXr11n79u2ZVCqtcuJPKpWyjh07suvXrzOFQlHl21SlUlnspd2tWzeWlZVV5rpFREQwJycni92rtWvXVmhbW0IAMsaYUqlkvXv3Nnv7LF++3ORrLCwsZB06dDB7XebMmWP2+9GrVy+T6/Hrr7+WHB8XF1emD9FZs2aVqsedO3dMLiM2NlbnegYMGKCTb968ebzaYvTo0TrHHjp0iF46BEECsGqJQKVSyeLi4lhISAizsbGpEsJPIBAwW1tb1r9/f/bgwYMqPfL3KkVFRWzixIlmbY8xY8aYpQ3u3bvH/Pz8zFo3Gxsbtn79+gpvZ0sJQMYYy8vLY6NGjTJL+0gkErZq1aoyX2deXh4bMWKE2UbTV61aVWrEzFycO3eOCQQC3nVp3749U6lUpcqYPXu2Sdfj5OTE0tPTderCNVKob3vrrbd0jn/+/DnntTx+/JjXB4Sjo6POh2x+fj69cAiCBGDVE4EqlYqlpKSw//znP6xOnTqc0zkVtQmFQla3Tl02Z84clpqaytRqdbVs1yNHjjAvL69ytYW3tzfbvn07r2knUwRFWFgYs7e3L/e9GjhwIHvy5EmltK8lBWDxc/HLL78wmUxW5vZp3Lgxu3TpUrmvVaPRsNWrV+sIC1O25s2bs9OnT1v0nvzwww+865KQkMA5gs5XvMlkMnbixAnOeuTm5rLAwECjZTRp0oRlZmbqHL9gwQKdvD179uTVBjExMTrH9u3bl140BEECsOqi0WhYbm4u27dvH3vttdcqfDRQIBAwGxsb1q5dO3bw4EGWl5dnVuFTGRQUFLBNmzZxrn8ytDVt0pQtWrSIFRYWWqxuGRkZbM6cOczX19fkUZf33nuPRUREVGrbWloAFvPo0SM2ceJEZiXlv4ayfv367LvvvjP7qE9WVhYLCwtjderU4V2Xli1bsq1bt+qMtlmKAwcOsCZNmuj9uJs4
caLBpQwajYb9/PPPzN3d3eBa2Bs3bhisR25uLhs/fjzntLJQKGTvvPMOp/hTq9WsUaNGOsfwXd/KNYq5dOlSesEQRCUhYKyahr0om8ELNBoNGGPQarUlTk6L/xUIBBCLxZzWv4wxqFQqpKenIzw8HL/++ivS09Mhl8stVl+BQACZTAbXuq6Y8sEUTJw4ES4uLnr9/KnV6lKhmwQCQYnFa/G/IpGoJL2qkJSUhMuXL+PChQt49OgRMjMzkZGRARsbGzg4OMDd3R0+Pj4YNmwYAgMDK6z+jDE8fPgQBw4cwMOHD5GWloaUlBTk5OSgbt26qFevHlxdXeHi4oIePXqgW7duVSKc4M2bN3k5S+7YsSOnlaep5Ofn4++//8b58+fx8OFDpKenIycnB2KxGA4ODvD09ETz5s3Ru3dvdO7c2aLW9Wq1Gjdu3MDhw4fx8OFDpKamIjU1FVKpFPXr10f9+vXRtm1bDBo0iDNyhqXRaDSIjY1FXFwc7t+/D5VKhUaNGmHgwIG8LdKVSiUuXbqECxcuIC0tDTY2NvD29kbPnj3RqFEj3nVJTk7G0aNHS6IfNWrUCCEhIXpD7mk0GkRFRek4h+7atSsvJ85xcXFITEwslda2bVu4uLiQNSZBVAI1WgAyxqBWq6HVaqHRaJCXl4eEhAQ8f/4cCQkJSEpKQl5eHgoKCiCXy2FlZYWpU6eiVatWen/QtFotioqKkJiYiG3btmHz5s3ISM+AokjBOxyYMSQSCaysrFC3bl1MmDABY8eOhbu7O6ysrDjdvDDGUFRUhLt37+Knn35CYWEhZDIZ7OzsYGdnBw8PD/j4+MDT0xM+Pj6wtbWFSCSCSCSqVv4OCYIgCIIgAagXlUoFtVqNgoIC3Lp1C9HR0Th//jxu3rwJuVwOjUYDjUZTMgpYvAkEAnh5emHpsqXo27cvbG1tOUebikcQVSoVMjIyEBkZid27d+PChQuQy+VQq9Ulm9EbIBBAKBRCLBZDKpVCJpOha9euGDFiBLp37446depAKpXq9cjPGENhYSFOnjyJmTNn4vnz59BqtRAIBCVb8cifUCiEtbU1AgMD0bVrV3Tp0gVBQUGws7ODRCKpFn4PCYIgCIIgAVhKCKnVaigUCty8eRN//fUXDh06hNTUVBQVFZkkyFxcXPDxxx/jo48+gpOTk8FRMo1GUzL1+uLFC9y8eRPnzp3DlStX8PDBQxTKC0sJzeJzFAszW1tbNGnSBO3atUOXLl3Qpk0buLq6lggyQ46d1Wo1srOz8csvv2DlypXIzMw0GLz9ZcEplUohkUhQr149DBgwAEOHDkWbNm1gbW1dJaYxCYIgCIIgAWhQ+KlUKuTm5uLEiRMIDw/H1atXIZfLoVQqjQoifSLJxsYGXbt2xaJFi+Dv7w9ra2uDYqx4VPDl0UWlUon09HRkZWWhsLAQBQUFYIzB1tYWtra2cHFxQd26dSGVSiEUCktGAo1F82CMQS6X4/79+/jf//6H06dPl8QNLcu1SqVS2NjYoHXr1nj//fcREhICJycnEoIEQRAEQQKw6lE8zXvy5EksX74cd+7cQWFhoVnW4gkEAohEIri6umLatGmYMGEC6tatq9cAw5AoLBahL48AAv9nlGGq2M3IyMCWLVvw048/IfVFainDj/Jcb/GIpL+/P2bOnImQkBDY2trS1DBBEARBkACsfF4eAfv6669x+vRp5OXlQaPRmCTuii1iBQIBBBAAAu681tbW8PX1xaeffooBAwbA0dGxwi1ptVptibXl6lWr8eDhAygUCr0jnMVTzsUC9GUhagyRSAQ7Ozt07doVX3/9NVq0aMHLyo8gCIIgCBKAFkGj0SAnJwe///47li1bhpSUFKMjYMXTnMXr3lxdXeHr64tGjRqhTp06sLW1hUwmMyjqioWgt7c3OnfubBb3GaYI3oKCAly8eBHx8fGQy+UGxVyxpXJhYSEyMjLw9OlTPHr0CC9evChZD2lserxYJNerVw8zZ87EuHHj4OTkZNKIJUEQBEEQJADLjUqlQkJCAv475784EnEE+fn5ekXMy2vbfHx8MGDAAHTu3BkBAQGoV69eqdG/4vy8Guz/+wo0tk7PEiJQpVLBlNv1soWzRqNBWloa4uLicOHCBRw+fBhPnjyBXC5HUVGRweu1s7NDSEgIlixZAm9vb1obSBAEQRAkACsGhUKB2NhYfPLJJ7h+/bpe0VI8UlenTh0MHz4cI0eOhL+/P2QyGW8ji5rIy9PBxSOAcXFx2Lt3L3bv2o209DTI5XJOgVkspgMDA/HDDz+gbdu2Jq2FJAiCIAiCBGCZxN/ly5cxefJkPHr0iNPIQyAQwMrKCu7u7vjwww8xatQo1KtXD2KxmJwd6xGExe5r0tPTsXv3bqxduxZJSUlQKBScx4hEIjRs2BDr169Hly5daF0gQRAEQZAAtJz4O3/+PCZPnoynT59yGnqIRCI4Oztj3LhxmDZtmsGoGYQuGo0GKpUKKSkpWL9+PcLDw5GZmcnZ1kKhED4+Pvj555/RvXt3yGQyakCCIAiCIAFoPpRKJa5fv46xY8fi8ePHnOv9rKysEBQUhEWLFqFjx46wtrYmQ4VyCEG5XI4rV67gq6++wrVr1zhHA4tF4NatW9GhQwdaE0gQBEEQJADNJ0b++ecfjBkzBjdv3tQZjSo2ThgyZAgWLlwINzc3WpdmBoqNTVJTUzF//nzs3bsXubm5OvlEIhFatGiBHTt2oFmzZuQrkCAIgiBIAJYPrVaLjIwMjB8/HidOnIBSqdQRf46Ojvjss8/w6aefwsHBgUb9LCDAc3NzsW7dOixfvhxZWVk6eSRiCbr36I6tW7eifv36Fe4bkSAIgiCIsiEKCwsLq2qVKiwsxJo1a7B9+3bI5XId8efk6IS5c+fio48+gqOjI631swBCoRAymQxt2rSBs7MzLl68qHMvGGNITU2FUChEx44daSqYIAiCIKrLe76qVUilUuHixYtYs2aNTnxbgUAABwcHzPrPLEyZMgX29vY06mRBiqfZx48fjzlz5sDR0VFHABYWFmL9+vWIiYkxS0g6giAIgiBqmQBkjCEnJwcLFixAZmamzn5ra2uMHz8e06ZNg52dHYm/ChSBU6ZMwaRJk2BjY6Nzz7KysrBgwQJkZWWhGoeWJgiCIAgSgJWBUqnEn3/+iWvXrukYfYjFYnTq1AlffvklHBwcSPxVggicNWsWgoODdQw+tFotbt26hV27dhmMKkIQBEEQRBV5t1cVIxCtVouUlBT06tULDx48KOXyRSAQwNPTE/v370dgYCA5dq4k1Go17ty5g7feegvPnj0rNdonEAjg6+uLyMhIeHp60rpMgiAIgqjCVJm3tEqlwqFDh5CQkKDj78/GxgbTp0+Hn58fib9KRCQSoVmzZvjss89ga2tbah9jDMnJyfjrr79oLSBBEARBkAA0DmMMBQUF+PXXX1FYWKgjOvz9/fHee+9R6LFKRiAQQCaTYcyYMQgMDNQZ5ZPL5diwYQNyc3NpLSBBEARBkAA0jFqtxuXLl/Hw4UPO0b9PP/0UTk5OtO6
viohAJycnfPrppzqjgFqtFs+ePcP58+c54zUTBEEQBFE1qBLhG9RqNQ4ePKgTdkwoFKJxo8bo168f+ZirQkgkEvTu3RvNmzfHtWvXSol2uVyOgwcPonfv3lVuul6lUmH+/Pk6jsW5sLW1hbu7O7y9vdG5c2e4uLiYdK6oqCgcOnSIc9/YsWMRFBREHamKcunSJezatYtz33vvvYfWrVvX2nu9evVqPH/+XHckQSjE0qVLLXbenJwcHDx4EH///Tfi4+ORlJSE3Nzckme0S5cuGDFiBJo3b07PZy3rG0Q5YJWMVqtlqamprHHjxkwgEDAAJZuNjQ1bsmQJk8vljKhaKBQKtmLFCmZtbV3qngkEAubt7c2Sk5OZVqutUnXOzs4uVVe+m0gkYj169GDbtm1jGo2G17mWLVumt7ydO3dSB6rCLFmyRO+92717d62+161ateK8TqlUapHzyeVyFhYWxqysrHg9qwMHDmR37tyh57MW9A2i/FT6FLBarcb9+/eRmZGps27M2toaAwYMoDizVXHoWCxGv379OI1BcnJyEBsbW2OmgTUaDaKiojB27Fh06tQJt2/fpg5AEBYmISEBbdu2RVhYGG/3UocPH0ZQUBA2bNhADUgQRqh0AajVanH16lUoVaWn5UQiEQICAtCgQQMSgFWx4wiF8Pb2RqtWrXTiMKtUKly5ckVnPWdN4PLly+jevTvOnDlDnYAgLERiYiK6d++OuLg4k49VqVSYPHkyFixYQA1JEFVdAF66dEnHdYhEIkFwcDCt/auiCAQCiMVidOvWTWetn1qtxoULF3ScedcUsrKy8Oabb+LJkyfUEQjCAu+E8ePH4/Hjx+UqJywsDHv27KEGJYiqLABv376tM10okUjQoUMHcihclTuPUIgOHTroCECNRoM7d+7UWAEIALm5uQgNDSV3NwRhZjZv3owTJ06UuxzGGKZOnYrc3FxqVIKwlABkjJVp02q1KCwsREpKis6LVCQSoWHDhuUqnzbLbkKhEA0aNNCZAtZqtcjKykJubi40Gk2Zy69IbGxs0KxZMzRr1gwNGjTQiXnMRXR0NGJiYuhXhCDMBGMMK1euNJrP0dERrq6uRvOlp6djxYoV1LAEwYG4rA+pUqmEVquFUqmEQqEo0wtbq9Xi6dOnUCl1I0cUTzFmZGTQXarKXxBCIecorVqtxoMHDyAUCsvkv1EoFEImk0EsFpdslhwN7t69O44cOVKqbz5+/BgbN27E8uXL9UY3Wb9+PV5//XWd9HHjxqFr166cx7Ro0YI6Tg2C7rX5uHDhAmJjY/Xuf+2117B582a0bNkSAJCSkoKlS5caFI07duxAWFgY3TOCKI8AZIxBpVIhLS0NR48exYEDB3D16lXI5fIyf+1pNBrk5ukO0WdnZyM4OJimgKs4Wq0W2dnZOum5ubkYMmRIuXwB2traok2bNhgwYAAGDBgAV1dXWFlZVZiwbdKkCb799lu89tprGDFiBGe+kydPcqa7urryGqEgqj90r83H33//rXdfmzZtEBkZCQcHh5I0Nzc3rFixAmq1GmvWrOE87sGDB3j+/Dm8vb3pnhFEWQQgYwyFhYWIiIhAWFgY4uPjoVAoSqb4zA1jDFlZWXSHqimMsXKvvcnMzERycjJOnjyJ1atX4+uvv0b//v11XM9YmqFDh6J9+/a4fPmyzr7k5GQkJSXBw8ODbjpBlJMrV67o3Tdz5sxS4u9l5syZg59//lnvSH1CQkIpAUgQBM81gMXiLzw8HNOmTUNcXBzy8/OhVqtpETxhURGpUqmQn5+Pe/fuYerUqdi4cSMKCgoq9iERCjFp0iS9+5OSkuhmEYQZuHv3Lme6RCLBsGHD9B7n7u6Ovn376t2flpZGjUsQZRGAKpUKUVFRWLRoEdLT0mukfzeiaqPVapGRkYFvFn6DU6dO6f3StxR+fn5692VmZtINIggz8OLFC850Dw8PWFtbGzy22GhQ38ckQRAmCsDiqbwFCxYgPT0dDPQgEZUDYwwZmRn4+uuvkZ2dXaE/6l5eXnr3lXUNLEEQ/4dSqURhYSHnPjc3N6PH05o+gjANo2sAVSoVzp8/j/v379PIX1kUtlAIe3t7HUtYxhjy8/NrtK88S6DRaPDw4UOcP38e/fv3L5eRiSmYGtbu2rVrOHv2LOe+AQMGoGnTpkbLKCoqwr59+3D+/HmkpKQgISEBVlZW8Pb2hpubG4YPH4727dubtT7Xr1/Hpk2b8OzZM6SkpMDDwwNNmzbFtGnT0KBBA51jHz16hE2bNuH+/ftISEiAUCiEp6cnOnbsiNDQ0DK9lLmuWywWo379+mjevDmGDBmCoKAgkw3E1Go19u3bh4iICDx9+hT5+flwc3ND48aNMX78eLRu3bpMfcMc9xoAFAoF9uzZgytXriA5ObnkftevXx9+fn4YOnQoAgMDTbaqz83NxdGjRxEZGYnExESkpaVBJpPB1dUVAQEB6NevHzp27Fgma31zIpFIIJPJoFAoOH9HjdG0aVMEBgZy7rO3tzfbPXu1HxUUFKB+/frw8fHB2LFj0bFjRwD/Gp9ERERwljFw4EA0adKk3M9ocnIyXFxc0KJFC4SGhqJVq1Ymt3t16R+EZUZVjAbjnjVrFu9g3LT93yYUCpm/vz+7ePEiu3XrVqnt4sWLzN/fnwmFQmorEzeZTMa++OILJpfLTQp8nZ2drbfM/v37Gzw2KipK77ERERFmDTavUqnYwoULmaurq9G2aNOmDTt06JDRazdWn8LCQjZq1Ci9eSQSCfvmm2+YVqtljDGmVqvZ7NmzmUAg0HuMtbU1+/HHH0uOMYYp192hQwcWFRXF+94nJiayLl26GCxzzJgxLD8/ny1ZskRvnt27d5v1XjPGWFFREZs7dy5zcXExet1dunRhMTExvK65sLCQLViwgDk4OBgtNygoiB08eNBoma1ateI8XiqVmiU4va+vL2f5TZs2ZeakrPeMTz8aMmQIy8vLM3s/MvaMAmCTJk1iRUVFldI/LN03CPNjVAAWFBSwIUOGMLFYTOLDxE0kErH27duznJwcplKpSm05OTmsXbt2JADLsEkkEjZ06FBWWFhYYQJw3bp1eo+Njo422wsmLS2N9ejRw6T2EAgELCwsjGk0mjK98LZs2WL0pVa8TZ48mRUUFLBhw4bxrt93331n9N6U9bq//fZbowIzNzeXBQQE8Cqzffv2LCwsrMIEYFJSEuvatavJH5YrV640WO6zZ89YmzZtTH62Zs6cabAfWfol361bN733OikpqVIFoCn9KCgoiH311Vdm60emPKPvvvuu0eu3RP8gAVhDBWDfvn1JAJZDAObn5+u0a35+Pmvfvj0JwDJsYrGY9e3blxUUFFSYAOzUqZPeYxMSEszygikqKmIdOnQoc7t88803ZXrheXl5mXSeOnXqmCzUrl+/bnAErDzXvXz5coP37j//+Y9J5VlbW1eIAJTL5ax169Zlvu61a9dylpuVlcWaNm1qkX5k6Ze8oRGuWbNmVaoANLUfiUQis/UjU5/R/fv36712S/UPEoDVD95uYMiKiq
itnDhxAhcuXODc16xZM7i7u5vlPPPmzcOlS5fKfPz8+fP1riEyREJCgkn5TY3OwxjDwoULLXbdc+bMwY0bN/Su11q/fr1J5VWUUc8XX3yBmzdvlvn4zz//HA8ePNBJHz9+PB4+fFiufnjt2rVKedZ69Oihd9+yZcuwYMGCCvcAUNZ+ZM713aY+o6tWrdK7rzr3D8K8UJgNgtBDUlISli5diqFDhxr8MTVHtJqkpCR8//335SpDo9Fg7ty5VbItIyIiOBf3m+O6VSqVXoH54MED5OTkVLn2ePLkCdauXVuuMhQKBRYvXlwqLSoqCvv37y9XuVqtFvPmzauUdhk+fLjB52n+/Pnw8/PDqlWrKjRQQFXtR/o4c+YMpyP+6t4/CBKAOggEAshkMtjY2EAqlZa5HIlEAmtr63KVUVnXb21tDVtbW8hksjJZawkEAlhZWcHGxgYymazWheCLiIhAs2bN0KxZMzRu3BgODg7w9PTE7NmzkZ+fz3mMl5cXpkyZYpbzb9u2zWRLY30//E+fPq1y7SuXy3H//n2LXfdff/2F5ORknfRHjx5Vyf62ZcsWs8yqbN++vZQw+fHHH81SvyNHjuj1yWdJXF1d0adPH4N5Hj9+jBkzZsDDwwOhoaGIjo62+AxVVe1Hhj4GuUaHq3v/IEgAlkIkEqFJkyb4z3/+g5UrV2LSpElwdXU1SQQJBAI4OTlh6NChmDt3LiZOnAgPD49qIQRFIhEaN26MOXPm4JdffsHs2bPRoEEDkwScQCBA3bp1MWnSJKxcuRKzZs1C48aNIRKJatXD8PDhQzx8+BBPnjxBXl6ewbw2Njb466+/UKdOHbOc++LFiwZfijt27EBGRgaeP3+O2bNnG+zfZ86cMfn8ISEhiI2NRXZ2Ni5evIg2bdoYPaZv376IjY1FTk4OLl68aNSNClc0BnNdN2MMkZGROunGnHQPHjwYd+/eRXZ2NiIjI9G8efMK6WuGrtvd3R179uxBVlYW4uPjMX36dL15lUplyfIEuVyOI0eO6M3brFkz7Nu3D9nZ2YiPj0dYWJjevIwxHDt2rFKew//+97+88ikUCmzduhXBwcFo2bIlVq1apfdjrbwY60ft2rXDtWvXkJmZiaNHj1ok7Jypz2h6errOR1hN6B+EGeFjBBISEmJwQSsq0ciiVatW7M6dO6ywsJApFAqWm5vLDh8+bNJCdWdnZ7Zz506Wk5PD5HI5y8/PZ7du3WLvvvsuc3Z2LrMBjCWNQAQCAZPJZOy1115jN2/eZAUFBayoqIjl5+ezS5cuMR8fH4MuOl4up27duuzIkSMsLy+PKRQKVlBQwGJjY1lgYGCVNFKxhBGIKVtAQAC7ceOGWReZt23b1qQF3aGhoXrz/+9//zOpPu7u7jquI9LS0pidnZ3eYzw9PZlSqSx1zIsXL5itra3eY/bu3WvR654xY4ZO/p9++klvfl9fX6ZSqUrlT0hIYPb29hY3AmnevLneY47j7ijwAAAgAElEQVQdO6aT35DV9dy5cxljjF2/ft2gEQGXFe3EiRMNWnxW1kL/RYsWlenZdHZ2ZgsXLtTpm+W9Z4b6kYODA8vOzi6VPy4uzuBvp6n9qCzP6KvnsHT/ICOQGmoEUlWRyWT4+OOP0ahRI1hbW8PKygp2dnYIDg7GoEGDeDkJlkqlGDhwIPr37w8HB4eSqWR/f3+sW7cO27dvR8+ePeHo6AixWFzp1ywQCCCRSFCnTh2MGzcOu3btgp+fX8n0t42NDQICAvDuu+/yGsGUSCQYNGgQXn/9ddjZ2cHKygrW1tbw9fXFxx9/DJlMRl9JrzBw4EAEBASYtcxXv9aLcXJywuDBg3XSDcUmNjZ6+SojRozQ6St169bFiBEj9B4zfPhwnefL1dUVw4cPr7Tr5loTplQqDV73q8+0p6cnRo4cafE+pC82raenJ+cU6Pvvv6+3rOzs7JIRbH188sknnMZKoaGhRsutDIpnNKysrEw6LisrC//73//QoUMHxMfHm60+hvrRqFGj4OjoWCrNz88PQ4YMMdv5y/KMcs1w1JT+QZgHcbWuvFiMgICAUi8igUAAqVSKwMBAiMVioxZjYrEYQUFBpR4ugUAAsVgMOzs79OrVC507d0Z0dDQ2bNiA6Oho5OfnQ6lUVmhkFIlEAqlECnsHe/Tr1w+TJ09Gq1atYG1tXWqqtrjuTZo0gVgsRlFRkdHrDwwM1Ll+kUgEf3//KiF6qxrLli1DVFQUTp06BVtbW4sKgmbNmnGmG4p7aupi9UaNGnGm+/j46D2mcePGJh9j6evmWvRuyBJT7zkaNLRo/1Gr1XoNGPRFoGjevLneKT8nJycAQL9+/XDu3DnOPC1btjS5PSvzBS8QCDB58mS0a9cOX375pcnTjTdu3EDPnj0RHR0NDw+PctenLP2IbwQYSz2jr1KT+gdBAhCA/hBBUqmU1zpAjUaDpKQkqNVqna/N4tE2BwcHhISEoHv37nj06BEOHDiAvXv3Ij4+HkqlEkql0iwL2YsRCUUQioQQi8UQi8WwtbFFx04dMXDgQLzxxhtwc3ODVCrVO8JZHC6NrxsCfeVIJBIK/6OHy5cvY/To0di3b1+5DWaKior0uh6xtrbmTDc0MsslhAyhL86qIXGr7xgbG5tKu24uDBkH2NnZcf92WFl27W9hYaHeeum7bl9fX6OuN+zt7dG5c2eT6mLoflUF119t2rTB0aNHERMTg2XLluHgwYO8P7yfPHmCiRMnIiIioty/Y4bawtPTkzPdxcXFbO1Qlme0NvQPopYLwPI+2EqlEnv37sW4cePQrFkzzhdMsRAsHnH08/PDp59+igcPHiAmJgbR0dG4evUqcnJyoNFooFarodVqS0bj9NWxeMqaaRmEQiGEIiEkEgm8vb3RokULtG7dGq1bt4a/vz8cHR0hkUggEokMGmcolUrcu3cP27dvNzr6R/wfwcHBCA8PLxmhycnJwc2bN7Fnzx6cOHGC85iDBw8iIiICAwcOtNjLpSLKK4uxjzkMhGrrC4RenKbTtWtXdO3aFc+fP8f69esRHh6OlJQUo8cdPXoUZ86cQffu3S1Wt4owlqttBnkECcAK+zFOSEjA2LFj8e2336JHjx4606qvCkHg3xHGoKAgtGrVCh988AGUSiWSk5Px7NkzJCYmIjU1FVlZWXBxceEsSywWY9q0aVAqlXB0dETdunVLNplMBpFIVDIVKxKJjI4yMcYgl8sRGxuLDz74AImJifSiMQE7OzudKZtOnTrhgw8+wA8//KDXEnP37t3lFoAEYWlUKhXS09ORl5eHnJwcqNVqODk5mdVZcUXg7e2Nb775BvPmzcNff/2F1atX4/z58waP2bZ1m0UFIPUPggRgNUaj0eDu3bsYN24chg0bhs8++wyNGjWClZWV3i+vl8Ug8O/UlJ2dHXx9fUumKIoFGJcxhlQqxbBhw0rKKt6KhZ+pD29+fj7+/PNPLFiwAImJifTgmpFPPvkEv/76K2JjY3X27du3DyqVipfBEcGfM2fOcK6tMudSi5pObm4ufv75Zxw/fhwxMTEVFuGkI
pBKpRg1ahRGjhyJAwcOYOzYsXpdwJw5e4Y6Qy3rHwQJQJNFYFZWFn777TccOXIEoaGhGDduHHx8fCCVSo0aQ7w8WseHYsfLZYUxBrVaDYVCgWvXrmHZsmU4e/Ys8vPzK9Q4pTYgEAgwevRofPXVVzr7cnJykJ6ebrZwcMT/9e/yhKuq7Wzfvh3Tpk2rVtEryvpsvvXWWzhy5Ai6devGmefhw4fIy8uDvb09dYxa1j8Iw1AouFdeOsVTuatWrULPnj3xxRdf4MKFC8jJyTG7sUdZ6qdSqaBQKJCRkYGIiAiEhoZi2LBhOHr0KHJzc0n8WYhevXrp3afPlQlBVAbr16/Hu+++W6te7sHBwejatave302KWlG7+wfBDY0A6vnBUCgUSE1NxcaNG/HHH38gwD8Aw0cMR69evdCoUaNSBhmWCptWPMqn1Wqh0WhQWFiI2NhYHDt2DPv37UdCYgLkcnmlBEevbdSvX1/vPnq5EFWFhw8f4rPPPquV1x4YGIiYmBi9z6ivry/1j1rcPwgSgCYLsKKion/DLV28gOs3ruObb75B06ZN0bNnT3Tt2hX+/v6oV69eibWvUCgstaav+P9caDSaUusFX940Gg0yMzPx6NEj3L59GxcuXEBMTAxycnJK6kSjfRWHIfcjlgo/RRCmsmzZMigUilp57a86Y36ZwsJC6hy1vH8QJADLLASLR+DkcjmuXLmCmzdv4qeffoJEIoG7uzsCAwPh7+8PHx8fuLu7w9XVFXZ2dnBwcEC9evV0RgnVajUePXqEtLQ05OTkICsrC8nJyUhISMDTp09x7949ZGRkQKVSQa1WQ61WQ6VSkWVvJUFiu2Lx9vbGmDFjTDrG3NFZqhsqlQp79+41mOe1117DpEmT0LJlSzg5OSEuLq5CIp9UBIbWVJPoof5BkAA0mxjUaDQlPyrZ2dl48OABxGLxv/78hMKS6eGgoCAcOHBAx6FmUVERpk6diuvXr0Oj0YAxBq1WC61WWzLtS6Kj6kAjCBVL48aN8d1331FDmMCjR4+QmZmpd394eDgmTpxYKs3Q0oaKJjo6Gh988AHnvs8//9xgODzAsE9Y+nCu/v2DIAFYJSkWa6+uxROJRMjNzdX745Ofn0+GGzVAAJorHBxBlIf79+/r3TdkyBCdl3tVIyUlBXfv3uXcl5GRYfR4Q47vHRwcqH9U8/5BkAC0GCKRCDY2NhCLxSWje4YCgFclJBIJbGxsIBKJSqaqyTDE/F/P+nB2di5X2eY2IqouUQMsZTzFF33PiKVHiyx13Y8fP9a7b9CgQdX6+cvLyzOax9DvNQnAmt0/CBKA5RJQnTp1wuzZs+Hn54fk5GSEh4fjwIEDJeHdqiICgQC2trYYNmwYpk+fDk9PTzx9+hTLly/HwYMHKRScGdEXDs4cAlAqlcLOzo7TmERfXN+CggK95RlaDF+VMHTd+p65vLw87N69m3Ofv7+/TqxTQ9OC8fHxnOmWtuq2sbGBUCjkHPnXNxuQnZ2NP//8k3Nfy5Yt0aFDB4MiycPDgzPdXG6tzCGaDT1HT548MXp8YmKi3n3lnco01I+ysrI406vaspHK6h80/U4CsEqPQjRu3BgbN26Et7c3xGIxfHx80Lp1awwfPhyLFy/G7du3UVBQUKWmakUiEZycnPDee+/hq6++gpOTE0QiEVxcXLB69WokJyfjwoULFBHETF/Ov/32G+c+W1tb+Pj4lPscrq6unELowYMH0Gg0OqN6hpwkV6fRDn3X/c8//3DmP336tN61YKGhoToCkCsKTzF37tzhTL99+7bFn906deogLS1NZ5++keajR4/qve4pU6agQ4cOBgVHQkICZ/rz589N/r3kQqVSobCwUGetsykYcqZ+4MABZGRkoE6dOpz7FQoFjh8/rveDyM3NrdwfK/rQN21d1RyZW7p/WLJvEBbSP7W9AaRSKYYPHw5PT8+S0G8SiQR2dnYICQnBvn37sHr1agQFBcHBwcFoRBBLI5FI4OTohM6dO2Pz5s1YuHAhXFxcStzQSCQSuLi4YPjw4QZ/tAh+XL16FQMGDNDr6qVLly5m6ROurq6c6QUFBdi8ebPOF/Xq1av1llWdIh7ou+6UlBQcOXKkVJpWq8XatWtNGkEy5L5n7969OqOAZ8+e1SskKuK6Hz16hFOnTpVK02g0+Pnnn/WW5eTk9O/XvIF+ePLkSc70yMhIk+ptaHQ5Li6uXG3SqFEjvSIhLy8PH3/8Mec0L2MMc+bM0Wvg0LZtW5PDa5rSj3bu3Knz+xAbG6t3pLrSRnss3D8s2TcIC/WJWq+AhUI4OzvrfL0IBAJIpVLUrVsXY8aMwaBBgxAVFYXNmzfj0qVLKCgogFKptPgIm0AggFgsLpku69atG959910EBwfDzs6OU+QJBALY2NiU+0evNnHv3j3897//LXnhZmdn4/r167h8+bLB40JCQsxy/iZNmuDSpUuc+z766CMkJSVhwIAByMvLw+rVq3H48GG9ZbVo0aLatLuh6x49ejTm/b/2zjwsqvL9/+9ZmI19RxQREdTUEBJMrVzSBHJBMyvNrbS6KjOXxMqPueZWfV1aTDOz/GqJuRGamYiSmi1gfsR9AUQQFYad2Z/fH/1mviJzhplhZhjgfl3XXMpZnnPOc9/znPc8y30vWIBBgwahvLwcX331FQ4ePMhZVlRUVL1tPj4+nMcrlUr0798fy5cvR+fOnXHixAmDD9ibyMhIoz1HjDE888wz+OCDD9C/f3+Ulpbiiy++QEZGBmdZjzzyCADAz8+P85iUlBRMnToVgwcPrtNDtXr1aovu29Qw7RtvvIFt27ahU6dOVtWJVCrF8OHD8cMPPxjd//333+P69euYNm0aevbsCZlMhosXLzboF0OHDm20vUz5UXFxMfr3748FCxYgNDQUv//+O95//32nm4dtb/+wp28QJADtglqtxokTJzBt2jSjcaT0OXv9/PwwYsQIxMfHIzc3F4cOHUJaWhrOnTsHhUJhiNWnD+lijdC7P3yMUCiEUCiEv78/+vXrh0GDBqFv374IDAyEi4sLXFxcjAo8fRaTo0eP0kIQC7hx4waWL19u8QurodAU5tK3b19s376dU6gsWLAACxYsMMuPuHKiOiOmnruiogJz5swx+/tj7EUfEhJi8ry8vDyL4w3a6rn37t1rdJ9cLjc7W4NAIDCkKWzXrh3ncTqdDgkJCZgyZQq6dOmCvLw8fP311xYHMW/bti3nvtOnTyMiIgIdOnRAZmamyfvh4pVXXuEUgADwxx9/cP5g4MIWcewa8qOsrCwkJSU59XfN3v5hb98gSADaHI1Gg/T0dGzduhUTJ06Eu7u70VWUeiEoEonQtWtXREZG4rXXXkNxcTH++9//Ijs7G//88w/Onz+P0tJSaLVawyINY0KNx+PBzc3N8MvSz88P7dq1Q3h4OMLDwxEZGYmIiAgEBwdDJBIZBKGpFYSMMVRXV2PXrl04dOgQCUA789ZbbzV6AYie8ePHY+7cuY2eOD5ixIhmFbvLVs/97LPPGp3IHhkZCR6P53QT0SdNmoT58+c3
OtLAhAkTDPPiHnvssQbbuk2bNjXqegMGDMBnn31m8pjc3FzU1tZaVf6gQYPw9NNPm+zhtlT8hYWFNbocZ/UjS7C3f9jbNwgSgDaHMYbKykr85z//wR9//IH33nsPHTp0gEQiMSq29EOy+mHZDh06ICQkBPHx8YZ4gFVVVZDL5aioqDAc9yAikQibN2+GUCiETCaDWCyuk0ZOH1Da3PllGo0GlZWV2Lx5M1asWEGJvu1MdHQ0Fi9ebLPyvLy8sHz5csyYMcPqMlxdXZtd8GRbPLebmxuWLFnCWX5cXBxOnz7tVM8dEBCARYsW4d1337W6DG9vbyxcuNDwd1BQEGJjYxucttAYhgwZAk9PT7u2Lxs3bkRcXJzJVb3mIJPJLB7iNuWnzuhHlmBv/3CEbxC2hU9V8H8iMCUlBfHx8Vi7di1u3bqF2tpakyt/eTweBAIBRCIRJBIJZDIZ3NzcEBgYiMjISMTExODhhx82KuIEAgHat2+Ptm3bwsfHB25ubpDJZJBKpZBIJIZev4bQx/07d+4cpk6dikWLFqGkpISW3tuRqKgopKam2nyRzZtvvonhw4dbff5nn32GLl26NLv6bMxzCwQCbNu2DZGRkZzHvPbaaxYLSkfwzjvv1JlzZdEvd6EQO3bsQGhoaJ3t5kwTuJ/ExESLjvf09GyUaDWH4OBg7Nmzx+SctYYQi8XYs2ePTVboW+tHplY1NxX29A9H+AZBAtBuIlClUqGgoABLly7FwIEDsXbtWuTm5qKmpsaieEh6Yaifq2fqGH2vn6X3qlarUV1djatXr2Lx4sVITExEamqqxXN6CPMRiUR4/fXX8dtvv5mc72L1l5HPx86dOy2OyC+TybBt2zZMmjSpeTZCVj63u7s79u/fj5EjR5o8bsKECUhISDCrzGHDhiE5Odkhzy0QCLBv3z6L5yB6eXnh4MGDRuc8Dhs2DLNmzTKrnMFPDsbGjRstvu85c+bY3ddiY2Pxxx9/oGfPnhaf27ZtW+zfv99mC7Ss8aOkpCRMnz7d6b5r9vYPR/gGQQLQrkKwpqYG169fx5IlS/D4449jzpw5OH78OORyuWHBR1Pcl1qtRm1tLeRyOTIyMjBz5kwMGDDAEPeP5vzZ5yXdv39/LFq0CJcvX8Znn31m1x4iiUSCzZs3Y/v27YiNjW2wF2js2LH4/fffMX78+GZdz5Y8t0AgwKRJk5CTk2NWD4VAIMCuXbswZcoUk+I+OTkZe/bscWj4JL1437p1K6Kjoxu099SpU3H+/HmTPYerVq3CmjVrOEOq8Hg8TJkyBWkH0qx6VoFAgM2bN2PTxk1GV17birCwMPz555/YsmULIiIiGjw+ICAAc+bMwYULF2wu/sz1I7FYjEWLFmHXrl1Om5HHnv7hKN8gbAOPNTBWWFNTg1GjRuHIkSNOF1TY09MTP//8M3r16lVnuFSpVGLjxo2YN29eoyeX68PBSCQStG/fHsOHD8eTTz6J7t27w83NzdCLp//XFmi1Wuh0OjDGoNPpoNFoUFpain/++QdHjx7FwYMHUVRUhNra2kZPIpfJZFixYgVeeeWVOqug1Wo1/v77b8THxzvdnA6hUIgnn3wSu3fvtii4qFqtxrFjx8xqxNzd3REcHAx/f3/OXlxTFBUVcQYajomJMRlW4n6ys7Px22+/4ebNm7hz5w7EYjECAwPRsWNHJCQkmL3gw5r7yc3N5QzI3KNHD6PXNnXOww8/jICAAKueWx/fMioqCvHx8WbX34PcuHEDO3fuREFBAeRyOTw9PdGpUye88MILhmDBlj63rWzNGENWVhZOnjyJ/Px83L17FyKRCN7e3oiJiUF8fLxFWV7KysqQlpaGnJwc3Lp1CxKJBB06dEBSUhK6du1qaGsyMjKMThnx9/c36yV+7tw5nDhxAleuXEFlZSVUKhU++eQTmy2Q0tfNpUuXcPDgQRQUFKC0tBR8Ph8+Pj7w9/fHY489ht69e5stuhprs/v9qLy8HL6+voiIiMDzzz9vOHfVqlWcvckpKSkYM2ZMk3xHHekfjvANggSg3QTgg8LAxcUFYrEYHh4e6N27N2JjY9GzZ0+EhYUhICDAEJ7l/o+xX1H6IWfGmOHLpdPpoFarUVJSgps3b+L69ev4559/8Pfff+PSpUtQKpVQq9VQqVQ2y0rSmgQgQRCEI3vaLBGABOHwdylVgWU9c1qtFgqFAhUVFbh9+zbS0tIMq4I9PDwQFhZmWNzh7e2Njh07Yvjw4fVEoEqlwnfffYfLly+jpKQEJSUlyM/PR1FREVQqFTQajaH3T/8hCIIgCIIgAdiE6Ofk3T/vrqysDAUFBXWGhWNiYpCQkFBPAGo0GmzduhVZWVkGsaf/EARBEM6DXC5Hfn6+0X0hISFGh2dpTjZBArAZoV+Za60QY4wZegmBf4eMFQoFZ0gWpVIJhUJhE9GnzyCiF5MEQRCEbUhLS8OECROM7nvvvfewbNmyetuLioo4y6M87QQJQCcSfhKJBG3atEFERAQKCwuRl5uHquoqpxdTPB4PHh4eiI6ORnh4OC5evIizZ8+isrKSDEsA+Dd91s6dO43umzBhgk1W6y1fvhwlJSX1tkulUs4gzUTLR61W44MPPjBrsZpIJIKPjw+Cg4PRp08fdOjQwWnymevzLRtj8+bNmDNnTp2FDUqlEnv27OE8x9zFUETjyMjIwE8//WR034svvmhVmCESgC0MmUyG6dOn46233oKnpyeUSiV++eUXLFu2DNeuXbPpQhJbwufzERAQgE8++QSJiYkQiURQKpVISUnB3LlzUVZWRsYlkJGRgY8//tjovkcffdQmAnDz5s24du1ave0+Pj4kAFsxNTU1FufY1tOuXTu89dZbeP311+Hq6tqkzxEeHg6pVGo0jVlxcTEef/xxrF+/Hj169MCdO3cwe/ZsFBYWcv5o79y5MzmHA/jrr7842764uLhWLwBbfRxAoVCIPn364J133kFgYCBkMhm8vLyQlJSEAwcO4N1330X79u0hlUqd5p71OYajoqKwZcsWjBw5Ep6enpBKpfD09MRzzz2HkSNHWhW+hCAIwhkoKCjA3LlzERkZ2eQp2EQiEcaOHcu5PycnB4MGDYK/vz+6deuGn3/+mfPYmJgYCoNCkAB0BlxcXDB48GC4uroa4vjxeDyIxWIEBwdj9uzZOHLkCJKTkxEWFgZXV1ez8/Pa417d3NzQqVMnfPDBB0hNTcXAgQPrhELR33u/fv1onglBEM2ewsJCDBo0CAcPHmzS+3j11VdtUs60adPIqAQJQGeBK+cvn8+HRCJBWFgYkpOTkZmZifXr12PgwIHw9fWFTCazqxgUCAQQi8Vwd3dHYGAgRo4ciS+//BLHjx/Hm2++iTZt2tSJ3aeHMYaysjJaDEIQRIugpqYG48aNQ15eXpPdQ58+fSxO2/cgXbt2pVRphNPQ6ucAqlQqpKamYtq0aQgKCqoXSV6/MlggEKBNmzYYN24cxowZg5s3b+LYsWM4cuQITp8+jaqqKkO8Pv0q4vuDQXMJPJFIBMYY+Hy+4SMUCiGVSvHQQw8hLi4
OvXv3RnR0NHx8fODi4gKhUMiZdUSn06GkpAQpKSlQKpXk4QRBtAjKysowYcIEHD9+vMnu4dNPP0VWVhYuXrxo8bkeHh7YsWMHJBIJGZMgAegMaLVa5OTkYMaMGVi2bBlCQ0M5v6B8Ph9isRgikQidO3dGeHg4Jk+ejKqqKuTn5+Py5cu4evUq8vLyUFhYiNLSUoSGhhoVgHw+H7169UJYWBi8vb3Rpk0btGnTBu3atUO7du0QFBQEqVRaRxQ2lGpOo9Hgzp07mDt3Ls6ePUs9gARBOC0ymQzt2rUz/F1aWoqSkhKYSk6VmZmJv/76C7169WqSe/b29saxY8eQkJCArKwss8/z9/dHWloa5cclSAA6G/pewH/++Qdz587F6NGj4e7uzjmH7v5eQeDfRPZeXl7o3r27IbWb/qMPMfMgEokEq1atMhxz/0cfRNrcEAiMMSgUCly8eBHz5s1DZmYm9f4RBOHU9O/fHwcOHKizrba2FqmpqZg1axZu3bpl9LwNGzbgq6++arL7DggIwMmTJ7F48WKsW7cOVVVVnMfy+Xy89NJL+PDDD+Hv709GdzCTJk1Cv379jO7r1q0bCUBykf/Ly3vt2jXMnj0b3333HWbOnIkBAwYYFn2Y6n3j8XgWr7jl8XiNXlnMGINSqYRcLsf27duxZs0aFN8uhlrT8iPQN9QbShBE80MqlWLs2LGIi4vDww8/bDSe6aFDh5r8PsViMZYtW4bk5GTs2bMHp06dQkFBASorK+Hr6wt/f3/06tULSUlJJPyaEH9/f6p/EoDmC6qqqiqcPHkSZ8+eRY8ePfDKK69gyJAh8PT0NMy/a+p71Gg0UKvVKCkpQWpqKjZs2IDr169zLmYhCIJoTnTo0AFvvPEGVqxYUW9fQUEBSktLjaZfczQeHh6YNGkSLewgSAC2FBGo1WpRUVGBU6dO4cyZMwgMDMTo0aMxfPhwdO/e3TA3z5x5eba6H51OB61Wi6qqKmRnZ2Pfvn1ITU1FaWkpCT+CIFocI0aMMCoAAeDmzZtOIQAJggRgCxaCVVVVqK6uxvr167Fp0ya0bdsW8fHxeOKJJxAVFQVfX1+DELx/Dp8lwpAxBp1OZ5g3qP+/TqdDbW0t8vLycObMGWRmZuLYsWMoKSmBUqmEWqWGjpHwIwii5dGhQwfOfXfu3KEKIggSgI4RgwqFAgqFAhUVFbh69So2btwIkUiEjh07IjY2Fg899BA6deqE4OBg+Pn5wc3NzbCYQywW11vQoS9Tq9WipqYGlZWVuHfvHoqLi3Hz5k1cuXIFFy5cQE5ODqqrq6HRaKBSqQxhZgiCIFoypuZIy+VyqiCCIAHoWHQ6HZRKJZRKJXg8HsrKynD27FlDL6D+X5lMBl9fX8TGxmLt2rX1GrPa2lpMnjwZJ0+ehEKhqDfUq9PpoNFooNVqTYZFIBpHRUUFDh06hPT0dNy6dQt3796FRCKBv78/HnroIcTHx6N3795mrcjOyspCZmam0X2JiYmIiIgAAGRnZ2PLli3Iz89HUVERfHx80K1bN0ycOBEPP/yw2feu0Wiwd+9eHDx4ELm5uaiqqkJQUBA6duyIyZMnO3XICWvq6vbt2wgODkZERARef/11hIaG1jv32rVr2LJlCy5duoSCggLw+Xy0bdsWvXv3xsSJE62aEG5LH+GyXXV1NQIDA9G+fXu8+OKL6N27NwDg8uXLnBkwnn76aXTq1KnBaymVSuzduxenTp3C7du3UVBQAKFQiMDAQHTu3BlJSdhyo+IAACAASURBVEno2bOn0y2sMjXf+sEoB+b6EwAoFAr89NNPSE9Px507d3D79m1kZmYatZ+xuhOLxQgJCUFQUBCeeeYZxMbGNuo5FQoFdu3ahb/++gtFRUWGawQGBqJLly4YNWoUevToYZF/OcL2xcXF2L9/P06ePImioiKUlZXB19cXbdu2xZAhQ5CQkAA3Nzer6sRWZVviF87QhtuzHeDq3TJJdXU1e+qpp5hAIGAAnOrj6enJTp06xdRqdZ17VigUbN26dUwmkzXZvfF4PObi4sJ69+7Nqqqq6tVrVVUVi4uLa/J6lclkbN26dUyhUNS5P5VKxU6dOsU8PT2dzu5CoZAlJCSw6upqZi01NTVs8eLFzMPDo8Hr9ezZk6WmpjZY5urVqznL+OGHH1hNTQ177rnnTF5r6tSpTKlUNnitW7dusb59+5osa9y4cayqqoqtXLmS85iUlBRmC8LDw42W7+PjY5e6cnFxYUuXLmU6nY4xxphGo2HJycmMx+NxniOVStmnn35qOKcpfMRc2yUlJbHKyspG2U6tVrMlS5Ywf3//Bu8/Li6OZWRkMFtTVlbGec2EhIQG3z1c53799dcW+RNjjCmVSrZs2TKj9tRqtVbXXXR0NPvpp58srhulUsnmz5/PfHx8GrxG37592YkTJ8wu2562l8vl7M0332Qikchkub6+vuyTTz5hGo2myco2xy+cuQ23RTtgChKADwgLqVTKJBIJ4/P5jb4/gUDAYmNjOQVgbGysTa7D4/GYVCplMpmswS8OCUDG8vPzWXR0tMXXnT17dr0XhbmNx9atWxv8sus/48ePN3n/FRUV7KGHHjKrrNjYWLZw4cJmJQAtqatp06ax6upqNnr0aLPtuGLFiibzEUts17NnT/b+++9bZbu7d++yAQMGWNyOfPjhh2YLZHsLwPLycs5zt2/fbtGL++zZs6xbt26cx9xvM2vrbuHChSZtfz+FhYWsX79+Fl2Dz+ez//mf/2mwbHva/sKFCywyMtKisocNG2ZWW22Psm0pAJuqDW9MO0AC0Ezn9/DwYMOGDWPLli1js2bNYuHh4UwsFju9AHRxcWHdunVjixYtYp9//jmbOHEi8/LyIgFo4hdmRESE1ddeunSpVY1Nu3btLLrOvn37OK8zd+5ci8qSSqXNSgBaWle+vr4Wv+yys7ObxEcstZ2pdpfLdkqlksXFxVl9/x999JFTCMC7d+9ynrt7926z/WnUqFHMzc3N5DPrhVtj686U7fXU1tayqKgoq6/x+eefm+xVtJftCwsLWUhIiFXljhkzxqS4tFfZthSATdmGW9MOkAA0UwC6u7uzdevWsbKyMlZbW8uqq6vZlStX2PTp05mfn5/FvWqOEIA8Ho+5urqy+Ph4dvXqVVZTU8MUCgUrLy9n33zzjVnDVq1RAI4cObJR1+bz+ezvv/+2uLGx9DNw4EDOoR1b2sQZBaAjPqNHj3a4jzjKdsnJyY3+UWlKIDtKAObl5XGe+8svv9jUn/QCsLF1JxAI2PHjx00+1xtvvNGoa0gkEnbp0iWH2l6n01ncq/jg55tvvjF6z/Ys25YCsCW24a0+nYJAIEB0dDRefPFFeHh4QCKRQCqVIiwsDB9++CEOHDiAiRMnIjAw0BD/rynh8XiGHJrvvvsutmzZgtDQUEilUojFYri7uyMpKQmPPvpokwetdjYyMjKwb9++Ri8CWrBggd3v9fjx46ioqKi3/fLlyygvLydjNpKDBw9CoVA41EccYbvCwkJ8/PHHjSpDrVZjyZIlTW6ja9euce
5r27atU9adVqvF/PnzOfffuHEDn3/+eaMXjSxfvtyhtt+3bx8yMjIaVfbixYuhVqsdWnZT0hza8FYvAIVCIXr27AmpVGpYZaXP9evq6oqePXtizZo1OHr0KObPn48ePXrAy8sLEonEYWJQIBBAKpHC09MT3bp1w/z585Geno5Zs2YhICCgjtDj8XgQiUTo0aOHIVcx8S+ffvqpTco5cOCA3eOQabVaXL582aKXImE+tbW1uHTpkkN9xBG227ZtGzQaTaPL2bNnD4qKiprURrt37+bcFxwc7LR1d/z4ceTm5hrdt3XrVptEddi+fXs9EWFP25vKvSwUCjF48GA8++yzCAoK4jzu+vXrRlfY2rPspqQ5tOGtXgDqdDoUFhYa/eLoc/zKZDJERkZi9uzZOHr0KFJTUzFv3jz06dPHEPNPIpFAJBI1ShTqrycWi+Hq6goPdw/4+fnhscceQ/K8ZOzfvx/Hjx/HzJkzERYWxtkjqdFocP36dWi1WnrT3/fCfzDx/P1ERkZi7969KCsrQ15eHhYuXMh5LGMMv/zyi8X38NRTT+HcuXMoKyvD6dOnER0dbfL4e/fu1dtWWlpq8pwRI0bg/PnzKCsrQ3p6Ojp37tws7WVpXQHA0KFDce7cOZSXl+P06dMNhsG5e/euQ32kIdv16tULWVlZKC0txaFDhxASEmJxvZ0+fZpzn7+/P3bs2IGSkhLcvHkTycnJnKFFGGNIT09vMvtnZWVh06ZNRvd17doVXl5eVpXbvn17rFy5En/++Sfkcjny8/Oxf/9+8Pl8m9WdXgRaap82bdpg165dkMvlyMvLw4wZMziPValU+P333x1i+6qqKhw+fNjosd7e3sjMzMThw4exc+dOXL16Fc899xznffz00091/rZn2c7QLlnThtuiHTCb1j4HkMfjMV9fX5aRkWHWnDKdTsdUKhVTKBSssrKSXb9+naWlpbGVK1eyiRMnsri4OBYUFMR8fHyYr68vGzBggNFyq6ur2cCBA5mPjw/z9vZmQUFBLCoqio1KGsVmz57NvvzyS3bs2DFWUFDAqqqqWG1tLVOpVCYn0up0OlZTU8MOHz7MvL29aQ7gfWRnZ5uc3FtYWFjvnJdeesnkak9L5o+0adOmXmiAu3fvmpycbmxux2effcZ5fHh4eL3vQkFBAXN3d29WcwCtqau2bdsylUpV55w7d+4wV1dXznN+/PFHh/qIKdt5eHiwsrKyeqsiTc0RNma7mJgYiyalT5w4kfP4mTNnOnQOoEajYXl5eWzVqlUmbT19+nSr5m6NHj2aVVRUcN6rLevuP//5j9FrdO7c2ex5jYwxk6vb58+f7xDbX7p0ifO41atXG323BQQEmDX31p5l23oOoCPacFu1AzQH0HwBDLlcjpemvIQff/wR5eXlJrvRH+yla9++PYYMGYIZM2Zgw4YNSE9Px4ULF5CVlYWMjAysW7cOIpGoXjkikQjr16/HiRMncPbsWVy8eBEnTpzAtv/dhqVLl2LSpEno06cP2rRpA1dXV0gkEri4uJj81VZTU4OjR4/i1VdfRVlZGXX73ceVK1c4902fPh1t2rSpt33ixImc51hav2PGjKnnB35+fhgzZoxF5ahUKpPXeHDeZ9u2bTF27NhmZStr6uqZZ56Bi4tLvV6PZ555xml8xJTtnnvuOXh6etbZ1qVLFyQlJVlUd8Z6HADAy8sLI0aMqLd96tSpnGXZO9vGr7/+Cn9/f/j7+8PX1xcuLi4IDQ3F3LlzUVVVxdn+vv766xZfa/Dgwfj+++/h7u7ukLqrrKw0q9f5/u/pkCFD6m1/+eWXzfYve9m+uLiY87gnn3yy3jaZTIahQ4fCx8en3ufBUSl7lu0M7ZKlbbit2gFzoVUC+HcY+EbuDUyfPh07d+7E7Nmz8cgjjxhElykxKBAI6s21Y4zB3d3dMNfD2GIMgUBgGJ6zNHfwg9dSqVS4d+8eNmzYgA0bNqC0tJSyhzxAfHw8Tp48aXRf9+7djW43lYvUUgEYFhbGOSRlCaYaucjISOPPEdqhWdnKmrrq2LFjo+vX3j5ije0ezFTQEFwCg9M3TNy/sQnstkStVnOKFi6ef/55dOnSxbKXnFCIL7/80mRbbuu6MzbJX6PRcIpqLjt37tyZc5jxwWFwe9neVL1x2e/bb781yzb2LNsZ2iVbteGWtgMkAK0QUvqUTydPnsTAgQMxbdo0PProo5BKpYYUb+agF4YNHdOYVbparRZqtRrl5eVITU3F2rVrcf36ddTU1JAxjeDu7o4+ffpYdI5MJjPpL5bANYHZ1dXVYj/lgis1kkgsala2sqauuM4xZUNH+4gp23GtavXx8TH7XpRKJWpra43u48qrK5FImtUPA2sW6QwePJjzB4K96s6YeK6pqeH0Aa5rhIeHIysrq0ltHxAQwLlvxYoVGDBgQIPiuinKdoZ2ydI23BbtgCW0+iHgBw2jVqshl8uxf/9+PP/880hMTMTGjRtx7do1VFdXQ6VS2WSllbX3plQqUVlZibNnz2LVqlXo378/Zs6cifPnz5P4c2JoRbZ966q5168t7r8l9/q7u7sjJSXFqhehPp+qI+vOWHn2tI89yw4NDeVcdJOeno4BAwbg8OHDVt2DPctujm24o9sx6gE0Ibb0qwnPnDmDxYsXo2/fvkhMTES/fv0QEhJiWPV7/8cWaLVa6HQ6/P9A3dBqtaisrMTFixeRmZmJQ4cO4fz586itrYVSqYROpyOjNQL9UFRlZaVhDqiXlxetoibIR5yA6OhofP/995zDYw3Rrl07qsRGipIxY8Zwhms5efIknnrqKXTv3h1Tp07FpEmTzF6lbc+yCRKAjRaCWq0WNTU1hhARv/76K8RiMYKDg/H4448jOjoaXbt2Rfv27eHt7Q2BQGAQgjwezxCXz1jZKpXKIPKAf+ciarValJSU4NatW7hx4wZycnJw5swZnD17FtXV1VCr1VCr1U3SC9mSqKiowIYNG3D48GGcOHGCc/iEIB9pCT5y/PhxowLKmduR2NhYTJ48GS+//DLEYrHV5Xh4eLRqP7aF7efMmYNvv/3W5AKGc+fO4e2338a8efPw3HPPYc6cOZxzZx1VNkEC0GZiUC++qqurUV5ejitXrkAoFBo+Pj4+CAsLQ7t27RAYGAhvb2+EhYVhxIgR9USgSqXC1q1bcfnyZZSWluLevXsoKChAUVGRYZhZp9NBo9EYPoRt2L59O15//XXKqEG0Gh9hjJlc5dwUdOrUCS+88ILhb4FAgLZt2yIiIgKdOnWyS7aP1vruaqztO3fujKVLl2Lu3LkNHqtQKLB161Z8++23GDlyJNavX2+yF9aeZRMkAO2CVquFVquFUqk0bCstLcWNGzcMw8ECgQAxMTFITEysJwA1Gg2+++47ZGVlGcSe/kPYjy+//BKvvfYaVQRBPtLEREREYPHixVQRzYTZs2dDpVKZTHX3oPDcu3cvMjMzsWPHDqNhbhxRNsENLQKx8S8tjUYDlUoFhUJh+HBNYFUqlVAoFHV6/Aj7ceXKFbz99ttUEQT5CEFYKhb4fLz//
(embedded image data omitted)Assignment 5
###Code
# In this assignment, we will visualize and explore a CT scan!
# load numpy and matplotlib
%pylab inline
# we are using pydicom, so let's install it!
!pip install pydicom
###Output
Requirement already satisfied: pydicom in /usr/local/lib/python3.7/dist-packages (2.1.2)
###Markdown
**Task 1**: Download and visualize data with SliceDrop! [20 Points]
###Code
# Please download https://cs480.org/data/ct.zip and extract it on your computer!
# This is a CT scan of an arm in DICOM format.
# 1) Let's explore the data without loading it.
# Without loading the data, how many slices are there?
# There are 220 slices. Extracting the file `ct.zip` reveals 220 files that end
# in the .dcm extension. They are numbered from 0001-0001 to 0001-0220.
# Without loading the data, each file would seem to be an individual slice.
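# --- Optional sketch, not part of the original answer ---
# Assuming ct.zip was extracted into a folder named 'ct' next to this notebook,
# the slice count can be confirmed programmatically, and a single header can be
# inspected without reading any pixel data (stop_before_pixels skips the pixel array).
import glob
import pydicom
dcm_files = sorted(glob.glob('ct/*.dcm'))
print(len(dcm_files))  # expected: 220 slices
header = pydicom.dcmread(dcm_files[0], stop_before_pixels=True)
print(header.Modality, header.Rows, header.Columns)  # e.g. CT and the in-plane dimensions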
# 2) Let's visualize the data with SliceDrop!
# Go to https://slicedrop.com and drag'n'drop all .dcm files into the browser.
# Please use the 2D sliders to show axial, sagittal, and coronal slices in 3D.
# Please post a screenshot of SliceDrop's 3D View in the text box below by
# double-clicking the cell and using the Upload image button.
###Output
_____no_output_____
###Markdown
![image.png](screenshot of SliceDrop's 3D view; embedded image data omitted)
HazPR2UM0b0NxYzuA68Onewo3mlfbZLpnqNu3jDWmAtsBZYC6wF1gL/ZwE+F7YI31j4223D/8IeFvtwyfe+972zJ5988sBcsJPdvUgG5fnzF1544bhudw1yCUaQFu4IT5CR/9e2AAPAEmE3evHlYQXEzcR/yCU60wUOoE9+Xr2IHeRGmIwuYcJwn7L6iPRqFzd8Vvvy4S3Yi87wR+RReFO/YFAxPAiPaCf94UL6sEP4srywlv7AL/pQ3bAbcgY5FX6mm34bI3nqsY38dnJXh676pbxxNF50ZfdensHO4Vdx/VPONX3YiO5idoBJ1Qt7KeNod9FV3GNLDF2FVVfmWmAtsBa4SxaYi/yIgEicnD+HE3nAIXGSnC0nKsw3EUALgBFZgrzhwAMF4pwwOREIyqnDwXOWgjyOjnONcOIcHYAOOXQOTIjTh05tg+ZI2ykUaIgkqc8cvPLkcvyc+mkduteGsnRWnnz6ATH6zslns4CQeukaaaYMh31K4ET2iNWbZI86ATlxZQNMET2ADfA1y8+y6dMOo8qdAsDS9QmAIpfOYnZyGK/+Xa7xa9yMHXmAjr63Y8j4RTjO+XcM3oa1wFpgLbAWWAs8xBZAlvClfLgPScMzdgIhFtrp8/d///dnP/MzP3NgD3kwDQIEXuDXxT/6oz96YBJkDezGhwuRPX5Gli/2sWT4jXwykU7tBELw8N8wECzAnzufL/60D5OoS18kFJwAS7nm6+GOMJHy8sUIGP2lixAOoXvf/oG36CBP0D5d2xnU7p/y5MMvEUB28rADfBIezQ7ah+nkhZP0TR9gKnkRM+qqB+9pn42EMJX26Bz5FU6Vrk44KnvoNxnGjgztFNgHWcTecLCgHtl0jQSSzvbwKDuQwXZsK4ZRs+2F8Es62Y9PX5IhV8z9awE3boswD5AWlh58LXKL799eruYPiwU4GQ7HPG4Ot3DnSCJmOJjAAEfGuc3txhEtHB8HHCkAJHCqnFMEkTa1kSPtnuIc3TucMcenjd58aTtiBWDxG3OgprrJUr9z5SOSvG3jmOlV2rx/c9KT1MgOlQcyaq/5ofysk80CW9k03bUjbT4vAk3yAhHkOy8ur+tpDzr5NhFbNSbOA2DzeeVc8HFrQIKNI7EaQ2XUTa+ecekOnKkDIHWoC3wYN2Ot/+lMDkAmbcok15xgvw1rgbXAWmAtsBZYC/wv6WGBz0fztz/0Qz90kBH89csvv3yQIYgc+T/+4z9+kC989re+9a1jlw4SBaGA8CCHX0YqufYxZMSFa/L4ez7brmlEAzzIryOSxHx0/8DCbhgy+G4YQ/v8vHJhHOdCO8aVgxlfeumlsze96U0HDgiTKKvc/I9hcKAXTfLI1i/Yjb76Bve1E0e+n9fJVx72VMZP6ugDW/SBbNiU7vqgPgyjDLvWPrvUf+2y89y1xD7aoT+bak8d7cM20tkke8Oy2hRgJG3Sv3Rja9cVHckybjBchJn+qaMfdHWdHIQV3cTa1Q+6GMvGIjsbA+lXEZYYugqr3kWZLRJSYS4+7qJad6XpFlFuOje2mH0cbrION7+FrvI9uFo4eqDMRbWbuUUg23ZjSxPkK9/C8nY7Ph+qPYTVJbeF6i62bteaW87c5KDMq5wih8iRODgmwMM9wQmZsxwk5yfNveCeUY4D8qaKc+++AD762RcnKS8g0X0kBhCUpY+QA87hdj+mq7j21ZdfXfVd0x8QoJO+dY9Wt3u4+8h9A4QBIaf3Z4DGfUyngIdyHDvHHPEUQRQRQp90k/YjP/IjFwRa92zPhXQ/jDDqqV8ZMXInIoh9J8HjnG30K1vTF2CSB6iJ5Z2Wy47TH3Quns9Fdu+Q7tnYc44N2BHIAS4FfW1cq7fPqlcHeqO1wFpgLbAWeKgtAEvx5/yjgETgr2EY/h5O4ruRGXYEufbhZwSJnUO9tEM0wFvk2EkCw/mX9jAacgLxE2ZQpo9ShwfCVmEIunz3u989cAyMpF3BjiZ4MWyQf5dHhjy4ka5iWAk2hBOun38LUf124Hz7298+u3b+EW3YMryFsIJ3EGHaVB+ugCm02b+Nl6eMPC+iYD94zy4oZFFYEIlDJxiQXH2BZ9uZ1IvM+h3hwlbapDesEzFjd5bxMj6CeuzO3soUq8fu8Kgxdt0YRxYZG0Fb+i/Q31hpIzuL20GlPf1o3CZ+c94L1kPYFYQlhq7AqHdSpAnkhvNwMVlMJKGFiZvVjeSGaVK2eGiBo6xz+c4rd1X96CZM/lxcvZ429fX0aPHUlk22cbhpu2H1tUPbLWLmQokNlMmOxS0OPQhaLGXjFt1s3QJavdpwQ/dgoov6DkEZMuUL2iGnB+Ekp6bu6dBYqvdGiKnXY/cte39YwNzjHM0jzs/zQVpzhtNzn3C47pHeQnXPcMbKcoTmYXPQGw1EUPcUkNNulggG8xhA4JgBmOa6fMF9Ou8HpMp//dd/Xbwx63s/9K5O4ER7nK48etHXM44OdOwe0477yhstIKtnW3HPu+4n7Tj372HJAgS6n7rvey50v9UGcKa/ZM/7s2dKM2be75UT+68WjUXjEMkizgbJQQYp57nfm0Wys7/YNVsgm3ouSqvdnmmPPPLIBbGT/PTW37ZzN/7ink3a7/ktLRIyWyZv47XAWmAtsBZYCzyMFsh/w2D8vAMRYP1i1w089b73ve+IkRawFRwCO1UW+QHrIA/4dSQSnMK3I4B6kZevn2uNCKFimMK3EMV9e4g+9OyFmHOEipdAcIAAPyQfjoAv27kNl/kYtjbgN/iCXkgiuvcv7ckiG25AdGkvOTAXXEc2jAdTkKUMPdvFA4PIJ0s/e7GP0IFBEEu1w176oAz9kFn6zXZ96gABRR+6l6YtdcVhKVi4F3DK6of2jSs8jZBqd3220id1lNFPofWpMnRhq8bGeXKlTTwrnX0qe1X30hJDV2XZK5ZrkvUhM5PM5HEzNak0H0EgDtS70UxGi0HlW+i08JEW8Be7Vl4odgM5d5ioPZw8MCJfWizUBvnS2sqoXm1rx808dUh2fdK++uqRYUHiENRzozg8VNmih5lr5bDudJMnJDcbsUuHB4eHHVtmu+LSlGV3bU/7dj0XiMrNRV59oYMHTbbMvumnv9N+U0fnFoTaMRc8LBwePBFUbCo/2zcm4vQrPoyy4YG0gPEXjLt5Y364Vz0/nJs3AZJ2q7inHe4pjrHnR6QQwsdccz+aQ2S4t8zhHJn5yUlyyL15UV4ZOjnPAapDnvlOT/efeR35JK37xM/O2iUEbLjH069nSAOpnqN7FPkKAHXf070y2qZP90f3TjHZN7uftCdvHulSnxoD8WOPPXb2H//xH4eaPeu6/2c8nw30EtQzTj3zsne27Rn3ne9857BfbxGVJ6NnH10FALE+s0vPD2PajinjIY/dzRVjRV6EXPNF3DPLXNuwFlgLrAXWAmuBh9kC/DhSh+9E+CBRkCYwGP+LFLJO4E/5dukwE//L11qvwS5e0nhhR1YvjMjmi+GlXmiRwfeL+XllXRfUkaZeOCz8gBgR+H/y6NJucfW8wAvrkU0GAkU9evH7rXe02Y4i
/bDLpxeN2lAXcQJzqMMu8GbrN/JgEwQZ/ejiml3gjPqrLrnqsxvsJY1sukrTFnvQ/9r5DqZ2PcmHkbQZBuyladiKrvWZnfW1n6OxAZyr7XYT0Ve6sZWubUEa/elm/I25PrCNdH1ka+l0CB9LI0d/qi/vqsISQ1dl2UuSa9KYzC3YTM5u5hYQbkQT2gMHs1q6m8ODxhFxEKg3QZWzQHHezhRxZZxbDGjTjU8Hk9JBdosx9eVbALpZWhBFZLQoK9YndVqctZCoXuW0Q14PvvrVDZYO9PrJn/zJQ083C9lTlms3nrS+izHbrry0Hmo9JBvG2qaTczFW2n8P6Jr8Fl3qyU+OMqcylWEzYcrNxmTpP53++7//+2JxS08OwrhOcsp1C3/j3aJf2+rE9M+5UBn1aldcmOcXiXtyX1nAGBp/89Ozo/smJ9gbKk7cvcDxmV9tdVWOIzN/PBtcu5/MPY5a+Uhhc1xZu2fIy4Ejktrhwtn3DENyVJdjbg6as93P3RvSHN2v5qy+9Jwxl+VLq36Ond7ytdvzJZuQR++eHep2H5OFfHLPKOeQ56BXNixNvu3fgENlT+8hO3jYwHOQrQIcTSqyTp8X9b0yXYvJCHjUb+WkJT876GN6Kytkc/qyiTKCa2PojZ0dXXQ21p63YvLlz2cp3adMeRvWAmuBtcBaYC3wMFqAP7Xu4O8RI/AOPO/86aefPvvZn/3Zg1DwkylYA2HAb9t9AtvAWOoiG/hqOJ/fdQ2jITj4Y/gGXnHdT6cqZx1BB98tlGYXT35/YoMwl3GiJ3ICTkBIhG3CeGIYAFFFX4ROZSJn1NWeaxgKdkBw6a+gfzCDdGnkWEuK9YM8bdBr/pSLjHb5kI+Y8fKKTZJf/6TJYys6kt16is3b0Q6ftrGAvY0DMgbO1Vb9bRcXnehGfzKRQF6GSg+zVkZd31iSX54+yxf0U4AbWx+nv7bZBIYlxxE+pDPcfNlhiaHLtugPKM9EAcBNYhPSDd7h5jcpWoQUexi0COljV4F8N14LoBYcJrE6kSXacXPNt789aNzMLarcRC0CarNFmQntwUSWh1Btni7eIqnsynHTCWRJVz9yQ0xG+rsBlJuLkG4SZa+fL0Tf8Y53HLZRLv3EyVDOTfT+97//wobkSU8vNvHwsE3RYoiOHip065g6+omKB5F22bzFln7VP+ducn2W34OhOmJB3MNA3LU+mA/THtlHXjI9dIyXhRzbGFNzyUJ1jkN9zt4RgWQZd21na2XUrb10PRTecF9ZwLh2v5of5rlg/nBGxt58McbmiLGPNJIfgDDPpHuONN/JJVNQzjPLPPQ80E51q+NecQ78yOvj055f5Mhz5GTF2nL0bHNfAg/AFMfZTkLzfpb3kUVz3H1Kl+6FnhNi5bOF/ndvtq2aPXqmKV+Zeb/2DBYrI56H3/LTzbNMPPU8Gj8PygvkCvN50nMjO/T8m3F1Zlq2j4xLfs8Y8vSn8aNXttFnc8Jzjp3p3nMQIOwZld612zzqWXJ0ZsNaYC2wFlgLrAUeIgvwr3wvfMUX85l86ec///mDKBD8JzJ4yfoEYQTL8LWwCl8Kw4hdW1/BSa75W7LI9yJaDHOFz/hf65DWceTCZj52rWzfEqJDWIOOsJe6ysBOSCL1rPPgA30IYymL1JEGLyEytAlr0S9sAEMiiOiuD+xhvUW+QE4/r1POmoMtlCFb0I7+yidDeXqSJQ/Gk6eM9uEXfUDCIHT0l51gGAFBpKz+CfpEX3pH5kkjQzobubbrSPuNkbr0J0fd7JdMpJZ2kEdtdgiXhSHVk0aGvpMBh2m7so2RdH1iuyWGjqF7MIPJgBl287nhu8Hd5D0kpDfhWnhMAF+aSfjUU08dE1maSWYCNYncaB4cJpSbxAPpN3/zN4+bRZ5DnvLKVY/cn/7pnz4eZpWVVx2x33C+853vPEga7U5ioUVpBIcHTDebUXVjsEP5ERLkvOc977lYbJVOtgde+n7zm988Hki/+qu/erGwTQdxh0VO5BVZ6Zh+2fTFF188+63f+q3DFunU4lCcbDsAfv3Xf/14aLRg1K8Wdz3UmrktwLrZ5+Kqej1MWwyq44GqbOOqDx7ErqWndwReOk47WkzTO6bZgzE70octzUN1O6a91dWesi0sXWcXOtTvB/NOfTB6dTon3UeCORwhYy7ktMQcm3FXltPMCTZnzVFvWaT3hstc8vzq7Y6y7nttKO/wrOPQ1SVbG+lAJ/NvkiH0MveBlDlHe/7p2yyvTeRob72UU096h7R06h47zW+OS1deP7vfu/eTXexbQEgrduDI1cnR6xu7kleb83rONKQSm1Y/+/Qs0b7z+t2bQbbW3gQXPW/SWTvqNid6Y1X7xoO9jSHbGw96SucfzAUAjryeCfzCtPM+E+Zo7vlaYC2wFlgLPAwWyCfD5XwizO7f0nspBlPB316yXDv/eRP/iuTwoozPhIvk8a/S+WYvreEJPh0ecMADfH34QJ60Dmue9ODHlaOPtY0yfHi7u42JstYG1aG3ttV17qV5u6LDIGJpMBxyJoLLdUSMttRXjuwIE2VqC9mjnLVcBBDdBG3Il0cf+qunvD73czJrZbYMk/SBaGsou9f7BhJ56sq3riVTW+Eb5WAf2C0SSh3XYSqxEM5Snyx9g6naSSSfPkglY9/YKGM8rK1gKvJhLGMiTZ1snA30rbJH45ccdsfQJRv0dsUZVDeym91NbqHeYdL1EPDzsK4D+SZSJMYkNZx3+BK8LYomlRtRew4T0LWDHDeWSYbcsIBxg0agWAi5QchQVzk3VW23kDBpHT2onnvuueODry2kJsngPPkeMggfbTika78HyYw9KP1cTH19pNMM6ehBgtBBgOgjXdWJ3BCrb8H2S7/0S8eiLJ30r0PbFje2euqLOh4QGGd5Phar/x7sri3cLD6lZZcWSfQs3Yde500unZ7k1aceBGxev8hAErYorW8eVvSnh1h+DwztqN/BIemHnUtsbd6RWV9n7KGujLnnQZddIhiVFdgzG9JJnew9F4PTFnPc9vzesEBzpXEyluYHB2sOePaYB8p5BrS7qLzAiTzzj0MlozdA0jk6zzjOl1MkS6z89fMdf+YUIDSfhZHhWcmbM3OdIzfXehZpy7wTm48C+frjGWobb/eOZ4Nnj7qV7/6srn8XG7k1ZZ+2ow113WPa8QwE5JSTTp7D+en94NrzQN8rM8vKjxDKbhMIzmdg97jvCikTcAx80Iuuc5zZSL30T0ZzwLXyAS5AMlJPnj7mf3oOum5cjJ3+kMfWs//3xqxfLdYCa4G1wFpgLXB1FuCL4Ro4ByHwuc997vD7sBMfCje0C9s1rNJPn5AL1mV8OjzC/058n9b8dOuFcFVEBSJC3Xx/9XtZB9PDCfS0LvI9VpggOWLtw3LOrR9gQXIQNKWHI7TF15NPX2SMtYd20lMs3zrDmo0ca9F0pI98uJF89hELMAU8pB650sNX6sMd9COzD1TDK4gu8sI16ghIM/ZW1zrTuTaU0w5dnLMnjOk8Mok8Lz4RXeprT1p1JoZir8ZEuuv+2xo8DSNJt9O
JvcNO0hyF6mbLi4xLPFli6BKN+VqiTNiIIAtyk90OIQ8IeQ43ldgN5aHg5gC0TfRAvolhUrdIifRQLmLIIsv3OxAvJnOLfjdfB5kdbpp3v/vdR1st/iMEZnlM9yc+8YmjHLIgYklMV7F6X/ziF8/+4A/+4JjsLRamfp3/8z//89FuC6gWD91QxW6Cb33rW2cf+chHjv6QWZ/a0ZQ+FloYbUfllHVkHzHyLKLrVDdlpYktzn7hF37h6IvtiNI9RI2HB4L2PXyNpYd8Cytj1CHNeCCt5sPRucN4z6B846y/wumYN/4ejsr0u2G2jJyhawf9PYyw6i3qWtgp34PdA9UcjBwyJz38skc2ZO+2UzYGzRllzIXGVXvOxQ7965hj7Jxd6dnDNXsmY9qVXWZ918rP9qZd9/z7LcDOPXtayJuP7Ge82dbc4NCMR45aGenGPdIhpxeRYPwDHeVpvXk4x1pZ95o2JnltzmhLUB5o4bybSwCXeUiXG425OsKce5UVdwSWmkvmWHWUARZqZ8pSr/vmZu3Qaz4HesY1/8mrPefAmfsNkSZmj+yfLY5OvRqQQRFH/T4/2yo/7w/nDu31jJl9kKe98jWRbp5RbMGHGSuxdukcwdxzlByATd4cm6n3nq8F1gJrgbXAWuBBtkA7Tt7ylrec/dVf/dXxqwt+mj/1Mhn28UsL+F0QtxYMC4eH7armc+Vbi/DB1onWOnC6ctIQFNYisFbrjWS05iBbgC3gdus3bUdy0A92meXViYihN3IGNoJVYAK+PoxBLpn9u3nrCnhQPl3hDDK8VFLOeth1GIe+0q2rlA2X0EG7CBRyYNJ+NkZ2P1XTN9fhLfXopH9kwirwSZgzokm/++9lZCBv+iWP/gnqzF1D0ugt3eGcHPKzf7uf/Eqnb0yRoZ/0TG5YtHWSPDLoq699E6n0o+IlhyWGLtmgp+KaQAA+8GyCeSi4Cbz9NtgmvHImsOtuDgNvYkmT50PHJo1DcNO70SZB0rnYDaCNyBkLm4C7fIv3SCD6/Ou//uuxK0d5x1wczEn/pS996eyDH/zgxcKiB0E3nv540PzJn/zJ2cc//vELeS0QTPh5KItBbxGRTnQ8PRBJHjDprj/kKufc4WZDwLCTvMqI9SsbkOEm9dMz+iSz3THiCI+//uu/Pvv5n//5o1y/ezVOHtBiD4Gvf/3rx7jqR4uuHqria+dbRXtI9DDogZCtb7SAnHOK3OxYf7q2oJw2dk6XuWC2kOYwaoc9slGET3ODbcxZ9e0+k47cyn7mNPuIOSN2z0FIz37mg3bJ0R458uigTW30EE1neqejuIdgcy2bsL822ZVtyGvONI+T1ZiQQWZzcNrx9P59UK/NP7aLqPZc4mSBmGydTdmgeaq8+1zsIIOzVRcYcS02DuxqbIyvw7k6xrr7AMkTQOD0IsKNnXFznI59c7rxM2c61MsxG2/9FCcLiAFslFN/3iN0qq/aTL66sw3nc26mozrpJr95VyyNLoJ4HpV3f7FV9m7+VdZ1bdtl6J4DFI1BfqRnTn2pX8nKnskSV4ZsNiMvmwXI5j3aM5WNAFPE+AREjUPEkTY8E8jfsBZYC6wF1gJrgQfdArAxTIS48VIdSQIHIYmsURA9XuzCJe0O8uKWv5enPkwkhqvgJ34ZvpLOp1pXwP7y+G34RHuICH7Z7mdlyZQvhBH4eu0UYCO7muhAjvN26rT2bO0CFyiDlCEXgaIvsAH5kSTaRv7Q53mxIQAAIABJREFUB8aDz6+fv7yHQ+gewUSetbFruqoPV/ZdHjawppVHB3ZkE9dwEPmCPP22TtSedPYSrGmkKQPPhLsQNcZAuvb0x84t+kW40VkZ2Kb+0dm5MdYvoe8P0a92yaxd5Semda0PM67MIfA8GAPjSx6bhw9PcWLlf9B4iaEf1IK3qN9PMQB9E96B7TMhTba3vvWtF0SRid1kMEkKzgPTbuAWF2Kg3ERpAeyGnKSHPLti/GRKmfJasIvdhO0asqXO5PONoBbvysivHN1dW8A88cQTRzk3foyz8xYodPetC4SWtiMc6NXCUbqbci7SlDP509l5/WRDN6uH4+yvOhFKdHYjPfvss2e//du/fUFG1H79J1Odf/zHfzy+iZQd6UL+tL3xQLLV5xZnynRzOv+7v/u7gwyb6fLnDdz1aXrjPsv2sNB++dmGbemJ5e8Bp0xHacrrU4s580hf+y5Ic0ob2vZANobylUPaiZWT1vixnfIYes6n7aCRQpNY0w+LRHWNQ3NRWdceoo4W1vTVpvIe0hyUB+tcbDdO9bPr4ha23SvuR/OHXLo5mhPS5rjrFzumR/pmw4sb9D46MaaeO+5xMXsghNgXIYTU1D/PK/cxZ2buNNfF7KWeWF32MV9KB248C4xbBG73mXbY1bgEGsxdDjf7F5Orbcepk2y8gRB6Vq55Xh31BHHyxA4Onp6l08e5MW/e9HzRD3Nlyk9OZAid9C3d3CeeQWI6Ki+ve3jG0j3T2I49AJlsPu/nngvz/u68ezdbKdv5tF8ymt9dZ6vS9ct41R/nSDz6mT9s0b2ojHxzSkyWc76CDfg240qmeMNaYC2wFlgLrAUeZAvwe3525aUqsgA5AscKCA7nMC9cDEPz+zCAa3gKVkW28KX8JnzGp/KxCA5EiXM4hmz4DrFhHQdz8OX97Avm90KYTOX5aP5bnM9Wl4/uo9F8vbLSYSAywznSOtTn52Eq61tlYSfECvnhg2TAjdId7AJLIMzYAJ7RL+tNgT5spG8Tb6grj63E6oXh9Fs60giuJY8+gnpwrRfV7BmxFN7Rd3qyl/b0wa6uPnrN/mRpQ//Vh5cRQ7XhPMJIm2Gx7OW6Q7vS2WgG+doX9K3yXSf3+ypd4sUSQ5dozEQZ6N6Mezg0YS0uPAxMVotchxvVA8HN3GSai4F5M7j5McFNDuXcVCakB0nEyDzXpl02vnpvgdaCyA3vhp43uLTnn3/+uBFb3JMdESAmz03lhlC+tuS1qKOXoOxLL730fbuQ6KscuS3ouvYQc4NYREmrXW04XIt99+fxxx+/IBfYpYW+2I3pcK4/bKZufWlRo50WgcpanCmTrMgwsYezh7SHozGUZowrw5YO188888zF94bc0I1X8+PRRx+9uNF7aMwxde7nIeV5mBmnHqbS2Yke2at5Un+Ks3cLYAvV0owx28x51Fhkq+kItMtZIKR6WNGBHI7O+LEBWzmMgTx2MlfcB342OO07SSR99JA1VulLH/rGuGdDMX2m7sr5HpT50Nxih8ZbX1yTbY63uJffebH+ux+NJ6dAD0d21j55yayd9NWXCGB2yCZsIGhHG/rfPE/P7h/j7Vwbpc3+3+65fngOdV/0fPL8aQdju3qQPpxqb5/YncM2vpy/8eU8exvlWj/JVs8hRCDrZ3bQzxweO+SM2cy8JLNxaxyax/WVTYyxukCUcsbbOGX7QEQ2E6tD196Ekase0KB/6uak2YcMYxOYke+55/nTHEo3OnWvns6jWbZx7N6pT80jdZNZWn2Yz5HS9MlzuDDnjfIdp+01r9RLbjLmt87oo9/zOY
wkJq97RmweezaaT+aPsQiYmnfa6PlLpr5tWAusBdYCa4G1wINoAT4WtoKrfIrCz8f4RtgC0YCU8J/B+EI4Bs6ALeAoeAn+gBnhZrHr1hg3iuU7EC1ktK6D2eAaxItzh/UnAkeQzj8rHyYTS+PD4RF6wUnt5smH66P2BH1SHgaIXEkHsuAU19oV6xPcIGivHTv6Fo6x+cCaQnlEDdnwBH3YTfv6DHuwZy8s6Scoy5bqi/WF7F6MaksZ6xE4BtZt19Ah4DxoQ33t0NP1xHuNRXn6Go4Mf005yut77cGg5NNLLH3WU7fr2p1p8q4iLDF0BVbtBjPQJqPgZnVjeVC0k8iEBphd+3aGCaPuXCgEwMlxDrhbjLeAANxbxE8AHxDXpgVOhIZ0ddw8JlUTXbtN8q997WvHf9lK3ly8R4R4YH3hC184+73f+73jRm0xIF1bbkTy9MUWyo997GOH/ulL587FDnXrZ31qYeLagyHCwcey3/ve9x4y6Seeiyo3mmv/qaxvGCnnoG99cu7wEPIg144HReXYINlsxja//Mu/fEFSld+iTn/tVHr7299+8YCTRp+OuZjrxm4cij0wOjePejj00CRT/7Tbg7p505xTVpr8afvsLe7hr4y+V06ecw9KfUQIKSPdwzq7sVMPQu2ax/Iif6a9q0NuD3Z1PXTJVlbsJ3vpqE5zpbR0FNf/0hBq/RyK7OzWGLIJu2q/4FpZofH4iZ/4iYv+NhebN+njA8vzvokAMe97BjhvHOeYndpa3/SFgxPYUlpzlj2zReOdvs2J+lMcIcX5cezsHCENiLCT51GOkv4csHa0obx7QjltsGF2dr94frnvI0rpi9yZDqz5495m2+7vaTdl3BPaRb4o2/yjx+nY6F+2bA5UPh0b0+YZOY2jOPtLN7fZqrFyvzVHAJn63FwT96yZ97W2ECeeUWzT/VlZ+ZVvjFz3Aer6JJ5H41xsbl4/32XV/V3ZCRKa9+o0HsXygFX97NlSn3ubR2b2ai6LzXnPA+PeM98zxDz1EgDRjBzk04yjNpuH5pdxInvDWmAtsBZYC6wFHkQLwFswFl8LX8A1/CDfyY96ucrnKhf+igRSzssqPjQSR1lrAn4WGdMaEkaDTwrhHlhGnWLnAt+rbYSQtaEXftrgy+GUcEPYQB14Bq4ji77q65N2wxR2MJGtDIxJnn7x98rAKsgfu7z1jT1gJBhDm9pjF3nqk6de/52NvnSgM7nwsHrKuPbCD3aBdcOadIk0orcAY+on+cbHC2Jp4aCwDjn6A+vZtIH4kqcPdNd3fZKmbh8MNz506mA77QnKIebamU+2cnTUB/gom1affOXmeBzCzkP97/oy4yWGLtOa57LcqG5aCy4TtTfyYpOwrXwmghtd2SaFuiaAxW2LCpOixY+bw7mbGQA3cStnQnrotHidC0p1IitabEbwmIwd3RRuLAs/P6+a5boJengo9zd/8zfHjiDlIgPmAlZ/3Oza76dJLbTEDg8Hi8P6VZ9amNQvcWnaUF59C9Pa1hd2bHHu/POf//zZz/3cz31f3SnbObt85StfOQgs53NXi346yNbPv/3bvz376Ec/epQpTzvTPtp817veddzULdxbFFrYCfMB0oPpdDp6CN+onDShReFcfHaubT/xcl0fixsDi7p2Dc1y8tk74suiz/xjG+PY/IqIce3haE6TY44YAzar/9J6MNIrAi7So3HWlvunhThdnLf41wc6mzPSzQVH9dlMGfebui2gs4s0eZF38rvPsotrb3KyrzLuUw6mfvnJZWRu41d5jqfxrv/0NW5d1yf9oq+DHTklfXFe+ozZJqc173k2kG5hTi+x541nTw7buTmr/8rQe4Zr57vmmst0LZ+uzR1t9i0yBIU3RGxVv6oX4OAIgR3lyOjeJNv8kEcn+ruur427toXmvPOeEc0RtiWfzehBhqAN9dyb8rQBEJhzySercTNHXQs9X+e8mTqVrw3nPReTV73Ta7Ln3Oj8tDyZNwvyTueX68BCz4x0Evcfy4y7eTHHvrGrvWlrddkvoGWOGCfXlTMOwBn76od088jclma+ZQdjfau+3azPm74WWAusBdYCa4F72QJ8I5zlJSmsDKvytV6q8IP8qTLwWdgJprO24De9nIM1w3PWiPAMUoKPhZfk8eG9iEMuSEfYWBeRow2hdQlCSdvaVaafR9FB6D98hSuqSxZfD7t7UaYN7UZ8ka8M2fRwrRwCRRt013+HvlkjkFcIt7Rjp7rspS7Z5ZHd7iJYTJtkwTTwhj72naPwJZ3YPWIu24Rf2jUEn5BNP+MHL0rLfvoM04ZrtK2NiC72Vkc6OXCv9QIMpI/6Li3MpP/KkjeDNC9K6WvsG5/KyCfDQT99MzaXGZYY+gGtadKYWL2Vd4MbfBPLuUEzuAZObAJblLTQMsjOJ5nRglf6XBSr1+LMJJVvYtjBYRKZlAH2gHyxm+DP/uzPzn7lV37lgvhQPrLF5G2hI7awLT8dWozMiem7O/5jlwdVJImYPvqq38gH/1HMb21beGtvLojVcT0XvS0qWjAW6yPZvhzvv66RL6+FHxtpJ709VDzE2EqespES6jinr9gDU5nSxR1sZTzpqtyU47yx0a6HgAf3XHA5Z8O5iD692V23mHXjexD1UO1hwP49vFtMNhfI71DGPHHdAppN6Td11Zfyr736c7o5No0VG5KVfbWNLNL36iMKEYrmZORG5EVzTXm6mZPTeTWuymkTsZYe6khPBh0w+exDjx7S7qNs7oGKgEIc1ufmBH17y5G9GovGaZJT7kkOsTHggNxb816YY8cpNkZzTJzTt3v7Ro8fzwl6d6/o+7wXzEfzP3twoJ4/5jBAYZy07d7rOcRBkWv++gkScFCdHGR9o5N+Fae/cuwubk743TpbGDv3hX7Vb/oJ7k8yejvWM8pYOdyfQFHOeN6/bGD+N+ezV/PeuOincgBBbZCrP/UD0d28yq6NQXNXO4K4OaAd5Y2n+s7nHFI3/bTlmONbWXE6i2cdbc285oi4uVw/XNstaZwDLD0vmotTdjaw03T6hurqb21k267VLZDdPFCXLDroP93T1RiwfXbI/5DDfuZu8+hC+J6sBdYCa4G1wFrgPrcAn+kzF16cwaZwFmwjwKEwZcTHN77xjePlZi/p+Ez+FNbgX/lRGFXcGiBShV+FafL9fDESyssf/tUOF3LDUtoP31WHn4fNBJiRnnbrSCOfTxf6uZjy8L58a1y4C4bk/8MZ+kBnek4sTpYy5EpH1igjhgvCURFHlVOvPPW0zWbVh6sEukVWwcb6LU2b2c65NDpKQ77AnHAxHbSlDfbTfmOnf9YasI30+po9pZ1iMOM3y2ozfCY+xVxHJ14N8vVBLFRXzBalw1J0tG697LDE0Bu0aGyemy8CSIwMcJhIBk4wmAF5k8lNG3ES02kytkBTp8VLC7DTa4teckxaN4QbIXJC3NHCWkxXNz59ym9HEH3cdCa4CY0s+Id/+IfjP3YpI7+bpoUHPS0GP/OZz5x98pOfPPK7SZr8LSa0+Zd/+ZfHh5lb3NOhG6qb7J/+6
Z/OPvShDx2ykkdm9kmH8j2gyNY/NwhdI9+qo/6f/umfnv3ar/3aUa4FdfZqwSK2W+V973vfBTlEXnIjvrTnI9O/8Ru/ceRJT1f9KFiwW4R7CHTMm125+i29h5j0yiWrawu87F9b2VjcAtP5bFt6cyln4dq8Mn/6vpP+Vm6WV8f3mtq1Zu5FmOXE2M889wBll+qfLorpoh8f+MAHjvZPSZ/GSJ+NXQ6quWlcG3/6csDaPB1XOjvoqR1jkCyy5xzlbORHIinvZ4NzVxE56tCDU2tMGlv96oHvXOi6MWI359Wp//pM18ZEW+lfmtjcU1efte8+jeBx/7KHfPr91E/91OH43PeV6Xljd5f62mdPfUqn5lH3lR1u+pPN6lfl3K+f/vSnL54/6hXI7Y2HdPrlNNPF/WO+5NCBFP1v/tBP3XQU08dc9EZKnkNfxN0D6eA+kSavt2L1rbpic00f9UtcnWTSxzyhq7ZcZ7tsUz352jIm1Rc3D8T0OrW56zk/mj/6orxx6wUDe9W+vOZjupR2s7yeH9kpXZIjvXsu+9BNm8qyz5yb7n2gtxclgUZl2QyI6dmTbS8myp6sBdYCa4G1wFrgPrWAF4dve9vbDsx4/dV/qNGObf/xGQ5DfHh55WWYl2qwUPiZ/5zXMK7rfPzEvXak8P8wS2suOAo+EVqz8cf8b3X5cX5fHTH/Xnl1rAXI8PIavlIeHlBOjEyR5hq2pDOCCA7QTrK1ZydNJA48FC5BHMFGcCEcS0e4ACYI15MpD76FBeEOtkN+qWfdxzb+G5t1DtnKqF+f9Is+2c+Y9IJPWTZQXplJ4LGtto2XtYmy4SAxPcihs2tHGIse2bW0cNXN4tPprly2kue6IH1en9a9rOslhl6nJU0ygNcN4QY3eRwGzI3ZjWr7oPzKuomRKz0EulG7qe28mZOMWm6mGx2BcfJsVzRx3VQmKgDuRooccd6kdv7FL37xYLTJ8CCQ1qGuxbWFh+AB4CacZeTTSXstYDxMZjllyG+B1sIAURKJ1SK4xbzytfPVr371+I5PN+S0i5tt3oQ+RP3UU08dN38307wxGw9xu0u0rV03vTb1my2dS+tB5cHYQ4U99INO7GLBx8YtcOYiUr7jhRdeOP4tZTcyvYTiefN3jvHX39MHTH3yMNaXyqdT9q5tsQch5yPQW19yPJVvLPVZHQ9ZeZESYnVaCCMW9dVcb+xOx/JLX/rS8d/b7HqJgJwkXPY3/7TZHG++iPXLGxF5bcdtXtGvflaHI5my1HOtbN+P6rfBkT//8i//cjzgXWvPoV/Zywfb023atTlQHdcOzo79Cj3E6Zg+5FRfXLpzwdgWyJKevcTK+7i5eykixdwg19zwpoozNtfbdmw+1ab69Lp2vjNMW+ZE87G+slkHXcrXX+10P855arz9XNR9oRxZOdZ+XkgHOtdP84qOypHl/tOucW7+BYyyZW16vnLgU1dtulZHOUE9gS319fSD1afzqPJiz1bAwbO+udqcatzE2prPJG2p1z015465wA49E4ql6ZM6zju6DwAXerBVvqNnU+NTf+le/6de0vI3PU+aw4eRzkP971p5OoubQ0jd7BmQ67kiNsb0dbifAqna96xVxvOAXTasBdYCa4G1wFrgfrUAPwx3eYFmvcdP+gczMBo/J4Zh+T+EBj8uXVlEi2vnsGhEBD+MPJLeCyDYRuCz8+VhOHG+3Q4fJJW1hLJwaYQPnMyfCzBAdVw755NhOPrCSupWNpyjPpzCj1vjJjNMQY4dPXAo/0+Ga4Ft1HftBaD8dh/pu7wIJbgBpg2Xar/dSGxChnWbTRhebAvwSD/Zgie1R6/aUx6+DCORF8EFmyhrPCe2Ura+sSN95cOoyZk4i82rQ6cwnjIOIZxVuXnd+VHwPCTvZmWuAkdBz3+YAhu/tgUCwBE9blY3ghvYYszhBjfpHSadm8fEbAHdoseNEEERkC4mr11FtWnSN2nFJr6J5uZyM7tBxYFxN1bkDx2U1aYv5NPfQSb5ydaX9CNPiNlWvgfJvBFafNtpc7pYMZlbfNDLA8VPjZTrZuuB07V+sVt9KVa/vupX1xbwPSi1d3okt2/XkIfI0r9keCj03Q0PFh+ErY1sqIzDtVi/PZDqX4u5Fn/64Xs3iDj6ORCAN2o72TkJ459TOB3/+jrjbFjMBs49yCzMjVs271w8F3bGt75G0pkvDvpJI9eDvrnTeDcvyHPOflPGXOxHdLCzXS3NdfO9fke62oVn/NjMWGV/sulqDB0WzIIxsCtHvxsXxA8Z7kttRLC4P3MU9VEb5HlT0TjNeUhfjkHdPuTcrhztu88BAeOoz/TNhhFprjluBKz+OfqZl37nbLuPfYCPDhwk21qUI92kmYPGl92Mi+cPeZ4jrjln/aar+a0duum3eTXvZzbKhtlOrE/kOje3m99sLXDkgBBw1FiygWB+6LcyxmzawzjQw1gZS3aRRi91xOawoE15nCA7uFaPDYwH2c1lfer5lFMmw5iyKdk59uqIu2eUrb463RP0m7qRMe2nXs5bve4Ztqs/6lev55I65pP5lh3cX0Cj8WVTYzbr1q46yRN3P5LtAA7ZiLzqz/a1XV9P456jbGiss6VYnwJ5xiKizJgg1JsnxquxM9bVIaM8dtuwFlgLrAXWAmuB+8kCMJXPWbz//e8/sAAsaE0As3mx5Lp1Bj/Mp3qhpawgLX8KwyBj+GfYGPbhO+E5ft45IkObzoWwjrL59dYnYRu4CUYkA86AKcI6rSHSS93wFN3589oK22vTQS+4jvy+P0onsuiiP9pMZ3oXtAsfySe3zRVsUZ724RYhHWAU9oRlWq+pE2FDf/W8cISjI9PIgKPgUvmwR/hGOnn0CePAydLIU1a7MB09WwfDPAJME1ZNhjR4RxvSwk9wuf4Zd3LpGK5XNn2luVaPfcXqwZHs6jr8BNMqc5lhdwy9Tms2mUxUgwOwmyjiFvUmu3JNGoNsgRZw7wbuBjPgyS1WVxvIjBZD5Jg4Fr4mgnNEy7Xzt/9NFGkmj0WMCUQ3cYtT9b785S8fH1luEUafFgXa7+agA2LDIrSFuLhDfTeQ8nT1H8AsGObinS4tGvTDdSRZfRB3yHfYFfGLv/iLF/1qkaXt7Mhujr/4i784/vOYdsiZfSWrNDe1B4Y388p2k2Wv2QYd2bWHYTepfnZDPvfcc0cZOqmr3RaR0swH9vHwZCMyPJTo3KKQjefYd11cvRuVk0aOsTeX0ot9pPUw8jBsTJu39aN5wx79nExetmw80llfvBnpAS69eaxd5dnMz868dTB3sn+y5nh72E6b0sd1R/2gD0IkvZo75rvD9bwPKlseG3sDQye6s1X2yibS1ctW6dJ9RxaHLSZHuqP7sxghg4wp1L/ZnnkiaKt2GmN2RKwZW06Vo2Zj6f7Dn/Ly3GsIpogp87vnUSSA3R3GQ935TMqudHrssccOPcgUd25s1SOL7ZTNntnfnNdfhCcCmSzzjV7q+pesxl0w1s45YvcFfTh3ujjXPgCjjL7VL33Sh+4ZZf2kyj2nLAfcGDUm85manZtTxob9
59g57x4SKyvMsaWb/s5xT6ZyQvey9OTLa+z1lX3rb3UQOOyij6chmT3vlCmtudrYuibLc5/9AQ+xPnUPq5+MG7WVTunZPd48JUv95iT5xsc80a5nRLvF8hv0M17NxakPuRvWAmuBtcBaYC1wP1iAP/Ozsbe+9a0H1uNn+UlYDUnhe6rwDgxg4c7HtdsXiWJ9IXhhxyfCMdL4UH4XgZOPhLmsW8i3doFB+FkYyVqTL/4f9u5uZ5ulKvf4eyivByBg4kSIgoiCRt2THRMTd9z0INaZuGHQxCOACZGpGKNEQT6356Gs59fr+T9cs9b9ThY6P1jQnXSqu2rUqPFVNUaN7rtv/fKjaMtnF0+AAy+pog6dJSzEKsUW+orfqhOfbdKkuAH8vn1d0knpKH7Cs2QImZQAav8YjeTkmgwbCy/kQbYSUNrJURzlWoyBFm3wqfOmFNnCp578ipPEKGRI3sXC+hS3vPv8MzNxoT7q9UeTgw6S68ZaV+PzISZFL7k2Lhp7ILZxrvZ0k0yhqR/5RWd1xXqNxwbItRc4hpT/8eWdGPo5RdjGSTfGwugpkEFlZD2dN6kYVkF6k09J2X4feQbs2tZQTObd9BiT4SudFhSTqZ+UgUVPeOBanK5NJJMabAmkEhvuw93Gxl8b+90sXsHhtQ2oyepQSgb8xV/8xYW/NwfKrsazRNb3v//9K8OOduMpSx7ghRxlVG2KTSZ0gOkEu/c2kGRNNxaOcIIp2dMmEi9oa4FN/ngwVud3v/vda3Puvs1fbe6r97M33y5qA2QRIB98kInXHP/kT/7kklEbrRYWdV0/KtWdfaq7ED4dNv4tHEu/Nvf9lCyYNqxtbunaNTuxOPbzpV1UtbWIwfPOO+9cC7C3RNZ+2V2bRHgtWhb3EgrZVUkF+iFn36hBaxtoZff4d63MwS7t4VTSQTZrDNfqG19yFoy5knzSrbLNe9fuTzk1Hh1LpAUbT9mxnxH2DbDs5bQfujl1h09Pm7wtwoacnCD7RQ87l6CwLvRmEJtjZz05MOfMISc90ZGz+YGn7BdtnmAl52SQHJMdXl3DScfhdE3P5pMSHkER/PgA76d8+mqD19w2R83D5EU3YNWDUzrVWTvUGRNetpheJKXgi97qwbb26QsHGuIvu88+agNHJg66yT4WP/nTx8qoOZIc14Yas7rsuTHMO4kyesRvtLcmBOe+cbIbbcZ0Wgvgae1tbtavPmepb2O57lCX7ODCR75EqS57yN6yWbx0oica4Fi5Gcs6sOO+EHBf3BK4JXBL4JbALYFfIAmIwfxKwcNBsY6EBP8ldhZLSYZ4gMjv8fX8oOvisvYGfKY6p6O9Tz61fRxc/Dp/W9JBHNu1ZITYi48V73Xkc/lqRw9h9bN/szf14F/sVlyVfw8enPhK7NI/nuG1/QBa4YXT3k48Bm+xONj4KollTyDe0wce8MbAX20leyRzxLpw6yMJghZ09sBQkgsNYsn+lYzs9BEjNw4ejSGetkc3hkOsLRFHD2Dh9iAUDLm3x904Sr+NWeAoyRfebAGdYtkz7oKjfy9zXft5TUbaiku148Uhdvowjjsx9HNKtWCZgXUwJsbZJFeWOOl3ogX8BeuMjyEpnTuJXXcwBga/m+kmb3ASKN7iyPBPA+y+sRja1772tVdf+cpXrmQPQy6hsUklNDB+k83mHV/brs1pXHSa3JJDZOFsU9XGSAnGprk3SRj2JgzwaSEgL8mr3uBIBmjVR1kCyLUPW/vOiU2HDTk5J5/4V4d3CzvccOobTnp0bdPvlAF+/fw21m70jOFeSR5kaAxySBZtUPH21a9+9dVf/uVfXrJqQSVb1+pOOrcOXMkf8Onedbx0nbyXVgkDsu4IVplculbiWcIiR7HwxrbIswPJOAssnW5ypusSd70Rol4dmadzeNRzBnhG98pw77MD7WSrH1xdK9lWP+1rox/cjmuhX9tMZ42n3PHCEU/ZIkcX/214wVbHiYUHf80D+N2jV/KkealkT9YOb96YA/7LRxB/AAAgAElEQVS5wrjWCvZJnk6BwOqQ0975uXbiDZL02HxdOafv+E8/SjaiXv/mlDr36CN3uNCDv/CzN/zDoV29A436mmfJTNAErnE46RJD5BENrtHgNC/dCxzQIFlDr8kfbuMYt1IAhGYBg/beHHK9umlerIwEJdaEEzaZGScbNl72VQCUjTUP6d26Yd0VSJpX+HFKXKIxnMkuHSrT78vEfrpQ7wj+XFfiq/b6qj/7nHjTd7rv+06tl/RF/mTEVtkw3pTODvIrYcSe6T157Zj39S2BWwK3BG4J3BL4RZGAuENc5ruh4g2xBN/Fj/lcgXinB2TiY35ObGsvxJ87SvKIl/hF8Nr4VXFdMTFYdfzsxgbtH/XNb4vv7HmM2U+z+FkxR7FP8UIxBlqjt1+4iLv4c7FA4/Dp+BQriVfh4d9780icU8zquofiJXLy7/rBqb3vFxmvpI02OMVL+hbP6UdW+Ov7QujAnzebwNmH6CcWRDuZwd3PtopZ8AGXZBGai5eMXZyZrakjd2Vx1MZfxqRXcc/G78VYG6NVt2X6ZTdk3rGx2fvFZNH00vEDvrgTQz+nQE02SrE5FND3W0RG6NrZRGO8JnsbAMbHiPSvzEDepGjGsQYJPoPePm3oTc7TABtDCV+nRYjhV78bIXhsuPBgMvuHpt/+7d9+6RvsWXobyM/UCvpt1No8LqyfmPhtbptsC0iJHotRE87rmug1qZvMaG7iue4ka3STP3ydJX6U6PIB7j//8z+/xm7R3QUg+bz99tuv/vRP//SauGjSt6SR+xZN/6SGF7jwi1bX+CbjaE/Oq0PXbdzXNlbn+OqebtGX7lfXbUbpLHh13tjw9lN9s+GVIRpzJr2ppW/6KwHj3snpSTjBpY2skzPdJUN4XS9v9AhHMuIISqCs3bUJV0pw4SG8JQCMnZ2m+/gAs8kj9b1hYrz0kdzCA64nE8kgOcAXXvqXoEgG6skhGWnnrODI9tNLzgQd2XO61Ga+kac1hlP0VIWclOybI3TdGyLkLIHYXCDj8HLkjYvnZBZPfgaWPNCgH5rhULq3vrgGhz518ZQcs8nGMk74OGvwErL6sh+yjOdk6N48Q9s6b+36JTdt4Foz0ks85ejpsjH0P+Gza3CrJ+MYU5LGHC6ZlE3ql92Ayy6TwaN+4LwdZD4LTuBt/Ulm8CTPvV4Zg42nE9a9ttaIE7b2a5A5orsxFwe5OVov2Bpe8E+P+BCsaXf85Cc/uZLLbN88YscC5AJoffCtH30ky6Xnvr4lcEvglsAtgVsCvwgS4LO9HSQGFXfxbRIxfBtfJ04TI0hWiBH4TMkD8RmfKi6xd3DPf3qQJ8bYn/rD580XfpR/NKZSMslRjOEafvVg7S+NDbY4xZtDJW+KAYwnLhJLq+PD+W2+XoKiN77Rhlb1+vDzYlnjqxd/iqPjVQxcnMWni8XErD04BsvPiwHhNqaH/70NRIbFa3D3YkAxDr7EjZJA8KMlGXlTR71EWT9JQzMZK/GkXlyOTv03riymAVv8Qr7Fmero1oGHfi5Gj2A
cyQls58aq1QVbrHV1fjrS646vjn7I9YwDk0v14fkgyzsx9HNKk5GawDZbTkpyL9BvY9i9CdvJIDO+DIMhdH0aC7LUUf72U5ehVc8ITRwBdxuskiImaZshE9C1k2H7R68vPP1luMPkb9Nzbn7wZfPo37/Q04boUan9Bz/4wcsHl+NBfXgtDDZIX/rSl142e2jYCYAPi5xJnAwuQp+O5V87/pW+cfTFL37xGqdEhbZku7RbOLZeWxu8xtFuUWgDpwwfWKfF1+JTkqPkQpsdtNgQ/e3f/u318znjRD/8dMhedlHZ8dcuzutkqwwXncCZc4pOb3054rFNMDh1q8uSBGtLa0fwcwp9a8jY+pPB4nL9zW9+89WXv/zli79ozG4rtZFtOqDL0xbh59DgNG6JLHDRrv/q0XVJmhIH7MI1W3n9nNR5JAM4+8mZazDNneYWO/YtrGyOzkt4gDc2pySBFm3pqtK8yoYl5DhGgUe/W+fwyFVbztha4y0bbZwtePf65GB3HHrvHh2uHa4r/ay1nx2y8eDoiG7IQF3BQ/3U1ZZtcZ4FRugjJ7KjM+sOeHh7U6YnUep6iuRpDJ4ED+aPvs1z8ihA0GYdtrbhH07y8KamnzqyLbyRi1NgQ3/wK3eeZgfZcHLKvtYm1+ZWvpdAn47mBFz1S04Lg+dOtHoiyc6z4/qkj/QYDvASv8bZMxrAw9v9lud1OLdvdWQPD3oax7UDf+x+26yLArZsqfVDEpwe2APatNOtvvDcxy2BWwK3BG4J3BL4RZAAHyUOlBQSc7gXQ3jThx8Uc0hUeCAu6SGusBfjz/p5kthNTLJ7FQkN9+IVJd8Kr2uHmAheMZ94x9EDmY3N6qvduGjSR1ykn+QSvGjgo8U7xVJ8r1P8IRYST/LZfl6mXrII3R7i441/loxyLd4Sn4lFxbhoNXZxj7gPHZI82tvjqCcbcL3hVIINL+rRXeJNPEw2PYCyJ+ync8bThu5++uUBFB6NA87+uxc18Il3J/zJpvirPnjBq3uxpbFLoBlv47XiseqV6hyunQ5124ZmY2xd8Tj5sAs6dHbU7r64mww+jONODP0MqTJWSrShYUhOhlzyx4QyOUwUi4KJ1BtCFhLG1SaOgWU0lSm2YJvCM4CubXAcvo/DmNrsmgAmb/cytPutIbS1mUVfCwJjLANsE2eRQ1sbGHhLcFQH3lsM/TQio620+BjDpDERTe4Wop7i71N9tIFp4izPyULpGz5/9Ed/dMmkidZkJBPX9QXfQtnEMcnasCiNqfzWt751bej1P2Wvb3V4/p3f+Z3rPtmvbFxbdC1IZE0GTnIpEYdXB9s59X/yUHv18Yd3dC3vF9KnozolGttsoXnlunDayCFdK+lJ/3ef/+YeDH12hMu9NkkfHwgHszqIJvVo4UyMBfeO15jG9XpuOIJNd+qXDw7QkV7jpX7Gqc1Py7K7ymjgBMO7tp+90mk/TdMHLzl3vJEnvfRb8pO/6ODMsid0hwcu81cigGOzznjSAp+TXaFFPSdnPempk8CCfXEgrS/Z/trYeb02hPaCA/MCDcm8Mtnqp251kZ4LLsKFXrSifZO7aDcvtPVxRYkma2sOEoxx4HJYS+Fz4i/5S3RaZwqAzLFdXwpk6IkOycHPt9bOXZMrOWYvyuw5XrfN+gc+fWY/yWLnmLrgVq7qsh18G2/XtsZlC3hc27mE8nSkV9fBV4I/j9MOtAsAC5SMv7QuTvWnLEoi0gd9ZietkQIvts026ZCPpB921ltuzQu0JONHtJ+83Pe3BG4J3BK4JXBL4MOUAF8lLuWHJTj4Mdf8tQej/JjY1p7PP7KKUUrA8KvFKmKZ4ht9i4uLZ5TFAPAVW3rrx9u36OjbPXyos6O4QVn81z5M/MUPa7M3tWcVX4nz1JVAamwxgj3M6+cHppIh/YwLHn3xYp/p4Ovt8cgGjWIVdcUU/Dt4MaoHRWDEXMYmP+OBMb74AK+9UVTSyJ5JjCcukGASNxqn0nhiYTQYCxz8rotpwYrl7Wvgh6dEHT7A4yk+7HVXR3BtvAZeu6P6dHZVPtefsWCwZFHiZ2GK0fQvrqyu2Kx68VJyacwPsrwTQw+kKdA1yS0EFMg4Ga5ri4F6xsNgbC4FvwyjTQwjMznd9zQfDLzgNoDfABwpDIAxZADq+kq87GVJoDa3jFydjZGJjj44tLdQwAFfwXu4GZa3e/AKR0mekhvKFiL4bKx8hR8vFgmLgsnXOOg2aS2OeGW8bcTgb7OIVvL7h3/4h+snXR07GVrwLE4WyyZGm782ghYydSVlPD3vo77J89x0GAdeSTv0lcCBA96SOnRnQcRfi1pJg8pwe2LgG0fJo8U2GsgL/+khnncxeBHE00U2UHsLCBjXkoSN0UKlPBeSNqQWUAnGbVcX3soWuJ5OPNqogQUHpqQPOe7GPDtVx068hQRXtqpc2/r2t7/9koRJRpX6ue6+f9HCGx5KwKx9B2sRbq4sTa7pdnGAa2Ob3ZrjfYQ8R4/3nDp9Sx4KGKIjmWezcLFJNCVnujKf9Oe0ycI8lPSCB6zAwBpCzsbk2MCSp/nT/CsIgWv1yVbg6fC2Ru3JKv7RSEfZc7zory67WTvJ7nKckswOfSVuyF5JVmhk/+YUOrMza5qj70xZa43X678CE/gbS198k1X8mq/oN3fTcX2Suf7w+n27/ubj2s7qbGXmOhkkq2CDI5Odg81D7dlAeOrTvFp72PkLrvtkX1/1DvXhdb/07HWwV6fnw5tqm2xLPs21+qw94Tv5st2SpmTdufMiPfEX1tYCSz6KztgEHfaQIPzRvvTe17cEbgncErglcEvgo5CAxIRfSIhHxBb2eT2IEruI9/hPez1xDV/Gh4rX+EyxRXEBevlHvg7c6bP5XDglbPKfcIl9xHuuxXv6l+hx3R6r+AreYlN7xuLV4gi0grGP9UBMIoYPF4P592tw4YhG/p4fB4MHB3/NV+Ol5A852QuQi7hPO1rEdb3hI0GDfvI0Fnh7VjEvWPzsG0XFfui1R0smdKMNTcr4x7Mx4CN7+I2N/nQhhvbASh997evR655MSxCJPzeuKQ5Cp3plMSyZJONiNXX1cZ08PQQ2Zof4uF8e7T7nBeD5ojgymOL34sgT/n96fyeGRoIMkxHZvHQKYrs2SSiYMhhgT+otEBYCpYlicdAGV4vGBs6G3OB3ryNnA/LGbCNdUsR9hogmEwFsf3Vdv/Ar11hdC9BNaryhP3pNFIbP4KPX5CSPFkH42iy1kYbDRPubv/mbV5/73Ocu/L0poISjxUYCyWRefvDUBs+1SfOd73zn+jmSQ1/tJa/Q6bqJ78PW5N5GEY4SPpXgLS4+lI1GNGuL1ia85JHTJkimucUYDa6V8JNHCaQWKeV57s+q0vOW6afS749bgOjByY7wSmfVZQPpli3gv8WjRRo9YLKH+AzPLm42jhJJcFTfoohffBvDIty3hshjZUxP+sro93H0aGw+wAWP+UMn0be0u15bZjcLd8LC5c25+rhPV8pslWPo20ZwaIuH4MEYjwzrh/fam5PWA+M5wcWTseFmH8rmEr
71yV7YnrWDXq0fJZnNafbs7ImGvuf60nyV/CFbcl/bcW0uqXckk5UHp9kbh/BlJ+ls7UOdn2iZv+Z7ayE68fX66amTf5z71Kc+dcHAFT3ZfHZLttYs85Ic4IofMOaa+5y1AI1esk24jZn+kj96my/GFAQJPEoIVYJHQ3aWjtKnUrsgBo3Vrz7RmcxdWzPIgq7w3UcG9W2c5JtczXfrL93jOT0mr6UnPZqnxln45Bp8sOnRm0Lk6VjdLlxtKwvXaGo9of++kdXasPPaGOjKDyrBWRf41OYGOsOrDv77uCVwS+CWwC2BWwIftQTE+v6FmT8UG/BVfJTvnRbr5G/5dz5bYsXbxA6+DBw/Lmaxv3DYO4ldisXEEvnSfHYxEhx7rd3DYAf67KXUiYPEbGJnySOxtvhJQqQ4Pf8Kp9hDP7GSGE28LiElhpcU0Q5OXIs2PtwYEjx96JmPlgAyHhhxq77wuTY2vovPxL7qlCVu4HDoJ67SRn7GMTaa1PdGEdmRF5mgB7/JiJ7EmOrAS6YVb9OTNg8EyUbsoR89wAkfeagztqSNumI5/Ivl0G1c/WsHL17f/Wfx1MXc85Fu3dIF2ZMNGbgnQzCuK4sJldoXVl98kRE+PozD+1D/68NA/P8bTpPbaRKbWE4KMLma/Ayv02LR20OM0LU+FpI2cuGkRDgYWSVD7HxU16LAUNZIGI/jTaUJaZLrUz+wC78G6Bp9DD+8+O5klE73PZm3wXKoz7C3jzr3b7311gu/+MF/E8cEk3TBuwREMiITcNFu8SFrG8eVifGbcMura4eFAM7OZB0OC6vxbVzVOdDcJNzSAsApJIs2L20slejkTJJHsMkObguUzb3Fi7xtrCw2JndJKDJ2WrCzxRJ27IlsOuOlBbKy+mTonqyMxVaVxq8+e0ie3aN1ZQ6f+y1dSzgsrmwN7w735gXnUN/senGhjfOBKztoztAj/s2tlUeb7/QbvsWVvE6c6smerewcVA+2uW/h54zZIWffRjfbAu9JgD4CArZQMpQuS3jAYY0o6QOPRAVHaP0wJ7RzljmC1hvt4N99St6AI4Po8KZONgNu6Vwa0Zd80iP9NCeVOfHVAceIBzaJVsknvzunX470155+ikqGbIo80c/50TeceEJf46DBmW2Ag0tpbYFPKdBwLQgxLpnA0/XOUbJQj9/0l+02jvHRRzdg0UjHbLOEeHZQX6V5oG/864vXXev1YwNwp0d8C3bIXV1johWd2S4bgLuEoHpBETvCb3YYbd6KrG1tsSdf+E9/xj7ljvaSbAuXXiq3X9fmcqc1TRDmftdC12RTAqjkI/6Ni090lZADD2b1eRnHfdwSuCVwS+CWwC2Bj0ACfKG9k0QLf+RNFfGoGKSkBZ/H93pgx5/xv/YH/KAYQvzEj/HvfFw/9dcmPoC3JA6W+HTxhT7iaLj168z32zsYy8lvFhcb1z1a0MRP86fqSx7pC7ckBhrAi+dKTIGzJxFrgBHTlZjh28UD+OHP4ScLfhxO/BRDwFcMqA1N4LUbQ5t9DtrQ6MCncbXhTRxExvqBFROKF9Ddg2P0kDkZaCuOEP+IfdU3tngPbvSTLdzq4KITtOlvPAd8+uPFYa+UbtGpP1mBF98VP6pHPx30s3k6VYde+PAsTtYPLcYV55ErGDiLlYqptMEdTcZub0AHaPowjvuNoWepUkoBcUEuBTOUEkCVFNvHnm3QwLVppagN1pvYu1kxGTrahLsv4D6vu0ffBuaua1PGg80jY2rB6C0ORuZ034KCV0/2+8engvOC+wy1TZSstW/zhL+gPvhKtHz9619/9cd//McvNMK9+NBnk+OVTfBwgnGSW9cWgXA1geMd8iZV+qMbv/ltgwQGrvBHo8WibyylJ3VLo2sLjgWkjVmw3bdI+5i3fydjC3gzTuMaG80chVL2mt1Es7IjHpXnGcwJv3Dwrxwbe2XGWeizem7jWom/eCAH1+kbfzbscKYr+NwbL1kr9bFg+glYusIH+PiPfrIMDxj4uo/+5cM1/G009UebkjPRJvFIHmByZI1Lr3TuSJ/u019lyRr35lBzi6ycnCVnmKy1G3NtoG8jxbuxBQXZE2dhnBI7JSskZqwz4Di0gg735rp1SZ0zHiRvOsztDrJ0oDMZZL/pjPNJxmTGeYWXTLX1KjW6fICbXbdWcubWwOiMj/o2TmOsfaKPXAo2jI9OPMCDNm/JkKt+4I13rivJnQ7gchiv+ahd0Mdhn/SAzU6zS3XNqXNO00G+At/k2XrfPK1PNmFM8qgf3sgLrfUBsye+1zYKHpc/dCbPeFamdzIIf/XxGMwJv/Nv52vXSrSkt3TXk7qdV/jkIzfIA8+nkg3fRJc7x9FzH7cEbgncErglcEvgw5AAH+fn/vyr07eFPJzqIZJfLXiwx3eJt/htMbxD3LM+buMw9d445x/B8XUlf/SF372EgSQEP6g/X8ivgvfgqBiqOEFfbXAXa0jq5IPFqGIRMUZ0i3XwJk61p+Fj+Vr987dgtL1+eign8SCp4t6BB7RJ2IhbS7CgX3/tYEtw9QBKLIwu+PTTPxq0OcSR6sVk4mRxAP7xSy54EBuTv9gz+ZfgQTc+oo8sxIrFYvaQYnQxiTHpRfLLPphMekkBvPHhRW+xjHLjnXSzsRNdwHXCnvEVuYEr+YT/jXfaz6gr/nPtLEbbGO8S4Ad83ImhEShlE3zGUODNiJocKaQsYhsgBszQTGzXnfoWNEuCtIEzVkdKzxAsCl33z0/u9Qm2vuGJZoZtAlnU0Grio6WEkAkHhnGaZM6ypHAau02WCdSJT20m0L/8y7+8fIR634ywQIQTPS1yDL2NtBJ+pw21ySE5YyE6N1zoV+ckNxsjGX06qr4+xnC2EBgfHByNrzQuncTXf/zHf1w41ZFT7fCo6/QR7N/93d+96EC3+oVxbQFTbxy4dlOX3ixMfW9H3R5NenW7mLhee9nr4OpTmf00hjLYrpMvftDPTuOJHCxe4SHndMeW9OEY2ZPDxh3fq7P0QSb6w+9onimdjasvJ7g2vvRXD5ckU/Iyjrotk70+nIJ79IMpmVOZI47H07YEDOrgapzmE/t1ciZ+L+yf9sDFczQrV6eXIJ4OvEv8sBfyNodaS3r7h5Myl+gmu9ZPktY42Yf1pbfMSlKhl8MyPhl0dJ++lPHXh4Xd048326wp1gn4OViJvhIb5g/atZMlOtBWIl1iKZ1LoqDJYUy0u4+2eMkm6genNUi98emufvSaftOxMh2EM57T4SN7QVd2tfrSN5rXPvBTkqNxsuto33GzS7aiX3gbU5+O1ZG1r6Ry7dG3NIevvtqiI/rOft3v2C9EzEV4FqcEpPUMjgKi/F06ZLfrE92zl3wHPZlPbCu9ur+PWwK3BG4J3BK4JfBhSoD/8YsApV8Q+LajRIS4TDwmweHbqhINYhq+rHg5n4q+fHi05i93b1YMp85+kZ/2M/NiOUkPbx3pyx+Ksfl9dcYV16HD0b7OuBJL/Cl8+BCfGQMOfaNN7NKbOPYi7vuZO5zGFWeJaSVp7HvFLGKVkjV4MJY2NEjS2PfpC
x+Z1WafbK/ZXsAeSRva9DOOvQY+9YMDj5Iy7THoQx/0iOXtEXrzBi0OJd7JBl74yQHufqmx+wy6FZeKV8lGmwN+8t5YJp1pT9+1b8zkeu8vhE9HdqBEO/xrK8VsYF07ybH6YvPa3JNbMXPjfJDlnRh6kiYlOVdB1ZkADEMGUjBLqRaHXiNjfGV01ZlU6kwYRmcDUFCsrzr3DLFNgrJNTkZQqX8GkMEwdhOtBIuyxIvJZNLYpMFpvAL1DG83NnDi0YegP/OZz1wBOqMz0S0EJXqU6uG3SZGwwmt8GqfJAqfFx8JHNiYgGtuIkw+ZtiF/++23X/31X//1xecphyaWBcXi3GJWckLZid426hI5bdLJqkRQi0ULe6WxoylawXZaXLU7q0P/wtQeX8Zt7K69nSWpke53Mls4yG4XjRai6rLLSv23rXt12VB6z75bYFrIwuWeraAVX5xL3w+ilzZr2Q9945mNsgfXxswJwLELHJ1wuOokH4wFBj3R6rqFNB4aLxj3OVW8JcvKbEjZh6PhZQN4MCYdsUe65gzwmZzQr29zjM2BA6MenmCjqRLe5LtyT/Z9NBxv5EcGnGc0cYgld0o6k2uw6ay5ppRY1cfZnNQnOhr7Ivr5WPvAZ7ID26uynCd+4Py1p9epW8foWx/rTMkfjh1v1jjrpLe00IMvNLZWcfLkTqYOdKSv6MRj/EoIFXzAZV2xFpVAqC/6PY1Kp9mMEj4HnJIY8FmrrXsLlw6jq3lV3+7xYK0BTwboQXvtZ6kt+Upq6XvyrM+OZ0x9ok/AaA1Mp4/g2TpbWD4uxp+P8IdD9c+yEX3SSzw29vJJtumskg2yb/KmOyVbQp8gsERo+OlWuznKXsDdxy2BWwK3BG4J3BL4MCTAL4kJ+CL/PswviU34IYe9i2sxkT2gfYBDbGyPpV3SQozhAZyYx7FxCT/GV27sxjeKrdRJnIgB3Yur7GFc84PiKTSCk8Dh37XpL9aRsPHGdH8xDxZN4jJjuofTUczoWsyCLzzxu2iUADO2PZt7chDrqMMrWvoDEzjFJPr2aQpxNBq1lRAypn4lgNAtcUOe4jV0iofw6locKUbSX3ymH7mKs8ipGE9sijYPLOEnd3G58ewNwKkvZiIzupN0MkYxCtzq9DdesWcxTnGNh6Bwoht/4hqw9sLkD27joq7JeuMn9+hkH2KcjQOLp7IXsPVfOO1oUPdhHXdi6EmyKY5iCkabnIyYEezmzL2FoU2YRaUEiRKsyaW9p+hN5so2bsYzfmWBevcZS0G5+hIXJhRanCZWfU0McG12LQ4Zk3LHyvBslPCBZic6TZQWE3REZwuFcdUxUMauD/6jzyLr51XetIGrc98sMrlMYLiiMz7ivclqEksooN84eG6CNkHSpQ02HZQMKGmkT8mjkl4SFr/3e7930Y92ukFLp3o0f/WrX72eKFi4tIHrRIdF8p133nn1+c9//pq4JaP07x58csPHoyO+W1zw71BuW3XK9FNZnTJ5um7M4LSFP9zg6FS9Mxi0q29RWufnrRL3JU9c6wcnHHjW33xSz4ko0RNOePdtlWgMJt1WJh/34UjmcNEluiQp4iGHQH/0Ap6DZfvNkexZ/R5w9a0r+PYEh8d/+qd/upIoyTnHglbXbGf1qJ/63ohBh8RLb9tlL+HJAaGFM2vOtf5kl8p0l7yMddKsrvUgeaJF/35C5vrdpzdjemLGgfdX5OakYEWAIVDQZv3Qp6QOGrWbQ+YhvfRGE37IO9qUxmIncAlW4gl9eA4P2LXFkk90Wv3yi0841t7Acex0rb0TbrCbdJP8FIgIBMPbHKhM1vo60kG2Ff7gu18d1Vfd66c3KXvj7ex7DfB0BN995SOc6bi2+Khe323b+8Za3lo7muPmQHNdSZdsPjtl196uS+d0VqCkzdh00bq9PJz83fe3BG4J3BK4JXBL4L8jATGTX1b4ppBTXGEvUsJCbCCWEWsVX9k78F3iEH6tt735aHFfSRBxAj8pwdFDsGhUL+5pn8FPinGMpZ8+xXb6iAftIyU2xFHFhHyuPY59h/5wSqCgCU7X8PSHJB7Kir+MD16ChC+WYJF08WBPXIdXvDvEfD38Ih/yUmfv1Jvo2rWJF9GfjIzXz83IpzhLLCC+s0fDA974e4f4F01oFP+JDxz2294Ywhc52ceJzYypDs/axQvtuckDL3guWaQO73RIrnRInsU02tGEhmgkYwk442jbWBxtfY5EGzyVG7sUu6FXu7HRAEaJT3jBFZORievqjAW/++LHSzgfwnF/fPpZqJQ4hPAAACAASURBVJRC8ZWuGRUjNLmUJmAKYcBORiigBdPiUSKhpEETGc7zOiPbNnXnhjCjZFSOjCdDUbdGxahtvjLS+rypNLE/8YlPXPxn0GuArttsSfDIoPodLlj1C+s+gyev3/zN33yR7dKjj0XERssi43XOxn5UWkgs4HhuIgf3rMb/a2K2KWnCB2fsZIFGOrSIWXyVdE0mJbPomY49XdCG5jPZBR5/5Aj/yiS7UcdR7Me09XFmf2uD2Uf0B1efn1Umqy1XB9VnT5XodG1B5yiTt/qulY1PHmSdbFZ25KTeKUmztr24XJN/Gfidj/VRJp+VSXQ84ldyQT342tN99hOf8KwtuY5P/dlgNtJrsfjCI56N1Rt72Yy1o7UB/RIm2VfJSbDBwwW+hAg5OPUzBkfXmgN38NYdfepnLEENmp092UlW2Vb4ObjFbU56ItJrwfTL+XKSAgCbfmMIZNRZD3u7ilOzBrVG5qA59BI3zZF0gCdyKSkkMEgO1hsJAzBodJZAgM9ap3SSA7qylewk3Zvf6IgWMuSYlfriWxv8aChI04Y+a5Z2fdQp02+yT7ZkTccFIOgD33eR9E9v0Zt+zDt6o3N44QGzdrG6xB/49Fl58q8PG1Yuna6z9+bAo3W4da01jf4d7jtbX8laXXzghfzwL4hGA5mgld8gc2Omy3N9j667vCVwS+CWwC2BWwL/HQnw197c99CaXxXX88MSLxIBkh/8fskY9XxYe0ElHyceUIpL+DlxgZKP5uvEXMYSE6rnv+Hkq/k/sRtY42nnL/k8vt81f1iCgn/mJ8USfGUvJxTrGIffLI4UZ8CvXdyGVv3wIpFSLIFmPEhaoLkEjvbiF3jFtmD1L3ZDkzHEX/y6NvRrJy98qNcuxsaztngRX5E3HtFmPDzCQx8OciEvMtKPrNGJJvjICX78l0gid/SDRbNDLCqW1Uf8iE5xK/mLfdGgTl/91JM/urrXz7joRqextetrbH3QmB3gAe1kpJ8Y0xjoAF/eQB92iFd9wILR1zWZKcEYV5yNzg/ruN8YGsm2AczolZTIwJWU32QqmFdSlPpHgXjB95Zd69MkQobrvW98Ze36LmzwSkYV7Sabty+CV8+AwVQyTNeMWeltB2+7dI83ME60ujdRXCcH+FqQgm2CmgSM23d8/GOXMZo8xugkdxOM/OBzakNz97Up1esTz8lt5QdGRrwP78YTGpJRcjRpW5jQbCEzAfHb4gjGwubNIjJt7OQBP1h8mOTuTeTGdW1sJTzx
B8+bDq+HZitwdga/ul/eT5zB6UcetcdHdh9+MnetzH6CwSP5od+xtpxz1GZhDD++mxv+knvfClIfHvBwS4AYOzq3dO3NDQdYR7yjsXtteFD+5Cc/ufpoj8e1p/qjDbzkVQea8IwebcG6pkv6x6vTNf1LmmS38MCxeuQsq2sc9KBNKWB59ynpgl6JmZLPHBr7BBO+SrDpiNOnC06Hw4wWpXUhe4iXeOQkm9ueaJgDaC1pZT7tB5A9PTKO+e6EOwfdfKDL1g70Cy7+9V//9aIfvZyboAktntJoTw49KTMGOpKRn5RKFAmwzEuyb36xtda31Rl9cf7kRy6CJDQYN/noZ0x0otl42dTaVWMYs77wZx9gycn60XhKOOELdvuiw3GOl66CPXWX/dQ3nVYuTcGGw31wXRt/6xa/+uxs7c01naXnxszG6Y4OyZxdgGcrv/Ebv3HNGXZGj3ikG/1ap3v6uHTc17cEbgncErglcEvgvyMBfl2M509vxOKf/exnr/hCTOHNEkkUG3+JiGIufkuM057AvQdIe8//2bAri8uMJU4QV4gB4FeKWfjBkgHGg8tbR3CjpRiEXxSbgecPeyNIosNDGUkS/lc/+6v8LhzFE8VZfLD+r5/eRBZv5dP5bPGUpJM3jLw5JO4DD6dDu8QInsR5Yt38cw8EjYcm9PDp+PVGEjxiOzIla/zjz95aQkpsWeIJfrTBg8ceFpF/8QMZoqu3fvSRKIMDvHY0dKALLegudmkvgHZ86yPuQJdTe3qsDN8ZZxVTPYq7Tlg4qqNb46Qn965XL+yCrOmQnshc+WEed2LoWboU0YaiSVYAn9IyqCZeBtQE3hKONnD61UfZvQno3thrLMZlIEobWrgyBGWGE31gja0soHZtc2ITrs0Ym6gAZ5Ip0em0SCrhXd661taGz8YVXdqq22t14NX1FFvdwoaXoVvcyGL5iK5KtPp3sj/8wz+8ZAA2ncW7+87kD87kSl6VTWay/7d/+7fr6UGLCXzk1eY/fvw0jiOJpniolAj75je/eSWQti1Zpx+b1Gg3fjbRYkUWsubxsDaEtqW9RWYXimwVr9rjh1PAU9/e2bHrv2O5PuuzkcY1BvyO7HwX1eZCMmsOwK1v9h2+5kO0Ze/pONmRXx9nB5tM4DdWfMMfDm2NRyfZcDYBD0cRT41JZiVSPV1ypJtw0jecnJo32x7xEU9buk5eixOfnHNzZmW6ck5f5qSNdcma+q29JOP0WhkPxnDqa8701MxGnqO2nnDW2tBqTA67p1ccvvtsn6MGF17BCpzmmwBMUNBTpYIF8rcmCMrAllSJ/3gnt533zdfmfzo1JnqjO/r0FxAUnKXzZHTqKPsyptOTNTTveOQhyGusbIIMyHrpbS0w7h475xpz53HX4ILN1qJ9eVg7bJyz/aRh4VyDb7zsrXnsFfT9Zli4GoP+1AnY6ME9vfJ//WOKIBKMALIAkOyM0dryHiHdN7cEbgncErglcEvg55AA3+whtdiaD+rn4fw4f9X3eUr45Lf5oUc+sPgxP1/M1X5B6ZT44O/FPPybvZZDTFWspRT7aBf7FDcZWxKq2NCYfKJ4R6ypXQwlccOH1q8HteAd9lh8t6QPf6udPOxZ+GZHb5WrIwNy4pO1G0cfyR1xHRrVhZdf16ZOmzEcEjJiZ7zHb98q4v/16YEintBI1vGLJ6d68RWa0NGekdzsMeFAs7HSSzjgK34sZlLaD+GlWKbYBmxw2pyOYqliG6Vk3sbYxT/KjV3AqgtX+MNb+9JX3TX406H/7hur/6DLOzE0AndJKYwz4cvU9Xpcwl/jYXhtghhThggmuBTtjQSGa+I72/Rl+G04KtEg6O5tB3DGcGYwGV8GY5K4Rodsqr7da2tBiWZw6o1lQkkmeROgBW2TG66Dd/33f//3r77yla9cdYvv7GMC11cJ9+IiEwvSN77xjVd/8Ad/8MKjCRS/jYF+9U30aK+9TVrlt771rVdf/OIXL7mvXLXTc/0tsC0m8R6dyvD3OmNyWF5WBnBpM8bqFd3sgQOQYGiBadFaG9uFYxeu9L+LQfYWPm3ZCBpct8ApvY20bw+Brz1c2W8bv+6D2/Fds288rd7SX31s0m0ik0M4T16iv3oyBBt/ldHa3ABHN9od6cFcaJ5tmW7UkVO0NU9yVNFhPPgdrh3xq95pzDbD8Cb77RPd1YHRz0+/2jiXwAhnMsxW3HsjR4DTulISZeeb6+RlvGi6iH8+VmdoYJ+SG+alxAmH3s+qBDnqu9dXIsw4aIs39Q73HPqPfvSjK8Cwnn7605++YL2pZ42FX4kPMDuX8K8+GtkjPtVnE+lRgNScE5CUXCrZUB/OXJ2AYtcFsnGfjaA/W3Jde/6hdUYfPOKnY/u5fj/7i4Z0FY76aG/M9Lf4d8x0fZbZK1h+gZ4Xl+vV2Qsjc7E22NxGW7pZW8ULmsnFW2Gtq3TpdNBj9s7/0J2kIJsu6Ybunlo+oumuuyVwS+CWwC2BWwLvJwH+R8wrqeDzFpIffA3fJE6QWMn/8mne/PbgyAlOGxz2CvZu/BbftL6P/8vPFXOIpdrrSUTA5zMc/bpAPMX3wmXcYkEPniRlxK5ozm/yu/yhcfuDGA8RHfDgB29okQQqYVRcZlzj4AmN3kZS5+Cv4cVDOEryGBNt+tgH6cdPO+ByGJNc0Kgf+uHUp9hUUsw9X19sR7aSX5JHvZWkToyyP4MDX5yZ3O0pyLTYw9joLNYzLpzg01dx48ZExUu1FbMVZ208dTH7dFRH7sbAq7jZoc599Lgn98Z071w8tWcPwSjjif19mD8jQ8+dGLrU8t4nogyogN/1qZwMRT9KZ4AZacFyk1Bp89akNiFct3i0EBlv+zyTdRU28X5eYoyMVT9GVPDdhMkY3UdXhnjC1qcJqt1mygeEa4vOpbe2fRNIX+fCVWdxMHlNcO3qycCEtdjU59///d+vhVud0wax0ibBaQMp2+xVUHTg9+QrfpKBktyMs7xGX3QHt/fLl2u80APYdN042cKPf/zj640h99GG37UbC6qTDlsc6Pq0gb1Pj5VrI67Phcs92OwXPW3iFkeL0DnWSUvjVd9C677rhVF3zouFQ5/7+F75LG+NV/uOb27t/HSdTHOy/XQNTu3wsAOnecfGlOrJKHlEQ3KNTk443H3zamHACQD6R7f6hS/ZV/ZPZWyEM20+uM+2kpNxOMvWk+DZmbUl280WlfpG30mn+/QHDt2cjgQLBwS/o9eOBSLmYK8aoyN9GCte1Ukyo+d73/veFThIJEkKkbtDwkm7sYzj49UCjoKwnb9wo814ki/K1h+40ju90CU6jdVx2h36spvWemW2Qy7hVboH37cIBFvoz4b0DV/w6TcaotFTSU/TdrzsqXHr4/7102vfgtTwV56w+lgX2Wf0gg1u4aOtdm3Opbn7s54u8lfN72wou1vd7RpKb+xb6eiNMKV5SJ7RyZ6t+XDCv/Qnn7u8JXBL4JbALYFbAu8nAX7KT9DfeuutC0wyRrLG2yJK/opPEt+LU+wxJGb4ZbGEWCMfJf5xSgRIcohb+HM+Ch5
wEjniqOI2cNrtgdRJFoGRDOHn+GH4JFv4OzHQu08PVMRd6OIH+Ua0SuLwheqNHW3FX+ISdBjLXqQ/CpHoyXcr+WF9+XKxkhhDH+OgEX36SHSIy8gDXcbm09WJxZQldPBBtvtWD3nD10+99HFP1njEg3gK7yWMjNFPvNBZzKPfGROrc27s4tp4xrLPLHGVPop7lI6NgxoLDY9O8NXvmOI0ci0RR65eirCPLdZznS79xK43peExLntzTQfFstGjhOejiIPuxNBlFv8nU0roFMLwurZhoaA2DGtQFNhiwOAYRUa6gbHrguPaM1D9C3zDV/lM2lWAZxALAye6dix0u4dT2Tdd9Acff9o2URKNvZpY/+h2X13jmdjogbv+tem3dP3gBz+4FoA2Bb0VYHGzGJQMsijjwUTWP77VGd9igQeJMvLYMboGZ3x9nOmIrIxfvTGXTv9O9oUvfOFFV2DjP/7c+znZb/3Wb73wvLICl6yMV9vKz7WnAH3/CH0dq/t4VwazsPq0QLk+29xrz+bCXenbO2gITrmwex19aG/xCl5dco6O+m4ZPKfbuC+MD/0nH2DIVX32brxdtMOjzhzdDTvn474xtdOjIxtyrd58Od+kMm587yKtD4cPN1uy4LNrDl7Zz9zOhTz+jIUOjje7WfuBP91IgHHUTmuS4GP7kDMas5XK6uByoN8RTZJM8PTbd4EHhwU/GQtucuSuc97mrLOnQ+iMVomu+GLnOXuOmv688iuwMk5vHYEhS/fk19yjO/JBlwSSevTirzqBSE//yH115Bpdlck+O2ltr8Sz63A8i+1FZtWDCbYyHGB2XiZrSSXyEjhsf9enLdeHLZKBewGeoPARbSdfK4NwL8zagOv0lw4bv7bgg2teNy/c6+OeftaOq2stzW4ElN///vcvWAFSARPb08c9WzC34Cen+7glcEvglsAtgVsC/68S4G+8oeMtZT5IDPX6aRPPl2rjh8QPDrEGf+NXE73Zwr+XOGm/0Hd9xDAe3MOpro8ZS7LwYx1iHr7ffscpFoJXLBdOsVI+kr9z7e0f1+C95ezhWD64n4S5R4f4LP9tXPzpz7e6RoNYiR8FL1ZCNzrQLrnTQ8GSQyXLeqgv6WQM/llb8OTIX2uLdjDiObiN6V4iTBynHl3FBuI6p74OZXGe+2Iq1/F/Ac6hviM63G9stbHTxjjBbf+uwW2/rs+46hyL7B32rGDTpXiOzOEhB/bG/siIjZBjiSz6YZ9ixvY0H0UcdCeG0v5TSXkZXQFvE6SgvzKjqgTvtNFi4BScsTfxC5gzePBNsiHj5bLJAM4mvn8BA1C/xbnXxkSrN3X6mG4TFl2MEXy0Rf/SdOJ7xIefan3uc597WdD0B6esv3tJpNqSS5u/FkP8+gA2fPqaPGg+cWlrEYinEl5g4dUXnHvf/PETNfXBaes+eZiA6tGBppO++AEXTY2/ssLr22+/ff2ETft56utc3Wd76Tb+wHVUV7n1LVL1xwO4tWltaFEfTcHsuNsenM1+CZN0sjjCC86bMo9w1M/C2F/IN3/AP+LvrCeP9BPPlc0Xut/5ufNIPTrMjfq5T9/6csL6dLaI69PivD8DNJdWjvXbJFO8kbN69mLRz0aziWTqvrc/yKtEUPALd8ooOcPRtWRc99mqe47eWiVQKGhh9z4QLUjylIjzMkaOC/yexrA+NY976gU/h2YNSgcSQhJNJYQEX+QgyaMenwIGtHCo4LQbT/DhbZjoBI9WQU9P7MiXXr1aLKmQjpN/dgEOTXRLp/gE3/qurTlxXTwf+gejr9NTRzagPj4f9VWnPxiw3hx605hrL/WpXNuMn+XzEfzyHe5oPfGdstI3G+s6u8KH6032vmndpjd6LJGq9CCC7TWv6ZJN9o2m4I0jYFo+Ri335S2BWwK3BG4J3BJ4jwT4LQkVeyf+Ryxhgy7O4LMkNvoOohikfZsEinhDrFRsKNZoT6DedT/9h1vcI97h0yWS9BfDSP704Mv4/C3fBy/f17dywDjRIIHibfT2n+rsO+DK9xoHHWIfMWUP9H74wx9eiS0+07chHdrAwxM9+sDlEIvYt4jP2g+p1x6veJFckrQgVz47mYDpUI9+uIyHP0d9Nv4EKz6kk41t188XpxXjvAz0jLP794sNti+eNp6prbrwFQdtTAd24yX7APwVCyaHYrzk2/3SSrfw7f5Ce+MVZyrBFTeB/7CPOzE0EiZwSmgyMnp1jDfFB1Ope4uDiW7iN1mUTSptbcwYHIOh7IxRm3vwGV917l1nqGvU6s+J1jgtIGhsUlaHnp7Egq+P0psz+1Oo8D8q9w2jaKk0Rn2Mb4Nrw9YGskW2e1lTi1UfbiZjbfQBj2v6sZmUOXdtEu3bGr2qJ8Ndn3RIjo0VbUr0dk+XfS9lnUC4lOhMnu6T3/LviUH6JfPOdE8n++2ezDAd69uisnofc325BOtMz43bffaCPvLqXnv/2OW6BbMyuO4bEB4yXb7qH83n2MnrlMPSvXTVvzFri/4Wz3iP5/DVb+FKboVLiR4wwSkd1bExvLLB1gV92Euw8Cwd0cIGBAwt7M0FjnltJjqieeWqLVmcdGd74PtQtcACbWzXHCnY4KTBp0u4+scz9eB8ENqG3Pn66YmaQEkAJbiwYRfsoL0Ayny23unbfIsXT9HQINggg9ZLfPezNHx5egQfWHACCvclhdQLHNAkSQTeePES371xki5Wp2sTyXP1JSgS7Kwj7jq7SDdKfbOL/ALb2P6Nv/1cV1+/5LL01id916ZszXuEPz6jWRm957zYem31yXa1J6uTHvfoOO2RbemvpOfsmK688eaN0d5EIy9rNV33lpiSfbEZp+tkCm/zMHru8pbALYFbArcEbgm8SQI+I+DfL/kd15Ij4hnxhHimB1Tq269tXLXXxffqxCriux5aVfJX/UwNbg8z9HO45mf5PuPzf30j0p4GjdrRglb7DD5UQghtvTHM74rxHHDXD7zTg9n+GRet0cSHinX0RSMeimnFXX2nBy0SQOj1FpNxJdDEkPoWS+TnjdGbQPw2HtpPFaepA68s9i1+OGPhjUfQLo4V9/XmUTEm+cInLnBsPOJ+8VwAx1HMYwywwW8stHi6XrhQwpEstbtHF96WjnBHNziH+/pXt29KqyNLMXdvt538fJD3d2JopEk5zjZPFEU5lFsgn8ItLjYjnloLYE3OXVhaUFoUGmY3JtUxnAxor7UbrwD77NvE1Nf1jtlkU983efQ/F7rg1NdmUWny7eQHG1xlrzYGV6Jl4fQzqS1GFpldNFzvfa8XqttNUxOXTNBm4ygr3iQsIWQx09di6slz2fvk03h0pQ86XfcGlZ/UWJTjI/ilUR8bUgtpukkHydP4aHNUJlN14M5j4cB2v3CnDSzMymjHXfuJBiUaWqgaQ/3qvr7KxgZ71sOVrS6uUz6NGy4luhtX32076dKG5vrFc3LQ5nhTPd3VFm27WOuLj0fzLhqVfUMILnga37Ux2Au7sjbUT5ujcvk8cexY2z+ao0GSpHFaf3qrZhMu6Fmc8IC3jnH6AhX2rNQPXm9Oac
ND/9ahzRwxr3o9Gp7sue8l6UMOBR8X408HfP0DRbSjQWDl7DfhnJ/2fWuoJFf/MuZpVE+4rC9oMv8d9EGH6TZZa+taAhhONKVzbw5ZF9xnJ8GvjWknrxJS1qrzraOzv7HRE11dNzb47Pdi4vkgx9V7eB/Bb1vt5xyHdnmqffvu+GDjvb7oaa3bueK6enTv9a657JFtOCUKBXnWa/JEjxN86y84+k52S999fUvglsAtgVsCtwRWAvyzN1HEFN6+5lvENP4Jy4MuPmdjl/zO+mh+b+Mv/oxfs5fxhhAccIlX+Co+rL2gfsaXaHGIe4zntMcqTtQGTr0EC98qxmosY4gtPMRVZw+lTqyVb1bqB4+HXHCJ1SRq3OMJfeIpuMRN4NGdz9bmAV8/hxMHwSs+E/+hl6/WF59ko67vA8Hv0Ec9GiSWvJElPivxoW1jmvAUK+ifDoxNf+DjUR1ZknNvI10Dz1Ess3VnHATm157eJuuNH/Spa69PPtlEcYf2aGuM+oRfaT9Jtuh2DXdvPJMrPZXgSYdnDER/9pHiU/2dcH8Ux50YGikXoFOADQPDcE25SgZjQrs2eTZYZ9Tdbz2lOx0ZgGvGVJnB6wePtozSPWNwv293hBdtTaj6V0ZTPyerT/AnzYsHbe6bxJXbt7p//Md/vH7+VVs0oWP7Wch8m+cRXnAWFrzD94Xnb/3sJEyG+LAolCmnF3VgXTe+Mln0rSVjRFOb1uixkFnE4Voal+f6+6CuyX3Ko3uT3s/JvP10Ho/sId6yie6zE3x0feKrPrzh2Ht0tcgtTrKQhVbnNAfIsMWsBQmv3gLhYJMPmjqzrR1Tm/vKrt1LVu5fza9MVhbhra65EL1bBlMfbS2k6vDmCU18bt9ko/TEBY2ONr1LR/Inux0jW2szqxSIPPoI9eJrrPTWvQQPG2enbZI52t6y4DB7eya7iwbleYIJtzfj9JWM4bBsup3sn4w4f44K7QU67ETypXFL7IB3jV9tjYNHdBszfqMJjLWUrXHyJbHg6F840BPv8JQk8vQITWDhlaRDq8BPwCLw4+zTzWkH0aKsrWCAvnPAax+uO7w+jJ4SStt3Awh9GqP+jSfQ2leQz37G6k8L8OpVeEFjBzz6RONLw9PF2rLrxlwYdeaft8bC8QjX2cc9nlandIkWx9rfo7UxO26OlBxSX+KP/OHLPgVSTrBs1dp0H7cEbgncErglcEvgkQTEITb9/n1MLOMbpx5W+UmVWEOyhK9a38df8UPqwIlP+E++SKznAKMvGHW9NSTZ4tq+MJ+m5LM3juHTwKGlmMYDbj5PMqh9Z3sXftKYYhsJIeOKGyR8xDliEDGZ+mIq9PKX7sVFtaPfmGI8yRoyghc8WWhzGrtkT7GSOnua4rtkof28LjbAp3Hwj95iz/ZQ4j5JNfIsHiADMsRXsb6x8bIx5BmD7P1F0BzRSEbtB1w7lOJJ4+NPu+SYe3IS29n7uKdLsYc3n1330gH5uQYrnlayB+OC0w4n/iW16FocEyzZqENLcZQyGtGEf7jI66M47sTQSJkCKNXBACjGxGqTzFC8fgiG4jrBORlCBgxHhuxDyTZiHWuU6urfQqU8T7SFT5+9b9wm5NlXO/jGrb3JhOazjyRO/7hUu3JPfdyXET/bm9DV73i1Kfc0SfqGj8lIxi3WK3ebR/fe7omOFpcWnu5L5klqgK29fsZXh07l4qstXGBaJJMZuZIvWtHUK5fJffWeDrTZ+NtkdsAB58JkH9q61ndxnuO8ND5faE/2aDeGEs7sQrtx8cDeV2dg+h1wcFCHN9zwupaEiy/3J2z9TjqD29J1c0syCd3JJ9rR3PePtKUXffERn5XqXcdLSV+6cy3RsPTD19HY7vtHtGxQXe3KzmiubfHpg46+AZTc2Zu3d5SCBUEGp1mSBs5scXW6ukjP4XQvEQCfwCTc7jli+Dl+jrLXrDn1xu1NH/MRPexX3+hEx+rWtaDnv/7rv17kp671onnSfDSO8TbxxFl7Omcddmo3viRVcumtIU6zwAavAqmeKCUXsuhamU20lp9rezaWblev+mQ7zX39wxnsC/NPFxJt6N4PUDemkn4EDPSM31130Avn4ne9h3YJHza8NHcNxwm/PGWj7wF6ujn7JcPmfOXKWV22V8lGOgtE2RKZOOmXLPElyYd/7eTRPOIj7uTQqaH7/pbALYFbArcExA/87Gc+85lrf+An8uJDyRI+ho9y8iESIxJFrpViGT6YvwHP/4iNxEN8Er8tDhIjuRZH8VnepOktFvHKxpf8oD0InP2DGB8Htz2WGKfYB12SCHDrB4ZPllDgQ9W5l6yQXEAHnP0UvliKr8QrH4un3sSFfxNAkjDiJnGMNg8yJTXEIMZBp3GN7w0s1yWNjOneYTxxnVhMzIV/dRuPwQfeGHhEF1+uDmwJM/iM64jnM85w73SgG1x6vSqfj41LVBXfgEWjcdTVtzgOvvgXl8GjrXZJOXEJ3WmnDzyU0Fr960O+2l3jPX0V+/VLl+JJuoObTMo/JJPl78O6vhNDh2RTPkNhDBYHCwaFWQTWOFxv8tjhFgAAIABJREFUkK6PIyPrPoNkaBlm1/pX16RfY46ONfoMuX47eZowS4M6bxv1tkc4g1WeOCxUJYZqB9O5dRamcDXJTYKzziJgM9lriS1i20c/MNF4jqedLqIXnGu4qj/7dE8PaIKjs7G3/MY3vnF9OPqkK37SK5laWE1yk52DsPiXHTepd9FZXaKFfPtoa3aTTsGe141b/dln4be/+vRVvbrs13V26HrlB75TfWMmC2Xttblfu9568I76XTfPR7QFgxby4Vzo1/xDA9wWVCedO/q5ZLiiVXnyF23N3Z9S8FPHUT+0gHOgz71x0RMemfxoQo+FvLccXOcQyNsclDhbGcPFoUZzsgnGRl8ixLHyqx3+dNDbOyVv4C6Bw5GxV3X6wqVu54I2dYIbPPQzshw82N5kETykG/XwrV5dx1MyTpbJ01tAxhEklOBBA35LkKCncaxLaMKHdRmscQRzEikCJfPOuGhy0P9px+kym0OXgIiDbo1/tL7DlT3l1OnckzF8wLPzCmxHfbVnv+GQyBHQ4i1ZRjuceFS+fvr2kyD0HKNx8OPINs+xX4h5vgB3nskr2HAurvS8cuwtwOwgG1s7xRudpU8lfbHxEkP9hS59wh991lV9neno5Oe+vyVwS+CWwC2BX00JiL8lhT75yU9eb9R8+9vfvnyLxI4HzpIW3ny3B+FnxOzFH9r68yDS82CC/+GX+SI+R4xiDHGL+EQigE/jx7014lCCLdnEv5WQkkBwLb7SR8KFvxTP8JPiFw/l4fRgy1ho0oaWfCle2u/oC17yJ9/tvp99SeT0cWl0wWN8vIAxpiQEetGir3EkjMRe1UvoSA4Vo+A1mkqS6b80FEuKj8jb+HCTgbb4KY4oRgrHthfjFFdqK9bYeGRjEn2iqf7odu2hZW9M0YW6EjNwF8vh3z3a6YTsxC3kAIb9uG8cuMjTvb7OPnEAFh72BI6Nda8NPWhYWuChJ+N+VMedGDokTVlt7iwaJ
ol7ZQaS8RTcu6fcDI9hOvfQxogyykr1tRnjEY5wKX/0ox9df9W+eFw3JgPeidHE8caH7Dkagt1J1cRSwmExc5wwV+XTEc2y5D2VX1hjNKm33tsDJldtSnwL9pXJwGLorY0m/+Kqz8lrC1p4tjSp+vhwuE646vfnZI2/Mk0f//mf//nqr/7qr65FvWTQ0ktObMOEtzA4OY3XT5s79c7kvnKFf+0gnT2yjer0j66tq15deleXLTZuY54wbcoksdj/o/HqEy/h2rkU/17DbP7EFxqWt67pa5MY8UDGrp1wKJN79cvXyi8cSvQ2bxuzkv31FlJyVRoHTTtONKTr7AocmUl6JG/zir3AtbrfMZLnad/xpN+OLynCMTl7e8fYEkLqBDCbXNEfjauveEMfmfTzLn05cetHYwiUXMdn8zu64wW9ePBatmQYmlcX5mSJqnBJ/KC1J3GunfiRgONQowPuvjcETzxyyuCjZ+Wmz85Ra6L53veC8N66nu2n31Pn6sHXZ8vsqLGVyWX79V2iHR+extQvXNHTmG+Ca6zozfaWlvAGE67GPWXnPjrig96TM5nSC30kB+ue05oneW4uWPt2frhOb+xMICzh18/sSg6aW47Gc10S7+Trvr8lcEvglsAtgV8tCfBL/OknPvGJa8MtlrGHEDOIJfkLyRA+iK/hj5TFE+IRvqcHvPydvnyQRIy4gJ/r7SAPQuArRjIeGLELPwUvnHCUCKnOGGCKRfhS13DGhz2FWEYyqZ+Mwa89vyl2wXMPTTc5ZCzJGP3RLDnTGy7G01ecx//m2+HFB1h8SfgYr1gRTnTDIw4kU33z6cYjkxIjJZ7wX+wFHv9iBXGPJJt4G53GMb44VgyQXCSl0EQm4ouVG/q6LzZh+dF9xk9oFk+IdeBEe58F6K2o4uB+RmZ8cYx+4tlkwIZK5vQCiUSbQx/68tIA+vDrGh60aUNb+6r2Rb1cwOaKccB8lMedGDqkXdCtuifADIeBZkzKlKisz6MAPEOVzLE5alPQZqp7hsNI1MOnXydawqNcQ3e9sHvdZA4Gnvo2fjD6VddYjNG5wT2jZvgmuoWjhMjf/d3fvfqzP/uzFzrDha8WjUpt6hsHTW1MSrJIuvStF/3IuYWlRcDPyfQLLryVwdUP/H4jJ9oWLl0nx1NOyVA7OazM0g1+nBZViwb59deT2U68V64Zro5WdyfMyq+2+p6w7uMpnPVng9mxxaokzutJYLED8ByszXT4Gic71n8z3+xFZl59vK/NRW+0nXQnyx1vbat2Zd9A0o6ecLpfOLhqd70yU+9sjPrBxU7iU1lb4y1d2VYJDfoHr19H8o/OvQfbmMruo9cbJpynU1DAYa8dn7Q0B6IrHfQGDv22jhWwmC+etPXzMmPUL/x7/4gf7cb0xIpjxId5k/zqL4CKlxJB6DAf4X336Q0iAZbDdf+QJrAJDq5z/iZb/aIlOdXmyWJv4bDT3hxyTSbpOdm7T1YcvSA0+QksemJY3/R99teneaE8x8te0L12l41WB25xu1773Pvg0lW4Gqtybe6R/alLN+S5NMBpDQDjGp94oH/zIH3p77pgmZ2RHXu2BumXj4GLfJR8D/zJ1xj3cUvglsAtgVsCv7oS4LfF93yRB5BOcZJNvmsPJ2y6xRfqevCfD3JfUqi3bbw1I9mhD38DhzhFUqW3f3p4xS85w2djL04xZj8N83CLT+MPxQ5w82n6iGuKTeDHBz/HT/KN4IotiueMV6LKOPYbDvgd8KKjt4rw4GhMvhdNvTWkHW71EjbiGfjzvdXhB118tfjHNVmRDdyNryQf/OxbQ96Uoi/j4FMfBzjyQYM6pTr0b/yZnOJFaSx99iz2ECP82tM3p9AiDsFP8RNcDryrhwdNxWbwFaulDzDg3ZMRvJJL7iXF6iOOYTPu6ds9/vTdh1/qS3q1TwJDrvh3fpTHnRh6IO0MwiLBAJyMknEwIiXlZSxnUA/lBsoNUdBd0J5hZmSnUS9pTRw4wnOSvpNxcTVhbJzb1J99ozmaGOXXv/71V5/+9Kcvw7awMuTOJmYLVGM3LhqbaGRlgQRr8pR4qLRAtCCbHCaDBRO86yYIOe942hoX/bVFm7atA+9Q79pZe9e1SeK9fkqMhAtfJywa03PyS67pWCb8S1/60svCmh6VK7MWR/WPbCe8tYdH/fYJ7qRn6SLPzhahSkk242fjS284s7+1xXRtYdTXz+zwTQ70dtK1PDYG2OVLHzI6T3pobPDu6wt2Zal951s4q89G9HHNkYZTUPGI18XXt4iCS0ZwwKd0Wj/0c3bEa6V61yf9tddXwqY3Z7LPbHNlVVt0BMMmBQOecJh7HFVrWHPM0w5Pfjg8tHcuX+iFMx6j370Eoj7GgMP8MhY7S37ZgGDD+kD2ezYnkxc++g4S3GDTWbzFc30aIxrTd3y4z5ayYTJofkQrfOFSZ54UDGy/+qbrcOuvzrjhFlAIqlb2O+6O3fjqdv42jhLvwTVuONY+lxev2ffA4qQ5u8seL+RPR/VkF64T/9rhudamK/XZlfWfrfAJgt+Sba7VJ1f1+qycrDf3cUvglsAtgVsCv5oSEMN4wCNR0cbbm9R89OunON6+o5+e8zv5p2IM8Y7EiphCIsQ+x75HDCRpIhFSkqeYKDz8YT8JsycwDnxKh3hNAocvk+Dh/8RCYiTXJV88QPWWdzEMvK6NjQZ9jQlP/h4f0SM2knxBe7FGOPTz8A0eCZximd4aQp/64qLeGpJUktAoOYQeeNSRl/F7QG+PWMKnhJFx4PAAj98uRiEXNOFX0offpy8PEOMfTPHdyrr4Aa3GEQtENxmQLRmwB7Diy41l4PWAqiRbsVJxGBtQ582g3khHH/7wLgYxrrgDDv3YnPteFqB/sS7d4tuBpnRXzOjeeHDQmTjIHri3hYz7ccQ3d2LoUtl7j4JQCukVOBteRsbA1bdxBssAMioKB8cQNzh3r42hF9x3vff6nkE26prIGXh1uwBoe3QaZycGXMHF+dIaPfiyqOLNItmEDN/iYMiM3ClrbYJYsMrCy6LGlwXxs5/97LXQmgTON20IWyR27F0YTn7RVnuLfvoINpiFdb31PliH9+pamMKpxI/vrVgk4V65dG1RSnfJ+hGcunQAVzo6bfPEtXC1PYLJXrS1KNErO2bbrvcDztnL6rjrxe+6DSW8XTdOtrSla+0//vGP3/PPZPEqgWkxNffIec9kR6fwpIf0d/IO58mLfg599gAHbzjhcsa3fs335jxnVrI1GnbM8Ae3OM+xG+usl8gxjzYZEu3Z3c6JpSN5SXJx5AVInCac5h3nJ9FUUk+f5kzX4Iy/slw++x188usnbWRJj9GbHaQnDtSaGh/gjdU8EzigUwJDEFN9PAYfveRsPLSWkDr1cq4HaMluW/t3Xc+uVz/psQRFc4pdVBfO7D2e2Qv6BHz5jexp+2YHjSUZVRCWHBcen43hyWkfR995qd/qMN4bo/I9E+MNN42l2TW8ldkNWXetbC633pNDdmktYp98Ra9nC4bRL6gCV5Ckv/to+DiCpzeI5a6+JXBL4JbA
LYGPSAJ8ik27OIJvtXFvcy8OEQd4W0RyY498X/GdpIZ+3gRy6Cu5I/6w/9EmnlffN4v0tZ+RAMhH8XlwSCIYY2Oh/J8++kqEoFfZT9nEBA7txTd8omu4lOiBu7iK3yzBJJllT5WvB19iC20O/eKbfCSC7NnggRevJY1cB6sEL8YiC4ko9xIYcEvywAWOjHqbCDzcJdnQJu7sswp8vD7Faujl+8UBxZTaiy/qSz/1Q0/Js/DE58adcDjJeWO0M5bTXrKuf7qlVzGLJJp4WQzinj566wc9xdLqxHZkqESvMenHvYRSbxmJYfRV7xrf9tXk+VEfd2LogcQL1imQ0pQFtyWI2lC7zwjaFGR4yg7Xfk5mQcgYGSK8TYom39kvHE0M3xnav/oG3wIRjibELgDajB198Hb9qL7xlp5TXPqFxyLg+0Emp0Xy0QGnowWycjcQ6kycvjO0PDy6hi+8aG2C04fTZJawMkm/9rWvvfryl7980dCYcKbfLZMluKUveIuDDLUFWfvKvj4WuEcH2PCcH04m0/jZvumjOryecKeuHt2fOl8cXUdfPG39SdPZv/tzQxot+KZjMrKR79BPnSSFEkwySu+V0ZesVpbmxqNDn1Nm7tOd0v3a9M6L5XN1YGxHuDmBbU/eBSvdR88jWtWxf05HMKD023nBS3LZfslDuddgJBM4ZWtWwQ5HDZfSfBX8cMLmcLYeLjjCSV/4zb4rc/DRFIxSkkLQQ5bNzWwh+Ya/fmgWcHgyg0ayK9GzdDWexFfwSsGRuRkvjbf0xRd80WUuc/IliNDXuTiyE/3yF4IkMu5+eQ1+y9pP+B0z+azsGrP+zbPsCqyj+uDCm70mi/ptSSYdXWvf+heApwv1yWd1ubbChtmPddjTuGTcuGyInpsn+VYBv/VbcCwQ059d6E/eJdXie+m6r28J3BK4JXBL4JdXAvyGhIPNu+QAHyHmtwfZWGl9KP9TokjsIO7xgFcs5K0fcVIPzkqsiEPy1R669Ka1+EYShU+qDx+2MVJ+TZ2HYT0s640cb+1KDsG5b5YUv+iPLmOI5fq5+/pauEqM5FNr15//1bdvAIGB3x5FXW32NWSFb32iUYl/Bx7QAO/GbBJ0xZPF+NrJmp7iR93GFWhR9+7Tz+ng7W2r6E+P7jvPOEacI4YNd3BZfvFJ5cZf+GUT6PM2fXEFHHgWY9CL2MP+RF/xnrHEtmKV4heycc+ekg370rd4Tn88ope84SdLcOQsBnWi5eM47sTQG6Re0GmiMw4GwzgZDGWXDFKW4SsRQcGnURomI9OfYWT0a+yum9QZUYbcfSRvexPrxLV0ePouc74BtPZo274tarvAnKIKTyX8EkMO/XbseMV7v5dMpk2exqz8zne+c308TrvTYSyyox8TmfwlyjgFurGBUG8Smli9xQRn9Kyswq2MxkpjLd0tatHHHvCyMC1gy0vySH7ps/tKeGpLfu8HA96x/arbMU6cwdR3y6XlTXASWf3k7KQve1G28CpXlifeZJ9e0gm4E7bxyBfOtY2T91MG4Xtk//AGD6fraI6npTNYOAUJ3rZybJ/F6Zrzl7QCs/2vjs+Ht6Vy4oKADTAaP/rjBy7XK4uSSpI/gg126ewDzgIfG+4CGfIsKGnMxrOZ5+w5RE+B4Nnj1FF0RY/SuvPd73734t3cjWb3ycNTIPh7IoeOgovGTG5K6w26yCmnDM74+EmH0Zoeoy+6wbJnwUtBUbZ7JvvhioZobz0qaKxUD7byUV+wAgjrlGBWoNB8Sc/KbFKb4KtXu6MvGSrxtbTtPHS9dhNNyaZS/ep19d219mTRdX3QS650GP/aCrgFoXi2Rgu4JH2s3/QvaHZ6OKCOTPq+FLzV2wDgv7Gt+dHziN677pbALYFbArcEfnkkwL+I9yUT+H4+oJ858Rv5X352/Sr/6OGT2KGHQa7FQPySOICfEVeImfgY+Ev+wGVjz5/ZA0gSKNsjSBbwT/kyeHr7hfThKz6SmIGHTzQGH+jhliQBODiD5yvxKm6z96ken8VKYoN+jaE92kss8bX4wF/txi6GMp4DfXCJE8lDWdxhLLLHM/mREXrEm/aA6NeudKDPtaRHMou33jzyZgx/D3e86JtMwfXGUXQrO+wBjYnPjm2vTqyOn/4dG030yUbAsw02hReJsewDPB7ICiwc6GzfCRat5AAWLcUr7JK96Fesgx5jqSsWEg+xE3X2FR9XPHMnhl5M6L0XlM0IKFnJYFxTvOyqa8oG51rZhiADe2Sc/V21toz7JIExFdhnPBlIgXsbAfeuf9bRBLGp9/vK+qg33k60YJW+M/TFp79uN477NdSzj4kd3OJsAdxyxyXTR6cFk4zJtySPTZG63gAqKafd5o7stcPfgrILKxo6omdpiW6lzbXxkse21bePx4UD7uDUWQTS1amjZBX+le3KOzlvO1zBLN5gtjzrwte43Ydz8W1b8MGdeKuP3xbb5R+Ok09wK7NH4z+iefvQsXHUnYe+6o2bzJYOdfVzDc/yetK/+LVxDvWPpsZZGYHNFh/JWuJIMAGGU4luOJ3qF2/8wAW3Aw4OSoCjzEHDZ37aYAsqCli8VdO9YEHQwQE3nuCJI89Zr8M23iO9XIQ8Hcki2Ut8CEb6CHU6QDs6rKt4bIzK5vHqjNNGb7KP3mSW/tJneohm8OY3+XDYZAVXYwgurSkFkyU30iE81lEyFTj0RMj6Q4bWJ33qt3agr/vmR2UPFuAwTmfyq48SzNIW7PLX+NqCjQ516Sc72nL5VL/ybIx07x78eWTHtYGnZ0lCY6svECT/gky2ySbNBUGSPgJd/oBc1UkIskt8WWPZN/mRy33cErglcEvglsAvvwTEDBIC/AG/wU/wG/kX/kC9zT8/3wMJ8ZFkjmSIOMJhow/Owa+A4bPsP3qgoY7P56vgfff5TzGKVfgg/hLO/Cu8zn7qBZbvq09+1H3fVjUe34gOfjRfquQbJWjQL57Kp2vDH1nwiUqJpI2N4FdfnIUOvtO95A6cEkD640Vffre3fdDo0C/4xYUWMRUZiaGU9KMePL4c+jjEceEmh/gsqeJevURbdKMpnmqHCxx8eGjfVeyhTE7FMvVRshPy1lYc8frpUyLw051YkB30UzBjOPAEHq/GEDu7FqPgwVm/YhX3dKgfO4JbX/tWdOON3rR/XMedGHqD5AuqGROFNXkYinsTpORQiSH1AtwyhxQMD2Mr6DZcdWugS4YxgmcwTobrlO3MyMHsdYa/Y8G7E2L7hLcNSXxk2DY7JXsWRxMT/U1QJRi8NWZ8G0c9+i3km9E9cSUHuEwSNEj4kG0JoXQDX7R7s8g4xuyEIxlXZ7xkZqxz/HhS2hTLfsMDzlF7/dTB17H4wHJUkmu///u//55xo2H7prdkHT/VwxdPW9b+QsTzxSN8W9c4Zz/3K7eF61qJvujvvnY68gFqiyu7WtjwryxzLufYCxud0ZaslYvLWzfezFG38l3cJ88Lixb9towvYy2vJXB6GwjebMX16lk/idl+Buq+DxBaTwQdxszZoSm+9ppMm5fwS3BI+OhfsgSOtcW
tkEXMUe18WwYssz4ffFLYFbArcEbgn8Jwnwr/YxfL+8VQyr0L65jNzHwc/+xV/8xeWD7a0UhcRm/+ynF54KOXx3X4eLde7FHoUaPrx4Ca+9LT8u1jrEA3QZ5wV8+1EvXcAZLx56rjBQviwe2kOZB4yCDvz2PVrFGHHINXrA2zMbg2Z8gxEPjQdHJnIV9wpF4rKYT2byIjQYJ+bAA95zNOPLIX6L/Z4r/KDZc+M8Iwt0we8ZnGRXsckeD08Vbfr5WS+UKgTZZ4LDE/wKTeJ8sqRP+wzz6Sun8FM013g3t+t0DzfezOWZcfha2yAb85RblAdsm+1sTtC1Z+ao2Je94EE/muUD+s0h9qOV/vBsHF3of5ePN34xRPCEXBK7BR1MWxie9VaySqGWYFQl+2NRKSxhUYRNKgPbYlICL/FPoXvv2uGZo/sSTH0lpyWmJazuXYPVNp4BBsMA3G/r2tlCN841eiWXbRJKMN3XRxb6oyVjJ7uMLNj6OADyUeU2R2dw2wfHP/7jP754+fQFQM93/ujU9+FTJZbccmYlxtGk3evkTl/mxMfi7t6zNhDGhCPY2votHnSd/PQcPtdt2tyTjdP10kAv//RP/3TZW89XPq45X2+DFaOaY/UGB/v2Jh2vu7mhazKr37N+l7wOJSeejcHpuSM4MGhsgw2WvJIFh/2JT3zigudk0N2G2djWHH37CsmRbWa/xqL3y1/+8rVB1w8Gv4IuHAKGlmNnNx3oMIZ8yQncrr1nwKeLXZvZdnyurE5ZwmEs+vu6JpjXwcKHh9ZtPEWb++jp76F4Ft9dp498B1g0dI82uKwpx8LDkW2nL+OiQetQTFXoDl67cgETrV3vPCu7iqCtUwmQo7ng8UUaXQazm122Rgbgwkvf/EAygws/9O0AB37tRuAjE0kFf89vWy/GJAM48gfWrestbHgOtzZ5e97bLa1Y0FtDNph8tPQkmZG4tP7RFd5kosWz+ZMLOH5Af0dw/W2slbVrsHCHlzySicQJj+DgDh7u9C05Rad4CLbiSW30gUk/0ZbdBiPJW1nGX/DGowEvCi/xYvzSvTKqf/mGN7ugK/C+gmPPnhWP8rPmz5b5zvx0eoabjYRLqw/epSt78LwTbteP2vhe/leG4cs2zRedrjvpd0+JaV87g9ln2ZwW7Q68oJGP1qZLNkxf2d9J731/S+CWwC2BWwLfkoA4Yc/p4EvlGPysWNsXNO4raPSiUJFGPm3/IVaJUYoHfLnD/Ve/+tWrCGHz7kWzHPdrX/va9cxPz+Q0ruUW8hAFnGKWOODnauIC+uDh7/l6+XMxQj4EFr3yZuOLfeJ/B3rdi6Gu+wLKGLmBPrzBTwbFFn3ijVyAPNBpDjSYU0FJnxzNgReygl8/XOgzDx60ClaKNvAoAMkdki86PDOfPnLSVsgJr37ylOfAq3gETi5ANr2IwU/5VMWn4qXYDA/ayaX4XPymWzF1+/FYTE+2ntfvGl/2fQ57P3kv2/CMDhQHXbOr4BtfG81sBH9kjzc2Z/5yHDL2Z2N6URRN72L7xsKQhYDxNtkZhM2lBN7CYwwUScFtEhgXJYIn1JKoBGRxMnaC3rFg4aCgTeKMO/tatCkveDg8K+lss9N9rU/8VJajrblTcok5vurT4tkYDsg8npPPJvnJooWwskEnmvQ1tuf6nfDpU6G2cQ0uPfS8loEqwOGhvvDDR8YVoDg8fW16mhM8HqKtDdcujnBGbzJbHOnqfBZd4UcHnYJbWsEF49riVeD41Kc+9RGZ4anTQgWXnJbOaDWff19Jb8bhv5Mec0CcK5gOz5x078x+FO1c9zw5tVHIftyTa/aS3Wobkxy0DrgVF8C0AQxP93CSj2P5iJ744ez17WY0W2jTYj3vsWsvmvTtER+etzatJ443GmqTmfG7jh/hNCb4E7afafU8+cRrNJknGLKBh/xae543t2fW2DkXu+wIll7YZsfy3pzBNm8wzd/z5R3d0QQ+vlbX2VDjdgxeJTUSBD5bwKZvhUVHvOHJtTXB//Ld7lur0WyOaMjuJAfGsB142R44Y4x3GpMfNL8g6k0X+HyOMZ3mhrc3ba1fuLNtMMnGXNFjHnO0vtM93GgBJ+iThevGgU8m5GadsVnPW0e1+rKD6DBm6WgceuBOduCzwYVJLlvkMdZBRo2PZnO1drWtWa05ogv/ioaSq+hr3uQE99ovuIpiJ+/pCA5z7PP40kav9sRhrPmC08Ll78EpIoFfHuJTwfjEvfbe9SW0pyNZo6HrYNxnn9l/Mtv+81m4Go8PMFq2KcehAzYuF3J442pzgC/xpre3ivvp+BXJd3NL4JbALYFbAq8kkC/Ob2vFD37zUezxjA/mY8UNBQx5bD8L+7M/+7OryKBg4s9yyPnhFHuM7adSYja/ba8KX/ERTDSJBfaq+spnelZ+nSIVROxF4BPD0CAWwGEMHL1QUkhwLZ70ckyupAjia30FBns7PChmiCPGV9jpqx97FsUYY4w1l3m1FXqMkWtVHNIvdoFXAJI3oBefYNFjn64V38gFDXJGe2D4FYPMC6Z9vLGegfd1DZ2QETnj3wHGPMlQ/gc3+oqxaJdvgzMnGvQVl7UOOnVdW5ELL2gSj/FZTEYzWKe5ym3OHKJ59LuOruK/Fh8OeTEbJEv2wZ7e9eONPyVjPBJrBlhyybAIFfOEXzLPaBN4b3z7vIzhUwxFWSR9edQbZ/1gSvJTWErR7jWhr4FkMPrBRUfK6V67fQymsWtoC49H99sy1qqyxnmW83Jt8Wo5tZ7hm8y8RXbAmePTdoIna/hsHj75yU8+z9+mINjdJJAhp2JjaGwwrhdOv8WzPydb2l0vzZxDNKN78breudyDV5l1nM+Cjx6LnRyT2cIvzXS6PyfzrHM3TRwLnCufhUWTf1vfkT1kA+ahIwWONgLZhWf6nNmpdm0XDNttjHmaA59rw2Ae2Zk+AaGfHWZ7eCInpz6yImvFkrX5hXHdTxijKweXU9YWCOCJh/gvoHB4bdhrVxb4MRZ8Rbp8g7Zz1zZ4PiZamlvb/MksHcEfneSbrHuezAWkYHu2enk2guMiWDJenK7RWt/S5fo8+E2wwUUXPbbBrz11a0w6bj26z+eEqzl7+8N2yQ3vYK0HQdtzfYKiUwDj05N3dp2NZLPGJQf+vU+w01fyBBO9ZM7XgaVz85VESBCc1qg4okULe8m+zhjAhh1aY+BHu7nXlla3YJN/Bejl8dQXfObHr0Rs7XVhzQEvfsAHC7c1oQ0+uaFX4gA2XyXu8e3xasxJEx9ALsbRY7ay/ib+11bjBY3LS7pdO0Ubmne97rrLDoxpnUZrc+8ayQ6zj4XZeV3Te36fLMSM3sjtvGB3jq63PXHvPVxozi/WWg+eWXtnPBFDd516Tu7ymQqjYi287FlfNFck8syY8Gjv45bALYFbArcE/rME+Esxi19u38hnFu+KZcV5f3tUwUB+I5YoxPRPkHy1IZaI/WIcX+0r/H65YQ75SXsPsdx4ftx8fDkamlsOvTlBe1XPO30BY3xxW94jrqIN7ebqRQFcYmP7PPSA1Yo1nssZ
0K3Y054ZTQ7zBw/OOLIyt8KUWA2mopW8sMITPskNj3IFdKFRjqGfvIwz3vzkqMAjL3CNT2Pau5tXzLMXoUM0Gktm8gFw+PE83fWBCVzocF/+RC71k1fzlFuVx/gQgqzw0BdV7qsthLN8RWv+7l2TP9r1oaMYLkfAe7jYimtzk6V50kH7azaEbnSxg3f9eOMXQ4TBwBkNQRKWz/kJnQEkaAIjYH2MyzgLRiVTkYgCUswKrKSfQlynGLCOHXMmghnZo370lABmCPWZgzJLWpvbfPredMLFQC2UvgAJ1248WviMp8STTCw6VVBjGE+bLS3j1jIw1060+ITf1wBtPnJK5thr+PyR0J/6qZ/6SPECjjYgcLiGk9w8ayPwujY5kSf+c5Dhiu/aNmSL/4QJhy+iFJ2isf6TN44xWhdmr8nUz6v88ehHNOpLpq7Bt+neTYM+9OrLxvCdrWrBOHImtcFfD1891+r3OaM34cEuDfCxk1rX+DUuXthPm4zkUwAwznpjA2zCdQGGPsBHf+shGmvBLB/RXJCGc+WaXSw+dmX9+5md4iP47GrXoTlb22B9PbB9yVF7Xrdes8fuwbmGF22OdOWejE79NKb+aMrWsoGew5EthM+zxtWiI7vujwiD87bKsTxZs7s+eg5HR3R2v8/qQyv7yI7Q14Z3aXSt3+ZXsM4/ZS942CMZ8uHigMCY7zJHsk5mEhaJA93zZexWYsR+gkVDNGXzvSEzD/zoQFN22xg4JTnsOh8EL9gOYyVI+CuhbF3gNx7TVzTUJhNzJ+vsjF69MVu5da1FU/iNT37BkCH+KhKhuwQRj3hxgjMn+Ym7yd91ejWXMa3v5vJ86ese3miD29tVbwrBdpJBdr62EO54qjX/aaeeJYu1EXM64a2gvbKC65FtL37XJ9zS4DmcaNhgPUWHAAAgAElEQVTr7M0zp3VHl66Tec+sWW9Rk5d+tiTRzc7JHM6SR/ohZ/7StVhOV5Jbh9jDdtdOrwf3cUvglsAtgVsClwTEC36yIoo4UlzmO/liMUKeCcZ+yj1fbq/54dPXv14y/P3f//2VJ3jmRS+4l08/oQfD9xtvf2p/KxcqZxabXfPt4qs8tuLP5hvigVgATkGK30eruKBfwUIMct9Pw/ppl5jRP1gyt5xJUQdOuNDo3j4bHnxo5Tyu9ZtXn6KQVv6En36iVX4kF/PcXkFO4bk4Zg70KQqBkR8pasib0GyMawUe9JNB83kOFj7yM6/rijv2ueWA9gTmc+5ewLwKUMVcLZrNo5awsHh10iG6zQO/gywcwWjpXLtHuYcW3+ZrTLlC9+c4NJHb0lpuYCz7FN/J+HvhayH8v7Ew5CsNxiBRIggGbUEREOOgpE2qEmwFIQpuYxDcKfwUlYBPJaXgHbfXaOlYWPPB5ex6Feu6n5MtPgZpgaf4xrjvmZZzsdH3vGdk0llRaBNusmhBugYLFxlwelUaLX7PGRnHxNHllBpTUaj+ikvo0dcZfHBaeOM52rdNV/WRL/hwn8/h3L5gVx5LU9dsK5xb5IrWxisMKUj62cPijDetYAJu+TxlwBZ+53d+58XnP//5C0+boIKPPtfZBb2gT+swPt7qW9vTZ85g95p8HMk9Opub/pdefzfG5kkf2dhsaNkFOswFl2CCjzYi5CDQ7AlHtK+tr04VOx3wOsnCG4LkLVBxkHDhZdcF548u8MG4L1CANVc0u19ZXDevjmhqjPuFFeT370DhKx0FGw++usIX+TT/uTFLHvGEv+hMZ933t4PCF635C3DRHT8ViJb/8Gn5EPzkq2o9Q6sTjnhaHpvDMzT0czLXxqFTGx/JqUSrNhvsHny8hEPiIeBLAHoJkA9LdmhTiBEgtU7xgl+zSU4GZJzcPZOAwK2FW8JnHNtrDHoUV+E0X59X9zJiYY2RIPbGjZ9Jlulq7Q2PEjKJSXDJAq2r0/RiPfbZOJzJULv8hQe91ij6zVFSJR7gtXnMhT+JF/rJJ30a5z4ad4xx4NCVripABZ9ejcvOwTitVWs3PrKZbJwv2p/P5oPIce157Rfv7rXJEGzrOR1kl2f/eX/afrYRXLgfjWv+bV3vmU1qT32kl405+tg6udGh+Suckrn1QZ75bc/5Z333cUvglsAtgVsC/yGBfHf5ipihmCBmuhY/5aB8s3wJHN8rdnnB4Wfj3/zmN5+LGfZQ/LPnYq74ZYy8QDFBjmE8X2xue1Y+vxcvxRaxVCysEFRc6OsddNl3FANwI7bD7UCD53jo5Zdn4oX8oS+K0GF/U4EIP3ID8cR4fCQDOCsyyW0q/niuXy6lmIMntHuur6KQ/i0OoR0d5pcvk51cHp3gFIcUoORm4l8/W3MNvzgHhxdm4KoLkBmaPANHdv09IvIvptrbKTAZV19x2TPzwoEP/Rund/086qfbPdsH6SufONdgukczudENWdCX+ZOXZ2TArlyn8xPfu3j/xp+S2VxKahg05RBYiQ4lE4rnTglPCrBoSngtAMbBGMGXhLreE742khlciqaMFll97lNsfSkbfInraRjnfZVH/eHc5Db4EtyeMWxjG6fdRNj9SYNFbnGqEpfkk5uTM2FwTs/I2cngfuzHfuyaB/59ZhMARp/TWI4t/OAXZmEt0H5OBm7PeNXHUfVzpJUFXI3pulYxi7M+aQYfX8GyK87EnD2Lx1o8sMV+H9zb2TZJ7p34x1N8Gx+sa1VdtvKTP/mTz7TReTqttcD7I7+er224fp1DyU7XJrqGx8aTntdem3Plbwy5kwdaslFOkg05OW54OPKKD6d8k6fgoagRfcblqK217I6DFzSt2fpao+hpzWvNK+DSAVqseWOCr0gH9jzNnYO32c+HtP4Lvo+CgL79Ku2EwVt92n4+1c+YtOh1WsNOQXH1alzn5RTmoI+dI11mE6fu3dOxI1jXaxfmb82x05IZNrj+yfgd15zagj/bID8yTsfaPYLtS7xga09Yc1ZoTDdgWmcVMNBOPn39WGLHJsgTj2DAO61N8mdb6DY/2OjHV+sDDXDzFWCLEbXpPPnwm8lCcpldmgMMOvIPbAF8Xzux62wRXusoWNfsDyw6wcINPlpOvZAVH5+c6HfXSra29mHNNoac+I1oiJd0kM1oWxvG0nO8xDf685dafFsjdBbeR3yYi57gQW+2ktzjmY52/QSf7Z42nK01vnvtyvFRf330uXhPXD1fP892yUe7tuCa/2MTxY7kxY74Ofqjy2RAl9ZS+jWWDMm6vAkN4NKDee/jlsAtgVsCtwT+QwJ8pdMLLv5RfOVTi5Xl715QyC2KPV4WlovY1/Cxcip7UPlCeapcQ8GEb3bo5+vbv2r5dzmwOd3b+6Ch4oex5caeixfFf3GnnKc8SYwRd8BU5HKvyIAfLfzg3bv2kkqs7WVrscNzeYBnxvnbPPJZ/cVwuCtMVShCr5gkHon3aLIPJSfyljOQkz16XySBM8ZYdJMdfuUl5sSPPYCWnPvyCM14Mb6fpIHRZ85HfWhvHJrA4o1OKwrVX25RXrG5xuZRnpcH6H90kDXaO8i5HBXPaHXQsXyvPIC8yw/gYCPoZXv
fK8cbvxgiAGd/bIrRMCpCZ2QEKQEleAq0oJznm+QWgkSdsveExz0Y1yVvjGOPTSxP4W8iuMawSav+vc+I/u3f/u0qODQuOPMziDU8fGdwFsX+K3fwGa+FwIgy3nCTF15zVAo45MvodkMHV/g8jyb44G6h1YJ1zUjxo/ARjhM2Gvs5Gf7q2zZel4eFjb7gmk/L0TjAR+9JRzT75JMcu9e2KWuMe85w5ZAslk6LWdBQyAzfzus6eNfZ3slLOs/O6M3pQEMbDfd77Rn8Cx8Mu/YMjNN9MkMLHpsjGjljcORpM2LtWW/G2YC55qCWp/iGj1Nnw2T34av/QsX+ok/bulPI69AXr3C3brTwtM7NkXxXD+Aaf7bx74so4/taIjyrY9eO8F03T0d/hNp1MksPjQk22oPd566TwcqEjLKBc25FOGuH7UQr3O474tFYckkn9ddGO352zDOiVxfx5jaaozd+tPSGdrQ46Z4fMr8jPOjynP8R1MAFH63B5qvBWFf0tckT+GSlcMkHOSUn1qO2Ipyx9JxMycZ6BV/QZefO4FZWL58+Bed3wQaDLna+Npts8CdhkQxKAJwVMNcu6BM+eMkBPJrBi2Xo7PClKP/9uq+F1nbQhP5TJ+IontOTlhzBogv9jdHisdO8EqnibXJqnYLjLypq0Bk/gm/rDVzz0FeFrWwALek8PeHDtX7X6MvW0n2wfPn5ZVFySBbBpqe17+S8dv0s/NdcfDvYZBqtK2d92VgylOCxneDzbeuvyT269bOT7umm9Ubu8BRvyKKTzd3HLYFbArcEvt8lUEwpJsp1+U37HzGKfxU3fS0kjinE8638tDxDUaN8R1HC1y9yDxt2hRY+WbyTg/D3Yqh8gl+WA8mn+4qm+CAOtBcpzxELwNmvOYwHp+XXxV6tHEIRBV1okU80Bo8VefAhn1aEMBaf2r4OQn8x3PheFMvp4bU3lxOYS8zHF5x4V4ghM617xR9ylhPhgfzwZX4wZI5fORaceEILHL7+MV4uQQb7E7NeLoG3P4BH7lRu4utn8+Fx5WhO+IrB8KINDfRVPCbnjdPui/m7borjxeHyDveN2XHhLA/Z2NwY4+hLjo5eNlbuiT5wZE/m30vHGwtDkrwOSma0hELJrhkQAyBgR4JN0NqSSUb8wQcffCThLQHdZLTrlNv8e7/Kdb2KB5+BtMBrS/Rqg3tm8tXY+Flj3DEZrE1CMoKr/p032pIJGXF0Nph43aTzvLaI9H3pS1968d577z0XEuovUa1VKOAkzX/ChJsenTkpvAcbzNLhGXhfdqAdzRxqGzobHPyYV2uht/jDbfzS6t7iZ0sWVLJrLvMHHy3g0OATy+ilk4UF4+8MCQQn/+FThKO3fqa1dMEVTgU2MGtP6bI+LfjTNuFJ367ZuZb8/EytQqQ+svDc5ja8ycGXV74a4Ywc2ug1znjOKvnpg0eA0La+8CwwoSkd7mZTv80xfl0vT+bYddJ1cvLcmbyj5SL46UgO2n7b3Rt543Zd7bppvOcOzzpcL95T/nvvesc3Lnza/YlYvOfLdt7mV0SToFgLbN7PwZILmH56m53rS/+uo6nreDxpa77a5bvrnhlL3yVWp09dPh7BsgdrEo50khz18d+SHHjXDsFGCzniWWxgcxWJJB7WejLIdtYWJSLWLzgnGPMkE3yap0SxNVLxE26w0fN/nn5+K2HbGGM+99rwwRkdp/z07/zG0DUeJUxkIWnDK1qNB2/c6ipbAl9RCHy6al5j9CWn+smGTIyp4NPYc300pvVdsSs5rB3U15htXWcH2vjaMbtGgoffdfCnDJMLWa4eXDfPpZxXRza4dOt7dCzuhXedXXRtfDDL5/q7fJvn+bd8nfu+NLQ22GF0icOSbvdkIVl3WA9iU1+CsRtx0z193cctgVsCtwS+XyUgBoitxS5+VzHCxlsOwV/zuQ55lxiv2CAvEY/5ZH4XnLgn54WPH+5vBXnuVMzIl7s2hn8Gy5fDZW4w6DCe7zZffxfSGOfLp/2Qgg26wTvgxE8/D0OjYpHciM9XLEEHGPtpscC8aIbLiQf5hWt4PTNfX/0oDimMKQ6RB/rkUOINXE7z9nWR3KPikLkVncytCCNOoU3Bxx5LLmiPCFd7DfjQYX7jKhYZD97YCkrFS3KrXkCOxuvr5VYy1NKhvUEyT5bFbnIF1/HhUxEKPDkFQweu5UdkU0w27swb2FQ5J1n6Ug1MJ1xkT65sY/OE6CgnQ/v30vHGn5IReF8NSV5KYPS1oS9B1zKONkPu99TPkBiEJLdn+rtuoe59xrAGtAayhnJer5Jdl8ieLcNdWPhPGAunvpJjRRALzRF+htLmqaQcv/rck5FFbMMPH1w5wsY2zhjPGaefk53w4Wy8e/JkxBxSuKNnaQt3f79k+TNPPJFpRSSOhXPYr5tavMnMMzCchA10NCcz854nObILMEsrObTwtGxOUaW+nu89PXz84x9/lumJg+3SGfmDTQ7RF72cXZ+BZhvZIF5PW9t7DqTjtCv38C58smvM2h65GJN+ohPP4NI5uGhvfDZpDLszb3N5tocx1kE85mhzhgVSAax1vmue3TUGjj55ZR8KyHwHR2ysM+d/zmOsZ9rkeMpavzWhXTw7f3wsHvziu2d7z7Hv+Phu7uTVPVl27lyu9ZNXOJbOnXuv0RLu83r1dOptcZAJHbY+T/mEBw6wxoIlw046Na4DTInJwoJL9kuDAGuNSX7Iga8Ld/yZP5smd8kWu2LDTvdafsHp5YP1KJHhA6xf9sTmijfZTPrV0gG8nhVntNEdj2gAh150819r48kj+iWq8QgWj42JjtUh+ZEduvnmil87R+PMYT4ywIN+Y/gxp3maa3UV3+SPF3ZgnDk2nqarbAAcWVqvWriTVfwmJzrBi2Nt+4Q7bT9YcyaX6PVsaVq5vek6ms42Wk6awK1f5RPZU3GnGFq8kCTyrRs/yIYt0l9FT/STnXsHfZG3wxzGu4fLuGTgWf2e6UfffdwSuCVwS+D7UQI2+cV8vtZXM44KNopC/Cm/rQAhh/diVPzlx8VkfpQ/tk9QMKlQosDC54rxXh73RYs9qTkVXeQY+tHhNF97C/euzbFxW7FHvC3GoRkc315uYN6+IJIHiLOKFoo27Zf4fjR4hpfihvnsu8Rd+BR04NNfnmlOMpKXyDHwgG+xHR7xmLz0m888/fSLLMV7z+RBxti7oV++7hkZ6+/vLPWlkTnJrhzj/7F3f7u2LVW9x9ejLC7FrQLCBgVxJ2KQW669MDG+lO+hgAqJEKO4DSgC1/tRHJ9x1nf6oxxzIueoZ+9pr6Snqle1atX+VWutV+9jTvfkVI4FH14234dbbgMmmelTHEyVq6jL03wQQKbw46uYjA54zCsn0raemFouRHbmufCubM5RbO7luXtrwU1vbIrM5Yb0hBbrwetCH328pvLiF0PLLEESekIsSdqHUcLrIswEGkxvheEgXPVezdVnTsljyVSCR8uOlQTWB662sU3sM4itf/7zn9//cn2lZDXDDcfOaWyNwXgJ/s6tT+2y0fAXPNjGSizbFGqOR4
mOhWW8JfI2Eydp0wezG23hogNe64MzTi82VvI3TpachE1fQqtfG3x4o50jkgSjt/Wj86TB/Ycffng/RIrmhVn6OT7l5G3vyeo5uQb3k5/85M3Xv/71pyQ+Xa3OPrqdRvcTH2vCmR2Cq61fWdtzf9rOrtGc9Jn+g1n91pccjOnLcar7m07psLHF19dH1jzXX3ndmTkKPPaHT3Hx2QVs5QBOgQ8sPRY0ojuY4FYG0ZYsV6ZL86P12eviXhaisTWbD2djajjyVclOHYx2+3Z5XTpr2xt4Xpz0srTUPnWyclj4eDrpji9fs/jyy9p86PpTaweHdkkVG8//Nsc9uoPV38+DHvlq41vyCYK3S4DtsAGcPjyhwTqSHokBv4VGsC5w2cbSEl/Rwv/sA3d2AndfMbJDODeuoGF15SBdIhP9K8OVHVrMW9kuLHroPJrRU4IrMUEr3vCNZ/Dukw349r9x8kuW5phvTvGTHLOXbHdls3EWndmOefTq0HZlGm7j2vGh1ocmBY3k4oLXmItsvGzo52TJCXy62b3afkoXa+/RWr203Il4V8K7fee+eQST3KI9m0Srn7qyiXypPokxHegrRsHBx8mNxCcJtlgpBkvC9evLD6ARLnEKjcYlpGxDH31nP8vP1b4kcEngksBrloBY0UM/P1tccvggRyhfcbDg4o/BiImeqeQ0fLFnib/7u7+756wOYPqpD1/rIES/fEDbM405vbgpTvDNDh7QUwzY+FCsk2MYRwea+fDyBTTCq/DvcPLt8gE0+RJFXo5288WFXlDC0ddS+hx2iSlq8cI6Hf4YF6t8IQ2fdaztYARN7vFqLn6tI35Z13hfY5GJPs/p5e3WcABF/uaTobnW1B+cZz5rwi0HNeagDax7NFt7v8Iia+s5PDKvOFxcxgsdoMfYmQc8F9Pxy2bKPcRffXQRbjqpvbX+1gr/mc/VD7986LUdCpHBiwdDp+Az7JKcEuIS0RShLkltjj7G64Rtk9U2f3PDpbY5ThoQDeejfmMlmuf4KjtDqM+8LfDvWLjqy4AlvxzH+ZOjE859vNjcNswWm6Ir3O7Nca/t52T+FX39xnJY1cbQw+k0b8d2Hf3oTxbGepDH/+pQ2zicivHW39p8MD5FtFnCfcLsPbwd+ERzMgCX7MDZ2Cufbccbh/n973//zW//9m8/wYY3GHjwkB3FVzpxj3Y/J/OwXckOzFPcb/sJ8NZY2pJnff1Hu3AsfcHAre1nSXtohofkl068eVibjs9gF6c1u0eX/dL61orfk0cwwbf/g1lYOASY3hwIHAUPOHYvdV9f8ouXZJCcGjdPcHdS/6gsvuf8gXnoTocekuFrTTafnNTBR8vO1bfrwEGODseSlbrg3xrRGQ2tEe77orey47XVZ3/z394+bS5p4mvZi7WV9KjGY2+ujEejfRz+4IwFa15+Gu7w6uN/JGqSiBKX3gKWhMU/3JI9D8MCLDsx16VtnA1Hi8TPepKJ/iECXyp5kLCAD5YdwgMOHRtftINT89984caeYlR+MLmBJws64uf4kv4mET4lWckk/cCNP/S0BvwleejHb/tZTS4dHEVLhzZw1Le6pQf96OlwvkTbnPx4+xhP6dx4f4tBIkWe6KjwWR3YsY/4aJ+EM/jkDYc1gkvuu1/MzWc1P9k9EfCgAWbhtfNF2otj99TCgCOHrtXBL+ujM2uwRzqU/JKLPsmsdTw44J081e7ziWKkeXRGpuSusBOwV7kkcEngksD/FgnweeKZmCUn0PalipjKf8pzjfHLfKjDjn7F4rCF7+6Q/Td/8zfvhx4/+tGP7s+dfKzxfj7k4EKexHfL0xxYiOXwiesOVcQoMQ8dxQK+HWwvoPtj12A92+LBfHSI39aUo/QigC7lI2K/NeCWo6DDhW84HLKYYy1wcJvjQEYMwZs1xHky0m9dhylyP7GnL3zNQx8ZehHk3piCPz9rC5954OCxNtrRRV7BWs/aYpa1jIldeHGAJP/QNt7XXdrJUW6DP+vAQ6bJlzyt6b4xMtz4fe6HcgowxXZzwot3RR/+9bMjXwVrF/ODX3zmda9GM56K6/QlXr+28uLB0Oc+97m7QVVSrk0pofFWbRPokk+GbV7X3oPp8/VNeEs0w6EueTyFTkEZQW31luhOqY0Ft3U4wCxe/Qx068VXOz67PxPK1oKfE2BUEm1vVo2tEWtnqNu2OaOj/h236fYKL1rAGatOjxwPHfZfrYKtDod5PsukRw9n0aHGew8s/ZE0ztUcD2dwL136Wz+a9KHjU7ffDMcTmJNu9z/4wQ/uP6sL7qzh5CSjMV1sTcd4Sdc5HXO60ns24X7tIwe0zui0seCthU51dAR70gfupH3vjTcn3qMtmuOPXtIJm1v6wLa/4vkR/TuWHlcvxumZP/Dg36elq5eVpTXMebTWLxt7mvSukexPP5Fu9Fu7++Z3Hx1qfeCrtdNXdIWvh7/0ok4v0ZR8F592trA0waukx+iMPrDRGFzrRFuwcEX7+uNoNq910OJa39uclY019NP/+ux4jCb3fbItkelrIQmF4ElG1k9W6nyHxG6/qAEPnzXRkoxPWqP3tAF8OchyEA+mdZY/6yvJ0hxJDToWb3pfWElFuJembGZ1tzpBh8SMbHZefN4JupV0E93mdMiTDtxnR2SFn/6eAPzaDh+CT9ftTfdoi9etk2d89DXa21syLTGKPnW6WRvdce1kvHjTb/adXXZPDtlYdOh7rt1YMqm+C3RK9qQLfvfqvbLRHdPnkE/+sr6QffN/fKyHF/zaB/rIn1zN6e2y/EkSL066erPswUSCLdlv3kn7dX9J4JLAJYHXKIHihIdvzyb8ZzEJv+KOuMtfim/itHgi5wTHv3qJ6nDBoYL4/N57790PNvhf/tWYXGPjnmeUnjf4eHHV4QTf77mneOPFibxmny88s7gXY4tfDnL09fMxhyX8PZz8Pf/fyyoHMl6Mg8UfOsxFhzigRjuc7vEttjjgMib26Jcf4Nc6DmvI0Byygg8O+JtLFg6HyIY88AmGLMUwbXSKd3DDhw85hTY6zTXWSy33fRGENuvCt7LVh+ee/Yq/DqLIZ78eKk4n/5fiOfsox9DeudYwhhbxVW7ZxxPG0NLBoLiLBvPxBY7M6b3DLP3lbg7/2NJrKy8eDNkg//qv//rEs1NYD4GM0t9xeZRQJrASw5JitT6GRdg2kvuS1k3cF0fJ2nOCf85Y6s+oMhR42sAZkDqjOpW884NfXEsXHMZyOuTnsuk5BEbHeVn/H//xH+9OLJzxaX5tdQmqTR7+7dc2x5h1OUTr+GrmS1/60n1+1/lg7/4f/uEfnv7o8MJq9yDYGvDbVF14chpvvZLcXctPxDjdpXcdQrAch/9OxskGmxziP3o4P+WUwdLuVDhZqdkZm2JjJeHf+ta33vzBH/zB04NV8gPj4SfdRUf2sTbQGvq0G4u+xtGmnQPFbzSCbR9Yu3U5I06nn6GEc20jOwTzV3/1V2+++MUv3vXA+bMD7ewNDd/73vfefPDBB3c6lebDib5k3Vhw/SzMvv/0pz99f5tAj3gQvHujI
2CcDt8aldZLVtlVdbI+19952smV3KJdH/tcHPDUp736AtdY+H01hNfu4c6PRXO0tU52uf2t1Vww+T9t/dmENZrbvORxH7gVa21dO1mgLd76Uqm1s632x/JtLF+18PZKNERr9Nsb7DidS0xWj32pw44lDRI8tpzto5ONmIMmcGJCSQ98ruCtH6w2eP7HHgLjAu9+5dJBRvu+mNL90hxvyWNlUd/arC+MPnr3Mzzj4Qx2bS55gzGOV/s6eop/7reIDSUm9jG+wUiqJDH2t70HJlvEE7rIG/zibr34zqbQJNmWsAUT/2B3zySn4KrDFTw4dkK/vsL79V//9Xs7OHyyE7Qr7YloSw7Z9y8I5nazNBkDF2x6OutzrzcvuGQYLr5NbENT9ofObFeftniEV/aX7VqL3Usw04FDc7GnL8cWLz3SGXzw0C9ccFxfD53av+4vCVwSeI0SKJbKWflDvjD/6cWxh3owfKLYw4dq87/anh/EG36Wv3X44QCGL5Wz8sdid4cc1ujL9nw7/19+a31r5v871CnmlrcYt4bYLI7y//o8sxVfxQkHJw5u4ITLmmrz5D/8P5xwuOT1DmngEhPAoM9aDmk6yPH8ZQyeDn86xEEDvsuRjMvLHMJYz+Ugybr9zKycymERGsQn9HdY5AsjXwmhiYzcl5PA18GcNYzrc8BEL3DhG0/FXDwaA7djxiviNJ7MI1N4y5m02UF5QbYhBxLD6cNl3AUHHtGtn05aSx0N6ZkNmqNEt3nsi1y0X1t58WDoZJZQCaENXE0RhNfVPcGCaY66v+XQQzCDPRPNTWgZi3lrJNGl70wA66t/edg+cGsM/Wwo48nIMgZ1c5oXbEk4ujmbDkw4uA5MMriMjoE1vzUWfwaoTzs5JKsekGwIm9K4DcIRWpMj/Z3f+Z27kecAtq7fIV3rWmfpQxca0yH+Smx9OWRtTsmc567k1trB5QTU1uXAgj1xgUnmxpJJ9C7Nxjw0oVNijT5OjGzYGvmA4QzpykVP4AUWcnXBnQ6SQ3Ja28im1kbB6UeXdeMHjMtYzgQs+UaLsRxbfN0FcyvoMc842XPUvV3JCaajaH+k0+Rs7ZUnOfepZeuZz8Hby6ec00k12lt3YVtv7f0dS09VMl0czQuoddyfck826sZ23RNX+mrd1mDXkgsyphM/WUufavvKGu03+tXvivfakmyrTvoAACAASURBVJD+5g8dG1evzNGhf0s0Lb+NL73x2frB6397S5YkOOwBrdZ1KdHZ35gBK2EAm88OtjUc8Pb3aBZfckxGreMw3J5mpyV51nYPlgzgkcwI+PZfb/VKYnZPodllP/cGjezZP9s/9YButOCpN3farpVF8rCWL4wkZMbzs9rtWbDpKxwOYSUI7tGB9mwfDWg2x/h+wZOf2XXWfsxxbWyMfn21zbFeeoqu+NzYpC/bwws9xKuxXSsflG1lu9YxZl74tMOX7adfPhV8eO6AtxJ/W689B6cvu67vxKW/uTuv/h3T3nXCvbKPB/Xu1WD0sTd7RaLr619+gx2wbzyxeXbqnnzYtwTdgWq6M1dscq8toQYHd18IwwkXnFe5JHBJ4JLAa5VAzxPydjm8Fxb6xHg5Oh8oFzUud+A3+09SfCtfyec6+Pjxj398/2k0H85P+1pE7u/wo5hZjILL5Z4PBr++XrsvmsnePXg0BesZDH6+G538el809acy+lMq5jpQ4OvFIuvunA6YxAD5RYdceOxrpA6B1GSi+KqlfvTK6dTk10GTPjEInegiLzTok9+Zbx30WRvcR7eXTT1rOCwCAx7v8Lnv+UAu14GRuXjDh9wnGdOJF7B0DI/amALeGvRFv+Qip1Lic/MBbfSD2dhuHntIj3cE7wpY9MLPlsAo5qOjOdmJ+sQjr6Jfuc1rLL/SwRABlBRuIlyieiaZ7imIAtrIaoZEOZRSkrRJaXjqY2Alc+rTMFKq+tFYituxR8rMsNC8iZh2iTBHxSE4ZWQcNvdPf/rT+0OFMQ+Daocz1nNlWBlg/a0TT8EunWBsNEb+13/9128+//nPP60vmbRZGGhOjqwy7gzbmKt748F5eExu0XfKBn345yDQ4Q83o2s3TmtsbR1vhiXNz8EmG/Oio/XYBhl38GUzd7BDRuYEy1bIAp0cnn8L7ydnBQS4V78cGFwejnt4UkfDyiTbSy7dq5PZym71iU6yWlrTe/sD3vRz2gBYh5b9m2N7hrMvQIYr/tbe1snVH1/JD93xo2/nLx/pyfiWlU1z8RIusP3han3poBreYLeOzq1rt6avQtiXsnjvHe8K+bTu4l+eg2m8n3gmm+WRvXQ4RH/JRb330cMujanJJX1ZS2kN9+0p/eYbq/2o1hdvO772hab8KXuIx/y4h06FzzJuD6wPQUewYFoTbeDjeXVifgdNJS58lcsezi7h8iaJryzJAu/tkbdqkoZoBmtNNEiAds/mJ/C2+iguFWPMiRcyak+inV69QUpWwVXrz0e1b8MfLe5daIYznfJ/ZNLfIABzfjmkb/eN+fDuJTmLV3Vz0ok6GhyG8RXVwRtfe357OxSUiFmHbJNRuMkzWyUzvJsj2SzBwu/SIOkrmTttPlzJMJzZbPuhOr2vnd8Xu5VwLT/a3deuPvc5HOlpcYb37Mu22HAHPPggW8k0XGSoT5HQg9NvLhtg62RLl+Rmn7iXhKu9sFCMg7dnxNxeMkXTVV8SuCRwSeA1SSD/yf+KlS6+VhzznCJmujcux+dz+Vo+V74gjjlkMO+3fuu37gcS8mQ5Tl/Q8NW9hBUP+NzyHbHKgY5irKKvF+D1ea7wjFGs6IWS50LwaFb4cf4b3Wjm59GF7n6OjjbzzOnLIHmQL2nEB3zjA549HIIrvtCj7OGQecbRAK914dXn8Eye5Usj49ZCg7gOh3XQLCfyvCGWOSzq2d0hEhw9m3k2NAcd4pqLPOGqr/ipD+3lgeDkgXi0JvmRa7pO5sXq6o3T5Q8b8+lVPLUu2XpJpU324UADuGJ2tONFrKYzdJUHmadkO+XE0fha6l/5YIgAEywhlHTbYJuAl2imfGMMIJizBi+Z1K9NWdopagWech4li42BPxO8xfFoLONaXhz8dDnxZQgufcuvZNna61Cs0dXarcvQ/LTn937v956MLJnluDp55RAUmxcN4BgtebbZtq2PI1N2HIyCTjpsUztw+trXvvYLD1V3wCnxAV9FG8503H20mOMnc/t3idpg0dU9nN/+9rffvP/++3faOTVJNYeP/13DH+L2s0abnRzwAU8/t4OLrNJnOlmnoe1h0NjaU/o5dQd/cNteGWmDCceJN3oe2YQ+42yK/jlONoY/9kYm5Kh2gT/lbf36W2Nh0gu6lofleXkA8whnfJ28t6Z1omNxhMvYynJpjZbFvTIlo52/Mo2npe/kLf62Xny7LtxwhiO8Dn/7Q9V4BWc/xUf25p6NCXrB9MC9fNaO/nO9aDrnoCd5LN1oWbrYlLJ0ZRceOBU2xucKxuyvQGlNsC62mC7sR7y0Fhzxr98loeirIbjhKJCGky2zc8kA/NqSE/B4aL1kYz247Q8H
ux2chD8bV4Pr7wJ04KEurqC3NdAf7v7OELj6gsseFjeYjWdoWXlYJzzJZmOHvuhK3tbrS8biQjDmmqNGjwJekfzQoeQrGDJvLnnGgzn0aB3+Nl3FdzYAXrv/NlYipa/9t3s0XuHWdmkHg8f9yWM4wEVftpre3cdr+/DO8Luye6M+cK5w6W9uY2cNJjvetrXJS6LuJRAZdzCm7WK3DnIk0PCKW/ZSP9P77Gc/e0/GS/jZmXngHDZL0CWh5RsSb/Ml5Uo6vN9c5ZLAJYFLAq9IAvx0B+B8nod6/lJewLfylfr4YA/ung0cVHgmko85FOqFKf9rnK/lR+UX/LW54iFY+IqBaj6+PyZtvpilWB9t+uQ0DlP4dHPkF+7FTHHDQQL/D85zifxGLVdRjItzDmHAibsObPBVPO9wCN3G+ipnD4fQIsaLIeaSw3k4VB6lFtvFl9Z0OEQm5IjPZKMPHjEej/D30zEyA9ffaYLXXPdoLWfrp2TG4SInl6/n+zkZWbscKOEX/emgOP/ItIvzG+/Jgn7KV4rp1pIDiaGeJcnRGJ7Aw4Fm/OlnF2Tkni7AlY/teuU35VWP6Pyk9714MNQncDHpAUch7DVkQiQkG21rQi9hZ0Dg1G2KHgYkPAxODR5cl3vrMZZHRvGcAsBStqK9c3csg7KGhGyTdDT0kyl8MbDgt84wq62Zcbf20gAu4+J08J1D5FAkl5yag48MONw5qPgqYW3zdc8h/e3f/u39v3PhA2/oZ/g2rI0CP3344snBEBrdh2udZv3wRwOazFkatF0LD87VmDp9pCM4yZ6TR6dkGVxl4Th7D4ZkxFmy06VDW9/KqrbaxfZqWyMa0134fK3za7/2a0/0Bou2aKovnZw0J6dwti4bKNnHs32h7gGZPLIThxHm2Q/P6cjfGdoDvtVjTlcA7W9bRW+6qAZ70mws+oNb/pOBMfOD3TWad8op3Mnt0f0p670nl2hOj2s7S9sj3WSP8Xziju5wGl8dZtf1Lby+/BddolPdmsazpWjXB0f0vET/CeeevdLz29thNb/KZnp4bV04PYzabwKmBEcyxvfwDa7Vgy+zwNh7+Wh8oTW606t7Y/xOb5vgdpkLbzJj/9p8UQFcjV52Dle2Cz9448aCw6N7eLN563uAF+T5PgkYn2K+K/jwm4te86I92KUlW7OHvKE0J7jmq5VkrW4fG5MEoR1NfR3FJ8cneF/dgIFb7JF4matdQhJd2UB7IL8RPfyKpI5uyWrpkqwlx3xR8+BTyMgcvOPDuPvVS+Nq65Pv1uklm0YzXF1gwehvT1i7vRD+tck7cbcS/9q7h8zt0q+tVOv7+c9/fo/x2lvgVKy78sKXq9xGTab2hQQ3/Ujo+XHxSWzHZ2988Rpue4oeFWs62LMf4WS3dCeWebDoJYnYbp2rXBK4JHBJ4DVJoJjHH/Oz/KccnC/kQz13eoYRf8VOcY0/1CfX8Ddx5TQOPszV5qP5Xn5WTmAsn168KdaB52/FuOJbcQlNfLlYjRYxuRcwDhEcvvDt1nFYpO5gyzpoledo8+fm9CzD5ytoUxzY9PUPmH6O5aAFL55fOqDyLNQXVcbRKX8jKzmG+XIO9JlXLtjBkXX0o12OFv1oQBe5gtXvsAhObXJ1T/Z05XxADHRvvByrAz54yLL+/uA12I2z5OPASLyjB/edD6SL8gW1eA4nufgKy33xXD9ZpDMxmazgZFPgjffsX47KpnwVRQZrK9mL+Is212ssLx4MUdwWwlJsCsbWw4OEMsVtneAohyAJGKzaprI59DMMdQ8wzauPMYChbIpJ6dqVbes7Yd27GE3JaAmwGl05Jfd+/gL+OcW3XjXn5esApcQzOkpOrbtt9x3YMFCbM8dhLtxkpagZ/hp9mymY+OMQtTkZjowD4jDghqMkvZpe4YC7NU8Y4znK73znO2/+8A//8Al26Tjb5qy+Tj0lI3VvWh/Bx3djyQe+pdn67A1P5Et/9Ooia44aDNvzUODNdbZxF/StnGusfFs32JfqeM0eoid766HPm3gw6IuX6uTu71iwL/34SxeNu+fEk4f7Uy5gnea3XnyqwSbj6LVO/C4+Bw8eKFtr5bXw2skuOSWT6uDDdY6f9+dazV9e0qe59deOV/OCWxroaPWtfa4JFxmSAVkJTuCiNZs3Tz+cC6OvNU75NCf64Iq++k6et197aU6XanSsPPdvIKEvvwjWZW21Ofmu4MLHlsGxFevWz5+xx3x59fKO1tbk6yUcfJS2vWHd5FPbvuYn7V8xhD8Db/14BRt8h6xiCx8gZi0tKzu0eBCXQIHBa4clu+foCM/GXX291H26Tk/qZBrMyiWZRjf87Esslaj08gAtHSpFV/aRLaMze+P3ybO1rAPelY2c9CwP4Qarjb5wkGe6S+/G0ZENbJ0dgakkk/Bkn8Fs3R7c+eExtv3BVqfj837nr67qD2c2qybfDoVKsOlJ4lldm42yVQ8EEmgJaT+rltTj34MK2WZzJeHFr/Iea5O9NdTm0utVLglcErgk8FokIJaLd/ws/yYn5m/5VD6SP3T4oIB1eMCv8rHG+wmZAx7+1xi/7wHfsxDfqS5nyKfz9XxvX3dunBRz4YJHf3kxPNoduvDJcKADnAMmBzr60IcvdYdUcModypPEa3HBc5u18OnQpgMoz0fanuXgNm4t/eaSEdzG4enACH3oMJf8OijqJ2VkE2597uHAWy8J6cCaxrQdmljfsyZe+9tDHQjpI3vwYuDK0xr0hp7yCnqQT+tPx3SCJ2OKtj6yIWt1MZ1e0YrPYqN5cj62oJTDmUP/5TLgN8ZbI1htl/Fdm23CvTnFfZFXUl48GMKjZJ1AGC9hebBOKGp9xks+JawMp0OdkigGkCLUDNkmNM8cxqBtXm1zjZ1KSvbWVVLYI51QXHAZVIkoOpZuBwUStQ6Fmhv+Xa91d80SXXUPUvWda5e849fpN1qWrzaDNbv0+fnZV77ylTv+9MBBcaCcDlgb2Hqcw9vbVwNkmOEnyzaq2piDh/0PYvUbqw13J/bResKFf+d997vfffPBBx/cpyw/2m04dQdfC6cd7cHrSze7fvohGw7mZz/72d3B5RBznNHghNthy2kjrb/1tnMGrdeY/sb0VfSl7+yd3rU9nBtfedVOR+59+QA2WRhbXWq7lrZkm9ySFbrYJbtQK9EQbHPhVPSbn+zCcfJ7zne/MNrbF76zbs3qR+sYs1d9+WK8607wlPqtoZz07Di+GwdPPs2rHQ89yMJJl83TX4HPVwECabJOh2DAhs866UjN/+lbmsLbWidvxuurtu72LV/5quyTH+qKbzjJuT+kbRz8+jb43aO1AycywUP43BtfO911jUti+DH7VqBPBmgw176RLHUwIvkBu1/DJCNfTXXYH7y6eEPGSjXf3x8A7tCJz0BX+yBdoRst8BfvWkP8yjaSoXsxD08e+oOFOxm059Bknn4XmEcXHOgyrzlkja69dq1gdw2JE5myU7HDXAcQp3zwDldzq7NbtaufnKEtHlYe2TR7Qa91gmtvZEvWyNaX9jsRM5ZesslHtrl7oHH
+Hw3RXh3+dJJ/dU/fdLy1ZLs3qcZ6aSZxTr9wbzJsDWuzYbgU+Nkh+dM5eUvAHfDRjThPPx4I2JF+OK5ySeCSwCWBT7oE+ErPLR7wxcsf/vCHT4cqfN7b2/MMf+iwwUscOUmHLv0si6/ma/lOhzJ8KZ/sGZavdFhR3Nx8i1/lr+Vqirgot+hQqFiAPnE/mHIAX8qg0Rpo4a+tI7aq4eazxR54HRSBUcQAMUR8MN6XQ3i1HtzwdMDTQRheFAc7ZOK5x9c21gcPbz81kyNZzxrwy7XU5sFDRmQldqEXL9Yntw6LwHTYJWaRn+f44lr5mjlo7r4/KO0eLvOKtXRoHTiSMZ42Jt+ZnAKuvEANH5qL63Sgnz3pV8oR1OD0m6e99JSDbF9Lgy0na41fpOx13L14MGRTMeaM1GfOhMp4bBaGY/MRpJrRVJfU1LfCNkboakZiszCYLhuU0lz6KEAbDoXBbJ0q6nef0ZxwGQUFaz8HR/k7/tyarR3eEtzmVtdfbeNyDJxQf1kf7PKwmwS9ZOVTyc985jP3BJHcbG4PLzYznSjmkW8Fzvqq20jV/m19X6ScG2PnNLb8wfFoY0U/RxJv4YqmE7e/d/TVr371ySnER7jcc27f//7333zuc5+72wab64sgjlEf23Ri/Ru/8Rv35Hll0docZo4kewjOvbL6eELyTAOs68QFfOXlPjkuquRYvbJJvulL3XiyUVeiJfrXPntj4gFOCUbtgDBn3tius/DxcNIBJprgtLZ/We1BPWfc/g7H0tAa9d2JvJV4gk+pfjf8dH/2R8OjfnPpK9mlJ/fBn+t27+2/n/yA24ez6I438rRfs7VsdnlKhsmFj21+cPF51vEXXcZ9CUfe7GRtnP9ZfO7Rc/onPJkXTu0TpnnZ+8oObD9T61CDHPCXfNBBhj1Il1TZz65gsyXw1rIunMkx/O2b+Is3ccyhM5/boYwYA1ewyd+4/YHWDlfoQukNEfrB89+SmQ5T0BxdaFGyI7HToROaoz94c8ggO0yOqxswfL06+WijSZ2e0I3+vj7Fw9KVTK2BNj5g/+h29C/OOyO3gieJJj4U89vPp81nK2t7a0/mO0C0f4IxB87Wav+p4W9+MHfAKcmt2tCuD3/yIze2p68ClqzITEy2f9jw+tr2Z7bsXpv+qrVdxnzxZS3xGR5JtIcCa+CDbblHsweA9CimdwjkIUAbLFxwojN70Qff+qBflMx1d0ngksAlgU+GBPhk8bo/hsz3OeRwzx97Ju0wqJe+xQo+kO8Vm33EkL/2DKu/HIGfra3ubwvp51/1WdNVPBH7rF2slq/Ay4+3rnnitJxADFbrA9thCX8vzxALwJrv2U2e0sGPdcEb61Co2jz9DlPg16/0Myh4rIEXeNyLJ+I8eemzLj46CFI7mEJzh0VkjB59xsU3MYpc8dRPx8iEnMVyYw6gzNFnTX0dHK380fzR7RDLWHnEWigei/3p13gy91wnhivlBmynGGoOupMjORjTn748I8oJyBIN7uFAv1JeU20d9kie4Jau+4RXUl48GCIcf3mdAAiYMPw9Gsr1cxIGqk04hHteJbAlURTtIuQMngOglE3YS7DCBw94/SlolbbtFF9fitt+Y2322ulz4YIJR0mme+3F0d9qkGiTl6sHF4lbl4cThxeS9zW2ElR4e3BoThvRRjDO6Xmr2JvE5HrW4P2hZn/gujH60t5a24bHD5rcq9MT2VjXG2WbwsUuOkha2G3vOuYr6b/1uzfG3jpEcn/iBZsObGDr45FjNGZjc8AKGWu3brJevP5uSvjSp/HkUPuOcPrhyiYaewR7zkkHyVVNjvYSfM/pqP7WbH796uQJX87S+vG3NunLuG984xtPfIb3OZ7SQzIUEPpCo71q/WhY/tDgAUsQErgK1AUCtFdaZ+9rg3uOPjDGurp/QvxMY/netrXILX6N1a4OZT8NWRktH75mcv/29qarB0a+1EVeSvD9hzXrp6/kGg1Ll3krk+U/GYCHa20kHPHpIVhSkM+qjj7wyR8fAm30LZ0rL/3m8WWLN1qSnzn5Sf7eA644w+ezEePZlTnmg+uwpwd98oQneaCXbtint2nFEv5X0tQny6uDfHhxCyzfCx5+9Ky8wfXVawf9HcKENz2gu0MHPErM+PfkkxzBx3e6A1NMMNdaXhLgoYQNv1145c+TU7HV+tGFD21r6EcL+5T84hluegCTnbFlMPCSa3qtnY3AvbTjJ72DUYwr2Yn7LjD6t7QPVl9r/9tuH0RDNkJ2DuiSdbrCu3iM9w5hxDgFLfkqa2vzZ2Lh+jJtb0XF5BJucyWxcNITnYjb/CD8/DCYfpZtX0n0fRHsIAke+1KMsx5cdINe+hffPFAYV8PJTq5ySeCSwCWBT6oE+Gu+m7/jl/lpPlTc49vd9zEBn+fLIn6V/+xrF88RxTF+m//0IrgcRb0xBg4+tAP7fH0xTc3P8rkOMuBHh3zIPG0HLuaLBWD4e3x4PgGjv4Mihylw9rWNMTyho596dTiE1g6FOtjpcEgcsI5nJ31q+Qpa8W998aZ/RQ83XOIRecDnWUq9h0LG3ItnYOnCWugmG/z3pav4372YVbyEFw595uDXhR9j6JQHNUa3xtEM5qPboZHnjPQEj340kLca7+IhuTUfDDyKNeBjJ/I7v84BBxebgodO0Iw/tkdX4Yhec9Dh2Zs8+xMG+l9jefFgiFBSCiUQIEGUfKgJVA2WkBmJ2gahcPdgUlTGobbRJe3GwYNtQ2ZcwetfhVOGe1dllVQ7mO4XxtjCeZBAu69yPKyXWILRVte2pj6FjDimDk0YF35sdIbEKdXeAyG4bDiw1jXfhgbbiS5Hp691rdlPz6I/HteYk1sHLe7bgOlDn7aLoVsDL2hJh2hCf8lzazgJ7ouT1em2l57+GG4yDT+8nBLng88OqKIlWaLDwwr6jLE5dHF+bWz8dcFFdgp4pbXhNI+9JVd1eHsI89VF89eOtBfv/eZWWqf7rZuTfIwtTnPTRzyks2y/wzhzT5knaw8n7Lj1oit5drhnfva7sMFHGzhtXxLRkYdFvkDbf4YTcLIrcMkArWA4YPDrBxZ3+Hfu0lN7+1bO+ulpf06Gr2R26iI9P9KNMfiSS/fJe2nQF2zrLZwxl8PLfoLVA6maDQavTj/oXjht+zOY6miLpu6XZnTFb/iDJ3d9vtpw0Mfmz3XjKxzu20MLa242iz5jweHTRf/ggk126ACv32GPvdwhhTnRCb8SDnU+hP9ka2DQao5ijehEw851H85kEh3iEv+RH1TTQXAL3xy44xV8JX1YSyzgx/CH5mhSJ7P2ZbQnl77yAxtd2vnydGTdZB9N1ebBqwbfvoUbXWQYD0sPWCVbeXtLhsWtZAt/MolvsYFvMDe7USvpp7HFow1XctPOttgDGpVzvdZVmwtPciJvl59Drl3SQXHNPtXOH6m7lgZ9fKwDpuiig/ybms8rQeYvJZJkZO2PbsluP1WXWEv+ff3qy9aSZL6TrM1BE17YpCJhdy+2ZTfo64vhdAnmKpcELglcEvikSYDv4sf41Q4i+jsxDj
XEng4CPDvwfZ4b+E/+lv8UA/hPcU3NH/OhfHd+m9906YfDYYUx65tvjf68RXPAeT5To8nzFT8sr+7QxwFNNKK/jx+MW8/BQwcT8IoL4gBeOhxyyOVevIFLfR4OWRvtDm/wb9za7j0rWU/bMzb6+mKoAxVzfSlFbmIUGjrIwZ8+8dZhUgdE6LVGz0/9DMwhizE6Q7v10ZPc2KD1wYmR8JUHFHPJ3FdHrQvGfP0uJZ3pp+viORr1kbVn8fKDxo3hCW3yAl+fgSnGl2PT/cb91rW2Nnz0zT537JO2x34ZvS8eDBECAyZISmKICsOSVJVwEjplg1MbI2hXbTAZALwu8xmQJBxOm+JMwswp6WosJWUwazTLcEZUDVfKtXaJo0S3hDtcGVS0hpfhmFtSKbkr8dTHYNxLjN17GFgjCi8cLhuxOQzahmPcZIHe5ASWDsjCQzqa0doGyMiTjXm9VczQ1fSwMgYf7F/8xV+8+fKXv3ynW+JbkqlOhs2Hq7XUu0Z6XhgJMUcBF5n0WSbd4zc+4MI7OGM5EOtyqOl8ddt6ySBbM7/1tMmWTXJ21lB6EMoWyNmh4KNy2lsw6yAWZu0yes2JztVZuJavk8cO45bf5BYsx5eN4YktCo4cYfrc+UsjGrqPDwd6HKp9qmZ/2RE5L93aAgXH3lcWay8nP8/xeuJ8WuRoxPsj+ZOBYr9qx8/izmbRSO/B7TKnXcMTXPpTZ3PJZ+UKd2tsHZ704eFUYASTDvM37SW0Ld/Le2P19XOyeEje7uNDX2uhs/X0ueIR7g4GTtqScfjB8n/2fH+EGm57jXy0gzW3NYPJF7PfbAy94OBDv+SEbUsO2sNwKwV5a4TboRMfADY+8aEkWwfu5kqQ1i80J1mgJbktzRKtvkayV1Ye/ZcxuPpaiJ/ddaIj/aFH/O0ndsG2ptqVPk9dNo733p6SFx6jDV3o4B/415UN2WWb5GROesgGsmGwwcABFt5kRo/5AnOimY1qo6nx1jTXP3XooMW4dZK/8Ur8ZNdoWJ/+6U9/+mnfwtEYmaIhW4jearRYV93VWvGkRj/7Q6uanOnN4Zh9XRxis/723XvvvXfnxfrm86/k5WLbvsxmQ8Yl/WCLXeDh8eDQwZHYmt3jPb08CehqXBK4JHBJ4BMgAT6v3L9fR3he4N/49/fff/9+6CHeillebPGF5jkgEOvk+PljByv5ZwcT+fHiTS/0+VCF7+xLnGIg/OD4fj5ecc9Ho43Ptr7DJLRp8//GPeP16wX4xH8wfL010SxGm+N5x/OQeOwlgDX6GgoP5spPxC9ryff17ZdDDoLglYtYt8MhONFubTKCx7pk5cAMndrkpe2gDB6w4llfAPUFLDg4i93Wxa9xfS7PD+jQ34uP4rvnmV72kyfawBRfgysfqgar7QLj7IDOxL3WvSvoXQGH/nJPclaK8Y/yjqXBuFLO02Ej2yzvaa3XUr94MMRgjerCigAAIABJREFUJfiSDwK3CRkgQ+1nD5vcEArhdyVwAqUMAm2zMkCGAKd+RfJUsrWJmLFwGH9kKBRZ2eROX0o2r4dlmxJvJYgZWjW+GBq6o70Ha/T2FQ3ZGC+pYyy97Ss5gyvDLfFe/JLfDz744E5LeHJW8HcS3mHRhx9+eH9AgiP5N9eGTsYcDAe78gq+degTPFjJ6B//8R/f4eFLbluvPJ3M99XQKWc48NxmIiPyQ7faesk6uGp/rPqb3/zm3aGwD2WdeRuV0+GUcyDV2QH8P/jBD9780R/90f2TRLrJjvDH4ekjC8n7zktmT53TyNai/7l54cgOFtfi3/bKOn627xEu4/HVW4dslD1ot7eS4+6J+EFfPJGtPcqhkvfuR/f8QTjMA+9ASABIP+da4HfMvOWt++S0dNWnhjeZqePfWI66Pm/nK2Adqq6dJE9jDgTZdDQtXHJpvWDwY/+UeCyPwVjDvoevfb8yDeeOJ7v2+PIf3rPPfbhqt6fgyXfqU6wR3B7gROM5x3xz96dn0ac/vST75gcTXrLSBqf4wk2fxKKvhuxPdhsc3NZ39eUNW+ffeoBeGZND8oQjHeTPwbrozhhazNGOzh6wg0Vj8lXHV28W4V7/Hvzy6CGfHwcnscXjzsMfWuL7pFssaI551sBnMrXW0kU+0Z8trI2kT4ltfCcjsskOk08yaw01XW1JT2JXtMEVT9kJOlxK9tM6+ozZv3zL6gWuR3PAVMIbnr1/AnrXEH/7OrS9pUbn1tp4S+bxWZ98hs9ce8QznfV5ejoDQ48lrF668dWKdfSLzZJ3fhX91tUnLxOz6KIDvd4Ow+thQHztoO/k97q/JHBJ4JLAx1kCfDkfJh+Qu/Jv/k4l/ylP4BPz1fKHfhkhz+cn+UDPD2DgcKghTssd+M313f19oWKdNfhdPpd/hacXyebpRxv/LRfii9HjeQId8IPxjAiPAxQx0rMKH9+LcPTIBzpoEoPFNuuLG2KAHKGvXHoRhr++NkUf/HIhhzh9OSVG6HNIQxYdQIkf+ooX1nNAY5zswIpX+svn0YEGMOIbGfQVkTa54dshD9rM1Y8f9OtL3v2xcDyj5cwv6EupX5ssXY/iuz5j5RTiH/sgPzIKH5iN5fB231rlIO2LxvWXf9A5GZMHvb/W8uLBUG9lGQPjIVyJpgMEglfaZCWMBFgiCiZlEjIDKXlSUyLjA2fjMSz3rpIaG8qGsAH7uyYpNaX/snqVhx4bD141nhQ0RLf1ciCUD+5Tty9o9DMMfR0IGVt+4Wljb7+2scbdNxct+IfXuhwZXp2EckpkXpKKXnoBhwZOE3xyA9tmJRfOyt8ysEnMsY7NCj95d0BDBrsBdxO19vZpJ3dz25jJM1z41VaTYffpJLhde2Ee0WEt9uGUG+/K0hJuNX13MIJXzig7JKvvfOc7b/7sz/7sPiVb3fm14V/eFuaUW/dgmlN7x8Kx87WT7SOe2l/tC/rHhyBgn2rTsQcdMItrZfmIL33erPtZlmDbH5kNh/HFl87RhO6z1vec7eg/8YU7GUTj1slzZaOvA6BHMmu+sX7G8RxucnsJx85ziKTEYzydOiTLfq7V3swXmJ9eyK/+6vqi6aQtmUVX8nGfTtILnK2l1o8evsieWH/FNxXg8eUC37X+LFqTQ7QsL8E3P9qiw30+MX/GR3XQGA0OCdi1K3q7F1PwE+/W97m3NQRz/vJ8aA9v/EUzGuylfHR6SF9wohl+/njlUTt7SGerX3j3ao66Ah7NHeJXm1f8SkfRY44xPp6/Wznh7eTDgbjYbixeowsteMDn6hYOvlzC6CJ39rO299Hta7G385MzuMAkEzjQbN2SRnzjJ16SdTZTbU540Ko/2PQZLVsvfck43RjT7j58C7f6BOc+2tXe8HoYyMbIns4k7uJOh0Zga7NxSbr18S5WkZU5km3j+A3GwwQ50i3bMK7AKb6TB7rglz+UrzziPd6u+pLAJYFLAh8nCRQH+Dcvn+VQPkboz2jwd3xb/pNf5PP4TXC+khG/wcBlnjgtVphbnHDvcMW95wnPmfLn/Cgccuz8p7zAc5ODEXPkyD6UKCYZM7cDm
Xxxh0T8ePkN3GgrTy/X4cfxVbwTX6OfTxcfjInxDkHcwwGXw5u+eBYfrE8W8p8OmdyjT3wRl/AvXjsQ8rwpvvT3h/qpmQMtdPR39HqOAodndHUABJc18JCc9z+PlQc0pn5UNm8yTm/poZhvrj732mDogt7Rpo+cyV8/nPFnDF/6zFscxspD9JtrHfGcnWm/5pj64sEQZdgknZa638Q1ZeojWEJTE7x2ikrI4G0aYyVBjK0E1njJmQMNRtrFyHdDL+wmc/rX0M5Er82mRndJdklcybfNqU/inGGoGQMjyVC6z1DCa7wN3MbtYKlP9nqQRwMZS6S1rb2bIsOPdpuas+KQ4CQ/BZy14gGcA5R/+qd/evO1r33tjt86NkpyTkfpbQ1+YbTTY3Ubpw2yek4Oag7Hun/zN3/z5oPbl1Hp7t64lfTVGu7NCy46tt61zn7zzOdoycP6Ds44MLxXWs99fMeLsfq1l55g1KcMglMH97Tgu3XO9fy04NGBxa7DntjGP//zP9+dHHkKMByyC1/Zh0MIX609KvG8az7iOXmEM5jsJfk4SLGewMURnzynG/PWblZn4arOHpb/RzJM9sbCd67/nAzSQfVJw6N5Z58vjByilRSsb1y+zTOGrxKO5GEse1f3N4nyqc1LJiubaIYjWdRHDtrnf36izwIkXNrWpMN8Fhr55fzc8t1PxBqrjl5rRqMx6/VbfDCu9oY63hcfmO4XBh3u0ScR5NvsAf5PH1+YDqKDX+Mr+UR+QHsPS6wPFt7q6E4eS0vJCdgOVcyNN3Ts/GQR7cH1tdDO6yEe7mIdevXjTdKH9uhXw59NmSeuwi3pMy94MK4S7uxueeZXvDFEU/Dwo6WCZ4lfPOK9Cwx8SuPLn356YJvwuuQKLrEw2Wa74XIPn/Ho2jXDV2KqNv6c/1g678TeClhX83bt4PW5spl4QFfx0z5im+RsD1U7QMNj9/0NCbjh4cPZJ1t2mUsX9o6HCfMl/Po9vHT4RC5yJDmDy1x82BtsR5954kPyeMfyVV0SuCRwSeBjKQHPLvKrL37xi3ffL9/1X4j5Mr6PP/MVEH8sLooL+Wc5Dz/rkIg/91JczReKDfn54qd5+uQ28IPNp/O34RXL4OWX+fjyA338rbgovslJvGyBvy+O+GX+X+Hv0QbWC13Puj2TGefr0Sl+e2YpzqBFAQtXhzb4Jwt06HMvblijuQ5q0C2PEBcU8UYcsj54ODsswh+cZOfZiTzco9c67vvZHrqsY216Mwf9yY0seuYFl2zRUMzXNh/vqxd6RRfZw2dtckln5OrQUKE3+NVsBCwd6Stug+tjFHgcWJEDWbEBMiKPSrYCD7mhDy2bXzwBv6LGiwdDBPtSoTSlDULJlJpREKoxxmhjw+cyzphKljIUMBlTimxMbR4lw2G8Emx91qy9cMGXrJYAo9kmrdZmNBwMXF2MQbt6+/XVbz7ZwMeYGC/eJOyMkoNKZtHw9nZoszjONc97h0LRCR/8ak6BfGwmCaVCDxJ68Kd8d9No//mf//mbP/3TP31KIleOZ9uG9+Y4eeK3wy/OQbIbXfTH6eADPa7VTXTos1mNK8EsnfHQG4BkndNzUo1X/HOsj3iGz+XEvU0eXdmJ/goc0bz1Ocd986rrC7b1shdrbF9w2UgPQ8m3A0H6BqusfBzQhM9YMPGiXtkvTLRm540tvvAk10fzVy7JWn2uvbpZvEtzste3dJ/34X7Eb7h3zur0pCv45+roIJf0qC/7Zu/pHw7+pD0cDems++i2n4ydez6cK3e4zUu20btyisZ0CN7l3rzakgPBPp/E7qwZLnW42GCw4KN1aZRo+Rsr5gSTrMCHT218cWb7+o0nK35M7OgrHXtCIubh2F4vSWmOv6HT/oFTO/7iRT+6kwU5SBCXr/S6MiO3+O6gKv7S4eph4Y27xARv3/jKDg52v8CHJ3sdr+iXyEi0zOHr+Pvm4INfjk+yEYNa76TLvGDjxTou68KXXQbHzyrmGi/ZzhbTq3niWnEef+kRjC/ujK0Ntla41P0dInNOeYL3UzA0VBYfftkMuMUN9sQVfnXwDn4dxER3+wGMdnaTHOor1yFDfLvwSl8eIugVTIc3km594ma808cmqdbk2+0TbbHVPmAH8Wx9OCXG6VBOAEa/hwoyucolgUsClwQ+jhLg8/hchwwOC/hvffxmMZQP6yscbfmV+CQHEAv4ST7QPF+58LX8b8+f+W0+0bMCXIp7vlRR51cd8PCh1oE3OuA3R219eUgH+GDMR4sxOYK5HfzrUxxKOLgxvjl9hxvw46uYJKbKvczn48GJCf2MjTzEDjSLK2gALw7007PimfzA4RBZ65OPdOADVvyCF19g+ymYOCYHkd/hXXyTlxQf1dYmM4dJGyvhgC/ZmiuewYNH98VqvCl0Vx9c2vKQ4n6xG0595XTmpmtt84rvavCeyc0h5/rAlcu1Ft2gLblHz53AV1ZePBg6/wgv4W1S8ZOf/ORJHAy0ksIJNuGVSHW/QoXTSZ1CoTsPLtcaFrgMQb0FrFJ9GoIxa1jTlZJ76PZQUGJsHC3nBacLnuZzGDYVY2VAHTTZsAy+AxK4dm2blUFKbvtMMv4zzOjN8cDlD0V/6Utfus/l1Gw0jnO/BmpD9BYRzckzmarbKGjh1JTkthuutvH48B/cyAzfaryjQb1rNTfZpbfVY20OR0IOn3LCtr6kmCxc5sQTW8pBGQvH0lN7E/Hkbj1tJTl0byzbhaP+YOMvXHufbRvrap77xtVrozn6amOri2haWe768X9n6F0JNvqiI971L42NJw+8V1pr12xu8Luudna362qn6yfk0zjHVhcLf8K1xok/2tLnSevOC3+4g+Uj/d2idAdu5QYer+xMgmJM3/qz7pNpuOz3xtStXXvrR7QHn21lr/Bn/8aU1swnWjt7Q6vSes/Zp/7g0m94rZPfWzvJzvrpYritX7v14QSvP/pOesWo+A4WvKSGL1i+oi386HKYRU/Gwh3d+tLRKdtg4F/aVx79MWmfdfOPJx+SsOwD7clmZRJ+MSr60LIJUrQvTeCbGw/5SPAl2ZJT9IEX09TpPZsteXVfor1yAR/N8dCeSG5qP1t1cNdYdmos+zFfqb7fvCvJyNeW/e0wOJINXQbTfOOKe+ss/sZmiSfelya0+fr193//9+/7mOyTTQdC+lwSSYm2JLs3lfCLUWweLuuKn+KZFy29WNLnxYXEOh2owdMT3OVk7NsDhnuxUO3FiPyAPOi0tfq6dPm82pcELglcEvj/LQEx0At5ftBhibyJP/TnMPhqvo2P5f/6uzeefbQVPq64BF7MM0cfP1y8Eu86LHe4YcxcBx5gtV1weK7zDMWHKuKqZyX+thfL8IF1kMVv8/dw8fPwiIf5YeMOUbwQ4qPRyKfjQR+e+Whz4HQ4AgcfjwYyclAiLngJxNeLHXDKMeQv4PHnGcrBjrWtq5/s0K6IJ9bbr4v6kshhEbrwirbwOdhBM/7wvfL66PbVFH2ITeYlR+tYwwGX9fS3fvV/Jh6Th7noostkjD/yQgtdee4Gt/G5mG+9
aLbmrt+9PvAueK3HDujbWq7XXF48GNqDH0LwL1UdBDxXSkg3mcwwCNgGV0rAKDYjZ9gdbqSQkrFHCta3Sux+FW0t913wWptRS3ptlC4n1PrRDsZVcqvNGfSFjmSrK8flPpwSZ47NHPiap98F1qaRKGorNhRjLtlHF3wuczgMxpzDcAr99nZyDD8c8RjPd6S3klzaFPGCV2ugjYPhaDw4telWB8ltZZJsWp8zT++ri9WRdrpvw+Uw0ZHTx+OPfvSjp1P8dAYGrxyWUuJLdw7FspNsrs3v3wF//vOfv6+dTmxyTgq+v/zLv3zz9a9//e4Eou+d+O6VvvgAo7hHewVMY9rdL85kGtwpV3MKBtkBngskBbLwPy3+jp7uo21xwePag8/oMW9p6g++Nh6ehV97qz+64iMZLKy16Gdp1V47OcfgX/lrR++jeU/I3zVO2O7PddzvWHRv3znufn+25/6E92UEP0en6yM3oBprPcFWsmIOXPrPPVWfcXjIeuWKF33g+u9k6UVfOk0G7MKa/E3BtIdc98tTez9+4kmCEg/ppTXBwtF97eDRAQ86+uOB5risDy5+wsH32vfBqVcO1ugLo74AMmcPiPAY3mhor5QQ8mutgY70Br9+SSD8HbwEmzzoJX6ttTIJFu/6FbDJW5184Rc30NNa6mzD3HQLrwQwXott+M1OokPcKaGV0ElA+VhJFxsMHm5JIfnojwZ8kkl2At6e8LOq+I1/MPHY2Mqk9h3oHT/WNf+R30hOj346mxzhrL31ynvbu3Y0PNp/2QL+xU84xCQyFlfEbfap7+///u/vMYjMkp857ttPElrwHcjBJ8mXG4mNdApeP50p+DHuoi/rwiO3QEfyLn/wkGGPsQvrgEVP/Mf7VV8SuCRwSeB/WgL+wLT808/G5AF8rFjDt3UI4F68zbcVn7vnM/k/cEoxwr2cX7zONxeb+eBgixfipzU9jxTL4fK8xH964Z9P5mv5eXPf3p7L0Mcno008FbeNF5cd8PQVj3yP34cXjLn8tfzDOg5qjIvNaOHDPROoPWf6MgdPwVhfLkd2ZCh2eEYiF0U/3swrbqPBvWIdazvI6SsetHU4BBd5uMhPnJcvlDtY27MK3NbSr90BXTQYU8wnZ7Jz0RO+0VQpbu+99cHSXXmc+BhtxbToUMNjHFzrh3Pvsxlj2UN5WHQuzBOhr6Tx4sHQr8ojwRE4gdkAkpSSOX0Eql+bkiiIcdvE+mwiitbepIvSwaZgeNqoKfNM3E6FptxosJY3diXZ6hK0NrvEqeS3hI5DkZT1wF4S5x4OGxaMDWzO3peoGzcvA2s9/fCoJWw2I/m43AevVtCbXFdXZJI8jHMy3/rWt+5fGKHLpvOAxElqu8C7ODHFvMVtTeslP2048MGR51it/dwFnzetX/jCF54OyDi9PrFHW/bCqcKtD+8dGrEPzrOSU4z+7KqaLDk4PHJ2HBZnp39tJl7je+WZPeJLW8nuFj4c2dqJM9j6kzN4tpIt4hu9dOGw1D16WzM7bZ3whF+/vQdfDyjeJLRGvGVH8bQ8G1v8y1P6Db6HwOg6ZZS8dl68rCzjY8caP2lcXSzd2945J85d98R9rvlorUf4Vr9Lh4MZX0YIqAVj42Rqv1fWhukbPuuw5R7ok2Hrq/mPaAz+5MFa+Qt2oKyerM1W2Jy9Zz1402n4zeknYh4w+6TZvgSPVsV6cK6PCl+0GI8GdTZnTrDJdOkF1wFSvlPdHLTmR9StF97g7A/F/lLAiivkoE9CA6YLj8kBLF4lZg5V4OQP+ab2MVzoVyvmRgsd8H3ig7nxrk63KxMwfZG6MWn1c1/kVlrDHLTQKT+qjW7zreEqiVqe+WOwbEDZWAvn21viyaemc/hKulany1N2oAbT+vBFk7qx5BBceLvPLtJH/e9E8OS74EmOxk57avw5GHOWhvRePkIG7MT86BdXlWQIhiy7muPlDptpDfPZjz4wyUJ+JGYpZO3BwZtob9T72TBY89T2ZF8nsWN96PXwwFbFU30KWukOrdnSfeAqlwQuCVwS+B+UgOcjX5HyRz5KEGf4VgcefBdflt/lSzuM90UMPwaW35RvgeMb+TmxSpzxdbfnCLm1NRyUqPO1+Vv+vpdO4qIYYX7PY+YX24y791yBBjzwv8X0vsTVJw57FuO78dSLJ3T39ZB58gJxoH6x2zqeA/GMPjDyNPPQL/6gr0Olck1yk3OClzOB7VDNfYdBeEe7+CDWWKfDIYc+7uEgU88k8lM66EWDF0ZkAK74Vdzc+EkXDoHkvcneOmCUjcdkoF/Ms5bauDaZkC09o13sMqbA7+ANPNxoIiuydUC3sTqdr+43l9j8wTxlc4V7xyss/6UHQ+RD+K7/bCnJpWDKtRlsAgbcxRByCD14GEuBJXtbZyQpkVNYQ0VjyZA2Ojrs0e7hXBucQyT0aZfMN9ahkDocnAU48zrs0dccPLpsQrwYY+zm2NT6O1xKRtaWDJIHePQYi28yXxrhsqk5IQ7AHLitC3cOD140dhLus00P+6c8k5OaU4HHhtr1yfjcQGjKLqyFDvPxIkldp5DDVXM+HBseOo2HBz7ysbYTcUU7Wkpwe+h272CkwxXrrUPgrKI5R7D2C15/dBoD3/3OtVb3td2vPeKBblxvb8GPDskk+8lmyLe1tvYFCn6SVbbLZsmUPfmp4Z/8yZ/c5Z7NBR99i/Pkz31yTCbVyzsZ+E9RnK892j5duwi+/de6YJSVa21186qT4X3Su3nLS/3qndv9Ob73O+eEP9c49X7OdX/Kkyz5LofK69vyY+bA6yeU+tiFIJy9q81PruGPFmMr+2hIxr6cOeWcjMH6Tx7sxNq7X60LL9hsQpvtkgubNd5Db3yap9jnEjBJhwTOGsbMTa611a2hrg1PMPrw4q2UtcK39WlX4bWu+WjhP0pkiinJAU+SOL5JMmRv8luu5K+2ptq+c5GBueCMJbdkQW7gyC58auPpGX27d9wbk2zxB9pwd7nPLuJPwgSHBLR1zD+/AgKDpj4NX7zJM9mowZ7rw4/PdEXW4Q3WeDaVzaldfJ03hvDHR/ONa7uSS31345pi3JhSvTaTDZx94Nf+Qrm4jLfu0qkPb3KW+Gsf7p4of2mPtP/5TFdzyVxS6wEjveuTyPMb4JKv8WwZXv3sSiJMXmSPPg8O/Ai7kxSjlV0bV9DSW2p5QjlIcrjqSwKXBC4J/HdLQJ7/la985e7H5LDirvjFd/F98nwxm3906N3hCbr4OyWfvW19Ygw/x+f1k1r9chPlo9uvNZR87P5yRT/fya+Kv+UCDpg8U4n3aA63Zwl4wMovHEiYax5++HtzzdHHJ/Pv4qh+a/PPfWHEp/PhchZ+usMfsRxePhtsP1kzfv4B6T0cIisw6IKDDPo7kOKO3IE8yJqc5Q0dFokd8iL5HFjxyAXOcyWZ4l888qyF12KruWKitdFgDvjGyVlbEdfgqC+Y8BnXR5fWF7Osr0/8NRdML//ZDPnSI97AlmugwYWmM7Y31nrRURyO3juhr6y8eDC0Gy2+t++/QjAUZtMyGvgozymkWn9
JlfEcgGRJP1ooTTFX29crJZPqlF4iFB9gS5pt7k3We2hnWMbgNL8Ezjw0bK3dgz3abWQwcMDHeF2cgc3ByPHJgURHc/SZBzbnYWOBhyN+/axvD3o4CU6FY22D6EM3p8MJoA0OcGrOxSblkKxFXhL1t7cH03hCi8ta5qNPnXwdVHRw1qbJaWxN5u7Bkon7R5sODKfFSSUL62uTXclt67fpo9Pc1jXWBq/OjrJltb/x1GFYNpWcjbOv5teuXh4e8cN2ekjCN9m6R1t2p2YvaEB7tGWv1db0IMdh47cgQib06B4egTObXTnHUzV8eyUbfejk6NPbicchhnU5XrWfmgoybG9lBZey8j752vHltbmN7/3Oea4fzI4tLS/NidaTdveu5aX7xqqXR4cZ+9OaZKnOjsGDo1/92Q07ca9eetBgLt/oquT/gk2nj+QVTPSwPfuuz6A7jGhuD776zeXz+Gk1eqI1nsCzbWPsMv9gvUo08AuSkJUNePfBmKOdfNAqQUhWwVs/PYDVz7/yeehwtT92r/kaiizxH0x+EE59ycI8/OUPyQFs/sj6/KS6pAMsefBj6qU72US3Gj1ixfJYkooW+OHgC7KFEjP496Aqfk5bIR9jEsj94klfsGjBLx3Z49GdLPFsXXymq4XRn03A06XP3Gw2GLU5CvrSpz735kdT+us++WUr7rOp2tG49c67L3wrcC9t6fE5esmLfCTQ5S98I7nqp/9iGt28ffdiAAxZSurRKhYHq/ZzZ2/Ti78emNAr1pEfe00uaDaf/MGIC3BI6LNR9MNV3AETLHz2iTlwXuWSwCWBSwL/XRIQz/yKwDPKe++9d/eD/I7DB/7KAQvfJH8So/jYvlxxqMIHipE9RzqwEYs6jBBX+bR8OT76qRX/K5e2Xs+BfJ85+vu1Bj+p8Ilitxpd1vC85WdW1vCMxfd67kKTOMCnw63tyyFweHb4Iq4b6yAMPgdB5vPfHeJYA7x+fX31o09sMd4XNnD194r6SlTNx5uvwIEusnOQJa6JIXiSU/nbRWQgtzeXvJIz2slXDKGLfmIW3mJ3PxEjdzzTD5zu8UmmxfvyB7qna/jLKeGlkw5/GleHw3OPNhmXA1qvnyN2qIY/bTrag7h9bi6fKMe4C+xWPAttTmL911pePBj6zGc+8x/43r4f//jH/2H8V+2gRAZQokXYNh2D74GnTdIGolwbdpPN5oNRKLUr48sxpHhGyCC7cgwSJpvDuAc14xmbvhzNOdc8p7foJ5v9yZmHEnQbk8Tb9OaX6HEGjB8NORGO0RxGzHGZY30bgHw8mJsHpw1h49vIDH9pdOjDqMkx3mxy8+Amc3iti/4OYdx7MLFedCbT9Gwu2qyXjOorGUe3Mc7Ietq7qXY+WLTh4dvf/vabz372s3fH1Cl+js18eDhWODlDzqSHqBJ4tMMPfmlvfodPnBhdR0v8maMvm4rubKj7lUM40hfZavvENb0kq5VFOHbt2qt/dsORdVC3Mgm+z2fTQXXrJpN4My+Z2C8OqPz8qb6d7wshdkZPZL5BIjzx0rzoOnlLjue4+1PGp/5aq35rpo/GTry7Xm31zmvO2ed+5+96j9bdPjJdmdNDl7FwRYuxT91+piS4udi1YLo0o9OXPvp6YG6NaFWn4+RUvfTRN3/Lbqxtz/V2ztrwm4dWa0qAJARqcF1oVVoDLNtfutxvgbOCpre3ZK0HZPfoQVv0xhOc+WM4Wwd8ONGxsga/F9s9Grx5AAAgAElEQVSN1ugG355V8638cIngxhN7xH6URNqTYO2LYtTiL6kQu3pwNzfarbtyip5kkD9Rl1iSO5lbp3jIl4GBT9yxVvy0Vgc+eLemWCXZbHz5t348WwMMvfdHLa2vT1lb623k6lo7OcDpQqf53YNJ360bDc1fW8CDoq7fPbzus5etz77kfkf0roTX7dLm3pj466AMP+gt5miTif0qJuMN/uIUf2198v7o9gaTnIybT49sSLwFz6bsP/10aS78eNPuTa25EntfkoqJbMwa6DTXffkN/HBbA0792mKuN890377Rf5VLApcELgn8d0jAM47YyQ/xk14M8VfFE/GN78ofgXfxi37ZIO/nrxQ+kd+Fz+EQn1qOUBwBxy+Kz2Kog4PWAiMew8eveh7jY91bBx18b88UfGOHRHw0HsTEnrHg7rmsw5qe7YyBQ0NfP3UIBEYfGvnyDoIcZDgA4qO1+XvPf/KNDjbElHiTy4EX2x30eLbzHMXPK/CKP31JChf5u/dcRA8OsshbPug+OZKvZ1AxDh7zxLrimDYcxXrrFZ/VXlL0MqQx8gBPB9Yjb8W9dfFYnKaH8i2yp3t4+3KK3uiDPdAf2q1X/tI9PMbBGbNO9MBXTK/Wp5Qn3W9eYXnxYOi/4uDnPyMziqcgSikRocQSXgbIoF02K+UwEMrdRDClpdCSuRSu3jHrduhhXW2bKOdA+YzGWgyvBLYEy8FKBz02h83gUAUe/QxX4lhSqN84HiRmahuXYev/6U9/+uarX/3qfZ3o6fAGPAeAB7LqEInxo9Fa5ixN5Ka0oWxUvIEBu3RwBtEBHxhfSqFdORPs7dMGl7zwmQzCy8HjQVk5RjP+8Uc/HJf1zPVwDCaHoN4LLk4c7nNTw+XCp0Om999//y5nsByPdvYGDq5wr12VYIPZ/jszt9LcbIT8koXagVDzoknd3PCowcVr7ZzQ4uw/2EWbunZ4wKeXcIQ7WoNdvmqnp/AIIoJED6Pt2Xja9eNp8cbvI7hkS4fNqS9Znbh2jZXnCdfYKeeTxnNe48/pKvoWbzyS886HWx+Z2sPdZ28rw7XDtYVHcgNrrjp/Z87KrmCOnpPHlc3SmO6jub0FxgEqW2iMfQSfTaxszXFlc7WXt5VbMkkOzY/++LXv+3na7rmlAazDbfFEMsb/ufhwtRgChk4UfMaXpI7vABftyTta+Hc4JGRo6Ip2uMIdH+EHI4nlg1aGxavWWL10aJAv2HXSZbQ+8hv1ZR/WIB+JozF+sZ+g4WXlkg3oJ2MyVIsV5rY+uOKUsUrzzVHAuPgy9mR+NhwuvEdrdq3OphtbeGPmKcnurHes9tpZ+KrBLH3W8BbVgWp8xDN7kLuIp+mJzNhfCXaHRv6WhnnGy0ck2V/+8pef4pm16dRBrJiYDunpZz/72b3POLn6I9dvbwereJEce3gox6ALstsXCQ42PYTAiV64xEdtuZA1xOPyC7Rc5ZLAJYFLAv+vEuAjxc/f/d3fvcdAz1CeZ/wSooMWzxH8FT/ET/JrfBX/6SOFXiAVU/lRMHypPrD8It9dLEO3uAvGIQp4/tPzh4MY8/hA8a2YKA/gBzu0QR+8Paeh3fMEnhz2OJRRw2MeWHHB4Ys4YAw8/yymd/gD3ph+Nd+NJgdBcgxwcOzhkDG06zPWV1R4FEuM8+VeGPHvYNEj5lu7jywc9jTHuh36FN/Mh1/+b57DHc/kZIHWYjJ4uMwnW2sVf8nAhQ/wZL1xl27IuBcg9G9coYtipfX1k5d+vPdFEV0agwMuPHtuoQf2od98Ma17fKETnDXFWqWXLWg2jl
6Xea+5vHgw9D/FOEHbpJSeUiUxGRGFlOwZB9vGRKNNX9KWA8iYqzfhDL4HBAZF0SVnm1iDKbljZGBsaJdN1CGL2rirQxFJYAc4DLiLUfaGfedxMmCsqZ8BM2obDL/GotE6eMr4raPNCZSYuo8X8z788MP75ulwhBPW5hzV8Yk+hxltgjZuG7R7uM1BC9rQHw9whzenEDzHT3Zo5iDw57JRweTk+xlTzqOxxWdDf/e7370f+ugvkY9Wc212Tuyb3/zmXTcdCMUHG1oeWx+u5Vl/sOocoTreyIP+yHv/TlPzsj33taO79ZaWs30n4FaSSfTVZ21Omn2hJf1HX7T1ddTOX9zRksz7LDSbSBbx0P6CL9q21m4Pt85Zr7yNPaJNfzre+Sfs3p96XLm/tM5JXzjD94iO5GKudmu5Zw/9TGzlmGzAgLf3yDvZG0+Pu2awvuBafGCNkXd0JPvVwfKhDUc2o+5eshONySB6gut+eQaLF/5IAufLCElLfIVra21XMAurP/xk2U+aOsxAS/Tz+9GPN3ueT+VP+R3+NJ9fwphslzcwcFbjJVmQbTSdey267wq4FbiTd7qK3uhIlsabT1+bfGq3h5uPJvDp3N7Gn4QSr+FXr1yjS33yfMomu7EGGpTe6vE1G4+N+VKM3aC1pBhOeNKhsWiGkyzpArx+/Cn68y/GXHBnN4vTPLw0L1lnV8kWbuuLBw7GmnOfeBRz01/0Ro97NLtHV/lGPEdv9x0IWS+8bLKcwdJoI08waHS59+JJIq60Htln02SS3MRfc9Kbl0LWzsasTeb2BPxeIqmTj7W8LUY3PIoanXDQ5VUuCVwSuCTwfyMBfsrfffXTMfk5P9PL40+9+4/OxWWHGw4qjPNnngscZvBDDrXN1Q++n5GBr399c36c/8rf6fP8Um7CV6LJYQN/2DMZeM8tnlMcGHh28Ywj/vKjfKM+z11woFufNhxo6isX+ZA58GmjkR9Hi7jtGczhBHzmoc2zHdgOh/SZo0+9PyvT516MdgCk3U/GPAvB1Rc9ff2DFmN7WESm4oDDJPkbmbqnCzHGeDHRXHDkQS/pj30Uf5O/Gjw9lgeAKzegH88xYlax2Rx4jJEjmfXMix/yNAYGnp6f+9DAPVmKe+gHQz/uxbtiIfkbF0/9ZJEdiH1oJStwr718LA6GCJlxMDqKovAeRkr+1G1wY5REWSViOQa1K2PNOTDWdRDaGVkJMxqs3+EOB7WHMcaccINjHBkiw+rBg4GizT1DBAsfXPo6lMkg4ddn42ozeGMOcGyy3ljDYT2wksOcmXH94Pu5VfSjy8XJcaI+ffefyfBrTodW0cIh6De/hHofJMwrmTQ3eHO8+eRYvP0Nt7nmdJGFtodePxHD88ooOvTnVHpgMS9ZkkFfF5nPUbVWONBADza8jY0u+o4f/Ck5rJxJSa+1wNSfU1Ir2Zi2vp3nYTj4O/CtZMfd17f2vbQ8gjen9X1Gy9GTBZshB864r9bA4iGZ1e4rsB5MwnnSqz/Z4632ygSN0QNewfsefiS/5oE/ZXny+oiW8J9j7WPj/k6UgsfoCvfSuX0r/9ZQB3+ut7Sec8/78OEdnQo5su32V/WuB27lrJ3d7hrmuII1drazU/3BrsyWP/P5qQ5k6b0LzeE3H77sov0djeGPp+bF62lLK9PlNXzV5AJ28RmLxmKHvmSjFgPsE75LorE+KVqi1Rp9BST5WxnEb7qMz/jqsLs59a+84w8NEh805dvQjzd96CFzccu9vV0sWHnHvzWW75WLBK4DIn6APJbf7GLlom1dvGQj6LEOuvnc6Esu4VSTIT//9vb1imQKb2iKRzBw4Q+eXnSQv7WzCTDJojeQ7qMr2lrb/Ep7wb128Qdf6Mqu1NnV2iKc0Wk+Wso3yFD+8dHtSye0oB/dxR/xOHmBdeFV7pINZifwwO++MT4UDjTrV/h5F1u2jvWsL87hCQ3B9/YUP+zYQ4qHGDmGh4beGuMPbeJlNuSBDYxYAi8cYqgYY12Js77o+neJX61LApcELgn8cgnIXR1w8DEOLuRs8g4P5PyRGGOMj3Fwz8fxQXxcsSKfbbViWjHQmDYfzMeZo+iD2wM/X9k4eP60ryR91QO2F9x8HxocMpjn2UcbHD463OJfPQOila8HZy1rwuFFtlhsXQct4qi8wSEQGLS674shMoHDGq1ljnzAIUcHVHs4pM9B0f6dIbSA8YzYYdF+/UMH8OEXrEMbz2HiEVzijHExrK9r0CrGiyViGPrpQRxxifsd/OhP/nBaA3zxdnWpLR6RofXorjjMHuCmAzIAq01XDsrEPW19YhV76oUI/aKnfIMe8IQ/c+hOjATjwpP5xtAA3tpKdN9vXmH52BwMkS1lMiCKkJCUSK2xMRBKkUjlIErg1Gcy5h6eLrhyIvDAwYDU1vRghA5GtCfFEl1GWQLGSCRoakZs46pd8DE28/s34T3Ag3flQBgtAzSOBk7B+sbBcSbVNiR+oiv4jBZsn313cMKQ9ZFpPKEPDmuqOYoS2mTroKdDsODxBtY8TkPdBkTDys56Jb5qa6vB01Pyi2bOlLNwGaNfji35Soj3ayj30QonPC509UDAsSjuS7xrq9GYE8FXusWLYKSsA6jdAYTxDoHuwO8KuGC33v6F33lL18LUjza25ZCPkxdcBKUOFpM1nf3Lv/zL3Z7osbVL5t3Tg1J7+/Tjjb3RAzktbfF14ghfOkje4LXh2/WWR234wLXPq4NrTH//wQtOhRPHn68V0CvZOMvS+4iHEz6YauPaK6tzLBg1egsmOxd96E6m6Fra8GHctbJ3H1y0Wh9MAXnb0XbSsGuFp78r1b5AQ/SpFXjgdC8Is43g1NEd38EuTPjBuJIDmuw7SYkETSJjXvDxlayWzhI4NiCo8/fgNpDzs3wEf7FXMl25wm1tfmuv+AMb7cm/OdVg0QEueG399iX/qS1BkqDYw5JhMSFdFqtKVNHSgXw8oB98a1jfOktHPKwsg4GH70MHn0mWfIskCS0ldfRvz5Fv/rwaH8kv/ehzsZPiHP+eXtBsr6rFSfopaYyfcPW3qvTHr3pLezI6kjUa+E0Xv8ivkQP4LnjaK625a6PPevSJRrDkQmdwkR99alvne9/73v0nEmDM65JIk+/mI5Juftzc9JS88IBniTu8/gA1+YvbcBgrNuKBLh3c9PMDyTm8eKFfdKA9u/OQAa916Uau4Es8STdc4MmMbsyxHrvQV65jffOvckngksAlgV8mAbHds4J/VsKP8K3ijbZnHLGQ//KnNcQ6vsW9Zxo5AV+08U6bn+O/jOVre5bk/xR+T9zlO/mw4oN82qECn2ud4qYvasRDPpLP7udI1uAn+c1iZj8dw4dnlvJ0+Ph2B/P8JRrxiUc40BN+4+IJfwpv/R0s6etnaNbzzOmZqMMhcH1hVB+eyAN/1pSvkSN+jO1hkXgCvxyK3HvuRrt4AJ6sXWjrkAl+ayvmkC0+0SLuFFfhAStWgqtfm443b9AWbzq8gpu+qsmXnNFCn3SIfrI1JnbB72t6coPLfX9riezxZN1iJrnIv+CkC
zJmC/RBh3JHMX55uhP0CsvH6mCI8iiJoVAyA2JIFFESS+mUQ0nrIHIG6uara8OnHRxdwgsfJ2VtRsIg1Ppdklr3+0DB+HIU/VzKPPBodtlENpd59TMwBxA2XocycJVUcgqdSMNnPjwSQQZrfrisi374S9LavOD1Z/Rk2m8n4bfe29sb09aOdrUx66Kv9fYPUPcwA3cySzf11Z8ckwmaFXg53D7Z1I+ulQV6bUp8dPiRM0YnOjoI0M8xsAeOOL7JCgz+OTx0JT/OxMUxWwuN8HaIssEEzTkxbQ8WyukgwAS3bbA5tfrd72W9vV9a8coJdnhGfugVYPG2enZPji56Y9vGleUh2tXxwiEu/e279HvSGE/gVhb6l09tczcYtK6xnbvr3wl7V1aenL393Ok/uuEHU+AKz8ln/f6TnuLwKNwnHXsPNlq1o/8deU9Vc8K5NSCHL+hHZ3I1x6VPaaxxtkB2yT/4Fl1573pLY3wvvfW1bnOt57Je7dU1OLbVT7mCy35XTtZzbyycj+CX7sbzJ2t3eIJLrZ9vkNyA7ZKU8PPZRXPiRaLQ4bo5bAmMOckkGvLh0ZIuVgfRIRmLhpUhOtIFuaFPogaWP1razQNP30oytUY02OPt+9aJ3+iK/g6gxB2+lN+Ac23NXPJQwh1eslnds99+5tgawcYnGrJhMK5sINtI38aWx+CTV/D60Qm29q6zMl7cZJZPVPsaJv7V6Rvvrbl8JM/WOuXhXtn8I13UB5822K7yErbgEufJkU+zFh8vlrEPdIvt+n74wx/ev/pFNzngTyx9O19AWS/d0D/8cIqTPYCZDze89g+67CU2HD+ScmPW7+vl4jT81nWPBmuqs5U7kqtcErgkcElgJCAO+akzv8N3yNkdWIhLfBBfw7eIj2JvOZ55fFE5MH/qcILP5NNc+vi5/C688uZiCDKsU3zgq6wBpkOPDqWM8W98mrXR4/Cj5ze+GL14QDM8/LW1PU+gQ0x1OMKn6uej+wLIfGtZg492YAPevGjIN3vWJAd0es7BtxwGPWCsoQ8NDsjA9BLdevDly8GWH/XyAB/8Nx49g/ac7BAFHnjR338mIys4jZM7fH1FlOyLm2A83+lHl36l8drF4a3pKjmLVY3ROXz6oo2e2A1Ze64ESy/4QmOx0nr78o3+0E4G8JEXeuVK9KgfHXSDT7KB87WXj9XBEGGXyFE8JZR0lhCCoRgKY6AZeQprnrokDMzCa4PvYYsBUT7D6MRR20ZmmNEErj/6G7yH9h7c1TmujDo8DHPhbPIumwwtjE+fBM2aDipc9aOlAyFwcDNgD/+1wRsD62LcZAfOZdPgI7rV8HN4HZCgnRwcPJjbpjt1gSfj+uEFZ64LDdZTczb0Ec1t6OTBOYKVfCpkUcKZ4zaur43MBvThxbpwxWOOgHPg/NScnjY4tIIhK3ymXzXYkttHzmodAjgO7hGcvkeXOfVrd5GhQ0j04EmNVkHB4Ra9kx+45I7edBIPZGu+AOABjm0EH+1LVw66hxd6Wqcdb2gjv8ryoW/nuE+O8YcGl3XAruyaW1/30dmavlZoHxeAwhds9LOhj25v4pe2YNHRZ8pwSlSseRY4V78Ls/3NS1ZbJwcw1u8+ecCjf+UBpr9HBK45YNzHI5zGHFT6I7Lp5BEf+pLro3F4ooPNSDqicXUXLWjQf+r3tIvg+7qoh09z2S59ZsPJwr6Ehy3TYw/OyShe+GKJR/SFJ7rCl65as71in7isoZivkIO9hw5j7an2HjxwkgH+HCb7ykm/JI8f2r2Z3YDVjg74wHWhhTxWv+DjSy0hRNfOK56hUz+a8+ESSr7EXH6PH9EHZuWj7fA8fxp+uJN38PRAn33RlVzUS7tDMPrJbqtXHv301Fw0nfYDNhtKr6vfZGpedN4bt9K8tRt9jamj5f/M+Pc9kq9Qb3v9TmP6aqu7hzv4ajy6Ngex1/p7dODycZJ9MVh8pg/5jgepvt7Jjum9Bw5ruiTIb2/5SzAdRCYTMVfbeuzBuC9hO0A0hg5JM1zGJfbsA361uC42gWGT4oMYxd7SR3K96ksClwT+d0tAfs+/yE09m8gzxEu/TuCDetkuDoHTp82/8Tnm8XN8D1/D73QgxI85FFHzmT0vkHg+r0Mdf7A/P8kP6gfDv6GLf+7QRu4Bl9JXzGK9Odbn7/hJ88RmMZEv9NyCX76bL+wLIPP6Ashcz4t4qd98tMHN5zr46W8N9QIdLnLowMgzEdmAI0/xwVzwYPsD1D2bWr/DIryax5ejl/zEHfdoMo4mukCLezBwklfP3+Ie+RcHwbusryTv+827ezzoL9bIARorx1KjBa5yAzDmsAvyRRNZ4gltYp77DqLYC52QCfnAI0Z1KOfeHPfsifyiH43aZCwOind4fO3lY3cwtMm75JaiS8wYgwtMD86MImMs6eogyIbtoUJS1UFSybd5jJGBueCU8OrLuNTG+kkYB5ChMUoX42dMHjY7bLCxwUrOSpY6CHHfya15+q3t+jf27m3HtqQ49ziPUr7GHOw2xzYyBgMGxEFCwpe+8bP5zrJkLB+EbAQCZJozDUjc9aPs+Rta/9ofuWsB29jQ1JopDeUYmZGRkZGRX0TmGLOKMQOixufegqaHFkC0DnKAnIVDV4FhCwR98pIZPeM3HnICunh5RuMZrw6n6Ho3MummgDP6+pfjBUTSCRnSI3oARFa0dGeO6K4NVZsai/Rf/uVfrr9HFOD15RQ5kkWuD33iZYzG0rxa2MoBHTnMGRldO1Z17CvgOYFowQBdoCavTU4oW41Hz+iyOX3XP3ndJzN7U0bWbL4NUTxrs3bFVozX2AK8eCZb8srZVRcdVYbWwQp9t4lpnRkDOvxdC/raPfVHlut76d2XKk9PyajeoQcZgHJ4kCxLp4zN41FSts90KCmLh6+H+nJo5xLd+WWRMjTLM37yHdNJ19j6yVb6Yw87juiUGw+7bNzhXfoKGz2vjntevsm3ZclYnkzy1n5lOzf1BS8FEif96t142RKa1i2bRGNs+nY1f9k8GjaJRrtwu761iSdZ0YVV2kWHb+VwhJOHO3J6VYemr7mUwWepgI9M1ps+zEG6dE8O8qmPBl1y14c2i6XqyRH2Ri+XsrF4letDu4JYPPFgw/42DT3BBDQdJq9c6tO5PlY/MDIc1qaxtlYaw44jHEYjlacXctOBfrNLdO7rOxvL5pYHPs2NvtS1ZhpHtnAJ8EKG6iqTK0uv7itbPpUlg8Mr/a2tqPPs6z8bnDCy9SlX73Lfs6Cc362sdulG360zZS62zJeaDzGMtWRTsLFNgWw2CCsF9OKJ5s4G5OEWj/CP+EjkwFuuX3rAW1sHiflOOMim+gI3OeXmjCz6Uo9PPskaNrf3dNfAXQOvrgbgIOyxL3JoAlNhm7+Vaa8h1neQ8Ytf/OIxFrAp54dt6GEMnELTFyswy4uHMLZ9D18dPoc99gbwz0GTmFY5nFMWf3XiAzJ24MB3wbVeSsM24yBbvsv+i5+Fq+SEmw5byAVHizX6MoiPVU8mftsY63sPIsTy2nc4JLd30g8d8iVko7sO
kZRpRyaYrB+0DrOMCz6Tp2f4joeDng5FPNOtuBsv5fkQL5vpuUMmvohM4X9+bv2pNvrJh9qH4kceebTNVf5Pbr9obPjzJdFuHECvyotx6NKY7ack7clrviVzaQzsQB89s0f9OeBiZ3RJbr6MHZhb8RU5nnt62x0MUbgJNtEWlkkp+DchngEEQ3GhMeEMUsqAte93p9FuIOVeG18LFHAzNguVceiLHII49QBErlxuQ8TAyaONHJgI1i1Khue+gxg8+0kUo9t26Cw+Mv/oRz+6wCl6C1m9PoCPoN2zQFl/LoYNjNB2+GNRFJwpZ9QdHBmzsWqLb/JrY1yN3TjRudyTqXHKAUinq57J1WFTvKMnVwG5caDVV/2nu/RnHskroSNbdrDzZVxtlOJLH3QcnTkwhg6j9Nm8BiZtTOR49nwJcEvJ/tRzYKVOu4AtHqtHvD3r15cGdN2V3dloJBf65JEbB3qgalz0AgRzmvStXjkwRIOvvjb5Uob9t/kwBvIqx4vzNWdy/zmCA97xkeXUyamHdJFD0MZ6NbdS7eXa9uy+/2pknZpL7ZRLm3evflNzEN+eo6svstCBz2STX56MrR0bQPL3Zj85VuZzHMmzsijr+bQ5/aYz9+YxevKQnQxhXfL+0sD/Px+SuWYCNo482bI/c1i/0a5dRlfZjpmc1eMLBzllupU3t+gEhtamw6blqX/P6QF/vOpPvTLP1RV0eDZvbEnQgAY9vwF3O/xU3jrpZ3JhXxtua5e9pDfl+slv6KO1m8xyyfgk/eAjQIN77vMraF3RondIjK8gJXp98BOCMuXWqXbkp0+XN1zKkl0faPWXPK3HdBI2NQbPZImODey4knfnKt3QS8FZ7egOLZ7qdx7xQrf12bh814t742m9XAO6pewDHxf+pXQaT+VnGXpjJvfDbQOQjSVruk0f9SevT7l6mNEhkGdjX5qezZ3/rGnDQO/NGRl8ddUzG4DN/dwr3ynv7at77fAR2GvjOZv61re+ddmTxB8WRFsDAmjj5UNgLvl7M04OdcbZ22l9oMt3e6ZPPocvlujTPO08XBX3dNfAXQOvjAbsT8Ss7373u694FPbAFBjYHkbsD6/EBfyUgxA53EEHx+Cfw45wVD1Myp90CANvKlenDzyU4QfD7Onc9xUznIJdMBFe8q9iaFhmr4MejqORtBU/qA8HyWVsfQEkxugQyPg8O3DQFz+nvfIwlfxiH8/2Mfo9D4foMdzup2P2nMq076CfnHSoP/1WzlfYM/A7xtNP0/JH9IXGGNIZHsZIJ/m3828PKU/n9IOvudRm5wONtPNTHHBV3JJnFxnpmUz2I+6Xhh7pTz29my9ySPwYXdFNfq1DM898JR2Tz1ywS3NsTsjt0r9x24/on49Dwz6ec3pbHgyZtAJUE844C0BNunpGExhYeBlMgKFNRmUCM2aLwwS7Og1sQw54GJqrgwOLqyBZQFVQz6AYJFBwAR5G6A+q4eM5kLLRN55oN3eAZIGgZ6jAEiiQSQDI2LsYqA1Ohy36sTHozTC+5CIz404n5DQm7S12NJ71h1cHEcapPN2r96ycPGR04VFQSgZ1ngsQlZHF2MwDsPdsDuPtAMKC1b/FGt1+EUTXxtbcl+PhIpu+LeB0BEgD7/pCx14saPqt/W5muleH1nPgRI/7nO2xv2wwkGuMK6t7G27tJHnyJ0sbArT6Wl3RLf0Zo/mWzKl5dGhG7+rk6Ppyim45hNZTcssfbpseBwDJbxNB9y7rovVydXZLq4un9BKNHE99pjfzsc/qT3o8vZVmK9YtOdxrG0/tGsOWxa/6q8GL1BrYtuYK38axPLetOYgm+vqKznPjPcs8L/3KmT6a72w0fnIOi+N3b31okzw5V8/SjqWy+t9+T5mSuRyt9g8v3opli62P6tHDIgcoT62P7SfZ4mHMtZGnW236aRFatsvGVwZ6kHa8sFrwhC69rg6ir8/wPHwoQIqObpO19SmQgh/qtCcnXfT3rqw1WGUd1kZurPksPCV4q8OOn1gAACAASURBVAy/8BVdcm2ApY+wIHnR7RjohA5dzTVa/aEjO2wIL1tT6V2ezIs72u4aJDueDp8cFPfmU5/K6aA/+C6QMg74LE8edGElfrVN33IyrM26d8Hu1q785AM/so/aZwfmfv+eWHOdHZEv/Rhf8iwdHvW7cujrLKdTgbO5dr9xCdy1GdGHtmEe3CYDPbrU90VSWIQPenOKptjDuPkJ5XxDNsEX49umS3/Gig6f5hSNMdCHtvAXrdiKrJK+1KMTSzhIEqOIe8gSb898CL+sLzLkn93f010Ddw28OhqALfDG30YTZ9ojOSgSO/CB4pxiW/sWmAJPYE1+hS/qpTdcgYP5BZp0b78BA+GatsUC8Eh5L/8ebrENvwTnyNaBOJncw1pJ7AHHtIefePb1EExXT44Oh8hqT4PWeMQl/KSxw0HyaUNWmApbJe3Ro+vQRk429PLKHdB4tmcS82jj6x/P+Bu7mNEe0l6BLsUl+QN7Q3Weff3ivi+E+htCxlwZWc0R/uvjlOnb+OkiX8zn6Vc5vWkv8WN8Ch3pW32xkXpzVZlnPNEWo7g3NnqU8BXT2JMX15mfYqMOycyzfuiRHjyj92yMxoo3/a/9vPHGG1edPuWuXsrou7jiEuYZprflwRA9Fyinc5MnaGIoGVsGIVdW8CXQbzIZI0Cx+AQz5YIcE82gGRPeFmuBMUNR5rLA0ViclXUQwricgneIwliBmtwfutTO4tROUAQ0CsoYXZt58gA8MgKMDgPe//73XzTx7DBKjh4QWaQdRgENC0+9soLHdGJBWBwdkOx40LscNpAbbXL7yki75NigFLjgXxk9AgcyWMh7cGR85gqAfPGLX7z41090xg6I6MepszlBp1wfyYBXi56O6JUu3Js/+jEONC4Bf/fk6n7z6NVH0+YhWyR/QFhZNHh1CLTOCa+e5ey5PuQ9G6fxNV5j6bBr51U5HQFA95xrwbj2bRLijb/U2pGrkzgvbXOk5FPfONGo2w04fq7GhCawTBc7/uW9tJcAt2QjSYacRs6GnkvdJ5vyHY/nlcFzjqXy2iiXyJW8V8EtRQtPNiknpwORs3zlQEfWcMnz1ruHDY1j8/rmtNl/mNbY0+OpC5tezjf54/OUrpI9mTwv3c5r/TXf2X06U18d2++5ejldK9+fnaGtXZhOBvTbZ3SVpdPGjwca7dyrrw3dpQdjjLbD8ejk2Yk8OuUwBY7Bhf0bPfohwx5WWIvWlDbwvPUdz/TsGc6GvdZ3AYw21kB61sY4kkm9AMf6zm9Fv2Mlu/oufXWPPr760Q7f3pzCEpic/Kedkof82tIJ/8rP8gHJ0E+/HIzwm+RJ39rVL13QocOSAkNlLql5zlbldL+pOjpavZHfmPFfe9S2Z23Q0L8xWUfZ2Ll2PJMr+T3Xn3LyJ3vrVk7f4Zpn61rsIUWPl7Lmguzw0EaFXpu//MNbt0/4X3/99ce1gp799TcQjSse5gdf49LeM/+cDvRhU6AcDwEyedgxmVziCnFMb6qNwZw3F9rg3Ztl/IwNHV9MT/r
CUwSvIaV8kwbRDCCYYBMsqSDc8WW29hlDZAZIovOutQCN6SFXpIHw5CPvrRj75JJ/q7GwePRUAwscjTAfqcxzzhIZ2A5Tif+tSnnuI+cKoAACAASURBVH384x+/5pZsaCrJko7ApotzbuOBTMaSuzlVptvaTnj86XMLlL2lrK1y7QHf2WM2BG5hPAve6VmfMSdM7eHboFS9cdkbvaSH2tArWGZ74IztOR3T5YsueOhx9YJHehMQ4YJz5U2m9Z2Vo36l9p6TOR3lJ5Xbrq32ZD/hVqZ0sW3hUOIhWeMrfpbWPRqL5+xvk1Y8saiuzS6NleO0A4ce2SpcFvjgwb5IN8s7Hdy7Tt3smFNfPe984ofPdzfvwcBnY9IhSrqwgRBvLLC9JVzbSAeSGjAdHpwwnsHAsTz0d4HEG3Tcb9w2fGJeX95ow2/j9cFRefrxfuFjbDExXsnsQgfP5gm+6GjvQAndxqOzOk2mDss9g1/Zmye8i7H6whG++Gre8ldjjcNLPIJ1yCAB1Ye2C87oVu9vEoUv21p7rq5P7MnOlPToEMvcrFwLE8/JqexAb/WOp5XLuMYsvur6XD23rtyjDY6sDiXv9Yeree/5ReXKFJ/wbr053LkOP1jxms7QMM9sSh08vfCD9BHP8IcXrC855BiLFwxddNUXH/rEs2DiuzzDIUZ633LxaY+P5i273THL6/Zve3LYgGpffanT0eYKfR20651+fJPJmC6y0a1LXb9x5Kc3tLXRoyse1kbgs7mSg5S/WEcdchhvA+ZZLiTXwMcZO8UQsuEdrHxMPKlurJzmB3/wBy8+zL9YYNMpv+sWf9bX0GMn5MefDZGNIzj8afNs02iDiTd4k8NmEaycxuEk+dtcOeSiG/moXFwOKMb8J3t3srJbUv15/FzK69wu7VARTAQbFAcOBEGd6R14AYUX4UxU0IFDQVGwoewTe9EL8FLq+WzP99QvI5/34D/TqrL+eQJ2RuyI1a8VK5pnvyfRlG9eeumlZz8EvuX250bG7aHkG/rTBx+6kIHO8BTjZCIv+8KFh4/9JJ/Rid5q/mAr495flP8zFuAXFwPWVX52qaotVvnRpQefintxx1f9cEsiFxPlA2cFMeQShe/QOPOKyw++ddnI14237rmgqYhFcdRXdvDQFtfwFDD4e9dv3uIhZsCiR16XLGI+PDFovoh9crBBF6jmYF/BuLABI17xMQYOPbFZ25wxt+iMNzz88fBFD1uax+aAfuPmFptaG9mR/Gp7TfDe+xoInX5ItKZ1WWQv3wUuncjTRZ33aGqXj9pD7V40G7JPXxCVS1sv+Ii9q8WGPKAUA3BbT/RVovGs46nvtr9YYA9xlK9OGmQwxtbaCjtqy6tij770FGd82f69/Zw6/cgJPhvF1zia5Vrx8+KLofXgv6kt2C0UNu8moprRW0SNewRejspJakmME3Nkv+KjY8wTfjRKOPAL6IJYH+d76ivRqM+xAqUazD5tQCy2aKNJHv301A4+k6aLBNaEQl+SlngkLbZADy5aHgH63e9+9/rihd5w0DdZSty9myhgughBK1svDjxyxsP/Kt4/0IwvfPJoB5e9vWd3cuSrlRdd9oejbYOz/4ZO/lPTHb30gMc+6LJFdLauDd+m7+WXX34Gf+KA7eEHPvCeL7TDUaOpLP+SSPFoofjmN7/55Itf/OIFqz8ceOGSzYGl8WIJPW06a7MdGH9u5DNu72go/qHKYjN8NrWog4GnX0321Td9fvjDHz75xCc+8Sqd0zs7gE2X1Ve7x+GmL7/AJhc5kqG290o22Xp1qg1+YXo39+WN4mb9l53CO3n2Ts9g+6opem146BmdfB1ePtx3/jUH0RHD6MBfmH0/Zd33Z8Y6GmtHQ+f7qXc2izbbWRjl4vJA9ku2YGxm0oct9Mu3mz+KhXQsL+OBbjbduGHvZEDf00WSRbwFHI6LKL4ujm2M+jMw9t0YXx+Rw+EDrM3S8l/fB2cjET3j6/vsUn+ygFlabN37zjt93tGJbnCNBV9/fgynS7A2dnxwT871/8odnZ1nGz/Lt/7wXTqxoyf5i6uF1a7Azafw+Om0FRhl+60L8JTNAxvn+4Vh65V6bSpn3sspeEarOpslz1mn08pw0sgX6n3g8i14dbkczKlffOOX/avh7Bj8xpIn+sWbOAkOrLWii421+/LS9vB1dk2nHTv1jMbGOHnjs3roIyN5dm6StTVXvXrZG3jI7xDSXovO7TfQUtpzyCXyi0NU9nOQQENuaf0iG7hsBR8va7LHYc4aq2/tVp7TR1750UERvkNlhxWHS5dKaocqfPz7jA6b8fjb3/527Y3zjz0XueQvfqSbvZ9DnT2Ug5/DZodD+yQ8fbnen5/Jp+D8I9XkMs5WcmpfF5AJXfmSDYzBo482O+HjkskBybqvjW8XOB+5/Qk/XT1oWTvsVejgYSd6sEsx4cBr70TW8jh8bXzQJhv7KN7Zvz11sXENvij/ZQuIBX4Wg/zJ/3wizrrcEGv62H6LOcRP5hV465K45mtl84l3MYcfHuYKfvkPLL/za/NXnzNLvrY2i184Ln/IJ5bA6ZNLzBFzoD/dxAdP8S9u4InN4gms+OvyiS3MKzj46ofbXCYPGmK5HNCfpoGDb/6XL7z3pQ/76PeuvwtV9jWGXz8UdYFEFrBs7eIVrEsTuvaVkzZZyAaW/azX5prHGFxrMLsYLw+6hME7X5n75nS6tZaw7841scD3fE1uhRxoyQ1KubRcro9/8jGe7Gq8PrLZl9KbDMkZLxdlyYpeMPC1yyHtH6MfHPp0Ume3anS3gOMvuooDuqrfzOXf9sUQhwgQk0XQeywYAthE78Av6DZgCwjO6tEHxiIlwPolm2MLCG0w0SqI4PqFRF2Ab92mQiD5/Baevk1cTQ61QOwJlwwm5P75DzkbV1fSKXktgvRBGxw51eHGi3xtIiSC9MGbnSU/bXyzSW1J+6c//ekTCzj7BLc0t+3wBpasYHvQI8P2afsHmD/+8Y+/Kqmgt7bSBqt2IOvfPWKH/Bj9bOOdXsEnOzpN8PW79to32sEu3s9+9rMnL98ukYqZhd2+/3n7c78PfehDF5xCfrrZxEnwfUW1yYVvir/tr52M6OXz9SdedO+QBA5Oi8j6Cn6y1U6XU//mUbzSs37vtfuzvPr094TXr9hkE7+rV/GcbHTYORVsNtk52XyjT3RdCvAfm6fXaWe8Tjmy3cpGN3MGPTlpdQOHTnU8vJ9t776sQatfd4M7aS5uOq9stdXbZuO1EZuUZ4INPhsufl8usZn4obM2edJTuy9rbCjANK4mg3iEF35zKb2CL142BvX1fzYjA3sV3w4f/TmV/jNmvcNvgyNXWvyTp1hMDnlYjHTQCT9di4P+lKtLso2pdECz/vitnuzcO3v0rEz6ign96Z4dg40nWGPsEmx2WTrpg/+Wjf+luTHS/Np4iY44aH1dneMBridb7tzty6LmKZjls7FZO72Cbd63BvJBNPTde+Aq6b800nf5bTs9VrbonfKim5yrW34kq/zcDwHRJkMyJaf3U2bw+tGLl75sHo73
9WMwK1889Z20kiu83tGtXfyk2+b75DFW/8bkyrPxUpzFRw3PHs2lX3mZ7LvPSqbioHnVeoxGf4ItbnuH5wKI/vyiZIu+1r46b6XLJ5cr6DsYJC+atbOnNck+xr4UP7LZH/k1HS04/cBnH0wW8SF/yXkui4x3qeKSpK8W0HIx0uUI+g6g9hsPDw/XQdw8la/tB+2nf/nLXz770c07WPvtd77zndcB38E0XHzZjvzskk0cwMHRA4wDpQMjfr5ORBO/vlCgxyuvvHLZ6u1vf/u137endZC1n3YYJwt92K8vqbyzcRdbbIUmPcWAQzAbgmM3dPDFn/30k89D9g5y+TifvqhfawF2b8/S/2HP3BG3YpHtrT/szL5808M/YtilSGtY9a61zWvwivjhv927upixTvBv8VcsLj5cBbyLEzDmjlhtjoIXEy6NxHV/6kUmMUZefWDEkHjp4gddspHDOJkU72Kwr4fwBivm8cCzyx70XSoZI1+XQvYtcMwHPNG2RzHP9fVnTcGZzy5tnS/0kR0+++MP315T7oLPT87W6HinI3/QDWyXPZdCt0JOezD9bFNepQs8hQ0Uc7UClvzyh8JfCnwFPjkq+R2t5rCxaOfvcio6cPbSEDw7wymHyId94Qi32Nl4SSf0PMbYUn8/FBarwaYHXmTDtxzZZf4z5d6EjTd8MSSALQ4eC5cgFMgmDKfr80j0OSCnqj0Fj9qlADwToINNiSinqwusHN1hhjzwwKDXZqMNhbrSZo4MBX7tcPE2VgB5R0Og2SBo2+DgZ0wS6EJH3eajX5NMZrjoeQr05CyR0mPpuNjwj0Qr+HjA1M5Gvdt4+V/XkwuchQG98LpY6j2bog8+OZa+dp8xZ1/yk93Y2jp7gzNJ+2qI7ujH33s86vPrms0dXOPBnO98tRc+Tf6FC1+yz976ir3F0SfB85XFRFKS0N3iSxbhZKtNMmhnMwd575ViSg23kg2TS3940dPHlstbH976jKGTzltr9+dkcE643tHho75E2rllLF7qLcmaHYyt/MGvndBjH4sQ2OaWts1KuUT+WN7aSrR6P/uDUXexLJ+IsejRO51OHt57Vn6yidsOwMVV+GDRjX/93rXzMbj19/4JGnyxtrlAW56ySc+2F5Nb0b/zR1t+UZIHb3TTyeFGbr13QRJMMVR+UTdPsptan7EubrSj78cA/cUYX3QIzHbN+2Q1Tjax2EVPMGjHu3/LyK9Nq0dy5188jZNDXHXxEn81nfmgNadf3xtbO7I3OmITrb2wOu1sIxdcfDc+tIt36yVaNqsbp/y4PsmP+m3qy2kbT8VdcVYsREe/0hdh1kq8bUKT7wK4FbDrb215vAN2l0pnHggPDWM98a7Wb/7vE610CvceTHJGZ2GTP5h9X/m2vTqHZ3x1CKa4zu/5JrnJew8XfvSyAxrgo6nuwBvMyp+NwolmPNPpsbpYqBYLcn+6hFc8bOwYS9fgG1/7LdzSC1fdHJPj6BQ976tTeqJjDwOvXOJQFX3zyB5CfugLI2PmlDo7o98Bs3Hy5Bdw6HovF9sToG0f4JCq3SUPWeCsb627eJC3fYEvs+0nwrUnNp/goiHnOSDad/RljRpPOOA9YHx1bgwueeQO73jSSR61luhnF/lTvqAHGeirz6HLgZv/1fZ3YORve0gyOQCjpd/FgsMWOdiVjeREBzkXROUTerMPumgY50fyapOLHmg43MJjazD/ePpnvvjQhz3wQQ9dffI6eLbxCz/ZOthdjnhRnlnAnoDfPP7NUpcELvHYzrrHf2KBX/ih9b75qOZH67J28027d7VLiOa093I2P7nsEF990SNm8eZbcGCcneApZLUHFx/iD21xW64ge3NYLHeBhE9/SoauOBE72mIEffNAHIk/eOjTGxx+xthCf5c2+opROrAhe4BhX3O8CyVz2Vx0SUtufNi0PvsWsQ3OPEKD7mrzlp34BF26mSvNWTzsK9CGY17SsS/f2c6+B4z+viTKZ/ISWdJVbQwfNINjW/MJPz7lJ3uOCniw4dfvXal+hnBrBK9PO18vDrrlFTkv3l16J19rAP/B3wsgtrCm6UMLzY3V5IPXQ1/62+PR9c1eXvfFkM1kjwneIxgtBN1AmwyMbwEykZSCipMVE4UzBHMBbWKsQ3NsdTQ6WHVAEAi74SiQ4oVfwaxGZ9+10ZBI1C1W3ptQaOEhQZgsbSLAtDkR1C1W4EuSaPrHfl3aKPTBQ2JpA1G7ixI4FmkXQ+Dx1nfvaSw70Uff4pz43vsHiMm0dOksKTZJw81u2Ssd4JJ/eRojD1jFWHSrg6/OJ2yHV/211Xj5Eyz/O3r0V27jPcb6hUB7aXgvRvC0MZKE+NWGQ1xmczJEE1+bs/xsIwqfnzeeLoVvpRjIDtVgF693NZhsBz9ZxXz/55RTn5UxPfcXXvDhrB20myPBkJs+D7dfDejZpnPtFQ55bTYsSo1XZ4PsoF49JXHztw1KsqzOcNI/OkvfWKVLHPmpeE3naFTHK53rj7Z5J0blNPFVHEbv1HXfV9/o8kVxilZfMAWb37Nr9Ng1G6AF1wK4tlq7LGyysjNdeodbO/nw6VIGrKeYWh26IAnGJQF726jY8ISzde3Tjr6UQ48t4G4uDxZvsi2cNSb+2ZRO4Ix14YKmjV7809sBkszsCN54suOXvuZbm69iKhrB8BM4tgODNxg0g4km+ayXNiJg1w/FfP7rPVz1luJk/a2vi5+3ve1t1+VwcvAT3h0W8fa4rLXRPMtj/Ju/4LVXjmjo3/Y9uPLl1nCWXmObM57H7xzzXl/tezD4rn1PeeGQJRrr1+URnZUXHvhsAn5tuDTB8UlfxISz9coGPrmy3cpjDbFeqJdPcVef+sRfOuC9p3ftYlNu68vgtU32iD464r4vZPWT3xrjufdnY+KX/OYnfPAKO7T3yJ7e0w1ecMbhGa94Nx9d0JBDPgEHH1x7H/Laz5kj/UlacPaB2vY9rZPWSnsINTyP3CXX2FdkH//Qs4sRuGDZ0IEm/4JT+rLIQdKh2B4FDt7eHT7Rd1DyxZD9jnne2gVGjsPbvsU+R050yetPZvxjxHIoWRR8HbL5yOHWQZh9HLodGMHB15bz3/Wudz278HLAte8Xv/jhJd+QFy22dfBnK/1ylX5fbtHHgbg8yz/wHFDZBU947KDQOX/qx699tDZ8PgTjKW4u5DdBMV+sMy5i+nKFHfmxeDBHxAu4fiBo/hR/arGkZFN2Lo7B52P9iniqoA/P06VQ/nHhwp9ooaOIB3OgPwmjh3OWOLTGKtZkNPgdrmcviLSdLcWquaCQSTyAFUvmjR+DxSx+2ca4WBTfeMDvyzg6iEfzgwx0I0MXWGQEL+679EluffD+cbv8xBscOcnIvsbNJz6xPyEPfcmCVx8WRAcOnczN/lxMrvCeLc3h9lb9cMgW+Y7eW7zjn83kLsWFVTmcP9jG+Ug+waO8C5eNyKUur7J7eVz+Mt/pCF/B1wNHjUfv8NgMDl50FDdijRxim135irzsQn+w4LJF8i8/ebdLdbhovNnL67oYklA4TVLW7l0gtFHneMV
Fj8AW1AVMAdkvGGCMeYwpHFiiqC5ZqTtg4QdPX04PV72BkLP1FaQOfv1JWYEJrqBuIVl5JAE8wZg8TSwTQ5AVhHBbjKolgW6C4dGNDi1m5PKg3XPqF2/j2a12OCbsd77znSfvfe97rw0KOInVJCr4q8FaxLusimZ87/Hzb9d86lOfuibxyuCdLvTd2p+f+bdu2PGezPWxnYc8/d/J8nvyBKOmL5r1nXT23QaELdi8GCwmqvnEryP0KO42jkpUv/71r69FqguhYgDsxtHSbwzdjSvwxWl0NlZX3m0vz50jax9tSdPBb+0Una3JJPbEhfnb5k28F//kWnvce2+BKMGnXzqujTrAZlf08o167bBt8I1HV06Ql8o1dC9mkn/1rS99GsOnPz+zSDe3isPgH6vTQS2Odx47GCgnbjob2wX2An4Kv/Jtf/pHc+Ef0xGtnU/8II/3iyGZwazOXbj0S4y8hb5+dt9crB0+GO82+3hoW7D5yWNDGj8bBX1go2nDY63BNx5d1rT+6LdxsNaAg5vsOx/I0p8/dQBpfH3ChvSyNrEJPmuv9Rc8cwxsa+EJC55urYf0ISO47JRP1fk62cCtf4MNLnn4ke3EbTbMtuzIPsm2uNqVzUEnfTKU35qz1cGC2dL8D+/e+M7/xV1+J14615/c1dlLHeyJE26weNcuR69dorXrw+ptfPHSHYz+543jc8YsvNVrbQg2msGprbtythxu0w1GTkmvU96zPxutLfBKd/XKGn6yw69tTF62/ugTk+ZqhWzktW+iGzmLdW0PHPNr7WCdguOxT8iu6DWn8CCLPZ5x+H2JHC9z0PxwEE1v8lgD2S5a4aMPHr5aftd2YCEru8P1mOOtHdFp/uk3Ds7hkR72ZvKHSxx94ZSXw+lQBc6h1GMP50BDll/96ldPXAjDsz91qKSbgzDcn//859ee14+1cq2xLl36YYAN2J1t0UYnOZNZP3gH5r6WYPP+fMxlW37q6xT6kwM8mtp80BdLZFK6yNfuRz2y5huH6mzcZREe9iwuGfgJHpz+BCmf5yM1OM9/12KP+nD7YY+P1H3Jyxb87oDNj2I3e5mnYk+87Zze+ZfN0NA2P/Bia3YF68m+zWl9DuEuNsBbD/lNvwsjY+ZB8vlRQ0GHrGQs1uDiK37wFBvJLia8i1kXP+C8uyDBQyF7F0nitC/szAX9wYRvXH9zpUsncaivPyfrUsv+gvwufcjIB/j39ZM+ctaHhr0GW+k3F+izZ0Yys3FwxvH1TlelfNy/K8Zu8BqzDzC32RS+R+5o7tmndAGTH8nExvvv4+Ijh/EX2cEorTXXy9OCDp5KuYXf+dBekHzwy/94uRQkozG5DC847ZuKO75rbelcj5an+C12R6TLTsWaNj92Pl64N2P7dV0MrZEzvJpROcPCL1Al5TbunMnR/7ht3jlBsAsKTvfAP8s6Njh1gX862bux6iZD8pZs1PHbPrj7ru3Rv3LCJZsgtAha1OIpWNvQqE2Cau0C9ic/+cmTj33sYxd99kKr2qSQlPcx9u1vf/vJ5z//+UuWNhz6tSUSk7uHL3yh5WsaiRTNfs23cJLFZqRfmmwYslf67eTSVz/foZcv1Ks3eVZn7/TLjt7JjcbSXX6/+93vnn0VEww+xUS1MbakZ3DJeb77lNvFUPGD3tKOfvSKB77lJzqq82+XQpfzpxSDYM8SzaV9wixetgWDP7/hq11Ba3Vdm2onz8Khm7/xQ/f73//+9X9PsSi0octWavA2+TaVybi0kxHP1eGeHYoX9T5gPUt3ZTj5ee9LGBvN5kU46Z++1ade3rtYsoDZQKEF/uS/tqNz49rnRZDNgfFsXb04O16/+D7LaZt4nzKctLOBHNxlgZp+NglytIWdbPietpLjwNBl7Ztt1PfiT39/9mU9wB8tC/x5cdEFYXDyQ7KBT17y2UgsvnfyWW/wSEawbJH+ajlRnKDnPZ3XB/odRNEjR7plm+gZw88GDKz+8tLakH/47vT90jOGHj3K6cmXbPm1TVK6gusSqxy/ctybi9GMR76UH60X4TQX460uDsuHS39j0cHQep/+V+Mo0dru6C1P4/HTrySzd39qKda2pGNw1WCisbRW19pLb/UML1rRXrzaq0/w9cGj18rKJ22uG18a+rxHH2451JrepYY+eX1lXF7w4gtm6TUWbvXKGTx5No6aH/Wba+Z9vNXW/ofbQVUBb87qV4cvvrXTVc0u1qa1QXm6+IDjsATGI7exC57mRbYEt/rBb93TFrvxa81Hw0ULO6AjJ6IN1rq8cslfDlLgxCbacoWDIjre7bvQYgu42kqXIPznMOww6Rfx9pPWKhfqcOhPJv/mD3rgxYC9nQO4/AnOwc/ByuNw6ZJGLiEnGtb9fgjhM3tK/5C1yyHyW9vkZ3T949lg3/rWt1564OcAqi0XyWF8QN78IqeSxf7B4RwMPfFB28UAW8LDv4t78OTNn2DkKLzI0J/2wdFPRjQ7tLIbu9AxP9on8z850Mnv5PUUI5cz/j8tbOOygN3EnPe+7GAz9hGP7LZ7J3YxT/hB3VrSfClOq8UsPi6YwCrsaQ/VBQuYxWdfFyWdJeA7D+oXE/xljoSnBst/dOlMI+bEiZjnN3CdPfhb/OdfOuLZxQ7Y9ENP2yUNPs0fcSd+2Yh+5pzLHjVZu/hxpnLR1Z+emVt4mSvml5p+5NGWL8CLVbzrows5yWGvwob8xnbmuLYx+VTNVmDQ2/O03Ik+Hcsp/EkuvJTG2I2tuliG5/yornRxTQZ26nLaODuYywpblbO9xxtMD17NL+N0KEd412ZP7Wixf7Txdymn5idysgsc85k9NjaTceWJD93FGd+ihcZ/h7l/OeMNlNeePv4LxHK6icfAArpN/G7QJWljAkBCKilzeg7kjOitCPpaENbZ+u894RoTOOiWrHZs2xayArN+eCUYY/QzOegqkAQRXUosYPAU9Nrhwtu2d4lA8KYvveBFi108+kw4DzzBa0JE3+TtKamCM0lMXPBsTt4WRDpYKCUI4y2YeHXxRK5kqM4uEjGefJhv2Ap+urIRvNXHeF9n0bdxupEJbLxqFxP1Fwfeo9FYsbByxz96kkZw9dEre0r24lbyK4ltPOjLz9EJP7iNqzPBwEmnc+zEN17SEms2Z3j3xD+a2WNt23yxePnTt/MfMY4WPj0uOZNtdayNphJfsPrQYvtTr3v2gk+WPiPtF5bsC+exgm96wecvi2ux0di997VZcPpsssWhHNV8Kz6WX3rrq92fmolvj4V9aW977Vn/0oyuMWVtqX3advWJzoV4KytjX/qsfjaIFlv5ms7nHCODr3Ieg1ne2YwvtPlF3kFbntOPHh7Gs60+sORiN3DN2Xy789kGC135LTrnHI/2+o+u8mNfzOzatHDsRJb0WLvQlyxqsjlQBctG5W/j6bo+zi/1VeOPL1uRMVqN71zsAky8kw1PMtkUksU7vH025pJhZQkW3BlfxVw6FVPwm6vBrJwbGzt+D0YeoNdZ0h+t/k2y6KJTe+GSc+Nr9Vu59G+JZrqBfUz2x/hHL7zeo7X1wi
aLvLe+41sF3sL3ri6/qtuXWE9PW68s0cx2z4g/bUQ/ubyvXMufjKeNvYtFRZwmS+sZ+cCI+71MaY+SXPjwJXxjrTHp3frVPNBv7lbgZZfWFJcTClgPGOsr2/Xlkf7k18a7vZ+9jf2BdRJOuoG3N9u11LgDK959bYOOg5k9VF9KOCyyU7hgXLzAYd9ynB9WyQq3A549nJzhMUY+sjjcouMyyH7TgdW+r68f7N/syx+eXkiRwT5BbukLEns9eYU//B9TO3iRy0U+unC80xFPh9V+uCI7+5AJP7ZwAcRO4BRtOf0d73jHdajjvw7n3rtYoG+HZgdBl86NObzj4yAtL9Ofje1t7ekc0MlIXjamj3yLly+M6KtfTUaF3zrn8gAAIABJREFU78jNpsbEQL6+AP7Di7gVE9bK/iKCz9jQxQi/shWdW793ju8cpCp6jbcmdzjXzweKOHAusu6zZfOMH/jTuL7iujla/IlRMoMpX+QzfebCXhB1jsEbLbrxN1iFbHC6+KF/l1FiJH+LQftQ8QDXmLnS5Yg5L4bMATL27wuBheeyig3U7E4fcaw2b8mAlz50zS3xZ5ytzCN04YLFD28w5in52NAeBnw0vbMXeTcXk49tsjUfd+bUx1Zso+Cxsa2d/fKPOUUG7/IKXl2ayWP9ORiafGxPoqaLgh4a8sDDLeeg42l9SBaw8MDSGx8xyi/RlqPslcxRsSGO8fHeWpAtLuZHSX98yM6u/NrHFSf8m/H9dV0M5dQSpmAukTAiw3tMBMEkCQlSyVhQeYKJFjyOKhA3gYCNfnjLRxtepaD2LmDIKVCCQ8u7IIp//MJVdwkk4JQWBzW6aMBXg28iGEc7+3jX9qAZbrzIA94kwatNQnjsi7aHPRV08AavH+0+CxXseKD74x//+Mn73ve+a4GzSejCCD8PuOQpSaIbb76yETExTWqTtEnnC5NPf/rTz5KPfrTS3XsPWSQxv+zSe/tPuPxtE+FXrN7jm//Cc+nx4Q9/+DUL1wkHPlvRiz7sIuGLTckBDDuwAb7ro+ip6fm9733v0r+42lg+43HfL4SnJT7snc1LeGS0qOsn970S3dNG+04nibp/RwEdeGiivTTqB5Pu0Vp7NBY8m2jvg37j2ouPfjGdfr3f03VxXVi0mSnO883Kqm/f2eHURYx1OdIcbD6En67VDjTFuY1GcZmM1Ysfrr5sG9xpa+8uqxrPhv3bFvfgL6JPS18+kZGdbEbU6dWfG2ULMu2j3+VHm+e19doQjNwkt4PpayD89Otj02jv/IVrcbc2xId8a/vmt8McHnLfflETbHBnPhPz4Lt0ItPKk33J3SbaJom8C5e9u/DKnvhu/MFLx7UTmuRQZxPjXYqhl+7FSTXeXWzZEKVj9JOtOZwMa+viIphw8n91c3fn38pRPAcX3aWXXNl28cM7dVy5aocfz6V3xqqxLuuKu/XLiZvcyYGntkte65NS3+pY/9nX+73L98dg1wZrF/2rs3a5szobra3is7rq6z0621dbnQxL58RPtuRbP2izOT/YiySr3G49e8vTP+Fuzhg3Bqd2Y/IXP6AJhl/It1/KFqNwXHokK7j2mPYO+RHc6qO//V0w5hm6+OrTJreanPYKbODdPNRGV8xFg+709ZDDIRAN8PDVDtQdQOUDuc1exP4MjKf9iMMiePR8ydy/5WMf54BrHA3w9mb2gPYx+Fgn2ZHsDo/yof0EXDwc7Pxv5h9uB7UO42jJl8bYhy3QY2NyykH+AeOXXnrpGqdjF134y6UOpfRjm/xhj+UAhkZfI3QRw7/o25+kKzg5k17oyJ3gwFgPxAUYspLJuPhwoFa6ZHLY4x++AktWuuPNN4rDfD8auvDTjycbqNmLHfraqn093f5Ti7VSbNCHj7okYwN29biUKIbLE/ShFx33ixP6K+UA9tfXnG0thisu+UEsgnchwQ9oGieXeBePxvWJfTTtB8QH2uKFjOWT5iae4hsfFyrisC+H+Mxc6dKnuUy+voDSRzfvbAOXPPr7ekc8iAux1vmADmTCQ3yTgUzii77G1C6n7G3IZ17hY0yc0kfbZRI443DYCJ3izrs5oYZHfnOj+U5W84we5SK2RMucwUPJR/nP/Mwm5kzFvM++xsWLyyeFn/r3gPhKPstv7Ic/+7E7XA+54wnHU85mA2PlVTS6XMJLPqIzHL5lS/KIC37Bv7WDvdBqHSkO06t47T1ZxBSacgjenQXxeFFu9yavxwgc6llnR4eRSzZt/AV8GzZO93mqgOaMDZrobV+OVSvq2sHd69dnXOA0XtCGpw4OzdUrGAFYItLuXQBKGm7HHbQkYPTRMVEEHpgeNDaJShYubT760Y8+S7bxMUnh9+CpbeL94he/uP7cx2SywJm8FjkP26djOtscfPazn710A58MydPEahLzSbS7SNLnCbeJSDdttlIvreQ468vgt3LCh5tcxl0kSUgLvzwWJ1+e/IoX42zwox/96Ml73vOey2+ST7+CnXTZTazekzNYi5KCB/rV+pJn2xtvF+KtkGl93WbLBmhxg1+6ja/uZDj9IEaaa3CySbbyrp38p5z6Fy9ZTn29+3XfxQkaPeWKU4fmW7U43zm4cmbfvu459Ujn9Deur3p11O5rmD1st8AuLbDmtz7jHjjspVTHKztWr+3quxBvZcfIYz6hr3ZZUL4RH9rk2LgIHy02Tz6LMhrpTLYzJuDUv2M2DjYMNmjyUAstGPQ8Fm/5nIxdpJDNZjP56RpuNurPxcDZuO5FyeLh1ebV5gcsWfZJdvJ2YYSew513T18WBbv2Fk9wbQysU2hnr/WdPnB8zib0Jus9my597S7X+hKojV988UYLv/VBdLo4okswC/fPKHrtJQbYnmg1F0+cdC4eiqH4GF8YbTD5Lh/HB51wtHc+L2/0g1VvXgt/aYabLMGYNzbCfEOm1qT1YXxWrr7043ew5hd8F8V7gR7flT1Zk++Uc3VJt4UtH+o79U7u/Oeyqf8j4Qm/9ssu4Z1yrw1qr43k1f1iZm0F/oyp7KRfPCjV2vYq9ht7WOBzujd31MnNb8a8o+MQlM3ME+ui/NHluDG+K67V8O1dFG3zBzw+wepXwKcj3mJI0ddei+xynT0IPRfPoW1ptW7bBxaDwZv3i08uMOi39mv7QQodD9upxeXDbR9if+dQBKavGBxsPHCLKTZyKEa/f6OjrxMcVPnZ4ZKechr+2g6/fOpgJN9YQ+yNyO4Aq8A3z/jn97///fWPXLMtX9lfkHP/tMw72cxRdmwdi5YxtNmsQ58fMR28HdDRBtOXomTCp72bgzo9rRXWCbYin7yvHW22Acc2cDz07auibO0dffrTER0y8529dXlG7PANWONk9/6fUsjOVg7Q2mQzD8wNPhVL7CuO6Mg29rLNy+YFfdhuH7ZnY3EXPPzacPBko77U4kM+Igd+eGvXB6evxs0rPOwtuqwTr+Hgwx/ox4evjZvjiksRsdqlD33JII+4lOBnfejg1Rc/8j+fulwBI04U+osJfNDtAggsG5pHxroAYkuxRwfw2vrQdDGnjYZ5A9842fFDy
9NFU/srY+xAZ3LTo3ksn7ArWDyzDz+i096GvcNhx3LgpeSttBbxDRpspI+s/E12dV+KsancWO7JH3DERPSiDS5Z9JU/yUk2c0ztXE0+NpXD0PEkF3jzvbzG98bTnezl2+WNH5rmLFrig27yAl3z9z+t8eb+7xu6GCogOMUkERg2wQJUuyDmJM6sFpwmPSflLO3eS0QFFlwlZ2/i0t97ASGo6otv+E0a78tHG2wBaVxbgCej9spcQjQe3SYF3QvA6GwCzTYFPXzjHmPBpnO2kHwsRJK+4C5BnzZe/emiRL/ketoRjD8n+8pXvnLBmzjBRt97jySe/Orsv3X2ry949J8HZ8yEzy+r39omWX76059eXw3FT11BQ8Gb/Xz2eOoWnWjb+DzcNjZLRxsN9PyaIJlUGqvOX9X1P0O4NdBpY8iXEq33PfzDT4ZopUu2UacjO4lR9Cx64sPc1A8memj2ZFvvDkw2Ecs3/vhuG0xP+sWHDGwaz+CSGa3GznmfPo/xhYvPyoMX+I2T1bG2RdRmt4X0jG/4HotFh18L9UlrbZYsWyf7Kecl9K10EWShFHO7qIXbIpZeaLn4yDfeyxcW7eWvve/pafPfRX36uaiwIOuXv9GEu7bJHnvhEo9gm0PkbR7pcxgJDw+HBPzo7pHvwqWfDTibk087OYNB20UYml26gAkXzWDV2c+fQ+BN375UApv84BaeXH2Svblz8082zq/q6Nt0rB2MrS9rLz38F645v/qsb4OtRjO61f25VvNq80jwbNev9PnDmLLxxn6t8cVucBfwrez7znvt8tTCrTzBN46WnCQOihX69+d3xT37nL5It+Qx5+iGTvMNn2TCGw38fFnxvJLMaGef2vBWp+2P5tl30iGHPvXyuifTadP0XV732vrAxqd3dTG2F0DJtHNE7CrqDv4dpvPJxitZ66+mn3bz/CJ4K2DR2nUCH/TUxfOOw0Mvm4DrXR5qvAujHQfniUd2FC9LT9tY+dm63bzdfR0Y7/Yx7W/tO7qQeLjtL1xK6EPDQdqPcdr2eHKlPIiGgoa5p89Yv3b7egEdfNT21g6c5Lb+08n8cQEDRjEWrDlNlv5czAGJHC6h5H005Et07HlcIDaGD3s4gDow+tGN3uxhjtpr4VsfGvYmHvzNteawfG597nBsHF8XUA7Y+NDbvrM85J18H/zgBy+Z6EV+tiEPPl0W0VG/cbTEkUsnNOTq9izksD7hqTi4o6GfTPYO9pHg6QOug6a9VpeTF/L/g0JHez/yugjyLrbYRvx04UdOdhBfLpLULkl2PcpvajRdSPC1+OBXpRzlYM8+SjnCJV/zqgsGfMSNCxK4ZGh+wWNbMPDYHo2KuSO+2XvzEd3AufAzd9BxcYBH5yWxC4+/5BW04ODNl+yljS8dwYJBw5m1CyBziJ/xYlvyWE/Q7HKInfv3jMQrPcxB/Oju8gNtPNm0/ZF41A+eDsbw5R97EXMFT/y840PPciw/odE+Tqzmi+r1mTmqn95quruMMScU/eVBcUEHfXIUX9MXHt3V+tkKHfsg89/eAi4bq9lV/BQX8UXXuIKeeDKWHNYHtmN781OM0BM9OYDe7JO80W+eescDbTh8gUYxsOe5i+mbuLyuiyGGzlkMLUA46CwcsQFrnNOaiC2W6rOvd7XgKAnEQ58g4liPdgnthIerkCf+tUtq+gugAkuNZhsdsGhvvwXKRCrBpXMyJCc64e8YWsmkf2VvrLpA75Y2etWrf/ZQ+wT5Ix/5yMUnO1bnk+wheSvsHt9kgqOow2+yReeU+eSzFw8nrehuTY61afZPl2Al+/x3Cfm0wK3ASb/w8k91/TYZ6JVExLiFwAZBYupCLp/gcfLqffuzc3jqdESDjNEK1vjSX3rNE4u8RzK1gJOxC6H1VXwvglOyp+Rq4/cYP/j9yQTeyZbf4bVING+a5+Rg5+S3Obd4pEN1+QAcnFPOZMtW8c53+w52L0MslPJFspy4NsI2GRbA5lD2W9iTd2NbJ2f40UbfQh9sMZ2u1cZPm6GZntpg2Ckaja+swXVh0QGMHSzaNgFkKo+ecwEeGHibZ5MNfTjw0QEXbXY0b8KF0yVUNfptxm2Ilkc5LV59RdNXUcmcTYIjz/b1ZUl8WjeCYy+81G2s6OwJ9jHacBRrgU2bBx7a+wSXX/FLxpV3fXfqt3BddJ1y3fP9rgfpSea+arL51I9fJXnZros063wy9Wcd4DYnlbfQKbfpu5fHyN6Y8WTX5zFn8C5GyShm2xhmy7Xt0jD3xZ7n3gXvwpLlXlndGk++dI2/8cZq36P5WF/6GF+aj8Gfsp1y/CvyrO3As8PJu76N12QVk+TIlwtT/IfPx+e80Mev6o2RfFP/ztXlt+NwGuNzB5d4y/0OLnuRYwz++nPnB3oOYMnlHb611qHHXEBDAWM+mRf68Be/aIONt71EX+aUW7wr8tPq2RcU1lL5xSHVwdSe0+HGWmk99t6h2XxxQJJ7XYKYA97xx8+BFTxafVWkbe8gr8L3bxy5HLXX7Jd09Onn/3pGR3ZiG18J0dXBim5o0ps97D2953PykvvhdkljrbAHgFNtz2KP6DLIn7mhyYcut+255AF/xmZ/5kDHLnLBK6+8ctEls3c2949xg1PYTO6y1vCdSw287JXQIAMZO6jTx16xSzu6yuvG2a9LBOMO5vDpxo/60KZ/lxGXEP8XCpvyGVnJoN1FUOsSO4GjL73yjVgrHnfdaR52IOcDNmBjOroIQBsttmw+irf2gFR3geLdGs8v+LrUFHtglXIIn/fDJroKvL4EgkcvuqCjqIsJew789Jl7xQT+/IOPSxdtcd0/7i5G+NE4XHFj3FxqftCPTVweacPBl3zkERv068ctscS2eOtDk83YsYsf9ja/XXzgpZ9Nu4Q0f9kDHL7mKNjyHf3ZjCzkLefpN0fRVPIrHcnFTvxI1y5Y0AcHphzP92+5fYxQnhQ/zQF08zlfsXvzx3zAI5uxAVp4gGVTuSAa6vI3+7AVWfiaz9FBg7z2aeTjQ3OVzqu3seRSdw7B38UU2cxPtpZb0HlR/mkBM+p/vB5jNHEECAdznnYHg9rVYEpCJpCJKJmGq15aBbDaU3KyaBR4yyN4dArmDZLVUeAp4JTez3Y4BVeTQt3h1ZjJK9AtqpWSZsFYwuxdLblKiv2fK+DqN0Elyq21PehIOiZp8ta/MNump0NEk6oNSnV80OYX/4evdNVnnFzayVefpNM/jEyeYNT7hC+x05cseEQ/2PSOj4QqcYBfnU778L/NCprBmejRF2uSqdjxq1a8l06bALBtVto8SILpsPagf1/YFEfVxcImb310qc4OW/erQ/7NF+EkB3k9EpyHXS082TI7tECbOxJitkz3/K+2gIqtlUe7B2+PhUrRDjZ5bSSbgzsX781HSVnSN1aO2FwRTnTkDvOmzUw5pfqc+34RsUiVR5ZHOawFpa94FgZe41ufejVWTlEHY5NsAbI4sy95yL96ek+esw2u/KrecTyMn7r07lDQhr2NYbYgV3Tl0vrjkY3lm+UfnHH8+zeA0s+cEJf9ytol
iU25eWUugdW/uN7z1fKmXzTDDS6/gydnNFob+N8cJgv92R/cqa8DDzjygYu+2Lxno3wgZtCkjzmYf6Ofb7qYAocPOZNx7bn0HoNj8/Rijw5MaKJVvIgBevEFvuDIByZ76qfr2tH8Kjb96g7G0/qc39Fny2Cri335oPmaHeBsGwwaZFifkps9+QLftXeybpzu/NNWbBzpW/w9NmfBt17D826j77Fe2LyKA3nNRtWvteW88mE5d/cAYPDcdeGxNWLXjM232l0iL0xrQn3htP72b1gkZ3V6eg+29YRextMJjDb+i9c439afHCvX0rLun7Yxvmsd2xc7+Uq9OoB38Dnh+CwcsSJmmovipJjAU/zwqbxojnn4Wdx3wEALHTGJjnmGZnzIau1yOKugK9bQs7bKJXij1YU32GwknuCb8+LL/HQ4xLdchAaabJA+4FyUkMf8BKPQyVwXt9rlO3jW9g6W5LFfxYdfwNo3kNVFgn40yEJuOrOV2uGs3G2MDgtHFrHHR2zH5/DsKcjjwRtfbbawB+5LFfLT17txly7kgkMPdrGmsa89CHhw6PORB008ycVHDuFksQYERwfxjw7+fMcG6OEjDtjcOFnpan6g7/ANvwM435HFGP3Zz3v+6tKdXug1V54Fzr+xIZ7oSf4u8fWRge3IRI98kS/Zd9fEcmw1nbTNPzZq78i28rQcLo7U+HsUOM0BPkGHjdiWTdm2/bSv8LWNgRNzaJNbHBXn4MQHHVtH4Sh44AmHnHQXo+QmUxc/YJrj+Li8YAs6kKGLVDD8W/zIEejiDc7ZD05nQPzY0jt61guw5MGbfdhfzNGx9Y0d+gHN3APb5Um06EFuepAHvn1AeU/8GRf3ZBbfSvDkaa2vbr+Qn9FoH6EGx6ZiAz16mFPso38f8oBbHH35PR7oehT80eUD+vEz+dkMLXDmS7Yu76MrltEWi+QWK8UzemhX460dLJrsyx98gWfxdQn2Ji+v64shNmux3oOhPk8bBzUH5Mxsrc8k4JSFDd/40tK2eAqUgr+AFiCeJkf88Nq28RKyYEq20//BrdwFdnrEq0nR++I2thMjOufY2qcxgdxECq93yUaJb/3ZQl2f2iZ7dY9e8N7BoafvBz/4wZOPf/zjlw9Wtmx16l0sBJ/ttw4XveD1La30yZ+9k90DVt+JZwyOJPlwu0QCw78SjiQu2YgdCRzct771rSdf+MIXntkPveiuLi5S/BtQEkZyrq20JZXkS//es3mxtHpr3yvRaDwaYOkkQXpamOnnsVE8aeY79cp4yoEeW/WP8AWL5+qSHict9NhCv9qvcXxRWb8a3364Lqss3NrlE+21QTqsLmubZFudt9081Lfte/Dr62RP7pX/1IVeZ19fN7Tpal5G9x5/fZXT/s8Gbo10rg/vcNV9ISRXnvkS7M7/2uWdbJCc9UfHpZJFuI1m/XKNxTx4dC26YG2I2KGxvkQKNzmTDQ8bJxuNNizawamDxQMMefT3blMVDr7ZaG0F1kasTebaYm10+srFA/ptoOgGfuHwEQMLR77oBrtw5M1O6wewXTChR9f1184H8489bOCihScYPmLXyuqoLzr8c8Ilz9qvGC2vbPyit3M5+nD0s2GbY3oXP9rJuzjJvDzW3sGig0Zj6xcw5FfOcfZUwIMpJ8m3wZ5zsnd46bUw9cfvsbHsHlw1vjtWe+vssfWOJ1c+8r42XJm2nwwV/WySLZNPX/j6io0TPpvL8dFYP2Tv9E1G/fHlU3mhPrDWr9bA4JInfdV4wk/GbAK/IhdsHIPh+/13hsBG39wigzntT9Xw74cyMsXLQRZd62z7EnRdSFl3W8fJ4sLBu3GPg2J/LoIPnv6dzofbXqevg+1RfJnj0sg+p4uu9gn6/XDGBvDL3dZdexgHXpc+f/zjH68vbVygoKG0V7fvdLhyoDeGNjj8FPzlJ/6Vf/qTErbybp1nq+TQZhO5F60u1LTJ6XCLp8O2tcM7uf3I6XBafDhH+GpKTmQ7dOS+t73tbRdcc5mcLpXw65ISXXKb92KD/A6Y8od+dNiYTvmErnzCXmTrUMr3bKvgpV9NBzkOPlpdeBjT1wH+QnwDBX82wIuO9oT8y7f8RVdxQFe23weesvNYO1/zkx8TWrvQ0Gbf/MhG7I+fQzc/0pc8dDZufils248u+MDroA8OPNs0z12cgBNT/M3OLkX7WglNMirsqU0+fvJODnPQpRL61kVykl188JE5RA626JICvuIHanPQ/BQ7XVDDYyMyiQVwxYQ+Y+yGbpdZdPHeuoxHcGr80dduLwWntZy87AiuOYVHl3HmK90VsBXzvD7jm4fNGT4xT9lcbCvsU870Xq4Fky/1i4elp49+6LI539lP+PFa4R84xRs56WpO0MM73sbRNe/FMB+JG/IZLx6Lk2hXw8fLA1csoK9mYzKIA3K+KP/bAq/7Yoizelrstk87pzZhCwLvnCqAJSql5KKdMyVpyVVi8ZxBAEdBT2DAq097x6J7dT4twSRXgRidxvX3FID33pceFsEK8han+nYMbDzvjYeL506AcKJ/8glP8FsQbW6ikZ32HW04knr+SzZ1Oj8137OqiafOBg0un9ouXPYro+iuLLXJtHS1xZtFx0SXhCUNC8nvfve766bZwi6uJKQWpdNGxWRyru/izQ76F/eeT6Nh7NTf+/lk2+3Pznjf6yfvzjdtdlAr6uyUnaO5voDDbuzDZuagTYTEyG7pQIboaaPP5n3ttfxWDjhooNfiA1+fOPRU4rG6pePaKJvAW/usfhtDZzv/xT//Ri944xazFuzgyb421M5O4W6ftkWwDTg7LG/ynDIuHW1FvXFaXFyDR1l8ByeLudxZznxeDJ9jvaPZvz2Dnqd/x6fL1qXPJnDztY2D2DIHu7BZXnCzTbVxmxPwNkJdutiArh3jxc5gLfZg9eOrLz8ubePJ51JC7vDkH3UygltfMDnbppONNngwwa5fyUGufo0H+zw4sCvr+pRNosUP2f2k50CTfG0q04H8pz4rb3r4cwx+vhdv+C2dzVXN50IzXqdt9XdhSlY02Ry+saW5skfXuNLcOP10T6ezb9/RImP8o9t7sPF3AeDyOzn0J2eyy1/pEp46nK3DiUZ4i38PL7orx2PtZIyXmowOXw732vL4KXMywc/3cLd/9VsdwPcU+9EB15i2p7Uj+tXNMTGv5CsyO6iki/2idvGgTae+6IXXmqZennA6AOlvHbLmKeZf//Yf3HKLw1a62HtYW8178YFmPONFni4YwMqNeDXv9ZUrtcOTy80RMjpgWl/NnV2rvIPBEw1rvIOzgxQ6f/7zn6/9Ev7eyV7uZEf7nYfbZZMcJ/ei49AKnj4OZuAdSPU7FDvIWQs6gMPvMspBurMB37i8cZClKxvmg3yGH/3sMfDcr0XoQOcui/oKwx5Gvuqg78CIF1n9KZp/uD068rw/UWMz9qQnmnxLJzQ61KNPLrJaJ+ReMPiRsYMzmX31yg7ybV/m4OEdDpuxA9mLX4d+/WLEWBch9N6LygvhXyx8TR46iAWXCfTES5u9xVrzrrVaLR48dO6
xvjk0kw8uusHlv9ZKIsJjd2OKyxj8xSG+ZArfBUqx3SWRNZ6s5oTCX3yHf3PahQ0+bERfMSjexBoefMjO2uSAR0a0FX18tF/+uMBR+Bo9NiSHducINLy70EHfvpkO3vWbA2iSid/RpK8+diCHmHIZ0b4GTbTY2dzrQgdu+6v+gep+VINvDB/FHGYn8majbGzchVF5Al4+NqatsAnd2ZqPxXL/wHQ0jctHzlRiurNX9NAobi6iU+hDV7FgLrEvWH4PpzzZOtSfiaFPV/zLJfkUi+KvmF55ilG1eJGj8BQvzWM+LBZOud/M76/7YoixW5g5NadV52Dv2gJAEagCpMVPXwGaUy2sXQa18JW8oqMOLwfu2DqVrMGCKYjjp9anFKhbG+8BU8DZULUIGrdhtAmJjr4N2Np02f4uSpK/8Xg2ERbP4rcXPTt2Thbvfgkib7aIZhM/mSUvEygds3P2A7dyaVuAbZqya3pchjhKdIJtODka7x0ti6XHIi5pScbiQ8LzjlYxJpHTE6zJf9oFXfolY7rEF7yyiXZtFb1sLPZ9YfXJT37yVPU17xt7xReg+rXXhsm49dl3D+fkwzYeNmQvSb2/sWWnjRd28H/L8+eEbc7U/btWxUVxn2+Ke5s/9kK3jc8mdfYs4sdZAAAgAElEQVRqEcDLBaHDbJuPZC1/wK2k+2Nxhl7+03ZpYNHsIB1vMPkdbfzbLOIH7oQpPqrh0bOiv3/LCC2bD7V+cMVz8RP/6C0tNOsHf8/n9YHTVqdHn67T5V/hH+/kxNPBnd3aVNTHp/rZCP10zF76bIxsLLqYArN5HB+yohU9NPt6RA5CA80eOGu71gibFP34glkf68+X4fYVFfoefFe2cPJXfmLjfpWDF05y5S91stH/lCc5ijswnmRP3+iBI3O09K9eyUk+NgUXbrJF0wGKbMrGz8aIsdUZ3AnbOPrF3kX0aWm8r5vIxF58xMdszoZsE+1wd12ITjWY+FWffgLroIc+fjsHTnon3ein16kP+Ep2Sf7kLj+qt9yzU/yWxknfe7ROnq9icOcFfPJsu7X04XaINy7XKvS7Z9clvTa7w/JZ7LDhvQf94rLx7OqdPPKo0p6CD8VwdsoOiyemFHCK9aQ1qjhD2/qUntE3Lj7bQ2Sz7BI/cHDJKW9FBzx+ClgyyBXJZ5wsraP1g3WpYG2tmMP6rM8OM2RQ/+lPf7r60LE3cQCXYx060ZGv8XSI6ld9e6F+fSevdd8B2gHQ3kifQ20XWunh0sMh0l4BjHkcHYcoMljvyeCgjidYearLKDLRBT90xRq5HMyyE3i2pGM2V8NzAG19cRh2aPZvGdlX4MEGeJKtfw+JDx30FHy6lCGfSyA6uyBw+HUQR0cMoCV+5Cm5w5jLN2MO7myhyJ18YIy/7UH74gisfnqz7+5Nyc739Od7eGyXbVzakFXO6mKFntrF/CXAcwpcfsRL/LGFfTKd2K+1Vi2O2b3Df2vSzlfy2keQtTNcfmIPh32ywVHyH/3w9g5GfJAre4hXMcRGwbI3ePYw1tc4Yq45hkdyaIsN+/hkYlvy8pc2n6JlvPxBb7Y2T/AC470LD7j06UIKb/KKqS5iyKaNNhrihlzoRI99yE4+MtFPH1u6FOJrdNhFX18niZ9wu0BqHoARa2LXXNeml1ohq/nUj1DsydfigU7Gg/FuPoLRp9Cj3KUNptgDQ0dx0uWmWp7I7xeRWwlHHT01GuxPXj4xd+krVsRCXwCBY39xCw6fzf0bq/htzMafTHiqWwfo9N73vvfypTHzyzj6ZHlRXm2B130xhEyTchOHdo8J2pggkLw6qGn7FcKCp3CSBVFAgMmp0cKrAE+Fgi98QQWmgG8yVBe04YNT6i+gwNfGt+CSLCURk4puFsgmVjyi12SovwBeWSQWNOm9X9CgsXirBzomh69j9qIHziYAOL1LNnsZApZO6SBBSSLsbpLA219F7+nIPuRnD4nKRkEfeyX/1XikNHHXTguKTvHj/zjmlx6LsuRrgc/30cHbY5MF97R7CaWafuynwMPLw1aSlYQh+Rcb2R1+C2s+zc7Jn2z7XjxtXCVzffGCd4/G2mdhwDYXo1lf80esemyMP/GJT1y6Jrc6esW6TYmEXVzuPIC7F0LL0+fLeEj0fs2Ahy+7WcjQP+MJfvTVZxtu/nyeDYyBs7GUX/YLjPUVuP5RWr62aSFbMDvfznY8ksO4zaSFx4Lfoqr/MXrRyA7Zv/fGXRgbS39ttlL4d3m06O7hpfFqeGuH1U2/TbaNSl+m4Lvx3ntzqHe4/WlZF2LyCVt0IZD/wbocANfhvS93+EEfPHosn3SwOW/j0IYPj1O29MyW8NDulzf0l0f04wlfHxh4NtFsE15+Ca5LSLAeMgV78kqmy5G3sn48dc5H8fG+dtffFz7RK48s3fAbO/NLdNWKceuzg2MyJYv32sFG17s44v9snS/6ki05H6ORDFsvbPhq86Gx+O5FFN7NieD0KSef5lZjvS/vbZ95O5tuPlw+a6Ntg7nnlxPmEnrK8o93OkWz/vK69dqm2Hotz1bARS9c9WM6Glte0ck+6vy+8aOv+DVHvJtbin4ymT/WmGisbeKz+qKjLFyy8T360bK+t+8Rjw706Q5Gzu3CiGz5kmxLp3XNOmiepGMHLLitZWCSh6zaeIIB79AHllwOndZLBzo1f+lXy499fYGGfZB3vu3Cx0FWX4cdPwi6IACDh0uUeETfuINoNgVnT2dfSD77JLI6pDrsm2f9mYy1oouZh9u+uEM/2mwHli7RZl9wcneXWuWT3VfYQ9CJLHT1f4m1ZsBB10HcIdl+w16lgzRaXS74kzN7GDFg/yjuPWIADv+TD11waMgd7NM6Rg/+c36x7thHy232Ny6Z+NN4XxU5+JOPHODkTzDsiC8/KNriCZw9LRp4w+E/aw0ZtD3ke14hMzrs6lKMbfiILPKweNh1lU3tpxVt4+3T2AtslwDG0VHwEIvmaHOjC5HmLPjmOL+Tgx3EAb0UtTF6wQPfXyrQhX/JIe6M9yVQc1VtXE0WfrHfFDP2YfDhNafYEC044PHHG9/+zaIuKNCiYzbkaz4Ui+zSJRCd+jqLX/WbM/TQT44uhcQP/fkCHn1cUujTNs6OYsJ8RMe5UJyICzUY+OSlizmq8Dl96E4fxbiHLzx9WYQuW7gcK1eB16Y/Ggocxd4WzYfbnM3nbKqIDzEjDxm3x2Ij+aL5TgYFPXLqR4/O2miB6WMENNEq3oyB7724UttzkTce+KCpgNc2r8yj8msxAcf8FffF+YX4olwWeMMXQwyuMHiHUH0eC57A0W9Ra8NW8HKeSSBxhIsO3JOWd3glqYIBDX3eC4ra1U0Ochawl9C3UgA1luwFUHK4cKividHk2boAXlniA489TGyTUPJJ9pJs+sCJLj1Wh97DwWvtQWZ8JJMSPPw2GS2Q5OAXEzs5oi0p7Z8NoWmSodEmJPnhslu2XBunwz+t/U+djK/sYPQlN7qSpERk4uonS7GWbdY+YoPsP/vZz568/PLLV3uTyKmfpCr20PdYjCz8kgTcHh
sHlyO9F4Pxi086pLt3OpK5vuyzfcXJ6u+gtzYsNteWi6cdzfxQHBe/eNtU8f3GlnawYkaM8rNNQ/KjUeyq+QfsfiUGNro2a+wWXhdCcFvsi9f428zYmLUAJ5MaTrTXzlfnreizgMkvHgvy+l87/9sMiitxb26Ui/JncMvvbMcXLfLiSUbPY3SKH/Q9FrUuo9DLn8mKjsWzzY/xbAx+49FYdKtPfqvXjkXLxraN6c6b9Vd4W7uEM2/KNXDhdGkTrb5mAcs/7BasdvbbOlv0xQy/efgN/bX52n1tYbPC1zZO+IQXfDzUS0M8kbXLKhv2dFobi3N2K56WfnBrz7NvY2bhyOPPINr8nTGSD5K5+Ck21fl8dVw64Zxw+YrO+WZpaEd/6ZkPa2u6rS+STd/ZDu7sj++Ob+4Dzwd81a+o2WbrlWNpJUt18+y0p/H0Xp215Si5rR8lNu8bX32Xrn7vJ098gpNDV9+Fz1Yrz8I2Tp7W7/J2cMY2r8ABa3+y+sR39fn73/9+HczrW1vWPv2nX8w2/5M9m3kXN97hJhucZFhfZbu1ZTw6iFwC3sryAIOPA1Iy0ttclm9a88GRVUkedKyR0WwOJ3dyqsVGxXs5nz59GYWe/U1rK9p9SQTOeqvoe7gdmvCRA62XDjfg8VH7wfD973//FXPkdnFj3UfDw6/2Vg6H2a5LDzz0tc8zn80pctkj2Sc6yJLV5Qu69u4OdfiKBfnZARmefaJ4w5fc2nT27yPZXxQD9Me3+UVnuS97w3fYxBO+vRr9/vCHP1z62cfiSzcHcrmgL1r41DmErejNTvKF9eAd73jHtR+imzGXRXxPX3TYgWxk9a6wOxg05DpyqMlAb4d+ujuIK2QhG77a5OlArgbLF/lZrY9NXUiQid3J2d6MnhtXdCG/NZgcfcnBV3RHS1zTE31PJXuJKT5Qmg/0ty/wDoeufNBc4398u4Biq+yIVnMLTfs7stCB/xRyo5du8WVH+lmDwbIHG6DvYkPMGwfvaV6xs4uY4io/6qMLu7AdHDT4Ar1+AOZnPooeu9EHPhnZ0yWD8S6B9nKHTPqdK4pB9MUCO/M3H/CTvi5e9cExv8MlE93pJGc4lxjjB7GsH53dP5jHCphijS2KM34joz7tijbbsgs+5TF08GT7LjHh8zuYfIA++7ANXfoSzTjZ1fk8+dTsiE7zwzwvzsikTSZ6gSv/iAcxh69SPKKXLuV6PvYoaj71b4+5zO5Cubi/gF6UywJv6GKI8U0+jwBQM35JWaBwrJKj1LUFTI57Xh19PM6A9o4HmvFR62/Me0GmLRB67smSHhYyi12wF4OnJTlWnuDotYfsbv1NKAlKckq+8P2S4pen7LN00fOYpJJpt99g8UEzHiZoycUkB0Mutf916Lve9a5rYQdnopWMS7Ls5JG4ugAgb7Kv/EyRnH7h6aJg+5usq4+2Bf7h6WaB7BKKWvK1iGkryZXdV07t5K0f7XimW/Loz5aSDftYLBzAJQm4pz0ka37ZsWJJnYxq9CvZvf78unEqzvjPwyd4hFcsLc1sWN/CatMtPvm8OcZ/xd3iZQ+yLH70wk9O9T0+4NfHYNBko+rmeBv2/GQT7gCMBphkSpf8V53+6j3EWijQzj98GQ7ZwILZxTQf6lvYjaP02j609uAcTfTA7bPyJ0OXSdlAXTxvLK9NTzt43/gIT39xnBynLfRbkO992ROdteM5L/psWY5HJ/3Viw/O5t3TJhlMjw02fDj1Nae7dOrXW+P5l/xwgs3u9YHrz5bD05ec8LXbOLWZFIfklBP0pc/6Ew+yycXglu7aLNmSSW2zC4/OYjEej9k6HxcnaBQH2smlz0HeJja+wWWbYgmcAwzdsiNafCEeTp8aw/+MtUuQW8lvxQH4nqWfPCtzfffGkjd4/H3p4rCSLPJz9sxXa+9stLZaXz5V4Vnuim61HHRPRl+cOuxYp/YHFHjlrXDLl+tDPinnxmv1lKv7mgV/MPdkW37neDjBpOs9evJu+4h+hEl+8Om0NJ9H757N7HH60aOYRDe545P/7BHO/depY35dWdgRXTxW1/qK5+p0s7YlQ/NRTJ06J3N2dYFCTv1kRlfbPAsfXW3z07jHwdAei93NnZVXzPzjdqndetu62I+oYsdhC761vT0UmX77299eFwTFl4sYe0Lv9n0Obi6PyFMeMofIZt8nL1mf+vIEbfsj+yV7RnTkvP6hanqSS3EAZgPzUm7MVubLw22/5/ANn87o07MfSbQVtiEbuPYM+vygIDd3QASDL7h8SzcHZpc19pQOkfaU9KaDtjh0MdCBHT6Z6Wyv5/CIDvuiL1eW611u+JMUMNrswE/4y5t8wU593e6A7yGzuDSv2/+8+93vvi4S+MwFBJqKOLBekpnO4MnmnR3QYJcujPAlK509ZAYLBx32aE2w763dRQn/okcHtdiBC856UA4vPsuz5CW/C0C4ZGWr/MFX4hd+F2Xs0yUCvuaAi47mIv3FAR7WRjzEpVgRT10ugSFn8Qi/tZS96SEW8TBGV5c4XTImVx8vmB/4sCPadFHECVj9Xa6RAwzdydAlULYTO3jzBfpo24t0fqMTPHEDDl1w7MjX5OdTbWsdGO/ka32mk2KetT8SQ/obaz25AJ8WPuQrvM3HLv7EIH7G2KqcB62LJPqxJ3+B1e4LI/D4ko9tytPwzXU6oQOHLe2D+Ng8AQ83uxfXYoMfyIQG2ZMl/9Fx8zNYD1rmCV+xO5nFnTzdJbd5yocvymst8IYuhpDLCRwlITC6ZCSIvHsa46ye8Ex4zgFnApToWxDgCkLjagEEt1JQFCBNBrVA2okSzgYPPmRafni1KdugW/Nt4GtHw02kYCe3xyZLUEtIHoVsyk4+feTKbuEIbouZCSWwwwP3ox/96Mn73ve+i76Ea0IJdj5Ix+xc3eVTdnnMPnyCn19q4NIl3upNOrXBrb3qXz3BsC9bkJfs7IWXd3q2cCWbd5svGxA0V/Z8HM450cHjz1549jUJO9FNfwVdRZ2PJJQtyc8eXbbwkYsxm95sAGdtsTGyc8JBx9jiwfVeaUz9vEcMFjds2WaBbT34RPuch8ka3+YFOPLu3N12ePq0sxucvgLStgjsXDOXt5zypGcwG29+TbSZjF88k6X4KFbQsPFrUdlxcVOM5PfoLd3a+KJjo0iHNknJtzzxrd+hG6xn6cc/vOB3Xgb/KoPdeTlprCzRhUYHBwJ52iHAuzpZ4p1u2UWtr8N4G7H0Sje2tlkybtOzlyDhNF/x7XJKu4s+eDY6zW+82RD+6d/k6yLJhszGhwxdviR7fI3THz0wfTHVJcP6NvpsyFZgoh1cukc/e1f3BZL80wVKOAu7tuYrMHj0rCzZBhya7Jg8a7fiwOGMXbvQBIN+X/r0JVd+Lx7Qx7eCnsIWbJ1/V598lBzq5i3cdA5u6Tevy1erc19o8ZuNPj+cvNI3vPgtnfg3hpfxckj9W2vLS/KZfOqg2YVAeMblufJweXHpgLVWVNLzWcdTPvrLx6dNtn9zpXb0wl+d6gunddHaSCeX9ItP/mCjs+PP0yGd1
x/BZ2s1+vm/mNRX/BVv+monQ/S2P9+unNrxFKdrk7Ufet677AEnn6Gpvz8byy5kFIP2HejKWXzvMeftI80P8Sp20ds9WHKbt9rGoxlPew30xJ2Dqn0Lf6EJr8M7PLkMvMMdf4KzPyFXh2R2+Mtf/nLBKPBcGMhrXRA5vKJrP/ue97znomvMvuLhdqC0f6I3nfqy1eGPnPa8xrUdgMGIM1+YeVe8u7AyD8jBnuTI58bZQo4xls58Ub8Dn3zmzPDSSy9dutLZv3nZpRQ96AaWXezT5Ky9LCITf5vPLjbAWhvg+aoIfXbHV78/a6ObP1VjD/tXtmnvyW4uQuQn+rUOshf64PTlD18wOPTzJR7w+B4vMeeAzId0MW5/zz7ijS7s4x8W//znP3/52TrYJUQ5+TL6rfSnN+igKaY8xaFxfnRRkU/UXZLVR1ZxTT7wZGQTuosva0l64osfXHrCgQ8GLp8bQwNfsmyuhg9PDMJTnBXtzfFEjw+6MEMPHH+yE1vwH7piswsePIo5tiWDGOFLPhdPLtXISBfwLhq7TBJD/Ix+sGQST+yARl8H6WMvMolNcOJeHOANTp8aD37DV9zTBX/95FXECPvxV3OGfF300DX/6Gc7ftb2lKfYkW/0KWxIP/DmEvrG5TK2iyY7sS35jfMBGtpkIHclGGNsj654kiPsJenKj84LYNGlk34+ST46nU+5G0xy0JMO7Mt2aPAPfuQ31uWQ/hfltRZ4wxdDnMD4HFrgqTlX8JR4vAfLUd6bmOeFELzoCQ54BX+BQhWBUELbgIluwaUGq4BDQ0CRoSeZvRtXwCgF3PXy9L2+xSd3i5NLDPjxDTd5vdcGB9eEMUFaUN0EWzw8JS30JQgT16LksdGA3yRZvtkIL8kxmORZu6RTNZraPWvj5K8vuyz98NiI3JKBxEPP9O3fOshnJa59x8ONe1/VLAy++bJfCLIVXl2WSbjiVEJIdgelfu3FI1+yP1qSu2RCdn1oSZT5GP1o+dJgLxOzA1nEVrU23fffNriI3Mra/147v66fs+3GDp3I5ln/xSc9o3fCeP/+97//5KMf/eir5gi5PW1s0Qk3mYoHMDZ19DWHd57wQz6Er706nfqB2djT1rc2Szf1CYv+GbvBLfzS2LaNrflmUeliEb2Nw97D836We/At9OEvnZU5fdFQel8ep56LL9blVDFtY4IveL6JXvBLp7bF3oYpO6CFRnTKz3tZYNHvgojPg9e20SCLHIcmeuaQjW7vS39l049GNPcrHz5qPUo2uMlHj/jS3dPXX+sf/b07NNADXrKRF0wyLq/1JXvY4IubZA4v+uAr4XaR1EVV9j59vJdhYJILvbVZfkmH9NNvs7a64JG9io3moPd4ptPqkS7pccrb++qbnOGWU7Zfm6x9zUTP/L9+A5cvtl9fNtFfe+XFV57W1/xKBvCtM3KgvOaia3OyPuuEXPxwOzw/r7THOGE2rzVWH15K9ilP9r7rXl/vLow2mOrgy+vJdNKN/vJd2fRnR/3rx7VvdlfDUePVO1xxSA7+0jYP0MiX8Ni5CyzvYNXLOx/WH28wyZvvdsw4eh3iteVLNuLbLnvkAfL1hZD17tRJLuoLIXTNFzTE8a7/9NWXPOkD1lpenNt34GEOpx9cXyg4iMltrbP+zMpepXiUiz/wgQ9c+Us+sjewx1G7AEGbHRxy8SCLvRKZXSI52KKtsAG41gJ99nYOsuzhoIu2wyxfmQvos6Ef09Ahl7zqT9DwTUe06MQ+dMSXLHC9o6VtT0wGMtqjdVkD3l5Pnwstc9FTH/i+FuqyyBdEvqQnE7ouKOz1yOdizOXQ2syFABp8KzbY0CWAwzbZ8EavCxb00MUbD37px1txY3/BPmSET8cO0ODkOvI4U9AFLzHCvmyCtvob3/jGVbP95z73uYtmeY9P2dXDB/wil7qoaD6wPVr5gt3hk8uFFtn4Cg16dPmozeflFfDkJLdC/2KBfOBcgpg/3hUyO7CTSyyRC5/mpnWwyzn2Zw9ykh2u+artUdDBhwz8oI2uWC+WwThL4UUO8pDBWQMPeuBJfnIkczy7FGIbvkdHTO0XQ+yClvlnTLzU19dA+LAP+cB5py++eHjXJqN3sYimfU9fFhmTF7r4k0/4GQ+FvehH5y57spXxf9x+KJNDzGm0jNFZLIuBvkoyJ7yzIZuLRXBiA301Gujxj7kuhyloFo/e5RZ9zW12VPDk03JiealYqQabrPm+81AXX8bFEd34VSywAT/H72L6orzKAm/4Ygg1hmdsj8Ww9taSh0nKcYKjJKXWB1aAmASCrUNt4+H7R+QEYKUgETwOP2gb3yShra9E4335tyCHc+J6V+oH79nDbvLrowf6Syc543Xiw5GU6Wkiefd0uWGBYBuTziSPt4UlXVZO8jZpjJsoJpySDNfLnQKPPX/zm99clwDo6uu5g/KMF9psQQ+LA536fNQ7ubPf2j3a+GYrfJKFX8mR3dBnG8lWgpJIW2x9SWVTINFaYB026R5dPDySV19jlYwkVzgtTORtUc4f6Z9syX7GzxkfzYfzy6K1x2mb3k/d2/D4pcQGgJ3V/SPgwSerd3FABr+o9e9HFDv5jD3ZEn02RtOj3WacXtE/5c3GavaCl0+bm/C1127k8IuBZG18YxqPtbl2ttY23nwLdn0Dxi/7FppiORpqsPUnv/4uhNhD/rDQFTsn/egZj2Z1NJNtZcg2a7fssrK0oMI96S6/e/Rs5Dv8y8HpkA/Eo7mwJf1sPsyJLmzYAd5ph+BsfNgpey08PHzIIpeRJVpklNvQXfrJyBddklhj4HZZhWcXBatbbXWwNlXJh3a80jccttAm734VQ8bF0U5HNJJXX3/KRjbv6bv4p6/zlQ1reC4g2uDlf7FMF3ZMDzzQK77U/cnb6r1xlZ7JsX6NFhi0yMYWXRDitzZe2y1d/cpjfI3HP7iFpascX2yBT9eFvyfL6qO9cy8eyys5diz5LyVuZfVET+5pbbAm2YRuzgpPnf/OvtM2J1zv8Svv4l2ethZs/+bmM0c2ht4++r2fuN7L++kRXPjbr50t136nrY2hvfbuYGqe6xffeODf3speT1/j4Rvv3z1KhrU1OHhK8dC4MXOOPEp6WRvDMe7X+mLA4ca4GLX26hebSjK17iYv/dBojSMzGtZkpfU2ebKRuQwHbn+C0br917/+9eoTC8Wi3GAdh8+Gvirpz7fwcOnhcG+fRC9rr3G5xn5In30jffqqGz2HZfuN9nMuSLQdXvuqQP5CQ85wGIuvPG4/yMZgy530ppt+OvTlCFuxw67vYOgjvzrk+nKnPZD9C772bC6dHDDFjQO13AW2vVMXQOzqYsmfiLEVee0L5XqH+C4ArIO+cpJL0SWDXKjtayj2Ew9y1SuvvHLJLHejwcb42Ve6iEKDn6INp3WJjdgYbT5ka3OB/PRiWzZqb94aBV7+ERNf+9rXLv5f+tKXrj1yNtPHnuSgDx9nd3Kyo/Fil9/0kVdBBzxdxCG9yK2f3ODJ6J3d2aaLGb7qaxt8xQ1aaOCBjnjTTz+00KFTe0Yy9AM3
3C7p+noITnYyrogxbXLwr70KnuD08UOxwKfZmkxsRx4+ID/e1iH2w7tLJDLyi/G+BAJDH5c+5o940ocXmtYJduiiyKUVXH4xLpZcwJAXbfsm9nR5RHawzR96mbf8pI+sbAa/PM2WYp8M+IqT9itihy7e4bMjefM5+uzBbuZXf5KIBl7oKfBr8yF+XY53jgJHpvKaWON7PmJHNPDGi2/IC9ZzFjqlH/x99OPJVuiQm27o8ZNYfFHuW+DfcjEkADnII1A4WlBwLofsI2gKRo4rycB1IaSYECaBZKmu7T36Ja8W7GjBL6mdbe8Lr73PxfxpSZ974/qMbxB6V5Z+QRt8NNmrhz4ek04QP9x+LRDA3tVdbrixNY5W+qnX9snUZqEDE7u5NGF3v+h0g0ve01boS2aSo4SV/unW++LpIwef95B/v1zJX2edjbJ7+kkC8QBDJ0kmO0lc4oReyaTmE7BtkqITX3yyk5t9No6WdmNqOqEnftVb0FvbnPaJTjTyPZyzXR8e2WPlKEbIQX+67ZxqvHg8cSVbtmsBzkf6xBS9LTgWB+/x6YK2yyZ8Vv5TD3jJD67LzDZ5krIFAB3tfMJHeFgA6fCYjc6YSE/w2b9640zfGa8nzo6DN0/IeJYWp3OR2viqjUdxkgz1nXTjn47J593T+/JZvcJvvNoBqs11tNXl4FMueH4VssHwyDu7+TAeHe2+8gEr3+PlAXNuWtACJx+1DiRHenpPPm0HCxulLsXh1WfBx1MfnuFGS79xm5jdeAe78KdeNlnWnX6RW12iX10se+8CiswrWzqBDW99SSe8uvw69QmWvdmwy5m1WzElVvgQDL3jvbGx8WceNRasuj70HHzZY2lks+xYLC3utsFXopP/oxUMng5scny6ru1OOs2r7LF8g3VwyR7lDrmof8fmnuzHaPQAACAASURBVG7Zhpw98cDTI8ejU/7evLh2bg6Hl73u1fdgw0MfT/lWLrcm9OVteu3e5N66sLJGV735Oz1Wnx0/dUtm/dte/WobZ281e25dvorGSRe8+QVfTJCpNbB4ik9f9IBho3y8MkU/ObyvnnDQ1Qemsd0T6CtHaGcDtJofS0M//NZxfnTg67APH70OL/ztIBq8w2XrtDxnLZcPxYJHMW/QMK6vH9Lw0vbIJeknXznss5PDHBkdiO0NXGa40FEebntU/S6y1A5a9gradCezufv2t7/9gu2iBT95ySWKPVd2ya4X8VspXvvR0zh65Gyu6avf4ZdfxI09nXcXMvjZz3i3x8FbHvOjYYdvNvV1Eh26MEDHF1dyD53l5exgzEFfvnZpgJ68wo5dntln2UPTuy8++AcOW7oQcDB961vfetXkYjNj4pi85NOmM93tx+nGZnxaXLWvs9fnM2MuScDz89e//vULh55f/vKXr3UUT77GE49+XBUn6HrYGe2+SkK30uUAHvQjt7UBPt7N634Q9g6HfGjyvXgRk+i6UMMTDDuvHOgqfN3+kq/pRAd+g4sH+8vvxSEdPGD09adF5KaXGi1ziU3I45KDjHynHx4Yccz3YlNMdQmEL9m8Oy/BoQ+b8Am67Ise/C6K+KM/cYTTn6DJAXDhuKjhL3qxE3hzWnx4Fw9kYy9zn129exSyZTux1zxjL7Kp9anRK67oRx506csmZAeH9n7pwz705Q/zjY2bv2D5D54xMvKFuKS/fnOObZONv8iBd33o0CV9qi/lbiU9wMc73/OpuaR08c2ufLX5+ympF9VTC7z25PM6TMPAJWyJRlGbyNWCQfBwon7OLgAEnUOkCWlc4fxgCtpq/WgFVy3B9/fg1+CtFETRbeJ436dFGg1FkDVpegdTX/SqwWxfQRqdM2jbyLS5U1tgTFi2sugKahNvadeOl39Q2j8kCJ/92LGvPrSDx18C2v/NvbH8hqdxk0hiaDLikw4XsaeFLehARgndxO9vyPuMHc3oQNt2E/20Mzg8iyl6SaZkYguXTfh2SZYPg++dXrWXZvYuSVmk0Vs/aVfIafFvwxZNMOtvbV/itEEvQUW39+rsmq7V4EtqO3+0u6DRBnfy0Jd+/GEzxE4WDH4SH8bZ0cLcIm3jI1b4q8eCBI8s8Ttl3TkPRsxmExcS++uPOS7ZszU8sp/xEH1ja7f1B5jmtHwgD7TBLKaSYWG1e/Ld+jB/h7v+fxYMTxvkVuJ3LlSLmwzmhA1eJVmisf1Le2nd47m8T3n2vZzJD9nd+JkPLdRdooSTnYONZ5cnbVb5N/r6tG3ebB5tELQ9+k85bNLkHjTAoE0OcWgsP/eljD5PtIoBNRibF3T+cfviJ33Apos6farZN526vFoctOlevfjZzVzCFwxd4rPyrd3RtxnsENcal33CLw7w3JjIR+ozFoqX5EyG4Ip178Xpwga3tXby55N7vE+cYNR0yq7xW52M2fSKmWQObvkvztp3eYF38BJbcn6b3dYHsH19oq2oy6/yldyvb+3nvf/hQnKUT6wDrYGNbf7J3iu/9toseP2bK7TJLk/L6erWhXJmssuj5VvrgbzvEgz+wiY3Oug3Ft8TFk82WbjkjNbKn55spn9Ltsiv5/jaRTsfLX1+CU9s9UWPfvOxPSg/uzDIzt61yRxtOPrYzprGj8VDuoHVjm92EGfaxQo4vH1pC1Zcr32CjR6eYPCXT+Dos+cB20Ea/XJiuMb1KeTgYwfJvhygi70Amg6aaODjQgNPOQs834oT8NnafhJuh7r2ffYQDnjmFjrBOQSTxV7cIRf9X/3qV9e/h+ngiLa9R3sq6yJevaPvQb986JKBj4sT4+1V0AqHHeR/sqCJt3jwbn+CJvs4XLOFMTLaq1gzzBNj8OgBlg3ZxtognoyDZRN/ZuaQ2QWF/GIfLJ/bT6Mjj+Fvf4+fSwS5yNdHdHCBRi6wbImnQ2yHd3LzT1+Z0bc4sj6S2QUEHcSIfZu40SaDfaDnq1/96mXDz3zmMxdPpUsL8hfzfIo32+tTigV8yYqONlu4RCDTw+2AL+7Yg15dLpJHTNEPDF3gs4t+sOi7rNGPBhnQsXdwvmiPSp7mL7nAo6uAZ7dig9/+F3v3kmPLUbVh2EOpAYDETQKMLRCiBw3oMwJaTADpHwFjoG2agCWEgA4yIITdYAJnKP9+knoPH+FdvhyOwY0KKR2ZESvWfa1YGTvrmK41/khGOoGb7T3TGf2hzzftO3jyXsC+5KJjh2i9W8HJN8DxHTTpii7JgC6dmNPwbI4s6LlnS7qz1hie+gKOD5DFPH+iR/EFn7xAF+QptsgihsCkD/Pols/g0tiMHOTBM11FJ72yDzrwk59M+KAPfNKVgyZ5hO7hhActvuGZv4nZcILLZ4zRHzxkta7m3rgLvL6YB+Me/mQpDvTk7aJjMuCLDC5fxout3n9eEn2++ZAGXsvBEKwMwPiSPidiUAGnSYKc0DhHkDTBuozpJQeOwIAVjnC1GYJpk9Brp8OgyTHgcdXc53z6HBnPaOgVADlu63sGf+8KrjnPggW+Lb7WYY2XuOhH0qETzmpcEoVn5bjHDzySt3V0Rk8Smw1LUJKrjVOASxTgNHQ7KBD
o7MFGAp9Ok9vcL37xizd+/OMfX/zgzzo0JRN4XPAmVwUxnRTEJQd49371Bz8ceIYPDQlTksJX861Z23aPpo2wr4jiGT5j8NA3n7Mp4CU9Lz5j9AeXF0RFQTZcW69P9BUPGYKtzxYV8GCSI/rB5jvWtK4/4Qo2/0Df2PpTv8TYgG1g+Qa9sl02q2A0Fs1s6Dm8xpJp5zdu0oMxvGmKWZsHXK7kaR1c4rV41J+0Vo/Bw20TZBe+wZbyQfGd7+p3c7EOn/GX/utPH91567TWnvetvYBurediqfHWpy/j7tv8grPuxGluN8ml0brtm6ef9LS6isb21u9z9qFf913g2EDeAE/3e/VnXwoYuY1PdoizG797m7c5sPhT1LdHeG5/QM94Bz67H+DLhs8nwBTj8Fuvz0fAto8kKxg87J9+Lf5kW/mt9Yx/BRm+WhO91Vn35qwlp5yLX1f74eqx+3s2MRdO85rn07/zifgFY63Wur1vfvnNBskXzMnD4osu2PA7pOuX5XzRfOvSJVvwWfSaP/nf56Ub3/UdCsGZH+OHnqxDI1zpRa6UszQ5aOezCfzkkdO3lWPKXdGqP/MN3Jtr0u3mGjiT0Xi4o9X68opeviSHfG8v7QeLcjkYeORje0SH+nAtnmiFS16Ph6W7MnSfzMv72mr1tvLSged005rVVWtbt75pLtj4L5d48SsXyDnRWb23V5nLx9O1fm2ZbeDXqk3bkxuDy0EiOhq+HIR0j18+7yUMjBeb6jcHEvvDFJpyFRzs6RnP77///lUDqiGtNcb2XtDUhGjIxfJka/0Jmnn8BuelTX3X1yJ96WLNw62uUEupn+DU51Pg1KRsId5c6iIw1jgwUXfAo9EHudDCrxfn/LSDsfQLJ1u0NnvAk075cLUtuckfPB2qJ/cLKbypfxwQ4gEsGr0Uk6X9y4Ei3VVbo+/l2qG+f96iGt7ewwfAosmOYOCU79kHTfTAgmM39+xDlx0cwUOHDtiyp5o0mfGLj/yFj5mHR26yvsNwdMhjza9+9as33nnnnTfeeuutN37wgx9c+IoxvuOQyVXtxi+swxe9kkUjC7uyDfk1+sUPvugK3w5JPLM9PozhE2/okhHv7AUO33yQLhwKdfACNh2xJdrJT17NgY4x/FrXPkOWcBk37zKPDn76sh3/xsgCF17wzGb2L3TZC88OcOhMLUReOK3ND/GJHr+yHp8dGMWTMfjMq3PMoy3+6MiceFdjgGlfhNO4PEa3+G6PM1duo7N8hTwd/OBb7mcTY/QPh3XoypXWGoPfONmN9Q7PtzU+wo7w82/08aLO0TrchstcNgomumR3n627v5DcGh7q4fAcLnonD32wF7v589BqTLzQA72ae25Pa+C1HQwxShtEmyLn5iyMVHGp91wiijUOL4DMcfx6jsNhOZv7rhynHh4O0ldDeMmBOENwOZb5irqc615fAmkOjS44eunFb7+GmO9/Pw8G/+b3gEIACC6JFsziRGsDuWQDRy/3koUghdsBDj7gcvhh3IakAGCDEqj11nVYYJMHj3644e+wyBy+wGeDNjaJL57xuwG7z9mgMWtqxhQL4cQnveBbYiKPlq/YaG3+cNBJfoY/iZ2vSc7wKnj4lOd0AWcFUgUfO6EPx6k7z8mVjbJFmyZ9VYjlK+kqu3fouDYlQ3bWe86XrNu16ZkuuocrmA7nyKAIQy9e8r9w6m2QyWE+PyYLHPRF9nwi++Nv/50hvLDR2n7t1Xo+hy+xW/zbiNhAK/krEsFI3PGEpnv82jSt6+WyzS+dnL35zTPrg+m/seDq9wsfYyuj+8V7CXFr53jPrbWmjbEX0vKS9fdwLl8nj+dzfBinKzRWV/l8tJY2HvHmKsemv8a370Wrwgtsh0XFojnj8CU3ml7W5T68KVajWR+f+O8Lnl444OolAVwHQubFfzSjt3R3DB9eZBS08gK/6oBm+UWjK33BoyCSp9IvnirYWr/6cm+9IgUtRXZ6Xn0vPePWOGhD5zwMjZ/s5Dk/zPeMJXe4Pd/7d6XiMXwd1NDLHtSA01a+1U3j8d9hHz0bO/WpMEaDTpbHeF85w91YMq+cePMyxR/W/088xUE9nckLeNDLOclqbOVNV2ceSf/lLDj3vjjDS+OXMm8NzuIpuuWO6ICLN+v7sWHz/eZtBXlz9XJqdYScazz8m0PbX4Pd/3V8utJ3WRt//5ToX/9Foy+Skinb9Qy6sVYuTPZrrnV6sRcfi4Me2U5Lp9E5+Thpm7dW39zawTi/je72aMRX/8aGeoQu27c3zzicwB89qXHYdn0g+YyhqYc/W8KpDvDyGH608C/u9NXi9n8+4rJ/4+/hVgsWr2QUi3o1ufte6MF6CeSnaku1gfqnA8jqLbRc9gT1ZPs+3sD7qo6MHQDQF9nhkWv6gRTf1pIVvLimC/d8Gx5z1pI3O4NxSLA1s/oSv+Ry8OEgqq9KyOnL++pAh1b4zg5yGPvRER698KOPf3nSnkc3fkSkFzWUF1Sw/mSswzH7Bpp9KUJH/YCnTnLQQEZflKBBNnsbXOoi+rAP4AP/5DRHdmMON8DzIT5C33zXPkpH7NOBjhrZPyyuff/733/ju9/97kXfOnitc+9KFvd8hd+RvXcEMtEv+mQnF1j6q36kHzzwP3TwTKdk7GAFfHFEHn5HZocUZEGHfMbxUE6T59naQUnr+VDvV3RpjUYuOmADOiQPvWh8wxz+e79Czxo+gyfxxJ7ooEEuPooX8jgIg0OcktM7DXvyEePG3Dcmnui7deRFi1/wJzENBxhz9IpvNumQyR6KR74RjDgij3Fr6cu9MTLAkY3RKpeZJ085gI37E8HeH4p9ejVffkr3+bTePLuA4QviMrrsDhfb0Tf9koE92K4DU3g1Pf5c2SsfYN8O1NnEvydmjI34D135t7/4qDXP7WkNvLaDISQEtYvzPHVxgIr32OIkLo4rCDgEJwJbz2FKfu5z/tbWcxzFjuDR9M3ts/uczH3Oto5njAM15x6PnQZz8r4UIW+JKh2Qo42bLBKAYLauDapAgtu9g4oKAzjhEDCCS5BIWALYM9yS1r7ECwI6tCZe0erFyrjEbM46zy4JbWklt568dEoP7uN5+/TZ5/XpLn3Ww0cu+oDLhX56xFO4rKm1niz8AJwkQBd7Wh9+G7lPl0s46wf8AT6wXizZhPwSFN3BDWYbfVVkmO8wqLHsLKE1ViHNH+gjfWXr9Tk6IZe1rvMLtoWFD7302Jp4gr8rvuiNfclJ560lC577OktSZg/PNrB8HD50JdpwZyd9+ko2YzY/vNnk4BG3xYneM7tkW2vIic7K0NcVeKtgAYemgyqbYnT1q+tsXZ8N0mew8Z/PLfzia/6EWx00Z0zzYi/n8VX8k9sVjeAWx96D61rYcx1fDj9fLkfm+8vz4rROYVUx38vp5lj3CtYObRVG4BUfik70FBvwGCtHW+dKvwpjcHRhPX6bx2fw/XkVfGA7mDCf33jpQDe+4XJtrHsmDxi9Z/4Cp3wafWvOi36XfwdJy088ZctgkyP7kJ0P43X1s34Q7fQUDuvkpHSQXsEtv9
nTL9teVMK98PhbOvlK9k5v9MrO6Ianufr4jC4fam+Cz5We5dd0szx0YEQn8RJ/q7v11ZMPz9kdr2KNrvCOh9XT8nwvjprHozyya+WA1ffJ5+YVa+XYXkrKNdZo5hWt2/BNDjThAit3l8/Axo/xh9vLDLjmy5ftI+jLve095sv59pl+wW19OW7h28f07WXgtM2D7nc8Gdp74jUZkvvMvYs3HPdsdubeYHc8GvXmwtXYPp9z2UwsmctvqwP62oeddn+A27M1/M+6dBNOMMa9bOVH9lt6ZiN5xXz21Yul1umzc7VuvlXu9SyO2ZNfte+iIe6+853vXPjJA0bs4o8f8h0vUvI0ORweqC285DkMIFe+CZ8/iar2sM/b773AyyHu7Ru9ZPYnNB3aWM8/8IDHDibEMr6zaXnIOj6rWWP/r27CN75c1snX+IHferqhr/zSnFpY/WM/+/3vf38dDpEZHQdDcINTl8txak56sufZP+Q2cqLdARBc7EUHaHoBh4tN0WPr/qzMCzOcGn+gJ185oOkeLoeGDgisI7Mmz6vp6aIfO/Vs5lKn4T2/6sdPtTL7dfBE33j3f6H95S9/eR1c/OQnP7l0hFf0ey9It2QhO3nIrobEO53zAzjZ1Xp7nnH61+PfAVe+DScYNOiVPP2QyIeLA/Dm4Xag412KnfiMF34v+3QNBk49nem9W9BNvmStgwO1MH7oiV+rV9Hjs3RCTvuIZ/J4Rpttigd4qqc7sGJP9tf3gyhdGNNbL97cyy/ktQdaL+7yKzp1X0yL54WtlqLbDl+Le7FDdrbWd/i08cR3PdM9ueChO3pBMxvBxS7mOryhe/N68/guXtEz3vuG/dgzPWnsDx+es5k1dA8HXaBFFuvqr8WPDe31ITHmzyPFJf9nSzblWx1uO0TmJ+g/t4/WwGs9GGLMHLIkIqCMcVB9FyfIubDovoRQAdNmANZ9jlPCMGZdFzw5Uc5Z38a5z+5zsO63UCoYyCKB6PGmqNJ3mXOR1SXRCAIJQN9Bj3F04F068SCRCH59G7/kIVDgkoAbb+Mw5v8uYaPHX0lcQnL16z24+PzNb35zbYDxZs6Fty78gdfjf5/jfWUAazPF3ylfyTcZJAT3eNXbjKJFF9kiXZEr3UrwHSJJUsFkH7BLHz5t++woMUoo6OM7uuBXD+DpEswWNOGpeKBPelR8oIenlSXZVq5wwK3IMRe/8RAOcpUMwdMJ/SlAegZToZQvSPQ2uHSOT7prLf8inzG9DYqefVVgA5NY4U+e+L8YfWxi0GHWNoUUGvhcOddGxWXr8jMwfYEiuecf5ssb+HC/tlp9L2/uvdwoFvMFY+tvm0fOubVdOQZ8cNFa3OS38VWMWZfu9J7lsHPjgyPcZ37b8V4m4ZBXbYjarlmZTr7xZ9NWgFZ8wFNuTqYORPhFh1u9/PfnVx28xAs8aCejecVoh3vl9oXbgxdwHfjs3oE3lyKrQyG8x7c5jW46xDIPhyIM/9nDOHg8di3P2aYvi+gpOeFb27U/LT48obF/ohZc/dLNVisfnXtOX9lQX4tnLxBimV4URWvL9ZsOAsGSJ7g9vDKerk//jE/jYPbLonTSl1Gr272nUzpcebId/GJVkXfSCke84jP7ViiHN9wre/fpbmPNmPlsqJdv+JI9MD3Hk14ca+Vludce4GWCDGA2J0QX3ho+5Tdw8l45wsvftqW3e8HmP+vLlfrujeOtvcH6eG/94mmdMXyV/7pP5vpkBI+WnG+f6QeO4Fb/3a8N7o1Zu+P0deq05x1fWv+myMcHeLYtfPknH0qubCD/oJU/rtxkzpfBdA/Gev5ZS7/RFj9eaK0zZ9/NpuqD9tHgxUi2ssZLj/rIWnVBtYkXww5A5AexApc1aPjHlq2zxp6tFuilGH5r1T7yJ9s+3A5bwFkLlr+rNzw7lBADaKqtjKtpjcHjK3HP8OJPfQF3+mxPIIOc5rm2+isGwLmylS/T8nW00U1WeUdNDQ9d0x9eyO7QwSGXgwa4q1/IQF6yVbvCbz5YtOV5z9kOT2iB4y/0puFB7QmX/I5HdTrdqHnsk+bo1F7VQRE9g/fjq8M2Mjn4SV/urUFfbmcrspnHA7uga5y+xSe+yVX9iJef/exnF4z674c//OHLHIZehx1sRlY0s4n5/myM3B2IFWdg7Uv0rokDOrcGfWu8A+Kz3NUPmdV98j298j306Il94cwPHNo4sGEjuqBPzT1bgqeTDuXoRMufyUUPfR0HN713CASH2GAvse190B6kdgZTPJKJXcWzXjMHv4MUOPGKF7ryTB446Ym+0Man9XyQLuHLfzyjSS/kxQc91shMFjpOd+bg7T2AL4gDMWCM/vHU+wP5+Cxe0XC4wnb8kT/ghR3pRWMHvk5+/Gj4kFP5lXk2ICffcbXnlk/1xtQR9BJcuMop9AQWH3wEf+kCDbTIIt/hm+6Kw4ux53ZXA6/1YIjxOT6HYFQGYJSSjnsXpzQP3r3exbgSjhcchgeTw+RE9Zyhwr5N0hhH8FwySuocax3PHLoVSXpXSUni2asX94Uzn3zkbUMqYfULRTRKGvh033rr2nQlHLislazMCULj+k6R3VeASUru6c3mbvMXsIIjmZKFziUZPOBdH38l13TDJjZym6314PTJij8JBY/hWp1UZFRweJZ8wJA/2qsXNJIL/xIDv3FvffqPV8/suC1fyL7ppoKHP9CnluxgswtZ8Nmv5k6bv/e97730h2yeveFXCPXpfrjq43ltTgcdIhUD+EmudGMu26HrYmeJODvELz1L7HjHiz6d613WgsGvxMp32uDprQJCkUnnNoR8KJ7yj5K6uERLrxm34UjIeOOz6LpPFroonq9Ft4YOGSRxGxz+TviNbXSy3+q4QgEs/YFLx2Tofnsw2sJmv2yyduoe/K6xicpNYjB/IkP48YQ/l7H6RxV8qEMnvuAuj4rv5Xn52HFr0Wxe8WnzLK42z0YcbC/fHYigC9ZmrWCVr3oxb658bX1fnnSAA15BsfSCM4efXvLhBduVzApsc2QPV/sJGTvs4s9wmTPeQUS0WxO/fQ0Fd7TRUmDz3fat8OEb3pXXs8tBQgc04Us/2b51nh0A0DWcaNKX9eSLHvho6vMJ9sJ7+tNbk26yOzg6gJc9zePJPFzm2DQfSbZo3vvTs/wAv+kTfAei6cPYymtcW1rdR2/5bg6f+zUT3aZPvHhuffSWztIwDp+xWs8nDye/i8eXBPKc+LYXlGv7E6+TxktiQ18e7M/a41cvd5zwjZePNvdVt9gT7MnlQ/3ut7u/7H24zhqlPwFbXsqh9eai057kYEyjL3D69HHqpbn8Oh1HMzuZP9dGY/kL/qR54lu45oqJaOnzJ728km+boy8ya56L5WK2efowRk/htibdiMdky3bVGfCL2eICrHjNN82zsd6+7SCTP4pLPik24CQv/u3r6ofqF/jkBvBgwHq2p+vtx/4tHC/q5hxA6MmGR716o9pCneVlEU9wuFdL4kN9b2/HAzj1xh4Y0AE+8MSP00n5Ci1j8h5e1eVq1HzRD6XWafRdfOGDvFuHkEG+b
r/ArzH1u5rPM9z49TLPtvYpzx2uqI/oAD/yEFzV8sb2hztz1fH+rOWrX/3qZQ8v7uoxNsUnOHWZXOpltgMo+c8hgedqWfKS0Vpz+Ra9ks3+RR78OoDwjmX/hJ9NyMiPOnAgH9vIbXRAlw+39wWXXA8/XsHDay08/M49/aGBpw7i8IBfDSz/0diZ/GzHRxweeEaDLHDS7eYpeD3jB070wKPNLnRJh/zbHFg9u3fo6pnd+J2Do2xEr3zdejJWu6LD/9TrbEs/YPDoYMY8m8HjGR68m4fHGF0ZowtxxDfRYgvwamW6h1+MsBVYOOmJzozTHz/0bkJXHSDRZ/mOfHxFTFond+CxnIQn9+wpJjos7h23nCT26U2Pv8bpua/F0K1eYYfsUc1j7OTNHNnZDH56ci/G9GKEXNvIhj6/RZ9/k4sfsDm/oGPznSPQG1ziEj1rntvHa+C1HgwhJ8kwOudwL7Da3CoeGd7FYcAWuHoOyoFtZmA0DtHVBh0Oa7TwrRO26Ztzr299ARLtNoTlu02vYAnGGnOez6uiyHwwcLp2TDBKLOQU/DZMAaJo59zwcmLzHFsyEHz0IyntCzaaYDqEsgatk2YJdfVdENMh3cUznJIVWhKMRGIMzyVdz/joUMi4TYT9KjjwlJzLUzqRvI2nNzoQ2BIFnUg+xsDTgQDfw5dkbH3r4EDX2pKMJAKfuXyiIsh6esO7+V6a23gkrOy3OkpOctAFHdJv19qg9eAUbsGm+11rHR+IVjFEn6uv5E5W/CoAWp99Vhd8jM9JlDYdMWiD5G82G2ttJvRMPzaoeDFHNuNiqI2dbnsRL1bBSe6e+RFdgbPx4WvhryC+NTT7NXP1nR7KLcGnC7rb2C+vgCsP6PEUbPD14ey5f2cou2wPNj2EVxFEJ1oxEu1wd7DD/+KlfvlEq/znV1O6Eg+9DIAFs2vQ8OwqP/Zsrhd2eHrJYL/Ni+DJ4FcqtNANl9hjO4UMHOXzcmm0FC3WiiFw5sld0Qcn+n6lktP4SHBoGbfeGFi2xI/1HdTCuRf8eNtDD/PxvjLiuwvPL26HMfBGzzo8xC/6rnQVLs/kaL6Dl/K58ZOH+ElnntNNXxad++TSTcd6Rb9CMd7XHvmCHv4Om+govuBYn9nn5bP75u0H7EY/2WD1DM5zvJ5zF9Fbe2oevLZxiWayrk+m30eUL+Oh9eFq0tHmbAAAIABJREFUfntzZ+zj6V5cLf70EO7N1eEr/ydj4+GPv3AtX/fGls9yMBrtL/JpOd4e0TgYObI19gsHPYsjXhsLl1yM7l7xvb375AIL37b0eU8HK2v39+QPX/r8KD1+HJ57NE9+T/k8iy++lz7K2/m6eXHRs3n61tzLv1qx37x9xtrsqQejX17FQLo0r7GV+lRMuw9eLgPbHiqfmXcZ0/cnP/ZldQN61WNetNCTW+z90VODWatGUuuRrxwtPsGlBy+/ai91hV6uhMv6v/3tb2+8+eabV86NFwdnDiPA1eDznL3Dr4ffuLpNPVUtLg7Qy+/pwQGHgwD3xb06yVoXedSK5NGrQdTh4PGnPkKDnsxbky3tH3RRrYsveMiCv75KwrNaqpro4fZCC3c+Ag+a5KV/ePxfz8iDrn3FQRNe++HMgYOcDqdDBwcf9k58060cnS7UdA5BHPKTx8s1/vgl/sGhhb7an93kBHz1dTI/++lPf3rN5a/44ktwwemlnI7B4gFdcUF+/oRPMrMPGenTvbn8EH3jcGSDDljKu3RpTsML+6ozqyXoBh001CLW0VW5MT6sI3eHceCiQTY4rYGHfvGobgbPDuKH3OKADsMPfr8iY2vxw7fgZRs9nHp80CG+6GIPi+zfZGA/NY44BdtBSrpG27gYtB5v+Chv0JUxfLOHmKFnfscG8MPlHp54Zj+6M55diiM0reEn5T76sIaurNHjWQ2JZu8o5vDhUld6Flue0Q9f+cA8euwlDvi6MTjBGyc7ffFx+ubP5OAvxp7bx2vgtR8MCVBGYhxBqXdl/IKOEdsA3XOsNhSBklOVyImyaz1bXwCXIBI52Pqlv/ecrEDu5ZcTuST65IHffVcOWmLnkBy6U2bj8LZZBSeoBImglRjIaZ35Xpz9TTc8nvEgMVkXru2toztFnPuSdT0e3OPfvYOTEjEZrME7GpKaeXxJAsbQwoe5+LGmDbW+IqEDgA5vwKbXU9fhbg0+JA8Jgo6sjT+JBi1y8KuTZ0m5TdC85AkHPUqo+Nf4mgZPOvF/afC5ruRIbskGT8HozWUjtOlQIkcrPpPz1H06CK7P6xc/nqxjkxIdfNag0Z/zGcuvjLNJvodHY/Hhns7oQRKlE/Lb1PEgYdqEbOo2OXglTpciT3HCT/FVkgaDbxueeKV3OqN7vIi3/C1adIkPmx2Y1RnfzSaXYW4tHaSH9Aa2jQIPxbbkb0NrDF/FJ17KEeEHZ7yr580l8URe97vGWBvX2lCshmP9LLrGNhbhCG8wC2tO0cZOYJd/uKJx774x/W6ymxcbr3egIx/tgQxd2vBttH2lszndfTax6YPhO3wivhXGxuA11oEQnHsYg451wcLdlyDhsz6ayYLvchYfaS9pnt6sQ2tlU6DBKz/QAbz6DpkWT3Nw4kmOsJb/4xv9DmiSO/rply3yD31f+tBDuFafwa/9+IRxNDs8gwtMMnju3xrqC5vFBS4+5AG48uN8LN71+RIYrb7x6EfDiwcdNd/44jJ2+nM0jTeP1w7MNv4X1+JJjuQL1/Ky8kQnvYrPXbO8RCd5PNOdl4Tiunhu39010ajfue5Xt8ZOnYcfPXleLnXZj84vmvEgD7Y/2EO0M/cF115r7w4mHuIjOY2fNgw2/V7EjmYuXE0tnr2/N7/o7sGa/yh8y9vJx9I7+VzbgfPcJWb5pnbuKWD4sPzAJ8HSd3sg+A6M8n9j5h3gxa+cIk/ApVVTwCen6eFp7zTW/pmP7BdCfMLLoZfGYkWN1GFAL+zG1BXqBDQ6sCCXnJWe8OwlNb48+7Ms9aR7cHLRl7/85asWsd7h0MPtcMS8ddVrnsspyUtmY2R0gCR30rn57Gid+/Z+/l79RF6yeAYHZg+Lyu90XryoWaq34LXPqEPp1juKr338aMOeenUIOelB87JvPbmM0yk4+585e20xZ606yZz9CI++KIIbTfw6nLG/ehbvdEG/9iA05CFr2yMdyHS4x350Jd87uLFXeVGGX+3bFy3lWzToN9sZJ4v285///PJn/vj2229fNmUbvKBT/UlefMGjxuRHvePQp2d0jaHDF+iDrHwEby6w6nj+6t6hQjaupzs1YLEPzlo+jzabJQ/+6JseyIVncOKkePM+w9f5ON7Uwei7lx/pGI/ok4Mu+Re8LmvV1GQSU3TDZ9iHLNaj34cA9ktzam9zev7hHQOPYM2j1bP93zPfTW57ZfEQj2xpjG/RCVi66P2FjOQwTo+rX8/2OPrZteTDh3XkxQt/8GwNu5oXQ3js0A8cPZVz8IVu8V7sZ4f64PmfNeDwU4+uuBUL9EEmPop+h1F4fm6fTAOv/WAIWUYqYAVbSZeRGdKcZMCInKeL
s3JmF+cRGJxMs661JVPBbq1ClOOsM0mixtDuig6Hcm+ek1Vg6eHua451vOiX9MBxOnJIyK01bp1eApDA25j1AqwXZc/WgxE45G5O0NJTiTKdnr2EYw366DYPt0DpEKNf5dECZ030JW9JXNC677ABrMQF1jxeJMNwkl+ShMd9coKLb7RWN+BcHVC5R5NerEGT7Y1JKJ69KOLBPN7wYB0d4bkvq8DYONkVDjDZjQ+1WZRM4INbMv3Rj3508Y9fcAubHeAkexsYXrok0/yz/vSrPRCKL/TiDU20Wpc+HTjgFWxXfkNmOuKDxmxont2boyu61myQdKWgsclld7zYeMxJ7uJQccNnFB/kZn9xobCxAYtd+PQuslWgosUGLvDFSX5BvuKIvMWhmMRLchvHgzyAL7aW7NHT2gwVbPDk/6sncG0+7vHpxX83l3jVh9e8tnZqAzMWbT16+nzGfbIsXH4VTr21YOuXfjjjxZz7+Fx+wXYZj4dk83zCw2dM8ch2HdTgR8405/CE3vmEsXJ0+VNvrF8qK0rBdvAiJlvPVuXt8rGCBQ2FTzTioXXtIfV4Y8f45hPRTlZ9ByN4AAPfi1uhY12+ZF28WJPsenPWmFdgdABUAZI8C9f6dG+uOHHfQZbcGh740c7+9SuLPW0P0+J77R2OfCXfMd4VfH2xEK38pPnFmX6yfzIun8Gf6xbm5O8S/Nbyb7omazbLBiedYqo+PPXgXea7r1+aJ3ww6cCLqL2uZ/DJtzwkQ3tAMMaNxUd6i+7JS3YLX/PljPYI+1EHOeVAuaQcal7ebl18xAtYeblaINh7Mi2P8Reek99767PDwoZzdbk6O+dPfQV7rkEjPz7n4Dht5+sOa/yj4O6LYbB03Z/FhTcZkjM91FvnPh48Byt23OvlIbzw7+oMtrDWvBiXr8LTV1ye8egFyIut5gXQvm+9Q4F+rOMDcMnH6pRwoSGfmRNn4NSh1hffZAfjQsd+3IGClz81GDzW8KNeNuGy76sd1HHlKv8+jlqFzNapOfDZPwzbi3A6Sk7rwatl4FQv0qExuMC5jxd8e7kOppqA/Pl6tSedtYfLsep2OPFQbb9ryEn3Dm78D07Iar4vM+Qu/OABbo092FctZ0ztRW55BawalGwdJsHlHt5qYbzRl9qNTu2X7F8tDZc9Rf2Htj2P36rt0FULqufUvGTgL+yON/KTiSzw8ct+rFBHamDgpFP3/MGPqi7/23u+Aycb4M+LOFl75+Ev2aPanl0169BjE/s6/Rcv9GQcr3TVF+x0wz5kZis+Gn484FGNShfZ1ZiDDOMauN61eo9Ajx7EJD7omY7Qs5b/ddDFRsnZ+4h5fKmT6RJ/1pORjvvh1Ry7w48WXhxAqb3Jih6Z1L/iTvyCNYauNVqHf3AbLyb0xtiPntGjPzqCmz3KBb3j0RX/hZvtyIvmvk/Qj5xEXnYhD/3DAT5Z+Bif50vFpudyKz7Qt5aei2Nj7jUw23fPjnDC5+KL/A9t8cJHHm6HcnyfLfnU89dClyo/UftMDoYYjYMxeE7CiBnZOEflMC6OkINwGgnMes0LQ0W8hNZLontzkgnYHCknswFIgjmkPjoFgzXxCke/uHG0+HVvrt49h4e/e0G2GwdHlAwl6ObAWyeQjHNel2AlgzHzcKLtHm+uk7YgdbUh0Ku/gxawxtGXnCQZ+nLFS5u//1WlRIUvY3Rujb5ExXbWFfzJYpPpXo/XLvz3Sw2+zcMvabjQTAd6F7ouvKMnoNMRfOkWHrxJPuhZI5l3Is9P0Mh2XjjJ55kelxcvHcbJqxhM18bQjC7+Ou0Gizc9XvDLZ8hpfTpIJ3D07zOw0do0vywm2giTNdzhTZfpHV8KGDHgni+RvWf35uH30i4pfuUrX7n0akPoVwSbkE3O5iUubFRk4pNosSU50LcBijv6lnBtXnzLRsUW4MRfiR8+PlrSzm9XP9aQUUsX/NY4vuCSK+guPYPfGLYu+9UvLBzhRid/KOeEr/xwMfPID13YVMgVneDgaUMLXm/M3MnjwvyTwr9eFnouj92bT4bwRsuzdfHVc7w1F3zjDijEAZuKdRc5XXDY/PkTeHP0GIzcyvbW8oOKSLZSwIS3F4LFnX906ATW+pMHxZyYbjwcHWR12APf8h3v/JL/LW18w5ks9WSEg63JRaZo94WTGAmXddFFL53tWF8e4QM+uoDLc7jTBRx9aZOd6umBfvYgybrsuPbP1l5u0cEvunpzpw/AnbzL+wnXARs+simZT7ilsXN8t7lr0a3Fa7Yzhk5+RUc7dw9HuIqNjXN6qaWj6L6cmJtdu3Dp2XwwxrqXP8rneuPyj8tzuSg8ZMruJx/llJXL+uXNvf1H3pefo7N7kHnX/tlY/MdjuVJehqsfLprHw8rc/fbBNhYP1VLpqf6UI584x9N/8/Wnvp6aX3v350nG4tM6dhAXeJbnNC9zXq606gAyatbvoZH5DoxOPaBjLB88dYo2mNVL8WSNGOuLBi+F1YZe2tgU3/EVLfxEzxgc2u6Nxjz3Y4o9XZ0APx+Qn956662XfuuFFp3qKvoRn9bYxzvI4GtwO7Tgj3gxplagI/yWU3uBM1YdFT51hMMveslGZOrKDp7tH+kBvWomdOQ1+sSLGHEgRW5fMVUXZRP0igFyqqXIRofykUMM99lFbV+95TAGDXP9aZNDgXICHjyTC0/WWY/Haq3qYDVZB5T2XnWu9XIuvfq/LuELj9Vb6jpz9jl8egbXQZt6r4MN+ZQt5B448I22dfgqHuyRai84jTuk6r0C/WzRuxdZHA6xgdrtm9/85rW/8YcOn+ganB4cv9DolX74OhoOG9gDDFhy7p+MtYZ+warF1ZdsSq5iNv2TjQ/zEzKhRQa8FSt6dYwxOqlOxbtamY7c40ec0DHboEte67MpftEz1iEnHsmLP7HDHvDaz9FziSN1el+EgW3PJj9YvBtH194uPtgAbbyxm/edDnaMhYMfuCe7ezrmYw+39xkxiAe4XBoY+PGu5QdsSB/ypfizvpxKN8VjseWZX8FFJ/h39Sdw9NecNcX9RfTWPNfMl8vcpzt2pTdygI8f68CJMzZ5bp9cA5/JwRDyGY0jcgxXCaV5AViA5hwcSdAJEsmLkWvWg1t8nnN+9xwnXJxfsHDYHEzf1ZdG1qAjqeA7p/Nsrk1Z35+XkatkWRHWBtsm27zAFWCN93UOZxaAxl1wSmTWleCiQxeuvvyRGForaQjWDiwEq0TnGUybnQ1ZUApyvaCU5For+cFr3hi5rKFvCa8vqeJXT194TUclHr2xdNNBGbx4QsPGYx5/eKVb+MDQgfUlab1xvID1LPFKtPQI3yYO9mS76JHTuk7aJWq0JGm+lgzxQ15z9OTeONtIQvSbX+A9WfFLBrTxAidZ16/iseRV3/geNKWHfIDM5j27yJRtbRrigCwdnLXB26jJoAikA3L0RVBxATbe3TtEIrPk78BRw6uCEh4bBPr5Al3iyXixDje7dFiVvumVHfiWQwE5ID7QKWaLgfRXbG5OAF+RoLho46i3po2pdZsjzJcvKhTgXH6yTXm
wQn3laKQXQkJqxPGJse5X4KGLB0aaOfD6zvhgOM+IWGNnojS3/HR7scXQw1f+nF17hgxcDii2e8sIXaGpitkwmMC0406MEcrU1cEqPFA/T2J23hJwsY6zoelWKH9T154MdH8jiYwTuby0nYHwx4uOCkH74lH3GPJjnk5PYh7O/wBz3xmZ8Zq0+chgtuxbrk3kEKOvjgN309RDf2MvJx64n+fjqGHhp46gsg6w5bgvnHP/5x2dThTS/hjIePrRwwWVvx68WFOVDsJZM+NPk6OdkQfi/VjdcGnl84GIKb7hxmWaNXruaHuvl1KeBW4DGWbuiLrHICB5IO2shIFnkQvu7l+WjglQdDZ0A7xbob+9TIf+6z4NAXQAKC4NBnjdoFDsFIkNIviApULWqCpD6LkUAr6FhAXJtgexasv/jii//y9hdMcCWgaoELvICYP8NdcmPx4sfaFGPwuwmDhLg+4xZ/vxXXD79x8EmSwCaDGi/LhzZ6cQnG4caLT0Dp9KRLr3v4JQDjV4mefry4WhwtKnuIlY6vgbcSfIt6i0DtT9Xn2J7VdNC1caSYoY2c0XGvLVvkF3DtooRvMCUeNrWeFz5el594WF70S+DIbEHDQ7bNFis3+HznhPOcLNXa8Klma4lFdbLGNxn4h7I6qj94NRzq9IJXV8+LA7/x/DIYSUw/zcJrsjV29bnyJ1d+rC97XILcivlBrp0H3WdLfBkrEZLkSBrJor8rvwCniC0SS7UEx1xJL2truCVi7Cv5kfjk43CmR3jFIPyKQ8mOFhjPwZuv8O74fDIbqM+DkLVh93jbt6j5DrrJqg3N/JwfkUGpLV60ZWe6U9KH+75yWZtqZzdv+Bz49OVYfoWP+EInH6FvttUm5phL4RVX6dEbULUL33hK53Dyu3QBF1rwp29yJi99GoMvsPzEZiFdwNVPC6Jh7Je3DSG4bGqDhS99Nga90e7LVT6FX4muuEw+Y7M5fWVnOmi+gMsO2neTlzxw4FuiLhFfWd3bSKnhUfL/9E8um8popf94A2fTYh61scsm6g41w7t9xrIj+gqe8xe+gWf+SjfmUmPB7guL2tPR/kyUXqJ9EbmVeFfna/Vls2yQXrTDc/ajqS282a05kd6e+govvo2hA34VfPzs8/JibLxU11YNR/5vrPZwhLd2sAuTTMZU9v6x8Xaz7Y1bvC+D1Z4O3JsfW+CgE7ohh803fbeu8Yn8NHkWJzh2WN3iL37TW7Ei2uD5OdwKOgoaxsRnunUYLQ50+KPf2GKJdm35Id8G7xC4NnX+W726cd/XKe7hgxc/1iLz7Gtf+9r1UjF+05VneZxxeFKbF+Ye3sGBSYeNkzPTA703Fp/axO10Uk6browHYx6i58BAG/zZUm6+P8Gy3py8wFespS98sCfZXdkS72I1PYC3TrOp8nA7bMGzZ33rn/o904V+MuIx3TjQMcZFHjGdHOI+nHAr7vFjPJ0qvlgBn8xiJBnFtP7ItAP6fIRcYKKPdjxZEzpo6qf1+JBjoNHPx+igl990Ap9n/a1l1hi5vr58qp+54ds4h0cdMJHN4VV/Y4js1jI4rVv0x5bw0zs/gwNv8GsnN73082g6gM9YvgEXWHsYMPSz8yK7WMfAs09/isMaSj9qcuHdAda9PC8NvPJgqL+Jkkicv4ns3n+Ddy//WRpoESiAdtgjgFgsHFL0+3HPAkdvIlpsCywS8hY/iay/q6PPJTALYhbtFovaLGaCUZ/Tg/csUAl4aleLv/rXv/719ZURv21RRFvghA+NElN8xSvrbjID1kYOfJcgGN9qME7t48ki5hKg//a3v7149913r7Hali5aeGthkwh4Rr/DIPUZpC1Ym0TgAQwb4QE/cLBHcHltfKPTfXyoV66e4QC/OLRtv89VjV1dn4cG4Jcf9/TRmPpLIMikP7zx4Hl5eXx44iY+49XYbEBHXemsBGNR5Sdg8kV6xk+8w2uTZ+HWBw9/zibg8h88gbdghzP+wreyglkd6Msnls9kW19N1sWHfjKtDhoXLDibXRtah6UWfX7egbB5YDy62SR65qoEgy7owfx0n83Bk8HbOQmON4B0pR2c0sGDe3qlL7yZR+k32tEvAVODK7E0Tht+8wlt6K8PmG/BxKMkbt+44lsBp4QPf3ivvT46yeZogqukP7WNRT/lhKeYm4/Akfzhi0f6qD/cYNFW0o86+2ZvtPP/bYOPrP0Nlr7QEtfEoPwSzr6sShfxAN/6U18E4Zd86bJx5Gc37drQeLhtANDTZvPSW179bfLQa62S+NrEGUc2dgfrHm7/lfE3vvGN60WFdcya1VtXfrq6gBf/xneIo3/1Sg79EuT01xoAH92g7aofT3yNXxlLh2SkW3KkM2PI6zn56CG/tylEn6xwxm/2Rj/danOfLtTmaX4GL3pKPkFW7ejHEzz5pL76tWe34mN9eFfAFBP0pY/47adm6OMv/9HvfnXYc3jScTCNj2609JM5WYNbfPGTLtDIX8F7PnkxpjmnX0nf6R6N6KkrK2ftjVlegq/tqb7Fm+wLd957fgoPWbTzy924p2cy2kyK06eu8xUwLgcnvagCy28915+99TU2e6CtrH3AoC3HkX+yDV6zEbwOKsztYoc264h8T44abL5dnT7U4ks45GfRCMacgxeMNcx8ypb5PJnSr9qzPnzhAV1twRmv6BMPoqlfXPG8hyYdOuVzeDJ2v9ApV2YrstMdOuabcWKmGi9sEb9omuvwkRE/2oKJb2P1OQjpKyHxrT8ToQ/v+EATbTzIL+D2nNx0g5/0UgwSr/X5SgjuL28vAJT8A8/xTyfsjB688KGDJv3IZ+ASs8HKA6wLeFfI1XgHHXKd/NLBDHjPxsJDN2g4MLHGyX1ax+gKHFp4JLN+cPQm/zGe7uQGxrmXb9EbefHiYE4f+5GfD+CT35kHeIKHj5tbDnysL/hyUIQP+K152uDh0+KxnE6NH/puLqQLdPJ1+nZw5SdpxtAxudE3X+7l+WnglQdD5x+Lkjz5jE1xfy//GRoQFASkPqu38AoigqPA42rR7bnAKOAIlHAU7FtIBByBREC3gPK3FqOC7tYl6CXZArfDpF2s+g26oNUFXuAUUC0GSrjU+Cqp89zn8mi3uLTggYVnx/mNr79NFK/oCo4tBC32JQEtfJ7Tifto0YHrk08+uQ6R3Nefx8UPvv/whz+8eO+99x4TCXwYQ+/022VzdB7OLN7zPnm0Vzrw2bbGkQtdNR3hjZ66vD3rj7HCl0+4lxjCky0kC9FQw6tvkwZtNo5kgiudwLf8PTI/N9ufP+YDam3ohTN4bQ+3TZeFFf31o+y29OECX1LWQYMFGS76IIOxkhN4ydKimw30K577yUebreyNx5LPeAArCQeD12y1OtGGnjno0AdsvmPeK3iNFxtTyQd8618lEuBWX9nfZruve9Rwkyu8+9UBXRRXevNoPqUD/Da/0GqOsRd84fQm1UFuhzn68enLh/iMV3piK3x10cPqTCJJHrS1wwGnZ7au4IcM7E53aPqv1uPL2L6qQbNDgDYH8MBnHtCLQxi+kc7z0fV5
NPFdvE2ueAKbLCsT/jybR3QFd3MB3/DqN17Bg/gWnDZyweNKDxJD8b95tP3xBjeZjYnvaGijw5WRniXmkmoJuWc2kZjiK/vmB/QBDz7oMbuLRfyqnzfgBy/WOLJIsm0KijvpCG/a4MwezQ38w9Fcpbs2YOZheujr0O3fn2Wg3wsRMq0+8Am/9RcP5PScPW0ybAQqaKR3MPRJNvdww8HuCpr0RVeVbI4/vmEMeq768OvrJP3xZ7x29JW+vEo/8Q6enyj64NdmbHMFnWTIDtrAnX35oZrOHRzuWO3GuPDgQhNtJZzpJhh+Fm2wdJRvwAkuOt0vrXQVbXSU5d8zHMsHXOlh27u/gKcsHTTyuUBOXOd4Y+Kt+3QC1nh+Ht+1aXfRTXI3Lp48F0/4r3mc3s1F85PNTnnieW1FLs+KzS6ebLbjoTmhNh/krC7+jabL/LUJB5NfGo/P6nTAt/EI/uxbeHhWb/jzXN19Mpa/9ExXwaS37XO/7XjHl3r7Fgfd0HdtbEAGbb2AcRAgN2EDfcbQb18irR+5Zzs04RIb8ut4o+e+qIKDXsT5fMc4/IDDA5piLz0Xw7ThCf6lD4d1FU7rornaeHXzNL2Zu/gyDl0HSq5+Liz+kJ9MeJLjWlvkQ3IWMRwNcPqtC+7V8jh61E8OewEx1ppkvS/vg6sDJzg8O+zhk/RoPDzkh6eXHX1lBBZeB0jpyVrmsMdaBo6MaIC1HopR+LfG8RG6VDv8YVv3DnPQRxcvYOBvntAnvPl8z/jVLq4bT5/kog88WUu0ydPu5flp4JUHQ89PnDvH/5MaEEgFc4HECbVFsZN3QcUl+HTabFNZ8MeHwNXC3IKpvUWk4NJnmQL6w22TZEwBXmARrEqw2/igIwgJxIqgBJ+Aa4w+Y1zkQN9XQz/4wQ8u2F08WnRKPDz39gofxsLdVeKnbozT+4rxFiBXG1h8xNPvf//76yAHbrqjD4vowpPZ4oN28sF7XvFQYlAAJ6Or4l5fC7P2dBBs8DvWomMMvWvvq6N4Ci/64Cp0pZQooKXfghn+5dViZAwdKNk/fPQAFzxopYdwRGdlbuzWK2PtcElGLOLpc21sTPi9YXOAsP7QIc76OL4UcNr7CUYHAPSZHG3MOhihA2P0NxfQxwde4QbTPKovvVaj7x4Pp87h0U5O480biZEkCm3468eD8dmksfl4sMFHV90bQPM0+1bns8mEtkSmw6n1D76T/Pg1N8QisApY7XDDm1zGSIzwCL7Dbc/JFb/5XrauPd3hkx1tNCRpG/vy/WD5OVvvT6mWJn75ksOhLfkNPMmVD0mC0YTXFWzj0Uxn9SXLyhDefExfcGg93GKwwwP8gnFpJ3++Rn7+4tnVwRi9K3QrWQUTzmyYL5HdPMhueDbefFKjH021DcLCJLfx/raDxJpf+LsLavQ6VEMHXuPXf9mzP75JpxLk1iPxwKYmHsz7fnqafuuTCDf/w2/jYY0kv4Q+XbWe6Xf4Cd7akB089yUVPa5vkZn/wZWP7/yEQ7KOBh69wItX/kFHZFaTV8nv6cf48GXrfIccza/8LL+Ibra13pFDgc9z852svoaDA0/4MY+LI/FrbPYqLsOJRsVzsTpbRBfMjqvdePc7lhzZB01z4MvbCyt1+jCuQ/bGh0vdl3Tg4yXfTm/NgXTLn9BRjDFWn0If8DbmarwV/auj2rbfOHAd9iZbMI0Bt/rUvm3hOevg4gXfeyX/8gYHn9t1Ehw52Ymc6Xr1EC46BGO8sjpIZ9qWD4c/1pTyRuPpu69ci4HGdfUTKjTAmyvy3+ip05G6ceko3VxM3kpffTYOf/RAbvE8eYoh6RRt92Kry5xR+goDvfg3d8UTbUrrCv+CZ/3RM7rLL97iRTt6Ysh+4VO8QdNeAIx5DSY+0DGejB00pE904TAOr2xi/isdQOFB/HKwIU6Q3xpCX9k4+viDG710hZ8OqvoqrXFiMDmtyWiUe+MDLnHIePmCuAw2f+Uz6DiwwRc59Gl3EGKs+Y8GXxHP/vrXv177CG1819/bEY/5pP+mnox+deMlKlrWKnpwsGPPRYcOieg6Oa0L8hg80m8+5UUbGnyVX8lzyE0OenB4ii9ywslfwPeFHbvRIRnDyS49d68Gqxgvb5Tb5c/0h45fRpSbXcD38qw0cD8Yelbm+v/LrIAg6JrkAo0JLgh1GCSACVgCnYAjCPnbNS0ivblowbEgCtKC5i4cJYACyyZ1ngUtwbRgLoC1uTDOs6tFyVdGP/rRjy7FwNXhC1lcbXCMERQLwvrQKClBW8DrMm5/umLxiN9gWygLlOROdjB4KeEPLz4tJGRS8IA3Og4eb+DZwddAD7fk1HN6Sw9tMIz74IMPXvz0pz99DOr4qFh88YoOeiUY6Cjx3X0/R7BQ9HO25ApnC8bigLvnhbPAsX88ZRsw2SDd6gOPh/1qCGwJH12EY+WM5qvqE94zfViUsyee0Mg3PSdbfWzSFwPrF+FPX3zZRs7zyk8WZflBU9JiEWer9JavRts4C7LDyBImeMBFZ2uHM8ln3ii+VCKfgq7+koWdl+kAL+Dg1Qamn9vkj+q1czoIPjh+5cJveiYbfBKg3sylm/iMF+0SGnrin+ZTfoRP8q5cYMQp4/AX7RLV7F4CJq7Bl6zrB/Dib30QzeDjWcyTNILTp+g7/UqCmr4XJluq6c3YcHe/PqRt4YxT0ks6bGyJdDDajTlthV9xxhyJfgc6fFVc7Fr+bF48O2Tho/wuncUDHQWHD29Y6YIfblzGk8uhDd3zfcWhtQ0e+6sl2vyCL0vg8ZVOyMb2eJc0a1/7Z2N48WfToJ+PZUProU/k4W1OthagrbAJXGQwFh0winYxjT56Wwu3cvoFPB0CRg+MCw9wRDu7sbV788hYF/rJLhbRYXqJ33x3vzbSlt/iBQ42zM+M7esOOkVrDy7BZ2fxz4bHvHK1loIhAx7Tk+fGotEaB9fGjXjDgzhoXPbMD40vjuojhz46VOdX6R4P9YHl32DwU0G3cepkBJOfhC+fN5bMbbzTbXrP94wjhytbGhffybXyhSs+0Eo38awvG27bo1C3m6dwL654TT/RgDde3buU9OGef8iltJFn9RScuUluMVNbtuwQJmnEAAAgAElEQVQQwRgwLrmTZ7SM4SMOGpR0ZTMsRzXXzRV0jYVfrDD/1ifQlCeBd6Wz6mRSp+tqMSyfRceVftAwH9Nf+rmYvRXP4gQexRC+kAz6m7/4bQ3rQABNePNR+Ug6EgfpzrM8HB9g6aoXUZ679HdIQ5dw42n5BUvvinu4y/m1sQWdBIev9V/P6Bsnz0eTLPJXtKz9+sxZ8QIPbJGc8EY7+/eTLs/wgO0/sDFWvIOLbshiTuOBXnuZi0e6smYo+sCjZ6/zcMv15OfWarZK3+KamImuNQjfioMy6x5Yfezv5QN8Dov8iRbP1hMxQd5hXXOQ4vAIrU8//fRqjx9rHl7QIJ/x9ij2M2jwa/j08yN/38f62mFTXxrBZxy6bNU60Vy4CN5Kvs0evTiBI/s6ADK/6E1+RV/4aN1zuHUvz1cD/9LBEOe9lzd
LA4Kl4CA4StosKIKEWtAVXAWAFhYTX+ACa1EQsAoiG7i7FyAkBQo4AVzgtZi2gGjbZEvgE+BbEPoKSDAqUVTj3cn5X/7ylyt4w1+SqY5X41xkEKzjB32LWbRLytRwC6ptiOCuv+SmBKdgScbaSjLRSD9wCrTGWwTwAZ4sgio+0Fi8fW4a/WDpZvXGFhbBeNGXrvW1aKFv7BafzsZXi3U8q7Od+hwb3MKQuYRCnS+AWR/xjMdsDXafs3+40QpW7dmYfqJmbPIb07in7hen+/W/7Fwb3Sjxrh2theseP3jzHDzfaww8YIKLdmO0s5WaD8GR7dBUkpsvpBM8GqNkI2PjwRzSD46N8acEk6zeSPVbcTDLd7gljnB4UyQxwIfEThzRHk/5Bv59YdLXfcmEz40N5htdgS2+wAk32OaH8ZI6G2Q8Si6LU+B8wpzcJS3ihL701KYNTrB4xTv6nunenOwn0+HrC4cSzWQ9daXdepme0aGn5my6YRe4sikdg2WrbMMmbX7zcXD5ZWMdwtsQpV8w4B0CVuiYnPDjrbFqJT/o7we1NtBHPNuM0wM68Lvc53eeJeDikXH5UGvIzg2JbcWGj6/wL3U6LaZrg4teJeK9+bQebSKa7pLFxlSiHr9s70pmdjZev7HiJVnQSzaJNz/kl/SuPdnQUeBz8ON5+c+O2pPRhsJzOg0fWG39JJoOyAu2PvPD10z6JP/pXc3+bOa+djLBj+9iC35rcx+PHVTqQ9PY5OOHbEgPLjDJzNbpe3nNz7YP3/FEh3Cq8bY2TjfmJN7RhIdc+oxRioNtQrMbGH3GookOno0nLz/LRzcn0MYHwLrAVlaO5QUtcG1oPStqcwUP+uFT0nc0yGjO1GcMO9a/8SBaF/CtRCt7hEO7zTafJfvCfjX00f/wk7/s+ODi9+yLNv7yOfpzzx70zm7ggu0ghf7ErNYMfCanceYoGG3FjmKedvbPvmAU/Wir81V69Gzu2TzjK1/Jl8RFOW88xG81ODZBR1vyka2XZ8mRnhdHety+eN0+ek5P2tHDU+PWFxvH7s15Y7MlGa2F4oU5QVeK2KIvfzQGHfKDOe3HDuYHeIdMijhXHuwZD82vfGVrNNgMTDZubtGhtdwBinkA1pqYPrU93PJ7+HqR4D794dm8ywejiy96Ed8dloDjA/hIx9aFaMrJm29wiHdkx0tf/qIBBi547Y3SPZ8tVtElnVunrHFkxIN1xtetDic///zzyzZgHAI5mHL4Yj2DVxu9sCFe6LvcjM/1wgsO8sBpraA3/LYu8h/8dpAEj70dvGQxBn/GtaaiS67kI1v+TCfg1WSDFw3Pcjzy5WuXs9zLs9PAv3QwJGBU9v7ZSX1n+AoCJnN/J0hQ7jBIW5fAsJcxAobA0CJZgN+3v9oEYAHWKXlJg2BeUNYHRtApMVQLTn0tAkbAKeCiKVCD0+6y6etLIPBgBW3BqcWiBEAw/uY3v3l5AD7QfuqCv2Qyd7EoxvvK0Pi+dMIDenQQHc946RJM/eShRYweXEq6Aktf9A8XOuDTd7BgLC7f+ta3Lv5KmixMijEVCyP7FeTRssCgFd/1bf2I4HaT7LW97DmcyagGGz/RA0emdNQ9/MHXR7ZsufXijE78rVzL88K5t1Ho52T5bz6sP768cZNISlb4Yj7cpia7o4VHizq87tkq+Hwznvq6yGbP/LKAm6dq49IHndgAggsvXAoe4jX50A1XPGWz9As2vvhZ/mYeoI2P2uGQTJhfrpIEcxFsdPNTCQ4+zVEbLv4sjrjgxJ8xkjvF3Iif5jA4mwg8ijP8XbxqQ09uyYy40Vi1AxW82hA4POLr6pK5eKUHa5rkTF8XvPir9ExWY/Aj3vQVnrbs09cyxUs2qF8tnsBtvIIXcoY324AhJ90p2pX1n+Tga9krevmwcewJVlGn+7UB32Y719LK/sbhh1za8EUf6OT78PVlKZpkNM4Gw5hg4Y92Gz0bF3YvBruX5LK5xNizJBs+dT8VVKPjcMObV//ZABxdZMYnG1pjHM44mOMby3tf+ogH2r0Z5mvk5bPmQjhXT2QRD+DPD5sD8OCbTcgvgQbfPOtggk+gwY7FAHYG63LIRW5+2ryjv+xk3Yvm2i5bd8iEH3TAZ1fw+sHqp8f46sAJDP/KT+kUfc/ZVQ1GOzyu4kiyoAGXmOfwwkEkP0q/+ozni+zQl1twF2fjJdzsT++VL2+HedHPTvmYsa6+eltfx5vnxqrjp6+utIUTn4031rVlcWUb/emgn0g2bumHz7jmSbQ8bwmfNrKLsx1yBheN5Fc3butoPYX/HAMmW7C7KznVbC4Oq80dG1g2NBeLYdqTNT6MZe8OodFQjLP2gTOfwPFP63L45K/o8Ce68PKgl574Cw7O1qbamk/RM//pUpzIDk/BGrc2ScfpJ11u++p379N/6+naBlzP8bh21W9cffG8cvPf9B1d/fRJNn1gWtOtyfroVBt7eSEnBoXHODT1iyvmYodp7N0XhtEzDh76xaP133i6hkuBR5tDDPR7kZCPial01c+Z0LEGwGUdgV9bL7Pwr4gDYl2xR2zHDz7EPThbb7Vbe4q3Ym929lMwL17ILP7oozdxugMbaw3fwTu+9VvP0LTvwoeX4dYiff0KAy/GWGvJJ58gjxwGL3ybT3dYtHkOnvXji/9Hm161y4GsNZ5d5gve8JTPNQ92PsBrTtK7mK3op2M2sr+w7t/L89bAv3QwtKJywnt5fhow6QsogoPAJIDZvFk8S87AmOhqAVkRZAoK8HT1hqFgKaAVZN0/3DbQgg/4Ar5A4hKgt3YvkFqsd9FDF33wnf4LThYBvmgxAd8i2JtA+IwDa5Fx9SUQmfAEt69mWgxLCLXTEbz4FswVNPCh36Wf7JKTNgTJqW/lMDb86Yt+0VbOIAz+T3/604u33377wgPOFU3wXRYAiy/a4YEz/tvcW0zSlf7wBRtf0VsZgo/XfXYfju6rl8b2aY//ZIqexXU/e35K5pO3k67n5DnpGluxsZfElPBnI/XaOP2zi6ufT7BhBwB8SoFf8sQea/f8pgU4PYNDj2+hY4yiPV6TBQ73+3MeyQIbn/Ly9x1f/8LZJHu2AYO7OWne4Cn/xA+4Dl7NCQlRySBY/RIxNM0Jcpqn2t3jkUwuz+kGboc7vdUVe+jUODxJmMBKctCXEEn0OuiB11s/sAqewUis8IIeO4UvvSabOJJtyKGgBz4YCad51qFPOqevCnjP+Uq8hAsv8Esixal++gMXefXjAzwcNlViHvmb29kkH0ED3fXD2uDIp8gIBv5kUkcLbTDhB9dXJMmDBwcXkmp254P5MprZnY7QciWTuI5W8qWbXgSg64Ivn+ITxkiIycHmEnXteOoQxVzkZy5+S2+tM2SwRvAPCbiXA8aBw1t6pEP08ddhBB8yFixf7+9m8bd0ku7VcNGhurUHT704SGZ1+mp+mDfma3pQwxlePsP/6AF+/er472sieLWRW208nbKJDRO97Bcq2axDab4IHlz+DMbleWs6aN7gq/7oOvjgK/rgzFfAKTvnyLPzMj083HII8cC1tM
0fc7I8RV8FXx0qsV0bT/19kbN+27jVa/pN//lKMHitgGkupQPPfJsv5itqJZstbu36+UpjgqtPjQ9w1drgcWjOV230Vhf6K+m95xMuGXeMthNOf/Th3PnQWPot1teGb/DFslMvyUX38tEOt/iP+UQf8Ka3fEotZ7B+iJf4MVfoozgUL/jWJ5bIh+DKZmo8mCdoizvJH13PjUmfq6N0VVvPvTw1Bi58laOW6/TCwBj8gqELspdfknPnEVkVfr6Hox0kw5V9Fk++qIYvX/O8eoY7WPyIiR2CNM/V5jLZzEfrLjuIbQp9iit4gStbWL/B9FMyfOCx+cqXzVdxF+31l3zoInAraKMh/oqF8ILpYAuPcgdxDi57DfYHQ3fWBDB8w/6nww9j8ilyRQu8fZMXEWjjW7HGkMs4/KBj7eGbePInMPqpmHwGPTpDm5+j1RrycIt9Dlv4IT/gz9Y++POfDj35q/+UR05BHu3WLPJZM60t7MYP8aLgTz+9lj9ap+GGjz3yX/ThYQt4jSE3Wg6vxKx7ef4aeOXBUBvhl4kpUb2X56GBgrVgYjEUOLoEqBJnQUcQM9EFqhYcAUFwgEfwapHQJokpgbMwdF8AL5lu0aExQcdhTItJ4wRF9wUf9Aq4eBJUSwRaFDz/5je/ud4Q4wd9iwJ4gZlsBTb17373u0sH7mvn6+hGXw2PBcWhQfLGb7poPJla7IIlI3h41N17BvPnP//5Wnjco9clUegQS21BYAfj6SL8eZ5nfd5Wl1yAU/BHP8mGhmSazbJlcOFbvZx9aNUfH8uPPs/ntbyeOHtmT3JUu1/95gur+5PXcDVu65fBxht6khF6pK/Tdquf+ugTX3DTc8lXevA2ySLMpuRa3+0ZLHwKX4WjBChZ9WV7bTZb/e2i5cs9vNFX4zHZ61ua9bVBM3fEAUmBWhxoHPzgJXHihwTF3JGAuMDjvwSxja5kRGKUDflHSX6ywyshon/Jin7w/U92ySRJNLfB7kG2MRt7wIsXakkW3ZMD7ewUbXTEBIcBEtHmaIlsOjJWPOvAgW7BbJwEQyd0VOKGTzznV80ZySedkTXfYNsKWdFT2AVv7IGmgj5c+DOvHSwVb/DoPphw9FWcdn/Xp0IH/ewKfhf+NzEEy5/pTVKsP93ioSIxzj97k48eOdMBHHjHowNEtupnw5Jv8sKPL3LTlXWBXPrw108BwMDhsIONXdrgNqfkKtY+sZyf4vnLWzKejsDi3xcy+QhZJNJ4wkcxePVnTHrKV+Bkaxf/Z1+ylugnt7HNBzrJJ8jIxtryGX3Z2SaBP6GzP3VmO+NseviBC8/JxpcUc1Ef3Pl1esrvteMzW9GBjZ1SLMgv4Bc3bQrc15/+4WED/Gebavj4Rb4Rv+FBvzkNj0tfusjXtVfyw3T28NWhkv7GZ4PqdA9mcS5c9ooPvJHjLOs/6Rf+xsWDvtqTCa74Dz7ekif8jV956wOb3cMZ/8HH9/m8utj76BlX+zn2fE4/Ozbbawu+Ohk9m0/pn00ax+/6+y7ailX95IYPFUu3bjy98FXxAKx29NKXOS9G6K8tXp96pg/t6VkNR2uCvniyxnjGl9J8w0PrXgdRzS86aC0MxvqSH8Hn3nymi34qBv/+PaJgyNLXNNrEEXGK7sV2+NATC/GwPkCOdFU7HIpaf2tQ8sFHl+EyLn26J3s5RvwYI18who7ai+gXt9XiXHpEC850fjF0K9FGw5gOisRYRSwUT60jYNzjyToi96Y/eoWnX1H0N9PoTWwX2+Tx4PuyBy3wDn3EZfK77xASn3AXF9FrzcMXWGuOfMRaxx70im+42Z+/27tZC/iV/zHcuH79ITcTo7TBox1dsO2jyNocca+fDxlDn9kWT+xAP/IusovZ4OQ51sT+VMCl2Ht51hp45cGQyXgvz1sDJr2AIjC5JPMdCJHMJBf4THABQzDT32IpOMDRoipY7QKbdkrgBBJvPNQtemoBVGlBcI+OcYJMgabaGwl89yk7HgRk8AJSYwRM/FtMXQp6LQDRbDEuCDot16aUmKoFxmjEGzh8C5yNcb/XhehWJN/x3GJEH3CH1z2eJdt4Dg+eXb3lJYfLpsamB1706bYx7uFVu/yPZz/+8Y8f+QS//McnPYDXB5eSjOrK9id/tINpAQmP9hPG8+KNXnV80hmZl+/4it+FcU9+iy/9RCdayR6d+pe/YGuDjx/kD2p0KvolJTbi2QIOY+I9nGptNk6SsfwvGdCEI/4kARZabcaCRz85sod+fmI+uhbH6hqO/qise1dywh2f6OfvzW/9HQwke3xIpPhlc1FtLpaYwQsWX5IvMokxHeSoJSCrV2PotAOHZIM3PvHuWULU3wTwTBdwOfBMTvHCfDLXzHeJF1uA9VYVf54rkhuJFz104SEbqRXJHdgOzehJMUYhs7ZimISxQ47wghEr8EUP9OFCr2SRHHQh1pEVr8aTKXodytObpM2aTV5fxMCVvcGbI0oHA/pLeq+OW3m4baJtauKTXZMvPYjbdGt8hyJ0D3b92Vqi3eE6fDYfijbyu5ov+RHZHITRgTef4qMXFXQtGYYffTpld8mwuVW8ECdLpCXGYqy+8KJvneOndJN9S4jzo+YIWvFMD/jFq0vpMC5dkxM/ntmTD5ATvnSaHzWX2TBd0GvzxPj0Gg7ziHzWcv2tc3DtPMdb86+xZLEhJgMeWmubA+YUW4HnF9od6jUvtdGTq7kT7nQMFs9gmw/0xlZ0kg4u5d2K53wQnGfj05F5Smbt6aixam1djdGOn0q29Nx8qE2dDMH3hQ/59cVT+kwGfKa7xRee5WH5dF8pVhnfvb58I7zG6I+H5lBwT8l10gCjnLS2rfvGns+r47PvfI5euE5Z8e4gQmH7fL1DivRHdn3G07d7l/4OhdKNur7ogWuMNuuw+cMfgwWjz/wT3+tLhvrDiefazKniSDyZw/j2nBzx2Fg1eZpPlyJuZdfQ2pZ/bXCtLdyv3GCycziWn8aKY8Y5OEBXvBVTwIpt8Wb+ii/R3jw33yYnvNnLeuMlB91UoptePaPRuJ6tHbWhDbe1Wc12ePSzJWPh9ywnI4s1QC0Ot3alr3TE7x5ua514zwbodSgmtxGTFOuOsdZfXwWJ1Q50+FC80qGDHXBwObyjA+sWWBc/tTbLmdDEL3hrGtxiOjxgxDtw9mZgHQAp+BPDrWtg9fWze4dl+ugDrV42udfm2b09VWs23RjfSwg1vNZffeks/xX3rDnWDYXMeJVX4fH+tdClljeivPJgiNHv5XlqQLAU7ARRC50EWUAQgAQvQcCmRJLcm/4WWcFTUHBZKAWJAgVtbHBvcSyICIgCmiCCTgmdIGnBb6EEp0+QEfjRLnks8bYQ9JUB/GQAI6gK3C73eBOcPv744xfvvvvuxd/KYGx03Su/+tWvXvziF794lLP+5Mnq+LTA+WqhsS1Y6cIiSVbtZLRohMd4CTc58Yv/Emw6EbTb3IU33aU/z3//+9+vt8MFaXjR3QMyumBTssO1/HruSqYOUtaea990UJ2NTxj4agvXwiwf3Z+84RkeNXumC7rfr
6CyUwu8urHZGe2l033t+5xsanQkFekdH3SPpjb8pVc4+smNscEGn53A8Wd1ny9bnM3PTT7T29I3rqQI7ZMHiQBfK3lJN8sjusbyDbyVOGhLnmrymLsWeTrFH/8im5Ie1HDhT2Jj7jUPjTMm/Oi7JBva3BsPTqmOTzygKx7AT+aS7uaF+SQJkXCJY+Qv4UE7msUKcHjcL5rEFfiWT3KLmQ5Y4MFzf9con4Bb4odmF17hgS874i96eIMbDniTw3gJuWQLr658P9nRwyuY4nn6gK/EUCIpqRfvwUka0TE+niS8kv10W4LIDvmWe3GXn+Sn2uI530KHHvDLnpJwOoDHM7ol7e7JbS0ptoNziYXiOvu4+H/2IoexaJBfvHQg5CsnugpHsUJdfO2LSwczdAMXX+3gh+z5tTyHrHwJzysrnHw0/+ynHHTSIZQxzX9yOjQkZ/5gTUgH2aI54Jkea6cD44znf8U+4x3Y0Rda+Um1fnyaPwr86ZpdFDLTC/z0QI/5h00HHfARNZnCoSaXA8X+NhI++vlfY20U0mE+AD+f80bdWLTZCB9kBsf+bKTPVUzHs/HZgzzFC2Ndzct8L1sZqz/Zg9eOJr3yx+a/Wns0a4e/scFEqxqPeEue5o729BtsPht/+tFNXxcDtwJ+6boPlzpetFfqf2yYm+DAVM42z/AuzN5v31N0a9u+5QUuFxsVNzr8BMcvopEs2aT2U1fZO7jth6MYBK7YInahn5811loq9tQXv1vj0zOfF1P4uzhPZviy4zk2PWiPXrpYezTu1Buetu/Usb5te1l/8QJ+MOHN3+LN2kQeMB2S9PMitMhNB80jsHCo6bwc170crjUL/tZLMcW6gYa5qDzcDmviRR1OMF1wwOvSZu4p1lixy6GcfKS9C3rgjMMH3vBfuzZxsxcODpzEMu3iFB7EKOPYXDubK3xWLEUXDTiLcQ5wrM/8TU5hLB7swcR2/fCjbc01no+Ks9YBuTy8xhqjWMPYjH3kQdZEORs+wBZH8WRv57+Nf+uttx7nQXrDJ5p0VdGnrM7zB7xpJxue6cp9X0+L7frv5c3QwCsPht4MEf9zpBAwBDwByCWYFUAEbAHE5sHip9+iLEAYI9i5bGg8w+USpAQFi0ETfxc29z2X2JXIVZe89TaSReBTwMALpkTWgtEGqq+AghXwzwUdf8Z30AIWTy0kF6FbIZvxLsENHqUFN/it8QaXgK24Txf6wOIx2W3iessKHl+C9iYN2gu0FluHTvGbrsIn2Kd3C5bima4sHG2A4kmAbpEuWXhKLm2ro4XNnqf+LuJfle1zryyd7FutPz13v7jiOb4808X6F7m3vTFbJ8dJD96Tl5Nf/WhKVmwW9fe7dovm8t9XYNrZij3a2OM533LvAG7/bgj4cMVXvOmTlGRrzx0onfSjjW56Sga6ym/oQmLB//kTGc0vsPDzV/JINBxUwCcWgMl38Umm+OR34omNtyREO3p4TVfZQIImjsANR7rBB7zqisRHgo5PsQl9BZ99iedAFH1l8Xmmh+YlmcnQwUCJPBj2wnN6E5vESklYB036Jb9wpEs8kVliKI5IjMDxE7yQh63FVjK4xFN9ZFCjTWZy6pPQwhtfZPalkNigHU+SPO3o9Tck4EOPfiWffZ1CD/iVyIJJH/gUI9HEV5uC3tCDo9u+qGFHfqDgWX++yz/1wanvn//858WfZ+Pwrc5m6TrewOJPckwXYhkYh+T0QX5tkmb/M1w+y+42GtmE/HyLLeCiK3zSEzsq5O1lBf2bB21IOsBro0AnZMwn8WQMWcnD14sb5gtb98eDjQELh3t4FH5PR+DwBicY+JoL+Y01Gy/xQy7wLrQl5H09Ri/wFG/4gfnXePi39LM4tu/Nb7I4EPry9ta5OZnPJEMHu+xiHhV78EUuhz/FiuRD2z2crn7ilt/oM7/IQD9oqZtH8c7W5SNqpXkLvvHqxpLr/PKHTMbpU5Jduyu82lcP7o1T3PNRukIbb9rQot/mVfoLbzTU4JV8S5v7+Fs+4nX5C64+Y92rFffB9xy9C+Cr0rjVx8Iv7AnzVN+J1zMay1/PxZX4hD8d84m+/Axndsvu7Jy81frC716M6sug5nlx1vjgzRs5sbhX++KKBzyC5f/mPloLdz4nz+pleU4vq8tTph27cC+7z04nTu3puPvorz+R39wGo1+fWKFkA/6rT5xxgOFwYeeIMcbD4/DBOtzal77MG/3l7HRvPWCnYigYcQkuOM0vB8zpNVnB43FjsVhg/QC78iWX2lzFg9iFP/EpWvKBfFR7eYg1O9+zNunDp3yxdrFSHIcTjHUIH9YmurJO9yWqZ3pMT+iTxVh+hnc+66Ira4kxeObb1gTrHxz2IeyAnrXQ2kEHbOSZfuiz/+AAn+hZU9mILsiB91PH4OAnPxpgrdPa7Y3o617eHA288mBIkv6qIuG+l3+/BgRBwURwdQkYAkCHQhY9ba42KwKN+744EFg8G1fiVpAkoXvFgtBVWxoQMPyx3eA9C2zqrj5HbYEQAAXdkiy1S5uA21dAaApG0Y5G+Kv9oWZJvxJueCw6Lfz1/fKXv7z+LlFyJG+8xgs8JSrLt3aw6ui7F7T7GodtWuzhDzf4xoBxD8748MYXmmA+/fTTF++8886jXAJ/+IyhH7b74IMPXvzkJz95lCta0YPPImaB6O8noYX+6iK71q7v7N/n7huXjYxvodGWPqr1GYsn8qRjdferi+C2JlvyRHf5iFZt8KUXffWzOZoPtzdXdEmvy09w2iUp3vYbQ/f5C1vBDxYNRZ8SLXU6Cb+26HXAggf+qw/eCn+0IINzgNBYPtHfjYkHiTY8i9P8pz8FTZs7+PApaZG8tNECpz2dodXPTtCzTogtHZawYbjJhq6EzxibWriM6wu4/B0v5o7Ep81ryRua6dD89kxuPLnQhJO+kxttyY/kyIGpBMg4dqOj4CT7xoqhEiQ0SwrRzIZkkJx1qC6+gkPbRpEcYMkugRNT9dMjHcDVRhK8fn34YQ/8wBGeh5sPWmPb6OMPPnyIj+CM0UavklD00dDGpvDiDxw5xUDyxZs+vIYLDRuqNlBsUnIOL/x06PCSz3s2lmzhwIs+euBT9O5AAX/03CfvYIzBi3UIz+TCq0QTnfBrh0ccdxX30KUjPog/cVeSDZacDoT4JTzoGUc2PMKvj2/AiV/rE5j8KL7wFr/bbzxc4NlYiY42JZuWPMOFt52DYOmln02hsb5AJ2RhKz5qvuG5OarusPQieivxCY97GwT2hav+5rW5wj75IH461LOhYDdy0TFb6O+Cq00GHbrggdOFhtr4+KV7bf1UDH/5EH7hRgsefCnwFlvD7VDTXIquWp9aCZfaWHThVOdDyZG/Ga+tMe7BRtN4MYnrvX8AACAASURBVKjni9CtgNO3ugkXvypGaEMrXvAVrXhbPGiTJ3i09Lu6h3PHbD8YY4PtHp8nzhPmGvRViZ7HcLhfPnre/hPec3qJ7+TOfsbXV11buLcGEx1zyLwUp+nWYXf3ntO/GqyYaE7Ct/RXBjFXvLc2GReO5ell/MXX
4us+vi/mb+V8bqyaHMbRFTnED3PE1ZeM4m36AlNMBdPfroMHzMNtfYEvXviDAr8573lfeMQLHYlfPYM3t+FpDjRPw4ePCrjskO9oa83QJhZoE5vDJXaZQ2iQPb6jzT54s37FX3/DM9h4ACceqtnfAUtrXr6CBzG1FwlkAG+uglesPXDTr3WyHANu81YbGPEafjmL4kWKfZmfp8lPHNyIKfRunbCekbXDLS9DrLnWtr5qow/P9lytob2kRg8c3tCXe+EfPvrrEAkvnvFNz/A0B7INPWgXh/t6Cbw55ufeeABzL2+OBl55MMSpbRAqnKpn9/eDoX+vIwjuAqCAyB4mu2DhxNclGAgObSjACprgXYpgU5IqiMBZYLDRPhdKwaLgb7wxXcYKaPpLaku+PBsrsW2BMk5/i1eJpyAbTjRaVFq41JvUSd5bIPsE0/gCnoAWLePC0+eW+gr48GyCmhz0VaDEkyAKl/7qFjN6aDFr0VxY9FdH/i7Qe++9d/Fo8Ui/q9v0Djc9eV66dIQXPJiXFv/GrL70S+xtKti+ghb46seO283KrT391X72r+3cK2e9+LeffGDV3fsj5f2konELt3o66SRTPBufXtLh0pdgSa7CGR0wfKT24PhLfqu/5EbdWGP6+x/RRzu9LQ0bHQc5xrvYeGVYWdnbc4e5fMB1zh+0ybRzhj7Api/P5oP5y39KcCRWfD958I9fhwzo2Kyac/GrDx1407k2G0fwEmvxiJ9bS5qPyQhOW3TU4eseLDi8il2ucKKloN89OekITffmJv9PdrLCZ27YABvnEqfANx/bjHeowt47t7Knw0Iw0Tff/C0cG3+yiMH6HSjgHQ16hqvYQwaxEl8S1/SKb3rsAAUP7IVvupXUliDb9MCXPtFZmnw4muGXjLJluobX/6CCnqKPHtX8z2EIntAhP57JVJwxngwOM8iNHv3TuYTSvTZrlzY6hluNpoufOFiCl+zoGqOGHx627EDIukcv+sBJ7PuqJ3uRpZ/VdVimje3ySTj7mgs+OJpDzV8w7vFCPvw93DZc4i94/drQBUe/bJ+N4kcfG5CBzHjfn5GJCXy8wyB6i7ax+KPjDkuzuxoN/R0cwv/1r3/9Gt9lPpsjzSc2MFbBv7Wiw9Xk6UtXY+Es9uDNRSd4Y7u+MAPT/MJT+NtAiUFk55vgxC20+S26+ErfKxc50DQmnwKHL3R2riaz8emfXZuv2sRMeUX9+uDbAo/++oINL/jwgs13imfGgSVXukg+dOiAj+aPwS+9xsVbtNW1oc2W6cgYfZ7Ve53y1Qd2S7gXT/0n7LZv3+rUPb7OsdGBoz51tgp3P+0yR3oBCF941edVLiyeRbsaDVdfdcLpuf7ohxOf7uNx71cGG3/+qY2PrUz9bF7saJ6AA4/H5YHc4eUz4i4YcTReguEL+O7AvnUMHe1o4cM4zy56NHfwGww/tE7FM/z07gC5NrBK/tffGdox3aPX+MZpi3/yJEOxSOykD7wo9AwePe0OLsQb/WJlP3fWRx7rFVrkMvbhFquNFavYGhx9GqsPHn30q6+8uT2UPRI4MasDJDQVezC85ZPWQWuCQyA8yF/o3gELmnDt4Y9YDKYDe7KLFeyHJ/HYmk+WvqQyhlx054CpPaGxeEQPTLkQPsmFH7y6z6fpRQ5BVr4nFslh9Mv7Pvvss2t95Ff38mZp4JUHQ5yPs1c4cM/u7+XfpwET2CaAHUxa9yatpNglYAgK2m0cTG4TWqATXNyXkAgKAhOcXQKIRFMQ32QjiQURfV21gxXEN3l339UiA34XoxPXJjYfffTRFXyDh9+Ff8GenIKeS9AVBJUSKnULTotS9CTSvW1pMccj/OqCpOQ3uGQlEzxgztIiX7ClT3y00JR4q+GJ9sIFi5fgWkDRMw58ukZLAWMh641veqKrkgR2tklp85E9wwFPukp39WWH6mQPfp9X37U/RQtf5/jgz3pxuu+iw3hc/zF+Zagvv25MNqDvcK3vswFa2Zs+JSF9CaCd7ltkq8NvgYfDogxvxX2+qk0CAc4Cnq7iZ+U1JyQF+Uk44AeX38MfbXiTf+clun2xYIPeV0Bk5Cv4IE+651PijMQAfH1tQPrCDg94dEDNJ81RCVOH1ekAfpvA1ac+48mej6c3CSAc+pX0Ex/Z20YzOcG6yNM9uJIvc4nusjO7hgcNBxTiqHgKRhLXYUE6Nack/PC48IXW8q+dbUvC6EzcKnEVa9DgB5JAiSa56Ltk01ixi3zGSdTiHS/oaY8ufiVw1grxET50SnTxwE7si4ZnfclFN70IAsPmXkZIFukBL/DiI5nxBg8e/I00tscTPGDxq80a5Uss/qfW5vN47cbCw3+NhY9NtINjE3BwwYknesM7vctX+Elf9WQv9uST5IqX7JRPGUv/Jds2kt7qphM46AI/eCM3PfNzPKnx67DXGPruRU02IEf0bEzor8O9Dmfhlfjjx5oMrzH0jj5bdnDUAVoxAI+K/n5ygHb86sMbneGleAN/sY4M/M14usx3wTc3xaLmt9hgrTQeXfBwaC/3QCe52Rcs/PTd/MyPG9sBpNii2EjhB04wLrpQGwtnuNkl38Gziy3NV/fFjfryYTy69G8pLuA1Wv3EjF60px+wxiePZ7bNj/LdYjM66OvvwkO8pTvyZaP42Zq8eIBD4evBq2vX17hTRnAv669v8YCNr/prC/e2o9tz48j5sjH0qB8s26n5l3lP53strPuerTPiYL5We/p1YCLOw2suKtFcnPF4yps8+GPb/ANvfalh/omd8KFjDJ7ERjGAXqIbHePPoi3e9MGnpFe1NvjDqTaGbPryw/iOBn9sLF/Cs4MCNPmSeZe+O8wKh7F8Vqxxr2Qv99qMxQN79HN7bea5OJ1f5ftoNh/cr+8kW7Ywhu7TWXPS+mIsHZuD4VHHp3WFfvpqlexgOxTrKyb6EH+yceuZfEI8ZddgxH32VtqX8TG4Pefz9EWvfJNM9C3mw219gNs6AIYu1GSxvtGly5oInzWT/eQWdJzM/IysxQO6yW7pD5/5DDpie/FJTTfkoU8y3MubpYH/M9L8N+U7g8h/c9gd7P9RAya6gOJASLBxeZZUCxomv0Bj4yTAmsC9aTDBJV9wuATN6gJ1gX4DrADl6guiRIBPUOkAona4tAsYAlQ0arNASBwrBcX9qkFgL+ETGAueYLV3EITfDWb49HMyf4QaXc8tfHD27B4/fp7Vf4cdH9XhrYZPKWBuf7K0QKkXT3qtXv27D5d+ATtdeU6feHZIRja4k6s6HPosXv1cBg5jXeHShobPUJM/3lcWbcmb7OqF1b9jelbj5SkceFbife/Bd4V34Zee/uQ2JnyNP5+jo91FJ+rF4e21Ra/FM1g406N7C67F3+ZDIWu2VIPdYn72RYK5UQKyP+czx2zubLTh64rv+A0vHiQN+MBnSQy41aFDhd7amz98I38Gp9jE6WuDTn4w+RqYYMmCR8mHuSmRcUls6LKvgdxL2sGCE6vSF1rZEl4HEmIDeHjIpM5XyUYuBxhg+Hi2wyN/XltFm87pqESxn3lmS3K
Lo+RF0/yDb3UIl+QQDrKD1QY33iWyDoPFZXrWL8blB+tn9KT/4baZVuhBAqmWYOtL9/rpDU39ElEFDfTj1YaCLfBtDWBDtF31WQeyZf5njaBHhe4UMrGRuGs8mmDg9UzPeMEDudgav57pAr/0gB8Jsouvg4ELH2yq0Kdx4OHEn3u6xiM+3EuQ8Qd/Lz+yDz1Y5+iIX+APnEQaPT6UPcmWz/THSvGVDdF0b/7jI7/RhufGX8zfCh4cBCpo7Ial+dicZ3d6pHO4+AeZweX31m140KPrbKLNl3mKNbx5lI29Ce/rkvxaH3nog87oQ5/SIZtn+hNHFHRcdKLWJx7SvwKndnwFiwY54osOomNe2MiAQdM4tPrKoK+NzLvWPHTQdcGLFzy0CQt3X9SAceVv4aZT4+gcfRf66ESDLcjEHujRuwKuw7NkJm9j3Tf33BunxpuxauOU2vsj5MkWD/m2cfCszWuLLzy5hyOew4MOnYghDr2ioxYzjdvrYu5Wauu5tvSxeomPYPWd90/BxPPCG5cvudeXT4XTc18/97fQyM0X0jvY5oG+7Bkum2Xw7EXX4pP7YMGnI7D61+bpMXw9o5ustanFM/TM9+RK7nDEY3Tg0VdZGovjKd02Jh1Xx9/ijU541HjYZ+PMR21d+Elf4dUnPvrK0VrB59ECq68412GMNVZf/BR76Moa4WW2MUq5SusUfzYXxBK1uBLP4NNvMXD/RpU+Y4Ixrlgg/uCHHNr68kZ8DU5cMZ5OwFnnm8viAV9ywSUO9QU3XsiDrjybjugKPTEQrHWRHvice/5pnW2tAWsNBO+yJumDy1rhwIb+rPHokIHfoQMPfwavGNv6gld9eLA2GIs2Hqyh2TCc4o4+azF4dNjMGnovb54GXnkwxCk5Fefv5NfEyGnePHW8vhKxg8AkgAiwJrmAYAMgAAiqEu7e/JVk2WS2MMKRTVuYCvYFdLZ1CX5tUGpDp6QH/pIfQXELGPi6bLzAdunHe3iN1VbAEazaKLkX5PqyByzelRat8MaTRLDFjr/W7+AJXsFagBNw6QafFTg3KD523G6M94VNMOlseSGHfrVNPnhwmwjQuYt+BVj3H3744Yv333//0T4WlZJZOohnfMfvuXjjg6z0ADe8nqvr8wwGT+j0x7JbaMmfTMmfrYLBQ/fVYFeX4UmfC5+O1Hu/+tu+2muLr56r/WTHQcviAet529zTBx3Tr/v4o5cOfbTrp3dlf9JmrtmQOtDgc+SUMLSBAR9OvmCeSBrYxn2bzjYb4EvsbbJ6Y4XXEoz9wkvCbH6XmMDXfTrPHv0kozltkQe7tiGnhIMs+ptDdOS+n8bQCXySDLHI/ExPdI+G53TKx2xq4SFrsSm/zDbkl0Ty697Y0jEdGmsDDlbs66BCYiJppHN0wcIPTrEZ7qAJTnI4yIAv/sROcPSXnh2WwpFtyEtWMPhzKRtz9H9524xKzOgWj3DEd/7nZzzskvzNc/29eUWnz97xSQZwflYJN53jlc3QE/uNZzdxTS1p7Esb/ZJqdL/zne9cuOiKPsiSH5SoSpDJVlKPvrHopRP84wWMPu1widXeYPIPMNqM51t0jy6ZxGI2YTd0fZqOLnvChRa+9LfJAqc0Z3uzTB592uFvcwZ/h5XJDCc7tS7GuzZ8KfzJ/HPIQAa6pSv97uFweZZkt6GoHWx+ni/CZw2lM6UDAfh7u8t2+Df/8ZWPsp31sUM44/XHi2d2IL8LPvL2lavxcOMpv4pXPufrI/6C73wh3NqMVbNFfHlW8NpBZW1guvABP53yBTjwkY7SU7yTHw+KcR1KGWdM/WDwSPdwl6PGN7xd4jd/a62T3+ClOEDP8IEPv/mFJr7cB6OtA+tsGy+ewbmSMXza6MR4JRmNaVywalft+Bfrjce7+JCus+OF9Fb6WqvntSd84V76wT5Vg9+yz/EbDXDhrW/HZptti3/w6Yc/kc8zO7FdcPkB/Vb05W+Ns+6IyWKHOeaZn7gH02UsemJkbeEKLzra9lmbAzelOMcuYo4Sv+7DR0aw9Wmv1KZe3YHpeXUPbou+7de3bed9ttAeH+61RzN82pZP9+LY6pBd4sk4ci4/J2/gW3vi1ZhivflSHiLu6bP2axOr6Vqbkk/0UqH1XB/f6StDc6cXNnjP5mDEuF7IkRcdsaU8qbWLfbXxP2uIdQ4u7eSHQ41/sdbctV534Cgv6KsiuUe5plwHDXjEZGPFcWtpX2vCIZcSf8Qk/gwX3ZIFL62reDTWmkOn5oJYBkc2xie5wdGdfi8y5Kl0mD+QpRhI51988cU1Rvu9vHkaeOXBEGeWGHFYTmOB9mwStFi/eSp5vSQSOAUfl4DhIEhAFAz6uZiAIKCY1IKZgCAYgHf9L/buZcmWm2rXsC+l6EEHNxw0aQDmdMNAAL4CbDAOcNuXsucz//Wu/aGYVbYXGGxXKiJDmdI4a2hoSJmzSvBo47QLn6CwC6DND1wX+mpBoCTnTHhKdFocWQ49PASUgopNAdiSMAHVCXcBGp6+5E9GdYuxYNsf1W0xENxc6CWbZ3z90WqbdKWEkxx8uaCNJ7v96U9/uv+Nn0elxayaPC2oLXzpCj+ZyN5iqj87n7pZ0ArSkhMFvPETiI2nwJ2edLVolODCp0c1+t07kGtBbBzVrpUjvUtIzudtJ1vPaqU6vK1P+zU+2Ul/bd17Np7BVEd3aT7iXXK/dOGgqWYvF/tu0pAukofe+rDlXpug8quezUtzrYOF7JR8DgeMGxnMq2QB51lJL3TdO3DqsCcZ8EtOG2jJ7toyG52JHh9C19hHs8Q6W9NBwiUZkTxIJujUFxc2nnjRyVyia4dD1gdw5tM5fuhIjuCTn093wEUGY4Em3uIdOuKVmEdv8pFdIsIu6ODr3jwxp8kS3/Th4+RBE096+U9O2Tp7i182XXgVuyRvZMu2YqyNvbEC17/6zpfAiZ/g9Bc70einP/Gja35hftINHTToDJd91cYxHmQDLxbiQaeS6HSs3R+3NCbxaV3Q5nCJ3RQ08cEbTHPHl5T6+EqfsFsT2Jk82ZhNPKNhTCo29PyXbuaScUCrAyd00MeP7C5Fkkw/+rNXsSr5+CjfxFOim8zo4eUAFh+5S5sINnCA1Nga7/wabDYutuNr/NnW2JGRDnR2KeycbdEgMzw80DnfaPPtfhIAvjUMrLjAVtZE+rGFe3yNt/6+FjKPFfZV8lH02wDQDQ/yFZ/YqZ+ZuU9++uBtjB08wcU/PdBwqER/MsGjp/FQkz+fa20hV+sdmN64awOjVtAwjnyCvd0XH/JDchhb+uiPPxpkA9e4qBvj7bPB6lAKz+2Dj766mKSfPcVXsSh73IW+FQeB8YRHJzhk0q4t+/UMT9vaVhu8tSf5so9+OVK2TGdykg9uNLpfmksHj/jgobAnWRWy5ZPglMY3Ge+Nb9qXX+1nHUz0Fqc2OOSMlzhirNcm6Z0OcLJ3unjuAt9BTzE0G9IzXaMrbrfG5Z/Rr15bWnPNE/KIMdFLv9UnmdIzOtv+3H16Rhe/7qt74cQO5buNAxg48pliPJ/OtskJjk/TnT
8EE/+t9/60aWO447T3cNPhkS76zW+FPq580n045lDxrfmcPeBbQ+GBN7e1Fbusz+aO+OwAhA7iD/xiSzHB3I0eG6InluBpDUFDbIaLhiLO6LcWiXtkRac1Tnv7OXysc2S1zpTvabOWwgHTWtU6S3f+DQ6t8iprnmf9vRAz7i4w1pbiIL3AlTd1qM4e/JlO2TR90P/xj398l9XeQt51le+nBV48GHL4YyJxoP6WRG8HOftVvjkLmMwWHcmpyW7iSiwlb3011IGPpFvSDs7EVgsmBRS4xlAw2ElvcrcQq9EAg4agUA1HAFQEx4J2dZ9VtlAUlAWykg81eIGO73gW+Cp4LD/38Wtxia52+qCVDNqSGS2Ble/CLUiDDU8bnekrKcUPLNz4qemNjysdfE2AV/DoJm82KLC2oJAvvaOFnkRYcJa0C7gSQfS0oR8OWLK6BH1fZijk1aaffdyj7x5cn9VnX/V59XOyFgJ80y+71LfPC5f9tSmPanpp37p77Xstje6jGa9kujO8lWx+8vC8Y8jmxp/fat/DFvrpy+7ZvAU2/9BvIYdr3JItGZIpH9Vv08v/XPCTnw26V5NLsgomX4GzcMVfizMZowkGjn732dfcFs9LNviXAq6fgooHNp42mOK8Qj++xp+Shy5f3JKrXhhsnCJLMqgVsGJUcQytfkLCb3s7RTbrjLhl82ozbGPM9vFnl2iBK9alf+NMRvqKkWIoGd0bg2ydn4ip5rmxt+lSF7caJzpEgzwdGCwcWXrTjQ5fYQM0Gg/2QL++/l4AOgoZJaz6OzgPnl8q5PdSwDjBYytJovmejvg6SPMMn2zGluwSf/Lr86w4uGMPsMaezYyF8WfTDtQ7gNEGh93RMT58TDJsXMhHTskkXLEYDHmbX3wFrlgN1jM9XPiSBQ5d8MWvtUxNdnho5mvGnK7kU4PRJ5HXRkdtbEP2YgCZ9CvZBm3teJMRbZuBvl4lnwKOnJ7R5sfrZ2DYBf02CODJ4uJ//IbN+Fi+zGdsaNhQUq+WA6DRgRw/47vx4zfZBG001OYSXdYf9SlwyAfPvUI+z4o+NMjAL8inL3g+bGzgwF9bsgl87fDyYe3wyVAM4At0y654m3f698on0MyftPEfuNEuFqjBJR+67CYGmQv5Etuks/vGp/Z09pwdu8fTvRcA7GFO0lts1YcXPtEkwyNe2w/G88J2H1z96uzmHu3a1GKJAw32Nh/Zgy/3MjD5tTdPGrNoFcO0Z+O7cLdC93wnmU65V650BxP8yh9uOJ7lUn21il8wjUt1B7/k6Upe9coPRxvfyT9q2xov819sE8uWXrqcdTDJlbxru3QO967UrSwOvRVyF6/1k1fMNWZiFL2Dyab0AlMc6wAIneY/OmJuMPRsPqHLl/lP+wW2FzezozpbkcF9P69t/JpDYhXaeJV3pC/YvobTj0fxK31W//+z1P/P+3veujUCfjKYh+YAneln3cy+4ph8pHltnWBDey7rpxiGljbzGw3tbJb/e3aAZB6JX/JE8GK8dQ0eHtYZY9eBvmfygNFGdheevTS2HlgjyM8X2Rq/1mCw1gZ9n3766Z0PnTyjAZcs4PixsU0ftLPXJ598ctctO1j/yGqvhH754SObX23ffQu8eDBEvd5UpSonKqB899X/9mlgoRC49kBIYBcoJI7uBSmBQOIuyEoWJcECi7eoJVgloQJBC5+FQVBwSfRqPxcxATo8AQ//goSAxwdKFHqT3xu3rAoPv4Iy+JI0gV9Qg6O/wI9Hz7soaPNlz69//eu3i2b8w2lxVYNnG30FV4HNPd7p7ZmPk3N9PD9nV4G6wwN4bKW/2oKZbZKppLQFsoUeb8E4mtnd2FlAVubkLVlT48/eLcSetcN7dGUTY6GffN2T0b3Fq79zwwbZL9xzlminbyX42nY8G1d1Nmq8zhq94Lr3jL43YyUS0Uy+lTPe1XjkcyVQFl8HDo1RvtzY2ahJcPgG+5or7o1VMNnJgmlh1leSh2627idJ5pP51t+kIXNyZ4elzSebf+JBsp/8xQQLdTpKAsiBbzZU71cx6NKHjAqfZC9vvdToodvBgQ0pGfHO9uSTONCfD+ljBzqZ0/msRFAsA8uOYhLe4LM/GfSJZxIXSYyLfJIZ8kWPbuRPJn3uFckYOLT5dDTDNefgxjt/hGue06O/ByMpjVZviNEzFnRQ9AeDlg1hhxHmqDZfduTT+Qi92EA7fDJlXzTYPVnIjEa81P3klv3EEsXBABsbF3zAZGdjyefZVJ+DYLpEGw38FbKRqy+mwFlzJJHg83F+RkawksR0Mn5syC+sU+A8o9EGhf3RMW6NNdpw6UH/5q8aDZd7+oHtAME8Jke2bV1sPNkIfD7O1uShlz782Jf++YzNp3HWBhccmbWL03jbTEi6jQe5yGDM6QOXPPDQcG8O6NdXbOhrTmNODrT4MNngmEvoogHXmLJR9I2XOcc2YovEXknXNlbmYD+pIR/5iz/48G/+gZcLPtnjH+3iUfqSy71NihyEjs1buGiLO+IK2eiXbGygn4zsjq82l/uNjYufvdUKvuIV2bTlB2rjZZwaPzV59Snkb941TtrxBld/fStTugeTTdIpObIDukuzuZMs9ald8WqctCXb0lr41eEOfCvaFHz6MsJzeMVCfi5XSffWPzbbcSM3/zROYNemp67xT4e7ILeSbvv8SL/6wwlvYaNNv/rV+XK1/mJ3smvje+aUWAl2xy19iuv8++wnW2OYLKvvyhRcNOhRGzodmmgnIzsrYm+ypTu8xffcvFmewaPTnLsTvZXt8wwvO+jjG/zH3PIyRvzhJy52AyuWJK9nNtIvn0avOEo+dKxF+vU5TKGjezT0izVikWf9rZPa+km2eKWIHeim7+oUTTpX6IJfetOvfvf83EV2tOkGR1/98MnpQLV8KR8zF8ipJrd765HYRx7t9Ifvq93yJzYik/WxL5vI5R82+AmZPnKJda2raPBbhZzsXh7Evvgq4rH4aD00huSwZnjGI/ui555M9jONK73pAbdcw38ppctVvr8WePFgKMd7Tn3JzFX+sxZosraISa6Ng8TOpHWZqAKUxNTEbwMnQXPvMrFdJnubDTQ7DGpxIn33LUwFcLK4CrIlWC0YgmZt6HSqvQufA6WSixL0FkPw7m0k8bB5jubiCEiSX7RaCMCTkx3gCNSSTrZRg//oo4/e+8UvfvF20QGfTvDwaJH4+OOP77Do0g+Nktj0TceCJliXMQGTzC0w2gRg8uHzqN6Fh3wlAHhlW3LoS351X/mATyfteGvzVttC88c//vF+mAYGvWC3hseuZOzKL+iQnmc/mLNt8feenqu/e6W2xqE6no3H+XYJ7srlvhLfeLa48SMLvna2KMnhK+5XXkmQTQU7sScfdZ/v4mW+oMnOxtm8Mx/d8xF4JUGSGj7qyzAb02RoQ0ZGG0W1ecCn8Dc2/QQEzfRMd0nS/uTMfHfhWwFLR/Kjp5+fqPXRu3HoTZo+8aJYQhdw6e/eZlWiA47eaGvne80H7W3EsyWextMYNAfZjC3wzR/J31wKDq39AiX/MA7+R
o341k+tJEr0bvzxJ18bejQVuuFZkgpectj8Mw5t+LNZB77h20gVh9FrfI0lHciJP/zmeH8LhE3xRMMYkwW8eW8cP/vss3tSTh60JIva1qkDqAAAIABJREFU2QtNuGj0NS9YNBoD9oDX2Bcj++JIH3mLAfBaV9TsLbnsJRG7goHTCwNyg0MDHzz4u2frV29H9QWrRgeeZJW+4Gw6OkTiX/RGx1robWs+YizxUSt8QwLLH9GVSNMt37HRwN88Q4/8ePaWG10+rW58wRkLY2a9yq7szw7WVn4jMacnexmTcMiFBr1s8Mim4M2m+tAx3/XTj8zaKzYDbI+mQmby0JHuxqCf35EBLhhtSuPW/Gseq13sXwzrYBMvdNhGzc50d29OkVEhx34RBCb6+TB75stkoHvjhp6xR2fHNf74hG+Oo2P89NvAmHPNC3VzS63AN1bk0pYNyRjtfPmcA56zMVpw4Itd5094tYN1pUtjqA8t15ZkQY8fKWAal3jGF71wgotu9gJbCS+6tcOJToeT8QyfHsa5jSncvqrUx6bGArzxcRlbBe3ox/O5emUEc8px0kq+6NfPNtln7e6eD5sXbYbh8B3zrUMXY5bvrJ3FInOWbtse35Vv29YO237Kpq8XD+KbsVlea5/u0z0eaK7tl3f2BNP92jlYfc2Z+vNj/LK7PvHOeJ96ecbH1XwDH+/oejYW8Vsdo8nm4Nij+Wt9w7efEJsz+R6YH9y+/lP2n+QUs8SbYnr7jF4q4JEsxRp1c4wcntW9kBJ3+Aae5G9OmRvWNesIeHPEPtk8AsPf6Cj2iNts2QGR9YCfiqdg5X3ismexDawLfXTpL9+x5rAJmh3gkRcceLJaR+Gwh3XS3EaXncnfl9rWKfFYOxnQJle4/oYnvKt8vy3w4sGQibROYDL3LBGSiF/lP2sBwUSQMllN6jZdJrLgZvILBgKBSe4SIExgC4tgUECQCBcUaitQ4tNXQQVXgarFcYN8QVPQa7MkaHRtYEfXBV9/CYQ2JbotOgKlYKRfECzpkAR2Qh0fwct/HBOM0SGLCw90BDQFvIWyBB2se7rTlV1L4losBdQWA7IU7NNNGxrwzAM0Ff3aSuYbP/oJpjZH3lyCU8hAvvirk9UG1LNEMfug2yKbDp79DRA1uiXG+PERvsJvLE4WnWyffvAay3h30ERGumWXbNHzXYlboYOCTrp5Xjgw9al7rm2fw8uudHH16fjSjhbY/Kn+hWMLvsEvxDKyRldfB454pE9ySCYduBgHNkKjuYMHvr40YHtveVb+bAZPyX9twNCwSBsLY9RG0Tjzb7D6zWnzQX/zAK3G8k74VsRjNOlV4kBPcq+PiRl9Rh4cWo1B+ptHEgFxo7Z8NJ50p4sY5MsS/oa+GMMOdIED32YCrXgWS8Sm5EOXDcGgRx+2NS7RQrc3jOKUOQieDsbYPRu4x5ctyYcHeY2Tfrakn2ftZN7Dr7464h9glHys2OGZLcnmwo9exow8DmKbk9lB8p8N+AV92Ia8eAanvXhBhvVTsnsWqySaeMD74pag0pceDoXIos9FHvbkR2C0kaNP440du9AjmSWyZCQfebKrxJwMaOHBdg4fPKPDLnDpVAwik3uHNMYYLXhg3cPFF15vNCXHHTrClWuorRPkprP5yBba0ELHXOC/9CM3HLKA5zvokJtNXNrBwXfRhbxgbBwl5mRsfK078LI7fId77NphWUl9X4rxH3TJlc/iiSaefEYf/ZMJzsqGPr0dEDWmfMS9Qje+jr7LeCvo04Ov9HNpbWwHlxx0NZe1wytukEW/GOhAzPxGW56hL5vx6/7+ELuHr3axGfnY0pjia30Ln++WI7A1GdimMeKX2nsJZg6uDdDOf9Xo4ptt8kt0i8fqxhFv8LXdDXcr5O5gvvGCUyFjcxaPYiU+/Ls5nh29WNjDpMYfbYWecFz1aVPIR2+6KdFMBnKA6blxzZbhVd+JPCjJoqsYlCzabKSbL4tuDLV3naTjG+5J41E7HLxdK9feNx50zyfxBtOzMTR32CKfErvNueKY9miEh3/txkS/kt8Gl+zJsmMUvHr7o1F+Lt7Vdtbpe9rsLsybctoXr9NOnrugbX90zrZHz+KjsnJ6Tj+2qq92zyu/OHG2JYP25mljv74fTTSKc+a9Yl039+QH6CSTOe7ZWgFHHLOOyc2b957NeeOMBlrurTFiOfzWQ18KmXPo8n00PQcnRnpu7SB/uUM138MHDfjlg+Xr7IwGH7F2RkNdLOte/KZbX2yiyz76+b9+8Vcb3cXG9M6vwbZ/tFbgjQ8d2Ma6xp72CVf5/lvgxYMhDmQyVPzhzp77I57ffxP9dzUsoJmULpNS8JCYNfELKoKGIGQj2ebG5JeAgLHgeO5Cz6IocAgEAqi2c1EUAFr4qslVsFDvIpOFCuS7oKRPNAUkftWGSrIoUQynII9WJRrktDl5//3374GaHAK3kmzZDaxgqYD1XB/dydBVQgYGLFnAtpiRHQwe5Iaf/iVOya/WBge97qNFHrTQx1+NblcBGxz8ZEvm+OqT3ChwJacSfvxKMtGG3/jCXV333gLnD+o2htkB/XimG5gd6+0H39htu/toPcLVll/wgfWJ8FaW5+RqITNvXA6E0NXeBkQ7e0kE9PGhfJoc7m3U8pn1newCzr0NYRu29CJb/o1Wshovm8Tmn/EyBv3sK7v1JRI8PCorY/zNYZtLNEs08oN8SkKPps1jyUQyZPdk5wd0D1c72zX2ydIBEprii00/O8Fr0+pezMrf8W7usLv+9GBDMUtiFAy9OtCTxNkggxEzxAFff9GNfOihJfY1z81BsVGdb4EhT4dVkiZFP5+QFEnE0BBf8aGr5+Zz/pQOzVOyoM2GZFP0sT25m4tsyRf5Hdn/+c9//su8BG8zLxkjX190sAu/Mr76SkzRRUe8s5Enf75Ed2NND2sF3vB9RWJ94L9wXWg6nDGWxoY98CZnyTV/RNuYN67F4o2z8YXPTvEHQw686IMPeYwT3hJXYwMGX31o6JfcFjfJgwc48ODYGi/+K8k1Dg7d8Ma3A0+JPTj01fryLfbFj197s6qPXHh0YN9coD8Y/Ngy/uh6s5qfsTU+dO5vyKFB/saAvK3XzQv+Yw5ox8Pc4EdwHdaAI19vsdHL59wbJ74Alv3MRXqQSwHjAJyvmVPN3/TTz978QG288vXiQ7ECjH44dG1ee25TgQ/59StkZyvjysZszw/hwiMHHDETLLm1p5uaHDZk/QQDjexCVrT1RRtN/dE2fuiSfdcBbXzM3CFPNiG3+6enpzueeZAu5JFf6d+L7GhkM/c9g8sOzSc2SAd2ZzO6oF8RY/k3uGKCewUOubMh+q7kdI9vz9FMlvQNrvbtX5rB4x/PpR2v5E+WYLKVZ/fonSWcYD2jV+3exU7Ff34jnrBrfgumZ23ZNVrppTaW4gvfUB7BnDqdunl2OGsMw0ePjGRTVpflT1c4p/2zTXaFs/fJUFvw2VV94gRz1ksjusmY7NXJuzqcOOkSTv1LC8xep0xsV6ELG7HvluSuX9zLT/WhYewV+aCYxC+M
t3Hhy9rAlttYE8Ca8/rV5DdfxT+xtPieXxQ31eiLs+KKfEdO05excgx94l04xQPP1naHPPI9fimeK+RwT87mPHiyWVPIrvbzL/HfOslW+q2LrZ180iWmoEUf9ikmerZGXuV1WODFg6HXYYJvl5YFNLVJKUi1kBRcJLiSAYdFbXAEEZeJDUeQ6EBIrV1AKLlsQRTYBLEWre5ZpQBPFve+MFo87YJtyYn6/JtB0YXfoi3guuhQQE5vcHtPjmSCr19glBh53uC5C36B398l6idiz8Fmuz//+c/vffjhh28dIr2zgw762rj1CTYZtKEBXt0CpHZJDDroSL/0ZrP0UNsIWFzo3AJh/KIFv7EByw7agrVIuc9XLDbGDX+y4Zcd2B8ffF1b0gF845wMPYPXr11p3NSP7k8YeOsL5OlnhfDJ9ohWcsQbXm81bARtVmz+wOlzWYw7EFLbuKv1tfGNHjxJAPtmx3wQHntnH3OthZ4u7vvpF5uinw0dWkgAjKd5az6qybG28Wxu92ULmhZ0MjS/ktVcsPA319EUL/STR21BlwSUMBRP6EG+khl+YpNJRjj4OmgAj3d6kLUNqziDp1phF5utxlaiJUFvbqLDZ+s3vv0Miw7FLXDkR0th5+wlOSIT+cW1Dv/QLOmRBEmKbIz1n+NAPzR8idC8oK9xZ2t808thkcMC/PjXfuHD/sYZLWNZPDDWjQG7O2SCzzeebpvKDmPIBbaiHX/tvT1kE+OijW/oZ3/6kl2SWbzrgKd5xQbsirfkztxwgadfbWhoV3fohN+XlWJHPtKcJRfe+QUZyAaOnR12eGZnYytOGSu+Ry86kpl/gJNE+xqqr0q1ownOwYvxhluSzu/wp58rumD7AiYZ8TffFLGADuxNVn3NVb7WzxDzc3z7mgY+/s1NyTw65mcHpMatecT+DkTYXaGLuajfxbfg6Zf8s5lxM7ZqMqBHtw44ya89HvnHxtNivTaym5/mFF5kKtaBozP7oeMydvjjYXzgiy3FKPo76I4GfH1d2qOBf/OK3Nk++6GPp3ZjgXb+hr8LPjrilDFnF3gu9iMrP4arjW+DR4eu8RSTyAZGHDTm5IDrKpbeB+pW+LWLfPCU7N74wUEPLzU64NnTvVjuZy94Pd1iAnmKa8mBNnrRan6pyQ6vAkeuoQ/97KC/+2TtmS220OEsYIPben0Knuel1308PUdLrYRT+0kjOR/poo1t0a8WT8wHtmRr7fkuuEcXOtq3tgm3FsA9+8m98jz3zP/EDzGrki94zi7JFM21a7SrwZ42hpdtg1saa+OV9cTRV1s8etHCn8p9wJUDP2cH/fr260DP8TfH2Fbh8+atuNCLIH7MVl7eKauz5/z0OXvU39h5xk9t3mYTz/lIXwjS1RzFm2xg82994M1ZbealouZz/E8/fAfW8PFV8ydzWQxSyKbNmgTHvV/h8Bf0xH36mefyBmuzNQA9MPrJ3N8T1C5nFAfRFF+MAznEP+ulQpbWX/TlN9ao/hRI+sKxJonv8o7rYOhuvldRroOhb9kwCxBKwcokdkkgTXwBVCCQxJnc+sBKwAQmbSUY+iSOAkJJZYEKnRbDgmfPmUS7AA4HvGCkbROfkha4gpmA3uYVHfDaBNEuOIJOFxgJsVKgL3AnS0GYDQTDNrOeXWyQzXpWC3qVYIKj0+L1s4QWsGRoUSpgkmXl7F7twhdMNTw0XbsQLe+SRvj9oWX3YNg/m6vTWSBnBzB00Q7WOKjZV5tNaf/FzHN0s1Pj7y23pJ68+KT/jl332aKajddO4b41/pubaOc3ZCVnfzciOwF3vzLEiwzsBc8i63q6Jcgd/FhI9cfD4uZqscfThqJNYxt97fDQtxE1Z9gq/yePzSa6yWfRtPkD65IINlfz2exi8eVj8PU1psZBwV97XwF1iGQ+t7m3gY4/uhZrSQoZk5PMEjK6RFPSEUyHSPE0R/Btg4mmewlGP+/R388L8KUnXaLJb/hgNsx/HPokPz3hmr/GDiw47WiRCz3xg08nOxwJaht09DpwAMO/48c+bRAlmcYMTTYJBr4+9MDWx175G/rGVpwVb7O/5LWxU8OVDIInN13SCS39aslZ/tDBC1p4OgACA08f2/BNvuYe/zbe2rWhxUaSRTryfXqp2UNff1uGf6LLVtrwQRvP5kUHCndH/BoFLReZzkIu9nAZAzZkc3y16W+szA267U+ewNFJu7lFRraGQ262g+dZYYv8ytiBjx8axgK8Ayi02YVt+XLj1lrJfvrNBWOLlnnpme3RN0fMD338RHs683H4xrbDpfyhw4sOHIMhHz8XyxSy04mu5KFDX8gFa17bINALH3Drx+ylz7jD4R/Gi6wKffWRGy4Zq93rozddyzfQ6ZJfoOlgBgxbGQP04sfebSq8Ea+IDQ7a2FA7+vCjDQ599Pg/H6Jb+oHTTgdt9NKm0CHfNPf4yNKWk6AnDvAxsAp889g4oJnN00dN7mIXGLySWbuLzNmSf7jvb8GRAx08jSdezZPo6Le2J4f2ZHCPXgfi2hVtyVPb0gOjnbzdB3dvOEr+ml+f/XDB7BUsOeKV7PDBnjyjo999z1vri05y9UwfY+LwlK+zpfXb/Ine8gyvPvjdo2NONZfWfu6T8X5zK4/oy8Xh91XH6n/Ce14eq/PKjBe4dM/m0dO/to2Og4FwzRMw/LFDnn5OBaY43RiKU+yZL4tpaLCXOQNewaN4Xpynv7lJZjaFlxxw2YaM4fEXpS+qxURzj3z4oWtePb2JjXD1kTW58esgqYMleq7N7kxuRdvWZFLQa21JP3sguc/OUbBiE7m9hCyXaC0MVo22P/2AhrzTmgOOvGIeWVrD0LWWiKnilnVbDBdDxQ188CzvMkZdbG6PBo7NWjcXBi+HR3QkFzhjIx9gX/zyHXzEf20dGt2NdJXvvQWug6FvyRAXbAWKfk7RRqik1LNJLNiCF3RMWIFAAFfrFwgEa/cFkALVBokWrF3w/IYUDjg4AngLZwtTSZFn9y1uBRkLiNKChwY4gabNUGYXqOAlQ7UA7RJA9XeRyX0F72yxwbg2tcDewcPSeoS3cuhPjnTRRiY0JZD6lWDpo63n6mwX/XiziTbPLZTugzMObJfeni3U4YeHZ4lzY1cNtwQiPdZ+2vR7wyUZTYd03+dH99pcyt7H4+wveaa75FY//ko13uGp2Rw83c0HfmRM1Z5d+rosih0U2XxYbPfth8MgC6M5ZdMCP7nw0m5Bzl4lBuQD17jb/LXxQ889Xs3TtTP7SgyaB2QlZ/qpo4uOBby56GAPzewQnCRWO94uY65IsJqnnslZbOAPHSg2L9GTwNmkSVJcxR8+hq/Dm8bFffYjI1n1ScTYDLwYBKZ5qT2Z9KcDWfFyoZXsxih+ZMCDDi72yKeLAeyXvUp66E6O9CSDDblYmb3MY/Ikt3GKjxpvtPma6x//+Mcdnq35jrHC14EMPja5/Mk4u+jQuPFB9I2bOKlP0scXtDkIkIzlv3y4xBpNdOjbZh59hYxo0IEsSnOhr4TQwo8O68N34G+gsFk2wFvhC+zGruQkI5/rbS3
dwOQbcKyJ4aBpDrEDf+nwxj091eDxA6tmG3Z3cMGG5rVkl435AJ427WSBgxf7WlP5oH56NKbkdpE72WzgwRZL0dVnHjUH0gk963aHLvwHDN76+roMfTI6mEGHHvjYiOADr0M/duJH8OmBV18T0dUaDF4x9sUac0FfXxyBhSsu23ixQbGDzsnI/koH7I2ntuZ2fqqNfcVFvpeN2JQs5C0mwqFnMpLHRoot4e9aZh6xuTnY/IbfZZOYXxV37kLfioPsDl2Mt36Ffvnt020TalzIR6bk7lkbXo1rcxCd+KVHMRqNveAnO930RVcfu5zw9WtvPO7C34pxo5eSLfMHz3gYx2wUjeC1V9KttpNXcPR30aN1VB+dxVl9ZH6Ej7Z2Zee852SEm/3V4LKJZ/Ol/DdYuI1X8kcvORYGbC8xGoO180kj3aOpNt/4qDi+vINZnfY+2MYiHeO5sNpW/rWpcY+XMUbPvEkfsMZHn7XU2Jj72vmFco5RMhjL5p5x9lx+Yr9iTMo90sOzA/S+8gxffzrdb25FWzGwPrFczNJnHojl4ubqxlaKmEN2sR1fenWwBB8OGsVzsOJbXwqikR/DJ4vYSGYyFGvYjE5oisniU/O2HAJt9umltPln7SED2OZgMRM/cotz6UwWvODCA9O1dKJXfLHOGXOygHOJgfRxieXminGFC8/4aHOYhgdcsHSjAxmK93djX+V7b4EXD4bOU0IJcaWT6O+9hb5hBQWYJrVAIzi4BEGTWnDrJwVgXcZFANDXgmhCFyRaHNW1u9e/dfQcchR0BNQCXYtCiw1492qLgxpsAXUXlV3U9Asu2wbWpU/gct/XKsmffCu3Nv2CmIOBZEj3RzV7PToYOmHR7b94kZVM2tKv+2qukW3AnP3aBFTtbOwrjnC0Jbv7vSQY+1O1+tYOxstCZx6CbfyCWfoWHXO3v2WzLt04qBUyuy+5b4xq33phl2aLdP3pTF99Ngf8Yb8sA3vKpQ1chz/q/amFOdKGwr3LRsXG3GbBgZC55V6fNzAdnvqyxPzpc99o8Wt82RVsdvVGRSnRZxdjbwG1wUt+be7hNk+i6TAl/mpJwMJlNzQku5JM9MWB+NPXeIOV2EhSShzU/vYbH2jc0OJ7Npfa6aOtL5fYN/mSyYaUPdju/Ptf+Qn/av6QsbHlu8aJnGAlq2AV4/F022zpJ7/L3IwWGnDC97d3wGg3hhKekimJGtn7TBqcOEIH9ve8/kMez2yk7NxnH59qswM8sOxjfMj/85///I6TrcBpJ1dxQhubicvu/cQmHwGDrqScz7kcYoBzGVvPDsTUvaUjb/5t3DyjwV7odcADJn3Iqa95A+bbVNKZrdiFLuzkWp+iL/83BnSRoPbVSZsFduMPcCXe6Q2uQwR84Fsnv7i9ybfG2rixi8RXogzXgRxa7M/GfcVlPOFrY38w5gd8NMmZzcuJwHRgQl80xST3aPB5vuyiS3FGf/FGzEZD0l4cgKv04qWf/ZGVneD0L5RtcsUGtqEn2zg4wqO5yd/0mRNomMfgXOytjX3oyo6NExrWEvbUbu7Eqy+CjC0ZzPkO3vNhvmruOERmC7Z1aS8O4+XZXCFfcqOb3zTP6ePKvtrzp+KYPnop+tzr2zVYu6KNzq2h8dPu4g9qBR16wcWXjcnNB4JRswV60QQDHm2yo6EGWztYbdXxT2/8o4mOoqaruvgghlsryBROdsynwq196WUXMN3r796a/AiPfzQWarIuH/h7Lc/k0rZ2YwNt6o3hYHp2nzzsujYS63uZAn5h0Qy29uV3F/5NWX3NR3GlQ5jTFum9uNnhpJ8NtAej9sxW+vvabXWkJ/7ZJl7VjVVrdbjPwS1vfrPw9BGzzrFbveOTfmydbOm4vlQbvumZ3XqmY3YJLhnA6o+OedkhizbzSBEjxRN46uTT557fwXMVC1tfwfNpcPIONb3kIuknXjmQBouWuYhXcZ0t5UjihDZwLvj7DE57eOr1d3JYG7Q7oBPLyivMe2uG2NrYGQ9rjXifb6NnHLOHdrFNTWc5MvjWm7sBr/IqLPDiwZCkYEsTWZuF/CrvboEmpQlrckoyXZJUC5eJKfEzScGavJIvQa63f20a21wWOAsyavhwBYhtlygUFCSABYsCZQG2QNyzOlj1Jhb5R8nA+gu8aGyyAMYz2Sza0aO/S2AV6AQsgU+iTl7BqsMTMrrI8+jq5N5oZc9so957/S0QS5dcFgo6xEudjqtbsNEKR51+0di+hY+eemVM/tUzebdPm+INieBuIWGv+O/Y7D0dGxOwe9V+1ulb8u0ZzMlDe0mkvk3Ug1e3CbDYGfs9DBJ39AfTBs8mqj9Oa1NPbzV/ebodRpgDkgI0LerZ2uaIX9lQsTM/I6e5lY2zv7EgczZh374YEStLJPAEixYfDr5FvC8EyIx2B3slERIN/M1Zc7R5zjbodWBf7JDEuFfbtKKZHdHUvv5EDrqhxU/i6zCKPpLcYMxJerSpZC9y0Tf+HVQZix0fMvhSgDzZzs8VjRPe8UPPJt8m0hvv5n2+KPHJX7Knmp3Zgvz0QcezscbXODe/9KGjX9IEX5+5wScka+iQVdwVh/F0wPP555/f6dEt/+AzxlXMzob6XOaaQwLyo2m8jYEDArzya/bSRhb3/KH7bNAKg8fpe/V9V+t8NPnZk96KsWEP41bCXJLMfkoJNdi+oDJGxtcY2igZJ8/G3rpqvMUB9/rJID6gCZcfKXzL5UCVTPkFv9em4NvapEbLOJOlr2S14WleijHNHzDh8AGbCTzYwNoWDXgOW+jKt9AD18ERPnRhG3KaU2py8E28e3mkz2GMectH6ewlFJ3YgK35WWuTuQLGBgQPNJvLZMmXyWZOixvaWnu00ZcdwZCxMacnOGNsjplv5gn62VYcbB5UJxsYMtOJHeF5+2/8mvPmtDkTrvEtpoAhF/jyJnMVfbLpUydLeus3juynH2/006d4DQ9O9NB2gcWbHbJ17ckeXrYKp/UluYwXu7rApjf53HeFV5yMLzr6wOHJDuGglw7azrJt8QaDZn3aXbUFF+1qeCe9fEg7/NbiHd9t6z48vsqf2Eifdv5kvvD/bJy+1jk+2LpfuzqayalNfOfvxruD6pXZ/doletkxfbe9+/A8OwQSvxTt5O4rH23Lk57JGJ/T1mBWruCi/5y+2x7N/DAayV+dPPpXNv2nbVZe/Yt76pCs2uvbNvfsUryJV/6tv5iQPRzcKk+3mIJ/88I9PGMsJtt7WFPgmXf48Bu2EPvExWJY9vHcBc+92iXWw0Nb/iNnFaesMV4+iY3g3aOtjwzWBb5qTrdWiKX6xNzWBLJb+8o1+Ct62QRdfmw90cYuCh3JRQd8r/K6LPDiwdDrMsV/R1vBwOQsgejNYYuSSWryK4KRwNSmwqQvUS7AFHBKbgQqf6NG0ACLTzgdBmnDr8AGp2C+92TQXlkYAaPA7L4gqK1FcS3aArZ4BXV0nU5Ljt1rF5gFJAHKM7x0BivZTb7a2cB9i8IG4b6s0bftZ9BGO7pkyR7hrc59BZQeC3vi0Se64JNzFwk4XRICBy
LxXZnDVUt+JcSNk8DOf2wm2EiCG98SxHvDrTRmO8Z+StgBkv4S6cZta7xcxikZ9KMXbXzo27gufjLoR2MvXxNZ9PC3QKlLoNNRO/+wAWELC2A/p7CpsdDiK4l3UNQ42UzA059vacMfH/Sjl63RQSM51B3QONSQvGlzgGDugU9ePMigjY8ac3PSvG4M01UNzkLd3DVPwe1BKDtKKhx6dihUUotGtiQT2Pys5MgGUam/mv3QKXnOHtmm2FQ73T799NN7stpbMD5LLjIXl8A3XmwBT59xoK9Exn1/fDF+EiW2EhfpQBcylLgYK/xsbjtw88wXxDsxE29tfcVh3Gx2XejgxWboS7I8u9dGTjDkkFw5OCuu2tBL4Jpn5lraNmMTAAAgAElEQVT00KAv3eCgq5/s+tACu5vV+4Bc5W4Bdi++eGbHfNgLEvfawKnNlQ4u+BX/Zf/src9hEXhrbnPLOOSP/MSY8BVjamz1g9VuHiZHh0PNhw6EgoHDp/NXviQJ5zONfwc2aJLh6bYpMWf4Ojz+ap3my9ZGcqEv5rcZ6EAlG4Hhn+Yff+vgqNjAJmDoQwc2cTjkmf7NMfTI6mAJPRc54YJjx/xfrV8fe7S5oAsZ2LC54Lm//SXmamer4lXjiRY6DjnMn9YZvF0K+viJy8ZUOxldimc2FUs7fE8HsQEeO6GBbzEYDJvoy8/UZGpds2EzFnytIscAQy400PTsop/2ruRIT3WywwNH/r5MKLcJxkY2HPy7xwcMeV2KZ/G+rxzBsKf8Yr8kAqedju7TjSyVlfFt4+0mHP0Vba7GtHbPW6L5L423B3DhkoE+5PUF1NorfbO1+WJetPZot1k2F7L/4rOLvs2Ho5U+1exunQG/NLKRtkr327a6uj9h6udLcoriDJr1Jcu2dZ9twXSfHZNrYYLTtvpET32277gsrWDT6ZS35xMnGdJr9bOXUfraHm79ybH67T28nsmUj6r5UnHSuIsfYpE43Fh6Fn/EY3Oxua+fz5hT+bd+cwfNvkqCLzfFa/1OvBPT4Io9fFW/l2byleKpGJ+twHXQD1584h/is9jOJ8VZPu/CszVIe/ErWfoi2lraPrPxoYd8St2B0X0QrvJqLPDiwdAHH3zwoiE++eSTF/uvzn+1QMFI8DAZ9zK5TfQ2KBaevozRZzHf5GAX64KOur9fIdjhU/JiQetLgOgIlu4FhHOBSvJHwfoMzAUU9QZrNHYRKIhGm7xs0kaWfOAF2ZI0/RvUCmxqhycdSCwMndgrWHz2C6Ngg0uOnv/whz+895vf/OYuC7voJ3t1C6W6BWd1In99C9sflc5O6C1cvGp/CS48MAK/hUNybyGTfHcgRK5kINcucI0jmMbQPV1dwZ6LX0m8ZLvx3rFtzBvLYJIl+uQu4e9vDe1mPD8Ipo2e+umW8PNvY2bxslCaJ8a5wxmLfD8nMy8sovu1kCSTnRyinTqjYxzIzjbkcgBicSaPTYO5xRYKWM8OHdyDwVudf8Oxgcwe2RcNtgBHDoeTweXDxrCvd3ZuuLfBKqn96U9/+naTpQ9/MrETezUGfqbFjvElk01iG2Z8JTVky+7kldCAMVfjiY/EW4zBL/+XXNBHXHOxdTq6R1tClq96bgMOTrsx5dvkZxPy0qPNnPY2o+7pQV5j1tighY6aXuSTEPEJ8PD05/P0QxMvY6nu6y5w3tZpR0/y5p6s4eCPB13SM//aeXIfjKt8JQsYJ77lYuctxtoY5LvGp0PXfFG/uVuibmz4Bx+1zqIrPvBH92jiYyw94w/emg3HISi/UPJhc6K3yuDRg5PfmV9o9RMA8uaz/MKBD/+0MUCbXPRQ+KqYp6ZfmwR9fFP87HBG3CM3HfEoHtGJPRSbC3OAf4bX/CzmkdfGVLv4Qx+bIfTETocj2YINzI3WGjz6moet8OtwN/vSUaxGQyyhO97RcHAjrtBFX7xbJ/vKDz12Dp+eYNBXi/lg3Ctqz+DF/taRe+et0E1BM18zhmyHNlnZlazZSrsxYPu1gXuXgz3yJjv67tHlk9Ybz2TCt3HyjBf6fCTa2dBzvqk/+cioeM4e/KdnfXjgjwae/b0Vz/xMHc3wVq47gzc8yKBkj+TShkZ9zUfPaG4N5yzFTT6nvzqdbND5njF038uB4BywaCsesGc2BNPXFbWpu8dj/XFhT7hH+qRffdlwx2Db8GJ38prfK8eJk522/ZRh/XPt2hg1Zktjx+Dkj0b+UL20HsGvfvDNLfNk5Y+GPvFQMU58mC3MQbT7OSZfdaiZfvLqnrN5ugWjRkNt3F3NkWC1KZ7LKbIHunTWR0awajJpE2PMUzGQjxdf6BGvXjqjbZ0Q+8QouREa7o270mGNZ/GdzvycHPiAFV/3gBovF7myX3HVs6ImY/7VfGi+0NH6QkZ8xfyrvD4LvHgwtAc//naFE82K56t8PQuY1CbgHtiYuIKIIpEzYV2CgODg7QccuILHBj7PLhtGwQVdbynRLHgJbBZHz+5dBcbqgndBMK3i1XMBVHt/9V9fcIJKycb+bRIwS3vpaE83C0NfMmgrqK68yc8uLRD4BvNIP339nAxdMI9og6s9ndZm+PRcHZw62tuXfrWtbZdWAfqEA9+4r53JIqljg88+++y9Dz/88L5w8aFkqW6M+InkL79pHJKpWrtxRK+kGx+XcW2BfFQnV/iely7aaFrYyBK8RWhx4m1T0CbHoVeLnkSarpJq/m6RtNhaKPm7zQhe/Qwo+f/+97/fN1ye0SWHuGbOJRc50Gw8V09+V1IuiVPaWKKV7dDOR8Ggp82i2wYJLFwbxcaiN/38gE+48CNTBw9kB2/jZpFvzuPz8ccf32NHiQ08PNv80Am+zV6bXnjsCkcCjTeeDpSMizbPbRz5HXh8tcOLX3qwmSub0ANvcww+ODYhmwJfO5ngpTv5swEcspfQ6cvH9cEnjzpejaF+eq0/Goedq8VMspCTLPqTFT+09ZEj39W2vN2Dvcp/xwKNvSTW1dwxnvxeIu3iy76I7LCEdGAl7GhYQx1giCHGtqR454y5wD/EHn6pRteYe5nD37WZ03wRXf7i8KMXP+IUPuiiVdwjr4NXNBzI4KWtOSDGaTPvzEX85Ah0FAuac+YUHmgk234FSmexiX5okKWD0+aOTTA98ANDdvOuuWEjFEz5THOOzmTBm5zw2Bxf+uCh6AeLpn59dKcvWHbT54C9Azt4zWH2dcEhA/jmKLpsABZdV7FADUdxr4DvGU3w+JIL7frAG4fmPDkreLOZQhaw2lzFk9rpnh54iCfGoz+Oi658SF86pR+67O8gpC+Kkq8vicCSER3wdEr/YjGdbdLBuDzjpYYLzrMCv3FhG/QVsK7sgEdt9IteuHDQhl8ch9OFTnYBm8x3ZlP6iRW/II/cxlxAtysfQkMbfdLVs0M8fu++9uwFR6kGm48tzModfDj7nH2DX9rajGXxZ/uSa2lu/8lz/RhOY6FeneqDj8fSWbj69DfOxgcMmxjLvmQOD47xYdvoN9bqvqzj23Bc7sWZ5IyWZ+PrEFvcEw8Uc9B48Es08Wyfo23zXPD5V
PbhC5X0aYyar2v75FLjJ18Uc3vhlW8VP9E2p83H8lYHWB1Sl9+g5aJjOVrxUoxymGw9KoaFpw7Pvf6e2VIeKK6LgeRkz2K8Fw8d4tMx/2cHsGITO7N58/+tsa6bV2GBFw+GXoUF/otKFmiajCaiCSjICAo2iQKNZKxkyxstCUMBtEBPbEFDsBQAXIKDILEL4/Lc++ipwfe85tBWIPWFQQmBAERWhQ6CoQDiio6F4fz8M7nQSQ/4p4z6TlulF1wBt2CWfGRp8a8vfuGSySJGxgL5wjQu2SNbJEu67fg5ePCzp2yxsO7hVEdPW4tR/fqiX93PutZW8FwWP3ZwlbS1eMVnabrnL3vwho8CD42n2xtF/mg81S6b4w75GutTnuDRyBeSM109d6gEhtwWHvd78aGe8d6/lyAZsNEwJxpTC28JA3kthB1mmDfuHSJZLNGTuPMDsPDayOlzj7eSD5hX5iQ8OHSkf1+NmAcWWvo5aAATTTqiRyZ4kli0yI4GOLgOg8BZlDuE6csc9oPnIuPShL+bRzKCV5cMlDTADVYbOdEiNxnYJ7n0GQcy52dsYu4p2hrX5l8+USIRfmPJniU37vEFkz6e89/mpnlTXIku/s0n8PBeKmCVE44MxqU5sHzwd514YMHl2y/xvfr+dxYwPsXGvnYpXli7rK18R3Ls3hzXrvjE3k9Ddw0wzzyLF8bfBhQ98QgdXyvyMwdM6JiraJDDsxhUbDQPxQk0HV7n1w5ZHB7ZBOjrwAdfpVpcKI6ZT+7RUOMRf/LQXW5BVvR/cPtqhUwd+IoT9DdHvXFX+LgNnxhALzG0DXE6gENfjMFD7MC3tQgPdPT1RVCytv6K2/rEUbUxQAccXPqyL5rsURwqRuBBRrhsQh426GdqNokOdjpoa3zRgUs3NsGrGKF2sRMbpGfy9IxGsPlZtiMD2Y3J/vSXLjaFfI4saMZbn3WO/tmOnja31uziLR7us0FfpOarxSfjWRv58rF8/9S5OLrt5MCL7fMN62a+oD+d4buUZFMXK6uTnT+yAxh2XL7RVOORbMHCc6hpjtEx3tkzWo2P52yxbcEv/3DTwfPqGM698Vbq73nbFjb/ag4vnLE31+UFwel/JGtynbyX18oEPlqPZKztxM8m0TKXmj9otj4WR70g2/xBfy+70IhefigPaX1dXdMrueE2xvq0G3/xs2c1GLmkg3Dzj9+Yv2QyF8SJ8l4HcMW7fCe79xxNdOWOySpeig2e6YJOtsjHdoy0kbefjRXf4GwuBG7p7DM4z+rFC//E40vpbL2yDrEXWzZ3Vz/4jX+5mmdrlvXoKq/TAl/5YKhAtQvC6zTZu2td0Kg24U1agbLkqgMhgdikFdB8ERKOcbAwCnYWR/AlU3D0o9t4nQvMBv0CQjAb1Nw7mCgwSTDRLQDC5QsF+xYR1oEbbMG2tkdybZvko09NtW/gK4hFWz/4PoHedrA9qz1LiCU56b0waAVvc/D73//+vd/+9rdv7Q7ntCVdo0W/dA+2vm3v/g58K9nthA2OjTdBci+A1xYc/s3N6J60PQeHRgcYElb+ZOHTJqF39ZbZGIen9lzS2EFISfDKuzjgLd7ouySsFtkS6xbaFqcSeBsIsthEaXP5D2veLpHXoYbEBS38jLE2B5nGs8MhtaSh5BvfDluyBfvAaZzJZPNAx2ypDy46rmjaJNHPHMwmbEFXReKnnYxtFOmlHx903eMjHqDTYZSfyuknJ3z8SxTo6pld9vCKHkqbEDCNTZsFtFxo49u4ouV+/Sm7hNO8vzP5CgUtdL+soP9lcCvXl9F7Dpb8z+mQD34Z7av/u2EBPsDnFfNV3Dd/zDfzwzzk3+7V5l/z0Jz17JJo8w3wHQCg3dc3cHpBY0PQPOfTfM0hshoc3mKSPvJYx7XD6ZBHTFOKR/3BaZsuX5aIKejRjVzwJfPigDmEn40CfM/wFQeidNg/Kr0vnxyakEtsQYt8ZLJ2NjdtmNBwkCU36MCbDK4Ofsjjq05yiJdkZWO62RSzrzb2KGbhrZAJf3jFxp2bfSVlbUDf+KFVjHbvwgNeLzj6G4z1008BYzzhGwd18ZNs9FJaJ72g2HabUQc8HTgWX+iT3OyH7uLVB959402W7IN3BzTWOzzAZrPsri2dO4TT198Xoiv66ZmMtRff5Z3g4Oaz6KHj2aUffPSSQY0+eF89Pd1eOGlLL4ddjXE47Oq+gn7zZmVkPxe9T39Z/ks3XvFIr/j1vPrUd8K8FfDNTXyyQ/2PZNEGLppyiw458FbSKZm0JVd4Jw/PZ98jmEdw4fGpfKA9RTrJtRoDNLKnGAoWXrrl1+mTHGpt5tMpW7Za2O537LR5rg3e8uFz5DE39LX2i90dHosl5XPBNCZrZ7rJD9OrdSN+aNOVXdK5AzTzT3zGlyxgxC92AluNVvfaPdfvHm/P5XoLu7Tqp3vjgS9d0dFPx3Aai8b+tKG4L7bS4yqv0wIvHgxxHM5kgnAeCYMJJvlp0r1Os72b1k1EtcVN4NiAUKDoMIbtJZ2CqQXfs68LGo+CB3qu8Lv3HMzekz6cYAsWHQbhJRFWJ3cw4Sb79oPhGy609quh5Zs86kptPZ96pF98ydbfDipQ69sLjGtpo7t2f3S/cq2tVra13S5YYHreRUx7tqkOlj8IxN4Cqi0kYIy7hSX47sFLuC1Yrvp3IT1lAoOuhVFS//TmCyH4JcltWCSheMVPXcJZDSa+8ILFN/h0wdMBHjj81A5x1F2b1Er2JeA2MTYIvVnTRnbJvXG1+IGxEWmjIKmPJvuSJbukE7nwI3996Jl7xlV7OqQXP8Gvw9t8C7zxAEfP2sGjbZ77mgAuGL5FPjj44OlS8k1w+tmKnOiAFQvQX394pAse+K8uaL5ryW7vin/hXRb4tljAfHKZW8V2c8o8tbbydXPHPLcGttEXZyTN4ZjDHeiYnw5ifMFi3tZnXhd3vXAwfx1i6MezL1rAwBM/5Ff424SL8Wg71LFB004Ocx4/sujHB0yHQt6eo0On9GF/66WDHLFIHCXDxhcweHQo5PAIDTzaeNjYF8PJ6xJvOkRBz4GR3KWDneIqWdACj3/4aOuDq9Tv8KWvM60DHY508MR+xcT0JL+XS9aX81CKvaxDbMuG9GD38hT9/e2ixqi1QK0fzWJ5Pk3e/Aq9Yn/j86gPDlukN3m0tS7AtQ7yPzrGvy+J8IgPPPD7Ug1dbcnWWGvDq35jQx+0wOKz8ke7vuyBjsJO7tFfPH29MNLfBV9Ro6mwQ3VrHfnAsB1fIbM+suIFN9nQ1pe+yXYn+qasDGfb0gkOTPdshOfSOOnFMxkWf/mRk2/SY2HXzuFufcr83HN2WHncZ2Nf+NDFs/Hq0Nt9czgaeISnXhujccqwdnzb+eDmkY3ik9zxWxm2T3v+BhbvzaPWP6LBp7R3iONeDOhLw3hqd58P5tvaleaSWuwSh8RK8YzP5ivNWXS0PbqMh3lO9uYguHxOmyvc4JNJLdaah/Ji64v4bb2IZnKsH5z2AYNGefeDYbuaXokFXjwYev/9999udmzyLdIWRYHa4nmVr2eB
ggwsE7QgF5XaTFgTvZ+xSDb9HZSCxhkcN4A22dXRC16tvYBSwPH77IJQSemJszKu/AXkAjT67ksewBbwg1293SeHhOhRX3IXqPmhYO6KbzJFqzp7eO7vDKETTe3J3H3P0aRLOsBb22T7YIzxjof7bKHee4sIHSQ93jB4FpjRyFe6L4kFaxGxYYGbXL6Q6adajVU1GmwrWZdcWwTbvNTn2VXyVbtFIrnIhkcyaRcLPFfDX3i8WpwsaOj5Qim7xBOOPhcZO+hhFwc9LvgSFzAWPlcywCEfOdDMfuu32eq0T89kf6mgyf75HT/YcV9cOud7PudVkumUo/mIXjDqf6eQq03Wv0Pnwr0s8H22QLG6jbK5LcbYtIkt1oTmsg06+DYf5qgDGF+JmG9gHQC3RoAtHtl8mefWVzzQEM+a7+Ak8uiIX+KMgx+bDn8XzaZDDCST2E/G4ot7Bz4diIvzYiM6DhHQJhu54KItVvZFEXwwxSUHWGDwBENvspMbTzK41w8u/NYBOsBp7QCr7Uc/+tFdBrJ88ebvk1jL2cRmhk1al/enZvTpi6BkQFtR95a8MSAjOtYE92RX1NE33nJZX2wYa7Zq3Nxbdxy0Wb/CQ8OBUjqgzWaei7dwyz3q88y+eHavr/EzFp7Z1dqeHHJsMNm1dnLEJxm0gUs2Y9KYo62vtbmvprXHo7UiuTxnu+yp1m6c8Gf71qkOOfBMb3UykGsLeyWvdv3gu7In//KyzNxQfCEefDjRPp/jh+birB27V9OvEk1fPPW1W/2Lf8I3pieP4BxIoGd+ZNdHsr2kU7Szc7DonG381fxoXPmFw+/GJnmLJ0s7esn3SM5tO/tPec5xyJ4rM/4L5377z2d+Et/g7g23ko7RU5vL+3P+4glZFg7d4gmZmtO+8kLDXOKbT7eXq3D5sktf45p+ZAlfm3tzyB7PPHLwXXwwp8RcL0LV4aFrLMUIfMVNa40DRvdeFoC3HhV7G8vk7xlOh8vaxGK6ksUXnuXv2fGqX58FXjwYcvrISSoW6524r89c/77GLYiCSMFnA5LDC2/0JHAWEImLwK4Ery5Abpv7glJBwIFeb/oEfwFJkFH3xw3xEYDCxWvve077aNcOdnl77upv8GxA3373Cpqn7NrRFTQFcAcKJZt0SGZ6CHQFwJKRgmpw6H/00Ufv/exnP7vzjN/Ks3boi6fG5450K56z+34105i2SJC7e/DuLSgSQTWdJJ61RTP6JV+d4ktWBf8SbXDNR/frU+7xN38lyB0KOQT8wZufKMQPH3ASMPa18Pj6xj1ch0Fogdfmfi/ywHdZKBsbshkb9Ixd/PiENrQtaPxPjacE06bGJkebn41Z6PShj2b2wpfs5/g0Tlt/FZhHeI/a0uNRX23k+qolW35V+AvussBlgf+8BYrV4q34I46JR9ZN9w4wrBX6bVTFpeIVGJc3x4oEXZwQo6y1Je7FSocq1nYwbQrc27SgLX44oHHgo0281Cb+twl3sCSWqyX0aPd3gdr8kLs1op+8kR+stU5t7SyO2jSIy+TT517sBMs+Sj+psxaAw0u/uBxsPxt2+ALewZj43dqMdodK8hN2ZafWMfGeTGA6MEK/dYiN5Epori7wo6W9cSE3W6SD8WXvcoDNEdiHzWy40qmcxzNYtOC2Brv38yubLP2utRtaNpXpD58uisMgNjCu5CuPaQ1nh3KUO8KtwCUnHfSnV18XgVeidX5JVB866R4tNfnYTok3ffhbPpXtHNbsmohnePGpbnyba7Xnf8nLzsZezpNNsjcaK1f+vTwaL3U+kLza0ql64e/Eb8Vc4LvbtzS2PZrhelaq3ZvLJ72TxiO8xvIRbfD5YPy08UPzEr/mxMIFi3Y+ubKA3as+eKcNVq7gltcj+R7RQ6ex7T7dt11bz917fgSbPfXz0Q7JFp8/O4gxd/K79hN8D10xSPxXtDVH0iMbBn+24+ESq/k0euav+CqWod3LTn3uwYvrcOTP4MV//eKenN6hUv5Nfu3xohN9yVaso0vjRxdysoU4IGa7t1Zd5XVb4MWDIU52lp2gZ9/1/LIFCigtouoClgBgkpZwNqELbBILMOFGC35jEr0CpLcTEjK0Sm4FDe21CRoKPmfAiHc1uGiHUwKUHvpbyKOZVZaO+5N+bQ4kBU+B0OWNbAk1+Utkuo8OGeKpryCoDsd9NqsfjjbBscMRvm9x7dN1OrAv3fSRS1+JqvbGpHu1fkmPJFpwl+zAl2BFb8cUvMWphNfCAdfhDDqNcfb2zD7g8YuXxFdyLTG36ODbmOEBvgMYcoHBCxxcCw4ZyaJ9dUm+ZDRW2ZfNyGuDAq5x+9vf/nana2Hjx139Vyo8esuNFxpwS34bM/pe5bLAZYHLAt+0BcRJcan1w30beLHNRl5fa2nrl7XH1yZiqiReAi9WOsgQF63Fkm8xTbz2IshBEnx0xcTWLzFRzNMOBq/WWPh9RWkT2Jtim0H3ingMHz20isflGWjpwxusNUMfea0F2tXWAnEZfD8RE6PhWjN3XbYhEbcdeLSmkBV+a3R/LJZMbZJa3/AkAzvgbW0htzUwG9u0g+8PVztk6wAJPF3kTA7qyAbfeOLvGZ4vOeFkg+QzXsYPHbDslw883V58oJ0c+tzDVeOjBt+a7DkdjQm7+YkVGC9bjRVe9FHcK8Y8u6KhvVyB/Pr4UrLAYXt0o6XNWCjwwavhlPPoSybtvTBMD3zAKtqyR7pv3x3oVshwluDVxkIhpwsNNiKfe37ToRC45YE2OZTqYILzTI8OJbNptpID9zc6lx58B13mtquy/KLxSIZTtmCNc3M3uZdO928Zzg2Ztn/51hdNvkmv/tzE4p2w+tiFzKcNwJ588QguGR7psm3RUC+P/OMRHfjgt6w8q0e8Ttieg81nl657Y2KPpOSXOz98pePAl2+CL0Zks2DhKp67is3igBy7dj6e/0dTDEDbnBfLFW3WBjYS3/R7iYoXGmJN8SHaKxfaO7bd7xiijZ7cHA1xwHWV122BFw+GmMYiyXm8veKEAq1Afp0qfn3HKQgICia5ywIo6Egi2RlMwQyHnp9uCYkkB45SQK2/gOULiw6D+vx6g79Dl4Jc9NMkmupk2WBeW/3J0fMGnGhWJ2e6LQ4YwYkOJQqCk4AMnr343ga2eKm9iek3wuiewXDbVh90JU0CrCRE7Vo98CeT4E4mtaSOrPBdLSjaVo+CbIdcaAv86JWkgg9f+x66dPASvxaflVvyCw6u4O4evzYbEms6wAUjWXcoQzbJvhpMl0SdT37wwQdvD7GSia2SCQ92Uiye8CXz6JHD21G0+KI+8cLVGHcwBAZ9tLNpPn760PV8WeCywGWB/7YFitHip/gm7llDbSok723syWWtkdi7xD6X+Nt/B3MgomhzSC72aXNoJLcSF+H6EqdNAP7WjjYUrR/o2LzCL66iSS5rPrwOS9Anu4MptNAGB8/6qdh8kEuMd4nnbdQdxFgv1GJ2Bz9kRrcDFvjivDUELhp4Wb/Jrd+GiZzWhb44Att6QnY/cUJX/mmT29twMGjA3YM6OHTRp1iD3MO
nF/7lAQ4F3LMDPLqUP/RTH7Jq15/d0WvzRI7WenzRIyOa8pb00e5QALzNIZuzVblCuSB+aINHl7y+WrDRV8gSLFod/JWrVKOTjdgHDpr8JD+lhzaXMVXSxz352b+8Mb2zbc9wXF6QyQGaJ+mQHNq719fXS+hEE180yOtFl3x48wD4SvDJn82iVT8/7euKYLMRPuajMQo/2mC7RytcdTLor33vF3bvH8HcmQydZKuuX33yrS/5eqaz2HHKUf/qgOZeybL21W/sTnqn7ivjyrRj+4jGytX9tsEng7pfBUT/pH3aYm2U/2lLH/dwzAt1+Php8zOt7sWEviAKtrmRXo2RdvPMAXtfXZqTHeCY++ItOPPTi1v3DnTFZnDFUnKIw/q79LvEUPDJsTDuV67GUR18Y88O7WPILH5d+Xfe83rrFw+GbCIFGm+IFIuIidEpa+2v13xfX/OSF5NRciMIuG9R1V/SgHqBSOBowp9cwQhexqq3BQUKAUKpXprwFLw3OEYfTgFz27qvb2mfAWn7lkbt6Y0WOwiiHZ6AKag/CmoFN7oqqwPcgjUaDlA6/PD3eLzllGzx6fNACB5axqG3rJKlxmnrxktdMqkW0NFW0+cc4x1v8Om+9x0ogSWrS8LrerodFEqcO6ihm7l0W/kAACAASURBVPb+pgQcMqELBqwkHYzFSF8HPumffSTT9O7ZAsZe+QPakl3tEiwbAYuKN23uxY0OgvrKrcMoNXnoQterXBa4LHBZ4LtkgY3HvdixyWzzLZ5aQ8TPfgquzzplXYHvsKTDC2s2OnDEU7X4CU78FisdMInh/SQXjM2Ll0XgxFLxXW7WIUhrsUN6dMlQzEaPPNaBii90yI6Wr5PkE/i0xnk52ObBGkEevMTzYPBEx6a8fjytS/jrt6GxDmjvAMVa1JoLxvoDH44cKVuoyc221jFw+JMZD/LaROpzGNbB2OZZ+Mtd6dDLsw6BrKtyLbzRQrevezzrMwY2f2Qvz9DnMMWhhjFrrLWTCx4dy1W0uxySOPzp2VjE1z0Z21Q2ntbeeITHLvrxr82zIjcsv9CXbI295zbI8aB3foQGWOMDlo+5KnDya3Ikf/nhHpKFA64SnHxneWlXoo+uQyj+/Kjod+jjsNK887zjA6cv+RoHbfE/aS7/+rKP573vedvrr04eMN0nX/XKUFvynfLorzhsMIboRj9Zglk5wLmyg77oV4cfn/C3fgSjbW26coa7MkUjHIceycfPtPMh+aX2DrTN1cr+OYn1Lf3piM457mC1p3MHrp6bY3CaM+jpSz52F1vMFzGjv6kJxpzpIEdtfJJdnG1eR6vxiF/PweHjfuHcs+nSEE/YK3tmc3QXTjv9yenrSTqIp1e5LPDiwZBEwxcmHIdDc2xfpHAwf0jwOhj6+g7UAs2GJq9n9q3d5HRpM2klFBI0CYxPqU1gyVmBR5IgsQSDZpNdANnA0L16rxKt2jZwu9f+qNReQhLeie95/1ZPQXvxtXVpTw/3AhlbFNDQO4Mb+SQ/Dm8UeB16SBAlkYI2HiXaEihwnqOXDI0BGv7w5y9/+cu3YxJOtILtoEOSKhGsnzzRbUzr67nx7wCHzHhbPNQ2EO7JbE52kIM3nA5aLErrR+Dg9vZCIh4sOPf6O9iRhFp8JO3GQLskWpvFhh21SfrRtAH47LPP3i5Mnl1//etf75sZ8OSnnxpPul/lssBlgcsC3wcLiGcd2ouv1l2bi16eia8l8vS1jjt0EUf7qZl4a13z5Y+4q1hn0O6rIvFT7HcQBAas+A9GjCaDNjI4xA9f7HYIRA7x15ogHts8K63X/cFpNL3pllP0tZM29Pr7Q9YJdMjW30lqLUMPbbGfvOjIc8hVPmHzQR4w+tKjA5j9KZr1xAuWDnjY0gGMNbKXEPHocMe6hhcZwbcm09czPRoztmz9Ld8gl59qoSvnJR84/WhGp1xErT849+UU7rN79NVKayF+0URfO5rrS3Aaq9Z4z0r8sq9nuMm3h09wW4f1t1klezJo3zXb2MWnA8XyS3K5wLvkWdEHg75i/LZkg2p96bN96VRfdfCeXfzQ3JKXPMoPwTs0Arc01q61y7P5mGe0qruPZ/rsc7xri+bW22eslGinV3KtLRYmOH8zsq8TV57n7tHAkwzV7p8bh+SG94jmynSOW/BsyTf66ka7Qx7w5n+HP2i5N++UaMPV3hdvns1pdDt0VJO1P5NhDj29OTjKlmimj3vt4NRo6sNTW3PQvOhAhl9nN/zEObjaFf4ONppgO8hJt2DPGKF/L/38VWzXjo62vRYer67slr6euw/m3nAr5GnOsGny1X/Vr9MCLx4McSgTRhF8OpC4nOfdnaVAUrCS8LkEoL36w3cWO4txiZHxYH+wNuvGRSDS1uLsHnwBYQP2D3/4w3vw1NZVIFs4ASQZt17NN8ie7eEv7nP0N6Alcwsm2ZTaC3rxDhecZE9w682qWtK19nEv8awkJ9sFt+OwdiW/567GrSRTAotGtLqH577nHadklnz1B+Ykh9rVHcZImle+/EYNFq7/XGcxhCdpluBbwCRnknEJfX9LiG06NJIEs4M30N4sdxAk8e9LNPg2MDYfn3766R3OIaULP/RK+MnUWL/7TLkwLwtcFrgs8N2xgJjX2mGdEg/FUhsalw2PWCmGWtPBtDFvY7PrJVqt1eK2Ta313hrX4ZM1QV5m/bB5AucwxUsEhz3uW3fwtpnByxoIRvEVBVnxs0ag5d7XGWJ56wGYfh4h3lvHPIMpT7SG6MNrDwWsma0J1hHrGlzytOGjl3Ufbl+tOgDSvrkDXOsVPni0GSsvtZmik7qfzFkjyxngu9hfe/+q3nNf9rSutubir59tyFg+1gYPHTQbP7zIAx9eMpcH9HUTfg7y8gN44F3yO3U5SrWvn+HhFb/sTx60ao9WP/32rKh9ZUHGdFg+8oh4q8H4kqP8E17t6Ml94qXGH31+hG62Ty44jeldoFsJbmG2felEzwGDXKQXo8EvzXQO56SZ3MkTfDS2RmPpJdPqHszqw34d1i0O2stvZcwOwYPVln0djDh8rURn6+glEzncVz+n4/IGs3IlTzSzm+f8HQ7/3S/yHHTwTXYQC7MH2LXpyY+sLiVeYkYyaRPTfCHIHnxUzIhmNlu62qK196sTXcjY4Q4ZvIj3Alqs3fntngxg0YObnZtf+Uj287z3waWvfvdizcqxeMsj/KV58rwb8Y0di21il3gn7rquclngxYMhjsPZOY1NYv/uWxDmTFd5NwsIIhZMdjXhu1f3abjFrmDY4l3SWb2LNBjP+gpYG+QKBgWtamO89wWdUzMw6FXrh1spGC0f9+EsvaWxeBvQLCLs03/9Whz3yZzO/FHSJqEhlwAXjHoLHbev+2wI34W2PnVXY9eYWYBcEr3gT5tGq/EqiVST00KDHjolpNp7jnd04HUQs/9JTBJt0yEJtADbMPRW1TP7OAyy0KgdCNFHkpm/STZsQMiBVj9XwBP/DoPQSnd96FzlssBlgcsClwX+b20UF1uX5FEOVsRh8dXBkHjaAZF+mw1roLXA2i
83EJfFc20l7q2prX0OV8Ry9HqLbRMvPrdmOISx7nUA1TrjBVRfE/kJsAMM+Z01whrcgQKZldYkfMB0kakvccjpqygHQP37dfKTBUyHQuDoqF1Ro0Nn65Z+9tJGRv106KdoZEELbYc75S7WM31kY8fsVA7R2mfTaoNpjLKpddWhlDzMYUebV7RtdK231kB0rXnlWp6tvw7j8DMeyQPPYU6HedrhwSmXc1/+o8093RTwHcR5JhP96INuMDat7AW+tmiqtcPFiy19SdSXMdFtDNAG26Eb/PQtB0IHnOdog6d/8GD0qZX8Mfr63FdW3mTeevuNc1/doePSrwS39PHZa+GW7soDJton/Z6X945jvI1/P22L3srnHs9kT67q7QsGTfFidXiO9qlDcifDqW/81nbB3hneynO4cJS+zvLMJzo0OXWM99r2DYu3VfZZvqdseCjo5XNioHZzwvwxn6O1+DGCq705oA7fvEbXHF4fB+Oin3nbfEkvc2h1y25nnczBw3HffH2OhvauaHpGz08urS9ixI6x++Qms/kq3oprV7kswAIvHgzZFNrwWoQ4KOcpUfEm6yrvZoEWWAkJu5qcbGtiO0kvaRNsBJqSCJNdouaAroRCDd4i4T7a7gUIpUCvLigIBvo9730BZHE2MEfjkebh1reLwPIWlKK/dJZ37QWwcKJDT/aTJEogOxTSX2Df4IueZzr7uuZ3v/vde7/61a/uNqN/fNiSv0vy0JWc4QWmC46FBpxDnZXRPbjkgNt4ooW35+Q96errAosHPTsoQoNsEg2+4tnVgQ8c/sGfJLH8woakyxtkC4V2iaQDX7KazxYG7f4GE7m8deWfkmR4ZCYH+eh5lcsClwUuC1wW+HILFNOtQW2UxNoOJxzGWFPEWzDieH9fRiwGK663Fjt4Uawv8MR/sVzMth7gpziYsV45pLFBsp644In5NtYOecRzePK7Dq3wdXBApr4o0oY3HDLh26altchbe2sF2nLIDpe+uG1mtcO3ppEb315QWIfQoicZyY4GPnvA4HDJOqS/n3t5Lt8gEx5ebLBD/80MbTJoQ9844Mk+8aaDdmtk6xy69O5wyriwUWNFNmspOLqwM1z8siv85INX7gUu+mxc7mADSieHN/JABY4vZPSxKbsYx2jRIzuBp5N+9NvU5j9qeOHjW/6oj4wdMsGlR38rKT5smF5g+4oDvis4+MY7eHKRPZ3KwchD13TIXtpdnn2F1Jhn//rS+U74VtK1Z3xdz5Xkjp6aLLVHc/vdK9sWvD/DQdYOVWvfOlmiQ77lt7Tjr06P8E7ZPSdT96eMS+e0y8oQj1NWzw7nwBrffirWgd0pU/JrX/lPfZNLeyWc8MLxvDYIly+yffn42jca2RrMc/rya5d54uJz+QSaaJgD+sSNDoa0RRdcl5/+8fcOdMl7jsvCP3dP3vBW9tqtK9YULwSy3dqTbNlGXDN+rRlvjX7dvFoLvHgwZEMomAvifbrKifydIY5/lXe3gEBiYpqMJqXC1iUV2X0XV0HJs8SkvxtTwtaijm4LLfpKC7tAos3PySQYBYb6CxYFnA2g58KB7rYVfO4M3/TpR3O/+onn4ro/6T/dvvxhl2QqcZEs8UE2sOB6zhctxH5yFb1HPLaPnUpQ2RFdAVtyLEEsSfv888/vbxuMlWQWz/6O0MqXbtXgwcKRyHXAUxDOB9T1SZz7fB6eZ5uEfh4gIY+m/uR2EKRIrPxEgG34CJ309fd//G0wb1I6gPQ3goy3NyIu9OCxTfj0uBaNN459VZcFLgtcFnhHC1h/iqdifhsIa7BkXr9DB+uPQ55eDrUZ0W998Ta4jTt492K8Q5n9IqmXeQ6NxHk8+0qpgxw0rUEOUzrwEP87OOmARZvDGGuKQjayVMD1N/DU8hV0yepwxkscONYvVzA2ceDIYQ2S+5BTHY9yF3kGnchKDnaB21qNhnVLISt90AmfzuTwEuTRF0HWf3lvG7zWaPjowetgR01/PNH0NzfBeS4HI68+NOnkORz3aNJRjt1Bh3ZfXFjH0fGMLh37ymf9oX769KV1m/Ro8Q+00OlCj234oxwNrEt+oNafXbOzcQMvR4kOno01OEWfXMxBTn/PUTvboF/OgxZZteXP2RCsQhYFTD89BJO87h+V8LdP26P25RN8fNdm2TqenhuL4OHT23wOjr4KvzrlDY9c6VTbPi/9sx/t0yY9Jz+ceK8N1s4rg/tooL/8PYtPYg0fopd5bi7T+4RF57T7c7otr5VNe/J1n27h6Dce/DHYdO5vnYYb7eURTnVzfG1r72S+REeNpyteO8e1+yPV+op10Vu62UNbtNKv54VpPGuLJ1jzSo5faTzSVbu5LSa198hH3yJdN6/WAi8eDLGKjamrcjnPf8ZX2LHkyyQ2cbW57+CgBFLQDV6gkLx4EyehQKM+9/ssqMEVAApY6pJRNb5qRUARXFzaN2CmtbYC0PYXeNAiQ3p0KAQnvvGoRie6cB2EqPVHh00kdXTvcGRlco8GW5y88HWh5Y0eOu4dgkh08PNmsoQVvqImh/ZgJTvxSP7oNw4FWzwkciVfnsGA37HuoIccgrmE1yX5kthK+H2ZBI485NffIY6DLDR7Q+DtMFh4ElLJrOSLfRxyWcz74s/ibkGX1HcQBbexysZXfVngssBlgcsC/zkLiOViuPVCjiXW2zh4tkm2vlszrMuKZ2sJGAc91hAHCg5THD5Y660NrUv6rQ2erRfo4om2WK+0ZnqzbK0Ar7Q56QsktBxikLfN+W6awJPJ2uHQxlrUlz7o7TrunrzWIbL5AkgB31rZ3xgqx7D+WZO8oHQwxBb49DUG+RT9dAmmtRYfMsLpq9k2tOVAdANHNvLjUd7kiyCF3ejdWi6/cQhADrZ/ur3Qgqeg69BEDc84ktNFL3SSH0wy4gnGeJVXaSsna+zyC2OKnv4OmuB7hqe/g5tos4t+OQa74x385jN4wNcGV/7Ff8CXt+G1OMkJvhdojaPaAVbP7OS+fCM62rO9ezkKv6MH+tmlOngHkGCXvj5lZU527QubDaJLhq76Govqk8Ybdne6xoqvwG0+pNfqAedsXx3XFtE/dU8+YxbtlfmUc20ALh9Y+tEJtucOpj3T6/xa7KThOXnRSq7gagsm+JVx25LjOZs1ptELno5K/IJLjvriq+aTwZ8ynHz0OzjCr8NTcZ0PmEfK6T/Jtn3ajKMr/1OLNX0FdCd2K2RY38YHfm2n33suLoIVr8xhsfsqlwVY4EsPhi4zfTMWMDlNSgGjBKQDhb4a8izo9pahAGHSCw4SmZIbMF0WT1d0z0XFs8CgFADBKwURzwW9DY4bZApoBSY0yeMwJDgBp0B01uDpD8cC6qdPPWeLElrJBHjBK5k3IG5iUfKFBngJm4RJsicBj7bkkazhkq8khxxw2Rk+2epXd5XA0SG6ksVoGmMwalcyOQTyZhOPp1sySX8JK33xstCSTzsZ1Su3wx+JuDaf3v/kJz+5284mAZ6DHgdCEmoLU/89DCwcB4vgOlzqLfA34+0X1csClwUuC1wWOC1gHbEGuKxNr
ffiv7jdyxp1P/uyNjvAsb632RTvW+fR9DWOOG+dEPMdiKABz9vrNkwOYKxL/dSsdjU+/fFmaxj5rIXu0bImq8HhQQc8rVXw0W2tBtfLlf2j02309fe3gRzM4JPM6OhTyNma7BlNhw9090UJfayjaPSHb9Emj35w5JQr7XoPxkujfk7dC7VyFrL0L+4d5rA7WmTzRQyb2gjSv7wELjlsotGGR194+NnkbU6XreDRC3wbvPIS8NnXei83KA/rq37PaKnByy/8JJBschNt+hxaJUuweNPni9tP7oKvD315jSsa5Yba8gt9SnZIvvS7d77p16fAZ0v2cZ8O+shpLmxBSyGv+3hqq8999OO9dbDx0rf3S6u+aEbXMxnCU2fz5e0nRPkcnEp0w6vd817ao1d7OjcW6kf34S3PEeFfZF95Vv7491M+8UmJ5+I9ogFWe3Tgrb3Tqf6z73xee2UHcyo++UX0qsUAc5eviQP94Xn08DgvPo3W0jth6kNXjBB3xErtybN6J3s870C3kr0bw+B6zn4nvHa8vOwVp5S1V/35aXNNjBFD5P4dzkf7ql+3Ba6Dof/h+LfYtxCapK4OADr0KeA14dX6JE+SRUFI8LCognVZhMAILp4LZgKi4qdN3oLpg+cqwMFpoVfXHo3kqF3tJ0qKe3qBDVe9Vwck9BScJZh7sFJ7ybLEL1uhH90OkRz6wO8wp/YOWbJRtg0Wn+RCl1z62NOhjSSTDOlTgA1ux2kPg/CJLlxwkjmXN254SHDd65NU65OQ9ZOA3j6iBVa/NrJJjG0KJGoWIV8xuQcnKbZJ0MYH/vKXv9z16mdiJfdsgx76V7kscFngssBlgf+tBcTp1mfxWfy2HojxLmuKgwubMnFbn82Iy/qtWKOsKeDcOyAC70Jb/Hc4ZI23NvVyySEEeOuMWj6BBhx81dZCa6m1B4zcAZy1pbXExqQDoL4Wwas/Oo0GHh287EGA9YvefRFFJjLCxxMN9OmgZhN00ATj8KUvnHopRnYyWt+t5/qtn77MBdNBDRhraLLRF+8OUv4fe/e2I8dttWHYl9JHP2JZDhAEQe49J9kcKLCReBfFjs90KX8/lXmFlUIrsuWRZiZaBApVRS6uHVkk19esasBP87067FeOLz/0wwp58h14tlugtRiZrZusW+iDrjrx8+MQQOlyBXVqW3R9EFyb45V/4qFfuJ76uSdLf0kvebVr9Gwm3307g/KfvqW8tWLrSvnpHu/aFC907luD9oTRb6ZekSNvrlfd4zNTdTuT0YFOfsesO/PQVz+b/0PI9UZ+RzrRR1I/v7mWus9u/NPL9eSX/Kl38qc9eCYvOd3jqVx+z+ItfvHN3vSa91PXWzyUez7s2kqPSXfW7VaZvLMOb7ItfvlUvXTM12iyPxvPtkZrnOhZr98rq+2BKvpgcuS7rs3SO/7yq6+M/yXPgPHJ80NO9d6ka/ImHT7squ3TQ77r6X958a588qyu8Ut8gdZ4KU5wbdwWL9Sv8di0Hlhg6AH7QAOUBUcTjwnfg9tAZjBrIDAxy7d4sSgyUBugPNwNDvioMwdCZQ1kDSwNCmjJlE8fCwtljmfPnr32TgPXPDcAIWpgmZOfPEcLB3ayoQVWYIpFk3yynd2r47r68VXGfgsxC8FeAVNXvkGZ/wzy6rt3nvzo7de9V9dfxXpFiw8taC2gLEhNgC2S0ac33g56+LWBfnSbupKVLwOFJiBElnuDM/36kKSFoDJAmUWcXT2BQfQyUeDd62B9a8rfx+Ol7elO9l/+8pfDz37BsDPINVn5+gG7/YpeD6wH1gPrgf/igeZMc425DgjQq2bOgA1juXmiIAJYgh4YZC4wLzmbmwoEWkuYI5rTADFkmEuBL3gqk5rbyOqD03go98MSvcg0/5qP8LE2oVOAhnm23a/RtB4x/9LRjhY2WbeYh9nPntY1ZJnvgUd0MS9KytnSuoYc+ljTtGbwIwl9+mg1GQ51zIvq40Nur5EFtrUewtN6gK7qxBNf6zC6aRf1ClbxZJd68mY9vhGszb+qr54gVd12ULfmUk6fuSakT+3PL2wmp51Ednho32gCadRDz3bndl31cWt02ti5ftRaTNvgJ9GNL9iJvv4inwx9Kbpb59oQP3VrSzqxIz3ywcHsDSka59p+1ivvlh78XZ90zUZ9JFvpMm3q+UIrVd955ilL7lmndJOfPzMNj/I6xzca+fLoktzqdU/G9IFrZWQ6z7LsmHKqrx+1a2vKSpfO6NMrfp2TdbZ18ph1p+7pNGWfr/GZbTz56kf4ZTc5xi7r4zkezDZynQ5Tl+w4t6v+7wflyxXM1Y+Vz1Q9eZMvO7LbNV9b70/62Vb5Ih5T5+rMs2s0+rLx0w8DxUqB+O43rQfywAJDD9wXDCYt0FpkGegNVs4eaosBCxP3BjgHWjQWYl6RUi5fnkFm1gPwNJg0KBt8vM4Un+qqnwyDW6lBsvs5aMlrkGoQYlOLW9dsMLm2CMFP/lxsoI8OcOPaoU4LhYAUi1h5k697C0aLTfnJJ2P62aCLjj4GcoMiMMaAyY8NtOmDbwtOdPLRkDH1b5GF77QVf6g8mRaQfO5XVjZqF4CQRaBrbWyRxU56ysPXP4VZ3H/22WfHPX7t+HItGfSV+VaEftEimi6zLV836l6sB9YD64H1wKP2QPOYcdz85DCX+BHAPGQuNG+bp8wvgnFzjMP8YfE/Ay5zAfDI/AU0APi4lpqrm8ejrX4gkHLyzUlozFWBS+QqB3xIaMgTWEZjjaGe+Q5whBeb8EfrPr3ZCCQyZ7ZbCIgUgIUPO/2QYq5tRxDbyGltpMw1X+Fp7vfjVHMu/9G9oL81D970BmoBvfDVFnyChk7Ora20Vzop6zUyvNWjExq64F09vuc34FQfsk6Gs8M6gV/TTV5zO/3aQYY/3gAiwBXe8UqONpv0ys/02m/ql+7lka3vtK5Rnk19ZDpatpFN99Y6zh101kZsVEd9/s7W1pzope6dO9DSqTN9apsz/cHkjk86dcaDrtoL8EUmfbQ9PUvpko3OycnfaPQz/TxbkksOOjTJjiea6Z/z/SzrOr3yR3JeK3y9UIY+muRNubPe5H2mOet81iP6fBKINfU723WWPXXPV9WpLH7nNj77wT0ensVAy9qkMxrXM+WvW/LQa0fP+uUKCqXDLbvkiSH0q3wXXbZNH+OV7OnL8rIv3enhqN9nhzlCDBOdPkwH45o3FjatB6YHFhh64P7QBORB9qAasCQPcAOHwbSFHromc4CCOsotlNpy7tpAADCSZ4JrwHY2AMmT5oA3Bx4DDr63BtbpslvlLWSbWJ0tuho0G7zY5zpAp0UBm1zLd7aAk6e+Oq7xDBRy7h6/FmboWkT3sUv+MBi2wAyZr44zXuqRbZHK33Twq9oEnKIlx0Ff2zXlo3PfYtYOIW1n8Uq+hVTfLGCTRUNgEJ/S4R//+MexqCa/X1O//PLLo11s8+8VNDZbRONL7/zTpPDAXXzFrwfWA+uB9cAv9IBx3vhufjG3mGMAKOYV85pxH42Ax2HesG4oWCdeOTrzBz6+U9dcaN4p
WGoeNQ/h0Uep1ZECOdDLA+oAmNwXEJunyJavnM74AIgKeOziYYOgRbmjnRpo8DaPCcj7dRsvegExkterXsARdIE7M6gyh5vPrY3oQA4fouEDgBK/8I9/bmWHA7DiRxzzLb58Qz792EcfPOQFIqDzA0/2VI89rcHUw18eW1zbsUM3OvQjH/rq4aMPkK1+8rxiVttpn3SffYaNAAr8gBzk4IfWGc/WhuySh7ay2Sfok05otaG66PM5f8iTyHaPR7uxamd56PqzjdaUndVvLYy3lAzXZ3r3eM76Z5pZLz3ZEe/8Sg5ezuj4whHdtDedykvns47y2dt56pl9k3/18+UtmmSjwa86x8VIyqrf9cyr3tmucx39U71+SE7E2Qfpojz9y7tlx9kXk6b6eKXf2bbqv+kcv/r0rO/Z609a8kOy9AHtdUt3eee1trypOz54iDf0o/QnU2yQnPw377ue52kf2Y6pY9fGCrFG36DzdoKxgg6eQzGDOGfTemB6YIGhR9AfLE5MSi0GWsh5+D3gyh0eZAOxs0VAk7FJ16DZYXBxPc9N8g2u+DWYnQcZMh1NlFwUTdfODZSzjEwLBgOPRZGBiSwLn/jOcmUGJoBK4Io66sq3+DF4mairj3cLE3Ua/NRxr168DLpAFQtPfnK2ALTQRIdn+tAlmraVB1DVBmQns4VsYJB8C8vslt8vkL4l0K+O6ABBwCoLtGTJ//bbb49BXDvb8aPdAID4WNgCitDzizP96MaOTeuB9cB6YD3wv+sBc655ojnInGIesCZo101gjTnO/GHOLwgPSBIsmBfN3crxcG4ub040t/qBybxZQIOHfKn5M7Cm7wops36we9X8ZE4EQqlrLsO/ALs5k012BqUjea2FzHtk9uFqNrovkCLf3I23M73QNi8Cd8jnL76QWl/xKV+QZdYDbQAAIABJREFUYU4GmOCXfnxDF7opCzRRbh4X7PH13EmEf/M9QOly/VELfQfQKCBE2/ERW+x2pitfSO0k1jZ2ebO7XWF4VY++7CjgVJft7tsZxDfy8HJmEx5oJPWVld8akp/Q2GGOvjWNazS9RlZb1Y/Y4rpU37Lm0R/snMCXXdo8HfJ768u5znzN7O7ivAYlQ57kWsonXv2fu62sndCSpw3Sdfow3zjzGfoAsfxTvZ6lZE/b1Vc3HumVr9J1nruOdp4Pw65p2poM59qs8vKqN3lnR2XRnttOOZ9drn0ZjXbvWarumYf7fJhOt2jlKZ96Tbp4TDvOtO5nf3DdfXq8SUb5k2f+nD6ctkTbuTKy+E4/qczz6Tms79yqM22c1/lh8s9f2Vjf0r8c5Bhfuo/OffGi8U6M0Vgz/b3XH7cHFhh6BO3voTXBG0wMvD3050EsOgsik7DJDIjQwDUHkBYg6PA1gBtYHO5N2iZkdZW1cHRWt7wmrnhbNDTITP1co23BEWgC0EGvHhs7K3dvIakOm1rwBqy47wPN6jXgobXoMtj27R551TPw8SNgxqLOotbZQGkBOAEhMqoXQMOvbXvvA9TkAGbIVN8iyUJQXbuAkk9mC8e+aWCB2iKKz/1qyvbAKnItMMm1SLaYdgRoKc9fzuTzx5zEHkE3XhXWA+uB9cB64AN5oHnT3NOcbeeQucvcIc9cJEiwa8c8ZA7sByZzjTkbXUFHAYb5xfwGBBAAqouvucfcKxV0mM/Uw8P8RJ5ktyw+DvWbPwECyfSDSbtJ0NCbXPxae9hZS6652JlNrQeSK8AB/Ci3K0jdgn1zq7pozN/0JsscLKEF3ijLXzOos8NIEBXw0dn8Sxb98pt66V4ABshpzUYeP5nT011e6wNntIE//Ek/tAJLawpy8cj/6iuzDlFmXWHHExptx594tPYjw84BAFRrwtZy5PBhOrGnYJY/8cp+NOqlhzaRyEpn/NGX8Ld2yl/KXTtaz2RfOnXGI31v1UsG+jO/eMvXzgFufVMpHfCVok+evHSOVt6083wdj/ilk3N88y0a7aJM4rPo5J+TP3sBsGVr5XSQpu+6nzymH6eN0cqbPmin2cy/xVfemaa8fDv1mD4qX160Xc/7STevs/lNek17pn9ql7lbaPK4ZU/1qzvtmHLw8Tz1TLmvjV1n1/TNtANNKb5Td2X1R7GHMc44a4wtRtDH9Hc7hZR5ho1ZxjRxxqb1wNkDCwydPfJA9x7iFksmBUeLBQ9+iz8DiMFfuYm6RZjFQANHE3NndVokyYtHv4rhBQjq3C965KKnWwseiwByGtySKV+es0mka8CLuhZhjoAYO4jQxNs5Goshh0UsemUBL+7pfbn+ahGgE32LVgOfgdDAZ7C0sFPHkS744NliFfDDbgNrv/TxCb+65z/1eyXMwJp8C50GdgsxelioSWz0MUl8/SrmDBwiC2+/ADrTFX86WTRaVLrO7/jMyeeBuumKXQ+sB9YD64FH5gHzrmTOkMxH8sxhdvw0jwNAzDn98FOQX1BrnjHntLYAtpirzE3mu+a56AAjvWJlR425FuBgDiPf3G0XjjnOvJxu5Nq90Y8x5n7zbD8W0QGN+ZRsPMyrAK5egUJjTqR7H1Sdu5ICY8zZ5mv81ceXD+IDbMG73Up0xJutaNut5Nq6gV3WH+ZxQBzb+gOJ6vSxbIGXdRy7Wjvxp/UBYC4wwLn53Zqk+d46IF20iTZQZn0jsZ0siZ3swkud2rx1JN20h/rK8g/a1hlkWM8444XOwVb2syG/pzue/aBIDz5Kxmzz+qiyyvHuu0b5pz4G3AoIm0Fx9VuH5qvDCdfkvlS9eAqYXUdDZoc8ZdGez31ke/InZ/aV6mcf+TMv3fOjsnZgycunrvP9mRee2R5dMvKx+2R3fu2Uu4tp67nM/awXbT6pfNar7E3yoq1Nbt0rY3cpudk86/w3PlOvySs+9Rlnz5TnKX5n/buf+ZMWf/fxdD/5dx1N9J3z25muvnLmFx/l9SPX+qEx5fe///0ByuNb/zCm9C1aMYjDpoJA/ny05/UADyww9Ej6gYfdBNuE7b5FiInDRO0wcXjYDQSSOhYPTSgNWAYFvORXhn4OKhZ1bQc1sScjPRqgWzSYqOMfr87yO9RvseGaHYFCJmb6u0ePDo2FmQUgWwzSATfy3Ft8Wcj01/XKo6kuWr8EWkQaCE3k8U8+Hujws0i0qPXrXH/nDoxit/wAIjwsirTDq+uvj+TRST7fWjThaWGpzPHdd98dZ2CQxa8B2wLTPb7yHPRhv7OjNn8k3XLVWA+sB9YD64En4AHzagCOOcZc4zBPmYvMtwJ1gEhzfUGp9QI6+egChOzgwdc9mtYPaM1V5kUgh/UIcAQNsCQQwbxrrgUYtQ5RZo7F2zXd8BK4oKkcL/XoKHhjm2SeJa/1Td/aU24do0ydgiL8rAfYQDc2om190w8x6K0d0Ldu8ho3/XqNrEDM7hy05vV2BPEJnuRbhwCjqpef8bXTQ6pNyLeDW712Z/HNBGaq1wew+bU1njUVkA2NPD5wds9vEv/2z2N+qNIHAoCipzvd3POBdQke8tUlo7UhnvqCcrxds6edJWToa/SU6FPwWtvxZW2ErjXtUeG
ujnPrylleP6ysM3qvjAUo/ZvTvwN1OsQXvfvAnqmL/GjRpy8/THl8qx7a1uau0VRv8kHLTn7HSxl/8zUZ0c778qcPoqsvaoPsSvd0OAquqfvkOtf/Kz/bHM98lW3VS1bn6O9EHqdJk5zZVtHGe54ryweV1Q/SK77RzzYqL12Uqe/Mf8bJyxXkzT/5YN4nd8qZMvC5ZdOkSe4tXcuLJn7Z6d51enftXL81tukHxuLkGo/cA47FK8Uu+qrx0Ni2aT1wywMLDN3yygPltbAwELU4Mfm0CGtiaiBxbxI32XjYG0ANEE3oaCuvnoGDDPUNKEARfEzw6tmmSr7FjPwGGosC1/FxPg9Wc/AmN7CjhUmgkTJ5HRaVFkMWbAZriwwgTUCKwbuJ1ZlfHOjQW4i14MPDr1B4kxdPg6HDr4vy/Zpnkrad3YIL337FBJqxBejDR9ogW/nghx9+eP0LJPkArQAiPmsh22tt5LUApXODfefDqZvWA+uB9cB6YD3wCzxQsGD+M+eY8wTp/WJsLjJHOcyZ5jkghLM5Dp1785z6rRUKXNQ3f/thRSLH/Pjjjz8edZXRAZ15ELhDloSXOZY+5uXmf3NsoFTrF8AKmtYR5m3rEnTWLq77Aw5zKh5+CbcWaK1EHj7skMc+9ejLHnkTNApcUkYPoJa1AECp3TjpZ2dUABNb8QagKO81ctd4Vc4H7AAopacyOlpvsUMd1+rRn67O1hnOAQf5gH8CmfCnR3UEhvzlaH1pbRTIQ5bUea41yaIDedrJWk5eNuVDwJj2JEO7oyHj1fVHNAkPOrJTuXtgljMASZ+hc+srdknJryxe6jmAUOzo/qh0l6oz6+InOc+jNdiUr/ysR3L4wbpR0udbe5/rp0sy6RtPvCRn9eigrD7qLC9fJzufVO9sXzYmR/m0NZ2iSwf30Z5p0ku+61k3WufkxHOW4V2atk+a8zW6yVP5tDc56XOrH6BRXlmy9V3P7uUaV8x0y8/lnW2IV/zTp/Z0Psstr7PnRH/KrmTEu+dllnfNBs+95w84bvxT1jMMlC4Gyl59tR/BjW2b1gO3PLDA0C2vPGBek7cHuofcxNzgZ4JtYDB4WKy4B6C0cGlyMREbLJSZfC02Gmgb7P1C5ZevQCCyDDb94taE1UCVbPkNWtzVgOYabYs5C4Bo50LQgrEyegauAFJMugErzue/n+cbvCz26M5uOqMFJhlouw8UslvHQEkXi7K+NWTitWjlB3l4q2uhZoeUZABtkYofX9LXgpIODvaTTzb7yWnBhl+L5QfsWit6PbAeWA+sBz4iD1hPmKsCbwTw5iPzfEBLIISgwetIvWaAxvw4A6/mNWCPuc7cBzgJXDFXojGnk0lGu2XoIuFpPkZjXrRGsYOFHHUBLmTjhQcaMsy7dKQ/HtYw1gvsI4cu+ARq4dcPPWRUn4zsEiQpU1fCr/mbLDqQg94c39oKLT9ZU0zQiL7kWifEi74BIb7zgd66xdnaAz2+fljyAxXfyCM7MKEPUltj0IUvW3/xFXkS+f2jlp0C+RW/2pMsfFoDqtcaDZDkh7IAidpM3WRrD7typPkX7vFrjdl6CC2ftnZ0xgM9IM3aSh6a+lpnMgTPfsRT3qG86+qinXnZNHmxp/U0+crQydcflUl45jP3rtHTJdnO/Q19cmddPqzucTESeind531k6VLZpHdNb/Wd6abtJfUmEFv9/DB9N32jLlp5k/Zgek3lz/Kpm+ueEdd0cp408Zrn9ElGZeVPfc+8zvJvyYtm9hPXjrOsdIhP5+rqJ5crmFTdyvPX1CcZ1e1eexkTADrFBZWd5bqfZa7rr8YssR0e9OowFhhb0OrTDrTGReO0V8h8WysbbrXJ5n3cHlhg6JG1v4fVg97ZgNPA0KAwVTYBGBwsvvrVz4RkUSPf4iRASPmcvF0H1phM0AUQxa9Jx7lffFw3YDUQzgHMAGXCdG5RatJyAHnUZ6PBSp4FBDDIIsbizYJInsUUHq7Phx1GFrkBShZU6rPXNfCHbRZg+dIChE/41ELLgsViTJ5Fnl+v+KTB9sWLFwd//iTfYO6QRy92kGGBrPzcTjvwPrKHa9VZD6wH1gMfoQeab81V1gYCB3OZ+dd85zCHAwWUS+bJgmf35jtzWvOjudp8aw60DjFvt0bpA9DmTrL7VdwaBPBRENOrVurjbx3iMH+rY82BxlqBfnRLD/qhI8ucTB95fuTp370mYIXf/Kcsu3rseqJ/ABa92BBvu3rwbp3jm4WuAU5sD2RIp7oWufRKX7oF/AjMAsvYl0w2kmvNYi2Dhu/IIwtoY2dAf3DRWs5apu8L8a86+Hh9hJ4S2tYsyvCrbVvPuCdXvwCAsE37OfiUXGsueuEVT9fsiB/+Dms0SR+Lhi3otHc+bZ3UfT5072hd1bpT+aw76chlP5lk5Fv6KZPHhoAvuuCLrr7qXjqvl5OTfulRn1cvHzijJ7Pz1DseymZ/zkZ6piPa81py+kX/tVa2e4nO6tWHk02P5Oe7dJv60btUndpznuMRbeCE+vo82qnDlBHf9Ok8fTbLst156p7s8pznMcvrQ9oZDX35bKbpo+SrVx8yvl3Ga2eT/5Rbf5jn+OiHxoY+RSFfXxX/OOOTrvWFmecZpIdnNsAUXQe71Mevz2EUB6pj7O0bcP9h/N6sB4YHFhh6hN3Bg+0B/ynJoIW2X3MMyiaHQJ4md4ML2iaDJj8LRItBE1F11MfHQmoufNQH+EhzMJ73aBwGIDoFBmWTBYm8jnYHGSwtSFq8sclCaQJCBuVs9QtSiyW6AnbQWzy1VdJ1vxiyk14WZyYgNv7tb3877La4tPikA70NvNmfTvylPn3SwSC/aT2wHlgPrAfWA4/dA83N5ljzmvkyIMC859q812tBgRfsMv+ZM60HzJXoABzm2LmmQGd+NKfiJxjpu33mZfdogB/o8LBjRyrwtB7pRx3rBPOx3bxSYJE5XF3AjfJ2RFtnSECffjXvl3X81XMAOiYoxGapoNoraoCdQCN1zPcBZ/Lb3cOOvn1IBtn0Ip+frDHYIaizHlGXHLb4cQpv6xLBIr8FCqDjKzytdZz50hpEO5TYx27ldFHHPT5oJbys3ch9dX3FC2jTOi+fONPPOqu1Tes893irj44s12zq383IyU/p3bqPf/zwlj+ixad+WXCtrL5QezoXIM8gvDzy+FgK4HHPfj7Gz24t+qWba3yry5bk2gmlbMpVlt5z/Zu/a4P8cxDfpejdxmfaU7+W57mpfaNRJ7vjCRiY/NjJxvRWhy7Vk9+9elOn+MhL1lnX9JaPj8TH1vDqaV99avI666zsLDeZ1Yum/PrArDf5lu9ce01/yedfh+sJCNXeyXCf/7Sro1gkfyc7uim3a2XV178vd7GL8Y+/5HmmjGHue26d6YdOfzRO44PeuGscUOe3v/3tcU5GNPq7nUjaJd6ed7azTf369tFQm9YDNzywwNANpzylLANRg4zByEBi8jWxBOqgMWDIb+CaNqKzMJzgkO3IFkstEtWT8Glg7Ey+gclA1wJC/QYri5Ly1TdINRgCeOQZOCHbri
Hdzg6DpLOBTr66/QMIveVZGPY6nEHPQpNuaNvhY1s1fSwa1DEA48tnQKXvv//+0JGuDvLYJWVnk8dT6h+r63pgPbAeWA+sB6YHzH8OgYJ5XzBqvjQHBwoATsyD5kzAi2QObc2hnrmzfPNj87wyu3DQolGfDIGjebVdwfIELPIK0qw5zO3R+MEGSAUUiUYe/YAiXmtTpzL6kq8++/rIdfN/r40rC3wR9NFBwAscAewoAw7xCX7408taQrk1U99ZKpjH27oCfcADOSU6AZR6ra2AjW0AJbT0zFY8BPvuA40KIvk2vwCsrIvQOwKAAHfpxj51aqNoO7Ntgj90rk2to7IffTq4rowt1mvoSl2TrY77Amo0gTd0Yreyub6cAfa5HE/1rTXpzmb8tYH1ZHbjUVvg4XCfbn2Um9xSspKPvhSd+vLjeZajvDV39dVNrnOyXQe60V9KDr5S9ea1vPyVjHyITl9MD+VolU8dDubXFK/sONNM/6DvlcXqp0f36vON/GQnI5pZ91wWv87ncnWnzGyb+fqk3XSVTfrsyx/RuO+gPx/2cfh4K6/d6ytzF5A8/RPAA6iZ/Vtdz4LxA/ijDK1YCA9jgHiGXOObscLBlnZIok+GazzUe/78+RHfeCbVV+aa/409xhI8N60H/psHFhj6b955ImUGGQuRQCEDhEHPoGAw6ZehJp4GsiZHg40FBh5oLbIMIg1mzmibcJzlNaA5GzjnxBLSbeJWbpCSRx+gEDRbnoHQ5OnXxGhM6vIcFjzVoR8b+qcQOlp8sgewYyFgEdiuH+/R0kkdNvonEborN2DzGx85ujY5bFoPrAfWA+uB9cD/ugfMh+bogiJzafOqdYQ51c4XawJrA3nmUvNqgRUfFUAVoOOnjvnZ/C/oIasghkx8nQOXzPf4zzUDGoEVYARPMsm3VrAuAAqpZ47HW3JtjrfeIL/gXH3gC6BHcKS+9YK1jfUPfQXl5NFJuXz12MVGCRjEnr7fiCad6e+XfeXWFAFO9PYqO54OMp3xVEZH92Sz2bV8PgNCqQtQojv7OvwQhk7wyyf4pidgq/VPgSw78J4p29C2dsQfXT7nfwEnHwMEyPHqkv7Ra2zy2Nw6ET+8JddAI6Caa32ifw9rnUnvuTatT86+6ZpeZOAtsHYtOeMdD7q6p1flycjmypR3TYYjWjzKyx9n2mxu3auO/hSgWP14OTukqaPr2or+2iGZs87UO92Tob71rTW5nWj0mGXJ4IN4T/2Sgw85+fVQ9i6le/dnfdTxTDh7PiQyiiXYJSmXsjM6/NB37zpafkn3M43+F61+qU71snHynLZWD722J6f6k8fMc60/AmDrd2xsXLvc7RSqL3VWHr3n1pimLwO8Pe94GkOMb547z5n7vgGnLjnAYLGUcY4Py/fsG5+M5exyL24CemfL4dxN64EbHlhg6IZTnmKWQcJhAGiAMOi5NnCY8A3QBlyDh0WGcoOGQcpCyuLEJNLOHfkGWgtC9fCfIIqJvsHUgBa//IfWAOcwYVpcmCiBPRZA6hqoDG7yDfb07R89gEbxtF0ZH/b1a57J79V1mySgyWCHlwUdvvS1+KOXOmjZ2C+ET7GNV+f1wHpgPbAeWA/ctwfM847WCebufmkWlFgDmKP78aj1gLnWrpqAFUEM4KDgSsAjubd+IMP6QsBS0Km+ud1c3TpGsASAUQ/QEsiBBq21iWBIsGTdgq91DT3N8+ort56xpmBPPyzFY36QGn/rD+siP1K55wv2WTsI8PCxHvEDk3L8JNf0RNsPU+2WAvKo1/rFTgDBXrtZyHRYx/A3nnjRlx/7BiJ7A35aw9HFWog+ZNOX//xpRvJaU8VTHTZqJ3zwJKs61lL9KEdvNP2FvfWbMvT40I/eaNy3NiSLXG0xg2U6ks3G2r5Auddm1O3AM/BIHprq0o1sPPEge+qRXOW1DRrXBc/pgBd9yZMnxZM/s01d1/r6BDvQajvrWOWOqRv+6kqVu6ab+/N19uNbco22PhevbE63y7WfosM7vsrYTo+AE3zSZeoUX3XyT3qik+eI/2sFrxfPnj07gD/Pi/6Eh3tn/ukZ1C/Ul+dcXJKe5ATIquPQPp577aEsGz0XEhp92euSeHakH95T57OPop++d22sKM7Ba5YXb3kuyK/PyW/MqM850xVtupCpDrsAyOIVPtFG5Ip/jGXRzD6JxrhDlrHEjqLevhDv6QdS350CeHvmlW1aD7zNAwsMvc1DT6TcQGHwNOBbIEgGjSY69wbD8i04GgwNSOoZvCxkGmgMPK4buNvdY2BHa6DBo8Ey/uU1oFkMoDc40c09XdMJEGRQdwCM6MIeOho0DbIWh850NdmY5ByAIHRsJQ9Nu53ktaDCe9N6YD2wHlgPrAfWA7c90Jxt3jWPmoutARzWBtYE5n/3gBvzOLDAGgFt/9pUoF3wYy7GT0ILOMFDeesIa4Q+Wm3+FyT1cer+FdR8Ls9aQ55f6unlHh/BERp8WrtYn1groBEYsYF99K2MHsrl0dM6hY7ROEt9/wh/cls3Vd96BFgD/GJngat1ifzWWnjxUfXItC7Ck38L4oEh1k10lpz7LhHAho3qqYOf9R6Z7JOfP/q+0KzTjhaytLs24nf3+Mhz3ToqEKK2lZ8vAQHZYqe2PhE/AFIBaoAKvlJyC6w7K8dPchaca6tS/apgOX7aEA9H6196K/faFsAse9Cg58N8ng1ogTt8qS+wwbqTz9hPrroOfT5/JQufAABndN3rM9M++YEt+GaTPM+Ae9e1MV75Jt7VP5cpZyc72KScXcnHR91S9eMvX54061RP3cqd+UmqvPsj85r0A/7sH399l4iOngu0+g47e/7rD3SX0NPDc8bvU0/l7svr+txvKj/bmu+dxQueH2NACZ956Efo0Jcvz5HeXbPPbh+05EePznVt7lqd7iuffdqzLU5qR6ZnnM/SxfiovfkQjXLgMd/iu2k98DYPLDD0Ng89oXKDgUnHYG1wMYH1WlUThwHG5GzQaMKRpxzqbNBVboAycOJhcFHul7Q5QDX4NfCi94tTk3SDnwHJwuh3v/vdIddhgdK1OhJ6tCYOv7w5G1AdXhWjA/vwssDrl0PlFijsT6bzedJ4Qk25qq4H1gPrgfXAeuDBPNBcb541xwo+epXJ+qKg1VkZMMD8HHCgvjnZukK+unbLCFyan83RBUF4Wq+gA5AIzMz5gkFBT98R9Gu6NYo1goP8dgnhgb/A0foBoNA6BpCEth1OAQJ0QEMWPuoFLHE+G9jkVQxleKC15sDftTUPYEo52uwrEAOiAX/UmXXxB/xY07BJXfrwXXrhOf/eXntIgkM/qrXOqR5btJcz4Ey7sMG9HQvsSGd16E4uGvkOST5/qkNOZdpCvrqBUtZypcvdrhXyJLYI7K35SsrUZxu5rSXldaB13frSuSCcbq7ngZaO5bUWfC30etEHprNF35o81KkvtAMofyS79XJ05Cqjk7y5Bp3rZWXaIrr8E396ylOHXwJP5PVRaXLcd2RHPooHndD0DaN8kD/TMX1nPTyzSbl75/zrPFPr/fLT7T+Ir
jfKgZrO0QKD4p1ufoCurSctfujLO5cpj29l+SXeaMiTZln38vRvz82UI9/97CvaqVcZ5dfWnr35gWnPfTbFA63+Vd+vPc783XvWPF9o9YtPP/30GKfcz7LG43byAaIBocZL46ExZtN64Kd4YIGhn+KlJ0JjELFoaBB03SJNnsGowaRJzmLEQszRZN8ghdagYjKTDDR4GKTQxgN9g7tfBJKv/ueff35MhMAlZQZdi67J55tvvjn0ssgzqBpIG8xyvQFRWQMlPgZCupC3aT2wHlgPrAfWA+uB+/eA+d4B2DCXO6wZAELm/8rNz0AgoEuvOQE1BJcFxOb2GWhZC6hvjkfTfK6ONUy7OQJ7rFmATb6h4vVx9QIorAvwV6eg1zlQp53H7eAhr/WL197Isz4BYCmjF15+iLLemMASUAiNNVavv+Pf62PskujbN4v4Qh1+st6xe4NM4Itgz3oGIFBALrAjk4wZsNPZukx7uAYSVQdP/Ogc4EOecrZYa9Ep2+nI53xoZwE57T5wTY57NPhK8u2gCTSgg+RMlnz9IJ2UBShZ37ERUMS/6lSvNSX6+oGz/MoK7AvEncmKHhiiX8gPaIsHvdhewjM+c62MPtvPtqUrPdgk4Zle7vHUrvNzCMAdMsjEHz1e2oGPXZPZ7pjaDF32O0voqpveU37XyVDH9eSlPj3xzFZ69Tzmq+zHg535OZ7Kk5MMdSt3PtcpL7ruz8CRetWdPN6WV73O6ZicyUuZgx+0p+dg1q+9onH23Hnmahd5nivPnHEpv9bnkyHf2GMc0Pej67p7fF3347nydlM21jqTCcTSB/Qh4y6A2rXxRMxk7OvPd45G2bQeeIsHFhh6i4OeWrGBxIBiQGrQnQPaHBDZZnAxmMtHHzJtMSKvwcdAZdFgsmvAwrd/5cDLgGqSa0FgsHr58uVRx2IJb3r5hcyA5j5Ayr0FXgOp63YBoSOL3AbYp9Yuq+96YD2wHlgPrAeesgfMv+ZoZ+uD5mXrhD707Ns9AhTJr+LoBEzWBgWsrUkKxARL6gk8rSnU8QqEOb/1Bv7KrTPkWVf0ao/61hDWLUAj65U+yGwdQh/ndhcLnOY6qV/ggVl9AyjwxHrGmiVQCKBDrvsAiX7QIpd+ZHXYqcTOdv2Qrcy6Bg+BHfmBW/JakwFR6NRubn7mG/X5VB1rK7ZL5PcDmnr0pCPfO7LkX9+VAAAgAElEQVRDffbjZwcC0MJarB1G0TsDmgTBysmpHdVlF320BXvIx5vv2KmMHOfsy2f1H7sr8JpBtv5BDpqC5MrZqay+IV/99Ar8yD7n9AKM0S8go794V54N6Auy6SHRw04M/iPL2tTZmpmPyY4W7/l6WX0+/clRFz0/VT+dnM9HPiCv548f1T3TosE7ufSPxrUy7Qi0KkZgF5rW33jzlVQb8wkatpb4JR7OyUm+ezRS5fkpHtNvrqOfMuTN/PM92sorc06PWTb5ams68odnDSiXDnweL9fy9XFjjXGtPPl8BJDuY/O1l3a+XHfQoXHdt4Ncky3O8WyRX39tTNRP9LP+ddG445mngwMIrf8Y28RmQFvPsXZ1pqexBEjKtk3rgZ/qgQWGfqqnnhBdA9hPUdkAZSBqElHXYOQXrgCetoAbvJo40Bn8+jWrRVeDv0GubxIY/AxyDuUNqM6SwTfQCD/X6m9aD6wH1gPrgfXAeuDxeMC83Xwu4BBACk7M84IRawTBo3VDu4sKxs315nZrCUBPAbLdN66VAzacC67QtTOmH4msE8hC0xpGHj0EUugEUnYBoUODBx2ta5QBonqNCyCCpl1L+Aq6JGsW6yF88ZLobw0kWTsBdvqYNTlkSPwwwSi82F9dtqnLn+oUIKJRV/DHBsGeewcegj32piP66gBggGqt39jFHvUKRAFN6moX5eSXWgu2PlNHXbT0dq19BKJ2IgCWZpLH//qGbxuhBy7pC+mBHg/tjj++5yNavNiSfYEwgng7qsqfQXzrVOAge3yiQN8go7VldvB5r5hFG3CGFl0pOrIk37dsLUzfdFAHr86u++de1x38Uj/HL/qundlLj3aFqdu3dvJR/NSnT/eHktc074EXUs+YMkBHec7s0n696mV3F59kYz7ULj3b0y48pk7u609n3Q7BI9GLDflylvXcyKsNbp1nXddkTr701+6S54hPPGvVm/Lrl+roq4FC7mt7vGpH+Z4pYwbQU76jusr0ac+o59q1Q7kxxGEMsQvRc2rcAQi124+svrGEB/+rb5zBQ983TnmmgdLGinw0fbnX64E3eWCBoTd55iPKN9CYUByuG0RbqEG5DVINpAY+NAY7iwuTg8MEZgBS7pi/jDSIcmsDbbQNnB+Ry9fU9cB6YD2wHlgPPGkPFCwJSKwTBN+ClHYyCFxcW0vYMdNuHtfWEM+fPz/OUjtMCuDUEbRZHygD5pDjVQlriNYZ1iV2GQFZ5PUql1coAggmjeBest4RdAm4ArrwJ19gBbQgR+BFtrx+vVe/YEvwJZgLNBLU0QNw4WPV8u3e4R9ARcEn+Wxho3K+C8ThE990dAZasSNgJv368Dbf4GX9BQBRhz3OdBHcqwtE4QeH9RpZreUExdpK4oM+Vt3ubnXwp4c6dMATOER/SbtLbGlXFBnqtcZzDkggg414yb9cd1YAI2a74gcQ4ad2+6BnLz7RkkHHyvg5IIofyGI/OekfCEOu/O75K8BJfu1Vex9G3qXswjPQLvCidXF8VaHLPNTjt0mjvntra2cygEryq6tvaRv1Z93aZeala3nZGj/lXae7vj/tzm/9cKusXYHpS7dpo+tkzrJo0muWTR8nH/+py612SG88K8e3euUHoPCfMnZJ6idnnmf7AnD1lfocuvziGTeuKPOMAG7cz/qzXxsL+sYQ+nb9GRcBPO49342f9Na/PdfO+nbPsvoOZb/5zW+OVzQ9My9evDheI2tcyt97Xg+8zQMLDL3NQx9BuQHOoGIQawFgUO2XGQNai4g5SJuYTX4Gf8l5DpZdfwQuXBPXA+uB9cB6YD3w0XpA8DLXBxwheBTE9E0N6wgBfoAB4KTgsXWEdQhwAlghDw95AJgCPWsSAERAijPwRhJYCWBbt1jTAI2sVyQ8rG2ADf145QxEwSMgQ7DlWkBIHwcb7S4ooPRrfDuanK2HlJHl9Q66FOwBxfiidVPfJepVsYAAMgV3dv70naDssTsHaEE39HONxi687DDqFbN22vAFGwWbygWtATR4sEMZX8vnT23JXrsatJPdDzPQpyM92Nt3ceyA6ZUj9fFybl1JZ/eO2qL72ksg27d57D4SJJNRn6kd0eOrPiCIbg6ASe0wQRj11UWTLOV00i5S9MrROqPPz7VtoAPAqV0/2hq9Mn50Vle7ONNz+k85erwn4MJ/npH6GLrqVt9Zufr4B4pVd8pRnr5TJ3JnWc/WmV4dwBSe8VWX3c583bPPl454qDv9Smd15KfLpEHLVvXn9dQt3spnvvv8eVS+pvoZuUBHz67nUT7aSRMvtMqdqwfEAQqrU75r/d8z1ycx5AF9LncAUm2kb+ijyrWVvujQ7p7H
XidTZhziQ+X86vBsGi/0K7IARmgbF/QX7YAXGc7GK22UD/LJntcDb/PAAkNv89BHUm7wMMgZhJpEGijf5IIGtzeVb/56YD2wHlgPrAfWAx+HBwQlAjAghCBVwC3IEdAIXlzL7zVzgZOARhBuveEaIGQdglbAZG2CvoCRDAfARrBmtxB6MgApAiJrE7zxEogWaBdsCaysd7oH6hQM2v2jrjJBHlvwpEPBcWf1BGfOAraCSq1NhjKBng/H8oV7NuLJT/i6t+Zir8QP+AGW6I+vMj4qcBTc2pVAx17XYmegEZ7uyWaX62QJUPEpaCW7XQXs0j7K1KGn1M4gtHwv9Y0TdfifLuTRXx656rt2ZkNBNd7521ldfAXg/OQaTTq21mSXsvQDIGlbPErVSU/yJTpK/NAHvgM30AZQoE+Gc/rXRs4d5M7X0PD3GlZ21h/wUAd91+4vVwBBip9XxLRz9eKjTn3AtXz2qD/r8hlf8VN+kOdeu0g9Z67xopO8zvlh6iUvGvwk/PkSaCSd/3UOPwfZAYjqai/n9MY3mXgqc8h3XzkeUs9J/YYNPcuNL+oElvFTr4rlV+2KpmP6wDU6z4RnpV2Os08o19cBNvIddPCcB2TWRn2Qmh7o4huwow3lKWs8UuYAOrHJ2GZcMt4Z94yLfAQoBgIBtH1XCA15duK5328LHV1m08/0wAJDP9Nh/+vkBsVN64H1wHpgPbAeWA+sB97FA4IgR79+AzcADgIXwViBoYBGPpChDyDLA/ZYiwBXChoL2Aqe1At0ERzaXSJQCzwQWAmiAA0AHjKAKfM1ITR0EqwCP/D2Go1XMARVgj1gjoBMwC6Ioz95gBtlgmOAhrzsUp9uAjtn+fxRsOueLgJJcujRTiT22qGjHlukAmv11ZOAQ/QGuhS406VX9pTRid6CdkEuf8prpwGAxG4h/AWg9CrQxZPP7KwS5DrsDFIfoIIeLbuUoQ9I4qf8gbb6BeaBBgXPySz4DjTo3hnvdi3hbWeQszLngnxtyObARvfKBNP6QqCJOlOvCVTIL3D3gWq0+mfyamd8q6fM9fnbR/Jqe7LphTYfdC/v/IFrvPTrdnLRK5nKWq+rq//wiWSHCsCG79lMB22srucmXd0HisS3++w62+cebd8l4n/3/Mwv6msreQ56en685kiXaPI9+9G7D0zGM33ltyOtNkBPHll944os/dyzrl8nX3380HbOj9mfzc4BPsas6GddefqGXXl0wMvz1E6k7ELT7qGAHiCOZ2OOjwHb0eBFnucXyOSZ9awpByDJ5yfPpbGLnxoD0Opj+oG237QeeBcPLDD0Ll7bOuuB9cB6YD2wHlgPrAfWA2/0gABH4OIsYBKQCmoERoJdQYyjVy0EiYItdACQAnD1BUEBMQAVAapAWsCEt7p9owZ/u2782l8AGfgBPAECqSdvBqsMARzRR7AFGMFXMCaIpA/dvNaGDxoyBJGBEQIyAJU6gkx6sFX99BTUsQEPQTP7+AWP+YqZQE8AKhhNN/fsnDsx2Ohe4Jge5EnO7QiiF3vRsIX9dORXdrGn7wsJstEGQCnnM7ryD8Ak4EqZoL0dP4Ez5LMZ2MZmdhRIk8V+bQXwoYt6aLStpH35w64cupBdwB8v/AIl6IFfQEgfT9Ym8uKNDh/39Tf3/OLcq1OBGNlR0I8mwKR6dMNXnej4HX9J+8Yn0EPA3w6a7ApkDIDp2akPJMc9eejkOdtBJZWHprZCazcTG+OVD9XRlumAH7psVLc8tk3bk8VOoMdMypKlnmt9jf2uA93ip530x8Ae+VL26UcSPTuns3vl7snqTE4+dO562ipP39bX546fSZv+wB5+jq9+HCjUWKMPGr+0pTw0nmX9ihw+AAhpe75A77nW140lwHEgkjZQ13WgmedIe6hnd5A+5B6YzF+eW8A0GZvWA+/igQWG3sVrW2c9sB5YD6wH1gPrgfXAeuCtHpi/kAt8BC3OgkABmsBGEBTAItgRlMkXgAFRfvjhh2MHDiBG8CYIEiSpX8CHlix8BHBAGUkQ1e4kAV7Aj+CMXHXkkycIE7yRhQd5BfvO7SYSsAGrlANq6CzRlV5s6xURAIUAkc7qq6ec/AAyPsFfHQdaZQ75gma7XuyCopugskAUDb0BP3TmD4GpANuOE0AYnehYIJ+9wBl829UVaFGgGsjBR3ytPjl81D398r1rfizwRgd0CBSwcwlNH0+WTwZbklV9bSMfjYNsZZJ8OuElaQ8yA7/qQ+pUvzbAw0E35Xg4k8937gMt02XykEe2REdBe3rIu9ztmHJdfe2gjYEhkntldJqJDdWjS/5xlsojTx+gt9S5unj3T2g9I4Eb2thhlxFAL1/JQ+O+nUXsxPuWLPnpC5SpjQ6F7nSqjwTqzLLoa1cAoZRM+R3y2TRlJlsZv9Tnpt/qU+rVvspdS/zvmeGLnqf8NfkEXs+dRJ5Tz85sI23crj124AkUwl9/UwfgXP917/kFfBo7jAmecf1Y39IWZJZPnrEgoMi4pi8Bob766qujLnCb7pvWA+/igQWG3sVrW2c9sB5YD6wH1gPrgfXAeuAne6AgTDAk8BZsAm8ERQVc8gRagiSBIBBEEDxfIxLkATok5QW8+KMVaEl4KSMLEONasCZA62OyAin16NA3iwTIgkVBl+BMItNrV0AgwV+vgdFV3uUKBpDt+x7sQwM0YoOEj4BNPboAZJQV8CcDoCNwVJf+AA9JsGlXgABVEFjgTqbgHk95/OEsoFTX7gZgVHyAROQ6BJjuBa745CO28gcb+lB1QTlZUnapx58CVnLkC0odrtlJH3poF/kzSK/t+EE7OCR8tVWAGf+pJ1/gHijkLOhWZrdEQFt0+bi+gY4segi+vRKIlp7yyGE7OnUDCQKR+BHQJ7WrSn0pYEI9/s8XygJG2IuODo7qyp/2qevwGlUgGh0AS9qAjtmUjurTP9/H71DumvJT92TiqT8FsvTPdvzOTt894tP6KZ6BDukuT/3u499ZGRopOZ3ld7A3Omf30qTpevLO/vqdM9scrvO3OuwI/HTWLkAXvpz148EPQFLjExo8nT2HxiT86sOeH32qvqxPeR7V1U/Jw0991551svvIvDxjiefeMwPs9Xz2MWn52tfYAFyWrz/S1bhD7r/+9a/XoGU+2vN64Od4YIGhn+OtpV0PrAfWA+uB9cB6YD2wHnhnDxQcC/wEWAWyBbvu5+tSgqF5FECiA7YItpQDXS5XgEawq6xgvyBPcCagEugJMAVSAniBrzronQVoc4cRfckEoAj01KHfBI7oIZgDCgkQAV50EowLmIEwgkbBYLuJ7J4pkBe8An7YwCfkoaOjMkGhfABR+fSSgFj0Us4uNhQs94oZ2QLUACLlbOCbXhejZ8E9MIKf0Lfjhywy1CUn/7rGWzvwP5vUAZwIvAW+BeoBHuQKkAFG2qVDOR+SgQdb2I4229QVMPO3A29lBegBJsmiE3vkt4vGLgs6kx/Ihl4+OrLrI8kLdEpeAIB69HTvGp26rp0d50QGP6FxXR9jqyPdnfvXNTzQzbr6Kt9Jnh8pIETdads
tXfgDT3Wk/JM/A6b0C3LZKeV7eWe7D4JrwrMjn9Gp/sOv7qV8dVf1df70o3rR51N5eOcD5d1XxkcS3dk3daI/PaKtLdik/9ud6NqBr8MzoN/UFs54e47Q0aXdRPi57wxARduz7Ox5poN213cAPvyNzrMjX38EYvZdIYC2PHoblzyLnjevueK1aT3wrh5YYOhdPbf11gPrgfXAemA9sB5YD6wHfrYHBC+ABMGPwE2AJCgSBPX6BKYAFgGQckng18egBVQCNzTPnj07AnCBkXPBXwAE4ERdgZtAilx8/WIPWCmgoxP+gj3AiYCOPvIcczeQfHIkvARrgBQ7foBGbKEjfeQJ7AKO/OovkCeHXoAlQZ8Aj054KwtkwZtu8vlLgCt4tHtEAKtc/XZe0UldgaV6dMGPnb4jpF67Eegu8RUaZ7ZL7EkWcEtATG/8yC/Qpz9+dCtgDTToTHYBvTw7LvhH4Iwvn7EvvsAcO5baMQGkUN8ulnb1BBwEXJBdW2ob18oCkvK3c/5QHh8gEluUsZt9Am720llC60DDP+woj4/Yo54jnypnW/UBhkCouRNOXfqTk1/Vp5/8ZLBJmwIZyuOrAAl+mruZ0oW+6LOXLnwk73ytjnzlnhW7irrHm16eG30pWemNl2v0dFIfv/qMPqI+m+orbEJTX+y50ofRu+c/5bUd/vzfswysIlceeqBkfJxn/+5aPn54BLhqd+V4OdMtmYDdy7WPlIfGsycPjfvaxnWH58kYQA56IBI6Yx69jQeeVfbRAy/5fKb/G+N6VvKDup4FdY0t+hP+m9YDv8QDCwz9Eu9t3fXAemA9sB5YD6wH1gPrgXfygABIwCP4E2xJgiEBIaCgAFLQGLgDCBFAoRHkOgAqAkV1ZhAYMFDQCnTxGlrgD9nKBLDtJsKbXIcgDI1D0CXoDPRRTha5fZtI8NdOHPkFkII4gaCDDuwpQAfSyCOjj1UHkNDFK2zss5uoV8zwwcNOIDuNAE7sEHAGJqmHjr/SWVlAjI/dKmcH3vLpLL/dEuSXT4cZaLNNXXrgwx7XJaCHV1zUdxT0a2t6A+gKnPGqjcigpzw+t2MmYAZ/+ZNenjoCegAJnQTwXrsSOAMP8zcd0VY/ndXRF9EBXALS0NIrv7ENDT8BrVwHuLhGi6c8NjsXyKszD/XrQ/jSSZ3O1W0nkLpkJFddibzLFZgoyZ/+cU8viV3aIR/iB4CQ8gFbHfkVwIJeogNZ9YNk0RWNtmVD7ZGOwDZ9lO76IhBM0qfoRqd2DAZu4Od1NvSekfjLw9/heXHWztotefLoJLmWPw9l6Q6o0Xfwmva4ppszYFp/pos8uvSBaXn5G1g2d8EZL4w17POM6PNASmNE/zrmHihEf373bTB0xgRn+RK7AUD0dKYT//Er/xoHapejwqb1wDt4YIGhd3DaVlkPrAfWA+uB9cB6YD2wHrgfDwjU+qVcYCnYEbABQgSbfa/HjhhBUQGuoLugv0BWIIefIyBF4KZcIBWQVDALcBLABRgJTPEQzAn+AjYK8gTH/XovqBbkCd6UC9bkuXYI9ASA7GGHoI7O+ONDF4ElWjzRC+4KrtG0G4juEtqCXbzk9y0TwafE1r5XRCdyCnLJJodOACH3fdAbsJIeZEt8Rx7gTADLjv69DF9BqTy6SOqTr43yXYCH3SzKgE/8TyeHcufAj3yJX3wDKwATgIVAFjqgxxegxf/y6J0MdeXFow8vK9d+QCT/2qV/aXf52U+v6snDl8/Im6CNMiBKejnzmx01rmvTePOPhA9+7KruUXBN7KQ3mvprvqWna4mc9HVPZ3lAB0kZcIUc/btdRQFg9al2qaDPxupnz9TRNX+QQ17AhHP9Rn3gnsQGfPvQ9JF5TXwEUCllS30ienwDjqY8O5r4V6ouOdE4u6eT+vym3QOq0guNa/05AIjuxhF1HZ4bz/v87hCe+KWHusBiNrnmX88OP3ke5+5Dz7d+pJ/3uhh6dmgPutoJCSjyfLNF2wcW//jjj4f/8strJ+7FeuAdPLDA0Ds4bausB9YD64H1wHpgPbAeWA/cnwcEZQIfh4BIsNQrZgIzec4BK8p7/ULwLOHhaNeRQE5gJ2iU32tQBdnyBWbADKBCr6IBawSABZIAnT5WbFeO4Iwe6gnS0KIB0KgjGAS62EGARtDmXjBJDyCARF42C/QEg87KXQNd8JUnMOQD/PEQMMafDoErdj+ox3cBRwLrgl++AoIFJgE9BKLKBbfsAxQFJuHh6BtLfIteoiP7BMrq0AM/9HYw0FWwrCzd8S0Ad6Yb//EDnemBj3zBNbvIUa5sAkFk97fnQC07s+LPP/zIFwE7Bff4aU/0gS71CW03wRj+bMcK36gLREqWvhBAwyd0rz+iJVNiI0CtV+HQKO9AQ8+ZRxc+0i69tkUunujIJT+5x8U1AaikgBV0QJBo2apPypPw8uHp2lxe/YxObMqW+n7y8ZigTP1MebyPi2tK/vlaH1Gv8unT/MEP8VTeIa/noev49CzK11cAPr0qZizJv+jJn31ZXvWVqav9e12zvuTc7iF+wMO4pA342GHnnP6sHQN4gEDGI4d+LqnLlvprO4Q8/+T0fLmOL1metU3rgfvwwAJD9+HF5bEeWA+sB9YD64H1wHpgPXAvHhD0CLIEZIIkQZmgSoAEdBCcCXY7BFcCOcCC4E1wJQAUQKERBCrHI1CiIE6+AFFQ5pd8sgV6ZHs1TDAm+ALs9K9D8oAyeKAH3gA+8MaPzoAUevcR53Y94dt3a9iDBuiBB779SxSgA0ClDKgA4FKXPQJ4/AE0dGQ/G5QJMtWhq3rkBmLQyytmbCWLvvlZ8AwsAlYBeshwpKs6aMjkV9fagf7AouTTgZ4FrwFW6a2D1L6VkceGaPoItPJ29JCnHNAjyA78I5ddzujRJZ/+7Sbia6/j4MEHzuizsw8tF5TTkw0S/cjLbrLYEE3XaBx8K9HlcgcCkqdvJEcd/PCS9B/HBIfYqZ5DW6GvzrSXTOW1Jzn8GS/56XNcXJPng5/Kp9er6w4lfOlMJkAsIJC+8mon7d7zWV59gx382rOmHl0CYHpGnCVnvso+9J7z8pTjV8K3tqVD4OX0J13Y0TPpWXLNPnXwdE0GWwJK8UhvdPqIZ2zuEFIPnXrKPF/8495zCSwjX5+z067veRmbPJP8hZ+6gGnlfJ8dxiHPsWeYfmxV1jjHD0Ajz4JnhdxN64H78MACQ/fhxeWxHlgPrAfWA+uB9cB6YD1wbx4QuAmiBD0CIrtOBKkCNUnAJOgShDvQ+Ag1wMa9XTECKkGmVOAsqBMkomnHhCBOPhACCCDoI6cdOUAZ4BAeAjp0BeZ0EAhLBbqCNkEfmgAn8gSa9GYHEEswCcTBQ576BeQAHWXqs32CO30fiIx2Uwm8BY8CRUEu/gW07Cv4BnoVBKMnj5/sZKGfMjIL7PGwI6EAmJz86i/q2cBOB17k8B0e5LoOsOFX9rNrAhzq4slH6nvVC2+v4vCL8gLk2sa99iCT3oESBdPKAUB9MBt/dTvTSd1kksuW+h0QyS6adhWlA77AO6CJMx
7y+B1vBx6O2js/1xfpSi/3DjuQSgEZ6uIVyEIG8GZ+tJoM9+r0XSU6exbom3+cpWytr+KZ/vIAFGSS7QBMoSEnPeXzsURvstoxo19qL8lzSwf6y+N7dqsfkFW/IQM9+wKjAlrIxYPu6vccX66gT98fIs+zqn6+R6eN3bsmlw7uyY2XcyBydGhde+bPZfU/ZXQgQ54+DTzmD7bo/55r/HvmjGfGD4CRfM+oe8+UZ7xvoNExP3r28Ayw5At/S08/YxwAedN64L48YJ/fv+Ha++K4fNYD64H1wHpgPbAeWA+sB9YD9+gBwbXgSSDlF3XBXWCRoLGydl6gF+SWBHsCNrQCMMGoXToFkgXy6AR7Aj2yBGB9qyVAqOBQ4CcgBSLQB48CWDzIIwMfdQrQBYrKAmHoLqkjASMCVuiqnD10dgA8AEICzF6vI1sSQNKJ/urygSC7V8zsUEgnuqY3vnY7kC0JstUF0AhAyRGglpQJjvmEH+YrToLadlkpR+uVL7rZGUEHgTEQjp30LEBG47AbAk8y6QkgYKMdTGjl01l7ObejQl33ZAqa0cVDHh50mLy1jTrk0B1doA1aefqVNkNLBp4TtAGS0EFeYEH85MVv+i9Qprrqays2qZPu1aWja/q5Zouzo6Sc3erjo32l9A2MQccuZ3QOfOmcX9WjU4m8ZE+5+nX97zXx9QJv7eW1KjTquJcPVJMmn+yR3zXfqKs/lrI3H0TfGb2jvt2zqO20IeAxncnpWjk/6LPt3KtMvnLPLsDMNf54t0uoe88lkEhbeg7sGvQ8agP9Hr328Vx7rvC0Q6i+h67nsGfdM2I3H7l8CAD84osvjrqb1gP35YHdMXRfnlw+64H1wHpgPbAeWA+sB9YD78UDAjC/sAvsHAIrQazgS6Dp1/h21bgXvEvy1BN0eZVjfhNFviQ4LPAT3OEp6LQjQRk6wV7BJkCHDn1gVvCHTqAHlIlHr3QVNAuq7SZSly4BO3QV8ClnV6+YAU2UBTQIsHvFTBmb2yFT4E0+/u1eoAtbgDv49vqKa75Qpi5bADyBKHSXpu1kBR4AjMix8wVQx7901SZAHfbQu9fE+Jcf5BdUdy6gVq4OnegXaMDW/EOm/HazaGu7TKSAH3n4sMG1/AARddsRpZ17vaxvFNV34iWQJ/NXv/rVIUP97ENTn2An2wPw0NrNg1/16CCh4S+85KkrL1BGPpuUpQf78ap90MT3uLhL6Oncx5zRARLY3DeS7A4jH0+28UFgkvv0JEviy4CjwIvkK9duDnkzn28CUaJzXz+I3rlnJLo7c45XKGu/aJJRX+G7nl/12MVe8vXxgC/1ay9nhzw2u3YmD/DIB9GS51nj08rI9tyzh5/UdXiOgEoAG8+gsSIwFuCkPn0BQq7xNSZoe8+P8Sp/+DaRe0AuvvI9j9rX87egUL1kz/flgQWG7suTy2c9sB5YD6wH1gPrgfXAeuC9eSBQQRdy79YAACAASURBVMAvQBUgCZz6oKtATnDVv1O1YwPgIeATgPnVXYBdwF6AiHevpgFX8JQHSBE84ivAFPwJCAV17VLCS5DWjiKAA92UF9hzSq9kCSoBH4AnQBA5ZLjGgwz16SDoVEaG3QPqqNu3SgSZbGDr/O4QOmUBHXZeCFQFqXxWwEwvfgksYSN+ATECUf5kL73wc6YDOzqyU/AqqObvQCp1aht28Y8gW13X7ZIhUyrQBlhkAx7Akr4vpI6kDl+QiyYQyTVZ9OpbPWheXXcACdwDWOga+OaMBm+64WG3WCAKefIqw9+1xKdslPBwKJfHV2yRxwbtmR/ktTukunTmY3Tkk+kA3iiLju71TbpL8vo3rOi0hfrKyAMwVuaszekTmCQPmCQfsKFOH9xOl0AW98lGL6Wj6+mTyvGT3OOjjZ0d2hM/z7hr/brdT3ytLt9crq9xAUu0I93lkdXuJGAlfuoEDCnP9/Vx/cVupPo9emXk4ynfbjv9X576+jUwj15o1DEm9Cqe/sIveHueATueXXXaLURv1/g2TqkjBRrSS98xbqDRL72u55/I9hWyw1Wb7tkDCwzds0OX3XpgPbAeWA+sB9YD64H1wPv1gIDMARRwzB0yArf56pOgSmDmLBWYuhYECpjbseEeIAMMEQQKzASoDgEgmQJ5uy8EjeoChZShEbzRRV3Ba8AB2QFYE/Qhjz7KgTbZBaRigxRII4ikjwC1snYg2EGAr0NAiieb1KUnevzRk8kGgah66Oksj97KBacCV37gT3nOds4ITtkFSOB71wEP5AuG3bsWPEt0kEd3QE3gSzs02B8wI+iVjy99AA0O9+zCKzAJf9cArF5PIicAiE0F/erWRgFZzsoDIrzyhi4e6eTMB2i1fcE/WoG68gJ7fYIcdAAAZXxRHp2TAXCpb8ljIxlAhnSvTfIze/UT+fjnA3IlutEVXwe+aOiBt8N1trnXJgFHeLSbTJmkLN2m75Mtjz5k45sP+Fc/lKLBM1vqHwF6PTf6KXCKHfqT178ktqDBn0/1E/eSczKyXx469Pk1sFWZI7uUq6/dek6Sj4dr7cIe/QVPunlG1PG8OKPFR1ntD2AL2A0M8kzOf82rX7ARGIRXu4nwAz6RaydRux0PwzetB+7JA/uNoXty5LJZD6wH1gPrgfXAemA9sB748B4QiApIBdJ26bQDQ1AsCAtEEXABIAqU23UjAOsVM4GickFgQECBu1/+gUD9Oxk5AUd4FKwK3vAIXKKDQF65s3L6truBx7zqJb/vkdCZrmQLWNEHCtGj7w5lD7AGcAPgcRaQX67AVIE9UGi+8hUwop6AUwALFJHPnwJWgS5ZdCHfoRxwgx+bBajs4zegkWDebqX5LSOBNJ52y+DF73wxASn+Yitwxa4uIFo2ymOHM3vpKrnHUzvQrd1FdJSPTlkBNx293iWPzujoUX+QH3gYD3VdAw/o5x7ffBFQAYhiY7txAD5o6Fhd9wE5aCX1+ROdHSLOfJhNgTf5JiAl+9kzQZP00M/sUlJOLj4Ai/pS4EhAivsAHDrN41B0pMromWzFfE7/ADrlZLO/b/ZMvnwk9f2ggBrnN6XKnD2fUtf19YAgPgNeyr9FLw+NPuDasz2/P8Q3gbx4oPWM4s8mz6o8vgYCAX/UUa6f4+f50c/7Tpf2k+e54W99XR6/8VWgkTpAW+2m/+H//ffff/Ly5ctPvv766ze5Z/PXA7/IAwsM/SL3beX1wHpgPbAeWA+sB9YD64HH4gFBVmCAXTeCR0GXwNdrYQJ/R4G91zsKvgRvaAWJAsHAHoEjGuV4Ct7wKlgUHAoIBY2BJL0CJABEFyhUUFgAym92BwgEAU6CSgAOOwKnCgwDlZQX2ONNt0Ah4BH75rdVADz06NW19MZX8IofvQNQ6CQoBc4AcfoXN37hH35gP53RBKgoE/i3o4Gt+AqA2UIeEIqvBNZ0FUADlNR1ADbUQYcv39aGgKKAlegDYJIjHz1AhN74uw8AwlMw/umnn76WOYEYurIhIKb+Ik+in74TQBOo4F7+GVCZ9+Skf6BEfTPf81E24cdXdKFX4E086XOWh5+Ef
2XzWhkAx+tRJcAU/ulAnhTQFkCKN/2keMvDX0KXn6JRVvlBdE3R6Ifxk6++hJ7dUn5G69AeytHWRnNHUP0+GnSOWUc9ctvtFkArnwz0ngs+Si59vLqGFp1297w7lBkrPJ/9zbznzXOlzLOk7fRFz4RnVV8GvtJDWeCdfqwuu3vtlX14AIb++te/HiDRpvXA+/DAvkr2Pry6PNcD64H1wHpgPbAeWA+sBz64BwRiDgGkczuIBHMAEsGuMgCBgMyOmYJ/ygoEBZHoBdDK7AQQAAvq7CCQBGdoBYiBPvFVVzAHOAmEoQtQScBphwRgRKJP4AqgRLDaTiN6AH3UEXTiiVaAqQxvegFRADXqs4n8gnE240n3gBZldFWPTgJV+tlhpAxQwM4CX/n5rde8BK/0EdAWPAv4AzMCsZz/7//+77Cjf1XKb14DAt7wReADvvgAyQTHAmf3/oUpQKFg37nXbZSpy7dAoT4UTJ9AJPnaqtem5KvH9ngH5NGXvyQ+Ue5gM7no+m5QO36Sky148JukrH7RB8Pl6X8Bk+kCVJSnjeShazcOXRzskI8/H8kjV+K36PhaOX54ocGfLhI67eysvwI+2mWkLnud1edLiWwpMAMvB/7ZG3/1JPzjw9/anH36K53koakfeoVKGTp9CA15+dAZv8u1b3rGsx99/TFd5dEtEAcoi77ngcx2sbGNDwA08t17hsihjzx+siPOLii+lgcsNU54brwypm+r17e+yPLcetbSVbtMQBCNcQVQzG/al93q+GC4v6lfUOjoTpvekwd2x9B7cuyyXQ+sB9YD64H1wHpgPbAeeDgPCLwEqu2GcS0QE5AWTKIRSAYCFKjRGoAgmAs0yJICz0AhwIddMu0SEsgVuAIBBKYBRXjJE/DRQb5ywWXAjp0KBfaCy0AhII46dFYfjWBZoHreraSs7wepl310F3AKvIFMfQdFvkDXLgZ5vdKGhv/IlJz71km7p/gMT36tft9IUhY4ABiiJ785A7MCkrzGBvxgH/78rr0CDPCmM37zlSv12y1EN/4VROODh2Cebsoc9JOHt3x+kvDHS310gSPylNGDz5WVpx4er+52EbEzECN+9Sd0JTzco1eXPuQmgz2BLugcbMKL7/oOkrrsIFN/c60eennaVB38pQmwoFU+QZ6AI3YGSslDB/xy1tflkZ292qPXxtCwA01+1sfpx9dAF4l/A1ravVRb5KeeAfe9dhYtfuzsOSBXks9Oidzz386jd3jWPHfo2xlEHj6ep/i0A8k9u+wI6rVT9QGE+nrgEKASb4AQ+UAedsrX//kKf+CRflBbO6vHb9oGbXp6jl+8ePHJt99+e9i1aT3wvjywwND78uzyXQ+sB9YD64H1wHpgPbAeeBQeEOgCRwRgds8I5FwL2vzSr0xA2V+HC84EvoI3qaBZ4FiQKDBEF41g0I4BAd2kA4IIIAFC+AtoBYEBS3RBA8yYr5gBQuQLKgWT85Uu/Mm20wGN11boiD/5dqKQoR47va5FT37ouzy90oKX4Bho0u4jvlCXThIwgj8AYHYw4R3ghCce+Ns5wVZ+ILeAHUABcGIrv/FFr8wFlAALyCMDz755xC66aSPtRY8AAUFzuzLoXGDNDvmAEvzpwe/8EiioHSV64h3AhQ4fYIhzYESvnAV2KQMmAC3m63B4BkCxAw1Z6RYoxQaykqvczig2BgzpI+oHNKHHS115jhI9AA/o8QpM4m+7m+SziTy+YleADh+1Gyr5fDZBHrLwIlsb4HMLzFFONvpAzmxNN/qV8CSLzYEz6Huu0LHZPb3Ruw7coqNE//lve3hIzg4y9Ns+WC1vytNvlU36QCLPmF1o9V+y9F/6eB6AnPL0XTzkASY9w/ohfwFZAb36G+CnMUY+Ovz0T7bxDz3Q/fGPfzw+cI7/pvXA+/TAAkPv07vLez2wHlgPrAfWA+uB9cB64NF4QPAZECAQE7AVnLsOLBL0CmwDIRggCBScOQNr7EYQMAoue/2rAFQgK4BFi4dygSkAQh3Xgn40gCoBq6A+sEJASFY7dwStdmsUtAocgUICV4ElXcm6XF95EfyjV3/u3GFnqY9R00eAGlhhRwSeeNELiDKBC3rhHVCFZ6BRH7ImUzAciEO3dhKxWXDNf9HgETiER20gHz1b7bxA77pdLnQXiD979uxoQzyAIwJydtCdn4Eiyu3SYEu7atBrOzLVUYbeQU47XvjM/dxFpJ3ogW9ASkCGM1o+1b7aDVASHfu1I7+wIX1qn14bs7sGrwApelS3engEttA7UCGdyQmEmWd1yKMX/0QfoEN2NPwmAV3kuacz+9wHsOBf366c/bUh/6KRJ9EXfT5kS0AM2vwXOKZvk6eNlemj6SnfkX96btWlH5nAyKmvNnRPB4Bqbe8cSFQePr1uBlC1O7BXNMnsVbLAVLrps/ygH9anlXsWZtsrJ49v5AM76Qtc+uqrrz755ptvDsB203rgfXtggaH37eHlvx5YD6wH1gPrgfXAemA98Gg8IGgUgAUQCTIF1AEsBdOCQQGjMxoJDbBGENchv+BYsOkQLAIbBL3o8Rf8KRNs4mmnkgASKCQBS/AUKMrHA7ijvFd4Ai0EjYJMO3ME0fja0QAUErjirVzwqlxQXQr8AYwBGOhOP0EsWWSqC+CRR6YAGh+7fvAkN9CIPyUAC9l2TwS05EuBc7stCqrbMYQmHeycaEeP3VP8hy9Qha70lEcftqKVrz2V0QVwoW3pLl9iw8wPbFCHP9DK81HqQBwy2SwFPLy6vkLVh6u11fygNF6Tb2Ab2ZLy+LlXl/7k4KWupN4EXZTRT33JPdvRsE9f4T/XgS98TW59W71Ax2lTfg+UYif+7mf/dp1sPsBXHtnqtFMJDeBGOwCSJLug8m+vkqlTOypvRxN6epIxX6lMvnJ29byRLwFSADz1Y+WBVPq+OtnPz+rJ87oo2tpNv+9Vz55lz1qvhJLj+dCf1VUGQMVP3zVeeAaAmO71Q75Ao595LgA/tZ18YGu7AYGCfKP9AEIO/X/TeuBDeGCBoQ/h5ZWxHlgPrAfWA+uB9cB6YD3wqDwg2BRMC9aAIAJmQZx7YANAp10eBesCT4edKlIBq+BSoAkwETz6hR/QI1/QJ5j0iolgUxAa6COIFEw68AW6KEMTQEMXQWgAg2BUWTt6yFBHoN3fyAuK8aK/fLub6CqgDTACcAjYO/o3MbwBS0AL/glIC3QC/AC5ApUE9XZB9Bf1wCN1gFnqA2TYSUc+wF99/kZDPwe6MwAUoFObFOCj7/UbdQJS5Pd6GbsCSOiB3pnM2pFN7eaQp334024P9svTnvLyU6AIWe0M4ttAD8H9BGjU5St56uAn+OdvspV7FQ0PgAi6+oDrdgfRBQ16MiSAjASM4Qf6A0IktrGXPP3JNRu0TcCIe/bgiQ7/ngO86EmuusrkObsvaRM0JXUkdPVZ/N3Tw3X80AXY0Ck96aAunfiMjlIgmPxAHfnJBOpph+yRj387+/rmULYF9JDVa2TKHEBSY0DPqzbxXHtWlemT9PF6mOdl5umDbAi41DaeO2Cg585z
yCbgqGeDHvhpf8+3vD//+c/HM6Fs03rgQ3hggaEP4eWVsR5YD6wH1gPrgfXAemA98Cg9IEgVoAvSAkPKE5QXmAvkBNUCXIdgWODpECAKHNUTTDrLcy0JPAWrBZmCPUG5XT4F1oFC+KgLQCHTbgX1BJoAFeW9IlawTge6C4AdgtRevQoUAowITpWpj57d2WJHA97kKlfGDnpL6uMtCMab/srpCKgQ2NKvnUAF/4JbMvs3MNd2XvErH8ZfMI0HvvwTgCCIlhd4AigQQAcMkcM/XmnDww6Mzz///HVb8U0ftiZTG6KXAED05QO2o0Ujv1fUyAM40EEZeQEOygJS5OGLF58E5Linrx1QrtO/fkUffcDhmhwp4KmdS/jzi3OAjjpkopUX8ATYcE1XiS+BEn24mh6+W0N39ec/rAUcyQMu4RtIwyZ1HPoJOWx1pAt5yqXaURn6dMcz8EtZ33HqVTZ9VnIfSNSrbPWXQCLlPVvO+Omn7AoIkkd2ZejoE4iU7niyyTPnnI6AGs+ycv3Orre5WwjI124hz1WgH/+3e4gMPDwPAD068q9+jZdnhxz93U4hH5tGs2k98KE8sMDQh/L0ylkPrAfWA+uB9cB6YD2wHni0HhAs92oPgEJQBzQQ3Ap8AxUElYJugaZAWQCpTBBqF48ySbmjYBUdXg71ACyCRTIBNq4DfQSJ+AQstRNIuWCxV0/Uwb8dCYJdvJQL8IEx+AhUe03MriYgFJsu190l7FaON93sCCLPzhrBNf6AEnIFzPjQWXCvzOFaPr2VsTtgQdDbN1zIQEM/PiDfWbAM0AHGBCjJ73UrIBV96Sd5vaZ/TEOPVhJoa7sAr+h7XQcNMIBP5qtlgTjlt1NMmwW8CNiBEGzgm14jYzu/k0UX94E0+PGvFKgor11F5AIb8GvHlLrqOLxmxe52BuFDrrzquU9egIj7ABN64ZVNgR30QEOH/ET/M/BFlqRN7ZBCP4Ec5XjSse//oNf3egbmc8APtRf/SnSJpjP7JLZIdNTfHfp4ZXhl9wR68NY3lfVsAed6ZvAjI37qulZPvgON55R97Rby7LRbCJg6dwvRSzsCe5TlX/1Rfbw9X8Al8tC/uoKGgCZlX3/99XF88cUXr9vkMHTTeuADeGCBoQ/g5BWxHlgPrAfWA+uB9cB6YD3wNDwg0BTIATgAPZKgWjAo0PSKmEBWAG3Hj7PXSdoNEhCAVpAr2AQuAJnUExTakQPYEHAqF1yiEYwKINslJChGVxkAYn6bB3/Bp2BVmSC4cnwF8ECddiMVxOLbN2CAJgJUMtiGNlCJDe34Ae7gD+jxOgz+EmDATgfAENm92hVgA2TCW+CrvsC4nTd8zYa5oyeggh/b0SMPiMR/XlsDZAUM0ZEuaIA3AVN9lwlYRF564YtWvjyH+w660D0Aya4VNlafDmRK6gScyAPSJIscducjdOrygzL35LiWx890l++eTdqB3HbxyFNHm8ef/+QFrgAzzruD2JoP6dN3f/DjV/f0DvjCwzXZ+OqX7ItGuUNZ9qOhi3u61e9da3c24afvudb36USOesnAl+8nyKSs7xih5Vftoo0DkvCU6sto2EbvnsUAWM+KMnWU9Xy51/+BvPLo6NVIzwUdgD3tDNL/5OmLnlf9HC+0nnG8jB/apnZEwy+eAbzks59ensMvv/zykz/96U/Hs7RpPfChPbDA0If2+MpbD6wH1gPrgfXAemA9sB541B4QzBbE2t1SgOwMIBLAA48EuQX4glypgF0AKHAUhAoOBYyCQHl2CCgXPAqEnQOOAhPIFZhGEyhEboCTwLKdQILvXtGih+Dazho7dPAUjKJRXwDrtaI+GI13306hI0AJbyBJ31BBI2hmc98PYpfXtwBD5JCvPtvoyF/sc+1MvmBZfnn81WthrvvuENmAiHYLBaq8uu6w0AYCazuM1ElX9IEr8tVBH1iDTp4y/GtD+fwc0MFG7cn/fAWgIjOds40deAWK8EXt0+tZ5OCnDfjVvXr8JM9Bft+d6V/U+ut6bQnES2d161v0rQ/SIbAl/vmAvtmNn3uHFHDC3glU8Zs+IuEDqFF+uQIq6vaaF7/ZHdT3ltgSEMRedSV0QD0Ajzw2AKzkowu8mf6svXpWyA3cYTcf6v98GB9n9BJa98rpLT8giI6u0ShDM59ZvvQM6rf6vd177ZaTp70ArcroZSceEAkfz0E7hAKH2E5noLD+iDZg2PUf/vCHA6DThzatBx7CAwsMPYTXV+Z6YD2wHlgPrAfWA+uB9cCT8IBgWEAHcBAotoNHfjsoBNUCWgGjYBU9IAWtOgLIgnHBqADUIYhUBy8BbDt2gDgFw8oF/XjRocBWHUGtMvSulQMA1BVUA2vUF9RO4IiO6gtW+wD2LFcm4dW3h2Y5MIw9bFYuYA7wIVsZHupkqyCYH9rRw3YJKMQ2wAu91FUHgCKP//ITPwvIgWjKyEAr2XHTB7nl052veh1NW9E3AAh4QrYUGNUuIjpoP8BIr3jRTf1ADfnoAnIAKXjyDd/XfnTWH+iDHqAiz66r+KnLL9lPR2Xq4cWOmXcofU14ScAWNJI2xceR78lrxxJfBsKwEd+AM/RncAmNfPXp41qfk9odFDDD7/TGX79jM36u0QYI4skXdIym58IuvHYLsT0dvQamDjoACjnl5Zu5W6hvEtU/Aono3sfc1XPIc/a8KAPUOOwcor9yfaXdQvqZvs3Xv/71r49nTz/zbSv18NJv7a5zjd7OIrZ6FoE/5ABd5QNovT72z3/+8/DrpvXAQ3hggaGH8PrKXA+sB9YD64H1wHpgPbAeeDIeEFj3ilYBcrtZAlkExYJ9waCAVh2BYAAFYwt+BdICTnWBIoJc/AWxk0ZA3U4f/N0r7xsnXmEROAtM1Q+QILtv+5AjiI1mgjrAiHaaAHfoQl9yBN2C1oApOx3wxLuPA3v9pZ1En3322QHUKHdW1q4MwbGA2Ks0/d07OrtM6CAfaIAOyNFuoXa+oOUjQAi/52M6KhOg26kj0G5HFLsACfjLB/KgdQji0QXkBJSwXZvwY6BNOz3yPx2Vt2MFnXp9+0cZ2ezo4835lD7qkase38rrY8V4A5qyky78q7+4njL1p0AP/CU827nTq4J8qa6kL8zvFcUDjd1XEl/MbwnFk87s6jWvQB39Sp100+bsolu7pvDli/oVevdSesrLJ+oG7vGJe2X6qHuy9eN8Iw+fnh20AUFkKkOjnC8DLNHoy8rK18/VUcZfgFPAjms+IFN/1Z/w0c8Bi3Rjk3y7iPjw+fPnr3fs2VGk3+LjOeNz7Qbk6mPTh0M2rQceyAMLDD2Q41fsemA9sB5YD6wH1gPrgfXA0/KAYBUwAZgRmArwCvQBDYEN7fQAQgj+BLYOAaiAEx+AQB+gFci6j8YZICSIBPgoc62ewNSraAJZgXo7eZQVXAND0AfqCFrxoRdeDnYUWDuzZYIfygON6NwrMf7Wm75er8JXWa/H8ImkHBiTTfSwG8ouCkG9fLq+uu6UIdcR0KCuNP+lLJCHLwXl7a7JZkF2u4jwDXCxu4jM2otcbaBd8OE
PtAAMugREBeyhL4DvX8XoZ7cQ/7RjBk/JfW0uDzjgQ8TyJPzUlciUj6YdO+7VQ0eH/CkvsIyfanv+oHPtxnY81FMWL+XyA05c8yk+yvAI0NL3+AWPABO8yGW7evhke302u5Wph7Z+Xp9z1teqT2YgEj7K0KRP/XPuFvJvamSVp77kOUjf/o0s+5UFxuq/rpWRGUikvyrz3LRbSP8BaLq3M85uJiCevqz/0NU1Ojxd67fAN88FoAgQCigiSx/Fh0zPDjDxu+++++Tvf//7YcOm9cBDemCBoYf0/speD6wH1gPrgfXAemA9sB54Uh4QJAdMCPQE9QXO8guQC5wFjA67ewTcgAoBpfsAIfeC5QJWwa4AVTAOIFImuPQqS4G38gCdCfi0q0HgG3BEJj0l9dHPYBnoI498uqC1UwJ/PAA/ARKADq++2P1Ap76twgft2hA8C4QlerAVLR4CeoBAu4UE5IAbfmM3evIF3uj6iLRdNGxrVxA9BeoAE/bN3Uh0Qd9uJG2Blu0AIPbObxfRM4CGbO3Cv2xwzzd4Bjh5rUh95cA9dQNTakM8Zh5aNqrHD9kKKGI//mTmf7aqw/+SnSnq9Q9W6AESfHS5fiNHancQH/puDx/QAUCjvQNR+jYT/sALuraTqB1HdOF7r3XRhV36Q3bhGXAz7VSPbvLYSy6/56v0RKNN6vvyA2vYpLxnwnX6J4stydcG+Nfn0NIDP69sxUe5fsgWwAwgB41XyPhQPr76ktcegTvtqvPNIH0dLX71ezbqI/qluurpH/xPD+0bOMR3dqBpU6+NOV6+fHm03ab1wEN7YIGhh26Blb8eWA+sB9YD64H1wHpgPfDkPCBIF6QK6nvNTAALpBDwCkAFhkAIwXy7NgSHjv9v70525aiyKAwj1YvcAY1oPGICb1cvxQAYMEaAaWUDA1SPUv4CfivKUqkMQq6M9AoplRknztnNOnEH+9eOuOY6KowVrUEkNoNHis66LM5QSHEZ4DB+Bj4BhfyDEfkRR4/ZKJ4VuIrfAIKiXNeDLgl+FbriqSAXt+LWddBKnmfwYm0dPmLShSGOuqvqIpE7gEQ768ViPgihCM8nrQJWCm6xOoyJxTyFPN3FaFz+8jh3BXnEp66b7NPEfD741QWloFfgiwWM4Y894IS/3kPEl/N05U9uDmsBFdCgdUEBc3oUD6Ti18dctvgyDt60r2yxLzaHeXUtsde9I57iMmY9v/ZRTvy4bx30Lu/0B0nYY4OPdHDuSNvgpzFrxCvP4nd/mOu+b0/ZdR+I69zRxIc4XGOr+8x49wSY6Hj69OmxNhBkvsP9Zi7b/vbSQzxd811HEIjTI2TmBkDZsN++3SPub9foZsz94PEwfz/uc9dp7pr7z9+CfI3rDPIbzDXX33Hv63JffvLJJ0fH0I4pcCsKDAzdyk4sjikwBabAFJgCU2AKTIFLKaBAVjQqCsEX8AOoUFw6960gVIz3fh6FZFBIsoriOiV0MQAAOnQU2nWLBB/Mq9Dliw9jrp+7UBTlCm12xKBAVWgrzuuyMEcByyc7QQ2xKYr56WXYbMhVES1eha1HbBTICnmFurgBAnnyJSZFut8KZrpYq4iWmyKbHe8mUuwHG0AFhXtApke4egk0vcUaSAFz+BQHu3wouM2TV9CFTrph7JUPvcxlp3/fTqfei5ON/AVa6vARA43oyBZfbAUzXA+ABD+cB2asA6roEngQozn5DKRkS7zdEyCZefYA5ACs2BKDPUZkHgAAIABJREFUPRUvGGZfHGzzd/5PY3J0/eGPjiP6sCk2Y4CUeNl03wRujMnTPOvLSWzda+0bn9br3HHwKR/3BFBz3ks6metTzL758qGN+ebRxnEGPM6Ni0P+5//+J/YeF7O+v4NeOO0e6n1etBWbv4M6h+jCtvvNXtNY/rrhQFL3q0+Ay98Of/Twd6h7S0y67T7//PPjb2XHFLglBQaGbmk3FssUmAJTYApMgSkwBabA5RRQrAJAYE1gB6jwUSz6Nl6RrsD1O+CjOGXD4bETRWxHXRjGFLSKTEWp4lkhC7Dwq+j0MUcx2xznCljQwG9znCtczakzhx/gofeqKK5BBxBFbPwrchXFrimse2+RfKxV7LKpWDeXH4Wyzib58s8PTXRPWO+3w3xQSlGeXTmmESAjFgV6dvgFl87zg1fsnGEU23wFnayVkxj5MVeeztkItJgHSgBG5xdR0yBQwpf8zXGAAGCJnAASvmlT7KALYOLcvcGv9eb3aBdbcmdLDOIKfJljjRjYZ4sdfoIi9ilYSC85OmcrgOP3izqYIxb++A/cdN/IwyNmDvmL5fwOIPMCSX6zZ+/ZM9+Y+wpMrPuqjqZzt1DgJx2s5RuIEb+4uqesD8DYq/Q07tx89yAb7k+QiibOxeLvxzz3fqDHvWaOrjZj/fv5HhVjzzpAiR33YKC0eHTjuTfco/yy/+mnnx5waccUuDUFBoZubUcWzxSYAlNgCkyBKTAFpsAlFVCMKxADFefOIMWqQlGBbFxRC0YASmCPMUDFt64ChyJacV4xrMA1XyGqG0KhWYeKMY+tmMM/OwpUh/cFua54ts4aRaxOh4p8fhTx1rtu7fnxLGsV7nVkBJVAAPP5UyybxxZ/xuVWR1JgDORRLMtf4W+cPkBBL4w2FjAIAPWfz9KPTgp2cfLpUSvxeHQnMMKO+eJSkAND8tTNJEbX2JAbYBDgcg7cWG+Mfd0uvUw6kCOX8itnNq0VC7/2kZ2gi3PdOECVeWzTyrexgA57/IiFLaCCvWzZW+scNDLXwT6IVfeTNeCRo8eyepcQm4CT3PgVg3tADoEa++C+7Zx99wF/dUq5n4zzBXw5+LSvdGXXNR+2+juQVyDKfJ06/MtHPDRwrznYZefnn38+7OpaM5etHiGTF3uBIHGyGRT1zR677j+5meM3Lf19ud79XZ7sAbjypKvr9pBff1MAEGjkvrY/6SGPJ0+eHPfdF1988fxv+0hoxxS4IQUGhm5oMxbKFJgCU2AKTIEpMAWmwLUVUAgqLhWaPbbkN0CiqFWYKpQV2YrbOh8UyIrxivsKaUWvNQpNcxSfXvKsMK3QDkjUGaEwDTToarA+3+Yohq0PHLgmbj74c926IBT7Xa+wBp/OfuQIDIjfNzt1Y4Afjq4rnulhTUBDDEBPUC0YQR/dReYGIdhxGGdHnAEc43UX0Vi8Pq4DYWADgETrXqxc504xggv8GacxzcR3HmOTDUCATXDAWI+HiVv+OmPkZS1t2hcx6/ayl8Ed+Yi5f+EuPvrw457xDZ5Zy5e43BP8+9BMvEEV/syhV/db95M5YmSzmORrDRv2vOtyd/6vZx07gRt+xesAaM4+gjX57P40130nP3mImW9x8wtS8sUWX4FOeydesZcPiGoe29YAOGzRRYzGfawBdPz2rcvOujrfrANeXfP3ysYZ8tBCzLRhw98f8Ajy6roDXd2z5rDrfnR/044dgM4+fvXVVwf03DEFblWBgaFb3ZnFNQWmwBSYAlNgCkyBKXBZBRTvuj8AliCLwlPngeJToakwDg7VOaFQdyh2FbU+Cm
znCmU2FMdnKKQYZlOByk4gQ/Fa4cxORbgiVnwKaHaCCsEGxa3YFbrWKcytcV0hLR+FOnuuK7L5UdwHERT7/PPjMM/RS32DNP1rcQU0bfiliYMtkIZ969jXoRGQYUMs4hcbH+LyeI/uosbEX2dRnTzmyg/c4DMAwIdzOsuJDYei3toe5zKvjphAjljF0h6YK6/22hqx8wWMBEKMl2cgiLbm2XvxB0ysN8chfh/Xu3+y3YuqzdMN5DqAI2+gqv0FRUAY4Kp9Nsc5337bc4d8HGIRv2/gyzctxNZ/+vIIlrzBsTqxxCBe96ec3SP0NS/wY36+upf46j/eWW+cjUBY7xJiyzX3atfdU3Kw/718Ws7m+rDBvzm6etxPYqC/eAOy7gfwyEfXlXX08e0e8lJqfq0xx4uy3Z8g5ZdffnnY2TEFblmBgaFb3p3FNgWmwBSYAlNgCkyBKXBZBRS7PS4EbvgNlCgy/Tam46DCXlHpt+JXgV9XhyLau4eCKxXMgYtAk6Lc714OTTi2zDOuWFYo61Qxlx0AgF0fc/ovZdYaU+wrdCuirQ8YsCdG18wTJ3/OdVYoxoEDdlxTQLMFPCnGzetdM2AO3zRx1BnSO4esByboo9iuiGfHXLGDMuywcX7Jci+0lqucATbf2Raz9Y46efqX7saAFeM+/JtL0zp15C4Xdujhw75zwMRem5vGwJcxtsRuHEhpL4AOh/hobQ6o0SNy/IEQYhCTOe4X81wrTtAi7Xv5s3jS/uHZo1m9ZNu9BwCx47pP4Ea8fDj4kJvz1gQfz/9tjQbmgii9cBocAafEGPwTn5jk3P1EKzbNp1H3m3O+3VvmsiEGcbpmjfvTXjiXM53cb+2xOf7mrPcCbDDVfD6c2xcx6Syiv2tgkfuNHeP+vvh1H7jX3NNsdv/JD0jUJfT9998fuu2YAreuwMDQre/Q4psCU2AKTIEpMAWmwBS4rAKKRAW/j6IT4FBA+laMKqAVnApWxaZPMEkxq7vBXIfvgIPi1cda46CKtf3LbAW+j8I2UCMGkERxbm7FNN/msAFaiZlth4I3G3wphBXEivGKY/6tEZt17AM0OmLMUdzr+qi4Bi74dF23So+EKaaBBEU7n2BJsIhO+QCG6i4yly3XgShFPKhRDjRk17j4wBDX2AiqOHeww6cc2QuseDeR9Q77wQ64Rs/2yDeQYC0NQBZ25Wa/gQ/7AWYYcw2cMxYUYgMo88LpumjY0f1DP/P8tm/mil9soBGftBaDb3sgH/7kEjhyzX1nz9goJuMOds7dTK47791EzoEe60ESPuTHjz2znj/n5rpX6CUPoKZHyMytEwn07F51P9CK9mzQmeZ13PRic/9Jjob2xVxx6Cbr74JfWthva3349LflGiCko8f9Qw/X6ek6mOTDPggFXPVeIWvlx5Z1crDndUiJ333oP4/53jEFrqLAwNBVdmpxToEpMAWmwBSYAlNgClxSAUW0IrfCE9QAYBwKVEWx4lJhq9hUaCs+Fes+gYugkCJYgaqwVbgqhoEEhb45PuY0D1zJTwW3gth8cwJHIBDwwJ6YFbZ+8xU0cq6Idt232IovSGJcgS0Xh/yAmOIQa11S/Qt5vhTf4mMnGNO/ohdn76YpFhqx41C8A090lX+PySnY6U3f4Af7vYeIDVpU8NNAPrqKzNOlw6a1/Msb3Ehve9a7is5dQWwAE+wDYNaJy2NcvgEgY+duJD7aN+t82ks66YJil572AeTr0bYADpvitK4XSoMc4pQzmMWGMWvkFuCgg7zcp64Ft5yLzbn4HPmQuz1wH7oG5ARX7AFf1tKSxuJLb375c/8Bmg5aBYnoDJ51z/obsrbY3dPO6cRunW2uuyYe3+JmA9gUAw3MMd9v19mmRd1E8nEPgl/+ztw/1pjj/raOTb6NgXny/Oyzz45OITntmAJXUmBg6Eq7tVinwBSYAlNgCkyBKTAFLq2AQlKhrdNBsen7XKAbUzAHiAJDCt9AjoLUuaJZYa2IZQMM8B3sqSuDzzof6nIxp3euKK4VyaAQH2yDLIro/vOTdexXlCvkzVP4+3YNTFFksyX+XhBsjuK6whpIADbEKl/XjAEfbIENNAKLXJcHW0AByMOOeGgjRjHrCnLN+vO/dQdnaAza0K0OEY8D8edcLK7LmU3dMeyAMYGSOmDo5prxM6SxLkAhr/Na2sjBeI+QidlYL9wO1gAucpE3X0E4+bJP32AIjRzO2XZe5w5dutZ/HROzAyQC0uxRNvlyHdjpvrAn9GO7dweJX658+tCi/zqmW4Y9OgNffGTf/XfWiOZ1B5WPPRB3kMg+8E0ntthg2zmfvt0rtOyRMHvtfjLmfmCvjirQBxxiAzB0b8vPHLDR/SZnmpvjuljkQEP+zNfBpOsLEAKPfOzZd999d/zHtB1T4IoKDAxdcdcW8xSYAlNgCkyBKTAFpsBlFVB8KoJ9FJcKcgWrQh1EUIg6jCv0Ay/OQRdFugK4x8yscSiGg0eK7UCJR3OCMwCAOQpzHwUw0KLwNc4WKGG9gtl3XTighILce1PYCwoZU8C7rqgOCgVRFOSusRMU4gvIqLvDGoc1DkU6GCJGgABE6FE0UEd3kUJdvGBGj5bRyxg7vdCabbmYT3NFPUjhX8+nrbnydl0u/LLDl/V0AlTMk4c9kit9XAdM6FFXjdzEC/QYY5Mte2jf6vgBNGhiXx+ePfYEWLDvupjFKT+dNGIwJkaAAiwTQ7bF5N4QEx3Mk1+QSJx8mU9/c9h38Ae89YJp150bB0hoAX5YU+fNGUTR1jUxdC+wC6zItxdes0cP+QTbrNXdBCzKkQ3XgkR800ZMcmO/e4rtHnd0D9CDD7qz2z0cYOzdVOaJw9+Gb3BHPvaBHb7EIg5ACmyq+0h85hkz55tvvjm0kuOOKXBVBQaGrrpzi3sKTIEpMAWmwBSYAlPg0goACQpPhWjv7zEGiPQuFIW+ghMcMUdhr5gPyhBA4axYBkN8FMR1+FivKA4gKJrNqWAGmAADhTAbil/FbgW8c/6MARG9NwVwKAZr2QN5xM1+XSRgSHCpjhC2ARLzgQPxsW1cLhXv5sg3P4AOCKDTJx105vBpTQAHFACSPAZEBxo75KfzBADw4UtsvuVlDCiQizG+2AyIKf7pwR495Ok6cKPbSj5yEAu/QFJQhn35GKNdcfgdKPGolzl04t9HR4w5dTHxaZ4uGDE4+AMr5GvvQQ7+/QZ1aFHcYqQ1LeUrfx962gc+A1lBIrr1XiH3oBwBLOt12sgRqLLeWrDG4Z4DdPhPR9d6jxM7dQcBfe4jY3yBMWwFs/xOX7mJuY6e7jf5mseORxDNoQnt6/Lxt+ZaQK7ON3stfrBHt5O19oHG7mHrwCA6W18n1ddff/3GL7/88hxoHonvmAIXVGBg6IKbtpCnwBSYAlNgCkyBKTAF7kMB0AMMUJQCHIptxbBCVOdI4+YoXkEJH7CgQzEf8LFeIauAV9gHORTLCmjzFNAKXv4c4BG4oBh3reuKZ4WwYhtsURCzI2YfB2hgPgggXueghXkKcgW2XOQkFp+6baxR5IsLMPFtT
Y+KyZNt+YlVYS4mBbuDD0AjqMMX30AEYBF0sl6+5rMDioAjxsRrPZ3FVnfV2WYdS0EbPunPHnADJAQkAAM+elkznXonUR089sAR+BIDW4CPtdYEQeyndbQRr1zYDhyxDdwBcWICygJkYEgwyxx6iDUoVNeMOOw/Tfhlx77I05zgFsjDr/fpuEZjufQibvtDP9fsnZjtiXz4tjfNZRfY8Q6nAFXw0v3IJxugaJ1AbIudT3nQLEAnbveGa+5/3Ucgj3uSLTHZI+v9bdDZHjefL/mIhU0ait052zSyt7qmXPfv6AEh13ZMgXtQYGDoHnZxOUyBKTAFpsAUmAJTYApcWgHFMwCgIPetEFW8+h0ccq7A9XFYAyoobhWoCmrQRYFrPHgQOAJZdFAokIECBTPIoGhn4wyNeuxLsQ4YWAMY9OE/3wplRTk/CnLjCnCFdcW230ECtqzhAzwADHyDE/zpYlKoiy+Y06M7CnaQyrhY+vf05gaFgAR6BL7YNL9OHvmYX5zmyR1Io6nHxQABWvIRDDHPvpyBRO9FogdgYp1P8csLUKiDhz7AldzZEpu4ARJz+JOrdfbd2h53smdsBznMCVb5bX8d7StoRS82gSO5gU/yoo9YdBcBKcCc+8AcANKaQKG45CPW4JQ9cABRL3YriZutHnW03/0HOvEANnLi21x23bt1wtkLfugsNuvZoovDPHPsvbh917nEHi1AH/cR2+JjT072nKbi771F8tXxJa5govm0pZPc+bNv7g3vEiqWI6AdU+AOFBgYuoNNXApTYApMgSkwBabAFJgC96GAIjQwpDB1Droo+o13rnBVsAM9CuA6XRT2HXWiBA0UueCIIlpxbR3ooQA3B7jRTVGh7VwMjop2xbXjDIWsMa54Z0vhba3YwJQ6iuqEEXu+rVF0A0mKeFBHntYYD54oxP1W0Bt3KOhp4BzgUujzJ35g4MXHu/g8d/L0AmYwQWw0EH+POtEGlJArP+bxJU6H82KtKyWoArqYSwsQSD5y5bMOqrpogjO6YORpLn+6U9gxz76DJA4QRaz2kibs8i8H5/aC/uIGYqxtj10TP18gR+DJfPPsw8OzziBxBGaAFXts39hzLkbnfAJtDiCKjeBjvsVhPt/uQfuik4kdeolfHGIHZ6yXn/xBMyCMlvaie8nfRGDPejkF3/jlRw79tpd8gX++ATbxsMe++da7d9wnxUMjOT5+/PjIz7wdU+AeFRgYusddXU5TYApMgSkwBabAFJgCl1ZAkQw8+FbEAhAKVgWyAtbHuMLXR3GrgFXsBm8U9gpfBbLCWMGtoAaRdD/otmgOMBDwqGPHOVvs9iHqi1CI76AQyBEU6iXK2VVUB5/YCWgAAOaAD/I1R9zyMm4d4FOnj1wABHGe4QObCnrFPfhgLWACEtW5BEbQ0Kd38YBeHmsCI0Ahvviw/gyFXDfmGkgDgvSuH7+DQtmSV1APzLEuWOFbF49HpdgCY6y3H3QDaKwJCpkvFl085sgpKASYBI5caw678hUzX+b4tH/21N7ZY+tAK/MAH4f4zQkKAVH07r+D8c9+UFF+zbUPdOyxMPcvnc2tW0sO7kG5uu7cPWHv3du6keoAYstaGvFh/1xjq3vBb3bcL651L9h7cVgrJ387dJBn0Itva8VCH9fY8V/GPDKWJocwO6bAHSowMHSHm7qUpsAUmAJTYApMgSkwBa6tgGJd8RoYUqAryhW1xvwGUHq0SNHdR4GucDYPpPBiX8VuwMhvICAopOhV9ANH5gMuwBF71vTNrkPhbp7CGVgAmhTtIAdbwEHvG6rDAtgIGMlFoc6Od85Yo4tGLsbMO0MhOQMWroldgc8uH2zxb1zRDwrpCgpCWet30AbICQqBNtbLxZhYeiSurhXAgA3XywVwARF08wRlwAlayctcetIRrKCHwxx7Yo0Ye/eQfWDbehqAU9bQDLjwrVPKHDr3+Jw5bJljzDVx00Lcxs0xThuwhNa92+dFAOa/tDms1x3DJ92Ca+yBLPaHreCVc/eGfP2Wuzz4trbuG7E5NzdoyIb7i8auiU9cdX+x5xqN+JSvMXPMd40+xrtn7I05DmPyAQH5tf+uy8teAG+BMfPY/Omnn45H7jze53zHFHgdFBgYeh12eTlOgSkwBabAFJgCU2AKXFYBBSxg4VPRawyU8e1QICu8FcDgBGihkAc+HH37rcgGC+om0oGhUwN4AFmaHxDyOI1Ddwfooeg2V9Gt0GZHIV0niaLfUceNIhyMULyL2W8Ft1jFbB6QwDb/ABP7IATgAVCYr3B3bj4fbMlTPuAD8NI7k4IH5vIfwOJDwU9L+QWo+jfx4paHeUEh9tl2sGUPenwJvAMmHEEh64IkbAESAISYzXfQzOHc3rQntKYPQFIHi04pPsxhx57TjZ5BDvaM8VU3k/MglTW06N0+wRb26BBko7s1fLITiKKf+yRd6S3foBBfYgCgxGTvnAM/7pPeASS+OtXAH3sK8pibRkANuCQW+yQGc+TLj2vmA0X2BVTk1z1kbz/44IPDr/jNs1YM5sjf/rAtb9rLgbZPnjw5gFR7fWzQjinwmigwMPSabPTSnAJTYApMgSkwBabAFLimAgplkEdxqxhW6CqQFczAgsLYdQW5b3MU8gpja/1WBCvK64qoeDcfXHGY66NY9uncOoUzYABYAAxiAUeMiUHR7lqAwRxrFPbGfcxTmIM8AAhoYp545SIO44p2+YEwD88esTLfdfm41qNu/MsH1PlvUMh4fkECv9k9Q6Fzp9AZCsmF37qBrHv33XePXEEM2hmjE5jDvhiDQsAJDcRct5Bcekm2fRQHe3SxT+KjN111bYEo1hoDh4JCdPMbKAM2gBdxBMGcix0EsVdi4LdH5dgXH73FYb/Z15klZvCFvQAcu9YASmwEftw3HtuzD16+7QgS1bUEKMlbvPIQu9iM8c0uzeRJg/5zmbwCO+4fc+hrj4AfIMg5Dfl37gB35HaGPvaqvZcXzWkHaLIFdIlpxxR4XRUYGHpdd355T4EpMAWmwBSYAlNgClxKAQWt4lpxrkgGhHwrciuyFfUBoro0FNUKcoUzCKPYfvTo0ZE7m3UGBYKCQoGkYBIfAIPr/LoO/Cjg+ejRLHN0+SjajYsPnAgKWQ+sAATAAPugEBACTij8QQIvSOZTTop3+RhX5INC/Os6ogOgcIYfbPcoFr9ylmfgpE4h6+XiqMMG0JALYGHst99+O+bQj12x01gsjmzVKcSfWII55vUYlDx1RNnDdNCd0+NWfNKLJnXt8CGXOm9oGhRhzzW25OZbzDSjfyDJHvHRI1tBIefiBG9800nu9kLONGNHPObSXv517IAtcrVP4hWDuaCL+8neyAnsEQM/bPDPLhgDPgWFwCV+HOYapyd7cmGfTwc96MA2W+yDVDqD8iUWB9DFFzvi/fXXX497ascUmAK/KzAwtDthCkyBKTAFpsAUmAJTYApcRAHFdt1BimJgRSEPjCiwFdMKevBA4ayYVrADGeb4eNwGQOhRGjaDQX47gkKKaRBAoc2ec+CBPQDDNb4CDAEd8AAsMEcM
AIZzPl1TnFvDHshj3FrzfSvw+XRdUQ8KASDi150jPp0rdUsBNgACSBAUAhD4FaN11vPTo2zmWu9gH8AAC+gIrMhT98kZCoFWwE4xBkkAB/ugK4cfOvo2LwDUi5brkuGHpvKjJ03EZn/lLTb++fCeKNfpQzdr5Bvo4ocmdSkZDyTRsZcuW987hvgPfvFnf/jUReQ+AlWcewl09xg/5gJdNJIjCChfYMw330Gi7Ne55JvW5tDKOZ27vwAp9xP7/PNFPx/wR+72ld4O9sXdGvcE34Cf/Myzzjdf/FqzYwpMgf9U4B/PTv85UabAFJgCU2AKTIEpMAWmwBS4hgIKcsWwYlpBDISAAgpmY0CI3wEkhbCPdQptxbWPYtlYH9cqtLMNRijSFdl8+JzhDlAABoAQgIMDLAjOADIgw7mrxZo6T9hV9IsFAPEN5pgjD1Ap6CA+MEyMAECwgm2QBLDxmw0266YSgzF2QAia9Z6h4Ac9gyf8ghd1r9QdZK0XRgMNYAhfbAE9YgO4gAca+dahI0a60JiOxtkz11o+zaGXcTrSXuxeAE0ffq1jR+7WAFRysheAB8giT3E497v8acU2W3Snk1jsbZ1RtDcfmDEmBt/dM3Jyz9gP+bu/6CQ22tHAOLt82Cdzg0h8sQVMmRuktCfWBc/qUvKI1xngBCWNsUU7NuwHm87Zt59yocG33357/EcxcA1IqxPrGn/li3IKvFoF1jH0avWetykwBabAFJgCU2AKTIEp8LcqoGhWkPeYmaJd8a2Q990jZL7NBQnM8VGYK7DBAkddGiCEYrtHcAAAIKLOGlBAEe9wzQHm1L0BKujaeO+9945v5wrzII7fOmgU+SCEc/HoUGEDCGHXdSCEXzHp6AFFjAMQ7LEDwhiTY7EBImIMSgFU4AGdjAWFfIMT5gIP/BoTg7mgDd9gRTCKL+/jEU8wyxhQw7bYglDiZ1texcC2vXBOZ/EDN+bo+gFoaEETc8RlL3okjW262l/QRowBNjHQiw58sOVe8Ns3KMY/3dwLZz2N2w/269Lhmz0HP+JyvxSvubQRKygj/mBeuplLO3nW9STuAJUxsbAvToc9kKNxcck16Elza9iVDwDFNv3q4jqM7JgCU+ClFBgYeimZNmkKTIEpMAWmwBSYAlNgCtyuAgpxMEJxDmb4+K3wBgMCR8YV3q4BQtb4tl4hXleHjgvrwIH333//eedInUoK9YAB2+AGQKCwZ0/Rrosl0ODcvN7H05w6btgCDkCAYJFv8CBQAwKwr/A37hwEAQnAkx5H4xeMMbd3z7Atb99y4xesMQbkGA+IZLNrunuCW/QQD/9ADVhhHV80ZUtu5oAYL74LyLo6poJFvfuHDb6srxNLXtaIuXjAFo/BsR1Ikq9Y7B99xKmr6ccffzw6sPjihw+5mssm4EMLusuHPzbdF3Jmi558mysuc/kL4gWm3C/G5V1XkN/GaWQ/3Q9y1MXDh3GPfnmPU4DPvcC2eHWCAT/2RHcQu2y4n8QmH/H67JgCU+CvKzAw9Ne128opMAWmwBSYAlNgCkyBKXBzCoA8IIXC2wcMAhvqFPINHBh3PSik+Pfbxzt+HObpxgAcAATXFO6Ke6DAuKJcgV5njDkK+Ap8sET3To8y+Q6mgApAxbl7iB/QAqTwAX6AFjAAEAE5epTKmHNx8Cle4IAP/v32L+zFAGqAX645jAUgQAbA5dxNxJ5cAC0xBK8AGee0E2udTMXBVi/f7v1IgaOAjxjEzDbw4dse8Qlm0TRwJQdxN0eXEsAmd2sAF3HSEXAB8pxbD7jQ0Dx2QSLa0kksdVDpKnr77bcPOOQ+YJs9UMd8tsqBD7G4x6ynh32kww8//HDAnNbQmH8x09p5nUHG6C1+9ukAEtlj8XvXEY17DAxkooXvHVNgCvy9CgwM/b16ztoUmAJTYApMgSkwBabAFLgZBRTYIIAPsKEoBwkU9iAEAFF3hzHzFPpgkN/mm1MHkv8UVmeRAh0EqbPIuv7dO4gQHDDfejbZAhx6yTN1KG1DAAAHuklEQVQfgSKQxTywAShgDzQwDgj4NtccsIHvQBFwBHTwWzcReGVOdvlmT07sA1kAld9yzx495GGMTbqx4dEx64M94AmIAniIQ2wBHfFaZ6zHrqx75513DngjBh82Pvzww0MrvmjEB0BSPPQKkOWPpmkoZ/mDKGzYF/NpCwbVwcNfcdY5pBNHnj06B/aw0YuhaSkXsCf45rt/cw8o9b4loFCXUd1hfNgrMbMhH79BNfeRnP2Ws4N2uoTEKzcQqfv2Zv6gFsgUuFMFBobudGOX1hSYAlNgCkyBKTAFpsAU+F8KgAlAkYI9WAGM1EmiWFfwAwSKdOPmK/jNB0tABR8FPaih4H94eDiuAxYBoSCLudYZt6bOF1AA0Ojf1IM2bIANgAqf4AIfwIHvAI7foEaQBFQCc/jkr64k18ESOfHbGtDmzTffPEBR7xviVxeNb4AH3BAzfehlTK58s2dcfAEaYEqc5ooFQOoaPYEPuYrRPLkHVVyTm29wT+4epQqM9a4gecrBOegjvzqfvPfHfHHwEZQLGMnVOvHJ2d46Zw/Mko+YADRrxS8W+fpt7/hzHsCxVg7AEgCmQwn8kYd9YAcwY9Nc18ThunVi2DEFpsCrV2Bg6NVrPo9TYApMgSkwBabAFJgCU+AmFQB9AkU9RqWABybADMU9YAAE+DjMV9CDEECQdc59FP1gCVBUt0ugCFAwh002gJBAEWDgGjChewaA8MkmENP7ccAZ6/jX/QIwAA0OY+IHUICd4qxzxbf5QRuxyktsHm8DTKwDW/zb9o8//viwDWz0aBk9xCYOufEJaJkTgNJVw6bHpXQogTXWyJNv4Egs/Bdj9uVhjS4dth1Pnz5946OPPjpi6d1BfAE7YjaPTXHw6XE64IgPcIYfcA4wMs9HvuaAV/Kwd3SznzRkxyOGvu2nx9TkYW1ACOCyJiBHE/Z8m2ef/JZbnUJHQjumwBT4vyowMPR/lX/Op8AUmAJTYApMgSkwBabA7SoAknjcyAcYAgCAAoAhyAAEOHpczBxzQQlAxQEO1E0DXOjg0U0CDgBDwAdwAkawzx+4AZ6ADACRMXb4NQ+wYcf6AI+5IIwOIN/W98gVCKOTpe4VEKV5rvWOJHGxY4xtwAU0AZDMt05+dSVZp9vo8ePHB2wBinTrGDMfrPKOpbfeeut5LGKQu2t0c05rPvk3Jm4gxfuE5K1zie8ekQNYAkDGz4/bAW1ne+waozF7PUZnXQfwBSABR+KiNwgkT37EYw/FVqcQe2zIj07yMEfcNJSjcyBoxxSYArerwMDQ7e7NIpsCU2AKTIEpMAWmwBSYAjetAPgQhAB0gIDgjXEHkADc9M6duo2cg0c6ScwFI0AJYIkdQAL4ARbMA2b4AJGAIX7q1gEuwA/Qoy4lax06jgCMIA3A0Xtx/GaDXd8BGCBKTOzyD8LU6SMOYKQuGZ1O4gNCdPsAJc75qxuIBuKqk4YNEIltkIkGjx49Orp9wBix8G+djiCQyTdN6pASo3jPj7iJSdeP9XUPiUMewE2
dWOKjP6BDJ+uM0QPMM37+j2D0A4ro1COD/IqRfXHIzbwdU2AKXE+BgaHr7dkingJTYApMgSkwBabAFJgCl1GgjiPQxAEi+NQZpCunbiCwAmwwZg5Y4QA6rAeYHAAEoBGIqlsJvAE3ACK2ABjACbzwmJsxXS06hxyuATZ1MfVoVI+dFSt/dfeAJoEbY0CNb9AFTAF7ehwMyHHOnpzAHHNAFF07roNHQBM9nIM0vefI42O9CLrcxMheUIt/XUZyBJusEY/5bMq3LqA6fqwFihy0d1088hW7sd4DJDYwiE4AFs0HgA7pdkyBu1FgYOhutnKJTIEpMAWmwBSYAlNgCkyB+1KgjqQgEWgRuABR6joCkwALUAXEAG7ADdd9974cwKQXaZsLmgBM7AM3AR5dM/yAImwHjgAoRzCILfbN4ReM0UEEtLAdOAKDwBRAxpjuH8AHuLIesDHOTh1V/MuRHVDLNzDjAJPqUJI3O+aAQ3IH1MQjbgcIpJMoPfmUg7ytp4/8fZ/P6RfQOwztmAJT4C4VGBi6y21dUlNgCkyBKTAFpsAUmAJT4PVUAPRwABpACADk0+NsgRdjASHzQRlrWwf6BE4AIaAFRNKFo2MHbHHdeF1PwAr7dSrxAQgZA2vYCUZZB8L0GJtrABEYpPsoGGStNbqN2OfbmHVidq0uKzbF7eO3XM5zjIM94g0yvZ53ybKeAlPgrMDA0O6HKTAFpsAUmAJTYApMgSkwBabAHwqAQ6CLA0gBVnqkTSeQ6yCOa+YFmvwGYow7gkZ1Afm23nyHb7bqagJrfAAda8GlOpIAJNeCPta0HiwKaB2Gd0yBKTAF/qQCA0N/UrBNnwJTYApMgSkwBabAFJgCU2AKTIEpMAWmwL0o8DuuvpdslscUmAJTYApMgSkwBabAFJgCU2AKTIEpMAWmwEsrMDD00lJt4hSYAlNgCkyBKTAFpsAUmAJTYApMgSkwBe5LgYGh+9rPZTMFpsAUmAJTYApMgSkwBabAFJgCU2AKTIGXVmBg6KWl2sQpMAWmwBSYAlNgCkyBKTAFpsAUmAJTYArclwIDQ/e1n8tmCkyBKTAFpsAUmAJTYApMgSkwBabAFJgCL63AvwG3ZptjNVrP4wAAAABJRU5ErkJggg==)
###Code
# It's a human arm! Hello, voxel-friend! You're lookin' ready to shake hands.
###Output
_____no_output_____
###Markdown
**Task 2**: Load the data using pydicom as a 3D volume and then reslice it! [35 Points]
###Code
# Please upload ct.zip using the file panel on the left.
# Then use the following snippet to extract the data.
import zipfile
with zipfile.ZipFile('ct.zip', 'r') as zip_ref:
    zip_ref.extractall('.')
# 1) Now loop through all the DICOM files and store them in a 3D numpy array.
# Hint: You can either store them in a list first or read the dimensions of a
# single image slice to properly create the 3D numpy array.
# Hint 2: os.listdir(DIR) gives a list of filenames in a directory.
# Hint 2b: This list is not sorted - make sure you sort it.
# Hint 3: The dcmread function loads a single DICOM file.
# Hint 4: You can then use .pixel_array to access the image data.
from pydicom import dcmread
# I find pathlib to be helpful for manipulating files.
from pathlib import Path
# numpy and matplotlib are used below; they may already be imported in an
# earlier cell, but importing them here keeps this cell self-contained.
import numpy as np
from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt
# Create a 3D array to store the slices. I checked and all slices have a shape
# of (454, 512), so no padding is necessary!
slices = np.ndarray(shape=(220, 454, 512))
# Read the DICOM slices into the numpy 3d array.
for idx, slice in enumerate(sorted(Path('ct').iterdir())):
    slices[idx, :, :] = dcmread(slice).pixel_array
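# Aside (a hedged alternative, not required by the task): filename sorting
# appears to work for this dataset, but a more robust approach is to order the
# slices by the DICOM InstanceNumber tag (0020, 0013). This sketch builds a
# second volume and does not replace `slices` above:
datasets = [dcmread(p) for p in Path('ct').iterdir()]
datasets.sort(key=lambda ds: int(ds.InstanceNumber))
slices_by_instance = np.stack([ds.pixel_array for ds in datasets])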
# 2) Now create and show axial, sagittal, and coronal slices from the 3D volume.
# Hint: Please use imshow(XX, cmap='gray') to show the image.
# Viewing an axial slice (at the elbow):
imshow(slices[65, :, :], cmap='gray')
# Viewing a sagittal slice (at the elbow):
imshow(slices[:, :, 85], cmap='gray')
# Viewing a coronal slice (at the elbow):
imshow(slices[:, 115, :], cmap='gray')
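# Aside (hedged): if the voxel spacing is anisotropic, the sagittal and coronal
# reslices above will look stretched. The in-plane spacing (PixelSpacing, tag
# 0028, 0030) and the slice spacing (approximated here by SliceThickness, tag
# 0018, 0050) can be used to set imshow's aspect ratio, e.g. for the coronal view:
ds = dcmread(next(Path('ct').iterdir()))
row_mm, col_mm = [float(v) for v in ds.PixelSpacing]
slice_mm = float(ds.SliceThickness)
imshow(slices[:, 115, :], cmap='gray', aspect=slice_mm / col_mm)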
###Output
_____no_output_____
###Markdown
**Task 3**: Use the Window/Level-technique to visualize the data! [45 Points]
###Code
# We will now enhance the visualization from above by performing
# Window/Level adjustment.
# Here is one way of doing that:
# vmin = level - window/2
# vmax = level + window/2
# plt.imshow(hu_pixels + rescale, cmap='gray', vmin=vmin, vmax=vmax)
# plt.show()
# 1) Please load the Window/Level values from the DICOM file,
# print these values, and then visualize one slice with window/level adjustment.
# Hint: The DICOM header has the following tags.
# (0028, 1050) Window Center
# (0028, 1051) Window Width
# Hint 2: You can use slice[key].value to access DICOM tag values.
# Hint 3: (0028, 1052) Rescale Intercept might be important.
# Read a DICOM file again to access the header:
slice = dcmread(next(Path('ct').iterdir()))
# Print Window Center and then store it:
print(slice[0x0028, 0x1050])
level = slice[0x0028, 0x1050].value
# Print Window Width and then store it:
print(slice[0x0028, 0x1051])
window = slice[0x0028, 0x1051].value
# Print Rescale Intercept and then store it:
print(slice[0x0028, 0x1052])
rescale = slice[0x0028, 0x1052].value
# Try the provided formula for Window/Level adjustment and see the resulting
# image on a slice:
vmin = level - window/2
vmax = level + window/2
plt.imshow(slices[65] + rescale, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# Observation: compared to the same image above, it's now a lot easier to see
# detail in the tissue. Cool!
# 2) Play around with different Window/Level values that enhance
# the visualization.
# These values seem to emphasize soft tissue (flesh, blood vessels, etc):
level = 50
window = 250
vmin = level - window/2
vmax = level + window/2
plt.imshow(slices[65] + rescale, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# These values seem to emphasize hard tissue (bone detail):
level = 500
window = 1500
vmin = level - window/2
vmax = level + window/2
plt.imshow(slices[65] + rescale, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# Which values make sense and why?
# I presented two sets of values above. The first, 250/50, seems to emphasize
# soft tissue quite well, bringing detail to blood vessels in particular. The
# latter, 1500/500, seems to emphasize hard tissue, with increased detail in
# the arm bones. I believe the particular values that would make sense to use
# would depend on the use-case! For example, the latter would clearly be more
# useful in orthopaedics. I unfortunately broke my arm as a teenager, and I
# imagine radiology with such windowing could have been useful in resetting my
# bones.
###Output
_____no_output_____
###Markdown
**Bonus**: Create segmentations (label maps) for the volume using thresholding HU! [33 Points]
###Code
# Similar to Window/Level adjustment for visualization, we can threshold
# the volume to highlight the following components using the Hounsfield Units:
# 1) Fat
# 2) Soft Tissue
# 3) Bones
#
# Please create 3 segmentation masks for these structures.
# Then, please visualize each 3 slices per structure to showcase the segmentation.
# Hint: As a reminder, the following code allows thresholding of a numpy array.
# new_mask = imagevolume.copy()
# new_mask[new_mask < XXX] = 0
# Hint2: You might need to cast new_mask to int16 not uint16.
# TODO: YOUR CODE TO SEGMENT FAT
# TODO: YOUR CODE TO SEGMENT SOFT TISSUE
# TODO: YOUR CODE TO SEGMENT BONES
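# A possible sketch (not the graded answer): threshold the rescaled volume into
# three label maps. The HU cutoffs below are rough assumptions (fat roughly
# -100..-50 HU, soft tissue roughly 20..300 HU, bone above roughly 300 HU) and
# may need tuning for this scan. It reuses `slices` and `rescale` from Task 3.
hu = (slices + rescale).astype(np.int16)  # raw values shifted into Hounsfield Units
def hu_mask(volume, lower, upper):
    # keep voxels whose HU falls inside [lower, upper], zero out everything else
    mask = volume.copy()
    mask[(mask < lower) | (mask > upper)] = 0
    return mask
fat_mask = hu_mask(hu, -100, -50)
soft_tissue_mask = hu_mask(hu, 20, 300)
bone_mask = hu_mask(hu, 300, 3000)
# show one example slice per structure (the elbow slice used above)
for name, mask in [('fat', fat_mask), ('soft tissue', soft_tissue_mask), ('bone', bone_mask)]:
    plt.imshow(mask[65], cmap='gray')
    plt.title(name)
    plt.show()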
# Are the segmentations good?
# TODO: YOUR ANSWER
#
# Thank you and Great job!!
#
# _.---._
# .' `.
# :) (:
# \ (@) (@) /
# \ A /
# ) (
# \"""""/
# `._.'
# .=.
# .---._.-.=.-._.---.
# / ':-(_.-: :-._)-:` \
# / /' (__.-: :-.__) `\ \
# / / (___.-` '-.___) \ \
# / / (___.-'^`-.___) \ \
# / / (___.-'=`-.___) \ \
# / / (____.'=`.____) \ \
# / / (___.'=`.___) \ \
# (_.; `---'.=.`---' ;._)
# ;|| __ _.=._ __ ||;
# ;|| ( `.-.=.-.' ) ||;
# ;|| \ `.=.' / ||;
# ;|| \ .=. / ||;
# ;|| .-`.`-._.-'.'-. ||;
# .:::\ ( ,): O O :(, ) /:::.
# |||| ` / /'`--'--'`\ \ ' ||||
# '''' / / \ \ ''''
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# /.' `.\
# (_)' `(_)
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# jgs \\. .//
# ///) (\\\
# ,///' `\\\,
# ///' `\\\
# ""' '""
###Output
_____no_output_____
###Markdown
CS480_w.png (embedded course header image; base64 data omitted) Assignment 5 **Yiming Shen** **4/03/2021** **HW Topic: load CT volume, slice it, adjust window/level**
###Code
# In this assignment, we will visualize and explore a CT scan!
# load numpy and matplotlib
%pylab inline
# we are using pydicom, so let's install it!
!pip install pydicom
import numpy as np
import mahotas as mh
import os
from sklearn.ensemble import RandomForestClassifier
###Output
_____no_output_____
###Markdown
**Task 1**: Download and visualize data with SliceDrop! [20 Points]
###Code
# Please download https://cs480.org/data/ct.zip and extract it on your computer!
# This is a CT scan of an arm in DICOM format.
###Output
_____no_output_____
###Markdown
1) Let's explore the data without loading it. TODO: Without loading the data, how many slices are there? TODO: YOUR_ANSWER There are **220** slices (one .dcm file per slice). ![p1](https://github.com/Yiming-S/cs480student/blob/main/05/1.png?raw=true)
###Code
# 2) Let's visualize the data with SliceDrop!
# Go to https://slicedrop.com and drag'n'drop all .dcm files into the browser.
# Please use the 2D sliders to show axial, sagittal, and coronal slices in 3D.
# TODO Please post a screenshot of SliceDrop's 3D View in the text box below by
# using the Upload image button after double-click.
###Output
_____no_output_____
###Markdown
![p2](https://github.com/Yiming-S/cs480student/blob/main/05/2.png?raw=true) **Task 2**: Load the data using pydicom as a 3D volume and then reslice it! [35 Points]
###Code
# TODO: Please upload ct.zip using the file panel on the left.
# Then use the following snippet to extract the data.
import zipfile
with zipfile.ZipFile('ct.zip', 'r') as zip_ref:zip_ref.extractall('.')
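# Optional check (my own addition, assuming the archive extracts into a 'ct/' folder, as
# used below): count the extracted DICOM files to confirm the slice count from Task 1.
dcm_files = [f for f in os.listdir('ct') if f.lower().endswith('.dcm')]
print('Number of DICOM slices:', len(dcm_files))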
# 1) Now loop through all the DICOM files and store them in a 3D numpy array.
# Hint: You can either store them in a list first or read the dimensions of a
# single image slice to properly create the 3D numpy array.
# Hint 2: os.listdir(DIR) gives a list of filenames in a directory.
# Hint 2b: This list is not sorted - make sure you sort it.
# Hint 3: The dcmread function loads a single DICOM file.
# Hint 4: You can then use .pixel_array to access the image data.
from pydicom import dcmread
# TODO: YOUR CODE FOR LOADING THE VOLUME AS A 3D NUMPY ARRAY
dirfiles = sorted(os.listdir('ct'))           # sorted list of the DICOM file names
file0 = dcmread('ct/{}'.format(dirfiles[0]))  # read one slice to get the in-plane dimensions
imshape = list(file0.pixel_array.shape)
imshape
imshape.append(len(dirfiles))                 # third dimension = number of slices
data = np.empty(imshape)                      # allocate the (rows, cols, slices) volume
data
for idx, entry in enumerate(dirfiles):
    arr = dcmread('ct/{}'.format(entry)).pixel_array
    data[:,:,idx] = arr
data
shape(data)
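# Optional, more robust loading sketch (my own addition, not required by the assignment):
# alphabetical filename order is not guaranteed to match anatomical order, so the slices
# can instead be sorted by their InstanceNumber tag, and RescaleSlope/RescaleIntercept can
# be applied to obtain Hounsfield units directly. Assumes every file in 'ct/' carries these tags.
slices = [dcmread('ct/{}'.format(f)) for f in dirfiles]
slices.sort(key=lambda s: int(s.InstanceNumber))
slope = float(slices[0].RescaleSlope)
intercept = float(slices[0].RescaleIntercept)
hu_volume = np.stack([s.pixel_array for s in slices], axis=-1).astype(np.float32) * slope + intercept
hu_volume.shape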
# 2) Now create and show axial, sagittal, and coronal slices from the 3D volume.
# Hint: Please use imshow(XX, cmap='gray') to show the image.
ps = file0.PixelSpacing     # in-plane pixel spacing (row, column) in mm
st = file0.SliceThickness   # slice thickness in mm, used as the spacing along the slice axis
ps
st
imshape[2]//2
# TODO: YOUR CODE FOR AXIAL
AXIAL = ps[1]/ps[0]
plt.imshow(data[:, :, imshape[2]//2], cmap='gray')#110
plt.gca().set_aspect(AXIAL)
plt.title("AXIAL 110")
plt.show()
# TODO: YOUR CODE FOR AXIAL
AXIAL = ps[1]/ps[0]
plt.imshow(data[:, :, 80], cmap='gray')#80
plt.gca().set_aspect(AXIAL)
plt.title("AXIAL 80")
plt.show()
# TODO: YOUR CODE FOR AXIAL
AXIAL = ps[1]/ps[0]
plt.imshow(data[:, :, 130], cmap='gray')#130
plt.gca().set_aspect(AXIAL)
plt.title("AXIAL 130")
plt.show()
imshape[1]//3
# TODO: YOUR CODE FOR SAGITTAL
SAGITTAL = ps[1]/st
plt.imshow(data[:,imshape[1]//3,:], cmap='gray')#170
plt.gca().set_aspect(SAGITTAL)
plt.title("SAGITTAL 170")
plt.show()
# TODO: YOUR CODE FOR SAGITTAL
SAGITTAL = ps[1]/st
plt.imshow(data[:,150,:], cmap='gray')#150
plt.gca().set_aspect(SAGITTAL)
plt.title("SAGITTAL 150")
plt.show()
# TODO: YOUR CODE FOR SAGITTAL
SAGITTAL = ps[1]/st
plt.imshow(data[:,190,:], cmap='gray') #190
plt.gca().set_aspect(SAGITTAL)
plt.title("SAGITTAL 190")
plt.show()
imshape[0]//5
# TODO: YOUR CODE FOR CORONAL
CORONAL = st/ps[0]
plt.imshow(data[imshape[0]//5,:,:].T, cmap='gray')#90
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
# TODO: YOUR CODE FOR CORONAL
CORONAL = st/ps[0]
plt.imshow(data[50,:,:].T, cmap='gray') #50
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 50")
plt.show()
# TODO: YOUR CODE FOR CORONAL
CORONAL = st/ps[0]
plt.imshow(data[120,:,:].T, cmap='gray') #120
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 120")
plt.show()
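# Optional helper sketch (my own addition): one function that shows a slice along any
# axis with the same physical aspect handling as the blocks above, so each of the nine
# plots can be reproduced with a single call.
def show_slice(volume, axis, index, pixel_spacing, slice_thickness, title=''):
    if axis == 2:    # axial: rows vs. columns
        img, aspect = volume[:, :, index], pixel_spacing[1] / pixel_spacing[0]
    elif axis == 1:  # sagittal: rows vs. slices
        img, aspect = volume[:, index, :], pixel_spacing[1] / slice_thickness
    else:            # coronal: slices vs. columns (transposed, as above)
        img, aspect = volume[index, :, :].T, slice_thickness / pixel_spacing[0]
    plt.imshow(img, cmap='gray')
    plt.gca().set_aspect(aspect)
    plt.title(title)
    plt.show()

# example usage, mirroring one of the plots above
show_slice(data, 2, 110, ps, st, 'AXIAL 110')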
###Output
_____no_output_____
###Markdown
**Task 3**: Use the Window/Level-technique to visualize the data! [45 Points]
###Code
# We will now enhance the visualization from above by performing
# Window/Level adjustment.
# Here is one way of doing that:
# vmin = level - window/2
# vmax = level + window/2
# plt.imshow(hu_pixels + rescale, cmap='gray', vmin=vmin, vmax=vmax)
# plt.show()
# 1) Please load the Window/Level values from the DICOM file,
# print these values, and then visualize one slice with window/level adjustment.
# Hint: The DICOM header has the following tags.
# (0028, 1050) Window Center
# (0028, 1051) Window Width
# Hint 2: You can use slice[key].value to access DICOM tag values.
# Hint 3: (0028, 1052) Rescale Intercept might be important.
# TODO: YOUR CODE
wc = file0.WindowCenter
ww = file0.WindowWidth
ri = file0.RescaleIntercept
print("WindowCenter: {}\nWindowWidth: {}\nRescaleIntercept: {}".format(wc,ww,ri))
# vmin = level - window/2
# vmax = level + window/2
vmin = wc-ww//2
vmax = wc+ww//2
print("vmin: {}\nvamx: {} ".format(vmin,vmax))
# TODO: YOUR CODE FOR AXIAL(vmin,vmax)
#110, 80,130
plt.imshow(data[:, :, imshape[2]//2]+ri, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(AXIAL)
plt.title("AXIAL 110")
plt.show()
plt.imshow(data[:, :, 130]+ri, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(AXIAL)
plt.title("AXIAL 130")
plt.show()
plt.imshow(data[:, :, 80]+ri, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(AXIAL)
plt.title("AXIAL 80")
plt.show()
# TODO: YOUR CODE FOR SAGITTAL(vmin,vmax)
plt.imshow(data[:,imshape[1]//3,:]+ri, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(SAGITTAL)
plt.title("SAGITTAL 170")
plt.show()
# TODO: YOUR CODE FOR SAGITTAL(vmin,vmax)
plt.imshow(data[:,190,:]+ri, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(SAGITTAL)
plt.title("SAGITTAL 190")
plt.show()
# TODO: YOUR CODE FOR SAGITTAL(vmin,vmax)
plt.imshow(data[:,150,:]+ri, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(SAGITTAL)
plt.title("SAGITTAL 150")
plt.show()
# TODO: YOUR CODE FOR CORONAL(vmin,vmax)
plt.imshow((data[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
plt.imshow((data[50,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 50")
plt.show()
plt.imshow((data[120,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 120")
plt.show()
# 2) Play around with different Window/Level values that enhance
# the visualization.
# TODO: YOUR CODE
wc, ww = 50, 200
vmin = wc-ww//2
vmax = wc+ww//2
plt.imshow((data[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
wc, ww = 100, 200
vmin = wc-ww//2
vmax = wc+ww//2
plt.imshow((data[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
wc, ww = 200, 200
vmin = wc-ww//2
vmax = wc+ww//2
plt.imshow((data[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
wc, ww = 50, 150
vmin = wc-ww//2
vmax = wc+ww//2
plt.imshow((data[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
wc, ww = 50, 100
vmin = wc-ww//2
vmax = wc+ww//2
plt.imshow((data[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
wc, ww = 50, 50
vmin = wc-ww//2
vmax = wc+ww//2
plt.imshow((data[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
wc, ww = 50, 10
vmin = wc-ww//2
vmax = wc+ww//2
plt.imshow((data[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
wc, ww = 50, 500
vmin = wc-ww//2
vmax = wc+ww//2
plt.imshow((data[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
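# Compact alternative sketch (my own addition, not part of the original answer): loop over
# a few window/level pairs and show them side by side with the settings in each title,
# instead of repeating the plotting block for every setting.
settings = [(50, 500), (50, 200), (50, 50), (200, 200)]   # (level, window) pairs to compare
fig, axes = plt.subplots(1, len(settings), figsize=(4 * len(settings), 4))
for ax, (level, window) in zip(axes, settings):
    ax.imshow((data[imshape[0]//5, :, :] + ri).T, cmap='gray',
              vmin=level - window // 2, vmax=level + window // 2)
    ax.set_aspect(CORONAL)
    ax.set_title('L={} W={}'.format(level, window))
plt.show()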
###Output
_____no_output_____
###Markdown
Which values make sense and why? TODO: YOUR ANSWER It depends on which type of tissue we want to examine. A very wide window that displays the full range of CT numbers obscures the small attenuation differences between soft tissues, so a narrower window centered on the tissue of interest gives better soft-tissue contrast. [Reference: radiopaedia.org](https://radiopaedia.org/articles/windowing-ct?lang=us) ![p3](https://github.com/Yiming-S/cs480student/blob/main/05/3.png?raw=true) **Bonus**: Create segmentations (label maps) for the volume by thresholding HU! [33 Points]
###Code
# Similar to Window/Level adjustment for visualization, we can threshold
# the volume to highlight the following components using the Hounsfield Units:
# 1) Fat
# 2) Soft Tissue
# 3) Bones
#
# Please create 3 segmentation masks for these structures.
# Then, please visualize each structure with 3 slices to showcase the segmentation.
# Hint: As a reminder, the following code allows thresholding of a numpy array.
# new_mask = imagevolume.copy()
# new_mask[new_mask < XXX] = 0
# Hint2: You might need to cast new_mask to int16 not uint16.
wc = file0.WindowCenter
ww = file0.WindowWidth
ri = file0.RescaleIntercept
vmin = wc-ww//2
vmax = wc+ww//2
print("vmin: {}\nvamx: {} ".format(vmin,vmax))
# TODO: YOUR CODE TO SEGMENT FAT
# Convert to Hounsfield units first, then keep only the (roughly) fat range between
# -300 and -50 HU; everything outside that range is zeroed, and the mask itself is shown.
new_mask = data.copy().astype(np.int16) + np.int16(ri)
new_mask[new_mask < -300] = 0
new_mask[new_mask > -50] = 0
AXIAL = ps[1]/ps[0]
plt.imshow(new_mask[:, :, 110], cmap='gray')  # axial slice 110 of the fat mask
plt.gca().set_aspect(AXIAL)
plt.title("AXIAL 110")
plt.show()
# TODO: YOUR CODE TO SEGMENT SOFT TISSUE
# Note: these thresholds are applied to the raw stored pixel values; the rescale
# intercept is only added when the slice is displayed below.
new_mask = data.copy().astype(np.int16)
new_mask[new_mask < 50] = 0
new_mask[new_mask > 1500] = 0
plt.imshow(new_mask[:,150,:]+ri, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(SAGITTAL)
plt.title("SAGITTAL 150")
plt.show()
# TODO: YOUR CODE TO SEGMENT BONES
# As above, the thresholds are applied to the raw stored pixel values; the rescale
# intercept is only added for display.
new_mask = data.copy().astype(np.int16)
new_mask[new_mask < 700] = 0
new_mask[new_mask > 3400] = 0
plt.imshow((new_mask[imshape[0]//5,:,:] + ri).T, cmap='gray', vmin=vmin,vmax=vmax)
plt.gca().set_aspect(CORONAL)
plt.title("CORONAL 90")
plt.show()
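# Optional sketch (my own addition, not the graded answer): build all three masks from a
# Hounsfield-unit volume with one helper. The HU ranges below are approximate textbook
# values, not values derived from this particular dataset.
hu = data.astype(np.int16) + np.int16(ri)

def threshold_mask(volume, lo, hi):
    mask = volume.copy()
    mask[(mask < lo) | (mask > hi)] = 0
    return mask

fat_mask = threshold_mask(hu, -150, -50)    # fat: roughly -150 to -50 HU
soft_mask = threshold_mask(hu, 20, 300)     # soft tissue: roughly 20 to 300 HU
bone_mask = threshold_mask(hu, 300, 3000)   # bone: roughly 300 HU and above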
# Are the segmentations good?
# TODO: YOUR ANSWER
# Yes, it works.
#
# Thank you and Great job!!
#
# _.---._
# .' `.
# :) (:
# \ (@) (@) /
# \ A /
# ) (
# \"""""/
# `._.'
# .=.
# .---._.-.=.-._.---.
# / ':-(_.-: :-._)-:` \
# / /' (__.-: :-.__) `\ \
# / / (___.-` '-.___) \ \
# / / (___.-'^`-.___) \ \
# / / (___.-'=`-.___) \ \
# / / (____.'=`.____) \ \
# / / (___.'=`.___) \ \
# (_.; `---'.=.`---' ;._)
# ;|| __ _.=._ __ ||;
# ;|| ( `.-.=.-.' ) ||;
# ;|| \ `.=.' / ||;
# ;|| \ .=. / ||;
# ;|| .-`.`-._.-'.'-. ||;
# .:::\ ( ,): O O :(, ) /:::.
# |||| ` / /'`--'--'`\ \ ' ||||
# '''' / / \ \ ''''
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# /.' `.\
# (_)' `(_)
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# jgs \\. .//
# ///) (\\\
# ,///' `\\\,
# ///' `\\\
# ""' '""
###Output
_____no_output_____
###Markdown
![CS480_w.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAADtCAYAAAAvOMSOAAAf83pUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjarZtpklu5lYX/YxW9BFzMWA7GCO+gl9/fASmVVC7b5YhWhnJgknx4dzjDBdKd//3Hdf/Dv5pCcynXVnopnn+ppx4G3zT/+dffZ/PpfX7/+FX4Pvrb4+7s74sCD0W+xs+PdXy+2uDx/NsbfR6fvz/u2vc3oX3f6PuLH28YdWWtYf+6SB4Pn8ctfd+on883pbf661Ln5wb8+j7xLeX738JnYeH7rvrZ/fpAqkRpZy4UQzjRon+f22cFUf8tjvdVnyvPs1j53mJw70v7roSA/HZ7P756/2uAfgvyj+/cn6Ofz18HP4zvM+KfYlm+MeKbv/yF5b8O/gvxLxeOP1cUfv/F2iT2z7fz/X/vbveez92NVIho+VaUdz+io9fwRN4kxfeywkflf+b7+j46H80Pv0j59stPPpZ1C2TlOku2bdi1874uWywxhRPISQhhkSg91shRDysqT0kfdkONPe7YyOEKx8XIw+HnWuxdt7/rLWtceRtPDcabkeR//eH+3S//mw9371KITMEk9fZJcFBdswxlTp95Fgmx+81bfgH+8fFNv/+lsChVMphfmBs3OPz8vMXM9kdtxZfnyPMyXz8tZK7u7xsQIq6dWYxFMuCLxWzFfA2hmhHHRoIGKw8xhUkGLOewWWRIMZbgamhB1+Y11d5zQw4l6GGwiUTkWOinRoYGyUopUz81NWpo5JhTzrnkmpvLPY8SSyq5lFKLQG7UWFPNtdRaW+11tNhSy6202lrrbfTQIxiYe+m1t977GMENLjR4r8HzB4/MMONMM88y62yzz7Eon5VWXmXV1VZfY4cdNzCxy6677b7HMXdAipNOPuXU004/41JrN9508y233nb7HT+z9s3qP338F1mzb9bCy5SeV39mjUddrT/ewgQnWTkjYyEZGa/KAAUdlDPfLKWgzClnvgeaIgcWmZUbt00ZI4XpWMjXfubuj8z9rby53P5W3sJ/ypxT6v4/MudI3T/n7S+ytsVz62Xs04WKqY90H88ZMC3/vefTODWuYifWbXVBFtXPsfOowfaNJyff1+J+7LR4ByTHvcPS6+5wzwaPwKR1ez5gT9mTHOZzrj+pXl7f1uZua7N7QjotnzgJaVnG+uZqu8JfpcfiY3N2BvdKKFj6WWXMzS3tUXovNeSTb16pllvI7pqL6HmyZDePafmEfSf11GftztAQN3UDKFnN3lyXmzrc2fB3nhhK83GDr+22Bh7nzduuTCy58z3SrbxbLdH52/guLa5B01AXeYKuZeVzaw5ztkJeejxcLEzW1cnC2JXbauTGH5upEa7pNqVZ+im2Ey9JviTwZ/GNAdxpGvmst+9ErdZB7eR9jTeJccU92lpFpdKCuVHhluZzg//vIVz+k5MxSuPOxvWLEhxnKW518GWrnGibQM6ogbPi3OU4wrKXUdmVi+21AEDeP4Ub4iS/PlKhNA50VVfgVne8OVMhOV1qnlA2qGtTkKzOSiWdhHFOf9eqffFZC6s5Xpszbyqfq3MLbQajfsEHyj5nKpUADx+Td7bXaVz09HpYRChz7VC0njHptmidNidFlC2JI1Y5Uj6LsNq6fqfTfRl3TJdnHmtR0GGVcy+NvqudnlM9RpgaQb2XNkgshiUXSJTrpntrv6fEu85cfS547djdca46L5RKFHlZF7lXUIDFrAq3i76bP5m+uIVf2qFayhm9xEFNpVo3Wcust+YyQyVmJZS9Rpm+0Mkh3hbPFiDteOquh247hIak9R5bppRDHejieV3zJOiglFk/qLokMfpJjUqzNkqdSIWFfLG5Ew0NnlbeirKOasGU5rxNxeJuo6nARJKau+3Dy6bd7sMcA+kctwdSuiprJ95+Uj+8CVkrVGwyyiX0MpvxRuFyIavEZtzUWDsBRW3SioB3OaQf9PQjPuK/i1o6Q3Ja2PPHV/fnB/7jV8q93DbjOBT47EJ5UtzdSBD4nqlf7jAnyiPnqsKk0m9Ju65OVQ7KOlPInlYtANZdvcZicHDt29Ly1S0a/O6TQ4VkCEdTODq3Q2cC21QigVq5Kts8SPfUpkzvFjfXlYa7Db3hGh2QL41HiPYhkaqlAiCU6wkPBRbGmlWNcXLj6sBBuhPgqfOlYGSbFxYxmhQgpXsmt0zSWHYroDYci1ZG6Ojr9qRmdyqVrI12GokLvCAeAJwind7BWxFuA/wy3AMieJafjT41gdg6QQjZKcgTAPrZ4JUGiO2z1Q5llnEmoOlKHrBSp6DPMVFo3I1yAZciZcVVLxcmNy0Q+A4W7lhhJZgT6WSdtgmgOuB/fFswR6rjxn52A7d0Scjuwv2g08pQDNXUJZkjKhuYm8QS3xLoAnpW8Bddp8RYPElk6USD1K3Qd73p6Nrcb4ZoMlRDxecEqlYSv9AbgTiOw03gsk51kCDdMsA83qCAhAQ67ENCS9gFoD636J6rTagjT6EDhVNJ4iWy17LWPIqDWvzLP9B+W4G8KuawZwqiIxfI7sX2XLkRIT/PaUIqmDyTbIq2NuAoTkeoW55Af1gVyNwo1As/0w0NxRAQtTKhiIDG1TJygpdCHbvq1dwvYYhhde9ufwIrqv72pRm4XBDCV0H+KtAQtUX3CplzQWwNAGyIz1k2igsaL4CwQ5ycCUsZ/VTbalxuzsFyKViS1NATq+wxUqfr4X2Ye+QVLvwNY66KskMN0WuN2lC2qEsiO2HYuhLckyLgxJVCAuwM4UC50yaXknl5DqSIMgKegUAQEeWvmoVdNUgIAPqG++n6fbhZgB+PBCbOjrJCpOiSq491uDeExWujVNoYAJsYgqqictS5xjv6abN0lU6CpHvZzTJsh8aZGw14STh5b5vmRhRE0deKVHaFDY4ZcBlRfwdKG9xPiWJcYAIK3iBMo7Ey3T6RYf6tpAfkREFH1Uyno7NvKsAPFE4Etj8AUQfn9kbUIq9IOwV5wB9UASE4IHHuUb3HXSEWBqk6frtCO1CYKueNMQLmOvdDzYCBJSpCRCdGiAYfJJlBm8DeUBqsMQc6IKM623GdBMG4+YxEhp+eJksgTcdJ0FjQuqmeCWE1JADQQLHtshblhMBsbV3FxTXTKAKb0Ez6DLGwaHTe5iCbAREwYJKXmmba1AwkWNRy6GzBLiIN1VZ7305GOEMD3PKGgi7hhmAn2h3wKggzgHoAViuOQZzoM+gY/cblNMhBD6iCLbvM4uCphmSrIlGgvKguuW30iS0gblZkMt3bseVVvoLajgUgg4wAqAydtuzwK7wKiqrjPN0J69PW5cXSVgz4j3qrNK2wkIx6FC3LC1pH3gTldvjAGUVOt031OW7nqMa6vwUxBgmjV/AEdAxYnPESu1BDFakhrckT0Z5ZXzNZQ0NiiVO2v/M1Bvo20ku8GRImQXpGOHlIxo/Md+GMuI92pr+oVTgxDslulGRH56HlDekBrgvkCuQ15kE5reIBoII8xqa9Sq
bJ+Ibkz45lxznhJ+CHgLW6NJekuCQWPX1F34cGN0QuOv9ACMHRoRurAg1fqXSQjDeLr6vXABgAQXB5TKEVhqlhu0KnGKjHASdQoAVhRrBHoloCvi/es84Wc17FdAviOhVN/1A/hiCa9cpnqHaNoPXVomQXVpTX4kUgKV/vEc/Ygzj6mozR6tl31lmFtKlXlMFIlPTKME3k8YwhGOBZoP2Kk2Y5Ha7wSPCBl1r6odGlwECiWmH/UzIduMFowAhhjQrmPogy3A0MAeQDTzspoui5EAZINYt2v8NA9PEihUnbR+Oa4fterNwHtNRRMcC9M0p2YA+CW5Sx6YKJ7sUBI2fCQjKDG7pjxFVUfUs/8I/bTLgaowlNXU9GAQ+wZznuAUtEcUXafdKVtwl6LsC/QRJ0Al8gzgZUBr5c6ofK91QQOpkgtqIenw4PcAkqJr3LC1NSBx5E2ybIBfNDiT4lIsNE4+TjdcMAaUOPYKfT3IZp2C7QYiSlDUFhw8kAmVfDAEwkJoOcY/wA/631k/0wWL/U3lhGSVc1rd+3uUV/4JOsoNQO/GezY0x0W82zVD4v3FohhkiddvE4KPOXBmCEItF3dHxxe2MFuL1ZyRIfdDOOK0rViKS4IwCGJpTPmqgjxM6jqtypyLHe3UC/w+Ght/QbbLQNQKOJPD2hSgIxMPSGqQib6kUaoBoOxkXjCLipJWkPeYU7sqsVzbsKZEOPIpLRYdUQ1Ej9Qu2UjsAdcqBjz9NfvC5aDNhUh364Fwt33DAYEy6iDJsWhQyBlhYqirbDkcek8kaWoCoG+hhfjRmH0ODPasSXMriBXoOAYXk8TkS/nQ5QgO68ACu8H9zSrdzqFMhHnNxKAwVI8fuxCPwyDTG4oKNaplpPnq6HG1EQqFIgPZA4rBEBxFGi+lA3l5tflPbSzWPkk4YapGDQgk7rIyD+7ShouLVkXGKR9jjcSYFoz5hEiubsoyUsOiQwqJ+IZUGkA2GUhBtAFMwJ3Im/unRByAc3XEf3WBFk40QVooMB1It+xX9YfJKCxFCgvZ09QnFTLjp3j5akb5GE5DbKTw2wDBnI3dGmvGq3KU4H3eD3epDTWRKzU8B0s0SEACaAHLBbU17IMjXHjyNBiHRLoESJB4uRBwclduL3ZrGK3/Dq2FtzWbIRnAU7rzqZAAbcBatAHaGWsvjd1mqatyB8d4c3AIj2mAAj1ijzg2IzrZvUYue485VgdDi4AwieRZDNjiELmhb1K8mnkRatgnOBb3UNRIRHVDveFm2J8uv9I+Su0cebCEH3tDGMC+thwuh73CZSmBbmJzzmiCA31TVAzOz2s1GkdiyVNFg8kWfYFhAkvTEAUH/x9zRaxw0USS4AGwAkmARyUvvUgqbHGnICShK9OPmcIOSGfUAzQI/cBuIWQsBTRrlt9B/Rhao1gfM+YgIPOOXwgtlLYjeBiz38wyZDvDi7LnbD6cYlRYsqjajJTAtpF0uC4I+vrhIUmuTp1NVSlEa+VC588QHWfH484eevaYNfnqDs0f2aHOI9aDAeO0PSqNcFX4FOGDB8ikgYRRCL2gvi920/XPQ0RuHHKOT0DqjnlZAkuQGKW08dkEEJ1p4hGL1FjWgUNaomRZKlUAyNu6i8iz56nJdcRmWwlCmTia4wVH/AwWJCEZO4Lg26FmsoCI+CTM3vPyiKtTzVXtKB5OgmIoA0b01Gcnt2o6hnJbnnaY9vUAqJmFKA8SEicuSC6C3JMRoxASpcPRAU4CzZlIgMCE7NU3iwXJG6b5d2ReQQ8qx5nmamsusT3gYUp6aT9Q4nJY77wBGapnKUCr4okCQMbiIulOk+kg9dlw+AjQwFZY8lQRQggWhhTIirBee8KdyMq0URNbgaS4epoc5QBzIy0fAYTZJdYyfsB4SI4wdOqYuyaLuA78eA0CoeGQ2kp1o1MJgNK4/8eeVJiuTs7quf+30Mr6kp3VB/FYkl1+7V1P15/k0wSSkdH6ZmNJrM0aLoJUNC+ZddT7fkF3LQJGCcgPeOt3BiDzxjHYgylApQiPk6KvervZ8PJXsIkWoBrDVakj8nPlTB0cCUyEHnjshBkfj0jiyQE5O2RxVpSK7dCRZWR9WEndeMBboSJQ2mPNhoGs8SHDSI09BlbfRsNhCgor9SlAIGkqRZ+33DoIv+wvKhZgtGsOPsm2+jlATgWsNhRGwWmAa2yjFkxap5HCq9CakALxutPFrQoB1/plbFs/gOoIIRhrHoaBleWtwdZwPWyOC61dz8DK1i94emDLgC00y203zg6KtpRJRqItAq2gTlXWSjoGykSK8RXRdhFVoL8F6jd2JGp0Ju4sAJ1vHshB19F81S1dzqTECqIHk5uIqSn5bKWKJ3cE/qtVNHNDpFz/OkK6OkEFqO68LubcesCfmA5w4rzNERTHKfCxlESh30931zoSNeHzsj+gNYj2KAj4LOKOTsIUCI7cZNBSCfyehy1PymIqrEDZGUPkN78Q4AFeookiC0PnpSU78kw4stlWRZW24AiKJmWbtgpHN1GZD92A03R7ASJowKbUFD6KjZjKcygM6KR/8YDSqfip1EDPRZ7tAMmBeijQKRm4OB8Gg45REBY0g14QseLVAILFR9Coq8rYRDRiYiBeHkpPS5IxQluo+Sh9yvXAP3oxFpLxoVzPHGnyAWAoGkkIGmSYKmtJXyhFgd9Z/LGVrwLhdIuev1FxIBTMaxD+6M6HGn5F+0FQJaI1AfafYs0bW1t+cuWZiqKMApAsOmERZM/VI2ROV81QzebmmX92jYHkgdTI8XzCNxpkmB80pu1ZCis6g3J6XWsJWy+tDBwqkSfpzM6lYpR7RGAGdk7dHFVg4mIJXGrWmn5CZWjVLogh/ellIGbbAY2GCxNtgCiaPXrJ6J2GOFiG8oKS5tDJY6HXZSEnCV0MpGLqJr4DTDT0h61Pm22D6vIRjUStWksWFboECNxkHjmEbB+GmHKQ9B0VoYqoVN0BaBJADusqlPkjahkG9kEKKCBMFbND7Uw1egC6HpVsEEYXOBaopckp/U4VWXfo+AB4XWuPhhSMe8uGHBLEQb/1y7tgc+I05XM+9NCyBngV7Ci8URtChAQB7QQdqo0I2Eabqz8YobywpJocAN+Y3Ub67jxydPEQCi/Kge1KynEEte+o56832dMNFEIYL1B5MgRSczrD0YhIq2PRyxycTUNFPlUqtRrHxCDk3KVDmmjqh+7Td+BgtWKJzeUY81o8VyMO7Xo/w9ziLxIm0IaoaI5ntTXXQdXmf3tGvYfmn3C5/ew+Iz7++jmGGjIFkuqhbNSJ66trkurIDSrXJUfHATr56BoIaqpfRAtP06/5BzeIKqAtqTuiq6bZjrq4tGbhFGQ6NLneToEUvLayyuIc6nlD2GAnCbQk8CjTWFFLTZHR2e+8hX0cUo8jCM9QeRztICREYaTPQ1NfUR6TeQCnCv5VH4UGFbSEBtKT6RgQ7uiV/82zm4eZwhL0ipNk2UjvZyCJSoGpcBGK6aDSxAfj34x0Eu4Jwn4gV1TWxmRHePI+BjeX1InlO1F76VpsyzNfwb2
qQBoOdzLsywovXJh6qNWaTflsR4eiMAulZr9dp31+ZgNCCYSk+roTG6th8mulF7o+QK7g+aYGhcywMikKXRpXQuLiyFlKZppyPF3aQZsQewX3lb8mPSC40Ig1hYUe4Rj+kBUBQ6nK9hS9BeMy+Tv/5DlP87Te7+tihXQ7xB1UWxmLbVEJe0wQXZLOMgCS4ClbRqhgza+amNooX9hAM0bjxWABJ4Bei/UdOehozG4byxDpIiJtbhuvafr584PLTCBJia9hMyrkxDi/jhpaGzOWQSQUcrBC1atIg7xSNRs2RNzdlQcIjoBl5I+i7YCTb7qKuLTICTpg5MABnAukQkq8RLgpRTw2T6ZGFFgdAOrHnNq3RoDtyChmglBM7VWGfnugx0DFSW4F3bniyYUsGrweFmcw0H5mU0JCIE3YMCGJQIr9b0e0h0XWz60PwYv5q0dO36csNIz03AsSoTqwpBxltZCs0M5eLdNCbiaUjjgiVBqZUR0Hc0Y6k5mk5paLaHqYifUxQoRXRxRLE9Vv1U93twaJMW3T8Olfl+O5LcqHYkyy/5oIDe/iQYAFZ4B8rz8x8ZaTppIntDE0T8FLLtsxtJ/TxvmwauRxOmBV9py5TF7lhd0imRZ77BAm1yaudYOU2IDEFR4DHCoxHpV9xHleLv0r7qIBMlQU9WORFkZX8WMslUor4BemIkjoUVNPcvp2jTd8Q3P0Crwn0l+Ellc/cIbTBCGwUV9DgYId14LNom96iG25OOAoCiYNXoMh80hdeuI6wiRK7Dpf1G/ryNdl9OWHuT5M3iC/izNBw+2nt+jVp1woJvazmfTZ+tkXqIqCdX4KalM1KIL50LaVdnXAj41s3nO8dnw26hC/CCkfhxTeI5Iq8MmkVPQBw6QiTZnppZYBW0pam5NME5q2AFCWgfMjW4tKMBhzYOI8HBJAJ9epNWtavkEB1oIiA8xEBln3/1fixR7sw+UnRlXIoGj/A0WEF/ulgJMRyRwLCuEYumrUhpdHuBudB7lOU2HO/2OnqjCZSAyutQEOhY8Lt8gzs6YvnUEhSRZIGxiwDVkuB6jpqyb1oS7f7WhQ4Ruzfsc4vokK5HNc8WhGYcEHQ55PhgekBMmz06dDSoVegmICNbx6IhAN8h3+d8kfFzq8h5KzcDtZ4h8F4RBwAuin4gPA7GnHaGlpEkmuXAnzKa+Ca7474u1zEPar5qLOs2GUmII5k8AAelgxrVjn3FKaUA9TY4ovqF74aljygfkofpgKy09fKiYDgdaSPUkosqFa/DN10nSgG7CAXmt3cpxDioCvSMpO4jAXFG7trKMQrPAZTNuqQwDgKshzwQpJiArsNBOrCUKSPNxLkU1LZRuZSzUVJ+8ib9M8xrMK0GOxk7l5pP9nYj0QgdMugRM5d1MK5RMsmifK28OOXmU9VUk46H3oHU4Lgo3S2ITxpxIbL0QdfBtKwMm4BP2xgs7brYm93YG1MfCjBqBoK2lxfh1iQqACcEaJG19BJ1Z3NBjVpEB8Ch13FrQF+YJGVgGkOh7DXan8Y9OnERMlbHJNAf7TM31dmaFnWmq2lgWqUFkaLrA73gBj21S9HZH+0g4iCOyynhMOghndTBcyC1dc5Fh+OQyFHHE6pm2nCDgd1TWDZLfDM+DbaztTd5dpJEVBJgjSCRhFcfFVpqAYOIrByW3L3w62LZMSr9db+CbZDNQFohh5p7942OfOOwKjBqB4EJOuoMxMZF1cRtdQ3dAEXCizdCjJ4gFTzim5b4W7EQmGOZBcReDtzNngnXT2N0QkqhahqFErwaXjUV0W0BW+dJro5MRJ1BUWUvZTFy6xbwQAdU2dp97p9DcYhUpO/UTsXNKi4a50j5Y6W15RyW7CAiFum35dHpWRZ6NdrT0Wt0tX+nELXjDQ/LT7HwBeIjQVOMUTynMwQyYJrBX1ZENWkbg1ZAWQIKWafOLsBRtywc6aeyAbf9jhXRr5oyalNLvlsKx2C+66A0fM2iOlfqkp7YMNsAQqALpHKE7iOs0exGFQqe4bMT9DUaMQJKY7iMzJSIR1zjrDCWwAZgoAMY8KHmBvHClYt6aAJ/FnmPzshC7+AUMYzam+iOiE1YFcc8pLz6G+4/Jq6laQhAYYd+oPUi6fGxwzDubdiCZHQJkaBvERHkVNQS6cJ3jhMZ/dmMA8VNgiTzXLxgEQHjWsj4pGaR+Aq8RjHUNA5yvkNBOLxKMor2e0j7FSoJzTWfaOgb2JK0a7MGJB8sgyd0qAKPr7mN366mhLLioVVQQvLQrzEzPbXf1LcCSiiWCwvQhrzD7BgRnU4F0zrNRgE02+9PGC66SWOXraEtt/6ZM9Zh4lnK/tDBAHwAvxDDEtYzSl8LgZF7pkMSmBoN4nW2RRNRIPveiVqoBdiEJa/OR0DNsjZbG4s1UElD0wAEW40K3cA3XUeHQnrveGT9zM2jobAWLFzQmiu+I5IBH4YWvtpzRldoiL0gxwi6wQFwW3Xh4Ajq0HbDpoqG79g4fITXZmEbHUQHD5YOp0UNnkGHiyLAGGqCCqPWjxZwtGH/OpbnCYEVRFV99Jw0q/8cU9aBS+pQs7e3bmT+85lj6RQ1XOkG/BMwpohS7YMg5Bcij06emkCajqBunSCoOuBDJ38kiA4iaFL6Dj8W1EZxucH4KSUdXmksXGc6n3qXuGm9T7TrE3tAkE4weERyScSfoKbhaXxb9Nh0OgwlY/cQMkuE1bd47QiPq3TyPiizob9DAMtBR5KnmaN2B0mrdlKlIW/JFCb+IIywdSIOTfz+FqGDvpVQJEgYs8xrMKoZe/gcs0cbLR27MslhDLPDHRBsgD7g8LSnUSp2vki1d2rqm6f6F3nKHwXwcjOvu5/UqJJfamSLxzyrfYoFDyMZTCFb0AFsgFcDL1ngonMB2j3zAeZ2mgfwL4vRDHJFbuKQgC48nfYUqKalDRR4AeOtv7gAy1Cx71gMCscbTIzTc/5dGch826kT04Iv1y5k0k5hvHhRAAzo1TSA29bRBB30kFbB++eJAdtRbxQo8CDfL4/SSdNB0dIhKIfzLhKOwDIP3h61qqVDHOU5DC4Wtf8BtLtMtBOv1581HB2g29ql1Vng7uHZi681GlVbkpvk42/vRNikohHvmw4PXS64FrUhDHdh+OjMWV5jzxmwJDrV2jX3pHWBfkABzvUKn44Gag8Tzeznw1OHNEKiDhk+HIIsRJcJ35/YBY0p4PT6djC1s6kN4NMLkiFglQwxRMkBcQ5aRUkdzbU0qoQcIVKNWXFMMIemrdI0ptm6/p4haTqDHjhNO5FAxWDZgKQTcGmnyPj11q8pJB2PfBtMmjjprDxcj1pGicHc6m80kTZQjYqJgAx3yq0diPEUep2XaRMDZxHkTWB8ygNzGezpoZDQ6yjwtwFPBb397tF1hAcgSe7Kz3dcPxSeCjGRj+f2gMmI+gHvTtRBfB2Dh0GvTs12RB6dR7XQnnctnaV1ZIQVTh3/PcC/zn++dDbgagQdyDjJC4jnkBO6Oi0wtT1ZkFkeNU792MrJoUAFNyDueWcQlvaN6bT6TgxxEW3NXumY
t0dbyrulXBA1YvCM35o60emS9sSRT7ED9KCQprw0Q5DxBIKwKAOQuIOSTFofKmlTIqNG7LeOPAgasBhOk0/79BtikiIznZqjU27gx6n4I4Bb1NC1FZ/hAsLq9ccWKOmIYkF9lZ0drYzlx0V1noYx1p/yDFkamKNI+GovmybWJYgLfJMQrKsvHYk/UCqLQjYcJ2GBgChIthcm8u83xQiMqRJ0ZoEGRF2WfAMCiiJIhS5UCyS1bEAomP7y4GiegRmc79RsmUbc21YFavRJILBvKIWa6UMdAl1Uc3zgwv0hBJYGIXu6zB3SNheQ1V+RafQE4VOp/wdi1canzzj4UAAAAYNpQ0NQSUNDIHByb2ZpbGUAAHicfZE9SMNAHMVfU6VSKg4WEXHIUJ0siIo4ahWKUCHUCq06mFz6BU0akhQXR8G14ODHYtXBxVlXB1dBEPwAcXNzUnSREv+XFFrEeHDcj3f3HnfvAKFRYZrVNQ5oum2mkwkxm1sVQ68QEMIAIgjLzDLmJCkF3/F1jwBf7+I8y//cn6NXzVsMCIjEs8wwbeIN4ulN2+C8TxxlJVklPiceM+mCxI9cVzx+41x0WeCZUTOTnieOEovFDlY6mJVMjXiKOKZqOuULWY9VzluctUqNte7JXxjJ6yvLXKc5jCQWsQQJIhTUUEYFNuK06qRYSNN+wsc/5PolcinkKoORYwFVaJBdP/gf/O7WKkxOeEmRBND94jgfI0BoF2jWHef72HGaJ0DwGbjS2/5qA5j5JL3e1mJHQN82cHHd1pQ94HIHGHwyZFN2pSBNoVAA3s/om3JA/y0QXvN6a+3j9AHIUFepG+DgEBgtUva6z7t7Onv790yrvx/xInJz/ZaLfwAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAATrwAAE68BY+aOwwAAAAd0SU1FB+UCBxYME13qmlgAACAASURBVHja7J13eBRV+/e/W7PpBQLplFCSQCAgHUMRCFWkCYIYioCCBRF5kEceiCAiRYooiBKKIEhTeqSFAAm9EwhFSiCV9Lqbbef9wzf5EXZ2dzbZTb0/1zUX4cyZM2fOnNn5zjnnvm8BY4yBIAiCIAiCqDUIqQkIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBEECkCAIgiAIgiABSBAEQRAEQZAAJAiCIAiCIEgAEgRBEARBECQACYIgCIIgCBKABEEQBEEQBAlAgiAIgiAIggQgQRAEQRBErUVMTUBUNtnZ2Th+/DgiIyORmpqKlJQUJCcnQyAQwM3NDfXr14enpydCQkLwxhtvwM7OrsLqlpCQgD179uD+/ftIS0tDcnIykpKSIBQK4enpCXd3d7i5uWHAgAHo0aMHrKysKrTdIiMjeeXt0KEDvLy8LFIPrVaLq1evYvfu3Xj+/DkSExPx7NkzWFlZoUGDBvDw8IC/vz9Gjx4NHx8fi7ZJUVERzp07h8OHD+Off/5BcnIykpOTIZVK4eHhATc3N7Rt2xaDBg1CQEAAhEL6BiYIopbCCKKSOHnyJOvTpw8TiUQMAK9NIpGwt956i924ccNi9VIoFGzlypWsS5cuvOsFgDk4OLCxY8ey2NjYCmm/devW8a7boUOHzH7+wsJCtnDhQtaoUSNedRAIBOz1119nf/zxB9NqtWatS0pKCvv888+Zra0t7zZp2LAhW7t2LSsqKqKHkSCIWgcJQKLCuXbtGuvdu7dJ4opLTIwZM4alpaWZtW7Hjx9nfn5+5aqbUChk77//PktKSrJoOw4bNoxXfaytrVlhYaFZz33s2DHWpEmTMrdRv3792JMnT8pdD7VazRYtWsSsra3LXBdvb2925MgRejAJgiABSBCWYtu2bUwmk5VLYL28NW7cmN2+fbvc9VKpVOyDDz4wW70AMGdnZ3b06FGLtKNCoeA92vXWW2+Z9dwLFiwwS/vY2tqyU6dOlbkemZmZrH///mapi1AoZAsXLmQajYYeUoIgSAAShDmZPXu2WQVW8ebo6MiioqLKJf7GjBljkbqJRCL2ww8/mL0tT548ybsO69atM9t5Fy9ebNb2sbOzY9HR0SbXIy8vj7Vu3drs9+uTTz6hB5UgCBKABGEu1q5daxGBVby5urqyZ8+emVwvrVZrMfH38vbjjz+atT1nzJjB+9yJiYlmOefGjRst0jYODg7s3r17Jgn2AQMGWOxerVmzhh5YgiBqPALGGCNTGMKSREVFoU+fPlCr1RY9T/v27REdHQ2pVMr7mC1btmD8+PGWN7cXi3H8+HH06NHDLOUFBAQgLi7OaL527drh8uXL5T7fs2fP0LJlS+Tl5VmkfTp27Ijo6GiIxcYdEyxduhSzZ8+22L0SiUS4cuUKgoKC6OElCKLGQj4QCIuiVqvx4YcfWlz8AcDly5exevVq3vkTEhIwffr0CmuHUaNGITs7u9xlxcfH8xJ/ADBgwACz1H/KlCkWE38AcPHiRaxYscJovhcvXmDRokUWvVcajQYzZ86kh5cgCBKABFFWtm7divv371fY+ZYtW4aCggJeeWfNmoWcnJwKq5u5xMvBgwd55x06dKhZhPXRo0ct3j6LFy82eu8WLlyI3Nxci9clMjISERER9AATBEECkCBMRaPRYMGCBRV6zrS0NKxZs8ZovqSkJOzZs6fC2+THH3/E8+fPy1XG8ePHeeXz8PBAq1atyl1nPu1pDrKzsxEeHq53v0qlwvbt2yvsXv3+++/0EBMEQQKQIEzlxo0bePr0Ke/8Li4umDlzJo4dO4Zbt27h0qVLWL16NVq0aGHSebdt22Y0T3h4uEnT0l5eXggLC8OJEydw+/ZtnD9/HuvWrUNISIhJdVMoFNi0aVOZ21Qul/MWgIMGDSp3pIucnBzs3LnTpHZatmwZzp49i8OHD2PmzJmQSCS8j//ll1/07jt//jwyMzN5l9W8eXOsWbMGMTEx+PvvvzFv3jyTosgcPHgQSqWSHmSCIGomZAdDWIqwsDDelpetW7dmycnJnOUUFRWxqVOnmmTJacgiWKvVMh8fH95ljRo1iuXn5+stb//+/SY5Iu7YsWOZ2/To0aMVGv3j8OHDJjl3zs7O1inj1q1bzNvbm3c5+vrBrFmzeJcxevRoTufXjx8/ZgH+AbzLOXv2LD3IBEHUSGgEkLAYfOPUAsAff/wBNzc3zn1SqRRr1qzBm2++ybu8Q4cO6d337NkzPHv2jFc5/fr1w++//w5bW1u9eQYPHozNmzfzrtulS5fw4sWLMrWpoet6GWtra7zxxhvlvoeXLl3iPfK3Y8cOODo66uwLDAzEpk2bIBAIeJV1584dzvSkpCRexzdq1AgbNmyAtbU1576NmzbyHhl98uQJPcgEQdRISAASFuPRo0e88gUHB8PPz89gHpFIhLVr1/KeTtQnIgDg3LlzvMoQi8VYt24dRCKR0bwjRoxA69at+Y66Iz4+vkxtytcwISQkhFMAmQrfen744YdwcnLSu79Xr17o3r07r7L++ecfzvSUlBRex0+ePBk2NjZ693fs2BE9e/b
kVVZ512sSBEFUVcTUBOWHMQaVSgW1Wo2CggLcuXMHt27dwv379xEXF4fnz58jJycHOTk5EEAAewd7ODs7w8vLC/7+/vDz80Pr1q3h5+cHOzs7iMViiMXicq/fqkw0Gg3vF/bIkSN55fPy8sKwYcN4rUlLT08vtwAcOHAgGjZsyO9LSijEZ599hgkTJvDKz3c061VhpE8cvUq/fv3Mch/5jlR26tTJaJ7WrVsjKiqqzOfk25/atWtnNE9AQABOnjxpNF9GRgb9wBEEQQKQKI1KpYJKpUJGRgaioqJw6NAhnD93Hrl5uVCpVNBqtVCr1WD/RlxBsc/tzIxMZGVl4enTpzh37lyJ4HNwcECXLl0waNAg9OjRA3Xq1IFEIuHlHLeqkZubC41GwytvcHAw73J79OjBSwCmpqbq3cd3ZJLviFUxo0aNwvvvvw+tVms0b3Jyssltaor7l8GDB5vlPmZlZfHKp2/6/mVcnF14laXPFYxCoeB1vIeHh9E8devW5f2MEwRBkAAkSl4KRUVFuHv3LjZt2oRDhw4hOzsbSqUSKpUKxoKrMDBAgxKBVFRUVCKa/vzzTxw+fBhOTk4YPHgwJkyYAH9/f1hZWVUrIWiK9WS9evV45zU2VVyMoRFAvqM6zZo1M+mara2t4e3tzWvatCyOsU+cOMErX7t27XiJoPKIMUs/XwRBEAQJwCqDVquFQqFAbGwsfvjhBxw9ehR5eXlQKpVGRR8fiqeSVSoVCgoKEB4ejl27dqF///749NNP4e/vD2tr62o9NcyFi4sL77wODg6875U+0tLSeJVRv359k6+ladOmZV7fZ0yI8ZmyBMwX/cNUIW/O54wgCIIgAVglUCqVSE1Nxbp167Bx40ZkZWVZ9OXIGENRURGUSiV27tyJY8eOYfLkyfjggw9Qv359k3yrVWVkMhmsrKx45+drSWoIvlOJrq6uJpdtrpG3Vzl9+nTJSLExhgwZYrbzVsbHBt+lAwRBEEQ5ft+pCYwLsYKCAkRHR2PIkCFYvXo1UlNTK2xkhDEGpVKJFy9e4Pvvv8eQIUMQExODwsJCujllhG9MW1OEaTF8LIbLAl/3Lx4eHrytkflQGdOx5hD5BEEQhGFoBNCI+MrNzcXGjRuxZMkSpKenV9roBGMMcrkcN27cwOjRozFnzhyMGzcODg4OteqFWa9ePXz55Ze88unD3t4e+fn5Rssoi8jv1q0br9FZU6KbMMZ4u38xR/SPUl+IlTACWNOWOBAEQZAArEZotVpkZGRg0aJF2LJlC3Kyc/413qhkNBoNUlNTMX/+fMTHx+O///0vnJ2da81L09PTE4sXLy5XGfb29ryscHNyckwue/z48Rg/frxZr/nBgwe8Q+qZ4iybD5Ya0SQBSBAEUbnQL60e8ffixQvMmjULGzZsQHZ2dpUQf8UwxpCTk4NffvkFs2bNQlpamlmMUGoLfK2O+bqLsTR83b+YK/rHy8hksor/KhXTdylBEAQJwEoQf1lZWfjqq6+wZ8+eSnGDwVcEFhQUYNeuXZg3b96/IpVEIC+aNGnCK58poewsybFjx3jl69Onj8EIGNVFAEqlUuqkBEEQJAArVlTl5+fju+++w+7du6us+Hu5voWFhdi+fTuWLFnCa10bAfj4+PDKt23btkqPBJGXl8cregZgvugfL8PXRQ+ftbEarcas5yQIgiBIAJoFuVyOPXv2IDw8nLelaFUQgQUFBfj111/x559/Qi6X0400QteuXXnly8rKwpgxYyq1TSMjI3lb4por+sfL8PWFePfuXaN5+PpH9Pb2pk5KEARBArBiUKlUiI2NRVhYGLKzs6tV3RljyMrKwvz583Hnzp0yRZmoTXTp0oW3H8Vjx44hODgYV69erZS6Hj58mFe+1157DZ6enmY/v6+vLz+hetLwdLlSqcTZs2d5lWWKhTRBEARBArBcAio7OxtffvklkpOTq+VaOsYYEhMTMWfOHOTk5NB6QAPY2dmhZ8+evPNfvXoVHTp0wPDhwxEdHV1hbavVanm7fxk4cKBF6tCjRw9e+cI3hhsUeN988w0eP35stBxHR0eTQ/ARBEEQJADLhEKhwM6dO3HlypVqPXqm0Whw8eJF7Nmzh3fUiNrKxIkTTRZjf/75J4KDg9GqVSusWrUKmZmZFq1jXFwcEhISeOU1Z/SPl2nfvj0aNGjAq32GDRuGP/74Q+fZmj17NhYtWsTrfKNGjaoU1zMEQRAkAGsZGo0GiYmJWLlyZbU3oig2Ylm+fDmSk5MppqoBhg4dWuawbbGxsZgxYwY8PT0RGhpqsVHBAwcO8Mrn7u5u1ugfLyMWi/HFF1/wypueno7Ro0ejVatWmDBhAkaOHAlfX18sXbqUV1+USCSYMWMGdU6CIAgSgJZHpVJh69at1Xbql0sEJiUmYceOHRUWrq46IpVKsXTp0nKVoVAosHXrVgQHByMgIAArVqxAVlaW2ep4/PhxXvlejf4xZswYNGvWzOh2/vx5XuV/+OGH6Ny5M+963759G5s3b8bu3buRlJTE+7j//e9/8PPzo85JEARBAtCyFEfV+O2336BQKGrMdckVcmzcuBFpaWk0CmiA0aNHY+jQoWYp6969e5g5cya8vb0xffp03hav+sjOzsaZM2d45X3V+vf58+d4+PCh0Y2vdbNYLMb+/ft5+08sC++88w6++uor6pQEQRAkAC2PWq3GgQMHalwkDcYYUlJScOjQId4uRGpl5xcK8fvvv6NLly5mK7OgoAA//PADfH198dlnn5XZhcyJEyd4+daTyWRmj/7BhaurK06dOmURC92xY8fit99+oxBwBEEQJAArhqKiIvzxxx81avSvmGLDFpoGNoy1tTWOHz+Ot956y6zlajQarF69GkFBQbh48aLJx/O1/g0JCTF79A99eHl5YdiwYWYvd+rUqbzd8hAEQRAkAMuFSqXCvXv3cO/ePV4jLdUNrVaL27dv48GDB+QX0Ag2NjbYu3cv1v601uxi6sGDB+jduzdvH3jF4vHIkSO88loi+oc+vvzySyxcuNDs5fbu3Rt///03dUSCIAgSgBUjkM6ePVtjI2cwxqBQKHD27NkaKXDNjUgkwtRpU/HgwQNMmDDBrK5I8vPzMXDgQN7OpGNjY5GSksIrryWif3Dxww8/YMmSJRYpWy6X4+233640Z9sEQRAkAGsRGo0GZ06fqdFr5FQqFc6cOUOGICbg6emJjRs34vHjx5gxYwacnJzMUm5eXh7Gjx/Pa0r+4MGDvMq0VPSPV3nw4AFvVzDlEckTJ06k0WqCIAgSgJajOH7u5SuXa/QLR61W49KlS8jPz6fIICbi4+ODFStWIDExEb/++ivatWtX7jJjY2N5jaIdO3aMV3kDBgyokLaYP38+7w+l1q1bY+/evYiPj8edO3ewZs0aODs78zr21q1b2Lp1K3U+giCICkBcGy+62PlzYWFhrRC6ycnJcHZ2hlgsph5vIjY2Npg0aRImTZqEK1euYO3atdixY0eZDYfWrVuHOXPm6L0XGRkZiImJ4VWWpaJ/vExOTg7++usvXnnHjRuHX3
75BVKptCQtICAAI0aMQO/evXHnzh2jZezYsQMTJkyosf0pPj4eW7ZswYULF5CUlAShUAgvLy907doVkyZNQp06dYyWcffuXWzbtg3Xr19HSkoKRCIRvL290b17d0yYMAGOjo4m1enZs2f47rvvSqUJBAKsXr2as5/m5ORg27ZtOHnyJJ49ewYAaNCgAfr27Yt3330Xtra2Osds3LgRV65cMale3bp1wzvvvEM/QgRhQZFQ6ygqKmL79u1jdnZ2DECN3uzt7dmBAweYUqms0DZOSUnhVT+ZTFbt+k9mZib7/vvvWaNGjcp0T44fP6637B07dvAqw93dnanVas4yXn/9dV5lnDx50ui1HjlyhFdZ/v7+rLCwUG85169fZxKJxGg5YrGYKRQKzjJ8fX151SU2NtbodX399de8yvrkk0/M0mfUajWbN2+ewTZwcHBg27dvN/i7NXXqVCYQCPSW4eXlxSIiIkyq27x583TK6dmzJ2feY8eOMTc3N73n9/HxYWfPntU5bvTo0SY/J2PGjGEEQViOWjsFHB8fXyuMI7QaLeLj42kdoBlxdnbG559/jvv372Pbtm1o3LixSccbWuPH1/3LoEGDKiRm7uXLl3nlmzhxIqytrfXuDwoKQq9evYyWo1arcf/+/Rr3ezN58mQsWLDA4FR6bm4uxowZwzkNrtFo8Pbbb2PdunUGl3MkJCSgf//+CAsL4z0bwnW+8ePH66SdP38egwYNMmig9OzZM/Tr1w83btygHwqCqOLUWgGYlZVVK9bFaZkWmZmZ1NMtgEQiwbvvvovbt29j1KhRvI+7efOm3pcxXwH45ptvVsg1Pn/+nFe+jh07Gs0TEBDAq6x//vmnRvWTXbt2YdOmTbzzf/DBB0hISCiVtnbtWt6xoQHg66+/5pU/OjoaT548KZXm6OiIt99+u1SaSqVCaGgoLyOmgoIChIaGkvcBgiABWDXJy8urFQKQMUZGIBbGxsamJCYwHy5fvsz5crxx4wbS0tKMHi+TyXiNppmDjIwMXvlcXFyM5nF04Lc2ja8LnOrCt99+y5nu5uYGe3t7nXS5XI7vv/++5P9KpRKLFi3iLKN+/fp6fVd++eWXRp/7zZs366SNHj1aZzT39OnTnMK8Tp06nOsWb9++jaioKL3n9fb2RqdOnUq2Vq1a0Q8JQZAArBhRJJfLa40ArOnGLlUBiUSCuXPn8spbWFjIKaz4jvD06dOnwqJ/VMboMR8RXF14/vw5bt26VSrN1tYWkZGRSE5ORmZmJj777DOd4/bv31/y96lTp5Camlpqv4eHB65cuYKUlBTk5ORg1apVOmXExcXh3r17euuWn5+P3bt366RPmTJFJ+3cuXM6aZMmTUJycjKSkpLw/vvv6+w/efKk3nN/+OGHOH/+fMm2cuVKnTweHh70w0IQJADNj1QqhUAgqDXXSliebt268V6XxyVy+Lp/qcjoH5URJrGoqKjG9InHjx/rpI0aNQo9e/YEAIjFYixbtgzNmzcvlefJkyfIz88HAJw5c0anjKVLl+K1114rKWP69OkYNGgQpwjUx549e1BQUFAqrU2bNmjTpo1O3hcvXuikLVmyBBKJBFKplNO9UU5ODu924loXW1HLHAiCBGAtQiAQwM7OrlYIQIFAAHt7+1ojdisTmUwGPz8/XnlfneZMS0vjHTPY3HGLq5oYq0lrx7KysnTS2rdvX+r/YrEYb7zxhl4BlZiYyEsccfULQyKMy/hDnwuevLw8nbSXp/25lgCYMvPw6tpXOzs7dOrUiX5UCMKC1FrHcA4ODrVKABIVg5ubGy9/d9nZ2aX+//fff/NaktC2bdsKif5RDJ9F/+ampkcDqV+/Pufv0asUW+7n5uaWSpdIJJz5GzZsyLsOT58+xanIU6XSrKysMHbsWLPdk5eP+fzzz0sZSnXo0KHk78ePH+tYfvft25dmLgiCBKBlRJGnp2eFuNGobIqdxNIIYMXAN+rFq1OrR48e5XVcRUX/eFWE1PRzViRCoe7Ey8CBA1GvXr1SacVhCO3s7EqlazQaMMZ0nukmTZpg/vz5pdKCgoI467BlyxYwlP7gGDFihN7+W5b10i/fx3bt2umNpsM1/VtRRk4EQQKwliEUCtGwYcNaIwB9fHw4XzqE+eE7avGyAFSr1Thy5Aiv44YOHVrjxVhttFgPDg7Wa0Vet25dnXvyzz//oGnTpqXSGzZsyMv/n0ajwW+//aaTPmnSJIMfzeYQulxwrX3lWs9IEAQJQLMIQC8vL0gkkhp/rRKJBF5eXjVGAGZmZmLdunVG83l6enI6s3327BkvP3MeHh681/OVeqB4htt7eW3drVu3ONeKcYn5mzdvIjY21mC+Vy1G9b54jx7T8TcXGhpqOQHIU0OQy6LSNGjQQCdt+vTp2LdvX5mmSc+dO6djnOLr64tu3boZ7Htl+e0xRkFBgY61cIsWLeDt7U03niBIAFpGALq4uKBJkybIzMyssVNOIpEIzZs3h7Ozc40RgPHx8bzcrQQEBHAKwF27dmHWrFlGj58yZQrWr19vcv34ipeX/ay9usZLHxqNBhMnTjRbWy5Zqmu5+aoANCuk68rEsGHDdFzFREREoFmzZpg9ezbee+89nWliQ3A5pR43bhyUSiUkEgmn2OMSmkqlsiSda60onzpFRUXpGBqFhITQTSeIitBCtfGiBQIBxGIxunXrVqNHASUSCbp37857VIooP3wXyzs6OlaL66mMvkPLFUrj7e3NaSUcHx+PadOmwc3NDZMnT8bVq1eNllVQUMDp+2/hwoWwtraGtbU13njjDZw6VdpAxNbWVueYv//+u5QgfRVXV1ej9Tl8+LBOGk3/EgQJQIu/ZLp16wYrK6sae41SqRSvv/56lV7rWFWn+8paL76uL6qLAKyMvlMb1uaayoYNG0qMQrhE3YYNG9CuXTsEBwcjOjpabzl79uwp8S/4MsUxilUqFU6dOoXevXtjx44dJfu5nDKPHj0aM2bMwIwZMzBmzBid/a+//rrRZ+xVAWhjY4OuXbvSDScIEoCWQyKRoF27dqhXr16NtJAVCoVwc3NDu3btqvQIYFFREecLSR9c/shMbRc+JCUllal8vs5vudx4VEVkMlmFn5NGrHVp1KgRoqOjjcZTjo6ORvfu3bFlyxbO/du2beN1Pq1Wi6lTp5YsT+DyyVdYWIhVq1Zh1apVkMvlpfbZ29ujS5cuBs8RFxeHZ8+elUrr06dPjf4oJwgSgFUAgUAAR0dHDB06tEb6m5JKpRg6dGilObw2pU1Nif3KN68+gcXlg40LPr78uDAUeutlKtKXX3moDB+Sr8ahJf6lRYsWuHHjBjZu3Ah/f3+D4m3KlCmIiYkplZ6cnGwwPBvXx0zxNG/Xrl3RokUL3seOGDHCqJAj9y8EQQKw0hCJRBgxYoRJC6irC3Z2dhgxYkSlrXE0ZeTo7t27vPOePXuWVz53d3fOdC8vL17Hx8fH4/nz5yZdc3JyMi+Bam9vDzc3t2rRj/iOVPIZMU3PSOdVlj5fdHyF4auWzVzwjTdc1T4OJRIJJkyYgNjYW
Jw6dQojR47kHDFVKpVYvHhxqbQTJ07oLG0ICgpCTEwMYmJiOH0GFq8rFIvF2LNnD6dFMhf6Ioq8DJfvSwr/RhAkACsEsViMgIAAhISE1ChjEKlUin79+sHPz6/SptNkMhlv8RAeHs4rn1wux59//skrr76RPn3C8FUYY1i4cKFJ18xlXclFUFBQtTF04LOQHwAuXLhgNM+NGzd4laVvdJTvvbty5YrRPMZc6RRTt07dqvnDLRSiR48e2LlzJx49eoR33nlHJ09kZGQpf5Nc17xjxw506dIFXbp0wfbt23X2v7w8w8/PD7du3cKKFSswevRoDB48GN27d9c5pnHjxkbX8WVnZ+vEOG7evLlJ0UwIgiABWGYEAgGsrKzw4YcfVptF+Xyuyd7eHh9++GGlrN96uR58/egdOnQIDx8+NJpv8eLFvNfm6Zsia9iwIe9oHZs2beItFB49eoTly5eXq27VWQD+9NNPSE/XP8IXERHBe/RWnw84vtP3P//8s8G1omfOnMHpqNO8yvLy9qry98jHxwe///47Bg4cqPPB9PLoOtf61GbNmnH+XcyrEWscHBwwY8YMbN++Hfv370eTJk10jgkNDTX6gXP8+HGdmM/k/oUgSABWKGKxGG3btq0xawGlUinefvtttGrVqtIX0+sLQ/UqWq0Ww4cPR3JyMud+tVqNJUuWmDQiN2zYML3tM3jwYF5lqNVq9OvXD5cvXzaY79q1axgwYAAvZ85V+UXHNQrOV6ympqZixIgRnFOrFy9eNMl/YePGjTnT+a6bTEhIQGhoKKcIjIuLw/vvv68TBk0fvr6+1eOHXCjknD7NzMws+ZvLQv1locYl2oqtg7nIz8/Hzp07dT78uPxvcn0QvMqrApYgCAvrn9reAAKBANbW1pg5cyZOnjyJJ0+eVNtIBMURTj7//HPY2NhUen169OiBX375hVfe27dvw8/PD6GhoXjjjTfg5uaGxMRE3LhxA7t27eI1QlhMu3btDK71Gzp0qF4ryVdJTExEx44d8eabb6JPnz7w9/eHvb09MjMz8ejRIxw9ehQRERG8/f/Z2Nigf//+pdI6d+6sV/yWhcGDBxsVrcC/8WBfFqNcArB9+/a8z3v69Gm0aNECH330Edq3b4+8vDxERkZiw4YNvJ2t+/j46IQ+e/m6lixZwqucffv2ITAwEFOmTEHbtm2Rl5eH8+fPY+3atTqOh/Xh8Hyl7gAAIABJREFU7OysN35tVYRr+jQ7O7vk71dH3Ph+nOnjjz/+0LHg79mzp9F1ghqNRsf9i0wmMxiJhCAIEoAWE04NGjTAnDlzMGvWrFI/mtVJyDo4OGDu3Lnw8fGpEr7U3nzzTdjZ2fF285Kbm4sff/wRP/74Y7nOO3XqVIP7+/fvjyZNmvAKCQf8ux7wwIEDOHDggFna5FVxbmVlZVajEL5uNLy8vIyet1mzZnB3d+ctUNPS0njFo9VHnz599O7r0KEDXF1deRtwxMfH46uvviqXkK6otcG//fabzvrWTZs2wdnZGdOnT0dBQUFJeseOHTF58mSdMrjq+vKoX1k+bA0ds3nzZp20sWPHGi3z5s2bePHiRam0Xr16kfU3QVS09qEm+L+1gCNHjsTIkSMrde1cWZHJZHjnnXcwbNiwKuNHy87Ozqyhy/jg7+9vNJyZVCrVsZCsKN5///1q1a9EIhGmTJlSYeczdC6xWIz33nuvwupiqXNxjardvXsX+/fvL7UVfzht3boV4eHhJdv+/fs5y311vR5Q2nCmLIZH+j4k4+LidNzM2NraYuTIkUbLJPcvBEECsMqJQDs7O8ydOxcdOnSoVlbBEokEHTt2xNy5c6ucS5uvv/6a9+J9c7B48WJeax+HDRtmNFKBuSmeRq5uzJw5k7cFbnl4++230aFDB4N55s6dy9uIpzz079/fLKKE63eEy5CJa4S1WHy9+kH39OlTznM9fvxYJ+3lpRBcdXlZjHIJU32/g7/++ivn/eMKGfcqL4eQe/nZIAiCBGDlNcb/j56xbt06tGzZslpEJBCJRAgMDMTatWtRv379KudexMnJCd9//32FnOujjz7CW2+9xfte79ixo8L88UmlUqxYsaJaPhf29vZYtmyZRc9ha2vLq584OzuXa4qZD2KxmLdFtzHq1aunk3b8+PFS/8/Pz0dkZKTOB2mx0H3VKvrOnTu4ePFiqTSNRoN9+/bpnOvl/s0lnO/fv1/yN5cTcy5H4EqlElu3btVJ5+P778WLFzp1b9y4Mac1MUEQFoYRpdBqtayoqIhdvnyZBQQEMJFIxABUyU0sFrMWLVqwK1eusKKioirbphqNho0dO9aibREcHMyUSqXJdYuJiWFWVlYWrZtAIGDh4eEV1t6vv/46r3qdPHnSpOdiypQpFmufv/76i3dd1Go1Gz58uMXu18aNG812L9LS0phQKNQ5x6xZs9iNGzdYVFQU6969u87+zp07l5Tx+eef6+x3dXVlmzZtYnfv3mVXr15l77zzjk6eBg0aMK1WW1LOqlWrdPK0atWKnT59mp0+fZq1atVKZ/93332nc0179uzRydeoUSOm0WiMtseWLVt0jp06dSq9eAiiEiABqOdlp1Ao2PXr11n79u2ZVCqtcuJPKpWyjh07suvXrzOFQlHl21SlUlnspd2tWzeWlZVV5rpFREQwJycni92rtWvXVmhbW0IAMsaYUqlkvXv3Nnv7LF++3ORrLCwsZB06dDB7XebMmWP2+9GrVy+T6/Hrr7+WHB8XF1emD9FZs2aVqsedO3dMLiM2NlbnegYMGKCTb968ebzaYvTo0TrHHjp0iF46BEECsGqJQKVSyeLi4lhISAizsbGpEsJPIBAwW1tb1r9/f/bgwYMqPfL3KkVFRWzixIlmbY8xY8aYpQ3u3bvH/Pz8zFo3Gxsbtn79+gpvZ0sJQMYYy8vLY6NGjTJL+0gkErZq1aoyX2deXh4bMWKE2UbTV61aVWrEzFycO3eOCQQC3nVp3749U6lUpcqYPXu2Sdfj5OTE0tPTderCNVKob3vrrbd0jn/+/DnntTx+/JjXB4Sjo6POh2x+fj69cAiCBGDVE4EqlYqlpKSw//znP6xOnTqc0zkVtQmFQla3Tl02Z84clpqaytRqdbVs1yNHjjAvL69ytYW3tzfbvn07r2knUwRFWFgYs7e3L/e9GjhwIHvy5EmltK8lBWDxc/HLL78wmUxW5vZp3Lgxu3TpUrmvVaPRsNWrV+sIC1O25s2bs9OnT1v0nvzwww+865KQkMA5gs5XvMlkMnbixAnOeuTm5rLAwECjZTRp0oRlZmbqHL9gwQKdvD179uTVBjExMTrH9u3bl140BEECsOqi0WhYbm4u27dvH3vttdcqfDRQIBAwGxsb1q5dO3bw4EGWl5dnVuFTGRQUFLBNmzZxrn8ytDVt0pQtWrSIFRYWWqxuGRkZbM6cOczX19fkUZf33nuPRUREVGrbWloAFvPo0SM2ceJEZiXlv4ayfv367LvvvjP7qE9WVhYLCwtjderU4V2Xli1bsq1bt+qMtlmKAwcOsCZNmuj9uJs4
caLBpQwajYb9/PPPzN3d3eBa2Bs3bhisR25uLhs/fjzntLJQKGTvvPMOp/hTq9WsUaNGOsfwXd/KNYq5dOlSesEQRCUhYKyahr0om8ELNBoNGGPQarUlTk6L/xUIBBCLxZzWv4wxqFQqpKenIzw8HL/++ivS09Mhl8stVl+BQACZTAbXuq6Y8sEUTJw4ES4uLnr9/KnV6lKhmwQCQYnFa/G/IpGoJL2qkJSUhMuXL+PChQt49OgRMjMzkZGRARsbGzg4OMDd3R0+Pj4YNmwYAgMDK6z+jDE8fPgQBw4cwMOHD5GWloaUlBTk5OSgbt26qFevHlxdXeHi4oIePXqgW7duVSKc4M2bN3k5S+7YsSOnlaep5Ofn4++//8b58+fx8OFDpKenIycnB2KxGA4ODvD09ETz5s3Ru3dvdO7c2aLW9Wq1Gjdu3MDhw4fx8OFDpKamIjU1FVKpFPXr10f9+vXRtm1bDBo0iDNyhqXRaDSIjY1FXFwc7t+/D5VKhUaNGmHgwIG8LdKVSiUuXbqECxcuIC0tDTY2NvD29kbPnj3RqFEj3nVJTk7G0aNHS6IfNWrUCCEhIXpD7mk0GkRFRek4h+7atSsvJ85xcXFITEwslda2bVu4uLiQNSZBVAI1WgAyxqBWq6HVaqHRaJCXl4eEhAQ8f/4cCQkJSEpKQl5eHgoKCiCXy2FlZYWpU6eiVatWen/QtFotioqKkJiYiG3btmHz5s3ISM+AokjBOxyYMSQSCaysrFC3bl1MmDABY8eOhbu7O6ysrDjdvDDGUFRUhLt37+Knn35CYWEhZDIZ7OzsYGdnBw8PD/j4+MDT0xM+Pj6wtbWFSCSCSCSqVv4OCYIgCIIgAagXlUoFtVqNgoIC3Lp1C9HR0Th//jxu3rwJuVwOjUYDjUZTMgpYvAkEAnh5emHpsqXo27cvbG1tOUebikcQVSoVMjIyEBkZid27d+PChQuQy+VQq9Ulm9EbIBBAKBRCLBZDKpVCJpOha9euGDFiBLp37446depAKpXq9cjPGENhYSFOnjyJmTNn4vnz59BqtRAIBCVb8cifUCiEtbU1AgMD0bVrV3Tp0gVBQUGws7ODRCKpFn4PCYIgCIIgAVhKCKnVaigUCty8eRN//fUXDh06hNTUVBQVFZkkyFxcXPDxxx/jo48+gpOTk8FRMo1GUzL1+uLFC9y8eRPnzp3DlStX8PDBQxTKC0sJzeJzFAszW1tbNGnSBO3atUOXLl3Qpk0buLq6lggyQ46d1Wo1srOz8csvv2DlypXIzMw0GLz9ZcEplUohkUhQr149DBgwAEOHDkWbNm1gbW1dJaYxCYIgCIIgAWhQ+KlUKuTm5uLEiRMIDw/H1atXIZfLoVQqjQoifSLJxsYGXbt2xaJFi+Dv7w9ra2uDYqx4VPDl0UWlUon09HRkZWWhsLAQBQUFYIzB1tYWtra2cHFxQd26dSGVSiEUCktGAo1F82CMQS6X4/79+/jf//6H06dPl8QNLcu1SqVS2NjYoHXr1nj//fcREhICJycnEoIEQRAEQQKw6lE8zXvy5EksX74cd+7cQWFhoVnW4gkEAohEIri6umLatGmYMGEC6tatq9cAw5AoLBahL48AAv9nlGGq2M3IyMCWLVvw048/IfVFainDj/Jcb/GIpL+/P2bOnImQkBDY2trS1DBBEARBkACsfF4eAfv6669x+vRp5OXlQaPRmCTuii1iBQIBBBAAAu681tbW8PX1xaeffooBAwbA0dGxwi1ptVptibXl6lWr8eDhAygUCr0jnMVTzsUC9GUhagyRSAQ7Ozt07doVX3/9NVq0aMHLyo8gCIIgCBKAFkGj0SAnJwe///47li1bhpSUFKMjYMXTnMXr3lxdXeHr64tGjRqhTp06sLW1hUwmMyjqioWgt7c3OnfubBb3GaYI3oKCAly8eBHx8fGQy+UGxVyxpXJhYSEyMjLw9OlTPHr0CC9evChZD2lserxYJNerVw8zZ87EuHHj4OTkZNKIJUEQBEEQJADLjUqlQkJCAv475784EnEE+fn5ekXMy2vbfHx8MGDAAHTu3BkBAQGoV69eqdG/4vy8Guz/+wo0tk7PEiJQpVLBlNv1soWzRqNBWloa4uLicOHCBRw+fBhPnjyBXC5HUVGRweu1s7NDSEgIlixZAm9vb1obSBAEQRAkACsGhUKB2NhYfPLJJ7h+/bpe0VI8UlenTh0MHz4cI0eOhL+/P2QyGW8ji5rIy9PBxSOAcXFx2Lt3L3bv2o209DTI5XJOgVkspgMDA/HDDz+gbdu2Jq2FJAiCIAiCBGCZxN/ly5cxefJkPHr0iNPIQyAQwMrKCu7u7vjwww8xatQo1KtXD2KxmJwd6xGExe5r0tPTsXv3bqxduxZJSUlQKBScx4hEIjRs2BDr169Hly5daF0gQRAEQZAAtJz4O3/+PCZPnoynT59yGnqIRCI4Oztj3LhxmDZtmsGoGYQuGo0GKpUKKSkpWL9+PcLDw5GZmcnZ1kKhED4+Pvj555/RvXt3yGQyakCCIAiCIAFoPpRKJa5fv46xY8fi8ePHnOv9rKysEBQUhEWLFqFjx46wtrYmQ4VyCEG5XI4rV67gq6++wrVr1zhHA4tF4NatW9GhQwdaE0gQBEEQJADNJ0b++ecfjBkzBjdv3tQZjSo2ThgyZAgWLlwINzc3WpdmBoqNTVJTUzF//nzs3bsXubm5OvlEIhFatGiBHTt2oFmzZuQrkCAIgiBIAJYPrVaLjIwMjB8/HidOnIBSqdQRf46Ojvjss8/w6aefwsHBgUb9LCDAc3NzsW7dOixfvhxZWVk6eSRiCbr36I6tW7eifv36Fe4bkSAIgiCIsiEKCwsLq2qVKiwsxJo1a7B9+3bI5XId8efk6IS5c+fio48+gqOjI631swBCoRAymQxt2rSBs7MzLl68qHMvGGNITU2FUChEx44daSqYIAiCIKrLe76qVUilUuHixYtYs2aNTnxbgUAABwcHzPrPLEyZMgX29vY06mRBiqfZx48fjzlz5sDR0VFHABYWFmL9+vWIiYkxS0g6giAIgiBqmQBkjCEnJwcLFixAZmamzn5ra2uMHz8e06ZNg52dHYm/ChSBU6ZMwaRJk2BjY6Nzz7KysrBgwQJkZWWhGoeWJgiCIAgSgJWBUqnEn3/+iWvXrukYfYjFYnTq1AlffvklHBwcSPxVggicNWsWgoODdQw+tFotbt26hV27dhmMKkIQBEEQRBV5t1cVIxCtVouUlBT06tULDx48KOXyRSAQwNPTE/v370dgYCA5dq4k1Go17ty5g7feegvPnj0rNdonEAjg6+uLyMhIeHp60rpMgiAIgqjCVJm3tEqlwqFDh5CQkKDj78/GxgbTp0+Hn58fib9KRCQSoVmzZvjss89ga2tbah9jDMnJyfjrr79oLSBBEARBkAA0DmMMBQUF+PXXX1FYWKgjOvz9/fHee+9R6LFKRiAQQCaTYcyYMQgMDNQZ5ZPL5diwYQNyc3NpLSBBEARBkAA0jFqtxuXLl/Hw4UPO0b9PP/0UTk5OtO6
viohAJycnfPrppzqjgFqtFs+ePcP58+c54zUTBEEQBFE1qBLhG9RqNQ4ePKgTdkwoFKJxo8bo168f+ZirQkgkEvTu3RvNmzfHtWvXSol2uVyOgwcPonfv3lVuul6lUmH+/Pk6jsW5sLW1hbu7O7y9vdG5c2e4uLiYdK6oqCgcOnSIc9/YsWMRFBREHamKcunSJezatYtz33vvvYfWrVvX2nu9evVqPH/+XHckQSjE0qVLLXbenJwcHDx4EH///Tfi4+ORlJSE3Nzckme0S5cuGDFiBJo3b07PZy3rG0Q5YJWMVqtlqamprHHjxkwgEDAAJZuNjQ1bsmQJk8vljKhaKBQKtmLFCmZtbV3qngkEAubt7c2Sk5OZVqutUnXOzs4uVVe+m0gkYj169GDbtm1jGo2G17mWLVumt7ydO3dSB6rCLFmyRO+92717d62+161ateK8TqlUapHzyeVyFhYWxqysrHg9qwMHDmR37tyh57MW9A2i/FT6FLBarcb9+/eRmZGps27M2toaAwYMoDizVXHoWCxGv379OI1BcnJyEBsbW2OmgTUaDaKiojB27Fh06tQJt2/fpg5AEBYmISEBbdu2RVhYGG/3UocPH0ZQUBA2bNhADUgQRqh0AajVanH16lUoVaWn5UQiEQICAtCgQQMSgFWx4wiF8Pb2RqtWrXTiMKtUKly5ckVnPWdN4PLly+jevTvOnDlDnYAgLERiYiK6d++OuLg4k49VqVSYPHkyFixYQA1JEFVdAF66dEnHdYhEIkFwcDCt/auiCAQCiMVidOvWTWetn1qtxoULF3ScedcUsrKy8Oabb+LJkyfUEQjCAu+E8ePH4/Hjx+UqJywsDHv27KEGJYiqLABv376tM10okUjQoUMHcihclTuPUIgOHTroCECNRoM7d+7UWAEIALm5uQgNDSV3NwRhZjZv3owTJ06UuxzGGKZOnYrc3FxqVIKwlABkjJVp02q1KCwsREpKis6LVCQSoWHDhuUqnzbLbkKhEA0aNNCZAtZqtcjKykJubi40Gk2Zy69IbGxs0KxZMzRr1gwNGjTQiXnMRXR0NGJiYuhXhCDMBGMMK1euNJrP0dERrq6uRvOlp6djxYoV1LAEwYG4rA+pUqmEVquFUqmEQqEo0wtbq9Xi6dOnUCl1I0cUTzFmZGTQXarKXxBCIecorVqtxoMHDyAUCsvkv1EoFEImk0EsFpdslhwN7t69O44cOVKqbz5+/BgbN27E8uXL9UY3Wb9+PV5//XWd9HHjxqFr166cx7Ro0YI6Tg2C7rX5uHDhAmJjY/Xuf+2117B582a0bNkSAJCSkoKlS5caFI07duxAWFgY3TOCKI8AZIxBpVIhLS0NR48exYEDB3D16lXI5fIyf+1pNBrk5ukO0WdnZyM4OJimgKs4Wq0W2dnZOum5ubkYMmRIuXwB2traok2bNhgwYAAGDBgAV1dXWFlZVZiwbdKkCb799lu89tprGDFiBGe+kydPcqa7urryGqEgqj90r83H33//rXdfmzZtEBkZCQcHh5I0Nzc3rFixAmq1GmvWrOE87sGDB3j+/Dm8vb3pnhFEWQQgYwyFhYWIiIhAWFgY4uPjoVAoSqb4zA1jDFlZWXSHqimMsXKvvcnMzERycjJOnjyJ1atX4+uvv0b//v11XM9YmqFDh6J9+/a4fPmyzr7k5GQkJSXBw8ODbjpBlJMrV67o3Tdz5sxS4u9l5syZg59//lnvSH1CQkIpAUgQBM81gMXiLzw8HNOmTUNcXBzy8/OhVqtpETxhURGpUqmQn5+Pe/fuYerUqdi4cSMKCgoq9iERCjFp0iS9+5OSkuhmEYQZuHv3Lme6RCLBsGHD9B7n7u6Ovn376t2flpZGjUsQZRGAKpUKUVFRWLRoEdLT0mukfzeiaqPVapGRkYFvFn6DU6dO6f3StxR+fn5692VmZtINIggz8OLFC850Dw8PWFtbGzy22GhQ38ckQRAmCsDiqbwFCxYgPT0dDPQgEZUDYwwZmRn4+uuvkZ2dXaE/6l5eXnr3lXUNLEEQ/4dSqURhYSHnPjc3N6PH05o+gjANo2sAVSoVzp8/j/v379PIX1kUtlAIe3t7HUtYxhjy8/NrtK88S6DRaPDw4UOcP38e/fv3L5eRiSmYGtbu2rVrOHv2LOe+AQMGoGnTpkbLKCoqwr59+3D+/HmkpKQgISEBVlZW8Pb2hpubG4YPH4727dubtT7Xr1/Hpk2b8OzZM6SkpMDDwwNNmzbFtGnT0KBBA51jHz16hE2bNuH+/ftISEiAUCiEp6cnOnbsiNDQ0DK9lLmuWywWo379+mjevDmGDBmCoKAgkw3E1Go19u3bh4iICDx9+hT5+flwc3ND48aNMX78eLRu3bpMfcMc9xoAFAoF9uzZgytXriA5ObnkftevXx9+fn4YOnQoAgMDTbaqz83NxdGjRxEZGYnExESkpaVBJpPB1dUVAQEB6NevHzp27Fgma31zIpFIIJPJoFAoOH9HjdG0aVMEBgZy7rO3tzfbPXu1HxUUFKB+/frw8fHB2LFj0bFjRwD/Gp9ERERwljFw4EA0adKk3M9ocnIyXFxc0KJFC4SGhqJVq1Ymt3t16R+EZUZVjAbjnjVrFu9g3LT93yYUCpm/vz+7ePEiu3XrVqnt4sWLzN/fnwmFQmorEzeZTMa++OILJpfLTQp8nZ2drbfM/v37Gzw2KipK77ERERFmDTavUqnYwoULmaurq9G2aNOmDTt06JDRazdWn8LCQjZq1Ci9eSQSCfvmm2+YVqtljDGmVqvZ7NmzmUAg0HuMtbU1+/HHH0uOMYYp192hQwcWFRXF+94nJiayLl26GCxzzJgxLD8/ny1ZskRvnt27d5v1XjPGWFFREZs7dy5zcXExet1dunRhMTExvK65sLCQLViwgDk4OBgtNygoiB08eNBoma1ateI8XiqVmiU4va+vL2f5TZs2ZeakrPeMTz8aMmQIy8vLM3s/MvaMAmCTJk1iRUVFldI/LN03CPNjVAAWFBSwIUOGMLFYTOLDxE0kErH27duznJwcplKpSm05OTmsXbt2JADLsEkkEjZ06FBWWFhYYQJw3bp1eo+Njo422wsmLS2N9ejRw6T2EAgELCwsjGk0mjK98LZs2WL0pVa8TZ48mRUUFLBhw4bxrt93331n9N6U9bq//fZbowIzNzeXBQQE8Cqzffv2LCwsrMIEYFJSEuvatavJH5YrV640WO6zZ89YmzZtTH62Zs6cabAfWfol361bN733OikpqVIFoCn9KCgoiH311Vdm60emPKPvvvuu0eu3RP8gAVhDBWDfvn1JAJZDAObn5+u0a35+Pmvfvj0JwDJsYrGY9e3blxUUFFSYAOzUqZPeYxMSEszygikqKmIdOnQoc7t88803ZXrheXl5mXSeOnXqmCzUrl+/bnAErDzXvXz5coP37j//+Y9J5VlbW1eIAJTL5ax169Zlvu61a9dylpuVlcWaNm1qkX5k6Ze8oRGuWbNmVaoANLUfiUQis/UjU5/R/fv36712S/UPEoDVD95uYMiKiq
itnDhxAhcuXODc16xZM7i7u5vlPPPmzcOlS5fKfPz8+fP1riEyREJCgkn5TY3OwxjDwoULLXbdc+bMwY0bN/Su11q/fr1J5VWUUc8XX3yBmzdvlvn4zz//HA8ePNBJHz9+PB4+fFiufnjt2rVKedZ69Oihd9+yZcuwYMGCCvcAUNZ+ZM713aY+o6tWrdK7rzr3D8K8UJgNgtBDUlISli5diqFDhxr8MTVHtJqkpCR8//335SpDo9Fg7ty5VbItIyIiOBf3m+O6VSqVXoH54MED5OTkVLn2ePLkCdauXVuuMhQKBRYvXlwqLSoqCvv37y9XuVqtFvPmzauUdhk+fLjB52n+/Pnw8/PDqlWrKjRQQFXtR/o4c+YMpyP+6t4/CBKAOggEAshkMtjY2EAqlZa5HIlEAmtr63KVUVnXb21tDVtbW8hksjJZawkEAlhZWcHGxgYymazWheCLiIhAs2bN0KxZMzRu3BgODg7w9PTE7NmzkZ+fz3mMl5cXpkyZYpbzb9u2zWRLY30//E+fPq1y7SuXy3H//n2LXfdff/2F5ORknfRHjx5Vyf62ZcsWs8yqbN++vZQw+fHHH81SvyNHjuj1yWdJXF1d0adPH4N5Hj9+jBkzZsDDwwOhoaGIjo62+AxVVe1Hhj4GuUaHq3v/IEgAlkIkEqFJkyb4z3/+g5UrV2LSpElwdXU1SQQJBAI4OTlh6NChmDt3LiZOnAgPD49qIQRFIhEaN26MOXPm4JdffsHs2bPRoEEDkwScQCBA3bp1MWnSJKxcuRKzZs1C48aNIRKJatXD8PDhQzx8+BBPnjxBXl6ewbw2Njb466+/UKdOHbOc++LFiwZfijt27EBGRgaeP3+O2bNnG+zfZ86cMfn8ISEhiI2NRXZ2Ni5evIg2bdoYPaZv376IjY1FTk4OLl68aNSNClc0BnNdN2MMkZGROunGnHQPHjwYd+/eRXZ2NiIjI9G8efMK6WuGrtvd3R179uxBVlYW4uPjMX36dL15lUplyfIEuVyOI0eO6M3brFkz7Nu3D9nZ2YiPj0dYWJjevIwxHDt2rFKew//+97+88ikUCmzduhXBwcFo2bIlVq1apfdjrbwY60ft2rXDtWvXkJmZiaNHj1ok7Jypz2h6errOR1hN6B+EGeFjBBISEmJwQSsq0ciiVatW7M6dO6ywsJApFAqWm5vLDh8+bNJCdWdnZ7Zz506Wk5PD5HI5y8/PZ7du3WLvvvsuc3Z2LrMBjCWNQAQCAZPJZOy1115jN2/eZAUFBayoqIjl5+ezS5cuMR8fH4MuOl4up27duuzIkSMsLy+PKRQKVlBQwGJjY1lgYGCVNFKxhBGIKVtAQAC7ceOGWReZt23b1qQF3aGhoXrz/+9//zOpPu7u7jquI9LS0pidnZ3eYzw9PZlSqSx1zIsXL5itra3eY/bu3WvR654xY4ZO/p9++klvfl9fX6ZSqUrlT0hIYPb29hY3AmnevLneY47j7ijwAAAgAElEQVQdO6aT35DV9dy5cxljjF2/ft2gEQGXFe3EiRMNWnxW1kL/RYsWlenZdHZ2ZgsXLtTpm+W9Z4b6kYODA8vOzi6VPy4uzuBvp6n9qCzP6KvnsHT/ICOQGmoEUlWRyWT4+OOP0ahRI1hbW8PKygp2dnYIDg7GoEGDeDkJlkqlGDhwIPr37w8HB4eSqWR/f3+sW7cO27dvR8+ePeHo6AixWFzp1ywQCCCRSFCnTh2MGzcOu3btgp+fX8n0t42NDQICAvDuu+/yGsGUSCQYNGgQXn/9ddjZ2cHKygrW1tbw9fXFxx9/DJlMRl9JrzBw4EAEBASYtcxXv9aLcXJywuDBg3XSDcUmNjZ6+SojRozQ6St169bFiBEj9B4zfPhwnefL1dUVw4cPr7Tr5loTplQqDV73q8+0p6cnRo4cafE+pC82raenJ+cU6Pvvv6+3rOzs7JIRbH188sknnMZKoaGhRsutDIpnNKysrEw6LisrC//73//QoUMHxMfHm60+hvrRqFGj4OjoWCrNz88PQ4YMMdv5y/KMcs1w1JT+QZgHcbWuvFiMgICAUi8igUAAqVSKwMBAiMVioxZjYrEYQUFBpR4ugUAAsVgMOzs79OrVC507d0Z0dDQ2bNiA6Oho5OfnQ6lUVmhkFIlEAqlECnsHe/Tr1w+TJ09Gq1atYG1tXWqqtrjuTZo0gVgsRlFRkdHrDwwM1Ll+kUgEf3//KiF6qxrLli1DVFQUTp06BVtbW4sKgmbNmnGmG4p7aupi9UaNGnGm+/j46D2mcePGJh9j6evmWvRuyBJT7zkaNLRo/1Gr1XoNGPRFoGjevLneKT8nJycAQL9+/XDu3DnOPC1btjS5PSvzBS8QCDB58mS0a9cOX375pcnTjTdu3EDPnj0RHR0NDw+PctenLP2IbwQYSz2jr1KT+gdBAhCA/hBBUqmU1zpAjUaDpKQkqNVqna/N4tE2BwcHhISEoHv37nj06BEOHDiAvXv3Ij4+HkqlEkql0iwL2YsRCUUQioQQi8UQi8WwtbFFx04dMXDgQLzxxhtwc3ODVCrVO8JZHC6NrxsCfeVIJBIK/6OHy5cvY/To0di3b1+5DWaKior0uh6xtrbmTDc0MsslhAyhL86qIXGr7xgbG5tKu24uDBkH2NnZcf92WFl27W9hYaHeeum7bl9fX6OuN+zt7dG5c2eT6mLoflUF119t2rTB0aNHERMTg2XLluHgwYO8P7yfPHmCiRMnIiIioty/Y4bawtPTkzPdxcXFbO1Qlme0NvQPopYLwPI+2EqlEnv37sW4cePQrFkzzhdMsRAsHnH08/PDp59+igcPHiAmJgbR0dG4evUqcnJyoNFooFarodVqS0bj9NWxeMqaaRmEQiGEIiEkEgm8vb3RokULtG7dGq1bt4a/vz8cHR0hkUggEokMGmcolUrcu3cP27dvNzr6R/wfwcHBCA8PLxmhycnJwc2bN7Fnzx6cOHGC85iDBw8iIiICAwcOtNjLpSLKK4uxjzkMhGrrC4RenKbTtWtXdO3aFc+fP8f69esRHh6OlJQUo8cdPXoUZ86cQffu3S1Wt4owlqttBnkECcAK+zFOSEjA2LFj8e2336JHjx4606qvCkHg3xHGoKAgtGrVCh988AGUSiWSk5Px7NkzJCYmIjU1FVlZWXBxceEsSywWY9q0aVAqlXB0dETdunVLNplMBpFIVDIVKxKJjI4yMcYgl8sRGxuLDz74AImJifSiMQE7OzudKZtOnTrhgw8+wA8//KDXEnP37t3lFoAEYWlUKhXS09ORl5eHnJwcqNVqODk5mdVZcUXg7e2Nb775BvPmzcNff/2F1atX4/z58waP2bZ1m0UFIPUPggRgNUaj0eDu3bsYN24chg0bhs8++wyNGjWClZWV3i+vl8Ug8O/UlJ2dHXx9fUumKIoFGJcxhlQqxbBhw0rKKt6KhZ+pD29+fj7+/PNPLFiwAImJifTgmpFPPvkEv/76K2JjY3X27du3DyqVipfBEcGfM2fOcK6tMudSi5pObm4ufv75Zxw/fhwxMTEVFuGkI
pBKpRg1ahRGjhyJAwcOYOzYsXpdwJw5e4Y6Qy3rHwQJQJNFYFZWFn777TccOXIEoaGhGDduHHx8fCCVSo0aQ7w8WseHYsfLZYUxBrVaDYVCgWvXrmHZsmU4e/Ys8vPzK9Q4pTYgEAgwevRofPXVVzr7cnJykJ6ebrZwcMT/9e/yhKuq7Wzfvh3Tpk2rVtEryvpsvvXWWzhy5Ai6devGmefhw4fIy8uDvb09dYxa1j8Iw1AouFdeOsVTuatWrULPnj3xxRdf4MKFC8jJyTG7sUdZ6qdSqaBQKJCRkYGIiAiEhoZi2LBhOHr0KHJzc0n8WYhevXrp3afPlQlBVAbr16/Hu+++W6te7sHBwejatave302KWlG7+wfBDY0A6vnBUCgUSE1NxcaNG/HHH38gwD8Aw0cMR69evdCoUaNSBhmWCptWPMqn1Wqh0WhQWFiI2NhYHDt2DPv37UdCYgLkcnmlBEevbdSvX1/vPnq5EFWFhw8f4rPPPquV1x4YGIiYmBi9z6ivry/1j1rcPwgSgCYLsKKion/DLV28gOs3ruObb75B06ZN0bNnT3Tt2hX+/v6oV69eibWvUCgstaav+P9caDSaUusFX940Gg0yMzPx6NEj3L59GxcuXEBMTAxycnJK6kSjfRWHIfcjlgo/RRCmsmzZMigUilp57a86Y36ZwsJC6hy1vH8QJADLLASLR+DkcjmuXLmCmzdv4qeffoJEIoG7uzsCAwPh7+8PHx8fuLu7w9XVFXZ2dnBwcEC9evV0RgnVajUePXqEtLQ05OTkICsrC8nJyUhISMDTp09x7949ZGRkQKVSQa1WQ61WQ6VSkWVvJUFiu2Lx9vbGmDFjTDrG3NFZqhsqlQp79+41mOe1117DpEmT0LJlSzg5OSEuLq5CIp9UBIbWVJPoof5BkAA0mxjUaDQlPyrZ2dl48OABxGLxv/78hMKS6eGgoCAcOHBAx6FmUVERpk6diuvXr0Oj0YAxBq1WC61WWzLtS6Kj6kAjCBVL48aN8d1331FDmMCjR4+QmZmpd394eDgmTpxYKs3Q0oaKJjo6Gh988AHnvs8//9xgODzAsE9Y+nCu/v2DIAFYJSkWa6+uxROJRMjNzdX745Ofn0+GGzVAAJorHBxBlIf79+/r3TdkyBCdl3tVIyUlBXfv3uXcl5GRYfR4Q47vHRwcqH9U8/5BkAC0GCKRCDY2NhCLxSWje4YCgFclJBIJbGxsIBKJSqaqyTDE/F/P+nB2di5X2eY2IqouUQMsZTzFF33PiKVHiyx13Y8fP9a7b9CgQdX6+cvLyzOax9DvNQnAmt0/CBKA5RJQnTp1wuzZs+Hn54fk5GSEh4fjwIEDJeHdqiICgQC2trYYNmwYpk+fDk9PTzx9+hTLly/HwYMHKRScGdEXDs4cAlAqlcLOzo7TmERfXN+CggK95RlaDF+VMHTd+p65vLw87N69m3Ofv7+/TqxTQ9OC8fHxnOmWtuq2sbGBUCjkHPnXNxuQnZ2NP//8k3Nfy5Yt0aFDB4MiycPDgzPdXG6tzCGaDT1HT548MXp8YmKi3n3lnco01I+ysrI406vaspHK6h80/U4CsEqPQjRu3BgbN26Et7c3xGIxfHx80Lp1awwfPhyLFy/G7du3UVBQUKWmakUiEZycnPDee+/hq6++gpOTE0QiEVxcXLB69WokJyfjwoULFBHETF/Ov/32G+c+W1tb+Pj4lPscrq6unELowYMH0Gg0OqN6hpwkV6fRDn3X/c8//3DmP336tN61YKGhoToCkCsKTzF37tzhTL99+7bFn906deogLS1NZ5++keajR4/qve4pU6agQ4cOBgVHQkICZ/rz589N/r3kQqVSobCwUGetsykYcqZ+4MABZGRkoE6dOpz7FQoFjh8/rveDyM3NrdwfK/rQN21d1RyZW7p/WLJvEBbSP7W9AaRSKYYPHw5PT8+S0G8SiQR2dnYICQnBvn37sHr1agQFBcHBwcFoRBBLI5FI4OTohM6dO2Pz5s1YuHAhXFxcStzQSCQSuLi4YPjw4QZ/tAh+XL16FQMGDNDr6qVLly5m6ROurq6c6QUFBdi8ebPOF/Xq1av1llWdIh7ou+6UlBQcOXKkVJpWq8XatWtNGkEy5L5n7969OqOAZ8+e1SskKuK6Hz16hFOnTpVK02g0+Pnnn/WW5eTk9O/XvIF+ePLkSc70yMhIk+ptaHQ5Li6uXG3SqFEjvSIhLy8PH3/8Mec0L2MMc+bM0Wvg0LZtW5PDa5rSj3bu3Knz+xAbG6t3pLrSRnss3D8s2TcIC/WJWq+AhUI4OzvrfL0IBAJIpVLUrVsXY8aMwaBBgxAVFYXNmzfj0qVLKCgogFKptPgIm0AggFgsLpku69atG959910EBwfDzs6OU+QJBALY2NiU+0evNnHv3j3897//LXnhZmdn4/r167h8+bLB40JCQsxy/iZNmuDSpUuc+z766CMkJSVhwIAByMvLw+rVq3H48GG9ZbVo0aLatLuh6x49ejTm/b/2zjwsqvL9/+9ZmI19RxQREdTUEBJMrVzSBHJBMyvNrbS6KjOXxMqPueZWfV1aTDOz/GqJuRGamYiSmi1gfsR9AUQQFYad2Z/fH/1mviJzhplhZhjgfl3XXMpZnnPOc9/znPc8y30vWIBBgwahvLwcX331FQ4ePMhZVlRUVL1tPj4+nMcrlUr0798fy5cvR+fOnXHixAmDD9ibyMhIoz1HjDE888wz+OCDD9C/f3+Ulpbiiy++QEZGBmdZjzzyCADAz8+P85iUlBRMnToVgwcPrtNDtXr1aovu29Qw7RtvvIFt27ahU6dOVtWJVCrF8OHD8cMPPxjd//333+P69euYNm0aevbsCZlMhosXLzboF0OHDm20vUz5UXFxMfr3748FCxYgNDQUv//+O95//32nm4dtb/+wp28QJADtglqtxokTJzBt2jSjcaT0OXv9/PwwYsQIxMfHIzc3F4cOHUJaWhrOnTsHhUJhiNWnD+lijdC7P3yMUCiEUCiEv78/+vXrh0GDBqFv374IDAyEi4sLXFxcjAo8fRaTo0eP0kIQC7hx4waWL19u8QurodAU5tK3b19s376dU6gsWLAACxYsMMuPuHKiOiOmnruiogJz5swx+/tj7EUfEhJi8ry8vDyL4w3a6rn37t1rdJ9cLjc7W4NAIDCkKWzXrh3ncTqdDgkJCZgyZQq6dOmCvLw8fP311xYHMW/bti3nvtOnTyMiIgIdOnRAZmamyfvh4pVXXuEUgADwxx9/cP5g4MIWcewa8qOsrCwkJSU59XfN3v5hb98gSADaHI1Gg/T0dGzduhUTJ06Eu7u70VWUeiEoEonQtWtXREZG4rXXXkNxcTH++9//Ijs7G//88w/Onz+P0tJSaLVawyINY0KNx+PBzc3N8MvSz88P7dq1Q3h4OMLDwxEZGYmIiAgEBwdDJBIZBKGpFYSMMVRXV2PXrl04dOgQCUA789ZbbzV6AYie8ePHY+7cuY2eOD5ixIhmFbvLVs/97LPPGp3IHhkZCR6P53QT0SdNmoT58+c3
OtLAhAkTDPPiHnvssQbbuk2bNjXqegMGDMBnn31m8pjc3FzU1tZaVf6gQYPw9NNPm+zhtlT8hYWFNbocZ/UjS7C3f9jbNwgSgDaHMYbKykr85z//wR9//IH33nsPHTp0gEQiMSq29EOy+mHZDh06ICQkBPHx8YZ4gFVVVZDL5aioqDAc9yAikQibN2+GUCiETCaDWCyuk0ZOH1Da3PllGo0GlZWV2Lx5M1asWEGJvu1MdHQ0Fi9ebLPyvLy8sHz5csyYMcPqMlxdXZtd8GRbPLebmxuWLFnCWX5cXBxOnz7tVM8dEBCARYsW4d1337W6DG9vbyxcuNDwd1BQEGJjYxucttAYhgwZAk9PT7u2Lxs3bkRcXJzJVb3mIJPJLB7iNuWnzuhHlmBv/3CEbxC2hU9V8H8iMCUlBfHx8Vi7di1u3bqF2tpakyt/eTweBAIBRCIRJBIJZDIZ3NzcEBgYiMjISMTExODhhx82KuIEAgHat2+Ptm3bwsfHB25ubpDJZJBKpZBIJIZev4bQx/07d+4cpk6dikWLFqGkpISW3tuRqKgopKam2nyRzZtvvonhw4dbff5nn32GLl26NLv6bMxzCwQCbNu2DZGRkZzHvPbaaxYLSkfwzjvv1JlzZdEvd6EQO3bsQGhoaJ3t5kwTuJ/ExESLjvf09GyUaDWH4OBg7Nmzx+SctYYQi8XYs2ePTVboW+tHplY1NxX29A9H+AZBAtBuIlClUqGgoABLly7FwIEDsXbtWuTm5qKmpsaieEh6Yaifq2fqGH2vn6X3qlarUV1djatXr2Lx4sVITExEamqqxXN6CPMRiUR4/fXX8dtvv5mc72L1l5HPx86dOy2OyC+TybBt2zZMmjSpeTZCVj63u7s79u/fj5EjR5o8bsKECUhISDCrzGHDhiE5Odkhzy0QCLBv3z6L5yB6eXnh4MGDRuc8Dhs2DLNmzTKrnMFPDsbGjRstvu85c+bY3ddiY2Pxxx9/oGfPnhaf27ZtW+zfv99mC7Ss8aOkpCRMnz7d6b5r9vYPR/gGQQLQrkKwpqYG169fx5IlS/D4449jzpw5OH78OORyuWHBR1Pcl1qtRm1tLeRyOTIyMjBz5kwMGDDAEPeP5vzZ5yXdv39/LFq0CJcvX8Znn31m1x4iiUSCzZs3Y/v27YiNjW2wF2js2LH4/fffMX78+GZdz5Y8t0AgwKRJk5CTk2NWD4VAIMCuXbswZcoUk+I+OTkZe/bscWj4JL1437p1K6Kjoxu099SpU3H+/HmTPYerVq3CmjVrOEOq8Hg8TJkyBWkH0qx6VoFAgM2bN2PTxk1GV17birCwMPz555/YsmULIiIiGjw+ICAAc+bMwYULF2wu/sz1I7FYjEWLFmHXrl1Om5HHnv7hKN8gbAOPNTBWWFNTg1GjRuHIkSNOF1TY09MTP//8M3r16lVnuFSpVGLjxo2YN29eoyeX68PBSCQStG/fHsOHD8eTTz6J7t27w83NzdCLp//XFmi1Wuh0OjDGoNPpoNFoUFpain/++QdHjx7FwYMHUVRUhNra2kZPIpfJZFixYgVeeeWVOqug1Wo1/v77b8THxzvdnA6hUIgnn3wSu3fvtii4qFqtxrFjx8xqxNzd3REcHAx/f3/OXlxTFBUVcQYajomJMRlW4n6ys7Px22+/4ebNm7hz5w7EYjECAwPRsWNHJCQkmL3gw5r7yc3N5QzI3KNHD6PXNnXOww8/jICAAKueWx/fMioqCvHx8WbX34PcuHEDO3fuREFBAeRyOTw9PdGpUye88MILhmDBlj63rWzNGENWVhZOnjyJ/Px83L17FyKRCN7e3oiJiUF8fLxFWV7KysqQlpaGnJwc3Lp1CxKJBB06dEBSUhK6du1qaGsyMjKMThnx9/c36yV+7tw5nDhxAleuXEFlZSVUKhU++eQTmy2Q0tfNpUuXcPDgQRQUFKC0tBR8Ph8+Pj7w9/fHY489ht69e5stuhprs/v9qLy8HL6+voiIiMDzzz9vOHfVqlWcvckpKSkYM2ZMk3xHHekfjvANggSg3QTgg8LAxcUFYrEYHh4e6N27N2JjY9GzZ0+EhYUhICDAEJ7l/o+xX1H6IWfGmOHLpdPpoFarUVJSgps3b+L69ev4559/8Pfff+PSpUtQKpVQq9VQqVQ2y0rSmgQgQRCEI3vaLBGABOHwdylVgWU9c1qtFgqFAhUVFbh9+zbS0tIMq4I9PDwQFhZmWNzh7e2Njh07Yvjw4fVEoEqlwnfffYfLly+jpKQEJSUlyM/PR1FREVQqFTQajaH3T/8hCIIgCIIgAdiE6Ofk3T/vrqysDAUFBXWGhWNiYpCQkFBPAGo0GmzduhVZWVkGsaf/EARBEM6DXC5Hfn6+0X0hISFGh2dpTjZBArAZoV+Za60QY4wZegmBf4eMFQoFZ0gWpVIJhUJhE9GnzyCiF5MEQRCEbUhLS8OECROM7nvvvfewbNmyetuLioo4y6M87QQJQCcSfhKJBG3atEFERAQKCwuRl5uHquoqpxdTPB4PHh4eiI6ORnh4OC5evIizZ8+isrKSDEsA+Dd91s6dO43umzBhgk1W6y1fvhwlJSX1tkulUs4gzUTLR61W44MPPjBrsZpIJIKPjw+Cg4PRp08fdOjQwWnymevzLRtj8+bNmDNnTp2FDUqlEnv27OE8x9zFUETjyMjIwE8//WR034svvmhVmCESgC0MmUyG6dOn46233oKnpyeUSiV++eUXLFu2DNeuXbPpQhJbwufzERAQgE8++QSJiYkQiURQKpVISUnB3LlzUVZWRsYlkJGRgY8//tjovkcffdQmAnDz5s24du1ave0+Pj4kAFsxNTU1FufY1tOuXTu89dZbeP311+Hq6tqkzxEeHg6pVGo0jVlxcTEef/xxrF+/Hj169MCdO3cwe/ZsFBYWcv5o79y5MzmHA/jrr7842764uLhWLwBbfRxAoVCIPn364J133kFgYCBkMhm8vLyQlJSEAwcO4N1330X79u0hlUqd5p71OYajoqKwZcsWjBw5Ep6enpBKpfD09MRzzz2HkSNHWhW+hCAIwhkoKCjA3LlzERkZ2eQp2EQiEcaOHcu5PycnB4MGDYK/vz+6deuGn3/+mfPYmJgYCoNCkAB0BlxcXDB48GC4uroa4vjxeDyIxWIEBwdj9uzZOHLkCJKTkxEWFgZXV1ez8/Pa417d3NzQqVMnfPDBB0hNTcXAgQPrhELR33u/fv1onglBEM2ewsJCDBo0CAcPHmzS+3j11VdtUs60adPIqAQJQGeBK+cvn8+HRCJBWFgYkpOTkZmZifXr12PgwIHw9fWFTCazqxgUCAQQi8Vwd3dHYGAgRo4ciS+//BLHjx/Hm2++iTZt2tSJ3aeHMYaysjJaDEIQRIugpqYG48aNQ15eXpPdQ58+fSxO2/cgXbt2pVRphNPQ6ucAqlQqpKamYtq0aQgKCqoXSV6/MlggEKBNmzYYN24cxowZg5s3b+LYsWM4cuQITp8+jaqqKkO8Pv0q4vuDQXMJPJFIBMYY+Hy+4SMUCiGVSvHQQw8hLi4
OvXv3RnR0NHx8fODi4gKhUMiZdUSn06GkpAQpKSlQKpXk4QRBtAjKysowYcIEHD9+vMnu4dNPP0VWVhYuXrxo8bkeHh7YsWMHJBIJGZMgAegMaLVa5OTkYMaMGVi2bBlCQ0M5v6B8Ph9isRgikQidO3dGeHg4Jk+ejKqqKuTn5+Py5cu4evUq8vLyUFhYiNLSUoSGhhoVgHw+H7169UJYWBi8vb3Rpk0btGnTBu3atUO7du0QFBQEqVRaRxQ2lGpOo9Hgzp07mDt3Ls6ePUs9gARBOC0ymQzt2rUz/F1aWoqSkhKYSk6VmZmJv/76C7169WqSe/b29saxY8eQkJCArKwss8/z9/dHWloa5cclSAA6G/pewH/++Qdz587F6NGj4e7uzjmH7v5eQeDfRPZeXl7o3r27IbWb/qMPMfMgEokEq1atMhxz/0cfRNrcEAiMMSgUCly8eBHz5s1DZmYm9f4RBOHU9O/fHwcOHKizrba2FqmpqZg1axZu3bpl9LwNGzbgq6++arL7DggIwMmTJ7F48WKsW7cOVVVVnMfy+Xy89NJL+PDDD+Hv709GdzCTJk1Cv379jO7r1q0bCUBykf/Ly3vt2jXMnj0b3333HWbOnIkBAwYYFn2Y6n3j8XgWr7jl8XiNXlnMGINSqYRcLsf27duxZs0aFN8uhlrT8iPQN9QbShBE80MqlWLs2LGIi4vDww8/bDSe6aFDh5r8PsViMZYtW4bk5GTs2bMHp06dQkFBASorK+Hr6wt/f3/06tULSUlJJPyaEH9/f6p/EoDmC6qqqiqcPHkSZ8+eRY8ePfDKK69gyJAh8PT0NMy/a+p71Gg0UKvVKCkpQWpqKjZs2IDr169zLmYhCIJoTnTo0AFvvPEGVqxYUW9fQUEBSktLjaZfczQeHh6YNGkSLewgSAC2FBGo1WpRUVGBU6dO4cyZMwgMDMTo0aMxfPhwdO/e3TA3z5x5eba6H51OB61Wi6qqKmRnZ2Pfvn1ITU1FaWkpCT+CIFocI0aMMCoAAeDmzZtOIQAJggRgCxaCVVVVqK6uxvr167Fp0ya0bdsW8fHxeOKJJxAVFQVfX1+DELx/Dp8lwpAxBp1OZ5g3qP+/TqdDbW0t8vLycObMGWRmZuLYsWMoKSmBUqmEWqWGjpHwIwii5dGhQwfOfXfu3KEKIggSgI4RgwqFAgqFAhUVFbh69So2btwIkUiEjh07IjY2Fg899BA6deqE4OBg+Pn5wc3NzbCYQywW11vQoS9Tq9WipqYGlZWVuHfvHoqLi3Hz5k1cuXIFFy5cQE5ODqqrq6HRaKBSqQxhZgiCIFoypuZIy+VyqiCCIAHoWHQ6HZRKJZRKJXg8HsrKynD27FlDL6D+X5lMBl9fX8TGxmLt2rX1GrPa2lpMnjwZJ0+ehEKhqDfUq9PpoNFooNVqTYZFIBpHRUUFDh06hPT0dNy6dQt3796FRCKBv78/HnroIcTHx6N3795mrcjOyspCZmam0X2JiYmIiIgAAGRnZ2PLli3Iz89HUVERfHx80K1bN0ycOBEPP/yw2feu0Wiwd+9eHDx4ELm5uaiqqkJQUBA6duyIyZMnO3XICWvq6vbt2wgODkZERARef/11hIaG1jv32rVr2LJlCy5duoSCggLw+Xy0bdsWvXv3xsSJE62aEG5LH+GyXXV1NQIDA9G+fXu8+OKL6N27NwDg8uXLnBkwnn76aXTq1KnBaymVSuzduxenTp3C7du3UVBQAKFQiMDAQHTu3BlJSdhyo+IAACAASURBVEno2bOn0y2sMjXf+sEoB+b6EwAoFAr89NNPSE9Px507d3D79m1kZmYatZ+xuhOLxQgJCUFQUBCeeeYZxMbGNuo5FQoFdu3ahb/++gtFRUWGawQGBqJLly4YNWoUevToYZF/OcL2xcXF2L9/P06ePImioiKUlZXB19cXbdu2xZAhQ5CQkAA3Nzer6sRWZVviF87QhtuzHeDq3TJJdXU1e+qpp5hAIGAAnOrj6enJTp06xdRqdZ17VigUbN26dUwmkzXZvfF4PObi4sJ69+7Nqqqq6tVrVVUVi4uLa/J6lclkbN26dUyhUNS5P5VKxU6dOsU8PT2dzu5CoZAlJCSw6upqZi01NTVs8eLFzMPDo8Hr9ezZk6WmpjZY5urVqznL+OGHH1hNTQ177rnnTF5r6tSpTKlUNnitW7dusb59+5osa9y4cayqqoqtXLmS85iUlBRmC8LDw42W7+PjY5e6cnFxYUuXLmU6nY4xxphGo2HJycmMx+NxniOVStmnn35qOKcpfMRc2yUlJbHKyspG2U6tVrMlS5Ywf3//Bu8/Li6OZWRkMFtTVlbGec2EhIQG3z1c53799dcW+RNjjCmVSrZs2TKj9tRqtVbXXXR0NPvpp58srhulUsnmz5/PfHx8GrxG37592YkTJ8wu2562l8vl7M0332Qikchkub6+vuyTTz5hGo2myco2xy+cuQ23RTtgChKADwgLqVTKJBIJ4/P5jb4/gUDAYmNjOQVgbGysTa7D4/GYVCplMpmswS8OCUDG8vPzWXR0tMXXnT17dr0XhbmNx9atWxv8sus/48ePN3n/FRUV7KGHHjKrrNjYWLZw4cJmJQAtqatp06ax6upqNnr0aLPtuGLFiibzEUts17NnT/b+++9bZbu7d++yAQMGWNyOfPjhh2YLZHsLwPLycs5zt2/fbtGL++zZs6xbt26cx9xvM2vrbuHChSZtfz+FhYWsX79+Fl2Dz+ez//mf/2mwbHva/sKFCywyMtKisocNG2ZWW22Psm0pAJuqDW9MO0AC0Ezn9/DwYMOGDWPLli1js2bNYuHh4UwsFju9AHRxcWHdunVjixYtYp9//jmbOHEi8/LyIgFo4hdmRESE1ddeunSpVY1Nu3btLLrOvn37OK8zd+5ci8qSSqXNSgBaWle+vr4Wv+yys7ObxEcstZ2pdpfLdkqlksXFxVl9/x999JFTCMC7d+9ynrt7926z/WnUqFHMzc3N5DPrhVtj686U7fXU1tayqKgoq6/x+eefm+xVtJftCwsLWUhIiFXljhkzxqS4tFfZthSATdmGW9MOkAA0UwC6u7uzdevWsbKyMlZbW8uqq6vZlStX2PTp05mfn5/FvWqOEIA8Ho+5urqy+Ph4dvXqVVZTU8MUCgUrLy9n33zzjVnDVq1RAI4cObJR1+bz+ezvv/+2uLGx9DNw4EDOoR1b2sQZBaAjPqNHj3a4jzjKdsnJyY3+UWlKIDtKAObl5XGe+8svv9jUn/QCsLF1JxAI2PHjx00+1xtvvNGoa0gkEnbp0iWH2l6n01ncq/jg55tvvjF6z/Ys25YCsCW24a0+nYJAIEB0dDRefPFFeHh4QCKRQCqVIiwsDB9++CEOHDiAiRMnIjAw0BD/rynh8XiGHJrvvvsutmzZgtDQUEilUojFYri7uyMpKQmPPvpokwetdjYyMjKwb9++Ri8CWrBggd3v9fjx46ioqKi3/fLlyygvLydjNpKDBw9CoVA41EccYbvCwkJ8/PHHjSpDrVZjyZIlTW6ja9euce
5r27atU9adVqvF/PnzOfffuHEDn3/+eaMXjSxfvtyhtt+3bx8yMjIaVfbixYuhVqsdWnZT0hza8FYvAIVCIXr27AmpVGpYZaXP9evq6oqePXtizZo1OHr0KObPn48ePXrAy8sLEonEYWJQIBBAKpHC09MT3bp1w/z585Geno5Zs2YhICCgjtDj8XgQiUTo0aOHIVcx8S+ffvqpTco5cOCA3eOQabVaXL582aKXImE+tbW1uHTpkkN9xBG227ZtGzQaTaPL2bNnD4qKiprURrt37+bcFxwc7LR1d/z4ceTm5hrdt3XrVptEddi+fXs9EWFP25vKvSwUCjF48GA8++yzCAoK4jzu+vXrRlfY2rPspqQ5tOGtXgDqdDoUFhYa/eLoc/zKZDJERkZi9uzZOHr0KFJTUzFv3jz06dPHEPNPIpFAJBI1ShTqrycWi+Hq6goPdw/4+fnhscceQ/K8ZOzfvx/Hjx/HzJkzERYWxtkjqdFocP36dWi1WnrT3/fCfzDx/P1ERkZi7969KCsrQ15eHhYuXMh5LGMMv/zyi8X38NRTT+HcuXMoKyvD6dOnER0dbfL4e/fu1dtWWlpq8pwRI0bg/PnzKCsrQ3p6Ojp37tws7WVpXQHA0KFDce7cOZSXl+P06dMNhsG5e/euQ32kIdv16tULWVlZKC0txaFDhxASEmJxvZ0+fZpzn7+/P3bs2IGSkhLcvHkTycnJnKFFGGNIT09vMvtnZWVh06ZNRvd17doVXl5eVpXbvn17rFy5En/++Sfkcjny8/Oxf/9+8Pl8m9WdXgRaap82bdpg165dkMvlyMvLw4wZMziPValU+P333x1i+6qqKhw+fNjosd7e3sjMzMThw4exc+dOXL16Fc899xznffz00091/rZn2c7QLlnThtuiHTCb1j4HkMfjMV9fX5aRkWHWnDKdTsdUKhVTKBSssrKSXb9+naWlpbGVK1eyiRMnsri4OBYUFMR8fHyYr68vGzBggNFyq6ur2cCBA5mPjw/z9vZmQUFBLCoqio1KGsVmz57NvvzyS3bs2DFWUFDAqqqqWG1tLVOpVCYn0up0OlZTU8MOHz7MvL29aQ7gfWRnZ5uc3FtYWFjvnJdeesnkak9L5o+0adOmXmiAu3fvmpycbmxux2effcZ5fHh4eL3vQkFBAXN3d29WcwCtqau2bdsylUpV55w7d+4wV1dXznN+/PFHh/qIKdt5eHiwsrKyeqsiTc0RNma7mJgYiyalT5w4kfP4mTNnOnQOoEajYXl5eWzVqlUmbT19+nSr5m6NHj2aVVRUcN6rLevuP//5j9FrdO7c2ex5jYwxk6vb58+f7xDbX7p0ifO41atXG323BQQEmDX31p5l23oOoCPacFu1AzQH0HwBDLlcjpemvIQff/wR5eXlJrvRH+yla9++PYYMGYIZM2Zgw4YNSE9Px4ULF5CVlYWMjAysW7cOIpGoXjkikQjr16/HiRMncPbsWVy8eBEnTpzAtv/dhqVLl2LSpEno06cP2rRpA1dXV0gkEri4uJj81VZTU4OjR4/i1VdfRVlZGXX73ceVK1c4902fPh1t2rSpt33ixImc51hav2PGjKnnB35+fhgzZoxF5ahUKpPXeHDeZ9u2bTF27NhmZStr6uqZZ56Bi4tLvV6PZ555xml8xJTtnnvuOXh6etbZ1qVLFyQlJVlUd8Z6HADAy8sLI0aMqLd96tSpnGXZO9vGr7/+Cn9/f/j7+8PX1xcuLi4IDQ3F3LlzUVVVxdn+vv766xZfa/Dgwfj+++/h7u7ukLqrrKw0q9f5/u/pkCFD6m1/+eWXzfYve9m+uLiY87gnn3yy3jaZTIahQ4fCx8en3ufBUSl7lu0M7ZKlbbit2gFzoVUC+HcY+EbuDUyfPh07d+7E7Nmz8cgjjxhElykxKBAI6s21Y4zB3d3dMNfD2GIMgUBgGJ6zNHfwg9dSqVS4d+8eNmzYgA0bNqC0tJSyhzxAfHw8Tp48aXRf9+7djW43lYvUUgEYFhbGOSRlCaYaucjISOPPEdqhWdnKmrrq2LFjo+vX3j5ije0ezFTQEFwCg9M3TNy/sQnstkStVnOKFi6ef/55dOnSxbKXnFCIL7/80mRbbuu6MzbJX6PRcIpqLjt37tyZc5jxwWFwe9neVL1x2e/bb781yzb2LNsZ2iVbteGWtgMkAK0QUvqUTydPnsTAgQMxbdo0PProo5BKpYYUb+agF4YNHdOYVbparRZqtRrl5eVITU3F2rVrcf36ddTU1JAxjeDu7o4+ffpYdI5MJjPpL5bANYHZ1dXVYj/lgis1kkgsala2sqauuM4xZUNH+4gp23GtavXx8TH7XpRKJWpra43u48qrK5FImtUPA2sW6QwePJjzB4K96s6YeK6pqeH0Aa5rhIeHIysrq0ltHxAQwLlvxYoVGDBgQIPiuinKdoZ2ydI23BbtgCW0+iHgBw2jVqshl8uxf/9+PP/880hMTMTGjRtx7do1VFdXQ6VS2WSllbX3plQqUVlZibNnz2LVqlXo378/Zs6cifPnz5P4c2JoRbZ966q5168t7r8l9/q7u7sjJSXFqhehPp+qI+vOWHn2tI89yw4NDeVcdJOeno4BAwbg8OHDVt2DPctujm24o9sx6gE0Ibb0qwnPnDmDxYsXo2/fvkhMTES/fv0QEhJiWPV7/8cWaLVa6HQ6/P9A3dBqtaisrMTFixeRmZmJQ4cO4fz586itrYVSqYROpyOjNQL9UFRlZaVhDqiXlxetoibIR5yA6OhofP/995zDYw3Rrl07qsRGipIxY8Zwhms5efIknnrqKXTv3h1Tp07FpEmTzF6lbc+yCRKAjRaCWq0WNTU1hhARv/76K8RiMYKDg/H4448jOjoaXbt2Rfv27eHt7Q2BQGAQgjwezxCXz1jZKpXKIPKAf+ciarValJSU4NatW7hx4wZycnJw5swZnD17FtXV1VCr1VCr1U3SC9mSqKiowIYNG3D48GGcOHGCc/iEIB9pCT5y/PhxowLKmduR2NhYTJ48GS+//DLEYrHV5Xh4eLRqP7aF7efMmYNvv/3W5AKGc+fO4e2338a8efPw3HPPYc6cOZxzZx1VNkEC0GZiUC++qqurUV5ejitXrkAoFBo+Pj4+CAsLQ7t27RAYGAhvb2+EhYVhxIgR9USgSqXC1q1bcfnyZZSWluLevXsoKChAUVGRYZhZp9NBo9EYPoRt2L59O15//XXKqEG0Gh9hjJlc5dwUdOrUCS+88ILhb4FAgLZt2yIiIgKdOnWyS7aP1vruaqztO3fujKVLl2Lu3LkNHqtQKLB161Z8++23GDlyJNavX2+yF9aeZRMkAO2CVquFVquFUqk0bCstLcWNGzcMw8ECgQAxMTFITEysJwA1Gg2+++47ZGVlGcSe/kPYjy+//BKvvfYaVQRBPtLEREREYPHixVQRzYTZs2dDpVKZTHX3oPDcu3cvMjMzsWPHDqNhbhxRNsENLQKx8S8tjUYDlUoFhUJh+HBNYFUqlVAoFHV6/Aj7ceXKFbz99ttUEQT5CEFYKhb4fLz//
vv44Ycf4Ovra/Z5JSUlGDNmDC5cuNAkZRMtWAByiSvG2L9xsolG1WFLWlm4evVqKBQKMjhBPkIQVjJ27Fjk5uZi+fLl8PPzM+uciooKjB8/vsH3iT3LJurTrIeAmY6hvLzcaLytkpIS6Bj1qJkj/kpLS432Psrl8hbzpVKr1fjxxx9NHvPII49g6tSp6N69O7y8vHDhwoVml0WDIB8xRkhICMaNG2fROQ899BA5RQvAHrZ3c3PDvHnzMHPmTBw4cADbtm1Damoq1Go15znZ2dk4deoU+vbt22RlEy1IACqUCuzevRuPPvooPDw8wOfzodFoUFJSgtTUVJOrioh/UalU+Omnn/DKK69AKBTCxcUFOp0ONTU12L17d505js2Za9eumUzCvXnzZrz00kt1tgUGBpKDtCJaso907NgRK1asICO3Quxpe7FYjFGjRmHUqFG4ffs2PvroI6xbt45TrG3YsMFskWbPsol/adZDwGq1Gj/88AM++eQT5Ofno7y8HDk5OYbAyDSnrmG0Wi1ycnIwa9YsXLx4EeXl5cjPz8dHH32ElJSUFiOiL126xLkvKSmp3oudaH2QjxCE9QQFBeGjjz7CgQMHOLN3/P77705XdmumeQ8BM4bKykqsXr0aX3zxBaRSKaqqqlBVVWWyu9iR6EO5PHg/Go3GaYZXlUol9uzZg0OHDsHd3R3V1dWora1tUbHxrl+/zrlv2LBhLeIZuXye5sa0Dh+xVSD6VtkTYuO6M5bRwZ72cSbbDx48GJMnT8amTZvq7btx4wY0Go3VaVDtWTYJwGYqAvWrbXk8nlO97PRzEdPS0urlY1QoFLh3955T3K8+KLVKpTI6p7IlUFlZybkvODjY6HZnjL3I4/E49+Xl5RndfufOHWrpnMBHTNlOLpcb3W5JekeRSAQ3NzdUVVXV28eVsaSyshIpKSlG93Xt2tXi3MjNFVN1ZyyvLwBUV1dzlufp6Vlvm0wmA5/PNzoyxTVaVVZWht27dxvd1717d8TFxdnd9qdOnTK6ytbd3R3PPvus0fOfeOIJoyJNP0VLP3XCnmU7K/ZuB1qVAHTmng6dToebN2/i1VdfrWd0xpjJ5OBUh7bF1BeooKDA6PabN2865YuKi5ycHKPb//vf/5K6cwIfMWW78+fPG91uaQBff39/oyLg6tWrRo8/duwYXn75ZaP7Jk6c2GoEoKm6u3z5MrRabb1ePVO2MZZ9RCAQwNfXF3fv3q2379q1a0bLOXToEKd9XnnlFYMAtKft9+7di1WrVhl9nqFDhxp91nv37nHWzf1ZXexZtjP/2LB3O2AuNGZgZ7RaLaqqqlBZWVnnU1VVRXMUHYipYYEjR44Y3Z6enu50zyGRSDj3/fjjj/V6ATMzM3H48GFyACfwEVO2++GHH+q9vM+dO8fZQ2NKxBjj9u3bOHDgQL0fqJ9//jlnWd7e3q3K/lx1V11djW+++abeD+W1a9dyluXu7m7RNa5du4ajR4/We3ds2LCB8xoP5sS1l+3bt2/P+W77+uuv621Xq9XYuXOn0XNcXFzqiDp7lu2sOKIdIAFIEPdhKqZUSkoKfv3113q/uFavXu10z+Hj48O5T6lUon///tixYweysrKwfv16JCYmkvGdxEdM2a64uBj9+/fHvn37cObMGWzYsAH9+/e3eC5zp06dOPe98MIL+Pjjj5GdnY2MjAxMnDgRBw8e5Dw+KiqqVdnfVN298cYbWLJkCf7++29kZGRg9OjRSEtL4zy+W7duRrcby8mrF5TPPPMM1q5dizNnziA9PR3PP/88MjIyOK/xyCOPOMT2vXr14jxu4cKF+OWXXwx/37lzBy+++CJOnTpl9Pj+/fvXma9oz7KdFUe0A2b/6KVmn2gNmMoXqdPpkJCQgClTpqBLly7Iy8vD119/bXQ4pakJCQkxuT8vL8/imF+EY3ykIdtlZWUhKSmpUc/Qt29fbN++3ei+iooKzJkzx6xyeDwehg4d2qrsb6rulEolFixYgAULFphVd0888QTnNfbu3Wt0n1wuNzsLjUAgwJNPPukQ2/fq1Qtt2rRBUVFRvWPLy8sxdOhQBAcHw8/PDxcuXDApVp5++uk6f9uzbGfFEe2AuVAPoAXweDxDnt+m/BCW89hjj5ncr9FosGnTJsyePRvr1q1zSvGn70EwNYmYcF4fcYTtxo8fD5lM1uhynn32Wc6FLy0VW9XdiBEjOBciTJo0yeQcMHOZMGFCvZRp9rK9QCDAG2+8YfKcwsJCnD171qRACwwMxLRp0+oJWXuV7aw4UxtOPYAWiD93N3eEdwpvsmXmjDHI5XLcuHGD5g9aSFBQEGJjY/Hnn3826+fw8vJCXFwcTp8+TUZtZj7iCNt5eXlh+fLlmDFjhtVluLm5YcmSJa3O/raoO1dXV5NBlwMCArBo0SK8++67Vl/D29sbCxcudKjtZ82aha+++gq5ublWl71y5Uq4uro6tGxqw0kA2gxvH2/87//+L9q1a9ckPXFKpRIbNmzA0qVLW1SMPkexYMECDB8+3OzjExMT602edgZee+01ixoPrvAQhON9xFLbcQ2PmeLNN9/Er7/+itTUVIufXyAQYNu2bZxz1Vo6jak7APjss8/QpUsXk8e88847OHLkSL05pWa9sIVC7NixA6GhoQ61vVQqxd69e/HEE09whsVpyO8nTZpkdJ89y3ZWHNEOmAONJ5oJYwzFxcXYunUr+Hw+XF1dHfqRSqVQKBTYtm1bi0nP5miGDRuGWbNmmXXs4CcHY+PGjU75HBMmTEBCQoLZz5ycnEzGdxIfscR2SUlJmD59uuWNOp+PnTt3Wpy5xN3dHfv378fIkSNbrf2trTuZTIZt27aZJUQEAgH27dtn8VxdLy8vHDx40OTcTHvaPioqCr/88gvnyl0u5s2bh08//dTkMfYsu7m34da2AyQAbYxSqcSWLVtw7tw5hwcJVqlU+PHHH5Gfn0/Dv41g1apVWLNmDedcGR6PhylTpiDtQJpN5urYA4FAgF27dmHKlCmcx4hEIiQnJ2PPnj1O+xyt0UfMsZ1YLMaiRYuwa9cuoxklzEEikWDz5s3Yvn07YmNjG7ynSZMmIScnh1aNW1h3QqEQY8eOxe+//47x48dbLBi3bt2K6OjoBq8xdepUnD9/HoMHD25S2/fu3RvZ2dl4++236yU3eJA+ffrg+PHjWL58uVl+bM+ym2Mbbot2oCF4rIHIvzU1NRg1ahSOHDnCGVG8NeHi4oL4+Hh8++239eIw2QudTofbt29j8ODBuHTpUqsXgEKhEEOGDMGuXbusnvRcVlaGtLQ05OTk4NatW5BIJOjQoQOSkpLQtWtXAP/GosrIyDAaHNvf379emIyioiLOYMwxMTFGl//n5uZyBmrt0aNHg1Htb9y4gZ07d6KgoAByuRyenp7o1KkTXnjhBQQFBdnkGubw559/ory83OiLzliCdkfVlalzHn74YQQEBDjUR7hsV15eDl9fX0REROD55583PP+qVas4e3BTUlIwZswYs+yTnZ2N3377DTdv3sSdO3fg4uICHx8fREVFIT4+
3mRoisagVqtx7Ngxo/t8fX0bFD/mYo0/mcuDdScWixEYGIiOHTsiISGh0d8fxhiysrJw8uRJ5Ofn4+7duxCJRPD29kZMTAzi4+ONZhax9v5tZfuysjIcO3YMJ0+eRFlZGfh8PsRiMdq3b4+RI0ciPDzc6nu2VdmW+kVTt+H2bgeMOZ9Jqqur2VNPPcUEAgED0Oo/PB6PeXp6sm3btjGFQsEcQW1tLVu3bh1zdXUlGwBMKBSyhIQEVl1dzQiiJbNy5UrO70FKSgpVEEFQO2B1uTQEbMWvtcrKSqxYvgLFxcV2743T6XQoLS3FF198QQs/CIIgCIKwzWgaVYF1ouza9Wv48ssv8d5779l1+TnN/SOIloFcLkd+fr7RfSEhIUaHl+yVAYAgCGoHSABaiUKhwObNm5GUlITo6Gi7xAak3j+CaDmkpaVhwoQJRve99957WLZsWb3tpkI/0OIegqB2oDHtgNlDwJR9oC6MMZSWluLDDz+0W4w16v0z4bh8Pvkk0ax4MHfr/WzevBlyubzONqVSiT179nCeY2oBC0EQ1A7YRABKJBKymhE0Gg0yMjLw888/o7a2FhqNBlqt1iYfjUaDkpIS6v3j+DHSXKK+E4Se8PBwzvAWxcXFePzxx3H06FHcu3cP58+fR1JSEgoLCzm/A507d6ZKJQhqB6y+lwbHLXk8HsLCwiAQCBwe+87ZYYyhoqICS5cuRfv27eHh4WGzstVqNdLS0pCXl0e9fw8gEAgQGRlJeZGJZoVIJMLYsWOxdetWo/tzcnIwaNAgs8qKiYmBt7c3VSpBUDtgPwEoEAjwxBNPYMuWLZSBwgg6nQ4XL15EYmKizYcklUol9f4ZQSKRoF+/fiQAiWbHq6++ytnwW0JzSXxPEITztgMNBoJmjOHu3buIj4/HP//8Q71RRJMiEAjw6KOPIjU1lXpAiGbJ+PHjsX37dqvP79q1K7KysmhqDkFQO9CodqDBLhQejwcvLy/Mnz/fbtHiCcKsXys8Hnx9ffHBBx/QHECi2fLpp5+iS5cuVp3r4eGBHTt2kPgjCGoHGt0OmDWG5uLigqFDh+Ldd9+Fn58fDb0RDofP58PPzw+LFy9G3759KQQG0Wzx9vbGsWPHEBMTY9F5/v7++PXXX02mlyMIgtoBcxEsXLhwYUMH8Xg8uLi4ICoqCl26dMH5nPOoqamBTqejIWHCbuj9TiaTISIiAmvWrEFSUhL1/hHNHldXV0yePBk6nQ7Z2dlQqVQmf/y8/PLL2L17NyIiIqjyCILaAdu8YxuaA3g/jDGo1WqUlJTg0KFDSEtLw99//43KykpYUAxH4Y09nTWJARljKC8vryeE9aFKGuqpUqlURuMI8vl8eHp6ttpYdzweD54enugZ3ROJiYlISEiAr68vxGIxtRpEi6KiogJ79uzBqVOnUFBQgMrKSvj6+sLf3x+9evVCUlIS/P39qaIIgtoB275nmRXKjTEGlUoFnU4HrVYLhULReAFoAyHWFOfW1tZi0KBByM3NrVOOm5sbvvrqKzz22GOcWUK0Wi1+++03vPzyy3VEII/HQ2hoKNLT0yGTyVqtAJTJZODz+RAKhRAKhTT1gCAIgiBshFX5y3g8Xp2emNY8JFddXW1UmPD5fPj4+MDf3x8uLi5Gz1Wr1fD29jbayycUCuHv79+q65YyfRAEQRCEEwlAelGbXy/6jzX1ZupcgiAIgiCIJhWALQGtVmvVULBWqzW5T6vVcoo4rVbLuYiGMQaNRmN19hWuYWeCIAiCIIhWrxK0Wi2qq6uRl5eHmpoai0VgTU2N0WwdGo0G586dg5ubGwQCgdFz9ccYE5G1tbXIysrizBloCjc3N4SEhJi8NkEQBEEQrRerFoG0FDQaDa5evYoZM2bgr7/+MtmbxwVjDFVVVUZXAUulUs75f3rUajVqa2rrrWLm8/lwc3OzaghYKBQiNjYWa9asQadOnUgEEgRBEARBAlBPTU0NJkyYgAMHDkChULQco/7/RTrDhg3DN998Q3HzCIIgCIKoQ6uNq6HT6SCXUJCkKQAAAORJREFUy/Hbb7+1KPEH/NsrqVQqcfz4cVRUVKAVa3yCIAiCIEgA/h88Hg8ikahFL5YQiUQ0/EsQBEEQBAnA+wWgu7s7xo0bBw8PjxYVZJjH48HDwwPjxo2Du7s7hZIhCIIgCKIOrXoVsEgkwty5c8Hj8fDtt9/izp07zX64lMfjISAgAC+99BJmzZoFiURCXk4QBEEQRF29wGiCGEEQBEEQRKuCkqsSBEEQBEGQACQIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBNGk/D88LwuWCBiSZgAAAABJRU5ErkJggg==)Assignment 5
###Code
# In this assignment, we will visualize and explore a CT scan!
# load numpy and matplotlib
%pylab inline
# we are using pydicom, so let's install it!
!pip install pydicom
###Output
Collecting pydicom
Downloading pydicom-2.3.0-py3-none-any.whl (2.0 MB)
     |████████████████████████████████| 2.0 MB 5.4 MB/s
Installing collected packages: pydicom
Successfully installed pydicom-2.3.0
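###Markdown
As a quick sanity check before moving on, one can import pydicom and print its version. The cell below is a minimal sketch, assuming the install above succeeded.
###Code
# Sanity check (sketch): confirm pydicom imports and report its version.
import pydicom
print(pydicom.__version__)  # the install output above reports 2.3.0
###Output
_____no_output_____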
###Markdown
**Task 1**: Download and visualize data with SliceDrop! [20 Points]
###Code
# Please download https://cs480.org/data/ct.zip and extract it on your computer!
# This is a CT scan of an arm in DICOM format.
# 1) Let's explore the data without loading it.
# TODO: Without loading the data, how many slices are there?
# TODO: YOUR_ANSWER
# There are 220 slices: the extracted archive contains 220 .dcm files, one
# .dcm file per slice in this series (see the counting snippet at the end of
# this cell).
# 2) Let's visualize the data with SliceDrop!
# Go to https://slicedrop.com and drag'n'drop all .dcm files into the browser.
# Please use the 2D sliders to show axial, sagittal, and coronal slices in 3D.
# TODO Please post a screenshot of SliceDrop's 3D View in the text box below by
# using the Upload image button after double-clicking the cell.
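# A minimal sketch for part 1 (assumption: the archive was extracted into a
# local folder named 'ct/'): count the .dcm files on disk without opening them,
# since each slice of this scan is stored as one .dcm file.
import glob
dcm_files = glob.glob('ct/*.dcm')
print(len(dcm_files))  # expected: 220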
###Output
_____no_output_____
###Markdown
###Code
###Output
_____no_output_____
###Markdown
![img.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAB3IAAAOeCAYAAAD2iFGaAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAEnQAABJ0Ad5mH3gAAP+lSURBVHhe7N0JuHZXXd7/HZBBJoFIQkLmGQgQMIyCggwFFBTrTBWtldpStXrRWvu3LbTWy17tVW1ttdJqrYJeDiAokyjKPBhIwhSGTAwhAxACiMqQwP/5rHPuNysP57x5p0De99xfWNl7r/Vbv/Vba+9zznrX/az9HHanO935iyuWL3zhC8stbnGLBa4PO+ywcb58cZUO++Iq77DlsOUWy7XXXjtscctb3nIcoQ74yHmOG578d+XH2cr3DWxW11re8Lpiw3RYJ465DtZ9zHY5zvaY8+b+3lSkjfV2XY84Vx3+wiqEW6yKDvvC6h6sslbWww6j/66TtbIbeat7sfr/4Aure3ML1ys/h20O4CprlA8T/9H06jgOqzi+qG1X4nDmWnyb2deP20Y7/IlvucWGPeYx/3KxPp55DpOHOa6tYlwvD/EV2OT5cK48Nq6TkFhyjXE2rg3o6rh5Ov4jP+ebI7pxr11uXIfEm1jXr3Mu1jlvO9ZtEvfct3mMYx+Sn7HZqk15h3loxsPNr2dulbfrobwe1VVNMxu+Ga0ZbrLh+/r+p99z+wcS46KNjE/aSl7GYUvUUb7ZQREn9vW6w68+pxsr4/RxZrt+Dt+bx5zvgp8Rw+p89XPsNrBwj+axTExznqOzOT8xzPF90e+H1f8PG/fuemafX9isq578w/R/da7KF8SlPGOiDf5Xxxt6vCFbxTy38SVjzO/EXGff4eP6+hvtJPyN3xOJB3O8aR/Jn0n5hs+NOus2NxjjzXPH2GYMkpfrdZ+rw0YvVv9xr2TIl4K/FStLZxzuapePsGEv7/qfmbQR/IwfdthmzCs7JZ6B2SZ+4ztlo87UP4hj3Q6zTXyl/2G2mdkuP6Q8fnfZrsJY5RiC1cnqP67ZKGOzSoeNsVnF7FLBqpy9A9jvYrTh9/Qtxvxg1fPhOsbjv6qv7Ny3G8QyjqvCXe5cr9pmJ45htnHM3MD/rq+wWbZhOPiS/q7YqLNBynjYiNvRcyjujfxVxuo/m1ej6uo/jqtLI+X3SHyOfq3ud0i7o40tYtkp+HkZfytXY+z37/iZXV36O7t6VDbS6j95djaOh63m7V+1+Zz4ubvh7wblYyw3xzb3YDDuzcZxY8K+Ud+90r72HPldRTYMU3/c09X/RohC5mvzfJRvxhhfmc+Oe+x/m8Z+VsY1b8PP6myj4VWeCqv/b/yB2yzf8L06DL8M1p+V0YaizXOlYyyRw8qHvFso194trtuwHeMqBj6+MMY29tryu8bYZnzzb6YN+416N2BV7fpp0/Vjgo34N31re3RWhv9vjO9WsB8xr9gY39W139sjZ1S/QSxbxiUwbcTPZvHKdPhXZyMYZcZl49zzqcg1m9hm3DC3i9jOR+Q8v7/X/wbEPil/E6S5PD5Sfu3q37TJR85vWC99cj61uzLxuI3fm+w379VgVHdnVraj3vX+Nn7ulKz+u3lfkesxf1Zr091G2xtlN/BjLFbX11238TwmHyPG1f/G/1d5rm9QdyRXm7Gsilxv5G2Ub+RvxDLbCFSunwd84QvXDTt1bvlVtxz5G/+uVLqqNw6rcRsRrfIPu26VsbK5PmfF9c+6e7txtqo2GjXXEFNi3aizMhttSrmfqZPzjX58KamnrTxTIXVDbGf/SB4fW62HrNeDa8x5OU8cqSMNc35Wh111XK+ScTaqq5KN/NVhY0RXfVLReG380lyhDpuVxera+S1uufo5WNmMn6VVui7n2lmVb/RJZT7Esrq/t1jdX+48+CtH8tyt1eiPes5HHzeKR550y1XfbvlVG78fr9tsQ9ueF674/8J1m/mrBq67Lvdwo+/5ncw39yOkkcOejcyNcv/eNFtaedjol+dkda79/Btj42dmZbfqY36vaU9so+7qefVcD69T+WjH/7Vv8cavxlWbtzhs1Y9VHYz4VpjbCtuYQrziy7MqNv0afVrljXquVVpdu4fSuHYcBhuMsUme01XePEbunfG9Tj82n4dbrO5d7NXPs7/r53fUXfV1dT7c+3lcXZuvbngW1kabm02tzjeOoz+bmaPuaG+jr3mmRhubFYbt6lSM2hgtKNfmMFj9f3WP1N94zjfrbPo2dhsXq2foloet7qfTVWyj7qqtTXttsx/PsraV8bOC769aPX+eBfd9Fe3Iy31kJz4hqrHpfdf9dM/0Mf928Yy759d+/tqRj82mBtrB6JN2Vr36/OeuHfmJRZzXbto5H331v1Ubo97qZ5bT8ZyOsUtUG/dfF8daYPJX/xk/V07dj2GgzPO2MT76KU7xa08az9LqqMyzPv4mqTvY9LPyeu3K9638DK+urr12NY5fvG75qtVzdpvb3mb4yN+38bOkznCzamNzfEZbm3/z/F4fjLY3xlmkt7rVrYbduE+rNO7Lqr751nUrv/GpKR4yvxn3cZXvZ9wYGMnV5apgY2ysn2pqPA9i9b+Vk9H8ynD8btocn/nnyThdt5o3yPM7ZcTkf6t6QQvakq+9VcsjuM2z4dPRPU3fRhyr88Nyj1fjaZzGs7vCOH1hs60R38jfuEdwlL7K77lV+bA1hp6b0c6G3zGHWZVbO/7C6mExTDxs3OOVT2Oq3D1aZXmm+XTOv78Bn//85/zwKhxt514IxflXjdhG71cR6vhGu/rkKJaNMd2IX//Vu97lRk1jPZ55obBb9ScxQT/lr4pXx43r8bO1ciJ+1/Asjedx9T95kjb1K/biz8/2+Du3yufYs7P6cRnjKUZx83Pd5zd+boe/zbbEIr6NnsPFiG5VvnKywu/jW26eKzI+/IyxkLfqm98hn7/288utb32b5da32ohR/Nde63ncGPrx92xl715ct4pfbXlsr/385zd8rtKIib17tHpu1Rmxrk42fj7dm1X9lS08Bq43nn+/I8S+MSb+bTbGcuRt3B/H+NiYKw83I95Vxjgf7a2OG3/zNn53pc8b92b1TDBf/e9WX7X6eV91cNTkU8SjTSPnalUmvtV9cL5x3zxXh208d6sYN+6nsVr9flr9/hg/u6v7d+11Gz+3X7UaC/11T/nd+FnaYOP35MbvcnXG/R/5YuZ7XK3Sxr8f5BtvfbzNbVb369a3Wj6vrVUeu1vf+tajT5/73GdXba1+PlfPM9vPj5+FjXumnXFvVm2Lxe/S8TtrZXfLYS+eDXv4W37dF/yd2YhbrKvild1G/Oxyn257m1uP4/iZUd+YD9/86JA52UZl7W08p+pv/Dzr6karq2dgVT7Ge5VhjMa9WLXj58j93rjPG7+n+HaNjeds43eKcvHl/mt39GuzLTbKrtsc99jwxM/Gz8zqfLMv2s/fzFutflb0zXgO/6ukPbGOZ29lw53z0bcV/j5g/H7a1SY747CBtkbfVkfPoDjSt1Xpynj1+2x1fe3wu/KxOvdcmLu5l+N33rgvG/1xzz
Hu+yqNucM0Rqv/jri1N/o5MZpd/UfuaOphD3vYDS1KKaWUUkoppZRSSimllFJKKaV8RRmi9sZpKaWUUkoppZRSSimllFJu7jz96U9fnvOc52xelXJocfLJJy/PfvazlzPOOGMzZ+9597vfvfz6r//68qpXvWozp5SDkwq5pZRSSimllFJKKaWUUspBRIXccijz3Oc+d3niE5+43OUud9nM2Xs+8IEPLK997WuX7//+79/MKeXg5PoXQJdSSimllFJKKaWUUkoppZTyFcRO3P0RcXH88ccv97znPTevSjl46Y7cUkoppZRSSimllFJKKeUgwo7cl73sZcuP/OgzljNOPWkzt5SDmy9+8YvLF77wheXUU09dvu7rvm4zd99561vfulx44YXLLW5xi+Www8hhpRx8VMgtpZRSSimllFJKKaWUUg4iCLnHHH/S8rrPPWB5xYcP38wt5WDni8vyhS8sb/mnt1ge8IAHbObtO+eee+5y9q98YVlu4eW0FXLLwUlfrVxKKaWUUkoppZRSSimlHGTYiVsRtxyq2Jm7v6mUQ4EKuaWUUkoppZRSSimllFJKKeVmw3XXXbffqZRDgQq5pZRSSimllFJKKaWUUkop5WbDVjts9zaVcihQIbeUUkoppZRSSimllFJKKaXcbNhqh+3eplIOBSrkllJKKaWUUkoppZRSSikHGe+58JLlcfe4evOqlEOLrXbY7m0q5VDgsFX64sZpKaWUUkoppZRSSimllFJu7jz96U9fXvayly0/8qPPWM449aTN3FIObr74xS8OAfbUU09dTj755M3cfefiiy9eLrzwwuUWt7jFcthh5LBSDj4q5JZSSimllFJKKaWUUkopBxGE3Oc85zmbV6UcWrzlLW9ZTjzxxM2rfefSSy9dzj777M2rUg5O+mrlUkoppZRSSimllFJKKaWUcrPBd9xee+21+3Us5VDglqv0rI3Tm5bb3OY2y13vetflqKOOWu52t7sthx9++Jek29/+9mPrvB8yx1JKKaWUUkoppZRSSinlpuZOd7rTctZZZy0//uM/vnzVV33V8td//dfL3/zN32yWbs1Xf/VXLw972MOWv//3//5Y3/74xz++fPazn/2yrG1/3dd93fLWt75186qUQ4tHPOIRyxFHHLHc6la3Gtf5mdqb46c//enlvPPOW/7oj/5o5JVysPJlebXyLW95y/FO84c+9KHLAx7wgHG9/j5y11dfffXyute9bjnnnHOWj33sY5slpZRSSimllFJKKaWUUsqBxZo0Mfa4444bIi5x9JhjjhmCrFe7vulNbxrfr2nj0Yzv2zzyyCOH2KSOc6LRueeeu5x//vnLJZdcsnziE5/YtL5p6KuVy6HMSSedtDzzmc8cutK+4mf3v/yX/zJ+Hks5mPmyCLk+wfSEJzxhefzjHz925X7gAx/YLLkefzRPP/305bWvfe3yx3/8x+OHrJRSSimllFJKKaWUUko50Nz2trdd7n73uy9nnHHGcq973Ws55ZRTxnU2IV1xxRXLO97xjiHmXnDBBctnPvOZ5Qtf+MJyu9vdbgi/D3nIQ8ampaOPPnrXrsGPfOQj4zs53/Oe9yzvfve7h4D0uc99btcuwQNJhdxSStkZbPtqZZ8q8kfLcX+SP3qEXK+YOO2005Z3vetdy0te8pJx9AcwybVPL/3d3/3d+GPnj576+xuD9ksppZRSSimllFJKKaUUa8Y2GxFwH/zgB4+3SDr/2q/92lEW7nCHOwy7r/marxkC7t/+7d+O3bt2CNqJS8i1E9faN6xDq8OPrxZUj7/Pf/7zQ8w90N/X2Vcrl1LKzuBLduT6g3PrW996fCeATxftLz5tRIz99m//9vFH7s/+7M/Gjlt/wNb5z//5P4/XVLzmNa9Z3vnOd45Y9leI9QfSdxn4ToP1V2CUUkoppZRSSimllFJK2RkQXYms97///YcQa72aOEtwtY6dtejsoHVtHftDH/rQ8od/+Icjj/gr3eY2txnX8/r1vPOWeHvllVcur371q5e3ve1ty2WXXTY2MR0ouiO3lFJ2Bl8i5BJvvRLCa5B9qmdfhdT5j5Zz3y3gD9ev/dqvbSvk/uIv/uL4A8re9xAQgLE/Yq5PSvne3ec///lbvtK5lFJKKaWUUkoppZRSyqHP4Ycfvnzf933f8qAHPWjsmJ3XsLcTZG0Ussb8J3/yJ2O92nfpevNk7G9s7dpu3je84Q3Ly1/+8uXtb3/7Zu7+UyG3lFJ2Bv7K3EDI9U7/H/mRHxl/kOxi9S7/+ZUSe4M/Utdde+1y3eqYTyn9zu/8zvKCF7xgSyH3v/23/7aceOKJ4xNOF1988fjDeOtb3Wr8MbxBkHuBVzoTkP1Re/Ob37yZW0oppZRSSimllFJKKWUn4btsv+EbvmF55CMfOb4X1zXRdisx1tsdr7766vG1gN4y+eEPf3isdx911FFDCLYh6Ygjjhhvt8RWfuTZifuKV7xiee1rXzs2Lx0oKuSWUsrO4EuE3OOPP375mZ/5mfHpJH9cXvnKV97op4rWYe/7Ae573/uOV1O49l0DXl3xm7/5m+M1FFsJub/yK78yPs1EyL3wwguXW63sP/jBD44vhb96H//IPeMZzxi7jH/jN35jvMailFJKKaWUUkoppZRSys7DOrV1byLuAx/4wPGK5dvf/va7NjJlHfyjH/3oWJ9+17veNRIR16uSCbM2K3k98ymnnLLc7373W04//fSxFk4UDuy8RpmP17/+9eNrBG02OpBf/Vcht5RSdgbeXfysjdMNvFLi4Q9/+BBa3/SmN43XEn/kIx/Z6+QPmj+G3/iN3zi+4J2Y6o/VW97ylrHb1qeX1vFKZ18gf+c733k54YQTxu7ct51//vLWc89dLrrooi3bubHk9dD+GHttRV+tXEoppZRSSimllFJKKTsXr0n+xCc+MZI18Nve9rYjEXO9ofK9733vcs4554xEgCXier1ycM7uYx/72Nix+8lPfnKXn+zOVXbeeeft+n5c17OPA4F177e+9a2bV6WUUg5VthVy/dEhnnq18r7gtcgEWd858KlPfWp51atetbz4xS8e/lz7VNI6f/M3fzN24BJeH/GIR4y83/u93xtx7OunlR71qEctd7rTnSrkllJKKaWUUkoppZRSSlk++9nPjtccX3HFFUNg9SbJfM2gdWwC6fvf//7l05/+9GaNL8UOXTt3+SDUura5ibh7/vnnL3/5l3+5vOMd7xjC8VabmvaXCrmlXI+3w/7CL/zC8sM//MNjY+Ff/dVfbZaUcvCzb19+uwf44+W7A/zwEGGvueaa8Yfl8ssv3/YPlz9sXqNM0PV65Z/6qZ8af/Q+85nPbFqUUkoppZRSSimllFJKKfsH4dVuW2vYvmKQgPvHf/zHQwDypsetvhpwK+zIta79ghe8YNT90z/90+XlL3/58p73vGf42GpDUynlwHKf+9xnvN7cBym8+ZWwW8qhwk0i5HoNhVcp+7J3Iu4d73jH5fGPf/zyxCc+cbd/uLxW+du+7dvG9+n6o0fE9amn/rErpZRSSimllFJKKaWUc
iCxjn3aaaeNN1Q+7nGPWx72sIeNtWxvm9xT+PCWS9+3q/4TnvCE5UEPetDYnVtK+fLgZ88HJy644ILx80jYLeVQYdtXK/v0wvve9749frWyL4L3OuZTTz11/KHyg+OHxWuSvabCF8AfffTR4/yyyy67gTirrj9s3/md3zm+U5cI7LUUXonsk1G+GN5xX3jkIx/ZVyuXUkoppZRSSimllFJKGXiV8t3vfvflm77pm8bmo5NOOmmsi1u/vve97z1svH5Z2t1rke9yl7ss97vf/YYI/OhHP3psbOLH8aijjhq7A61tH+jvx0VfrVzKBr5e8xu+4RuWSy+9dHn9618/duTSnLzevJRDgQMi5LK1mzafOnI87rjjxh9Er1h+85vfPP7g+YN4wgknLO9617vGdwPkDxjh1g/XD/7gD44duOeee+749ITt774k3jn7fXnFcoXcUkoppZRSSimllFJKKbDxKLtwpWOPPXasQduFa5368MMPH5uSrHfbfETMJcZmYxK7O9zhDss973nP5eu//uvH2yXvda97DfFWmaQN9a21WwNXXzqQVMgtZYMnPelJy8knnzxeke716IRdP8eE3auuumrTqpSDl/0Wcv1h8uXRBNwf+ZEfGZ80uuiii8Z3ChBwr7zyyvGdt3bh+sP3hMc/foiyH/zgB4doqz5x9//7//6/8Qkm3yHwR3/0R+N7BXzBvN29Ph3lDx1fe/vppQq5pZRSSimllFJKKaWUUqxP20Fr/fvss89ejjnmmLE+DTv4JJuTrHdHzCXayifoqk/4zYYmYqq1bcLt7MPrlr/6q796OfLII8fatK8ftNZtXfxAUSG3lA2e9rSnjY2EL3rRi4ZwS9Q988wzh5bkKzxLOdjZ7+/I9QftsY997PKP/tE/Wt74xjcuv/3bvz2+25YQ7A+eTzH5gfHaZX+sLrzoouWnfuqnloc85CFDNPaHziuVvXrCa5ePucc9xistCMK2wf/X//pfx5fL+3TTgx/84PGHsJRSSimllFJKKaWUUkrZGwixp5xyynLGGWeMde2tyM5bIi7R95u/+ZuXJz/5yUMcUtdXAz7lKU8Z69uEWsLvOhFzfd/u6aefPr6OkL9SyoHlqU996tCSvOXVZj68+tWvHq8193NeyqHAfu3I9Yfo8Y9//Pijxc4OXK9B9gfxzne+8/gD5cvdibnOJZ9O8gdOOTuvnfiO7/iO5fLLLx9Cr08o+bSS1yiL4VOf+tRIPvnkj56dvHvzvQLdkVtKKaWUUkoppZRSSinlc5/73Hh7pHVpr0J2zC7a4DxiLjGWjfXsT3ziE2M37llnnTXeIGkn73abjtSX7OK1a9YrX6217+77dveW7sgtZVm+67u+a3y3tQ9afO/3fu9INgrSpGwuJOi+//3v37Qu5eBkn4Vcf6R8gbTvtvUH6U1vetP4I6j+8ccfP75nwCeUvJ6CMGt3rjJ89KMfXe5xj3vsEnf9UGmHCMyvP5AEX5+KIsJ+8pOfHK+f8APpk0vE3D19DcVNIeT6lJVYfBG+T2MRoP1CIDZ7rcb3fd/3je/3JTZ7HXQppZRSSimllFJKKaWUrzy+7o+w86EPfWisNVubtt47C7jOJWvS3hb5B3/wB2PN2oYj9XynrrXuvJY59YL1cna+QvCVr3zlaI+vA0mF3LLTocH8vb/394Y28/3f//3L7/7u7+5Kfl69XpmO1Ncrl4OdfXq1sj9QPrH0iEc8YvxA+IPhE0lek0yg9ekHX/bu/JprrhmCrB24+ePn+3L9AJ2ysjvuuOOGwOqPG0H0Yx/72Nhx6/tyicD+IPHjtcuXXHLJ2P1LRPVJqK8U/lDf+973Xn7iJ35i+Tf/5t8M8dYfe3+8ids//dM/Pb4v+D73uc9mjVJKKaWUUkoppZRSSilfaWwmstZs44+vCXzJS16yvPe97x0CbwRcWKuW7y2UduI+8IEPHK9Vtvb9zne+c3y9oLVvpA7f1rr//M//fHnuc587vorQRh87gdfF3lLK/kF/ocm85z3v2cy5nne84x1jg6CNiATfUg5m9knIJWT6zloCrd2xF1544fj0ku+w9SmHfIE7kdcfKKKrXbnETskfPsLvRRddtHxw9YfNa5QJtyDo+gG74oorxlF9O13l+4H0x9Arlrf7DoN9gTBtB7Bdw0TipLwCWn9n2J544olDVF4v83oMX6jtE1fivzlhQuF1At4ZP/dzPXk1yOGHHz7uo3vnPpZSSimllFJKKaWUUsqhgDVcu2utN/s+zVe84hXLG97whrFz1nq0XbgXXHDBWPsmFBGMbEiykcmbKG1AsiGJoEu4JdS6Puecc4Yvu3AJSXYKHuiduKWUDR72sIcNvcjP4To+qGFDYX5+SzmY2WuFjhjodROPe9zjhljpj50/VEQ/X/5OYPXpJX/k/KEi8BJtvXaZIOsPntcp+6Lpr13Z3nFVj3hoh68/gMpdq0d09IfOtV25xN0XvehFQyz2R/NAoD/a/u7v/u7lN37jN8Yf2yRt/dt/+2/HzuM9wff8/tqv/dp4B/sP//APjz/aNyeI6HYSv+51r7tBP9eTT4zpx4/92I8NcT4ieymllFJKKaWUUkoppRwqEHSJPYQgwqsduHbaEoF8PaA1byKQjTE2vNgQZN3aDj95xOC5jrVyR2+WtDGplHLT8KhHPWpszPMBir/8y7/czL0h2alLiyrlYGavvyPXzlrfgesH5fWvf/34lBLB1Q8NgdYfNztZCbp8EG/lEUsl/u1i9bIJn25iI4+NfN8r6w+oP4xev+wVFgRI5/nuAjth/ZH0Wgq7eXfH7r4jV9tE4p/92Z8dr8bwB/Z5z3ve+M6DF7/4xeMVG/e6173GeCCvgNYnr47+lm/5lpFP8PXHnqAtEbLFe3P7tJVxJIgTaE04/CL7/d///eU5z3nO6K/kk2bG1ETEq0LsvD7ppJPGJ1suvfTSTU+llFJKKaWUUkoppZRyaGAt21skrX3bBGON2rpwXt2aVyfDuXVla+DsrVe/5S1vGevG1lxtbpJu6lcp9ztyy06GVvSHf/iHy0tf+tLNnC/FhzN+7/d+b1uht5SDhb3ekUsU9T0A3v9vh6w/VkRNO2SJq/Dq4Tve8Y7D1h86Aq5dnf7I+STT2972tuUVf/Znyx//8R8P8dCrJnxfALIbN98bQNRVjy9isXbt9uWfKLk/aOfJT37y+EJsfl/2spctL3jBC5Y/+ZM/Gcm5uAjMP/RDPzSEav09VCCE+6Lv9Ffyi+23fuu3hphNxPdplW//9m9f/sE/+AfLYx/72BtMWkoppZRSSimllFJKKeVgxxq0r5t7zGMes3z/93//8n3f933jrZA2G20nyFonJf5aM/1H/+gfjTreOGkzzU0t4pZSStk57PWOXH+MvGrYlnWvnfBdsQ960IPGK5H98bI7F+r7flliKWFXGbHUJyXUtfvVJyKIul4/QRT2SgqffvIaZp9k8t2s2dVql6gdskRcf0CJuv7A3tgu0d3tyLWz+Kd+6qfG9xo8//nPX17+8pfv+oJ6+OQU4Vlf+PGKDK9P1petduR6tYad
rPwRmdnZUTwjdpMCn+bSX0Kp7/zNJ758ckuf1//YG0dj7ZNg6qkjfmPM1i7gG0N82v7BH/zBXffXp7Z8x3Hgx9grc0+0aUeuo2fDOLo/PpXG19lnnz364r4TvPXftYkOAd99Yx8I8ER/r3hOPyS2YrLzl4gfPAPKvK6anXvORr+9yltbxtD42SmtPeWdLJVSSimllFJKKaWUUvaEv/u7v1uuuuqqsfHFmqf1bOud1iaDtV5k3dG1ZC3Td+q+9rWvHevL1qutT97UdEduKaXsDPZayCXqPfrRj17e/OY3D+GMiEuU80eNSOlI0LMDlyjrj57kDxph1CuHfQm1Ov4YEk4/+clP7hLm2H34wx8eAqjv4nWtjj+kRGB/FAmdBEOi3ixCbsXuhFyitO+yJZISlL1q2Pf5ziIg8Tg7j/0hF5vyrYRcfbC79xnPeMYQu+1Qnr9o27gQRH1K65/8k3+y/MAP/MDyrd/6rct3fdd3je8czjvdxZDvUDAZMH5ec/z3//7fH58IS70nPOEJY5cwUTWvfd4deyLkBuN+8cUXj0+Q5Uv8CarG3jPh/ujHf/2v/3W8qlmc7rf+E8eNAZGVSH/RRReNtj0vZ5111vKd3/mdY4ezvhhD1/e///3HfTJm+h8x2zP1Dd/wDWP39vd+7/cOYd0zY/yN4T/8h/9weepTnzo+LUc893yw+XJMlkoppZRSSimllFJKKQc/1letQVtXtHnJ2qe1Z+vX1oUj4gbrlsRfm47OPffc8TpmR5uAbmyN9kBRIbeUUnYGe/VqZWIcYY0gaeemP3Cu/UEjvBH/7LgkBhMKs7tU8kkkQqjXMhNO7SYlXhLiiHpe3Qu7QYl5/PqD6Y8iAZjYSFj0R/Pwww/ftXtzf/DHltAo7ic96UnLt33btw0xdR1/kP/Tf/pPy0te8pIhKG8HwdWOWbtUCdnr2FX6r//1vx6v2fjzP//zIT76bt5/9+/+3ejz93zP9yy/+Iu/OPKMKfj87u/+7vE9vnY9/9Iv/dL4jmCvhH7DG94w2uLz2c9+9k3y2mfvmCdy2+lKqH7KU57yJX1zT4i9uZfO5RF+ifEg8ir71V/91fFBAa9w1k99+Zmf+ZkhmBOo//t//+9jfIi6W0Gs9b297tM//sf/ePj6+Z//+VHf/fvP//k/D5H3UHoFdimllFJKKaWUUkop5abFWrN16PPOO2951ateNdZvrb/apGL9OJt/rHkTe31lHRtfG+g7cuWxK6WUUg4keyXk2m1LzM0njgi0BDuiGaGO0OfcLk7CHwHUp4J+//d/f/xBI4ISJomw/KiTV+zKI8bxG//+KNqd6dW8/ghqjzBoZycRWRv7g3h+4zd+Y+yytTuXEElItcOTGB20a9ewT2VpdzuMj34QofUvyCNaEqt9Cb5XbPgOWjuMfUrLH3rjRLz0mmC7iE855ZQxVl5R/CM/8iNjFypR9TWvec0Qu+30/eVf/uVxzafdsXbsGlMisB3B//t//++R7IIVw75g8iJOY8WvHdm+xzgCbSAom9T8m3/zb8Z36hKafTeEyQ4B2g7iH/3RHx0+XvjCF45dzPpuguO7eX/zN39zvLqaEG6nLWHdrut1jInnyvMkNrulicJEdh8usEPZePnAQCmllFJKKaWUUkoppewt1l/f9KY3jXXcV7ziFeNtj9ZHvQ3QxiBC74tf/OJx9NbD+avlSimllAPJXgm5xEC7cSPU2hVpBy4xl3ipnPhqJ60/al5VbDcnsZHoxp5ASvBkR3gjUHqdsD+ExFJ+Cabs8ypjYi7fBES7eYm6drASS+3Q3FfE+frXv3759V//9SGk2un7zd/8zcvTn/705ZnPfObYfepVyPuLMSOyet0FEdJrgomjBGLYrax9/Uz/9dFrkL2qmJjr+4TPOeec8XoPQjcbr+swidAP3xFLQCXqGhdjZQewZNxnYXlvECPBlWDqvhFwtUFQnXGfTGqIqoRZySfS1PNaZkKu77j1PLzxjW8ckyHoC/+eEffCGOiv8bKzdx3Phe9XNo7Ba6WJ4V75LC5jRtDtrtxSSimllFJKKaWUUsq+YJ3WuqM1zl/7tV8br0/2PbgveMELxkYVb5CsgFv2BXrK/qZSys5hr4Vc4phdlXbM2v3pNbt2qRITiX6EOQItcdYfMiImkdeOV7s5I/wSIn16yXftEuDY+/QSwdK1P5KEXEIv0dduX7s17bQkuIJoZ+eqa+Ly3iJe7doNSsx92cteNgRC7fzET/zE8s/+2T8b39/qe4AjQO8tBFR1vUZanETbCy64YLN0A+IkIfL5z3/+SERNAqgdqURJ42fcfI+snapJygi4+uG++O7hjA1x9C/+4i9G0sfd7SS+MdxP9zfYOZxXP4fsKhb3DDv37MEPfvD4Xgn3e90GhP93vetd494bM69P9vrt9Z3EBH/jtQ6fnh917QA2Ltu9nrmUUkoppZRSSimllFL2BGuv1iR/67d+a/l//+//jVcvZx28lL2FNuKNo/QO2or1fBuxaC1JvrJQyrVyySY3Sd31N2aWUg5d9vo7cglrjvleACKjXz52ihILiWe+T0Dy2mTfW2qn6P3ud7/xCyp/4IizXqXrFRV2YfLn1cHPfe5zh6DqE03+IPqF5BcaUdMuS6KqXaZ2yhJy/SLTjvN9+SSKeAidz3ve85Z/+2//7Xi1sldi2B380Ic+dHx/68/93M+NVyPvi5hLyCYqit9YbYW+Ezi9Elgi5BLB/ULWNyK178n1Pb3/63/9rxskO1eJpSYTxksbBFOfDos/47w/3ydMJJ7/MLjP80TFOWF/qzb8YclOanZ24hKGt4KY6767j/4wGbM9FWPz4YFg7DxvpZRSSimllFJKKaWUUspXChu06CTf8z3fs/zgD/7gSD/0Qz80vprQ20F9JaGvG/yn//SfLs94xjPGcf2cjWS9/4d/+IdHUt/XOX7Hd3zH2BS1L/pIKeXmz16pksRXr5QgHNohSjzzimPiptcAE1V95ynhkLBrZySB1S8qr+NVn9gI116dS6wjdtrxSbzk05EdAZEISLQl5hI8iXOESvmEVTtPCcuEP2X7sms2EBm9GtgvyH/5L//l+N5Vsfi+WTt2s6t2b9B/AvTe7hgmyBIx9ckv4Gc/+9lDwCYo7y4RgY3TgYQoKgWCq3HZE9xfzwb0Q3/Wd9kGcc9+3X/Pxp7g+ZmF3FJKKaWUUkoppZRSSinly4G1b3qFrxk8++yzh6bwhCc8YbxZ84lPfOLy+Mc/fiQaw6Me9ajlG77hG4aNN1NK3rZpY9lDHvKQcZT38Ic/fJeN8jnvkY985PLYxz52+PPmTu34ekNH7bD1VY/789WUpZSbB3ulehJxCa12ZBJpP/7xjw8hlfgmj1jptbcEOKIucY1ol+9z/au/+qsh1tmZSXwlBqsnn4BKIIwQy6d6fPkFSAz0amaCrny2Xr0siYFASDTdHyGXeKxdIvQrX/nK5ed//ufHa5f1iSDpUzM+2bI36Gv6TJT0mmGvbt4K40fc/oEf+IHxi9Yv//THjlYCtr5uldwL90ScB/K1CsbfL3vjT4i
/+OKLh1i/p0Iuu9nWc8HnVhgn99ZYOfre3a1eo7wVxm5+3bNd3tvt/C2llFJKKaWUUkoppZRS9gd6hA1mNp1FwCWqeovm937v9+7acUu0pQtYZ2dPF7FGnrefwpo4rUSiA0CZdW8pu22zDk4D4CftE4vt1tXmP/yH/3C0702pBF96hPi0b/PYdhutSik3T/ZK9fRLxA5c4ppfSgTPd77znUNIJPL5nlLfMesXgV8KfhkRR712lwjMRgKRl9jmqJxw5xcOsZZYSpQk2vLlF1eEX7+wCIN2z77//e8f8cgnkirzC29P8AuPOPkt3/It4xepd84HPgjFvu/gT//0T8cuV4Lqfe5znyGo7g1i9bpgoqL4zjzzzPGpma0wXuKJWGxsvDJZ/423X/Zb4Re37yD2agZCbn6pHwjE65c8Md1Y26VMPM/O6hvDOBJ+9UVcvtN43t07414bA+Pvu4T13XOzJ/gDZJe3ul6LLUbxllJKKaWUUkoppZRSSikHAmvctApr2fe85z2XRz/60WNd/l//63+9/NiP/dgQVO2E9ZZO6+AEV1qEdWtfTWi9mxYSzWSrREOx6YkeE81DXfWS6A455msP2dEKaBhie/CDH7w87WlPW37hF35h+dmf/dnlqU996tAZiL/i359NcaWULx97/ZNKJLMr02t88fa3v318zy3hTT6R1y8XwhqB1S8ev0C8kpgw65eKX0Lvete7Rl1ind20fql4FQBRlW+vYybqesWwX1yEwGuuuWaIxtogIhPq/MLhO7/48mmVG0OMdof6VIpPpxAY8+mXGe0Rq+EXp77sDX6ZXnLJJcv73ve+Ieb6Be51B/MvTO3qg377xI6dqOwJ43Yr+yXsO4a9MsF35kbQhnoPeMADxusS9EF8Ecv3B3ERhZ/85CePNo3vO97xjuUP//APx+7fPYVYr//EbH8YxEqQds/XcT/0gUj8+te/fvR/HX/4ttrRS5Q3tupecMEFYwzFXEoppZRSSimllFJKKaXsL9amaRbW4p/5zGeO77n1KmNv17RRzA5doi1oCdnYZp06ybU1bDoGG8k1zURynmv2NAUag+vUiQ3dID6cs0l7jvKtyYvLWj99Qcz/4l/8i/F9uwTnA/l2z1LKTcNeC7nEVEKZXaM+1WHn4wte8ILlZS972RDPCHTEV58EyY5b4iLxTb5fOvJf+tKXLq973evGzlq/UHxy5Zu/+ZvHblV+jz322CHOERT90rGz893vfvfy1re+dezE5Z9YzEYb/BB291RozSdY2PtFe9/73nf8sp2/k1W88uwU9kvwnHPOGQLy3iL+F7/4xcsHPvCBIcJ6B/6///f/fhyJmsaSgP3jP/7jo00it/5Iz3/+84dwbbepT/b8xE/8xBB87cAVl1cj/LN/9s/GKxNe+MIXDkHdL3HjLXaJzwi/NwZR2S9v/u0O9ikd8RHdf+d3fmfcs719ZTGB/8///M/HPSNIf+M3fuO4xxlrsRkXu3/1zXfd/vEf//EYr3V8X7J45vukLn/q+6CBZ4sAX0oppZRSSimllFJKKaXsK9aubciyDm9DmDV6331LBPU1icqsp7ObxVXXeSUyDYIIG+2CuCpZi1eOlDlK8tXhc/YnqZf6UvKSH+Z2ldFuaArW0mki3/md3zmEXZpMtJhSys0PW1CftXG6gVfo+sH1Q2tXKPF0xi8N+NQJMc3uTIKbnbUEOEKbT3gQH+2ozCdH7Ja1rV8ZMfgNb3jDqENw9AuPoOm1zH/2Z382REO/oAiA733ve5eLLrpovJ6XiKoNAiwxkUBJ4PTLMa93XscvWIIvn+vCID922xJU7ZAVu3i1C18s/l3f9V1DZCYS/4f/8B+GyKofftkROvGiF71o+NHOgx70oOWss84asb75zW8e+fpC/CaOemW0mB3tzP3+7//+8c56O3GNrTbEmk/OGENjRrz0umrvs/el5ep4v77v7fWL+nnPe97yu7/7u+N+iI/v//k//+f446Jtgjv8Qv7BH/zBXfeXMG63bPDJnMc85jHjNRD8E4L/4i/+YnnOc54zBHvjE4yBsfHHys5hr6J2T9dxv4nS7rX7rB6/2a3rDwgh2ncD+6Pycz/3c6NN5f7AqKMf8IeGvX4SrfGMZzxj9MmOb/V+8Rd/cbTnj1QppZRSSimllFJKKaUcahASre2Wmwbr0l5R7M2hEuHTuj8NwZslaSNE0+ySTXIt3xp9NlhFTHUeAdbR9Zyfsgi2rqE8NrNt7CT1Zr+xWUcZe+KzfthQp5/e/OlV0PpAA3Aspdw82Gsh1y8dP+xegUtUJF76heSHPb+siJ4EPWKaHZJs1OHTLzgCJ7HULwv1lDknBhJqxaAO4ZVAy4fdnMTbfIpEnfwydCQc+wWjbGZ3Qi5bPgmdxFYCI0E1r1smVqvrNb/Ewbe97W1D/CWs7o2QC7ERqgmqYveLUd/VIXT+5V/+5fI//sf/WN7ylrfs2sUsEXQJlnYkOzc2BF27UOURxP/gD/5g+ZM/+ZNxrU/G2e5Ur0UWq93S7gV/s5DrF7NY2emv5FM4D3vYw4bo6t7/6q/+6vL7v//7y7nnnjuE1Zk9FXIzzgRj99cfEK+gIKB79YRPMRF2TTx++7d/e4ixhFrPkj8qs5DrNd7GxzNqt7B8u3zdf9/f+7//9/8e40XcL6WUUkoppZRSSimllEORCrk3HdbtrcFbY3/KU54yxtomLWvt1rajUWS3rGPyYE2bDiLRAiKKOp+TOuBzFnAjyCa5Zg/H2DrGNsRuztOO67ndtE1fsXnK1xbafEY3AL/6M2/sKqV8ZfBTfYN3Efth/Vf/6l8NIY84aBfmOkRXAuk//+f/fOyOtEvVrtn53euEO8KfX1Z2u4Joefrppw+hjrBIgPWLQT3CJNHTrkq7Qv2CIPwRTtn5RcO3X4p+iRDq/CJRRhQlOhIJ2cw861nPGr90CYSvec1rNnO/FN8DSxC089UvL4iBkOy7Yec/ivp8xhln7BJyvdLYGOjDdkJu8AmX+9znPstpp502xkO/CLDsiKHizy/RGXGpQ0C3oxd26xojgrsdy8GYGudv+qZvGtde60xE5ncWcrfDmIvJq7LPP//8ITQb93UIudqIkEvs3UrIDf5IEHCNnU8uqReIr8ZZfwi1wfPju5O9ahm/9Eu/NNohQrvvIPrqv93b7kMppZRSSimllFJKKaUcyniborcolgNHxE5vxLQDl3hrDV8ezSNpff0+AmmEXGvaNBTahXV1OoOj8iSok+O68CrJp4XMeUg91/HlmDpIXwI9QDzxx15c6z7FSduw+e3Vr371WPePEF1K+crgp3OvhVy/BIiWP/3TPz1ENj/0hE7iql8ExEg/7HbAEuXstrSrlkj70Ic+dOxs9T23fgH4BeKXA+GXX+37RUektAtXOeHUJ0y8npmdNoiNBEY2BEFCpet9FXLLzZN1IfdnfuZnxr0kMpdSSimllFJKKaWUUspOpELugYVgawOWr1u0oYqmYW0adAwaxrroGVzTNugkzmkZdA156kT8TX3M5zPyI8qqn5R859pIfceUaSd1I9gmNj
HRfCTn7Gywi8iclLrKvF2UlkMjovNU0C3lK8OXviR9D/BDTpi1E9UOSlvv7TS1I9aOT7tSH/GIR4xfenaG+gVg16TX37JxbQevXbR25uZVzHz4pUKkY08s9iphnxSBX35ea+AVBnZj+uVKuLV71C+W/iIppZRSSimllFJKKaWUUsqeQI8g3NqF+8QnPnG8RtnGMCIuUZOWEd2BKErknBNRVOLHtaN66qSu69SPXWxz3CqF1J/ha93fHAvUo+XMIq8U1Ae7pMSs//QaO5Of9rSnja9jzBsySylfXvZJyA0+jfGqV71q7J71vni/BLxOmfDqh9yrBwi7duL6pIdyoi17AqxfFPKJtX5JqOtVxj7dQcS1A5dASzS221YiBvPBhg/iLmHXqwr8kiqllFJKKaWUUkoppZRSStkO2gTdwtck+vrAb/zGbxxfvyiPKErLWBdiI5pGaJ1TRNVZHM1RXsrntC7AztdpJ2xVX1r3QS+hz6z7EIc3qtJUaC6OriX9TJzs9Vniw85kY/O4xz1uvG315JNPHm9QLaV8+dgvIdcPu1cqv+lNbxpCqp25vqv07W9/+67vlX3d6143vreUSOuXn1cj233rl4JfBH5REGa9KtkRflmw459g6/XJRF3b/CW/aOzCJezyAfX5KqWUUkoppZRSSimllFJK2QqaAsH2fve73/LkJz95eeQjHzm0jdvc5jZD0CRsStm9GsE0omiEWSnMebOd8xzjY/16ThFlCbKzvZQyx6S5njoRcnMee/WJs3QWWsqsudBVlKUN/iDPGBgXY/X4xz9+ecxjHrOcccYZIy92pZSbFi9Kf9bG6QZ2xz784Q8fP+Tve9/7xuuPdwdRle1JJ500hFdCLvHVq5F9Ry5xNr/0/AKxJV8+Wz/o8tmw90kOvyyU+SUCx/zC8EvUuV8s7JX5hcGeUKyddfwS9gpm4rJ2y8GH+24nti9XtwPc67jd81JKKaWUUkoppZRSStmJeAWwjVRl76A/EHG9KvhJT3rS+GpIusS6gOt6Fkkd1ZXWSV7KpdivH+e0HVvZOOdjKz/rKXaS2GdxNv2cNRfHnKtP/I09W2vxyn3tpR25RG9fnZmvu1SvlHLTsd9CLjHVD7FXIUtejWw3rUSgJaISW4mv7PzSsCPXLwO/FHz6gyjrl4G22ambX5iu88skv2C0Kcnnzy+M/NJYp0LuwY37TsQl4Eof/OAHK+KWUkoppZRSSimllFJ2NBVy943jjjtuvCr4sY997A2+Cze7Uq1HI+JnBE1EJI1gOqfkz+Wpn+sbYyu7+En+nvhBfKX+uo+kuZ+uI+YmD8Yl48M+r6T23cI22H30ox8ddqWUm4YDsved0HbOOefset0xAdfWfCIt8dUPuS368pQ79+kN349LkGXvF6ZfBBGBI87CLw/infqOygnNBGL1kV84pZRSSimllFJKKaWUUkopM6eeeuoQcb/+679+iLjeIEq4jXg7C5xJyQ+0irBuK0UApVe43lP4TdqOvfEX5tgkG+zmJM71PHbiyLgkXx6tR/+M3/3vf//lCU94wvKABzxg2JVSbhoOiPJJTL3yyivHbtyPfexjY4ctcdcnMd7znvcMkdcrl9kRcq+44oqRb2cuUdcvAL8cCLTZaSsRbV1LfkH4xSFPHYnQS9wlFpdSSimllFJKKaWUUkoppcwQHo8//vjloQ996PLgBz94nBMmbSzLLtyIr460CtAoQvLWkZ8668ft6mD2jWgi68TPjfnbHfGrfvoZcXarpBzqqSMv37frOuPmDasPfOADl8c97nHLQx7ykOVud7vbsCmlHFgOiJDrFx2x1c5aiYhLsCW++sG93e1uNz7d4prwesQRR4w81xFuCbRSXp8syWfjHPlFw2eEXGWx2eoXXSmllFJKKaWUUkoppZRSdh60hMMPP3x5xCMeMcRGr1aWN2sQ0R0kyJu1BuU5rp9vlbA7rSL+d2czE797aj8z10lsjtFZpAi7jhGz5/PY0ngk5zQhvu985zsPgfz7vu/7lrPPPnu8iVV5KeXAcUCEXD+wfnC9Spm4+td//dcjEVuJrBFoXX/yk58cduyVyVPfOZvYEXjjW55yiX/l6ztz2cy/lEoppZRSSimllFJKKaWUsnMhNBJxH/nIRw4R185S2sS8eSyCZXSOJMhfJ8JqhFGsX++OrXQMdbW1O597q39sZy9/Lks7EW/nRJRNXK6NHzHXkQ/j6CswTz755OW7vuu7xmuWjXkp5cBxQITc/DIh0BJgCavOiayOdunKU+b1ycTcCLL5hZhPwEgEW7YRevlQjzjsy7MdZyFXUm9vf5GVUkoppZRSSimllFJKKeXQ4+ijj14e/vCHL495zGPGW0IJkdEg4DpCbbSFiJoRL6N95Hy+Brt1ARbr11hvA3Pe7thK+9idHqIs7WzlW7lxoM/MMehLRNx5DNLP5DmqS6eh39By7nKXuyzf8i3fMl5fbRd0KeXAsM9Cbn6AkU9iEF+Jqn545124cFTul8Id7nCH8UOuPL8o8osjyQ8+wZYI7Dt3fd+u79917fXMfOUXbimllFJKKaWUUkoppZRSCo488sixO/RhD3vYcuyxx45dpLQImgItYhYlMYuV9I7oH0mxkWI3198T1v0kbUXE1d2xXV3srix6jPGIPjPHk5T+Jc11k4xnNCHXxxxzzPKoRz1qjDtht5Sy/+yzkJsfVPgh9suN+JrXIPsUht2zvitXnh9oP8zZrStv/gWgPLtulbMl2Er85RdKKaWUUkoppZRSSimllFLKdpx++unjO1tPOeWU5ba3ve0NRNzoGetCZfLnMsyi7XrC3ugWqbM7or3MfndXb7bb01jYrfdjZi6bbdbjCsaX5uNVzKeeeury9V//9WP873SnO43xLKXsO/u1IzfH/HLLd9UmzzG/IAmz2Z07v3p5/qFnG8FWms9LKaWUUkoppZRSSimllFK2g+DqNcr3v//9lzPOOGO5/e1vP3QGm8XoFIieMYu10SAi2s5ljnOa2Vvt4sZ8RA9Zt7kxUm9OW5H8xDGnua7rjIE0M9sQbiXjKS9i7mmnnbY87nGPG/fAd+iWUvad/f6O3Hy5tV+GdtN65THRNolY65dkztn5hZldun64SymllFJKKaWUUkoppZRS9gdaxfd8z/eM1yrbDUp/yBs/QZzMJrRcS+yysQwRMJXN1zMpX8+fif8kzPXW9RF5Scrm8tRfhy3YzvW3Yqty9TI+SJljRNo5b70fbOhEro01Hcj1CSecsDzpSU9avuZrvmbYl1L2jf0Wcv2gEmR9j+3VV189Xo9st61XItt5GyGXcBvxtpRSSimllFJKKaWUUkop5UBBMHzgAx+4nHzyyUPEBU1iFnGliJIgYmbjWfSLWdAF+7mO85Q7zrbr9WZm25zPeZjP51jn/CAvfVuPcSv7G0OdrerNYm1E3Xks5ySejCVR3autv/Vbv3Xck1LKvrGlkOsHDrv7YU8ZoZaAe+mlly5XXnllv9O2lFJKKaWUUkoppZRSSilfVu5yl7ss3/iN37gceeSRQ3SkUUj0juwsJT4iouUsP
GZDGqKRILaIYImt9I+5HlI3SXvE4lkwTsJ6/e1IHfbr9XO+t8z1+Eks8iPcGsOMZVi3NYbZ4Of7ic8666zxquXuzC1l39hSyPUDmV9gN0Z+8ezrL4ebGnFJ+UVSSimllFJKKaWUUkoppZRDA2v/Rx999PJ1X/d1y73uda/lDne4w9A4olkoJzxmN2lQHlGVFkLrmImt43q9mblsnVmsDdFeCJ3aRmxit34e5vz1dncXx3bMvm8M/mdB11FeUsqgX+nf3e52tyHmnnrqqaOslLJ3+Mm+wU/q8ccfv/zkT/7k+ITEJZdcsrzlLW+5wacrDjae/OQnj+/u/dVf/dXl9a9//WZuKaWUUkoppZRSSimllHJw8vSnP315znOes3m1s7nNbW6zPPKRj1ye8pSnLMcee+zYjUtAjIga4RERLh0j4OYNo+y8Dnj+XtgIlI6z6Ol6Pm5H/M6st5nvl133leu0y1YdzO0rX6+7J+xrvWD8kjKeGVNxpn925X7mM59ZXv3qVy+/93u/N76ms5Sy5/gpvYGQe9e73nV51KMetTzkIQ9ZvvZrv3b5xCc+sV8/zF8pxCzd/va3Xy644ILlhS984XLxxRdvlpZSSimllFJKKaWUUkopBycVcq/niCOOWJ74xCeOTV15fW9ERRrBLKQSHAmMjsRUrwBmx4aAS8jNblOJ3Vx/negQM+pAvrZmEVlaF3K1u1Ub677ZqIOUrV8fKOIrfdkO7c/ibfqYPInQbgyuuuqq5bWvfe3y3Oc+d7N2KWVP8NN4g59En/7wLnmvIbjf/e43fsjyy+Bgwy+HD33oQ8vb3/72Ieb6/t5SSimllFJKKaWUUkop5WCmQu71fOu3fuvymMc8Zjn55JOHEAuaRnQNAihhksCYfOdEXEKjczb0BPrILOTuqTgan7OgmjZD2o5t2pCCfOXxI21lE2KTc2Vz+e5gl7pB25jbQ3yHlKc/Ec7TPqH6s5/97MijMdmVa7zPP//85T/+x/+4awxKKTfOlwi5fgDtYvWKZa8h8EvrYP2B0pdrrrlmueyyy5Yrrrhi/OIopZRSSimllFJKKaWUUg5mKuRurP/7/tWnPvWpy0Mf+tCxG5fgGDGSruFaonNkl6gjHGMbu1nITV6ITpK8XDvyM9s652PWVpwnxfe6L34kdWdhGbGNDdhh9p3rvWWuk7bCHG/K0l7imcc2Qq68O93pTuN7i9X78Ic/vLz0pS9d/vIv/3L5yEc+MspLKbvHT9ze/0SXUkoppZRSSimllFJKKeUrQoXcjbeL+prIJzzhCcupp546dn5GXJyJKEo0JDASG4mKsXOMQJnXHEcgXbdDbF2v+wizTc4x+wizH4Koc+2LJa8ljg8pgml8p37qIscQ2+2Ij/R7HXWVJYbYw1HbGVvXYrQDV743wEbI/fSnPz023f3Wb/3Wcu655/b7ckvZA7b+qSyllFJKKaWUUkoppZRSSrkZQty00/PBD37wco973GOIuhEXIzYmRZhNUjdHiWCaY2xm4jfM5/Gv/tyW85TP5ymf7ZLvOMcjhZTN5eog8c1pne3ykTJt7I7YSQTaWThOfHOfEisIvPleYPftrLPOWu5+97vv6kMpZXv6U1JKKaWUUkoppZRSSimllIOGO97xjst973vf8Wplu1YRYXEWGImJRN4k36E7HwmNSWylCJHgC8mbyxwJkUnr5ZjzE9M6KY+fxKDtvKJYcp5XEbNhy0YilM793go+A7vd2W6HOtrKztu0mfjFBecZ08985jPjKzCvvvrq5VOf+tSoc+aZZ46v97zd7W437Esp21Mht5RSSimllFJKKaWUUkopBw1EWCIuQZdgGGFSChEXlRNuCb7qRSidie2MvK3s5ry5TfVT7jz+YrNdvXWS70j0zCuhCaIEXa8slvIq4yS2Sdv5ntkTm3XUSQq5Tt+llDuK9W/+5m9Gyvfm3v72t19OPPHE5eijjx52pZTtqZBbSimllFJKKaWUUkoppZSDBkLgaaedNr57lWBKMJxFzAiJiLA6H2ehEfK2Qn7KtrLZKm+9/e3Yym67uulbRF3H7IpF+jUTX9Kexr4Vsx8J2/mb40i74hZrYpaI6b7X+OSTT97jOErZqVTILaWUUkoppZRSSimllFLKQQHx1vfiHnfcccttb3vbkRcBFxEcIx4SPufdqyFCY9JWzOWz3fr5diSm7bix8hm2EUXn/hJP55TY5rjWY9yqfN0mpJ051u1skRjYx2/sE7+yI488cgi5jqWU7amQW0oppZRSSimllFJKKaWUg4Ijjjhi7Ma1K5doOIuaEQ3lEW+9yvfv/u7vlr/9278dryZe38k6i4w5hrkszLZblWPOn9vZjtjcmK3y2IS5zizkBue7axuzzbp/3Fh9rMc2+3Oe2BBx3Suvjz322PFdx6WU7amQW0oppZRSSimllFJKKaWUg4Lb3e52y13ucpflq7/6q28gGEoE3Oz6lIiGvqOVoCsRECP8qpv6qTeLkWEWInOOtDmTPIm/uY2Z9XozqR9mH4lhJvbrdfaF1Jv9bedrbg+zvTSPV5Jrdu4D7n73uy/3uc99xnkpZWsq5JZSSimllFJKKaWUUkop5aDATtzDDz98udWtbjWuIyA65nxdOIxIu51gG7vgOsn3ucbXnMJ6Pcx5zpNmXMdXzpO/jjIxzMx2zpP2lz31kXjXkW/M7LhNcp08/ch9uPWtbz3u5UknnbTc5ja32fRQSpmpkFtKKaWUUkoppZRSSimllIOC7MglCG4lYEb0THK9bpP8me2EyXXYpY2wXRyzzxvzP9fdjt35n9mdr8SZNJPr3fneE+KbD+O0LurKJ+Qq+5qv+ZrlXve6167vOy6l3JAKuaWUUkoppZRSSimllFJKudlDCLQj9w53uMMQAdeFyIiPESJzPbM3+REkt2pntp/tYqtsvsa6Debzm5K57Rtrfyu7sD5Gu4NtduLaQS3ZhRsxV3IvK+SWsj0VckspZQeSiZK0N5ggm2g57m3dUkoppZRSSimllFL2hzvf+c5jNy7Rb31tK+eO60JlbK1pzQJw8rGdcJm89TrxN/uYmf3lfM4Lc958nPPD3M562Y3BPq+VntPMej9u7Hpm9sUu64jZhZsduYRcgm7Gzr084YQT+mrlUrahQm4ppexATJDueMc77vUn3Y488sjlxBNPXI499tjx2pPdTd5KKaWUUkoppZRSSjmQ3Pe+913OOOOMGwiBsyDpek5BuevUyTFlW9WfmW2gnDg5C5TyZj+zPdav19lbe23ox9yXkLz1/Dk+bFU39WaboL6U77idY4y9o3HJDlxjwwfb6667btcrldnJY5sd1qWUL6U/GaWUssMg4Pr0IiHWUTKh2g6fhiPgnnTSScsRRxyx3PWudx3XxNxjjjlm+Ntd/VJKKaWUUkoppZRSDgTWqezoJAJGSHQ+i4izwOiaUJjyWXjEbDenmTlvrj/XnYnICTbrbc7srmyrWCAvIrK0LoAq38qv/Aio2Eo43Sre9GVG3fhinz5HqJWnbG7LubLPfvazww6x+eqv/urlMY95zNiZW0q5IRVy
Syllh2DCdKc73WkIsURcAmyuvZLG94usC7ImUYcffvgQcI866qhhp47j3e9+9yHkHn300cOfT9jtDyZtdgjzLSbt7O2O4VJKKaWUUkoppZRy6GL9Kd+xuh0RD4PrCIlzWUTLdeFyO1J/Fi4jWkqz/9ikbD4P8zlSf07rdSA/JAakzlbINwYRfqWtSFvrbWKOJW3FL4izn/vc55bPf/7zu/qbI3IPtvJhTTJvACyl3BC/7Z61cVpKKeVQxSTNK0rudre77Xqlcj7BaAJMsGWTCRbYfO3Xfu0Qce3avd3tbjfsHIm+ynOtrnombHzIl7QRgTd+t8JkTXza0aZJG7/y58lfKaWUUkoppZRSSlmWr/u6r1ve+ta3bl7tHO5973svp5566tgIQBi0XjQLgxELwywWRnBMuXpzOebzMJeFtLueUmYta309az6f2apNzHlblc9tYvYjP+fJl+bxWS+HeutjOCNfuTU7x9jqq5221vHSdlJIrGljFpWlT3/608u73/3u5Yorrhh2pZQNKuSWUsohjgkT8fYe97jHEEsj4krEVkKrFGHXREwdO3AJv3bGRsSViLiSPPWcs3FUj5jrE3TaIwJr02Quguw6JmqE2+OPP37sxBVrxGJHPvPKlUz4ID/py402xZ22D3QM8Z9USimllFJKKaWUMrNThdwzzzxzOfnkk8dmAOsn1oqyXrS+XgPn62m2TwrO42M9P6Qsbc+CrfM5xSblsx+4nmNmF/9Y7w9cx1+Y62CrOmHddp3tyuSLJ2uJ1hCJsfL1NbtxwS5lzuE818pmIdf5Zz7zmeWCCy5YLrvssi/pXyk7mQq5pZRyiOPVyF6DTBgl4s5CrvPsfI2Y69p34BJgZ0FVIvQ6KpOv3uyHoEv8nes6N7k2ITOhk2a0RfRlz0928iYW9eWZwJkM5js0iL78mwBmkvjlgticV1QbNzGIz6T1QGBXsnHM7mQTWuN2oPyXUkoppZRSSinl4GYn78gl5OYVvNZKrMuEiIVzgrWVnOcYZpvYbWdj/SdrQDnOKeVS1rByHWb/s1/nc5yYz+F63VeOW53HNv7nukj+zPo1Ymd9z3qgJNZZzM0mjIxj8ucUnCufba+99trlne985/LBD35w19iVUirkllLKIQ2xkZA7v06ZKLou5pp0RaRla/Lkmq08R/mzqCuP3ewr/lM3icCbGEzUssPW9+vmdc/xK2kncalPzOUzE0Q7ffM9umnPZC8T5psCcWtH28Y0r38WJzEX2t+fiSY/dkLzr28RzPVR+wRr/Vwn46L9UkoppZRSSimlHPrsRCHX2od+n3LKKbs+3B+BUYI1EuQazuUnL2tHc36OsZnZLn9ehxKbmKwXWbOaNwCAbdZt5pjX08z6Nbayw57UX78WR+JH4tvKP+QbJ+uB83XGzppY1sWSn7IZbco3ZnwlWfd629vetlx66aUVckuZqJBbSimHICZDJot2dBJJCYERWSPeRoAlTkaQVW4SBecRbCV+IubGDxxnPyZjfKWuc/XEE+E17UXElRcRl3g5xzaXJZkMiyOJPZ+ZFGfieaAwJuLUbnbJZhwksabv86RVX3xnC/v0y+RVfPNEGRkPQnHGJH1X1znm19QYz4wrG/ed0Lvuu5RSSimllFJKKYcWO1HItfbygAc8YDnppJPGeon1j3kNKKJhhMOcz3nzmsl29iHn7GbSrnJrV9Z+slnBh/O9mS7rNVmzIehaL7Juox6f6+3P18nb6nyr6+Sts24btls72p0v8Vkjc1Q/8ULf8h25UOZ+Oa6jjrIIufFjbM4999wKuaWsUSG3lFIOMUx+TGa9TjnCacTWnBMNc27CaTLp2gTKpMk1H3Ndk06TUPkRWqF+hNSIiOpFYFVOmIzo6Jwvx7TPnt+UJT9iaWzmeomZjbqO+m6iZ9J4oMRc7YidwGpCPgu4ade5vrLVf5NP114bbYct8ddEXvzGSWzidFSHLZHYpD/jK6XP2jB2xoFvbTjP9xj7B4MY1eFTeVIppZRSSimllFIOPXaqkHv/+99/OfHEE8f6Cax9RFSNIJgUkr++TjLbxj5HrJcFfuRZr7HeY/0n63BZ17F25cjGWpZzZM1q9r1dwnzMOXI9l29H7Ob+r9dL2Va2QX6E11yzy1pc3sAHdlL8z6hvPWz2BeuK55133nLxxRdXyC1lokJuKaUcYhD/CIfEvQiM64LpPKE0cYp4avJl4pUyST0pIiuxNztUlZuQuTYJ/cxnPjPy+ZzFzQiwJq/y2CuPL/5jk1jYsRcvH9pxzLXJnnrsUo9Pdpk8Ot9qwrinmEyakJuMZzylOT7tOiZWieiaCTw7ec5jp74JqRgJuBFk2eqbcnX4luJXP8UhJseMlZica0Mypia//Kcf+zsWpZRSSimllFJKufmwU4Xc+973vssJJ5ww1klcrwuQWftYXwOZy5BraRYmw1yujVnYZK99H8iX7MDdTrQEe2s31oSsn1mvib95zWa9/py/XdnM+vX+MPcX6+Osv5CfNSjrcVAmpW+Y68vP2h6MSZJnmpDLZyllgwq5pZRyiGFSSBgkCpokOuac6BfhzzETJpNPIuzf/u3fjokUgVQZX9mFG7HVZAvslMfWRIyAmEka/8rksVtvM8IuG74iVKrPTpk8ZYnJUTkf6VOScvbO+SGQmkg7j2i6t/hEJVGcOJr4HSOwOk+cjomJfWJXx7n4IrY6JtZZkGUjpY2MkfGMr/l+yk+/kxe/EXPdV/0g/iqD1zOXUkoppZRSSinl4GUnCrnWQe55z3suxx9//FjzsVYCIqEyCTnOZD0rdnOdpNhkbSusn1uLOeaYY8aaUdZnbgw21mqs9VirmXfmai/JdfJyPidsFd9svxXpb3CdtNV1UEdehFZrbLOda+tPjonDul3WCoPzJDaJl8/UNyYVckv5UirkllLKIYRJbL5jNcJexD/njq5NlEyo5El/93d/N8Q9ecS+JPaOxNyIrvIcTdhMmE2yTK5MvEzCYhfRVVvaSB7Uje8IlGyIlya1zpXJdy0uRz7k5Zg4tanPfKnrWnKunTnmfDpwd2jLDlkTcn7ndpLiN6JsYkz7QbuuxcuXZDwldZWrK4nftXyJr/ic+5JrY8ve+RybpP7cnqN8sbov7rd7VkoppZRSSimllIOPnSjkgpDr1co+GG/NxNpGRMXtsH6SFOY8yVpJ8teJf2XWd6wX+dC8NZrU2xPSjvUfGw4k8Sd/bjvnjtulkPP1Y1gfH9dSxi7l63ZpR4p9cD2LuMrSh6T1PgW2qWudTnIt+Y7cCrml3JAKuaWUcohgp2deqRzBz+QyyTURz3lERxNHkyc7cUEUjJhI+GOfevwrU88kzITKuUlnhFwQC8GHttiYuEnOI1pKKedXG5LrxL9eV4rfuU8RL2eb1Fc+i60ETPGuT06DWAjiJuTZWRuhNOJqzpVpx7UY+XftnH/ti1MdMc3iKt+pl7qxTzvpKzvXc79dq8cmY6BejknspNhoW333yzh0YlxKKaWUUkoppRx87FQh94wzzhhCrg+tW58i/m23xoOIiiHn83G2yTHMvq3
bWGuxZmSNxVrOOtad/vqv/3r55Cc/OXyxsX4T5Fl7sp5mZ27emrbebpCfFD+xncuS1hH/Vsm60HyUwuxrPp/rJKUum/V+pt6MOtai9DsirhRfhNxLLrmk61WlTFTILaWUgxyTImKeSaRJLJFuTkQ8xwiAEQsdTZrsxoVrfiLWRhAkAEZoRSZhJlcmaJlsZcLGNjGl7Uy+5CuXB+3kyL8UH/LiM8nkVxnfUvokuVaujmNs4nf2b5KfT/vNaJPAmrFkvy6EOjcuBFHHxJD4tKm/fDlPXTazyOpaX+ZY1ZGvPHnO1dcO+yT12PPPb2zUl9Rlx2bOZ6+P8jJJZhf/ruWXUkoppZRSSinl5stOFXJPO+208R25EXKztiEF+cH5fB2SN5fvzta6j7UW7XobXtagZqw1fexjH1suu+yy5aqrrhrXWb9xDHyBmPs3f/M3X7I+NbO72LI2t1WZ63lc5vFJvnYzfiF+HNd9p07qQZm+zf1jN9cL8tWzHpmNIdLc//PPP79CbilrVMgtpZSDHJNBrwGOiBvRjmA3C3iSSZUJk4mnyZRPCKrvOoJjElsCIb/KTKCIvuyVE/3kmZSqnzz+CYWZrPHBlwmaCZtJIB/yXfMhLzGrP0/0Mmnkw7ljYk7SbuqwAd/EVraSsdCm74rVVr4TOPUcxW0yzoa9eKTElu/cda6/EXu1qT2xiQWJlS079TLOkpiMZ+LSRvrFNveOf/b8KnfkW122ytV3biy1KZaMNfvUzdgRh/VVf+zgFp/d3PzwYUJtPPgopZRSSimllFLKzY+dKuSeeuqpu74jN2sg1pLW1zCy3oP5HPNa0FZpJm1YY7EGo9153Suw+fSnP728973vXS699NLl6quvXj7xiU+M2NSzzsNHcE3AVMfaGtKHdd9QlrTOun2u2TrPGlXW0KwNScokduJE6qqTuiF2Wbvji9/YJrYcUz9lqau/kjWotMsPuwq5pXwpFXJLKeUgx8SL+GhSSPhLihCYFEGPKGiiSMQ0KcrETX52fyZlcqe+o2t1Tb6UmwCmvglXBEXXfOcTdXzLyyQtkzMxss+k0DmbTOwkbbFlk6S+dqVMGNllUsgefKUvYshRnMRLSQzqORJw73rXuw4b1+zZGhfJOKhjwq5M2wTh+BVPEIOkT2mPj0xyJfEp11byE5Nr7SWW9DXJ2KqrXP/5Ype6GZ/YSPIzluwIxmKTn+uI1ex8KrSUUkoppZRSSik3P3aqkHvSSScNIdcH0q15WAeyRpI1pKwdzWTNKGRtJOdJiN36tbUY6yX5SrOtuOaaa5bLL798vFY5MVmv4cO6i/WjwL9862TWX7KelfhzPpOYUled4DpJfsqsDVmPusc97rEcd9xxy9FHHz3SUUcdNY7WixJnYuY/bc1+JWS9yTGxziRvPR+JL22BPV/KzjvvvPEducpLKRtUyC2llIMYkybCW0TGCHnycy4/k6tMcDOJjF3qOpqURviTZzKlTnaP8gMTKrts1YkIyafdnPJcq8fGxC0ionzlEXQTizxHecr4ycQO6oplTmzlqxdb51IwNuKOWOmoD9ozgZaMg35LhE1lkjFwnIXYCLj8OJ/HQ9zi0H7GkG+JLT/OM5EVj3M++NQndtpJ3PJT5qgd4rFrvtLv9Cn3Qrm8tB/4zL2V71xb6qSvruWLTVuO/PKpvVJKKaWUUkoppXxl2alCrjUXIi4x19oG5rUKaxdZd1lH/npZ8pK/bpM1Jusm1kusw1kjWYedXbjE3KyFiUUSp00B1p1m0o435mX9K/HnGJv5WltJyFpU1oicWxMiOt/97ncfSdzWfawFKdMHyfqP2JSpL3YkBinXyVNfynXKQ/LX86ANa4HW0BK/sqx5EXLtaM54lFIq5JZSykGNiSCB0WQrwpzjfO6YyZkJUoTTTNbUzZG984iZbEyk2JtAmVjJM1lLnvJMmOWDXSZ0BFnJBC7+EL8SO4mNa/5cs8kEUWxpK7aOiUNSliQf+pKY1ZfiW19NwomVbDIGjhFek9gZa/mZ9MZH4k5c8uNDOVv++WYnyUtin3jZpE0++Fvvc+BTv9VVT1zpX+JI24mPTzHFVr3sAlbOVtvpKxv2EbrnmKRSSimllFJKKaV8+dmpQi6hkTDpu3KtVyBrLbBmMTOvozifr2dStl4e39ZLCJ7WS6yVrMPG2+8k6yXsxWdthaAqOZ9JrNalZgE1rMeUdSHHxJU1MOtD0K71G1/DJmnXOo/81J+TdSNrQtZ7nIsjvmLD/1yHn/jaiu3yE691wnkzBIypNcsKuaV8KRVySynlIMYEy0RLMulaTyZpUoRHE0OTIpMteWxMIiPupY4JXvxL7B1NsnIOPkys+ETaNSEzETMJY59dncrUZa8dNq6VqeMYX/LnxJ6/TPDYOte+iV+OScrU0S/1M8Gc68vDxz72sXGuP+ylCJcE3AiyrvUhk1vtuFY3KcKnY8bDuViU5xOH2oKj/MCvvNxX1xkTMfMjH/xmYqsOWzZzkidpUzviYZv2+Uhb+uNaf31S02TfP1I8D/nUaf7BkvuuTimllFJKKaWUUr687FQh11qEVwTf8573HOsX1j6seWS9x9pH8rI2Mp+vM9cNyUuy9mEdxZqINK/jzPCvzBqLNSTrKL7Gyw5i6yvWZ2YSD/8E4KzxIG2HxJ/+SYkNfBsPazkRcbUp7rSzHcqtN1kzsm5lHY/f1NNOzh1nf3OMc8yzDZKf9STtOE+++H1f8Lnnnrt84AMf6HpTKRMVcksp5RDABDGi4XweES8iqNcjmyhl0qXMpC5CpaQeTCBNDk341I29iRQ/Jl0EPmXy4VN7fKiXT9eJR905Nte+/yNtmbTx4TtE+ONbfW1JJnaJmw/l8pJik8SfGExeEzviT7mkPZNDsegn38YiAu66mCtGbbPNtTgzHnwjgq8YxC2Z6CvPeBor46csk2X+xM/GuRQBOOK4c/W1z5/6mPvpmHPwoW7OJbBR373Wlv6n3H30jw3t8K1dcRkLKTFt9YnRUkoppZRSSiml3LTsVCEXhNxTTz31BqKqdY/19ZBcb7dmkfWhnLPLddaPcrRuY51Jm1uhLpuIt3e9612HgHvEEUeMdSLrLluR+PJ6ZW0lIWtNcx9SJ+dp105l33srTms7c50bIz6NZ2LJdcrXSYxzvIgvyJ/P+bWGZj0q9bRhfGy0IOR+6EMfGvallA0q5JZSykGMyY6JT8Q/EzdHk7WIgcQ5+FRbREN5EW1NliJUyk8eO35NGJOXFGGPkEcEda2uiZl21FVuYuZTfHyzdc2GrToERNcSQVDM/KXdfAIwKfn8KeMz/U+5pN8myfFtnOb60kc+8pHlwx/+8PKpT31qlItJ+3yb/BK4pQi5RFdjxoY9v2z1VZ/1N9f8Z3yl2GgD6jrXB2MAdYyBfxDwLwX1wZe+6TMfacN5JsA5yhOTo2t2YteOfHnOHV3rn3Zil/r6pCz9cG5M5ItF/YwLf8a4lFJKKaWUUkopNy07Wci1dnLkkUcO0TJrJshahrWKOaVsO2IzH61xZN1Esh5ivcga0e7QjjUvayaSeHYH39
bLbDawzmVdZY4DWYeT0g9l/IvJ9wUfddRRY1zk7a6vu4NP60piEdNW8B3/6+dhzudzPemjdowvO/2zrnTVVVct73jHO5Yrrrhi1C2lbFAht5RSDnJMgCLeZeITwY9YKJ9AaJJkcitvPUWEM3kiyjnOk0OTN7tO2fGbCaRyKZNb/sVi4hkfIPqJg59MBLWLCLw5Z5dJJ198iol9REZJmX4RQ51LYtBP8bHlS0q+lFgvv/zyMTF0HkHUUayO2vUJRqJlytN3KfGJmQ99Vc7W0bVy7YudPdvYKctOYGVwzsYx4+WoXB3nyDjz5VyZvhmL9FOef1xkbJHnI34zFhBj/OmrcyK364i5bFKePP11lBJ3dumWUkoppZRSSinlpmEnC7l2u97jHvcYyRqFdRPJWkbWWDDnJT/n6ylY10ia0Q6h1DrRnrDudzus7xBOI55a00HqW99Jcp21HQIuIduuX+s/1mvW+w8bC6yBXXjhhWNDg3bYWM9Zt9Vn61J25IrLmlFsHNdTmK/Xy0Ly9E8bjvGvT/r3/ve/f7ngggvG5otSyvVUyC2llEMAEx8TLMxCmwlcJl7yCXEmahFFc60sEy0pkz8J6ptgKctEKxNlk620L7l2zMTTREy5fJgUpk5sCX+pI59fJBYiYSasYsqE2gQ3Yq7zTGzV4ZtNfM7psssuGyIucTrjkjFLMi7zOb98Ss4zNumP6xy1yW/Kwb/8XLNLX1NXAhtJeURXNhkXsJWf+o5snUti1KbztBvb+M3YwBhAXyX2hNyMp3Jjwac0j0+uHSPmGttSSimllFJKKaXcNOxkIde6g3WK008/faxHWCOx5pEU1s+Tso6SY1Ls1tGeNrI+Yu1jf7GeRTD9+Mc/Pl4pnA/nhzkeSazaJWJLNh94k5x40o8g3gi4xFFfK0bEvfrqq3e9WY8gvd4Pa0TKI+Tyg8Swnuay9fPAR/L55zdCrjKxW3ty/fKXv3x573vfu+vtdaWUDSrkllLKXhAxz+Qjk5mbCyY8ktgIaiZkJkfIRHO7ZOKWyRuBj318pc8mVhFd9Z1NxgLKTDpdSyZl7MSSfD5ST55ySVsRVSPKslcXiU0bylKurnN1lWVnaOLPBDGJ7Sc+8YnlPe95z5iU8qv/2lGPKOlTjJLx0/eMTeLmV7vQH3XlZ5wikqYejJnyxCU5V56+qMuvPPauJefGEnywVcc5e/3SZibtmQA714742MVWYsNH6rLXLhtjYEzSj4yzOhmjPDd8O6qf85RHyE3fpFJKKaWUUkoppRwYdrKQmzUHQq6dqdYlrFvAGgasR0iY8+b1k9SJbezn6yQ+JOslsH5lvSbJGoqjdRxlkjiTiJMSoVS65pprlo9+9KNDxCW6Zm0nMSFtwjqSHcG+B1efCdiz7Tp8X3zxxUPEJRaLxzqQseKHGGwtJ2hH/GzFyRbp//r5Otvl85uyrDUZI/6VZQ3LZoI//MM/7GuVS9mCCrmllLKHmBxF5EMmNNthEpIJ2HaTmQONyZBJkImYRDxzNLkjruWY81yzS6ySCaZJZAQ68UeMk2fiZdJFtNNekvzs4uTHkX/nJqnxL07CITGV2CgP/Ls2udQmMVU99srkmUxmguxa0q7yCLnO9YEvdpL7pa7v2kjfxJbxyb1VP0f5yJhqixAsJjHwH8HXhNpRYqsP+uXcUV+d67d64jN+7MWnD+KRL4+dfsgXb8Yp7chjn/HKZNx9kade+q4v+iBmduzZOtenPNNs+Jfme5Lxc55PbCYOR/1S5lyZ9u52t7sNH5JxlAexaaeUUkoppZRSSin7zk4WcmFN5PDDDx+vF7buYP0ha1PWOuA6CVlHkmK/XgZrHFLWNZJvbcR6DLFT8ppi62BJ1ozk2/kakdbRq4Il3wF75ZVXDrFSvjrWavif23Q9x+7aGtIxxxwz1nDY3Rh5XbN1IlivUdfrqI899tjxSub0C9ZrrHeJ0TpQmG22IuMX1u2V6xOMX4Ru7cFaknt26aWXLuecc84Y31LKDamQW0ope4hPvPnEGtHKZFHKxMqEYx2TIxMsglbEPpOUTFRuKghmRx555Jigmcia6DlKiTu7JiPAZZKYlDwTLH3LhDh2OSeMOoJdxoFfKNNOEFMERJND7ZjAZVIZf9ozfo7a40NdY20yGSGVH/DJl/sTcZStyV/ETPbqea2yNiJAakMd4yZpN+PDJ19iUF98+ig/QqYYiK+uUydjol/KtSGfHV/64ig+MaibCbNrbUlgC/n8883WtfbFw5ZvbSlXxr9rn9JUL33QRmJaf3ZdiyV1+dIHNtqSTLb5Y6dcki9Oedo2fpLn3piyl++VP+z4nv9RUEoppZRSSimllL1jpwu51hesM5xwwgm71t1gbSMpOLfWgRzncsjPOsd6UibFr3UZ6yPWWCLiRjj1QX7rR9ZTrMNYA5GyJijxMfvP+lPWlOYYnSuzXnXEEUfsWrfZHcrZE2utSxK8rRWeeOKJI9mNq50ZMROe7RROfOkznKfd7Y7bodzaUtbnMgb6pT/G8p3vfOf4flzrW6WUG1Iht5RSdoNJISHMpMd3T0Tgi5Cl3GQkE7HA1icCTYxMJiNoRWAzQbkp0F4EZzFGUEuKaOkolkwQk8SWY8pMrCIU6rP8oM8R+uDIPiKdNqRM+jJx5Z9PdV1rh195jmI3mXPtPGJs/PMjngilMHkXH9KeOsaaH5+KJOKqk3shGQ/XOUZ4TEzrxyTxScnXpnM+9Q/iVcafvMTu6NmBGNWTks9GzBKSx1/uzTwejq6NifGcx12M8tg4SokbYvO88KkdZfGXNhy1yzbtsTfeiT3X6jv6B4MY1HHkO88eGySeUkoppZRSSiml7B07Xci1TkJIvde97jXW4aw9WJuYyfoMrF3M187Xy+c0l1kLwXqe9RHHrcrBT5jbm9uYE+IL8thlrcXR+op+xn471FPHOox1SeubWS9cHydrM0Rou4azZjnHh+36ONvMxD5HbVhfylqgvrhn6vou39/5nd8Zu5mNaSnlhlTILaWUbYjA5VNrEXFNMGZxyoRImidQJkY+IadexEGilhSR0KQkaZ6g7Q+EzLRrkhbRdk7JF7sJ01aTxiRlEmbBLfmJfRb84EiAzNioZ3zYRqCMmGlyqC4b11ImmeqwM0E1yYsgm3bjRyz6pv/K5CVe1+o62v3q1TX8snUvtON+qJ9rY5P7ybdzyXl88e1aXeOYsVQG1yH9ksdn6kNb+p/xUO4cjmy1rR115KVcmTFIPyXx8WHMjKM2+c9YxS5+Mt6eTUfXc3n8O1du4u+cT761oz0p4yQpY5+xlJwb3zyHbNKWo7ZLKaWUUkoppZSyZ+x0IddagnUQazw2YFjDsxaBrFXMWLeZ1x5cI+sxkrzkI2U5bkXqzvVjO5fNKTZzQuJLXvxJyBqSfmdtKnVznJFnTcb6jWQtJr5miLheqWw3rnFN+/EprqRch3W7kLyQuK01iSlrcDZenH/++curXvWq0XYp5UupkFtKKVtgckOQ9QoSE8JMLpJcR4zKtcmidNRRR+0SfpMi4jpmdy4iwjnuL9q1Izexze0nT
4r4aNJkUpVJX1ImiHNexLiIbjA5c+5oEiaxdc1WO/yYpGWSqJ/s7Bxl51q5hAiKiU89bWSyp576yIRVn9wr/YxoydZ5xlZb6pqYul/akdRxrzNeyrQLvp2nn5JPevIn6Sv7TIRjm/5KbNgSoZWnT+zEmevEnHxHKc8XP+z4MhbpE1tJbHD0bCUeR23zzU5d9TKObMTPt/P4yz8M0o72jY9/FKUsbShLfK6l9NG9TJl+5FkUo+eDLbvUZQt5pZRSSimllFJK2Z6dLuTC+oFXGfvOV2+oyxqKtL62kDUHxCbk2vqEo7pbrU3M9ebjeprruuY3zOdbse4L/FnzsWZjncf6VDZHWPPJOhi79fZ2B3sbHz784Q+P7+zlB2k3pD+OOcd6nDOxUyb2xJk1pawbfuhDH1re9KY3Le9///uHfSnlS6mQW0opa5hg+CQfEZfoGvFTInqZFMqTch0x0G5YIpUUwVY+0VD9iGGEXvmELhMYwlomi/uCumLOTlPt8e9cm4kpMUc0Uy/t5jrnc7kEeSZdEQEJfSZjJmfph8mY/ulv8F0h6sbO5NJEEREh2fu+DmOSCag2TE7ZaEu7vrPDEWz1y1jrFzsxSJnMZrKonraNkXrGIvfItfa1h7nf2vePgkw0Jfb6I0/9WYTUHvt5nJPnWUmesTAG/LnWJ22JS6xgL8VOXUlcPiXpXAzqasO1XdnueeJXngk+u3lc3SciuDJJvrEyfuoZc/3LM++cT+Vidq5dx1zP53ykD/rLhzb48YzK9/MgBtf6qI4YSymllFJKKaWUsj0Vcq8XIn1Prq84y7qFtQplsEYRnM8pzOczfPC1Xr6dj3W71F1PiW1m3SawTeLPuom1G2ttvtM232tr3SlrPtZX5jbW/c1+CKjeYGctam5/bjcp4yohtkgZZhukLetSyXefXF966aXLeeedN17rXErZmgq5pZQyYRJB3IuISxAjNhHuspswgq5z9sqVSeyUmTimbkRb5+oSrJTHh3YIWURg5SY3JjJ7iskZAZSPtMtvxFzXczyZZIldyvV2KTiPcCtGwp+25SWfTdrnGyZoykwI2bFR3+QyeWI15sRFY5i4wC7CLFt1TUqVG1vjF4FSO2zEFuHWRNGRnUm9a2MvdsmYizkTSb6TTIL5di6u1JFyrZw/Mc+T1EyA2aQv2prtxOsfHI7aAL+u1cuzp44x0Lf0L+2z1Sf9J4oivsSgLTGqn/H2PIjFM+NeJQ527I2Xe+Gob/LSl8TvWj02fOaeSc4DG3naYetc2/qWnwHjn+dGv3LPMk6llFJKKaWUUkq5IRVyN7B2YE3Eus8xxxwz1hVmlFubcFxfZ7BGMedlXUOaiU3WPsJsn/ytzue0zo3ZJO45X3+SrMlYz7FWZG3Hq4oJu66zXpO1KX6sDRF/L7vssuWiiy4ar1RmizmGub3EgOQnJmkuz/nswzqPNtwnZJ3t6quvXt7ylrcsr3/960dfSilbUyG3lFImTCLucY97jMnfLIo6Ep6cJxGhTEiIUs5NjhwjSrGXiJR2gfKpXgQ6Eyl2EVy1lzbYmNwQs24Mk5+IxUn88ie5TizazKQzx/Xz9WuYgGVymKPYTMTYyBOzPhrDxK2M7WyvX/Idcy5eoqJ2MtF0zt55JqTs5YtPnwmXxpxvwiO7tOU8MUgRcp3zy4e6xs+R0Jl+yJO0qT19kp+Ucj6Upx/KgjjUk9goF1fiSZzqZlz4dA73iz82GVNlEVvncVLPc+g5iz/2yowLO/nyIN94aCMo1x++jINrcUYcVkc88p0n9vjmL3XhyA45RsiW9Mm1OnlG3QfnjnzGfymllFJKKaWUUm5IhdzrsVbi68aOO+64scZk3QFZj8B8PpN8ayBQN+c5Ivnb+YHyOQVrH1lbSmxb2a6fY45vbjvlM8qto1gXsh7kK8aIpXa7XnnlleMVypdffvm4JvZmzUhMYpTSz6TdkXLtJrY5xsRjzU1M1or4NxbOX/e61w0RV4yllO2pkFtKKZsQkuwKzatpCUrZORhBNCKTiY18yQTEpMTkhH123LLlh826kKu+yQ77+FQvR/XZ8EnMiji2FZkAaYe/pIi58Z9JmaO0Pimbr+d8MWTiJQ7JuWSirG3ol9jVU0boY6uu8wh/8rTvWjwZa+d8RKyUIuaxzTUb/SLkZuInmRRK7Ob20oZ7ID5jY7IqxtyL+BC72OTzwyZxJeV6HsP0C8qRcUPyxMQn24yplMksO+3qlzgzThL79E2bhNyIuew9X/opFvl8KnOdenxow3gbDyhnl3gyFtAOgd1RuRR7/nKeZy79TH78wlF/xMpfzj23xtMzKn5JX5Stj3EppZRSSimllFI2qJB7PdYorHV42521vaw9hKxNJC9rDWFvz7F+vRVpd31dKXXXjzO78z+XrfvQpnUda0HWyYi13jjnKM1rZ+rMcUnzWgxyXEc+W2gzfQ2urQtpSxJT2rMG9MEPfnB5zWtes7z3ve8ddqWU7amQW0opK0wgfHLPblxCUoSpiKOEQ/kRnUw85JsYmowQmUwYY+vckVimTgQq5yY5fJjAqJ9EZJTPTj1JfkQxuBar9jNBysTIZIv/xOGYpB67TLKSXK+nTMJChL0kbUm55h/qaT9xGRdiYuIzQYyYiNS141M9ttpXnk/qwbn6xkEfiXzqGCf5mXxKXg+TCSn/ztm5t+4He2Nh4hoRVNzOjVmQx8YxE1n3RpontWLVjqM24y/lYhZXfICdpI6U2NmnDffYMW2IhW9Jeznqn3Lj6HmJvbFL/7TLTiz6zi6vVNYuOwnqSXyLgb1x40OeuPnJM5k4fD+zo/YRG/7ZGA++tOPoWpxwL9XVXzG5v0muxaF9ftK/1C2llFJKKaWUUnYqFXKvxzpB1haOP/74sZ4wr6uErNckbUXyZ5ut7PnCdmsUc92sCzlPDKnvPLa748Zs1suzZpMkTjaJxfgkuRZPYsPsz/l6mm2xPg6urQtlDcx60Lwm9OpXv3p5+9vfvnz84x/frFFK2Y4KuaWUHY8JhE/r3f3udx+in8neeooYS+xz7mjSQWAjWMkjkMmXTEwcCWaEKtcmRuqwJ1IR17SdSUwEswi56sanCRf48ulCfjIhcx4xUP3EICZJu9pwHhvtSfPEK0fkyLc2tCVFmEtSLt741A5SbrJGXFWurr5HZJxjTL1M3vhNH/kwVs7134RcWxlPZe6Dc/5da9O5du2wtnvXeYReY+3c94bwLzb+9DtjIS/3TVofN+3Nk2GJL30zJkEc2tImW36NgTx+k6d+PlCgLTbpg3Kfnsy45tlRxwcAPCvG8VOf+tSuuBzTn/ifx09ip50I2/IctSFeSQxgyy7jJckTMyGXnbryjJvz2Ln2LKsr6ZPEVkx5JoyH/hs/bfs5ca5vzvVF/cRUSimllFJKKaXsVCrk3hBrFtYXjj322LHWYC3B2gPmNZKs60jy10nZnGLnPMzn25F6WcOxDpLrrDfdmJ/18jke5Dr+k6zt5Dx2xmN9jWsejzmF7a6l9GuOJ2jfGo574l5Y37EWaJ3IK57/5E/+ZPnQhz407Eop
u6dCbillx0PAJeQSjiLcmljk3ETDkRhlYmKSQrzyPROu5ZuQZPI17yZURmRjr57JiWTCNE94nGsTjsS5JHUj7IpRnmv+1SXqgd/4UocfMWkjEzVxxka+43oK7BKvtD4ZBNGNX8gzQXMdAU+ST7ScbbRz1FFHjThdS+Lhm2DpWt0Ig65zL8SlHxE6+XQeoVOeuvp/1llnjTHTjjaVRYxky7/XLGtbHcdM9Nm6f/rjOokfMczjMSdoExlTfsWgTUlb/H/605/edR9io5/gS5ziTXzGUV+hv0RZ/zhJnHwTczMG+UeMeMXOLokvdhmz2GrXGHjO9IOdJBaImb1YMwknlGd8PY+OuUfaynPHnzL9EY+2iek+nMCPa+3kZ0Y9R/bKtccfuzxTpZRSSimllFLKTqRC7pdibeM973nPctppp421M2sKWceZkZc1nBl2WadJwnwe1vPUS92wbpP1JHnWRazlZF1NWVJYr4/4kHLuaB0oaS5P4kdbiXO9n0nI+Xb9SX3EP+Q517b1nawRpUz+pZdeuvz8z//8csUVV+wqK6Xsngq5pZQdiwkEEYyIRHglFBGOIhg6JkVQIiKBAEhIMumKvfrO2RGs4lM7Ji9s+ckEjRjlnA2/8ZV22CqTz/8sDqdNiR3/mayZUGlfiu9MDKUIgeJKykRMQiZ8YpS2ugaxLxM2edo3Nsi1JD5xsBOvHZz6oG12YoowZxLHTjvy+eODaJydtemHMkc+nJsgassYEuczPvGVttgR4om4BFH++FVPUkd84hCj8oxT+po0j48+uk/s2SUp22pcxBJb/rXLTizs9IWNsWFjbOQn1jwvytRzVIe9a7FFuPYPGLHx4ZofvqWMj/rGTRKX/rCVD0d2EKtnLPdRil3uUWKFMj9vjvNzZhzYzn3RZsZfkp/nXRIX0g82pZRSSimllFLKTqJC7peSdQZvZ7OOZN3KGkLWc8KNrSPMa2Spm+v1urme64TkredDnrrWZMRsvSZrTLFXvp7YsFXH+Xq5uvosWW/JGk3Os76VNnIuP+Q6NojdfK498SYPrsVnXcoakuu0bw3uXe961/KqV71qxF5K2TMq5JZSdiwmEEceeeSY1BGNJOIU8ZNw5Eg0msVQkxITJZMRExp26khsiGXxEZ8wackEyHkmXZnIIH5gIsQf/5DPF1t5zhMfMY3vxBMRl808cZNM2DKxguvkJT+TsEwe5yRPYhv/cx1jY5LmmDqZiEriy3iLJz4JcsTHTPDkZVJKdCXgEgH1WT/ZZ1IouY6tWOzo9H3H4hQjW77Z8a2O9uwulW8cjbH42DuXJ8aMS47i22qM1FVH3zDb5JmJvTJ5eQbU1Tfn4snYiFfSL/1WX10xElvlsUXGj19HvvlQl4DLXmzKiLsZ37TBN3/GmX3GmU1iTR1lfLmXxoqduIyR5DzXYnXk3/ioKy5+Mhb6wJ/keRSjfOfqpzzPNj/GjA+2GZdSSimllFJKKWWnUCF3e6yFWGfJh9qzdrK7tYOsY0jWLnIuIUfM52E9L3W3soX2rGdY23Cc1zYck6ydSLFhn/Wa1HHMGov2rKfoAxyz/pd+zWx1nTSzfg1tziROa2BZrzH21o6cX3DBBcsrX/nKsRu3lLLnVMgtpexITF6IQT6hFwHJBC/JJMNRvskGexOWiF7OlUf8U64OYUuS55oIZRKVCUsEKueJIxMhbc0TS/kRwRwjaGmXmKUNftOmcyJcxOT40sZ6fRO3tDFP4hKbtD5RzDlbPrS5XsckUopombKIgCbPYs8YzmXqEVblK5eX+Hy/if7KF0fEW8mYEjHjS2x2WRMu9V0MfLMjYMZOHl+IAGvcjJO2JL4SJz8Zk/WknYjn7JC+ayftidtRWeJyT4ylGMJcL2OhHfnaYuteO489G59sVE+SJ/F/zDHHjPgSU3bp5to/cCCOPId888dO3GyMId/sPGfGB7kXc7/Ew49YHeXxOd/3xJdnU3IP2HgWXIsn9yTlfEr8aTdxabuUUkoppZRSStkJVMjdGmsDV1999VhHsXZi7c9agvWllK+TPOsRSVtdh/XrrZh9hrlt5/PaEtbbi431k6ylSFn3ib/4UE8/pfiE66zJbMfcZsZqO9gkBefatEaT9S8+rR1Zz/F9uK95zWuW1772tZs1Sil7SoXcUsqOhAjku3EJUiYUcyIcKY9w5GiyYyJCNIIJoMQ2dSQTFGKlPOcmV0RG15loaZM/ftkQqdQxydEuv4jwJU8c7NRVZ13IVWZy6jq+JW2aSKmvTtpjJ8+kT54JGjttSvMk0bm8nPOjzUzYUp6kTbaEP9ficUw9cannu1zTT3UyPhk3fo2pnbUR7dwDr0Rmkx2obB3zfbNenUNkFIO+Zvep69jxLU+53b7Ocw/Fl/HWpiPcH+2qm5T+82FMXcNRe/oz908yLp4jbcCzkfuWuCJM51rc+uHaGBKq80poZXxqU1vy1XEujwju+WKnD7HTl8Qp8XXKKaeMsYO2xOk+RSwVl7qnnnrqrnETlz7wzUY9vlyL1ZgaQ/dGO3xpO214bl3LV278+XOubTES5dnGPxt9yrOvLXGIrZRSSimllFJK2QlUyN091gqsSXgbnzUEaxVZC5GyNuE8JG9mXdSc621lH5RtVb5dPXnaSr0k9tZCrJNY95Bcs7WWJLHLGlb6Ofuc/W5FylLuqN15bELyxZM6idHajrUj60XyrN9Y8/rEJz4xduK++c1vHus3pZS9o0JuKWVHYiIX8c054SiCmmvJZMMkKBMUk5FMRDKJI04SGePDESaLJjQRhDPRUteRr9TRBlt1TH5MusShDe0pd82PSZGUelLaZc9Gm2kXfCozkYsA61y9xLY+IXSevPmc6KZekJ86WyWILWKdfuhf/M4iqzGRIgATFImQ7JOX+2ACqG1jljL5p59++vj+Xf3SZ9+Bq05ETHWcZwx8KvOEE05YPvKRj4w88UUkhzyJr4yz+olFnwjHfMVW0h82+iYuRyl50Ja6+senpB3HjIMjHOWL1wcQtKsvjvzpm7HUdmLzfJ900kkjNok9n4Rs5SbWrtXX3zPOOGPYJHb+nGuXrTY8b3b3Ggs+XYvBeWzh+eMj7bJhO98r+fquz3DNxrjwpSxjqb7XZee+xKd/iLlfqUP45b+UUkoppZRSSjnUqZC7e6w9fPzjHx/rGz64bi0jaxAz1hdmXFuPmJE3p638QD3l25GyrWzWfc62yrIGA2sm1kishzhaf5HnKLFfr+OY83W2yp/rbUX86bN1mnktS17is17z/Oc/f3njG9+4a/2tlLJ3VMgtpew4TGgIckRNEwrJZE7KtcmGCZBk0kP8MvEwCVI2J74IV6nHnp3JDFGJABbb5MeGf/UysYoIlQmZvIhWJjpEN+2xV4+d8kzUJP2IGEkMZQM+nKufOFw7EuD41kcxSLlO+3ylD5Af2/WkrljSB/FAe+orN7HLJE/7/DnKI9oROfWTH/bsZjHWUb72tHPaaaftElXlsRU/+HSd+saS4OtTmeJhl08E8mVswU/swZZ/5Xle2IONlDEUW/qZc0mfPHvGky+wZ0c
wNV7O9TExw5jYiatNbfClLH6Njz6IF+Iy7tpYt8unI9myOfroo4d/fpVLyvlzzPMhduPmGVJX3MZEvHk+HeWx9XPBTn2+5WU89JMf48hPfIl7Hu88v8rZ648848BPPrjgWj07tud7VkoppZRSSimlHIpUyN091gWyXmEtw/qH9QRrE9Yk1tlqHSF2KbNOsZ43s5XfdXZns1WZvCTtWxexdiI51x9JWdJW7K5dpA1s1TfIT4qtNRjjbO3J+o8y4yw+azkXXnjh8opXvGK5/PLLx3UpZe+pkFtK2XGYaJjgmOzM4q1zKRMhCRGeXK/bRlCKrbxMqrRDvDKh0R7bTBgdEwfYaZetc5Me1+wQYSoTMvnayOQsIpk8Mdip6EgsFJ+6+qHeHK964jCRilinLdfO05ZELHPNV+KMneOc5KkzjxPUU6aueKS0C9fiI8gSO+VL6rEjMEYUTH3jZJeq3araiW3Ey8STPPHbrUrETRtIOXv9NJ58JaXfuX/GMPcPsUvMYpuPSf7hkJ3N7PUl/RGDtlMv8fgHB6FVm2zVU2482KQNAq043X9tpB/b2RFGicOSdviNnXbYiUlia5zF71w5X6nn3NHYGFdHdomVD21K6sC9FmOeQWX6qI5zZRL4cM6vpE7qGktxOSYmSYyllFJKKaWUUsqhSIXcGyfrFdY3rAVZO7COZK3EukPWHyRkDQLWG5DyXO8tu6s7t81mbtN5jkniE3tS1kUwH7dqM9dze2GuG9brr6N8XvPJWpJ1HWNs3N///vcvL33pS5f3ve99Y72plLJvVMgtpew4TFhMLKSITQQgolLE00xWiEEmGsrYxC7nkjrgz7kJyzzx48/ExgRLGd/ynMPExoQnE0ltso8vKZMsecrVlyepE8FKu2Ii4OqXZJLKhpgXe/WRSZR8cUjIUX2inHbTnti0F7GMbc6TYJy0LUE97Umpb5Knvmvl/BMgCZFilO87X+PXOGXnrP6o4/tTjz/++OFXfXZ88s2WXWLkT5/yvbHs5UVkVMfuaxN87Ut5Foybc2PMVoKjOKS5T4kh8UjuJUHWmKTt1JFcz3nqiCGvSBanfsaf84i0qcP3UUcdNdoRU2z1KWMh6QvBnIjLt37L5ycxZ7yVE76NW8YBbLTt+dA2tO8eqsNOP/njyzH3VWx8qeeeZDyMLztkjAMfyvmGZ0wdfvKse17l8Ss5L6WUUkoppZRSDjUq5O4Z1gauuuqqsZaQt3pZx5jXd7L+IG875jUKaxrrZK1kK3ZXBuWxWV8LSVmSGKXYb0Vsw/o11stD2k/eejxBftaOjHHWeYyttZgPfOADy+te97rx3bjKSyn7ToXcUsqOJJMNyQSDCEQUinhHSIroJX9dvJ2vU89kJaKWyaFJiu/szOuNTWiUyyc2sZknXgRLvuSz59tkSB7fYjYRIlaJLROpxOgI5/w7mpxqny99Eo/2nPOlLdfa4S/X/GvHpxUJnton6LFRN0mbW11rn5CofXUkZZIxiC9jIWXSp07uR8ZQHXE7GkP+xe5a3/J6HHGz40t5/EbMVc+Y+E5c8blWzidbcRl3fr3uhT99z/3Uhv6ITV112OhH4tldYuP7ZfnRZmJUlv47eg6g3POR+5A64tRP7bJP0kf2Z5555hhHMbL1qmHn2pnHXl/YGQuwYZtY2CXG4447buzGJQYbQ0fx5PnN8+k5M1Z8GScxsWMD45tnTF1oQx9zzT7jxS5jDMeMuzrEZ/GrIznXr/w88mOsSimllFJKKaWUQ40KuXuONYNLLrlkrCdYS/IBdOsWWddwzLmErEVkvQLy+HJcTyH1Q3yu58+kjfhhO5/Pddev57a3Yrv66s35N0Zs1TMG1n6sIWUNRx+soVmPueyyy5a/+Iu/WF7+8pcP21LK/lEht5SyYzHxMNkgDJlsRJg16TAZyYRGIhApI2LFzrlENFLfRMV5xDaTGT74j182hCxluUbaIXzNNkQp/ohqYuKHbzZil5xnB6lJaETQOYEfqM+efz4lOIo1ky/iIV+BfyJj6rF1ZO8oFv0lsNnpKVZ9MmFTpt6ciHCZ+Kmnr76rVTtiVjbbZ0cpe+f6TVwkRuqTtrQT4VZc6qVP+nP66acP/9pO/OwTI7/yI1aqlzFk66idxJc+qCtlHNaT8SSG8quv/GpHWY6JVdIGW0Ky/jmXHxtHomv8OHpG733ve4975VpiJ4nTMfGIwQ5b/3BxrlzsfOm3lLE5+eSTxzh7XsWlL3wbc/auIyK7547aUm6c0kftRBzWr+zazZgkbr5cex4k/tQ3zsrlecYyRtp0T9XRpjKx8i05V08cfJRSSimllFJKKYcCFXL3no985CNjbWJeu7K2YF3B+sN2ZN1iRp5krcExayaOIefrx62YbdZTSDtzHpK/fh7Wr7FV3o2R/mb9iw99zlqmcbzooouWF77whcsb3vCGsZ5TStl/KuSWUnY0JiAmH5nkmHA4uobJiDxikHMTPEJSBCSTFPmZtDiqn4lMrhEBLeKodqEe2BGctCWxJYiljUyU2MeXvEyKHCPk8pOYEo9z9nyrz7dryXXiUW6XK5Spm7YkRHDT5lzX2BBMjU/6k7bYSs4j9PErTp+GJCwa0/RbeermWn3lfHt9sNcquz/sTMQd2YgtQqy67Pg3lhk/fRF3bNOWPPHb2Uq45iP3KP3UfhC/2NTlg43kXMp4zvdd/vp45FoZ//mEqPMIoMrTlmQs5PkHyIknnjgEUv71SXLOTh19dNSXU045ZdQxHrHP+LFPDGImrqvDTl7aja32jYF7l58JtvEHY5168/Ohnnx9U8dREo+2lauTMWbDf35OJYgv9yjlknN1HcUi8V9KKaWUUkoppRzsVMjde6wrWOuRrItYR7H+kHUG6xfWIYI811mDSJI3p9hag8g5sq6R661ImePu7OKHz7nOOnOec+nGYtgd6R+ylpb1FetA1r2su1jfufjii5cXvehFyzve8Y5db5wrpew/FXJLKTsekxCTDxMhQiRByAQux3lS4ug6kzx1tkoRliTXaQeu1TXxkWcixS5kksXGxMiEKX6IZqknZsck+epI6stDJlyJTezpDzuiGZHMtUmsMdDH1OHHZEwszvnTlljUlQ/18moasGUj8c/OuSN/EfbUIXQSLlOHPd/OtRV7dcVEXIyIq1yKUBlb9Y2RfhFx7WzlT5l8vtjLIzqmPSKjVyATO+Un9oypuvPYO49ftmJJnHz5R4Gkr/LZxj7nc9KOWI1LnoHEEZuc8xnx3Phpk72+ZDxcZzz40zc7bBF/EvuMAb/u4xFHHDFicS2fr4ydY8Y+n2IFW2gTxh/stJ+fsYwHP4kz48pWf1IPiUEfJdc5GrP8vPHhWrvay3POji8+2JRSSimllFJKKQczFXL3DesVvobJWkvWwayTZd1HsoYgBWVZW8BcBmU5WqvI9cxcH9v52I65jfl4Y6SddXv5ydvOZoaN9RTrN1k7su5l3cWakp24L3vZy5Zzzz23X3NVygGmQm4ppawwUSFcEa0iNhGmTEgcZ/HW5E6+iV0EVpMXIpGjfHkmMinPNUx2kq
+OFJs5j406mRyJ0dFrcGHyFEHKJEpsqSNWefHFTl1l7LyGV2JvAivJN4HVN3UdxePc5JaNcyn+CH/8R8R1zBiwi+8kfZHiQ3tERUKkenMd4p6JIP/qyGND4Dv11FNHX2KXuGbB0zUIvvy7b/K8klg5Oyk+1GVDJLa7NfdDuTjEq1+u419yPueL17W+eZ6MiWv5bObEPke+3GP9Mya5D4lx7lvq881WHTG6nyl3b3z60Zipp4ygffzxx4942KkjOZc8W9r0vBs3ArsxyL3hM8+M/kCc/LLjgx17sM0/Ahz5Ve5ZY89f4lNXXOyUE4fZpMwzrV2o437wl3NH9vy75kNsjilzVM6PtkoppZRSSimllIOVCrn7jnWGK6+8cqwNWFuwfjCvYVg/kDDnhawpWGtIOZzPaU/ZznYrP3McM7trU525bL1vIdezfY5Zn5Osu+i7MbNuQ8R91atetfz5n//5KC+lHFgq5JZSygqClVf1RsAlGtlFSYyLMCopIwyZsBDWXEdQvOaaa8bkJWKXMkcTm0xuHE32Ihq6jkDGv8mRxL888ajnXBsRpdRHztVl6xjxih/1xMSfpG11JG2ro+8Rt2IrNriW1PNpOpM2tpm0iU2yczNiIuJPYhvfjsr4EydBltjpmu/YpF0przdWro2TTjpp1M0EUh8JkGxci0GeProHvuM1rxxOPMTcxGL8Uve0004bMYlHu7mH6rrWTtpUPzE4V8eY535nR638jBk7Kf2UJylXl7h+wgknjPPYJbkWq7bj03f+Zvz0wTG28Z+2fUevZ9z4GBt2fEn8Znz01zNBzIY+p29Qzidbzz5b9wP8ytcGO77ZyjcWfp7kOfeMadO9MF7iYWfc+HPkP22pmxjEw1bf+UgyBn4G+HLvnCPPi5j4SN9LKaWUUkoppZSDlQq5+4e1hQ9+8INjTcIahbWKrMNl/cHaQ1KYy9R1nbwcw/r1TOrFhr/d2adc3JjrYnd1d2eb8/TR9fq5NRVrM5LzjJny97///UPAtRu3lHLTUCG3lLLjMfE4/PDDx67NCKJEoxyJhyYoEUkzoTPBIxwRrUxk2CiTAnv5Eky2JPWISfFjUkQodE68ykRJXtojaGlLvOqzU8ZOPSKgc6KdNrTp6Fpb6kfsk9jCUT/1j83HP/7xIezpk7qSazFIrolgfEUIVj9xErT5Sd3ZjzjU09axxx67q32JHb+O7OakjlcC202qX0TluW/qRWRVX/vGhBjp/sX/us/U54uIazduxtfEVL+hX6nPVgzJ03ba1Y4xsUtWH+W7ZynPMb4keeoQw+2CnduS5jFx1DcxnnXWWaPe3I/sqJ3b8Azpl2fcPZL/iU98Ypdd2khdu2vZE0JdG4f0Ec7FqH/5LhltGJOMm8QftCle/iQYpzyD6rhXrj1H7rNnWxm0BT6d52dKPPyyVeY68UaAZ5f7mXvmqD39Vke7pZRSSimllFLKwUaF3APDRz7ykeXd7373ctllly33uMc9xpqCdYOsKyBrGNYVkrLOIM22YDuva0iIj/U1D+wuL2xl4zzX6/aYy7arv076xt46S9aQsr5irUX60Ic+tLzwhS9c3vCGN+xaNyqlHHgq5JZSdjwENCIXwY8YShxLIixJ8k1UpEzOHE1qkpKXcnkmeoSv5JkAyZMyEZJngmgyZLLIDvLlmShlwmhSRIDiW31JXWKapK48dRzjI/UidBHDTLjSfvoAeYQ59kn8RZBUX3+Mi7b5SV/jn0/19D31HdkYZyIuEZB9bNRznOtJ6tqlShw1PuzEQShU7px9EnttZCeuWLUTXzmmDeX3ute9domo/Gcc+GZrDGfhXD0ib3zpl7Y8RxFX2aVPEcaT5KWuVwjbWZr7l3sVu3ksnHsW9U0f53ub8twnSR+MWT6kICZ+2Rk/56kvj1/jrK+u8zzxoy3jw949509fnStjP/dbHX7yjHjmtBcfztlnrNUTq+eKfeI3tuqIkV82fEF8bMAXn67di8SO/HzxBf7cy7SROEoppZRSSimllIOFCrkHBmsH1jS8MYww6dq6g3UP6wnWF6xZwHoCXDvPMbjeKlmPcJxRb93f+nVI3eTNtmHd/wzb7dqffc/XjtZLklxbS8k6m93Mf/RHf7Scf/75/U7cUm5iKuSWUnYsJieEJjsQCWkRbU3Wcj6LuSZuwaRFfXnO+cl5UiZ6JoARo5InyYsIZXKYyZKjpFyKOJV6JpcRz5QTtoh7qceG6BWhzLVjfLAnAuqn+mIlaLGBGPnhgygoaVOKiGc8xMyH+nynTeeS89Rzri3tGm/iJdjxH1ExfiT1jJ17Yyeue+FaDJI6OU8dPrySl1CsHf1NLNJsx7/YvW6YgOmcf2V8qptrY2PynrHLmPEh/oyJcuOnTB3HtOuabdrnP7tw9Q3KY8934k0iUNqxql78r9vkfum351L/xOWe6k/siMvqS+5Ndjy7r+m3+s6TtJkxELNxkKfPfCJjLhkzz4g6zvlMWcZG2675yzhAPhtlziXt8SPlHkhi064kFvdDeWLJM80HlEvKxZ37UUoppZRSSimlHCxUyD2wWBuwO5co6U1msA6TNT8pZA1iPpesO8zX6wlZv5O2Ivk5zvXWmfN2Zze3vzsSv6O1FmsqjsjaivWkiy++eHnFK16xnHPOOUMA3xPfpZR9p0JuKWXHYgJCYLJbkRCaHYbyIipFsCT6EKRgMmPikgkMwSgpE7t5kseGWGRSk7z4kVxHaCNKsXOeSROhzGRSWfIJWdqQIiJmchW/BD111I9PdkQ4fUp97ce/+q71PRM3+REYlZnEZoeytkE4zA7VCGPqpS6/6tjtScSNX/nsI4jKU1ee/hgXImviUR77tJE21Reb79AlXmbcEtdsK4GIyt54ZNxSxzkfGVNjJQ4iavovRu2KM/GxV09Z4sy9yTVbsXqdc54rde2EZjPHmcQuO5P5m8ciKdfa8Dwaa/baU0d+/MXOc2En8f3ud7/xXM19zhikP/DzIrn36WNs4ai+Ms+bMeMX4sjYSurybfwI8O5DfIpLmX7K03/lUDfPrTI2fCvnS94s9rLLM8QvG/74UJ7nu5RSSimllFJKOViokHvgsW7wsY99bOzMtX5hbcE6gvWDrP0FtlLI9Xo+5mv+tkPZennykuJrvnYM8zmUW0exxpJzx5zDMTbzWowkP/0m2r73ve9dXvva1y6vfOUru5ZSypeJCrmllB2LSRhxh/jjKBF3CEpEu+QTrNhKOTdRiQgqmSSZ1DjPxC6TqUyKTIIIXNqQD0e2EUSRyRJfjhEGtUlwUt9EMoJq6vNFoMskSrsRqJyr57s+MiGDukQzbclzHhHOjlYCn7b4sQM2r+hNv9VRpo5zn1rUHj+O8sQlEUyJyGLhP3aZFDr6fl5H9fSJCCkZS/brwqWYHNmL4eyzzx7f2Zs+xs7kW5x8u3bk185d91gf0+/0xeQ099BRPHku2Dsaf+XiT31xikus8hxzLS62njG7X6FO7ptzn/p0LYk1+fe///2HMBufKZfW7cXn/mkj/vVfub5lLOUZrwc/+MGjj3wnsSMsw7W+u
39EUIl97pv77jr3Ic8GQd34snEPPG/KM67Gg63v5JXHzjgbLzEqA18RhbUpdnn66Vw9tu6Jnwt5yjzz7LUtX5522ObeJW7PnutSSimllFJKKeVgoELuTYd1Bt+ZS7S0PgFrCtYsJOsbkjUFZD3BcXdrC7F3zDlyPZevkzWM+I998pNmEk/WbyTrMtZjJGWOWQeKTc5hPUiy9vNXf/VXy5/+6Z8ub37zm0dZKeXLQ4XcUsqOxWTFpISoQ5iKUGRCRrByVJajZEJkghORaatkIhf/mSwlmRgR0whOmVwlj+iETK6IUXxlQpWJlnjUz8TKNUEubcnjL7E68mPCSYh1njYdXWtLXecmZ46EL4KjOnau2g2q3/ynL4Q+bUck1J6UPDEaW99By59r+dpjn6PEt7r8uw9EQMIzZru5PUmZMXjoQx862uCDr7kNaY7puOOOG68SJubpi/x58ppk7PjOOLi/xjFx8pWx4ocAaUzkRzwWC1+xt6OXcMmvNtVJf8TJRnKtrvth/Dyf/KRfknrpX+p7FbW+RdCObezFxT9bQvYpp5wyxnvudxI7dfgxVmIXj5iVi99ROb+OkrzsXDaG8RVhWNv6lueNQG1c8zOUuJXLN7YwhvL4VM6Hc77V8TPkXvAB9ZQHbSUeP0PgT5KXVyeVUkoppZRSSik3dyrk3vRYhyDo+h7Yyy+/fFxbp5CsMThaU7AOkfUHx3ktIut1jpjLcj7XD+yzvhFis91xHT6sl4jbWkzWTyQxOWbNRnKuDn/WWKzpicGH33//939/efWrXz12K6tXSvnyUSG3lLKjMTkxSTEpiUAqOTdhMSEzOTE5Y5OJD0xq5EnzpC2TJ74zYcqEzbkj4cwxdsr5Vde1yZO2CFVQTx7stBQftCk2ZWzTVuJ0LTb2Uuy1wSYinetM3GAM0mfH7IRMnraIXnxIiVebEdjYEdUIesYR7NgQ/WI7HyW7gO2WddQeX8ol8Uq5lrRxxhlnDKFVX+Y4pPlcOdGQkKteYsr4GQP9dMxzQcTNLu2MG/uMl6M8Zbn/2knc4mVDlNYngixbcbGR1vuljG8x2smsb4it8tjnWhvsCeA+iKAN9sqSYi95ZbWYPBfK9DVjkaQf7p3nge/sYtWWOul37PU99gRleXn2nSeO2Grb8xzYihmxkcTGp7IIr2JzT3KuXTGylccXH2J1LVbnyrWrLht1xSc/z468UkoppZRSSinl5kyF3Jse6wXWCqyneHPbhz/84eXCCy9cPvCBD4x1w3ndIsck1/GRBGXIdexme7hOXkjenB/7oCztWd/I2kjWP5KsiWTNRp3Ebb0k6ybW79797ncvz3ve85Z3vvOd47XTxqOU8uWlQm4pZcdjIpPJjIlKhFzCkEmLSYyyiEEmQM4zwZnPA/scMynKBAnayMQKjgQkE6vYaYdwJU85/z4JR4hMW+wSO9uIVfJca4NwSIDTF+VsJTbaNBmd/bAjwpmQOjcOUgQ2sNdORC9+44tISFAjfvIjpT+xkVxHjJT4sVNVIkSKXZvxOdsmjx1RVp30WRtpZz5CTHYWZ3etPogj8Wf8Mg7Gzb3Sf/FkzOaxTpvqELz1Vzv8q+f5UNe9k9iJyWTYMfVdS/oFMeqXnbVzjBkL5+pLfBoLonlE3JTPds7dCwKucchOXDGIM+MhsRO/vniGxO7ngo0ydZzrW8bCWMXWuXx28eeYeNnwbbe3umyVO08bjuBTrHxpT+Ijzyxbz6u+85E89V2zc62ePmhbXhJyr8QnluSXUkoppZRSSik3RyrkfnmxJnHNNdcsV1555XLVVVcNYfeKK64YO1QJnNm0Ye3BmlBS1jGyliHBOsV8HRvIUxfJC/M1HyG+pKyDJKUNzHawFiJlDdDRWsq73vWu8Qplr1N+4xvfONai5vZKKV8+KuSWUsoKExTijQmJSQuhJ5MYApEy18jESlkmO9I82ZonRHA+C0Z8hpRpOyIXX2KS5LkmjGXHoWv24lLunA/XJpbxpx/ELZNJdQh5UsodTcQIXsqJXMRLSd0I2iF19Td9MA7pLx/KCYpEOvUhX5wRL8UpaTt9Jlx6hbC2MxZzim2S9u0q9Rph8ev3umiZc/4i4hJHxRORL/deUse1PhFkjR3kQZ3YxVbStnjU0Xc7b11HRDdeV1999bBLe3OcUsTZtE1sJdobz7x+ebZNffbGzk5cR/7jd27D0TiLzfftqmdc9Mkxz4OU54E/sbiPrtVXrg1H1/HBX0Tf2CpLzOxjazyMLRHX88xWfMbFtf45suc39yH1te9cHeds/XwYc+XaU85v7pc+sPFcs2eX/rpWrky+9t0rPkoppZRSSimllJsjFXK/MlgrsG7w/ve/f7nooovGjlVirvWzfOWWtQXrGdZHJOsOUuonWbfYCvnqBddJW10jPq1zaF9SnvahDPKszWTdL7Gq4w18+vXSl750eeUrX7lccsklo04p5StHhdxSStmEIGTCZZJjAhPRJ5MgExpCkUmOaxMedo7z5ArzZEqZ5Fpdwljs5fGVpK1MnsRDhMrkTyySGF2zZUNwStzy1IkQ5rW1fPERQZaYStiSB3X0gT3RkbgWEUtKXGwclUn85btvCcVERyIpIU8MbB19WjF9IbxFlIzAyL84iYticy025T7dqC3nqSPxdfbZZw/RdPbN35zkqS82Ii5x1Djxr86c+JGUG2fCaCa6ysViLGPPv5jYq8de0m/fHeJZYc+HZ0m/+FCHvWtH8eWeuK++s5Y4za+xEqv0qU99atRlz1aZOE499dTlnve856425GecYis5N84PfOADR5/41H7GQj3wIYlFHOJPH8Xp2nli1w5bffccIPFC3fjPM0SYJRJ7hvK9uewC38o8S2ydGztk3NkYX+3nZ0Oen7H0P8+tOmLLBxrE7f6IS6zyXPOhf/JzT0oppZRSSimllJsjFXK/8lhTsMbgw/vnnnvucumll46drMRQ6zTWOiRrE0nrWLuQYpvrOd/RWk2O2nVUFuStJ+sk6oTEkg+zS1kvtNZCnP6Lv/iL5eUvf/kQc62tlFK+8lTILaWUNSIQmcgQqGCi41ySb6JjQpRJGPvANhOpnOfaJMskiGiETMC0mUkW8chRW8pN/Ii/RFbtSEQmNlDXpNGEK5M4Nl7JSwgTH9uIcGy0IY7YO7ebkigb0csxQldSRD2Y9PGfo/jUI7yJXXvEN/XE51o78ZV+ejWypK/ilscuYp0YnEt8sHnAAx4w2lGHbyl9mpM8u3ZPPPHEIRyCfz7VcZyTsSL6EjCd889eHBJxMvdLfefGRB3Js8FePtiJQdz8GV/jJY/AzUf6qT5Rlp3YlfGV+mzmsXEfCLjus3N2Unyq4zrjwPfpp5++63lYT2JWz9HYGgP3ke/YGKMIr67FpFy/3Re+HeVLEaKNIzvlBF/+tcOXumlXEqsxZevZl5zzx37dt+dW0i4/+psYtOtn1THPPtJm7pfY3Jf8XLJVP89vKaWUUkoppZRyc6NC7s0LawvWGqwlfPCDH1wuuOCC8Z26BFFrDNYakPUTaxKO8iX5UtairGE4z3rJfJ21
FvbWTaxnzPUgHuXaSVvWeayTsM9ai80IniPireS7cOVZAyql3DyokFtKKWuY9EgmOSY2JjkmRSZVzjMBIvyYJAXn83UwcZphE7EKaU8ygZLU0YbJV8SstCfPZCpCJNRxrZytXbURm2HixpbIxbd8x0wWJUInoRHais8Iikl8KeffeLiWgvN5QqqvEmFRikipfWKhXbxsCXPKlbE3+Y0wFx8EwHvf+967XsnLdi6fk/L73Oc+QzjM643FnzGWjEmScq8dJozqm/i2sku+tqEPhOyIgLETV+rHVl+1wb9xEhcRUl1jahzEqK6+Gw9H/ZEnucfaI1Dz5Xlgk3FLig/+iLjHHnvsENrZi2keC+eSGDw/xtmzkOdtttOOJF+CndGeBTbK5jESh/FQLl7jLLHhz31SzjbjaxyMkViMU/qtX3AeWwJ4dtrK8xzlZ0WSrw02rpFzMSLPsWuxOYf4/OOllFJKKaWUUkq5uVEh9+aHtQZrJdY4rE9YU/A9unbqEnS9gln+Rz/60bGmYb3CMWsXUtYkHOekLGsqyXOetcus0/AXO2VZc2KvTGxiIth6fv7sz/5sOeecc5YLL7xwxGUNJus9pZSbBxVySyllDZMaAo5JkMlORCWTHpMg+fIyCTK5YQ8TNvUzCZtJniPBSb3Yx4cUcSntiiETO8TGxIuw5TqiFBsiFcFstk1bJmPztZRJHdFMm/oXn7PApp5ERCQ+snXNjr/0wTUSP7/it5tS+8aASKicaOrIv76kTxFwI1AqJ3oSIwmn+qltZbFTL0m7xx9//BA79Y292DIe60lMxoxAHJFPf7Q7p7mOuImI+qB/sVfmKC6xONe+Mbv73e8+xs8Ye5Y8Q9pLjITb+HBuci+lj+oYB6K71ySrkz6rk3Fzra9itBvZ65qNCd8Zg4yHJHYxEXD5z/Mun+2c2POP/IwYB+R+sDOGjomNP2MVn/yIX508a/Ly8+W5gXw20hyPexYh3Hn6oT3jxDdfxte1I9wrsTjyoy126rvWH/Z8Is9WnutSSimllFJKKeXmQIXcmzfWEawnWN/52Mc+tlx11VUjfeQjHxnCrt26XmX8gQ98YAi+1iWsfVgHsb5hXQM5WsfIeoXzXDtCnaxdyFMvMdglrN3zzz9/JK+BPu+884aYa+ewMus5Xfso5eZJhdxSStmCeeJD2CH2EIIIQCZJhB9H+SZKElvHCFQz65MvEIjYm4DJj5DknH9CFsFTOykTlzrOJRO8CHxsIoCJM37ZRIiSMjFL8tpmwh0fSJ30xQQy9fWRiKiO80wwEw979dhqn2hJvHNUrg2Cqe+qBVu++VFXEl92ospXz9gTZu381F5iyjG2zo0BsfeMM84YMYgl/dgqVjb82y1r7NjLZ8tnUuo714+IqeqnHxK//LPTF+NpvIi4YuNfnsQf5Gc8Exdf6qd9+YTL7EBNG+mL84ybPH0hfHsNNXvl8ZcYHcWjP0RcKbEgdutJ7H4u+JU8b/IzPvMzGf9sjINrcUj66cjOOTtxO3omc2+Vxy7xsyO8s5Un8SGJRTts8qz6mXKv0rdcq5/nRN20L09i50MI/LMpX0rG1XNRSimllFJKKeXLQ4XcgwtrDtZtrrzyyiHivu9971ve8Y53LO9+97vHtbUHZZLv3LU7ViICS77uy3qFtRfrHtZG5nUYvq2d8OPVzmnnkksuGW347t4//dM/Xd74xjcO8fbyyy8f3+UrrlLKzRvbTbrqVkop20Csy2tpCX35fk/iTgQs5xJMqiL2mFwRxZRFPCJ0RGiKKKUsQifhSj2CnXb4kpRFrOLTRG0W+iKkEUn5kkfAcmSTyZy66kWgIpYRPcUiDj58So+YxT6xpi4hVRkRT11+TSr5FCc/+si/nZL6AdfpCxsTTBNWR2XqpO8ZF9fa17ezzz57iHb6Lb5MXJPYS/ydeeaZYwcq+J3tJP3hV8rY61N2Gctnp/0ck7QhnlmcnkVS7bMTX/I8Q76blq1PWKa/dtqaXDuXIoyy8UlI5caCvbp8GnM2jvLlgZ3ETvtisxOXmM23PinXXl4VLM/98Bx41gjEbPO8SdpRbx4//eJfHWPBj3GTr219ZqfMURk/kudBObs8k/L1VX88v8Rx+fHpZ0o5f7HTvnH1QQf/6IA4jb97qg7E6LnRrrL8nIpLu9oQhz4o8yznvvLhH0r+YeMfOF6B5B9Sxqhs4F7A2BlL9zXPcymllFJKKaWUm5anP/3py3Oe85zNq3Ko4N/aWUfMv7thTfI7vuM7xr+/rQexia01EWso1jOsY7zwhS8cdfJvdGs9pZSDlwq5pZSyG4hHBKOTTjppOeGEE8ZO0oh4JkER1dhFJDKBSjJZIjyaZLlWh2hkYpWjfH4ixLHln/jEPkIakURybsKmfkQxdUzo+NGmOhFW2BCxIjJqT1lELv4ImYlHIpixyzWf2rQr1jH+2RC82PDtnB/xE+TEo90w918+Ec73chgjiMXEk426drEaf/60NU9AxZjxUEd62MMeNuzBPmM3p/gnnBICJSjLfWQTW23Ev3PPgjrikGd81UmfMs4m0+InkIpFe2LOfSYgupb0h5DsHorBePAtX+JTe4RG9dwv+dBOxoTfhz70oSM+bbKDNjPu/DpqQ9wEf22z52MeM3bilp9r9X2wwXPPp2tlngvHPJPO+Sf+i8e1vvLHDzt19Vf/PC9+tuBaP/WLrevk55kQk7oZP3aEavWQ7yMWo7z8jEFc8vyM6bdkHNXnSx3xRcz1HTY+eODVR67LBu6D++l3QZ4T4+M+ZaxLKaWUUkoppdw0VMg9dJkF3GAtw/oNUp6j9ZMk/x7PB9whr5RycNNXK5dSym6IgEUsMjkiHBF9TIoIV67ZuFYuL8JQxCF5hA7iEF9s5wSTMeXEJoKIemwjmiZFSCOUsdc2CComc/xk0saOjTpJfConcBHYtOWaP+Wpx68YXPOhn+pE8GNLCHNUP3Wdq0cgUyd9j0/lScokQiKhjw0f6mtHfNrMmOdeaNcx4+CoH2edddYQ2gmM/LBzZJu+J/FH5JTEIJ7En/sk8Z2jMgImAVz9jK0YCFfs0h4RUL/E755mvJTzJSUm9QnqBEp9VdcYJBZH1861bWzVmeMTj/YI7bOgL67EKWlfkueY5yb3Rl7iin9Jfp41cRLWMduJKX75EBPbjBc7Nmknseuve81n+q1Mvvid85sxJox7XviIYJg4xKgNIi4bz56ficQKYxzR0bOiTeeObMWuPXg25CcuZXn+dirG1DMo5WfXuWcuR/fbfZFKKaWUUkoppdw09NXKOwtrF9ZB5mSNQsq1tRTrKKWUQ4sKuaWUciMQdUyEiDgRswg7BCHHXBN9thIuIgbxwwehiZ9Z6CCOEJUIi8Qk5WyVO58FMsf4IEgR4ohlEZtSl03sJeKTI3u7H9WBOpnosVFHSpviJ9YQG9OGMv7VS78kbeuHNhyNkZQ42KqfRPQhJvLPTkyuJWOhDruMnxi1K87Ey4fd0l6nHCGbrTL12LNNjPKIgWKMaK5cW8ozTvHvaAwifhOpYhcbk+X0z5FffSBs5bkwsWY
vJSb54sjrf8XOzlG58WXLztjYscpeP+WJJWMoNkK2ttVXLrbUT//FqH7GOsI3u9ikb87l8+f+EO7yCubZLu04yjem/OuXtma7JHaeFz7Z6QsbKfEYV9fsnCdmPy98pDy+448dG3Emdv2WxOaeyAsZLzHknJ2xmeFfW9dcc81mzs7Cs+Vn1RhHxPX85UMREkGenbHMz2EppZRSSimllANPhdxSStkZ7LOQ+6M/+qPLL//yLy9Pe9rTlquuumq58MILN0tKKeXQJJ9wI0ARp4hBRJ+IQo7KHKEMEfdcz8IGW3l8EUOIIsQjebHVJvFIih8p7USUjQimLHbaiTiXfHV8J662IlqlfP7UnnblRVzVDoFGOXvxKI8d3/IlYiNRMkJuxiOxqIuIiWwIPwRCYilRzzXb+Dcu8Q956ovJDlSvO04+G/Gkf9qN0CdPX+wUFSMfbLTDJmMQP+pom3hIJGWrLGPLNmOSOhCTPsR/2rezO37Vd9/t8jUG8viIf0k9dmI47rjjxn3jV7/5NbYR1tIftvFjrPWNrXNl2lIn4wxleUVx2k0M8tQxBu7tVja5lrSRmIx3xiV9yViDnXsXO+Pv9capw965csKsexCBX7mUMRWX/vtuYPEYG/W0pa6U51Q+W37EC+fK5fPFxv0xdnBk6+j1wexTdiijz8Yuz4CfUT+3njfj4+d8Tuw8n8qMpXuhvvP8LjC+pZRSSimllFL2jwq5pZSyM9hrIfdXfuVXlmc/+9nLAx7wgOXqq69efuZnfmZ59atfvVlaSimHNoQl3zNB0CVOECwIFAQg4gTxgpDhmtAjzzGJ8BNRiWjlSPAiLipLij0/2krd5POrbfWIccrkZQcnIrDJI16xI2ASz+JrtmOTfjgn4HhFrToEGP1KGxHs1HN07ZzIdvLJJ+/qt6OUMRIbO/4IQvLVl9J3STlfxtP3wiYuENjY3Pe+913OOOOMMX7EQfCdWPQh4yZPfQKU3bvGXb7y9MF9FZ9kzJXzQZQ1DrHnK4ltztXhyzgRXfVBncQkJSb90fczzzxzxJKxEYM6ETvVdU18jwjGR54/IqbvGZ5FWL4SK5/GS132+m/nrnHVZ7aJiZ28jAFfkueGgEr4ZZ98KTFK/OiT+8qer49//OMjX73Yen7ELxb3b32MJPXSV3jO73nPew7/+sqH/rJl41yf5r65D+yVEYeJjMYg+cY6orCkTL/F45yNc/7SRgRgGPfEdyhjLIy/sfVhCwKucfNcOBpX58ZVcs1GHfc4R0lZ7lsppZRSSimllP2jQm4ppewM9kjIfeADH7j8n//zf5af/MmfHIuzL3rRi5Z/8A/+wfK85z1vufzyyzetSillZ0CIIER98pOfHIIRoSNiBnEoYk/siFiEIqJPBC95xDWiKtEjRIBjE3spwpr2lBMXCSTallLHkR2hKwKZpC2CJ/FXrPzIJxiKM+0RWMQsz/etshWrNtKXiHKu+eGDL7v17K7UNnt24nYUi3HhVx1+2QTlkvL5Wh3jQ4hznf4Si/WfvT4TkCAmbYovSb/UNdYEWWORerOdOMGeH3Xs7iRegX18O87tOBcD0ZuIK+6Mr/HI2EbEVE4YhfFQzodyz1DGzjVb/XXkT2JP0BQjO+e5P4kp4572xUeU5Cf9F6OkXHKee8TOMTuB5cf3elLPM6//hFy28SuOeewkPyueF/eWnbyUO8oTu/psiYjue3zqk2cC6rBlJ07iItiJI8+G/Nx7MXkGjaU+Svyw5TtjAH60nT6pa2zkJ+ZDFWPmfvrdYWw9PxFx87MoufeOfib9jmEj5VyZ8U79PHvGtJRSSimllFLKvlEht5RSdga7FXLz+uQnP/nJY7Hzmc985vKsZz1rec1rXrNpUUopOw+iUQSziD4EImIGIcg1kSdCGtjKmxNBg7ChLjv1CBwpj2iVc/7YEkQi4kbsisgUW+1J6hCkCGZeiytGeZK20oe0mzj5JRZri2glxVZ56krOiXJEUoKPumJLTMSujIVYZlFH3ZRJ6iSBvZglQpK6/GEWJNMHSbnrxMcuOwMJUIkr5dI8do7qEDD1x3nGhu+IdzmX9IfYaIzFyF4MyhyljAV/YmGrz0RY+cpjK0ZtiNcHqMTPJzuCrRR7+Z4lsdolqi6/xoKNMmMnaVudlCXxITlX5jkWnzrEOGSs1hN7fom44nQuX6xz4l9+dpLz7XmWJ6Z1e/0gomdMs+s4cUr6qT6xkZ3xci+gLM+6/MTpWeJHcq2vjjBW7LUnT1vs5LmWL+b8vIsh9+BQw8+nZyr3ythIxtKYGWdjkGtjk3x1I9q638Yr98ZRPoyb+1JKKaWUUkopZe+pkFtKKTuDbYXcJz7xicsP//APL7/7u7+7PP3pT+/u21JKWYO4Q8QhqhGTIvREPCJQECoIShGfIhIRMoirhEp58Re7pFzzQTRRj7BCEFFXG0QvNs7nevKhDkGGyAaxSmzUiSjommilHQIaAcY5lImPz7mua7YS4cYYqMM2Ahh7duzFLekPcSf9VxZxTVIORzb6LH6+xUqwTL30mW3O44+NvotNe6mTmNKf1HPOzm5Zwiz0VX7SXE+ZmNgSEwlZ/CiPOJlnxLmYxDN/D3B20ypPDPI9H77/mL28lPEn6Ys6yr162T3QNzgq14ax8wy4N/L4MVbq8hffknb1wVgT75ynDrvEONflS//F4V7FTkrfHdVTbpz0DbO/2Dtqkx2/nkP9yQ5k5+qkr/rIzocJ+Ie+Os8z4P6zdy3fPXPtnK177sj/fC4ZE7bGQltQzzl/4tVH7Rwq5LlxT90rz488P7vGwfjlKF+5NAu1+VmXjClbye8/PuUZ2/lellJKKaWUUkrZcyrkllLKzmBbIffCCy8cC59Pe9rTln/yT/7J8m3f9m3L+973voq5pZQyQcghMEkRfIg/cE2YYiO5JgARyYhuhBD2ETEIYGwibki5JnpkJ2N2PRKO1FWPkMTOdYQR5QQVddSNsMUuwhk71+o7J7KIzS7Q+HCMuDbX5Yswk1cwJ36xJgY26hkHMRMHY4v0I/bGTp2IaDBO+qye9vTDa6359LYIvlJHXHxBHESlCLKxU895+uE8CQRBu5HZ8jXbznXSV4IjwdXYxVYbSXMd/cj3FMfWUT9yD9Rx34izBF/t6G9imZMx8fpntsbJfRY3v46u5Ttnn7HJOEjJdx/cj7zyOPd7vd1cx5e5wrHHHrur3fV+p4/KIw5qh42fm/ib7YyT7zLmOzHkecy1c+No/Pk1/p4BZfzPyX3y7PAhuZbPFz+eKXliYCePL/n8sfMsZSwl9yg+3Dt9ORTQJ/dIMr6eIc+Z8dV/xwiykjL3iR1746TcUZk6xnG+D/NYGnPjl5/3UkoppZRSSil7RoXcUkrZGez21cpvectbll//9V9fzjvvvOVRj3rU8t3f/d1jd64F675euZRSrocAdfXVV+8S4yI0ETAcI1ARMIhk8gkXhA1JHWKeehGOIlhJIJIQJeNTOb/KI4Y4VyYRUQhs6sW/uqlDSJMHvsRHECOgRVThU1JGqHJkq64jEZfoFpGMDf/6lPghHr
HzHzGI+JPvg9WePvDv2hH8qEcEirCmvnG0K1fyXali1C4cxUJYIpqqq54kJnXExU6bUvrj75uk/blcShtJ/BK9jZfz2PPF1lGSp0/iOf3008f9ZzvbsRGTGJTd7373G4KqvJSJO+fxfc973nO8Uhjq6rdxEo/ra665Ztjpu3rac8/Z6WPiFZ8x1R/PjWv26ZOjlDzJvSGiesb4S0zakNRLvOLRH7Z8p0wiUqvPp6PxOfPMM3c9Q2nXOb/O2Xnm9N/zILFH+uroOYO42OQ5NQbaAf98R2D0DDtXnx3BVrvgj406xinPlj59+MMf3vVzc7CiP34u3Ve/N5zrf8ZOys+fNJ+z87vA0Tjx5ei5N765H7lPORpLP8NsSimllFJKKaXsORVySyllZ7BbITfYhevVys95znOWs846a3n0ox89BN3u0i2llOsh4hB8ZsFtFrIIGsQjYkhEUaKScokYpH6EqxyJHwTJCJqzyEb8kJxri2DlXJ2TTjppCCzQFjvCqTadxwcRhU+CoJ2o6sRe4peNPMKWekQYIg/hRp8SV0Qyfo2HOmJybncp2BCA9NVYJI4IZ+oYq4hC8uXx7yipZzwc1TGe2nGurjJClLiUaSv3gh/9yVjJY3fKKaeMOq4l5RnT9XPtEruMl77xo1w7ytOeZKyMLf/uC9v4ib0kdjzkIQ8Z/U4MseMr95rPBz3oQWOM2MkTR2Jn51qc6uuv+vKMp3L5jmz48YxpV74x4yd9kKeN9JMg73669+5nYpz7k3Njyjch19irn/4k8eG+GtOTTz55xJi+rCcQnE899dQxnukP37lGnhd57q/YHV1LnkGwNwby+EkZW2XGOuPGp+QZU57nMc+kny8+Dlb03QcZZhGXYC3f7wVH4yHJd9Rvduoody/ZKjdu7rUxMV6x49u1cvbuDUG/lFJKKaWUUsqeUyG3lFJ2Bnsk5M685CUvGYKuBTiLzY95zGOWq666aryKuZRSdjpEHmIT8YIwkd2GxI4IP2wiYkTUUoeYQfBgAyIJwYMImFfHKottkmv1HPkmrnnVbnYcyotthLO5DrxKmYgWIUZs623II8zxR4yxu5JYMwu/6rLVP9fq6oc+EAvlsxED0t+IQ+wJfnwSzjJObOYkz5iyI6olBvbZKSnWtEUklZynP8ok7R533HGjT2AT4TOipJRr40SYdE/Ewde6TY5iJDoSx4iD2pbvnie5lm98iL1eaWtc4oPNfK6/dvYaJ+1nnNkkHolo5h4dffTRo1/65HnkJ/75Ishq07jJ48u4sCHoyksyxvoe0c6zID/9SErfjJVxcn+QcnXmc8+5MSLAiyPtpy9J7hWfeValCLdiE48xlzc/N+ygXH5i5y+22lQ3dZBnEhF+xeVesecjturK0x+xHmy4R54p9ys/U1JEbWOmj5Jz90ly7xzZqqeMPTt1jYfxzX2CMs8njKdnxVyylFJKKaWUUsqeUyG3lFJ2Bnst5Ia8dvn//t//WxG3lFImIqZFzJIIPBF8IhxFTIvwRTCSJ4EwQgQkGhE+iEbK2M0pwpFEHCGaSlBH/myvPX7EJA67SoloEdBmf6mbo3riUYeYBv1ST2LnmHYJOtm9qd+u2WsjddjOEIP0N+JaxDJp/Zwde2KSPG048i+J2djOoqFrscF4ZYzVSz/Z6KujpJ72CI3GSp8iFmc38HpSzrdxMrbJTzyzb8IoMZ1vMciL/VxHu15nzVY8YpyfCW0aM2KadiO6at9YRbhk42jcjIFz/rSh7YxD2nZNlOPTvWSbONOPOVblnsEIs8ZWnBGGYyvPcy5OtmLJc6FMG5JzMfKp/dh5RvRZmXPtznmQB31XJhbH1GGL1JW05zo2idO4unbkJ7HKU86XeNO3gwX98ZwQcv08GV/3zT1XNifPkSO7JPYpz7NmTHIfjaOxkZf2+Jav3DPjQwbOjWsppZRSSimllBunQm4ppewM9lnILaWUsnsiZOTVvhE0CEqEnlkws1s0ohWyu43Qpg5RKbaSujlPIpQQDokxEU3W66SeuPglYHqVLeGFgKId5WIW71xfItLY7Ut045+9vrCLaONcfwg1dnpK+iKfPbTFJjGqJ2aCUAQ0KWWJzVFSL3nqzeMkzrSlvxl/52J17h4YY7ERBsWiTFI//Uie9ozVGWecMfqVdiSv041d2hHLLOLKT3mSa7bK7Qg2pmlztoutsbETl5Cr74nRUcr4iM8zoH2+tWH8iaDJ058Im3xowzHn2b3Mr7YJb/pCaNZOxpRdYpTUEYP2TjvttFHPtWeDb/bxrW33Tb8JueJml+coR+0ZT/23w5hvyDP++uEZkKCdXLMBP7HTrus884ituKDNxCAuPh09M/xoN3Zs+IzQyyc/+hn/N3fs3vezQMw1vvriOdFnfXLtPCKu83xAIB8GkG8cYmfM/GzAmKVujsbGuDsaT+PotfTGvZRSSimllFLKjVMht5RSdgZ7JOT+yq/8yvLsZz97fC9uku/K9ZrlUko52CHgRPC5qSDsEDW83pbAQbwgAkX8iqAm327XE044YYgrH/3oR0cecSPCITtH9eY8r9Cdd5byGTsp9QhzRBMC5r3uda9d4pa8+GNPiFJHvvaJL4RMwo3rCFps+HSubXWVi0finxCUMvby9F8b2uZbIg6x4Us50SeisvpJyoJ6YkofxAVjy079WUBkQ7w2vvElEZH0Q2LrKFZlRND73e9+I+6ImLHlT934F4d+qyOWiJzria32zzzzzLHTVp/lx+9sp+zhD3/4EDzFYzwk8SWxUc93I/MXwUzM8j3jRDbJc5Jx5U9dPvTBuaTcUR8IzcThCG1slee7hhOvMnbG17nYlTlK8tKu5+/e9773sBdbnjNH/nOufffKsyG5zs/s3Df9hTz1k+cI/pwbE0fxei7dLyn1xZc8NmIgVPILdrkngQ1f7KD9jMnBgOeFkBtRVsqzEhHXuBl75+wcjRHbXEueJ/03PhlziUAc8ddzL8+48cG3OpdffvkY21JKKaWUUkopN06F3FJK2RnsVsh94AMfuPyf//N/xuLeM5/5zOVZz3rW+H7c8847b3nKU56yPPWpT13e9773jYW3Uko5WMmOUUIN4eGmgnhB3Pn4xz8+xCLCRZJ2CSB2WxLZCByus6MyIiFxJMJYxBJx3+c+9xmCGFFFOwQ2dfhlw15y7ciW2EiAYR+7pPgmRGmPeKUN/gkwyvUB6UPaiDCjDePqmg9E5HFNFNKu+IlF+irFPxvjoG3n8tTnD8aJwKct8KMvjgQjOz0JS/oBu0kJ0cq1k5iVGyvXGR/X+i4R1u0E1W7s9V1yLjZHbXuW7EQljLHnhx0/zpPSfyJuYkx7ynIUkzF/xCMescunMRGnlHP148+YGENjpczYgo081+q5/8ZOfF5rGyE78Rl340Vwhb7rIz/qsHEu37V+GCfjJc5Z5J0TfBfwqaeeOmLxAQd+xZb+5Cg+99m4um/GYhZyIZ+9+mJA+i8O/YitPG3xy4/4lcUHW4jT2ItP0i5f8tjJ0++MSfzNz6Lr+GFzc8XPimfL0e8DfdMPY+nc0bjoj58bP
9Oz4Jt74Vw9fXbvPcPGDO4hX+p7Pvk0jsaFb2W5F/k5KKWUUkoppZSyeyrkllLKzmC3Qu7P/uzPjsXTf/fv/t1yzjnnbOYuQ7gl4D7qUY9aTjzxxO7MLaUclBAQ7JokOEbEIEQQF6SIEAcSfglUBKAIFoQPIgbxRwwRqtgS1dhH5Eq9iEjEE7slCTHqxT9bSX12/GtH0kev2PX7PYIKsen/Z+9ufvZB67vuX3+BDeVpYMAZHkoBG2i0JbE1JBpdSIy2CxetGHdjGuOiK+vCeLNzpQsTF1RD0kiaaNyY0I0xJiw0qZjYWqCUdpxSOs8D+h/c9+vsvOc+OHv9ZgYYmGvm+nySb46n7/NxnFfLfH7nddG5FnH5RrzRB3tiOJdLaz6IPPwaXvoImvO83rKVC0TkqIWOPrAxl7s56AkiiF82crfnnvg0WtOvl3QQuEhL32atBnnrjRj1tXqIfL0JxKT3kQ09eadv7I4Qx37ttDoQm+k6j8wl8kNsuTdj9+LsHOUm34985CMXMlV94sjXeM7VScd7Vns96TyC0d3ID9Snn/bqNRu++gasHspXPmqIwK0Wc7H69cj0+eCPqJ1O9bg33zZH9pYHyEGuemok3pC3R4/debf0raFRXOdqPfcI+1Bv6JmzMbKRI9/O7ekF4lIPwb4c2dAvF77kaU0H2qfPL5u7CD8Lumvis5jIn6jLW1EjHaN1OvVMn9x571zf+a6P7OiyAzbdN13r8/0PwzAMwzAMwzAMD8aI3GEYhvuBBxK5vo3rW7f/7b/9t5tf//Vff3H3/wcy169v9A2tfSt3GIY3GhALCAzEF9IKORHRgGRA0iAiImzsOWcH3y8pE2mFsDiJPkQIAolEihhJ+pEc8omIjpCyHxlmfhJufCBdEJsIROQMm3xHThK2BAmDdEMSImCKQdjxTd8cCYOMoYuAFKta5MM/PfMIG/71Vl+Npy+xrIGvdNyVNT8JXeKu2BrlIud6FNFmD9RQrXKrV3QiEdnxFSkbkWmdgDr01Z3w6VumRv7cIaErb71BfPJNhz96xuZAD5H88MMPX2pSp95cC0KNLznQq756qD53YL8zvVSXkXSmZmKPLf9y6o2qS35qkbt63Lfa9bc7q6ds+XBWf7wnOcuP6Hf3Vp70+nu+zu1DOvSrxZzYFx+cZddecSA/kYe9yfZPXX29fmfq8g7FYUPfz5HO7YE9/vSDlMtdgPrcnz7Ls599SW/DXJ/USyJwCXtSH3rD5mzoshErO/vQ6H30RvXH++mtDcMwDMMwDMMwDA/GiNxhGIb7gQcSuf4Grr/H96Uvfekit8HfVkTk/vZv//bN17/+9Rd3h2EY7jYiL3xLE2GEwCCIBiNSDIERkA9INeQefQQE0iH5XoHsiCjzq0iff/75l4ivyA3ERoSYMZEfQkxOkSDGziMG7UW8yZsNYpC9c4RJsRAohK41Hd8wRdRZI2MiE62JOZLPPn358K/H6UXQWFcf2NPXyDr+6cqXTv3VJ0QRPTHEUkv+y4O/dPkD+u4PnPHHFjknR3vsIGIJqeVtyOfsB/EriNUgR3ZyQoojKdl3Rped3qTLLwKXRPg6M2anDrl7n37jBb9s63tjImaks9rEY1/96iNi0NUbdmrnt370Gegz4YwvdcjrWs7YbMzrkxyrzRyRh5D2K6e7Dzk5Kz8C8hLfm1OD/KDRnciNvlpI+8lZa3vy6l1Ygxpa80lHD9SRbvlb27euVvtgj7119ddbe/VaXuyq9fWG+rxfn0Fv0724H3tE3vbU0M9Hkp4+nH1WYz9P7GVDPz0+2epBe9A7a86PX/XN5zAMwzAMwzAMw3A7RuQOwzDcDzyQyPUf9j75yU9e/pbjF7/4xRd3vxN//a//9ct/wPWrlfeN3GEY3ghAOCDSEJoIikgKgnBANNjzMxCha2yO/EOYIa/6lbiRUa8VELr9bVG+ET8EOYLgsK8GZCmSRGx5R6IgQ+jTI+bskDbq8HdJ1Qj2EZPsxBIjfeIf6iDUQBy6RrrpEHb2kY9+BXP6xLk8EDb0imFP3hGJ8rMHaqkOtdpHgkW6lyO7/Bnt8RfpJ6a74rt8+E4HSYqodrfeg78v69u7fD377LOX2GpVdwRudyI//dQj9yCGe6Mn1+6CyAtR5h9HycearjHC08i3nBG4/MpFznyVu9y6K+LXM+tLNcmDvtwRZsCO6LdcwbcdzfkivQk6cvAZcJ/w3HPPvVS7nAk9+YmN8OXfOb1qzvcHPvCBy69TlpP7k1e+qquRX32tHmCXQPdZHtaQjrV+GtWVHX173k3ojdkz8gfs2pNXc/ZEbu1b8x0p2R0Fdt2F/pC7AHmp088179Odq1+u7tX7IOZ+3tFN1H2KN/D000+/9A8J9MLIl3fk3sVzb0bxug/+7Lkz5/rjXvTQ52kYhmEYhmEYhmG4HSNyh2EY7gceSOQiZn/2Z3/2gb862a9e/rt/9+/ePPHEEze/+qu/+uLuMAzD3QbiLgIKuYC8QDaY2yPIjAgMYh7hi/RAahgRTmwRN0iI1woIDESbb+hGWCHHkBtIE6QYQhBZUuzz25PmdAlSBEmCkEWosaHHJuKEIBbZ8odc+djHPnYhW8rHPuHbyIY9X3qAHEeM6h2SKxtjeQHySz76pwa25Sou/3zTqy777gyJrn7n1QvW/NNHyPLdHeYrsSZypONO6UU0de9iPvXUU5f+1SsEqT6BN9C3YNUZ0SsnI105Oke666d9/Sgn6+7ViPD6xCc+cam1PkJkGT3+xXOP3oF86Z0C+krHWi3i2Ys0syemOVFfd+GMiEPf377lR+56Q999Z+dMTeUnlrzU4s3prb3yAXNgz1Y/+1boCTEak9Ygnt7I99SxrtdqgPbk0J68es9ycifO7BPz6mLP1loMNZJqgeqny9bo3B7IVc30xHw9oRZ5yc9nxnt2B6Sfh0Z19DmRP/1Gb8jb9/NKb+n5WUn62alX5t4X1Aufke6iXvHlM+azY/SPCYdhGIZhGIZhGIbbMSJ3GIbhfuCBRC74RtJf/st/+SInmYvE/cxnPnOZ/4t/8S/2bdxhGO48EA+IRoQm0iLCwRihF/mAcLCOrDh1kRyRePb4MtJFZCCFvl9EsCB7EEt+xShSF/FRPs4JXTEjYiOdygXZh7zu1/nyh0hqTLJTu9+0oD4EjBj8RD6layxH+khZZIy4CBln5s7ZG4l9vdLzkyjmT07qMWdTfQhzkl9wTt9IByKkIo+c1Yd8Ic+IuRzUSxfSUUd366y+0vV+eh/OEE7Oz77aM+fHN2aJWMWtF4QtovSRRx65iD7SKVdjMNcDuSF93Wfn2QS1sNdr+3LVi3OPjhx7u3LWQ/UZndtja60GI9tITGK/XnhniGu90yvn/ItD+AT6pLcgptrshebpNm8k1cGWdBbUS857tmYjdvPeiL3eFHFH/OqDHhXXPfc++IB00vNGCPvqVqs1v8V8PaFGOUakm3cf5t6aO5Szffpq8w58JvSBqMeZ/hG++tno3tn3BtirvbekB87zSfys83PPt8GHYRiGYRiGYRiG2zEidxiG4X7gZYlc
BO3nP//5yzdz//7f//s3jz322EX+xt/4Gzdf/vKXb37xF39xJO4wDHceyANEICIXwYBcSCJbmkdaBKSEvc6QZ0hOxAdiA1lhHmFB7CN3EDrfD5AcyCFkH5IMwRH5Z0SAiGGOTHFOH0kivl8Z7G+TIv3kRJdexGOkSX7V4hutyDh127sW/ulHxPj2aN/ElQ8dYwSNmHSJvXqtT/p51sCv3otB+O9XWkOkG/16Yw8xhHiSQ0SRc3p0+DZnS98Z3e7aXlLu/Llna3l4N/qIFC8ffYxAPwkt+vToI2nZ1gP+moutf3T1PbK63Ms3ATlHugX612Bbn+SjJ/ZAbadPqO/uh35wJ/VLb8FngA6J8JOTOfIWGW2vGr0ltuYgLp+dEagOuZJzfq6vx/pkXu7mhM/q7oyu3hjl0Jqe2uQtp3pin9jnU23u2V0U3xkf6VSzt5J/kAN76z7TrxfKs2/i9jORuO8+I0Y19G7UK39Eqzdv3l3S703kSx+6b/Vb8yM+cWZt1D89OYnc3sUwDMMwDMMwDMPwnRiROwzDcD/wskRu8DdwP/vZz36H2BuGYbjrQBwgma5JXIQNssHYHGGBhIBIRWv7bJAUfCBnCOIiX2IQZJw18iHyBvHxWoC/CA6EB0E0ydUZcomILQd/azXSpDOCeGRzihp9IxSpqGZ9izDmm4650do5orK/uRsZ44yddXvyZKs/SCO9RuBUUxJBlOg3glOf+XTOnxyMiXvqjvJZz8ubgNrElku9Ifk2yru7t0ZMImTl7t7dMd1rcl1cNshMf5YACc2P+vmhX5wItA9/+MOXu9JPcSPXsikfe3z1VoOzxuyDfOSWvjNxgK48vB9zvQZx6gM0sqUnB3diXY7V3K/KBnHqOd2EfzZiXOcL5/q0a93YHMzLRX4kpCcXOVmLS9fdtScfo/4a65N9nzlvRT/BO3NuXR/5N48M9x7F4K/c6Mmtd+o99G5eD6jBzzPvVE5ylru9CFl79NTg7tSJvNWTfp2yftFlp15iTfjROz70B6yd8dl98aN39tyLvvSzTky9G4ZhGIZhGIZhGL4TI3KHYRjuB14VkTsMw/BGBeIN4WiMBENaEOvmCAekBbIC6YB4QVBY20d2ICrYE7bIBWf55QM510gfQYGwicT4fsEPsuOFF154iQRCdEQqWssVQYis7Kxvi6YTkYUsQaB8/OMfv3w7NGJKDNCDfBjpIl4Qmx/5yEcuOmz4Y0P4F48f9kbn/OsN2BP7tOVfTfU1XXN61SE+W2DnzB3ot5rkWI3uj40994yotgfVWn5Ge923ka4a7BO+EFHeE3/ssrWH5NQb8ZFV9uVEvKuIK/4R7UDnRMRYPWGHJFWf2CCXc4TyPMGWLxCHP6I2a3nS0Wt56T8fzszV39sC7xyRbb+6xAV3wMfZD+f82adH2DqnB87LO1/pwnnWHlyf65f7kWNIR3xn1uotfveir/y4U4hUlKt9OsFajOrni15vUt36q8/F5wvqc3nQQVa+HvAu/AMX90Z673I01wujXPVK/5C3xFzddNTDl3fhjdqrTnbWaq2H9vRPLPXrp/57b/za01dzZ/3DlWEYhmEYhmEYhuE7MSJ3GIbhfmBE7jAMb2ogVBG5iAqEwinIBOQCsgIRQaz/7//9vxfygk7krTOkAj+ImkgK+0akBj/p9O1cse37FbwIitcS/EWsINu+/e1vX/ISD0FXPLWYOzciSBAl5vL/mZ/5mUvdapB/tgkSxX6+/Lpmf/dVLKBPp7F5pAySBnHJP3ug4ywy2pm1uVzcG6Gvd85J0HM189uvglY3HSSc+spFnnyqNZKqc366vyQyS831RV7WJJ903DOSFUnNb768AfmB90Psq0dd3kb9g/oSrPWNnXdI99S51g/2uysw11d1gBzk7274rzfmnbsT/eFLDUZ6avLNW/b82u8e0tEDAs70qHn10i0fvqyzb34tcL2+RmfFvEZ3Jxf1Gt2rfPvsm+tBfdIH+94UVJ/PHB+9i+zs6ZO1euyxFQvoWtcL4MMb/2FCvd6h9ytf70zO9nsX8uozWY7WoM9qoctOz/VJnX4+Wvsc8aOPSO76y5Yfczp89x69pQhdds7Zuod6OAzDMAzDMAzDMPwJRuQOwzDcD/ivof//f/E98KlPfermV37lVy7/Afnl4D+4/bN/9s9ufuM3fuPFnWEYhrsBJCqSDdmHrPDzDMmAnO2bY8gDgmRATETuRAx2hvQw2kd2GJELdJEPSAfnCJ5Ims6+8Y1v3DzxxBMXIpUNckcsBAXSGGnxvQIpIhd5gvp8I9Q3TxGcakeqiIc8IYgkaySOX6f87ne/+5KrvMrZuTlRW/Lwww9fvsWnd3QivPJN1GiUWz2XI18nGSMP/zeELj+RXshCdmyg/rKlc/pQA/96nw6fesuXHOi4S4Kw4peePNkAe1CT2P36ZHrypsunuXzp6Tlf4tgT1zlf9MXTe/6IfW/DfmQWH+YJnGvC5nrvhFyS+picPa+f8iVq5IueUV721akv4nY/6vCZMULkmrvx5tm5N+9NTezF1nvr8jOvD9b10zqRS/dxXW/z/DU/awb95guc64E4RB+8AznL388GdcmZvdjm/UMEfdEH+/yqQf1+DvChBntGuiQf7PjRq+L6hqmefetb37p5+umnLz8fvv71r1/2+PhhQM39fPD5UZe81eRMrtZGPVefHuqltd6mqyaih/TYeSvGoBfs6NhXZ370WO3uQq8Qxv427rPPPnvZ/+pXv3rz+OOPv27fXB6GYRiGYRiGYbireOyxxy5/AnEYhmF4c+OB38j1dw8/8YlP3Pzbf/tvX/o/CrfJ5z73uct/gByGYbgrQBAgJpC4SArkQYJwQCQQOsgIgliI2ENGIDDSd27ky1mEE9LHmTnihiA76CI46BvZIEwQy4gtJIe5URzkSCTS9wJ27AliCXlE1KMue8S5PcSJPHyztl8DrGdgTk9O2alJnmrwq4PVF3lGn7/ik3yoHQEeoUOfv/KyZlvf9U9efRM3v+LTYdue80hqKCYxJ/TlKQ/irtKxH7lXjb0LehFXRKyE32yBfnqt5dQ3LCO8ekcRZnzlh441FOfE9Tqc+7fZ8E96H0YQTx1y0vPO05FT6+qKwHQP3Rupdv8gQX3Atz2+5eHtgD63l1iDPOE8O+U2tH89ii9HdZJgjxRLPebylqOzs0/puU/1uDc2zusbHaIXRp/p+ssmn8R+99+5nPmEPgfF/0FCHv0M6m0SuXRHIBd5gj3iXA0+U/wQPvTJvj7Q01O6fb7s6YFRncZ6DEhcOdgTl+iJfcS58YfRm2EYhmEYhmEYhjcK9o3cYRiG+4GXJXL/4l/8izdf+cpXbr70pS+9uDsMw3D3gUBAUtxG4hqdIyzMjciFCMLIB3v0I2Nb04+AoI+4QD5kY9/6jEcfQUms6SJQ+LZHP/KJfTmYk+8GfCBO+oYbv5Fu5vKXMxIXKStn+uzUQ8ce2K8n+unbu+zppk/qXcIHG4Ss3qkBkdM5v4jmc9+crrzUbh+JQ48/a3Zi6aee+XXGSCT2auWjnNnIUb/1mvB
7EsJQv9SMGOZP7vbFJHyB2q3FIfzJo34Qe/prLhY9/tjmh441kat6ktD83AP+IP3W1yjH6jCq2Z6eGOWhbn7oeDPp05W/sz4D4D7qtdwJFMvaqDa9tF+e/Jb3KXTLzbr3d61HTlyvwV5vmj/z/MHZD59PZ/rArhy7t/bcHxite4PVysbd8qVmqDenHlujXvLReT9X6nm9+EFBTHl67z4X3oP43rK3C9b0QH96K0Z6aiD8yJ8dGzr1iVh7V2pyTupxn53rd6GX2fsHAv3DFEQuP8MwDMMwDMMwDMOfYETuMAzD/cCI3GEY3nRANiApIlIjW5EO1vatkQbICL+yM2KHThK5gZB0RieSB7nFL9Ihwose/4DMYGtEVORbPHOSH7+SFqEL/JnbjyCi992CDeKjX93Ml3z8bVuErFyQIvzbR7ggvyKS7Efc+BW8CNb86kNEVbrG9qz9amckEX2xnUegNvctO+BPL32DWl/yl9A7e0FXz/jXd2fi+tXVzouj995B9xJJ5Nuy5Qp64ddFuxv2bPmUFym2OV/OrfXGG7FfX/SsN2fdNwnpitNbYdfeSTo6C+c8tEcfrvXtl0s5n7WIKTexkGNsCF16emOeD/p+Ba+a6gNirs+Bc2c//uM/fvPkk09eavWe6rf6ekfu1nl1EnNADovX2alDwjkPt+2JJ0d3rN6zX9Y+X2KQ7rPPKmFrtKfe7omuu9MnZ9bF8B754VM8/eWHnn6wsy8HY++Mfu+BPrve5g8C7sdb93PG2M8pIge5yclaXsRa3upQJzs9sVZ7OglbUIe182plz8Zno7vzLryP9uixte9t6HefpWEYhmEYhmEYhuFPMCJ3GIbhfmBE7jAMbzogAZATyIIE4RDBZEwQJxFp9JAMCAUkRUQFIBPsWyM72CAVkA35QkRFjABCgoiByIgsi0QpLzGRmARBiSy015ydOPx/txBPnsgoZIg1UYP4RgSoPT0QC5GEZHIuB0SunnZePpFYjUS9yOL6xm86p7THrxi+XatH8rnWJUgco749+uijL+UuF/v8iSW+uV65B7m7R2t65RTJRt998JW9Op3zzY50hhgurr7yKRc2dHp7YA7eivzBGypG5NdJYhLnBBqBTcjHqQv2yvd67E2XF7veP9CphmL51na95ae7L6579m6NhC8+9aBa9Qexx94eH/yftdIv9+segHk53YZ06RD5GsXtPoIzNZwx5SR3e+Z03HPvWA1i2JMfG77Zp6e2+lkfvZF6w957ce/ii+M+9A/45IvPHySZK4Y33z9wkIP65NWbVNspka9q7B1lQ5/UG6O69UCc048eqLt+6Dd9cz7ru174zOgn8Y9t/JzidxiGYRiGYRiGYfgTjMgdhmG4HxiROwzDmw4ROUgCZEOEAxIisgKpQAdZYp4eG6QC4iJS1jlyARAx/EdERGIAf/YRD/aKgXyw5iviAmlh7yRGIlbo2LeWg3m+It2+G0RUycs3dPmwJ3+Qh3Xkkn1xEaxiB2eE3m0i72z4qL96JqaxuX01+banOt0JfTmKcW0DckJ60XcH9vOfZE8XCamv9u2d/bMHco68T08vSGQaO/HqS28hf+7MORs18WluD4z0SO+AD/v0SfbOu5cT+boNpz49eZ1STb3xU8+ZubjqV685PX0h9LoLumz02ZqeX7ttDUbCxln3XM3QyJfcib36b35bH+g/COmqk4h/zsUP/BA1sTOvF3IQn50cult1ZGfOrt7xTXzOrfngm08++OszzC/b4ugR296/Mzb8RIaav5aQh8+FfxQhr/NnIsjL3Cg3Ot59/bCWNxs69svZP0rQFwL29IsdG/qgP/WD2PdujPTNjchcn0PiH6L4BynDMAzDMAzDMAzDn2BE7jAMw/2A/6I2IncYhjcdIlkQDAiHiAcjAsFZBAKSAbFhRFw4R1ac5APbbIBOBJE5RCbRi5AhbOyd8cuBvb0IE7rW8qHvV8D6FigipRxuy+eVQFe+iBYEESLl7JFRzvKKVBY7YiV981PakzMb5FCx7LMjiKsILqO8kbhIQLXQd1Zu6Sd6cPaBb2QP3XIxt6ePCF+5yD+fdMpXfHpqpUM3n+KpwZpt74Oee9IncyM/3SVJRz3O7J39Fz+dfJh3B2zM7Wd3G+ynA9YJH8UyEvejDjGh81OHrdr1pW9gnvv1hn/9lCdd+dd/Ix3EGztreu5Mn/gwL3Y9IOUvJptrVO9Z94lsq99YffI8QVe+6UF10RVDbnpgX95nr4g6iPrV5q0YndHlwxmhpya+9Mc5XWfdiZwCWzrpnmffL/hWlzfvXZtXg1zKlzjzmUuvfUJXTcb6SaAzsZxVqxHUVe9JczGcVXc/r/psWpsPwzAMwzAMwzAMI3KHYRjuCx5I5H7961+/+bVf+7WRuMMwvGGBVEAGIBSQM8gFRAFyAWlgRFA4OyXSLrICIiAiOyKuInsjKcSzBgSGfWtERYSGGJ1DsUg+5QDIVGRnf+vVPj0+IklAPa8WiJFrQrfeIG2QePLmX+/kTl8M+pEs1cS2/Kzt06GbvhERY+RHHL+CmX++0iViGfXYmXp9u5YNsK//+aQnjv76e7dyoUNXPmqla26EiGF6+krXGRKSL3VZ+/XMiGG+q0/evQV29kk9A3qd2XOvauPb2h0WR7+N6qAPjXyG9k6c8ZPqNvKLtNPH9Ottd2PsXO+8AXp6wQ+hY80feJtq0iP1OMuvPXmwAzbVp55yM89fb46O3jrnw97ZgxPtVzMpX+JcXfIM6YGcgR4f4tKV0+m7moz88a1+n5nuG6rZyAdffd7ZOuNHn5zXc2t+Tlu5dDftvxZQO//evLv2GZCH/stXTcbW8jfSqy/lV+1q8o1Z+yR/xn7+0WVTr30m6YIavTlndPWjt8l392pPnGEYhmEYhmEYhmFE7jAMw33BA4nc8E/+yT+5+ef//J/fPPbYY7fKpz/96ZtnnnnmQvwOwzDcNUQGADILkAUEsYCMQVRcS6QSW2LP32lkE/FBEA5G55EY9BEW9rO1RkYgc/hoDzlhj52YdMR1Jg4SBJEod2LONhLGyAdS5LshevTF35z081s8tuKXF8IR5BbxQy8CmD3IEXEqDz7o1jMETPOEnTr81gf+ImfoRiybI9iMCKSPfOQjl9zSlUPz9NnqK78noexXs6qB0GOrPvW4e3nLw3k29Nh1t5Fa7sJZpJJ47QU21vbTI6BuPUdEiVfc8ulNvRxRxebEGcPcee/AGvxDAP67HzHlmVizV6P35U7LzXm9YMuHfCP2QL50xFA78Y783WO/CpeNXpEIdrH4cYfGzvPtvF7Lpbqr7cS5Z14/SJ+f4JzPzq3FDAj73ro8vUN3Qtca+KQnv/pNT/7eYT1WGz32bOnbY0OvO7GvXm/ZXr0RB+zTl8NrBTmCO+rnTCJ+n40zlz4LcpRL9yY3v7JdD+orPfa9afbq0BdnoHb7+bHvLfW59qbo9Ia6E7H4sTcMwzAMwzAMw3CfMSJ3GIbhfuBlidx/9a/+1c0nP/nJm8997nMX0vazn/3snxJnI3GHYbjLQCwgBxBpiANERKSEEY
mAePKNMHvIC3sEyUCfPT8IECQDQoE+e0AqsEM2sEM00OWPboQGOKODtKCffz6QKhEb/MmLnbwRI/zRN5LydsZvcV8t1ISgRiYFe9Wi1gioiCz75Y2Yk5/cxY6EuR6bI458u7Ye5Z80T19dH/zgBy/96A47i8yVk7V83v/+919GuZD0EU30xDPS8Wud5d+aP/rdsVGODz300KUX9Oyb0+MrXXfg3FpcoGdNT/7ulT+6Rj6IfTr1Mx13W76Q31CNYlzLCT69nXTLm/BdHuKrVa7tszEneuhunOtdbzS/5c6/t/jwww9f/l4yMo6vcukzIR6RG/9s2wPjmQvQuYbYfNYPYGtfHqQ8u9d6YE28EWvvWO/BOt1I6mJ0n+L0XsEbAmv6SFx6eldM+uoRF1nqndAXmw7fcpC3ef2g83IE/3cLd4DgFkce1t2pXKydyaU8jOqSZzmrTd32rNXTz9hqyZ7v+s036Ie+0EHQ0kEIE3ePxO3+xRDPWXbDMAzDMAzDMAz3GSNyh2EY7gceSOT+9E//9M3P//zP3/zu7/7uzWc+85kXd4dhGN54QCz4VmJELoksMSIt7CERkAPWiAhrYg6tkRIRLIiL9tgiWxAabJA+yAc6ziMfrO3bQ2wAIgPBwZfzbIx85Y9+BAmSCAmjLoSqGiDy7dVCLnJDyBjFFSsSjSBQjHSJWEhZPbQvd/rsza+FDn3kFls+7NPPpjlxX8hANaq5GOnKh4jpvpDDehFRRIpxij7mV4388eNejNnRQ0Tqq37bo8t/dy22NT/uvJ7ZM9ZHuvZOQss5ghCyJXR6Z3JgKy47EvjLr7G5PM35Sl8s687TIebemHvxpuicvWbH3p6ckOvXfeaHLj1z/ugBn/mSj9H90+WDrprNxYJqJXrQ50kO1VQ94hmz7xycQf6t029O5OANiSO3/DnTH/nzAXpAp17VW3HlV77elx54U1AP+dQD8SJy+QFn/DjzuWq/dyMPJOZrATH8anFxSP8YRG1yStSrB3Kxlos6EKz21Gesd9l3Z87knK069Est9kAMc3tseqNsfUa8TRCLLz9jiX4NwzAMwzAMwzDcZ4zIHYZhuB94IJH7kz/5kzd/5a/8lZvf+q3fuvniF7/44u4wDMMbC8gDRKdf/4s4iYRBWiBbIjIQCQgFQCawQxREviAfjPbTRTQY7UcqIGyAHj/tp8fmhDVBcKRjjAiBxsB3dahBfQQZEyHFhkSIvBLkjSQharZmGwFF+JGf+BHIEUz26SBbEFX2Ena+/YfI1Tu69umd0h5dxKyRrhzylR6xr35+Eb/iyFkOpDqy01d6yNnuxn415xfoIHLpqZHvYoM1f+qh4w1BfXNmH6zFcmfeUdI+X+De+HPWe4j4ckbXnD6xzr6z8nFH0P2JlZ15scXwZoh5+mqth/TkQSfykR4dUu/oydOdqNU+G7bi8UdPHLkmbKqTVDepFnvVaQTz1s3Zh9b17lrkpBafI6N4fLEx5letfebUY24/vfxZy7fPpbj6U7+JnhDvi56YYM85e7mIVc31wdo/tpCP/e8V8vKzwudLnurp56HY1kZ6xQUxxZYDsSb05EvYnbZ6woYPfu2rx179sq5/9vXBun74zDrzORXXbxDo1ysPwzAMwzAMwzDcZ4zIHYZhuB94IJHr7wz+pb/0l24ef/zxEbnDMLxhgZzwjS7fPkNCWUfemhOESiQEsgFBYEQ6IQ8iF5At9ukakQwIB2SNkb0zesiJyAznRucICWtnkTT2+JdfBIZ9pBEd58AHO6SIUSy5n7UgV6uzWvKRnweBntyQJCcRKif5yF8O+Qc67Ii5fp1EqphyfOSRRy628tFXIhZpLZYYfkWyOvLpLBLwtJPDu9/97sv90mXvnI1zpE/3pmeIK4S+HO3TT7e/w2lPX/s1w/UPjPKgp//1nb61HtFhgyhrnb371EsCbPQEMVWf1A/dlRzs05Wr/Pjng99G+nT1LbLRfv010qVnnw9QA111gf16zkav9I6emuTHnk69o1P+vb8+B/yV//V9QHfunF+ibwk0pyev6q8W89NGT+ufOOaEXsLOmVzF7lwfnJ2+CZ2+teqsv99Kp3enj/ToqB/ko+6gX/aQk0a18GHfXO7qzJfY9uXazwP+xBT/e0H3RNRUzO4B5C8HZ0TcvgnbZyA7eZF8kOwIP915906nN8eX+sWwp7beTt/GNddjtfv59K1vfeuiNwzDMAzDMAzDcJ8xIncYhuF+4IFErv/g/bM/+7OX/4j4hS984cXdYRiGNxYQJr6xiXBDWiAvzCPhCGID2YBkQDwgMyJY2mvfHuKBDSKm80iISCskg5+fkWqICnDGDhHkjK/0iHNrRAbIky/+ixfBYqSPdIlQAfsIKuu+fcofIuSVIB+6iJJnn332kkv++ZIPsXaG2OFX7ZFsaqte/7fkve997yV3ddBxlrQ2yttvgxBPvfSd5fu0k+OHP/zhi/9Tl5y6EW5+nTLSly5BBvGhhmxADt6IWpG71mri37x+uGeix2ztg/tRq5jiGAmwo0f4pCfO888/f8nTvvvUX3N2dPInvl5EnFYL6X7O2HwY1SH/SDI5OvNG+KQvH3cObHovhF+fo94if/TpEHM+H3300Uvv1MCvXOqNPvnHFObVC3zqIx9s+GHjzbIP1aWHYrozqEafY3nyRce5XIk53/WE8Kcm8fIjB6K/dOkYgU811Dv7kf/2rNXdm7CvDmfm1WKUiz7xU572xVU3e77UKe6Zg37xS9f4vYCffhbWL+9HbLlWO5Gf+1JrudLr3uSaDyJv50Ttjfb1FhHLj/j60Js0//a3v32JaY8OG3f09NNPX2qVl33yzDPPfM/1D8MwDMMwDMMwvFkwIncYhuF+4IFE7pNPPnn5VtQnP/nJy39M+9KXvvTiyTAMwxsDkTtIA2RDxK11eyTi4ZqAQC4Qa2N7yIaIm3ASH9khRSJj+EBkIG+yo8uGLj36zoi5c3GQLMCePl/lWE5GJEn6kSrVj3Bhi3jl+9WALwQO0hP0ik81RLbIic+IJXvOxfDrkd/znvdccvOt086zbW1EKn7oQx+61MI+si5Jl6j94x//+OUu6/Gpey1+w0Tf7EMa8WWUc6QdPT2Ss2/tigHO5ZNO9yO2XO3pdT7rrf3mjZHfvYP23A97sfSKvhggrrUxG2fukr6+yYUfe8C/XPTFHJyJ0XtEgItVr+n3vuxZi0nXt5P1Jl163hp/2XzgAx+4jAk9OcmdrV7x2WdQ7t5NfXUe1Gcv8CF2vXDuLbLhRy297/TLoRzpuVM+2CMxrU/fxmoUX75q8HaM9U/f6oN5oBPZTfhHWBut5YvMNopT7GLJwX3y4Yz/YvDNTt7W9NnJ6buF/vPTzwXx9KSYIBefU32r7/Wun6X9/OSjnzdyU2/vwDxxRuzzze7cF8NaDvz6HNqvzvrk7elFe8MwDMMwDMMwDPcVI3KHYRjuBx5I5IJfqewbTL/4i79489hjj90qn/70py/fjPj617/+otUwDMPdAZICYYFoiICwJuZIBWRBZAMgXxAGxDk7Z8iD9CI2EBLgj
K49kt/08h1ZhPSIGIlEQUywK4bziJpylBOkk0R4AH+IH3mrGzlEItQiocR8OTinJwfkCVLHmvBhrf780SN6jlwj8kg/0iq79JFb/m+N+6Bjjx5JpzXfvvnJt/z4Pc+T7sU/SELE6U+6p0+5gR7+2T/7Zy9/F1f/uif9rvd01SNPeeilO+KnXtG3Z633xvbom+cb+OMb2EeceQe9J7ka2eTbSORaftb8E7U2lpfRm+hXUfNXT4xELXTF9l7qc/rp1mc6yHrvq1rFYW+uZvukfsnZ50/OvgVsTw0nnInFl7iNdDsnfOqzOZSnsXy8hXJRf8TlqV+MRI10InH5al9e8rU2F9s/mKCfbnrVIY74zuk5J/ISWy/VgWClU+3OI0r59fbqKXs/q+h9N9B7+fLFL+Gznz3y4b8e2pODmo36R/r5It/eIOntykt+8iwGodu5sVrqFV19MCL79cDnop/LvqULdPWN7TAMwzAMwzAMw33EiNxhGIb7gZclcgGZ+9nPfvaB8rnPfW4k7jAMdxKICKQBQgDpgIQ4iQjziAcjIAciXhAMziJfTrIC6CAWIjsiKLJFiDiHfBkRGNkQ+wiOyCTojH2Ex0mAgDk554mc1S1vtSNtEDjqzrc8Xw0JIi9kGFKF6I+8jOzLUQ0IIjGQs3JwxtY5XcSLdaSwvHzzDnFjHTlzm/BL9x3veMclp+6qc36by4UeklE/yrfzbO3rK79IXD3rjqpLn0hvSR/5D/zR6/7YAb1iFyef3VH3DvWZyK13xEbvnNtjb18/2Fs7d1Zd3Uu2xHt3L/Kyrl/0je3Rcx/eDTl7Jlb+1UBPn72nciVyJXyB3IANXfdu7tfpegv8nfUZ7SX5S6fPj/fNj73z3EinOV11i6vv5xkR8xzdFX31nXrlw4e1MzV69/Kwx7dRH+jRF9+c3+zyZW6/f2yRn3rJzrujkw/+2HWH5uK+GlRXPwP1sJ9vYidieF/9HDF6D+z6eWqfDt3seidqM5cbnTMOfbU5I/StjcWz7vOgRqMa/aOPdO33mwiGYRiGYRiGYRjuG0bkDsMw3A+8IpE7DMPwRkakCXIA8RApYkQuGE8yATGAaHBGEBPIAzZ0IjgiLeiKQehbA10kBvIh0gKytU+XHv/FRH7Iw741/cge5AYCxTqwTeRIH8RgX53ZImEicUC9bPl/OdBRj29Q+macuZrsG8Xz7U1/i7a4zvinHyHDzp4aEFf0kWv6V7+cp9+cT7oniXutYyR64O8iv+9977vUJb/iZmeUA10knG/50uUbzCOX2AECTA/1Um3pAh1r++z0gz5da/Hs8SeueiIJi2POHjFlT571thzMiffCnh9nxTeKVX32epNIXDb2xOrbknSRqUb+9M63Hs3Fl4tz+ciPnlHfCMip/NnQ8Qb0ix8xxbIHfFoTv3ZbLnJnaxSLTXXwbS0ONNon/CX0EvmILw+5uo9sTj2SrTfh89FnpJrqBdvuoM9U712+fJgH83ohvvsFenyCz4C7ATGgM7HYi8NXseUpl5MIfzXwuROr3PXHzw4jtBbDSKdaE/tqKQ8Ccujng1z5IvI0ples9NTnH4nw50w96hKjv/GcP+/Yuv7RI8MwDMMwDMMwDPcNI3KHYRjuB0bkDsPwpgYCASmAGEICINeQBcgQZAQCAZmAXKGHOKAXoZAucoVOpIoRjMgJJAN7Ptsn9v2NWX4QD/Kh4wzhwdZZse2bE3AGSGiESt+Ccx5xIwa/+XaO4KgGEoGCwHGuD4gtPvXm9PdKoI/QJeLw51fQ8qUmOdYvBAsdZB07JJ1YdPwPDnmo2b4+uYuIvMT6wx/+8IVclGM6ETjpEGfvfe97L78mOaIn/cbmgBj+8R//8cs83/VOX+2pRW3+fq79iDign25+3YE+qwuM7oZfon49U0/xEnp64tvBiK3nnnvuEh955h75lou1mM7YgFzlxmc9ce/6gfzq3Th3xo4P64hUf+vWu7BP156x3Ajf7hvhK1e67NVpTsSVq3s2d0bkJ3f39sILL7z0meLTXm/G2lxN+it39Rurn19x9YDUQzakfOnJ19heOreJf5Agd77kIX7+64W4vsUtF3mpgS3f+knfnlrYI2rLuX6BPT9P+OzXU/cm1Fr/xKDrPoBve2xBHHZyeCVE5PLdHRnlCUb3ak9Mut6qfXtEbvKSh5GI/fzzz1/26MovsdYb+RP9APU65/Oc01WT+N6BtVrlbk7XuZh8P/300xd/wzAMwzAMwzAM9wkjcodhGO4H/FfmW38X36c+9ambX/mVX7n8R8qXw7PPPnvzT//pP7357//9v7+4MwzDcLcQ2YAI+LEf+7ELWeXXcyJskBjIAuQAvUiiiKNICGfIA3B2EhEkEgcQD/yKh1whCKDTF6IEWeMMkBfWzo0IJDZ82BNPXASREYkhDl1+I1XoRrLQKW92/JUnEg25Kv43v/nNm8cff/zmG9/4xuVX3b4a8CuGWGrxjVaCgNQ/9chLXMRNOcgXKeQe6lc68kXmpic39fgfJurXE8IuQiipxwhfOciND3udnfpyRsyefxOXDmEH7lAu6kAg9fdi+SPgfuSkH/bkpn7vyD6/xJne2uOLyNEZQtxdOhNfPg899NAlLmLM/agfcYiQra/0jPWrOZJQPIQsQttcLLmle4pz+X/oQx+65C6PcneW1Bs9+OhHP3oh+MzzQcQB++Lb06Ozx3JAvKnNXdhjR0+O8rcvF3t6qW7vJoKRf3bpW6vB2pw/+fX/w+iZ+GIb6SXWBNjUq7NHcrEnL3H8yu5qSqfPbm+MvjuTpzrYmYP7licf9r0Nsa31TE18eZ9yBm+ADd1i+Laq9/G1r33t5oknnrj0tBgPgpzkz7d+8ttnVnxv02jPqC573Y987eupXjkXM3LayCehQ9jQlX/3cH6erMVTm5G+nwWgRu9RTPfJv3hPPfXU5R35xw5f+cpXLvNXqn0YhmEYhmEYhuHNhMcee+zypw+HYRiGNzce+I1cf/fW37+9/pu4yV/9q3/18h9Wv/zlL9/86q/+6otWwzAMdxMIg0gchEQkBWIikgXMIykiKpxFRAAb/iIoQnPnSBhjMUj+5ICQoB+xgYBAiJhDZ/K0x54tH+pwXpyTvGADdNXC9vRJnNHLn1yIWNb8RR69HPigF/EU+RSZhZiNnLSHqOpv0eqLvHzrlB59unzlA8n0/ve//0LgiZUfOhGf+de7D37wgxfyCORlP8Irsa9WRBa/emQvKba5s0guvbLffaZr1DNzOSLD6MuxnIl5d6guJLF1d8s+G32KsBRPbYh3sfLXvepd/WPfOR++MRsZXb/onLrm8tUPxFq9kxPhmy6RD/IbQatmeXlP1VB+ahSfjnrsG/XTmVqQdGKzU6M7UXPibiK7xeCLHyKeUU7qMgf7fOqlWsQTlz0d+6Qe1UMijrvuM1Os9KslPf//D7/yz4d86NjTG321Pn8GsKFnFKt/8FGedIpzCn+QnjU7d8KmXNXvzl4O7NTgHsWXJz+9Xf0vf3p05FBvxNMPe70Po8//mb+6+Tjfin0jHXdubd6af/N66n71
Xx4R3X3m1ZxvtXtX9odhGIZhGIZhGO4L9o3cYRiG+4Hv6lcr/7t/9+9u/tE/+keXf+3zxS9+8ebTn/70zRe+8IUXT4dhGO4+/Ad/hAViIJLM2pxEWJzkw7VApEzEAV3oPCIInCEn+BXDnL198cz5iQQxghzkaX0SINDoTE10+aeDAIHiyqO9iBN61ScncRApelEt5fRK4C8yhyBaIpTM+eAbOYc8Es+5Mzrm6fMDkb5+Ja787cvHSLJ1hmjyDVZkZH2nS4du/vXIr1Lm8yST6ethtgSQgb0TNdKpH/WVREDpG+LKPYjbuVzY9jZ6b3w5N7In7kft/HRH6mOvXsRWPePX/OwFEq5eG8uDDh/W2YjHt37Qt+bTWG71mp6e0TV3JjdS7mzkrP7eUe9bvwiyzzeQ+bTWB3megsD1Tsyduzd+1FysauFHzvIhfLJzd2yc0aNPuotG/uUpX1Ld6edXzHJNr8/SmRNdet6NPolD91roOeeLD76K67y7t6ef5vmna51YOysH92t8EKqj953Yk1NvmJ7RWgwS5Fxv3YFRfDr1NJGLPWfqIs1Pv9Wtxu7HncvJu5aLM3pqlEN50uubyvIYhmEYhmEYhmG4DxiROwzDcD/wikTubeStb+SaD8MwvBERcYEgQlIgUyIxkAr2I0giHsK5RipESEU2mZPOERJ8A5/pIeSMxYicQEogIviQD32CsABEBT98Q/moAdlhzpZfsY18I0UiSqBcCH32YsjVtw31JVLEOdtXgpwQdf2tWzVa2+/boZEzzv3KVGf29JEAIg8pK49Ix4StMaJH3ghffweWL7nKmX+29OoDEte3dvumJz025dAI6qdPj896wKfeQT3RM311dxFWINf8ywecpcO+HPjn1x2Q3kk2esEHn/qqx3KxJvzohb4hQflQS/1zXl/o8+8+3AviuLP62ihHuUWAtwb59t7Ym/tcqY+9GPaM7Hz72p9jkI+1fb1L5KOGauenGPTlY08sNfBjzwjsnPdtWXCuX+zZVJee0/GZEVMcOvb5N6ZnVKN3Q5+evrIn1Z99pKganMuvOvhyLpfuuVycFZPPznov5e4sf+LK31ge9OQnn9sgrhzUoqbqsufdi2cuRvfgbuRiLY6axAA5idXbJWzYy4eudfcqb1Ld1VJP+dMf75G+t0en+1OvuZh8Emt2ft70HoZhGIZhGIZhGN7sGJE7DMNwP/BAItffyP03/+bfXP7m2t/6W39r5O0wDG8KIAF8E61fK0qQF8YIJQQC4iAiAyIfrM+9yBbCNyA6kAyETqQOEgKxIR6yA7FFRxw2YB/syQkJZS9/CBEEqLh82eNP7EZ29NjSETvyBaqFiEOXH3ZIPb3RI+SOOPJmm80rQS/k6Fed+vYl0pF9RBTSxbmc8h2J4+8XE3Hty5+PCMhTfEPUr/qVq/zppMevPfHgfe97380jjzzyHXdD5EqHdI964O/9QvryINb6qm9ydKbvcs8PHWtjudRnPTXP35mHfefZGe313hBuYtFHWPl7qJHhyEs5u0N/+5g9/87o6GV3wMfHPvaxy7eY+dJ7umz0r3tK+BWbrpzKuR57n/b1TY29kd6uGvj13sXg057PhVwa6eppNRNvRz3qcM6/uOUqhjePyPetYnvqkQ8b+fFjPOPb8x70iz92dJyxO0e98beGkZv8ht7s+VlEdkP52qdTH8zFFD9fapKbPfHoeAf05AvO7LvDSFd78majf/yLy8boTPxr6HdvSV4+79bnPYilr/mUI7/6LVe1O5cPOGeXWLPVG3N61s6Af8K3PhTTWwW9lL+cfL7V0c8zObIxJ92xvT4TwzAMwzAMwzAM9wEjcodhGO4HHkjk+hu5v/M7v3Pz8z//8ze//Mu/fPlGrm89jcwdhuGNDP/RH2mADECINCJNEA7GyB3EQORLEpq3zy+iI6KCrT2jGJAegkIsa3OxIoPMkRJyQJSYR+bwRV8MY/7Z5Js+IE6QfemAGARpQx8BBOYRIebyRRQh5ugm+iQneWb7Sig/v/YU+WPOVv7EHuH7Qx/60IW0sU+HbqQMwkdNyBy5/tRP/dSFiJSXXsrJmV4RPhp/4id+4kJ0RhrpSQRQ4kzdvqGKEKyP6V/b6IN7cC7HiCg5y5Nu9t6Q++zdiSU3NZrnM6KrmPXBPgEjckuO7oeuXPTP3Z4kllz0JP9yYuv/lrOxB909XTbiEvsITDlBeZ0iP6I+NnogD6Nc1YfQd//06PDX50w/5BLxR3qPzz333KUeOcmdX/rukiAgH3744Yt9oj4+5Fod7MXmQ8zes5j0xFKLd0hHzoQfPX73u9998S2vswfAHz98eLt9rvnxDpyby4GNc3H0wL78jNb2+VcnsDHnm875FvhK1725f3Hbp9sbK9dAV+98xtmrzZ7c0hevPPgRw88To96woV9N9V8d7OQobyIve3yqUc1i2wc+xGVPh229rsZqZp9dsewb6YK+jcwdhmEYhmEYhuE+YETuMAzD/cDL/mrlJ5988ubzn//85du43/zmN2/+3t/7ezf/4B/8g5uf+7mfu/m93/u9y/kwDMMbCf5jPyLCiGRBKEQoIQbsGyMOTmIhUuJa2OSXpBdOQoYen8iM1kgMOtbsESOEXiMdZ/ljJz+5IWX4p0P3zCeyRT5IKXUiu4qVr1BNekJPf5A85q2dictWvFdCNZ6EEELGmiBwkGB864u87CPCzOVvHjHmm5S+jSuH6qw/+TXn17d7/WpWc374Z2NOyh+xp4dy0COgl6SbeCN6G3o75RKZq5fOxK+W9OSph+Z0xHUvZyyQV774Jubis3Fmrzthb61Wouf9fVsEqHfAZ30qlrm82esHopyeWKFeJM69C2N1GeWmFneNVOPXvhz1wj0m6fYm5cvGNz/VA/pz2pDu1Aj8dy/8sCV6oB+Rv+6Ynv1qY2uuDvty8e69y97D2Yfq747ENGfPlj+QJz3x7beWFx3iHqD4cjPap0+HvVEu+ajXesp3sZ3Z03Ofh3oY3K030GeajR4SsYl4/BvlEYFavLP/4prrgXn5EDbnPB961b1D98W2t2iuXjb2xKJvFE8OztjZr6dsvTv3OAzDMAzDMAzD8GbGiNxhGIb7gZclck/4hu7nPve5C6nrPwL+43/8j29+6Zd+ad/SHYbhDYcIigiBJCIEIhkQA8ScREYkzvhJIhPoIiIgEiZ7esiRSCCQkxyQKAgKus7Zdc53JAcf8kXEIDvkKw7ywjnwQY9NupFEzvjms5zB2dkPufAfgYQEI87zUT6vBHqRQhG6fXNOLHkin+gY5cWvmqwRa76BiWCsF2o2N1rTNZevb1L6lbt6o+8g33pFXw101ZQOyIduAnKp7/rY/Xan9IzqlC+IRd8+dO/WcqDH3v9dlQfbzon8+OhO7Tkn4oupj/yKw4e97oSOe/d/q/Wv+wc63X9x6crFN35J8aD+WPdWjd4F1AcC/LpfuYFcet/Zq02NzuSgFt/g9SuVxXFGT11yY8umvtKpHlC7M+BTTvbEi5SlX+9IEIs+e+9Bv+ifqAegzu5HjvLnzx4B62IZ5c+Hd1p
+7Og7M3aP+se3OL1X+3T4Y0eQsnSLwZ6I0T9s6A4hIpev3j27chaPbSIGyC//3bv82ssH4bN54jwd+RTXKHexjOLbE0N+anNmT0zzekWXL3t6xB/4XHlHwzAMwzAMwzAMb2aMyB2GYbgfeCCR29/I9Q1cv1b5lD//5//8S/+xzH/o3bdzh2F4owE5gUxEBJxkBPLAHpLAvr2IDHvIhBDpcC2IBf7YAztkijNzMI9gidBAVJHs+EdIGEEeCAs5miM5SPEiQ/h1Lpa1EdGklnxVhzyKIW5EXjnQqy9njkgues7FMp61PQjOEC4IJiSfX7nrHuQgV76QeXToqo1fOSBlEbli0XFGjx2yh4Ba/Ypk39ylW838qENt4oC1WoAuf87Umm32YM5GPqG7ZOfc2lwchBM/zs3Ved57+/W8+Gft3UU+1cfWmb8f3DuWk2+d1i963oe+mfPHh5F+9RJ5q/k973nP5du7SD7n7Dq3JubeAB352At8gDtxR+4Z+NE3Yk7YyjWfJ4krphr0pjdHrE87a/Vbk3LNziimb+KWp5Gus+4M6rn6z89A5+zywb44pPjqtS93uu5XT5wZm9M5/dnXE/vO+XRWft2dWrxz/unZ836d22fjzsFZPw/OnyN9dunx1x3Sp0v0UT8I6GW6xJzYlwvIJ6kuedWPaifmYsCpL3b3bhRLXfbp2c+ukY43A/VF3c8+++xL8YdhGIZhGIZhGN6MGJE7DMNwP+C/nO2/cA3DcK+B6PrIRz5y8/73v/9C4vjGJzIAUYWoQDogi67Jp4iOBGlA+luZCATiDOmA5GGHSImciOjyDTkkHAKCrZgnCRNZYg+ZgZgSC+QoP775jUDjJwKNDaEXoUIfefN//s//ufjik46Y1dfftmXDN3v+6D/11FMXIpE9PSSctTnb7wZyFd/fZDXqvdz18qd/+qdfIlv1Uy71tVrVIT+9QEb6NcK3ETjqug18Je6KP2/BWq3nqD/11ToCzD2BM7nopXwiluQtX/2i3z2oMQJKfvooFrHPh3nEFf/EXTz++OMvfasZMccXOYnI7pQvfnrL9aKe6pkcwNkpbNTEB325uCM96C6IOaH7/PPPX/bYsztFnAS8mf4mLns583+OYtUjKLc//MM/vMTRJzXL1a/TZnPW02etHOq/e5MvHffk3vUnyMeZMVQvfb7023mfc3veMb/uXA3Fous+5Ok+yk3uvfugH2LUxz57fMifnbjs/Cpqb5GOXvp8/v7v//7N1772tZv//b//9yUe6I1/EGGM1OWLsBVfrt5TPexXMVeHfOhm12cgW7la9zb7u8z1kaijz4E8+NCf9JwBn/zorZys1QatkbY+V/ph9I8Lv/rVr978wR/8wUufy2EYhmEYhmEYhjcbfOHKb88chmEY3tx44Ddyh2EY7gsQBsgSQFw0RjogZZALiAZr+xE5BCGRWCM8kBPBHlsjMgKxwGf2YvkGaQQIHdI8ksRcHvQJH2IicMzVQCeixJ48IrjERXrkiy3YYyu+OXEOfIjF3rn4fAO/BAnDhp55seT9asEn0gsRjPiKlOL/jClnZE29NOqT3EhEcL2EbOGcg3UC3QmRh7r4OvXOO3JGt7vX24g8uduPJESk5Zsfvv2jAWu18keXLd16X3yj+zTyi/AVj+/6n9QTPvh1HuRFh63eIvIQdPXrGr7t2OeDz4RfefBdPLXrW73xFog8mtuvDnXxj3hkB3KRI5LOaC3PbIslhr/fX//0jj96RvU5YwsRmXKxH6HONp/yiqyEsyfmp7SnB967nooLchA3vfIHpGRzNYjf3yPunvRbPdUJPlvd20ngit1nkL59dbAjvUH1Qu+qX69MrOu1ufys5c1XpC7hn7/unahBzuzkICd5mvcGewPZAF/O2IoN5nKm68745kceIG9gKw6JHId88+vnCX90h2EYhmEYhmEY3mzYN3KHYRjuB0bkDsNw74E08B/7EQbIA4QB0oGYIwbMkQl0EQSIAXOEUEIPoRAZQyc9wj9bJAihj7xAmPAtNj/0nNOlw9aaL8QKfXrBGTKj3LKTRwQN387p8UMnwoSuOqs98sUZWCfO0zHyHfGjFoSSEVIgXsMAAP/0SURBVPHjXC1ELvXjQThrR1RFxPGTffkS+atDT3ybVO+RPHIPZ7zbYrcnjrlRjHJRi/jO0jXSKdfuiOgLG2Ry90lHLex6Z/TU5M8TtE9HPWwI8KGXfNRzZ/VHbDp6QPi0JvTp0tMTceVo3numb87PCWt5Rcpai83G3Ah0xOAH6aYOe87Lg/9G+4lckJpIXHOg500RfSTVxEZs9bAj+pJtcejLlYB87Ed+1hNjdVWbsfyzp3PKuQf0wecgyEnf5EJPLHp8OkvXWo105OjMPdHtPdAl9pzLrbrdOTu1l68z8c56+HE/zvin7/Pi88qHN3aStec3cI3ys3/G4tMoDp9+tuixvAjQJXSgOoO1/Ij4YM4vvXKVY3H01Fx8MY107fGXT/2j32dlGIZhGIZhGIbhzYbvlcj9O3/n79x85jOfuYy/8Au/8KfEb6r6zd/8zRe1h2EYhu8X/dz1G/K+l5+vI3KHYRj+P/gP/pEcEQXIgeaRC0aISECYRJogOpARiITgLGKD8B8BgUghERZ8008HKWbdvvgIFqSGc3r2CbIiO/rID7kjasxDREu6hB6yhn9n1XqSLOZqtM9f50b5RAAhfqrL/pmH0Zo/cR4EZ9XvTujzLX45pMOfWIhTOmfvA/sHobNyIvXUnr6ImQ7QST9CzZqNu5RP/bFHJ8KpevTGXdK3to9sMooXMaWf9MrHnjldtfdG+TNadz/AH2Ejz+7sjHGNbOTrW5ynz9M+W3nZp8umverMnj49o/fnLfrmNYLxfCs+Q+p2n9V01oNcJuaBDv3rPohXfnJL9CK/1VZNepVd+vItdyOkc0rnfLCJpBXPGqoFisu2O0q3+6omOu79jMvWe6PbXu9DbP7t8adf9iPb9atfY+7zqe/dF5/6Ym6/+yPN3Rv/Yvde7EF6CT/lB/bUQQI/7VcHP3DqdU5fTLHtRQRb1z/+1Out0R2GYRiGYRiGYXgz4Xslcj/2sY/dfPSjH735z//5P9/88i//8s2v//qvvyT2P/GJT1z+99T/+l//60WLYRiG4ftBP3efeOKJ74nI/dP/BfdFfOpTn7r54he/ePOlL33pZYUO3WEYhjc6/Md+f2/T3x39oz/6o5tnnnnm8mtfkR/OCAIBqeL/oY2UQCIgKpB+9hJEAolUOM8QFPTZsSdIELHSNyKsEBoRKgR5QhAWbOhZs0VWyI1veYrjPFLYHjv+I0mQN/xGtvBBn5/yg8gZe0igcmGPEPIvNv2KaH+j1q84/vCHP3zzkz/5kzcf//jHbx599NGbD3zgA5e/R+zbs+zZ5vs2yMOvD3Yf/sanb26qFykqP/kgOeVBt54l1z1/kOhLY2J91g7mp017cqqf+mOUY30OSEt2512m053mx/1VmzvrPukUB9lGx4iIO+9cHDn0K6q9Y++3njwIdL05f7MWMcvHaSNuOZSzM77VkvSGsknS5z
cy1vsh3sQpamEjFsjDt3D1W3182aMX8UsiI9ka6fXZVZ+Y9fDMi7hPZ/z6PIl1Esd80OldNMqn+603YvMD9pyVh94US6/kzX/v1lxd7hXo8Al9NtnQ5wPYsHVmr8+1v5nsb9T6XPqM2ufDHbsH+dBnJ2Y9s1et/FiroR4a9UhvyonvemlN8mXOXky2YL94/IC5nPiuV83dMx/WIF/69ac1+Jn07ne/++aRRx65/GtH8YdhGIZhGIZhGIaXx7//9//+8t8QfuZnfubFnWEYhuH1xgOJXPAf6v71v/7XNz/1Uz91EUzxs88+e/NLv/RLl7UzOsMwDG8W+Jn21a9+9UIePvXUU5efeU8//fSFTOjXuUaIIVkQaYgSRAeCgSAcnCMrIhiJM0AovOUtb3mJsDgFcYSkyIYPdsg6MZyf+vL1/2DTRYoQZAdCVW7IEf74YIuUQZwQeogdeuYRy3Im7OToTKxIHXv0kDbycmaPH78qWF8QKMjbD37wgxd517vedfPwww/fvP/977/5sR/7sQu5Ip5883sb1IXAjVyPzEXSIKeM4E7Km+hbPZRfMO+OGjuXQ/vmeqPO8ks3yd59IpDkogeRVvJEWqUXwUSnu+RXn+VbX9XiTSG89d6ZPXNxgK44etjbcB6JGVGnXwhG9r0FtuJCY/CWELhs5J4v62x7Z0Sv6auFbkSqUV7O3QE7Ul/kwr/3qyY2xoQv/gMf/f1kkLe+iHMKP2KUn5hQPezScSZO82wS+3zSIXLVAz5OSR/a48/o54XcO/MO1EG/Prk7+ubFMNcHozP78qerbrmx54++NZjT7XNBx1vyuewfUdiTE139pM+PGvtHAXL1tsR3/869L7l4S+5cTPU5V498I4rpkXpTX+o1eznYy1ZtzrMBenTkcf580gP5g5zp1Ad6/Mm3ukk9GYZhGIZhGIZhGB6M3/7t37787z7/28o3yIZhGIbXHy9L5A7DMNxX+DUHv/M7v3Pzta997ebJJ5+8fFMXAYWUQyQgMCKikApICWdGxAaJxCJ0ImCQuMgOPvi0f/qJdMsGAeH/gY4YQW5ErhA6yCF7ETERXggNBIdYdBFa9O3JD9lB7IF5hBJ/cpET8vEE4gSpog7x+OPbWh/Y0pEPEtc34h566KHLN3URuR/5yEcu39gliF4EL+IX0cTvNXw72p388R//8aV+39Q1qlO8iMxiy4eoqztwRqA50RejfCOS2oP63N65pts3jPXMm+i+nctBD/NDVxx7xggttoS/7sO5O8+fuuTnLvQV6ERs6T3hJ4JNXs7YmfNn31itRF3ekDs0dwfE+yFsxYp8JmpQL3v3wGfvib5cjPzxyz+imA/wbiIWxVCTe1SvXoE48kVKFyO/5Iwl3+ohbPVC7N5qsdLnL91Tiu2MPl39Ukv3Geplvs74RnfR3abPX3dOr3dcbvz5zOsvFNue+9cfa/U///zzl5zKo8+1eL0jNvZ8FpGb5mLpNVvv0L3wWz+BTzG7A/+wRT1iG/mu7uI7A2cnrAldMcSrL4RtPw/zqU/dHeiHeZ8Z/rwZo9z5sPYz8+yjuv2MccbvMAzDMAzDMAzDcDuQt/53lP+tiNQdhmEYXn/sv2YNwzDcAgQcAvfLX/7y5W+C+DYowiRCCZGQHkEqIFCMSI/2zSN0ERIIFCRFJId95wkSI32EA+IB4cIGkBV0EB78W7fn23BIYnEg0haxQhfxYo0MQaL5Rivf/p9zOoQvBAgfyBZgKye5yKOYxJpPMIppj9/yVodY5YdIQqwgc5G673vf+y6/9tW3eH1zzjd3ETFImKCvvl36jW984+ab3/zm5S6Qud1HPROz3pqrla0zUp3m8s1OLfZBzvWwOk+7/MgP+UiXjbhi1fdieAP21eT+6Ucm8a3/zrt/36YGe+LQd1/FZwP+h5WY5UYfWaon6qYrjj5Eerpbe2LpGxu6iHJ58y0ePdI9Evviqcf7ox/xphfFy9Ye0ZvqLIZ9OifpVly58klPbsXhm0755KNYzuWXyJN98dhmb8wmnLb2E/nTrQ49i3ilC/T4VKPenqAnD/nwA/lVJxvgN/BRnWe9dPRHXLmYE70/+0+PTbl7p/2KZZ+v/kEBoeszrsflwpd9eavVu6LTvfDbzyb+64/43qq1ukOxknTF0Jf6AeLY58O+GOLas+4tdBdyVR/7+u+ts9cLJLafPer28ybdYRiGYRiGYRiG4U/jb//tv3353+n/9b/+1xd3hmEYhtcbI3KHYRgeAKSAX6v89a9//fLt3N///d+/kIiIMsQCksOISGiNhEBMEHN7BHkR6YGAiORAREQ+EH4IogJREulAvzjs+WcnBl3EDAIFmWQNxWdHzPmJPIuIcVZs8wgi58Z82o9cKR8iP4QJPX7Ami3/9uWGUBEXmesbushbJC7y1rd17fmVy/6Wrm/uIl/4CN2Hv5frG9K+HYhg8m3CvulZjmIScyLf6qvHSbXTIe5KjfpXP06pdjryox/40We58Jse0tVdnn7cPdATC/RIz9iUW/b2i60mfSfqNPKH3NYn/u2zYUu6D7Z8ELki+JF15t1lctoUk271iQP05GktR2d07VmrJRJPXmcM7wzYVo9axIjEdSYPwgZR503RL073ZuwzWK/K/Zyf91Zup5z7QR71TZ/Puvjt/sWuF0b16Akd9cmTgDP63TeoUxwjPfmquzdZLUZ9AH7Es2+eHpGDt+Wz53Pn8yYPeTsTl2+99h70nbjrfs4QusV2b/kn9b43Y03/RL2qX1D99O2xUQcfveHg/LxTeuz1gH2+5KNW796Znzv+AYlv5SJ07Q/DMAzDMAzDMNx3/LW/9tdu/uN//I/fIX/uz/25m//wH/7Dzec///kXtYZhGIbXGyNyh2EYXgbIFcTtV77ylZfIXN9eRBzaRyoAIiQCD+GBTCDmyAUkDKKBPkFAsImsYmtNF5AhiD92dPMln+ytkSz8ImeQI+zTQYjwS48+0PFtWESYeNZimBN27MVHlNCThzGCxD7QZeMMqWMk4soJAWOfPUIGeRKZ5FunviGIUIrENSJZkLgf+tCHLt+e8y1dRAx78ZBMvh3t7xj7dq5v6fYtVL1Qr9j05cFWLfIu3/pD6gvd1sS8uzSHCKjq49MasmlEgIlFT/3qkxN/RC/lSyfojx64QzH4OokpvdR7MfWWjrX7c15t9u2Jy0YP3Is9+nIi5r0/a3kb+aYbkcY3Af7lL5Y9vSX0jezl6x74I+nzJ4/0T/9qUi//amYfwWyPHmFD9Ao5V17sxTaSSFbn6jSeQkefrpF989B9kOZyFAOyk2P5Z0//zEPt+ZIzke/pw93pGR0x0jXS5UcfqhHMiXjEPeRTTnrPrzfmZ4A5f87p0/EefNs9Mrc65JRv8awb5QX0Qu8sGyiXxDk95+Kq1x6f9ul4K0Z7xNzd0+stQD3gy5sz+sct3r9+E/X6ueMfivg82huGYRiGYRiGYbjP+E//6T/d/M2/+TcvYg5+M91I3GEYhruFEbnDMAyvEo8//vjl1ywjdZGIiBLSN9cQCOZICQSIM0RGZBdyAallzxniB
HmBmEFEOrOHoPJtVPqE7whL+nQQMERM/hAbiAlnSBh7xLl8CLKD74iSfIM9vuVADwHCF1jzw9ZeBIs9OahFbCQtP2ydgf1INzGdWyMX+xYxYhex5Ju5CBa1+3u5H//4x28+8YlPXP41KB22/Krv937v927+5//8n5c7icR1Vv/laZ0gfNi7F3XWG3u+qYdUtnYG9VYsNtZAX6+RQmqC8x6M6UYmqUnPuk/56Ts9+YqJYEJa65Heqsc+Hbp67B70Ti3qQ8zx60yevplcn/XdPSR0yx3Y9W75EoeO0Rl7cegTe/Jhk3/68ul92xOjNV05sevu+aSXrpr47l3qjXeuHr10Rifb7pE/++5NT+s5ZCNv4yn2Tn3za7An4LzPUvfbHYvdHST0xZFjvbPnnavRWu76fv2u7HuL3pb71yN3y574bPKTDV/ujh+64bxHvTf2Vn3OvDVizn+6xj4ffMupOtQUMVov5asH7kRP5MiH0d13h+Ub6hW/Rr6MCGS6/LLjQy3eRL2v1t6sd+YN9i75LA+68rUHEbn9imX1D8MwDMMwDMMwDH+Cf/kv/+XNb/3Wb13+W8w//If/8MXdYRiG4S7AVxj+nz+Zfif8KtFf+7Vfu/nSl7704s7NzRe+8IXLv8jxdyPBGR26wzAM9wGIDyQMcgFBEKGAaLAXKYFUQCIgDozWkR+IBkQNX+xJ5Im/YYlw4INf+wk9sZ1ZI1n6dcViyME+v/xH/rBDAPk1xkgX5AcfkSx0IjvkhyhBwtBzTs8+P2qIVEGyiAViOUOq0ItQEYees0ghe/Lg0555vTRa0zGP9FUnfbHlC0ZkjT024sldPLb2+YH8OVOXvM35TocPe8A3f+qUl1Gu9sq1fnQ33RMpJ7r8y/3Uk7f34owPdy53Z2xAjtb86IMcnPHFzhkf5ULfvjz5okv4Lg/iTt0v8gv41xM2pD7XIzby1ZPuMWFL2JwxiHN7bAidxDp9MCJ95WS/uwS65uLweW2rB2qy175+GIudTcg2WCetu/8+A4kzhKs60qdLL93uWv/s1SPIp37qsbz0u88wUac7YsOXvXpw1ml0N3TospOLuGygOoz6YJSbnyX6zab75997KV+j/Xx6h/byJX53Ih4bv61APvVdns3P3K+hJ/zTA6PY9PUqn+Usj/L0BvrHHPm238+6swd8OrMvZ30ehmEYhmEYhmF4I+Iv/IW/cPM//sf/eHH16vGxj33s5qMf/ejNE088cfObv/mbL+7eXP5cEyLXbzIydz4MwzB8/3jQz91XiwcSucMwDMOfBrIBCYI4iAhDCETgmCMekDJICWvkAwIhHeQBQcAgcuwjF3xzkx19e3Qi6CKGCP/WfcsPqYHoyDcda8IP9G20cpUPnPkAYiQCNxIvHeRHtpE/QC+iRG5qoVdsPvSCjbwjZRAvZy5Ah30kDUHiGtWgP/xFXukNf+VH9I/IRS9ADOvEOZ/G0Bn/IIb86EUonX0Tmw7Udzk5o+9u2Fqn57z7icTzbWP6fItNB1rrc72Wj3OiVxG59tXSvfBHrPkxiiXP3q59/ao+Ym6vPvDLf98gZWPv7MupX+/osRFP/dXRvZz6hG/6dKuB33Kibz/7a1vxOjMSZ5B+yO56fg0+iZwSfn3u5HTapatevaFr3Xttn309MdLhi56RDv30ulu1O3fvztibO1Of91Sv8qt/5dbonC2/xFvQP/vqkgfxmfOZzWc/y8qD72KXDz8+0+LTpeMsOe/tRGv5qYleucsRimWfjveRPuFDbGP/8EMe+hLyTaBc5F2fh2EYhmEYhmEY3kh4rYlcf0rM/6b6iZ/4icufvfqN3/iNF0+GYRiG7wcjcodhGH7I8B/8kXDIMEQBEgM5gORwhvRAbiIvkA3OIk4iHvLBFjHi15b6NaV0Iyiyodc8kgTZgihEaPAVGeGcLRu5pcu3EeiQiItIISSJXACBwzZdkFeQZ/qRLvzQtccWxGBXb8ShU0x71nTEpBdZlOilPUSumvU3OzWqHSFlLa+TPOJXfvRJsOcc6JwC/NCnp8flD3Ineiw2P2qpHjkjZ+XZ3dMzT489O/dC6rV9ORiBr0gr9uBMHGtvkH/gL/KNTf1MV74Rsnxc9znSjp+ELX225vJM9IYPAvbURlct7Oqp+OmfPsAdkuoof7pnTsCGr2yL1X7C1gjdaWPIx4nyPXXVLYb8+2xfIxu69anczdl3763N1XX23Fl+zPWkHheDHeHPuTrp05EjHfv81YNw+qeD/PSG6Pn54DPGDyLU+ryHfJWr3hvVwhfxOeyenZkTtkR8Es61kQ+2pDr4N3du5NeoB87l4U6MdOTazzrfts1WzeVBqsN5/yDCfBiGYRiGYRiG4Y2C15rIBX9SDJH7oQ996KLzX/7Lf3nxZBiGYfhe0c9df+7rF37hF75Dfu7nfu4VfwuC/3q2/2o1DMPwfcCvQ/ZrZ/zqYv9y8ZFHHnmJfLRGIiAZEAXIB2QBUsHcGR1/FxbsR4xk04i4YYts8fdUkSyRGYiw8xuubOwhZHzTF1kYORIBZI7I4INd5LMzRIi9CFLkiH2kUznSRWxVk3P6arIvDkGa0DlB3548Cagnsiekxzc95CDi6amnnrr53d/93cvfKvYrecvR/9BwF/7lqLoR5PrFD5F/JM4Zq/PW1/tqrrflQsRFVNVDPUY2E7VHNNPl01716rFvSYP99PSBX77cibrsdz9yoI+U7e8mgzNvSV3qlBek71feikEvso0eMSf1hI2azPs7t9k1Fkde3W9EMTuSv+beBd/moJfINj7F4EceesB3fp17W3rhnD89Eu86r+Z0epPWxSyf5Nzrjs9ze/yIrVb+6RAwZicnunR89uyrgURWexvm7lSN3oEc6UN6cuZPL+RhX+0+52csdckL6gFdOnJ2LpZcenvu6Omnn778P4h/9Ed/dDnzFv2davn4+7F+JnVf8hdDPGsx3JtzsdydOD6b9Lqn7qL7IHCO/IEc1MSGrbV4/UzJRp/EFk9c/dBTY+9UDkSN9oqjDjHkTN9ngo6/t60PYg3DMAzDMAzDMLxR8Nhjj9189rOffXE1DMMwvFmxb+QOwzB8n/Af/xFqRqQhcgaZgGhAHiCbIPIAOYFQQSj45qa/twl0kQ7OE3rE3Dn/73//+y8xEB3p0zFH0BC69v0rHzEiceRlHmlChy9EiFHOSA+ETCQMPXkjeE4ikD9xkC4IEnM+1Ms/IgWBjOxhxze7iJpGNmKKg7TKf2M6zkCdiCZ9QxiJrfdiIB3rVcSPuCcJR88ZkWfkjpGAMz5IoFuvuxN3qlcgv75hy+7UTa++00Wc1U/6ztSGpFKzvskjoqoeOqMbAVp/jCCu/ev7FEMfzp4m1vTSlzOS6/SV3knS8Vlt8pQXfeKctOa7t2Su1+zS1RMx+DDa41tP6Lvj/MmL7elfTs3VUB3i0WvvGvbqf2+A2AOxif53d9B54wk5qOH0W2/Ym6tL7N6ls/TFUZ+49PXcvp8z/KqLvX3nfDi3z/YkWIvprL6Lw0810WNvzz8GAHPQP+f6W1x+Tn9gzpda6N52H9BI
/+xdOtXAT31z9+5cHX4OAb/gXE5s9aM+0nNWfnrUnt5AnzG+/ezoZ/UwDMMwDMMwDMMbAd/rN3KHYRiGNxZG5A7DMLwGQBYgPJAvSAhkgREBgSgwWiMvrAEpi1ywRx+ZQBAZp9hjg8zzTVwEB1/Ii85P4Quh4VfhRMpEaNDnq/wiPNhBJAgdZAe9co70QPBE8iCC+LUPclITXYLM4QMBU25iRFxF+LSWU2tx00WwyEdu9unpn2/nGdXLv3wQkO5BLDnIr5r4i8CSKx3npDnfBNgR+9f9lk9rd4NY1m/6dNOjky74piORD936bG2sB/qoLvXXE+f86Acil25knrrMjeWtV/Toy6VeyJMev3oidvfsHevP9f0Ys6GbvvHcY5dt83TBWg5yN6ZD2KhB/+iLx1bN7WWvltOWlE8obz0ufiPwQ7p7YyI/e5BfvsiDwBew0+9086kGvVUjyNmcvhxBLLr2ImnplV8+6Zl743xWl7z7xwTs2cqFnvgh/73RfPt50D3KrW95O+dbnpHj5QXuqp832RvllcBt83OvPokjvpj2ykcd9tQsb/H9fGWjHnmd922fjtrkp9729MjPD774YNfndBiGYRiGYRiG4a5jRO4wDMP9wIjcYRiG1wAIgUgO3+zqW2z2EAbmdJALCAXEXwQkkoI+guEUhIKRjW/VIv8QFfnr/BwBWeHXCtON9GFD5ENOIG7k5jzCxJi+UQ5i8IecoU+vPOgENpE4ETrOzfPLH+IH6ZLkN9/lXi8hf/mno4dqRfCwpY/INcolPTGLLQ8j/fIn5c4XG2uxST0+Bfw6WsRZpHz6px0/4p2/5rp4zumVm5z0hj/jmSfxxuh3V3oH1uVA+K4P9ZSIbYz04j/pHQK9835a851+83wTe+c83dMGml/rN1dPPdcbNXdWTmddt8XSAyRd/T+FLelNnmOEof4TsU99saG95o3kzFk8dlDf5UtHjeZyJPTYOOsbpOXPn37Ycx98yZUun8Xhz7k88lcu5UYXrJGY/Dqvt/kS27uA1uz1RYzeBX17kajVaJ6otVjsnIMz4J/wD96ztTrN+RRDDnxFvNL3M7K+8acuOuyt6wnoBT36zszp+rz4hyDDMAzDMAzDMAxvBIzIHYZhuB8YkTsMw/AaAsGAqEB8IJCQA0iDiBgkRORL5IezyIiICcIOGYH4821PRAad9PkmzREWyGGEL6KYvT0+SASJfbEJIKwiM/gGpAcbuoAAkVu69PiD8mDvzL46mxv5OtfO1WMuVr0g+WEjTt82pId4iWwC+3KTFwJbX0FP+mZu/oj49RWs62c9kRs/4tF1dko9l4de+zXPbIB+/UiK504QuXTFrd+Enj1nEYggZznVCzb0wL7aI6OgXtJxXyepxmfSHn98s+vtgbNTilOs24SP2+Ra5zZdMc4zcfTE50j/7Ku/XEh1JNkTYFdNxTnP9eiUehuJ63Na3XrknM+zZ8E6mBO+1JBu8Y31UU18ipk+qI1dn3k10LXn8wBssuOrmMBP+ekZf/mmVx/z2VuRi7PI5rNX5W3faM1HfSoef6Ra2wc59KvX83eeZ+ucmJ8xSTnrC3/Wvpnv72GzsaanZ31O1S/H0z8Uu7vw3uTX52AYhmEYhmEYhuEuY0TuMAzD/cCI3GEYhh8AEAEIAUQiggThgIxAzCBfkDEIBcSJs8gF++lbIycfeuihi511+kSM5kgI397rG5/8AWLCGRQDAYLYkAcgOiI0Io2QHkBf3ogtZ/YjPeRpTqc8+EHskOqjy4c5/WLTCfYJm3zSJxE+5ZnwSdd+MfWrXw2rl36tMELXnE+QZ31oTxwkOB9s5SFv5+pkX81GcZHrvo0L9vjQu/Ne7PHBr7uJTBI7qU51yB3hW7/4jSRjZ6wP9hP6hL6xd0TXuZ5fi9hylR89b5XffJ1STFB7Y9KaDrnWOeedNz9tCKjDP4joTckBjOqRj7l3RKfe5IO9t68++/SLZ+wOjL2BdJCCYpyw5pMNHTHF5i/kN5/N3YNc9dza3Vzbie2Mf2f5dgbegnr4MLffPfKXjhxB3fz1OeOL9Cb8PKnGamLjs8LG5z05dTsTF9RVbUa50IdqqQ6iL8WTq7P6Ll8w0nOuJvmbV1v1psPWr5znq76LQc8odznx0+eyvOkb6dU/c79VoXyGYRiGYRiGYRjuKkbkDsMw3A+MyB2GYfgBASGAfHnuuecuJEGEBrLAHJANEVaRagkC4tFHH72MEQ3pRUgQ/pAUjzzyyIWMjAgh7MSghzCxJogXpAxd8SNTskNCO7dP+GeDGHUufz6zBfZ0EJHsgZ4eZKMGEnFTDu0ldAnfr2YEZE1Ekz4gdfXmW9/61s3zzz9/IamsETdiyF8vzRFBEUb2GunITww1IDvViMBFsJc7oX99NwQ5+/DDD19yRKSxge6CX3ri+3ZvUBuwURdySa70qjtCyrw1n/rv3vkmcmDnLZHszaE+2M/n6dsZgebJeZ6k1/hy89ZgLv9vf/vbl/uSuxzU7z5BjuVP353oqb38sXUnp27ndOt7wj9ivn8EQO8a7j1C0EjvGnwTPs8Y9R7KGeTkrbkvOsCvWPz02QnOrNnRkbe1z2FxI471K1JVL+g7ZyMHb87nRHy5sXvmmWcuMfxjEJ9jPpzxz5YPny99sm+Pfnn5+SB2b9S7shaPsBG/HMWWd70J9tj7li3fwEafxBeHTn7f/va3X3qq5ySyl9/iykk/1c2H+OZGfvsZxC//clfTMAzDMAzDMAzDXcWI3GEYhvuBEbnDMAw/BCAFkAMIA0QBsiHSD5lhjSxBViBfkITIRYQD8oEuooF+EtGAVEH4RgTZO4Vv+siNSJoIHsQFiEvXOjKEDpIje2SI3PkRG9FiX372xZcvO374TO8kpBA5dJ2zi6hRH5STkW0iF2PEkfNzzj+iKYJLLogqc/ERugThE4HkzN/WNNZPZ3TkY696y+8973nPS4S5+tVBzEm6Rr6RUfKzxzddc3nXa7kizuQB9BN69NMVwzqyLD35RGBauy/CJx9iQ+8ku/pLp/nZazohm2s5z875bevbJB098zmB8rZ/1nLqg73urZy9Aed6ctaRjX29ZIesJHRCuqd+88ZXAt9ieAN6fvoicpQbHXfmc2OtDj8L1Gvu/XWn3nG/Zt25nHsXeudt2Pee/Fxg74xfts746rPi55E9922PvT1rPtI30j/76DPiLZtnL286chbPmdgkO+f6ITc2RjHOdwnOyq+1M+tqUqPPoncvBz0QO1t9ouNMHKPz3kW+2Bm7G/n4hxPGYRiGYRiGYRiGu4oRucMwDPcDI3KHYRh+CEAsIAWQCH3bC9FhJBGBSD/EBFKDIBUiPNifhCEgoJC+RiQHPYKUSKyRJ0gNvzaWX2u+0xejOPywi1iiZy3P5uVA1774CGJEif1rPesIHDpqSZdeyGcSCUS3OT+nyDuyiz1yBgmF4FPDGVPvEVC+8SmuMwSVM7WXrzm4I77F4Bcp537klm73R8zpiqfX+mJef8UpTyM/cq3XcqJvH4xqBGP7oKd06wESjb087Fcbsa53dNRVDoQ
Vz36cMwPHh4RyLXgy+v07/00kv3FA/dPETzINTxMAzDMAzvPRBi/h8NfQCPLER4QYRk4kP5SdTBvebE/9uNvdXo//PmCDNxxMwXNGeXyOk8bg3JhkCMxJMnATEQbog7/viN1AN61unYQ3ymx2ekMD3kYQSr2Mhg6/T6XVukINg/c+jtXb75QwpGvPIj78hFMcwJXTXyJacIVjUQ+YmrB/ICMfwlrN94ZWdd7kb+q50fPolc+l3hbKqBrVzqEalHzTufjtmR/Kd7C+vkXlA3CfKRg1Ee9U3t1U/MfZZE9prXo0a2/piQLh/Z86tfoA66zqXa6eqPc+wz7H4bdxiGYRiGYRiGRxkjcoffDT71qU9dfuInfuLyzDPPXO//Q/f67ulfeeWV6+/k+omiYRgePHxbX638gz/4g9cfxfYmrodj/+//+/9ef3tsGIZhGIb3D0gu5BVSFYFl9PsmyLzf+3t/73Utcs+H9D6cJ3CO9xJ2fEQCEmtiI9vo8O/YZwIkG9LMOoIzkvDWHvFmTsdxRCRhywfC1XiSoxGc1YRQjZSUQ77o3eqKx7eeyZ2+ePTkYaRbPfT5RwjKxTGdcsx/uYthlC9dPuSV6A2SkU+2SFqx2IsfIVye+aMDEZYRlZHg1ZhUK1s6fOWT1Ffz8s6GhPST1k6c5C2cx+Z6IFakbSP4bZ6+ykme1tXn3Dh2PSN566OajXK2B/VWLP2rR3yoEwHfuR6GYRiGYRiGYXgUMSJ3+J3gj6d/3+/7fZcf/dEfvc7BfXTiWYD7cPfO7qE9B3Gv7n5+99PD8ODgdyRy70XeegPX1wMOwzAMw/D+w4dtH6oRW5GBCMLIsYhJiKhDliG72HnDkSDn7Bsj8iL1yEnyRfqB+BAJxxfkx4i4iyy0H6lmnh/HEZiJNYRredKlo578nXrimJeTuX7QP/2Kz6fc7OlRcvoGPiOJjezlEHGej2zEpotApG8k+iMmocMGictPvbqNbx3M5cEP3+qypt902UXkOuaLLaHHBqw7NgbzM14xm99Ke7eo58ZT1E2cvwjc8n788cffJmPty7NrU6367I8R5OicEXts3XTy77yzATWIpe/13o0nIlds+sMwDMMwDMMwDI8iRuQO3wrul3E7f/AP/sHLxz/+8euzEvfPni+QdNw3u+d2P+6+2z25t3PdUw/D8GDgHYncfiPXg7L/9J/+08jbYRiGYXjA4AN4pBzxoTwi17EP5BBh5kN4H9hJ5B4/9B1HvtnPB7FHEHrWgS49+yeh6CbAXjcE/InfzUE6hF1xu6HoZsE+H7fEbDo+o9ySjWz0gE75WetNVvGJGxR65md9bBCFSEG2hE5kuTzoi0Uf5IE8ZHcSr0Cv/vAhLj/5IPyXe72C3kStT/IVs5ytmZeHmJ27ekiP0Mm+uAl8q7VwzoPcTtFjoo8k8lwO/tDADWHnjqjPyBboqCtilq4areufNX3h07pYjq3zY/R51TfGiD8MwzAMwzAMw/CoYkTu8E5wz+zbsH78x3/88sM//MOXp5566vo8wH1yz17ch7uvtmZE4j799NPXP0D/2te+9vZzlGEY7j/ekcj1BoSHkf7xfvjDH76+kesB4MjcYRiGYXgwgOTygTuSzjGiLPIOYWjN/899QI/4SwDRRocunchIfhFnkYBuAhoj3ezxY8x3+o757k3YdK2feUTUIfTEdTNhXz0RnuUO5mzkSSc9Y3Z8iIvU47c8rIP83JgYq6k9NyrycLMiduSrOR2ix3yCXCJcrdlXDzu59het3kKtL6Taqh/ERQjLm62RDd1qzI99Ix+dEznoi9zbSxzTE8vo/BpbT8K95mKE9K0lfJ4iDzp67U1cNag3Epc4Zqs2fZKPOpwve3LXb/20Xm/0le9IXL3jj11E7jAMwzAMwzAMw6OMEbnDO8G99E//9E9f/vAf/sOXF1544Xo/fnvP3r21e+2eZbh/f+KJJy4vv/zy5fXXX7/eaw/DcP/xjkQueIDmh669jWv+7LPPXp5//vkrueuhmbVhGIZhGO4f/L8YgeUDuA/dRmSY0QduxJ4P8IiyyE561hBhEX78nAQmMad/kn3miLI+B9DxtiSf9sBIkJJyo4PkpN/XPxffGInKhp/IznK1zgeSjkTa+V3g27ry6VgPIjbtIQMjRMVEAhbfz0fIjz5EnvYGLX90+akeudSHekFXjQhX+uzFTdhZVxOf9NXSueJDftabs6vG7Iz5UrM83HzVv8Zidf6IXOnXr9v9joM+QmsdG5NuBOXcHBC47PSqm0Ojmo1qe+aZZ6465UU3W+cYrOuPPd8aw76/DmYnB/7eeOONy2uvvXadD8MwDMMwDMMwPMoYkTvcC54F4G/++B//45dPfvKT1z+OPu/bPUvont1zAffXzXv+4F7761//+vX+mu0wDPcX35LIPeHBmb/EQOp6KPjd3/3de0t3GIZhGB4A+ADu/9NIMgQi8tD/n0lv6PogH8GHqDvJu+b0TrHuA7sP9fwTc3FOHcf2+BDDB36fDdKzVj6Qrf0I1G4k2PoL0NN/OSI76ebXOjs18Z1eb8imZw8xSszrgT1AErI59SNe5V4eRD/oErl0E8SWvv7L3zk43/rlI9909UuObBGUcoBiy7U+gJyrzyiuc+GYrWO69DrPxTztshX7XA/n/EQ3bueojojbcyx3eYAa9arriK38XJv6xY981OEc29eD8rVXvohdOnSt5c9ova+4tjcMwzAMwzAMw/AoY0TucC/44/JPf/rTl5/4iZ+4/uG0+3735e7XSXP30sTc/bt7ec8Q7PvGLC/49bNFZBiG+4d3JHL7jVxv4CJsT/HgrQd9/pF7mOYB3TAMwzAM7z988I6c6w1Sow/evRF6kpcRZCQiz3hKeif6kM9Xkp51I1IugtO+zwmJ43zSp0eskTM2XXM3EEi6bhzo2UN40kvfjQeikD825mJEauuJHMqXXfrdkPBLT7/YOC4GiYBFFJoTvvhFSBrZEnM+xKBTXOcI4diNkxGM2cshyU6scqErb/VZi+SULx/1jlhPfF6TD3u29vNp/16gR845kfut6GPXG7/0ypWITa+3bOUqrnWit/roulUDXXVHyiKHAYkuVrnwHbl+kuzDMAzDMAzDMAyPMkbkDveCr0b+7Gc/e/mhH/qh6z169+uk+3f32uCe3JwecezevLd43dN7hrEX+Ybh/uIdiVz/YHsD91uJv8zYw7JhGIZhuL/wARsZZkSQIcT8FaYP4pGYEXZGH8bPD/BG5NtJ7KXLJx3HfCHZIjnTN7opiHSNCD2JxXRBfLr80iV0CdCtJjcNyLz00jXyLS/5+zyCrATH5Ssm3eLzS1d8o+OznmLQ5UOu8sx/vbIvDwSjXpvnI3/lww97JKOa+AuIT/qEj3rMxnF9sCa2ubF48iDFbo09gfqoFig/etA5Np7o2JgOEb+bQChf151RHPWKm7B3PbohtJfQdX75sEdHDGtiQOdaDV034oivx24qk76iehiGYRiGYRiG4VHGiNzhXvAW7p/+03/6+kKe+3D33d2/d0/fvX7PDLoXp+ObrtyDP/fc
c9fnHd7K/fKXv3zVG4bh/uDNp3fDMAzDMDwS8BsmX/3qVy9f+9rXriPiC9EVIeZDOUEo9jW0dHrL9Pww3zFCjb4ReUhOIjAbNwd0+G//lG4QgA5yjw0fkaf5Fxthh+SzT5c9v4Rufu2zQYjyy8Y6PfNIRMKvXqi9PORgtG9uPHPVG/oRiMjKYrrREVc+bnyMxepGSQ56jGSMsMw/0pZ88IMfvPo644J4fBB1RFLWJ77EldNtjwN9X2Etp3qWtC9HcnuDVy3Emj2SvrVy6E1b63JVa8JGjk899dTbMboG6Yvlr4bFiexOj52+tl5u9iPjrXcdOx6GYRiGYRiGYRiGuwj3+r7pyuge231199fd33e/7xmC0X22+2rPj7qnt/eRj3zk8pnPfOYtz8Mw3C+84xu5wzAMwzA8nECO+fCNIPRBHUHmQzuSywd0hBgiEgF3ig/ydNggCX/zN3/zascmHR/ukWrmPtQ79teZPuTD6cuaOX3x6CMz5RYBaA8iJumliwCMPC2e3IzIUyPhL4KTP3WfI1s10CP86gF9NzaRqelDNzB06wldcYk3cOla498ol2pgz04ctt0IIWvF6W1pNvkxOqbHjzX1O7bOH9/2qotYK355midq5yN9+2xAb4leJHJlc9rf6olD1OEvdNOxFrGqd80R1Y8//vj1mHRu5YEARvCyt6ZO4liP5Mum8wfykifytnOErDbKbxiGYRiGYRiG4VHH3sgd7gX31z/1Uz91vV93X+1+2r26Obh/B/fjxLpnBe6l+2Ppnhn0vMJLA5418TMMw/uP7+g3ck959tlnrw/S/CMfhmEYhuH+w4dy/2/24dtfYbbmgzkS0Ifz5hGGfYC3buxDvj1inZhDZBxBqqXD36l7Em4IPXmc/tIHehF8/KdLx81D5CWf8kPq8Ym8Y0M/gjW/1cWGLlE/v/SQqr1RW170zY3yNvItprmesqt3kE35ELb86SP7SOAIY6N9+bAn5nyY64Vjca3RZUfMy1Ft9YSNddK6ubj2W5OPvPgnauk3lb0BSx/4S5c9XUC+0mUH5Zk4l+CrmJwTtdLhr/Ngbt1nznOPrZzdfFaX86ufRjZ0HbtOXAf+mOCNN954+zoYhmEYhmEYhmF41DEid7gX3GP/5E/+5PWPrrtXN7qv75lAzyB6DmHN6J7cPbV1Y88TSN/6xs8wDO8v3pHI9RDPAz2/gfvrv/7r19/D9bV5/hF/8YtfvHzlK1+5/gOn19cNDsMwDMPwYMAHa4RYZBvyzxwJZs/xLdlJTgIwsvB2DPwj0fiLoKQTwZkvZFukmxsDOnTTEzfwRTcyj96Za/r0kKX0uulIx82GsTl/3bhAfpMzD8JnftnWl3wSOVmTRwSkuc9DEcxyqi/pF48Px8WVp5h6xE+j/fKDelGuYhgjRrPJbyKeUU16IZ45fT59npNbtvTzLR7/RvXQp2s/P5G3xLzajXzRkV+94tc+gfbtQdeqtfpZrnqTbvuvv/769fOovWEYhmEYhmEYhruAEbnDveCPon/6p3/6ev98+wzAvXj3+s3daxvtk/atg/tzL/N94xvfuP4RtXvwYRjeX+w3codhGIbhEYQP3EhFfzH58ssvX7921rEP5Yg1H+KhD+jIOx/uEWH2IuuMCLzIRLp9uM9HH+4d83eSlN0s3Oryw2cEo3V6IX0wT5eePE+fcoyItNY+QSwi+yKHxSo/dsSxvNXOVp98JbKx/hQD8Zh/IzKRb/09iUQ2bnbo+0O43nblL+EjfX7O8yNXOvLkg72R3xN0+FCjfOhba685sK9e/az+SNNTF+hUs31jx/pcDyJwz/527eiNXkbG0hfPXwb7qmV6dIh9+dsX2xq7+sKffXpiWTOq35rjYRiGYRiGYRiGYbjLcN/sOYR7ZffJ7vXdwxvPZwrmdN1/A53WCH3PYAh/f/yP//HL93zP91x1h2F4fzEidxiGYRgeUSC5/I4JMtc3a/jNW6QYIi5CtA/zjiPdkGJ9wEee+VDvQ7xj+unxzxdSjj0yMWKSHkS6RQLS5au46ckLEUo/n/Qi6uiCGPTkgDBE+vFrZCdf5GC6iEB6t4SjY/WUg5sbNfF9SzqevvWC2KPXVyhnwyd9+Ud60rfmxifoF30EMPHbwXzKVZzsHOuDEfHZOagf8paDsdzoVtsJdnLiBzFs9Nu1YrV/2nRuQO76aq1r5xTx809XDdYhIlff+fd1y+KbZ1/9bIk5O9cFv+pSk2tYLNK5pONcWxuGYRiGYRiGYRiGuwr32Z4JeAbhWULifrp988Tx+SyBbnrZeQZA/Mym+3nPO4ZheH8xIncYhmEYHnH8xm/8xuULX/jCldR99dVXr8SjD/U+mPcXmshEiARFDiLXfFinR8zTizA1Jo7p8c+WLt/W00UapofIpIs8jRjOT3qNiDo+I+sQgwg8Nxp0yHnjQRCAkcv8IhkjfflEFpar+HTkJLf0in/6TfSDbyP98kBE+iqj7LphYvPEE09c5/orf2tyMJY/O79pIx+5QAQq33T04Twf1k7phuy07/h2/UTH9xrFE1ffIsMjU0HO4lrXl4SdeOrye7mO6ei182Pko9ryyZaem0Tngi7/5WRPH/VADx0PwzAMwzAMwzAMw12FP5p+/vnnr88TenZBeg7gftro3prYO58lpG/eM4rs3Jt/9KMfvXzoQx96K9owDO8XfsffyPWg1MNN8IDOP14PzDxc88DNP2APYD2MHIZhGIbhwYMP3OcHcR/CEXJGpBqCzH4f0oljH+oB0eb/8/7fD/SSdNPPxmcHpKabh+KSU09sehFw2cqNLTtr8iRyQPBVTzcm9K3xD8g9OVu3bx2JSP/Mme/sfd5Jnx79k4hNV5xIZzZy5/MkfYthpC93+vLSw2pORx+Kd+aHnGRbjn3+ol8+4kLHpH6Fjm/XQX4E2us4OBZTDkTujWpGzspdDtbkqP+JftKRKx1Ij28+vBksfgS9fhm9NQx6Yc+62I77AwBv6dqT4zAMwzAMwzAMw13BfiN3uIWvPv6jf/SPXn7gB37geu8M7rvNz3v/8xmCedJaz3LO5wjdz7/xxhuXr3zlK7sHH4b3EW/+6xuGYRiG4ZEGUsyHbW/kEqQgEsy6D/Q+qCMRk0jGPrT3od6H+eQkHfvA74M8YpiNOcLOzUK66dtD8tGzhsxD9KXXPoKO5NMe0rC3a/myLoZYbBCmkaV0I1jPnAldfsWyTuj2dm03LkQcfdIvsUh50E2/XhB22ZDI3Pptn81jjz129ZUt8K8famPLTj0ErNNlRxccW/92QD+b6jp9tKZH8i4HIr/Omdyt0UmPnTeTvYWsP/zqgXODxFW7dbbmnXP1su/rlztP/ObDMTLXdUyf7TAMwzAMwzAMwzDcZfj648985jPX+3H32t3fGz0z6B4fHPcMAszd33txzwt8nnf0rIK9e/KPfexjlx/6oR+6PPPMM29ZDcPwfuAd38j1UOyVV165PmwL3/zmN69rHr6BPcd0h2EYhmF4cNGHduShD+M+mPtA7kO6EZFmbr8P82wQakgy8z7s003POulNSSPizX4
2PvRH9OUPEYecpCN+OeSbHd2T2KPbjcVJnJJ8+oxiZM8PnXKmxx7kmV/r9u/Vh7M2uubpIDHZlAOb2xhqVAN7OWV/xuLn7CWowxrb3kQ9cyPm4skL6s85nj7L63dC+vKNIDVXA7Emtrds+XTcuZdLtdJ5+umnr7laU5N6ELCR32J1ztg3qot/uoQOn/xH1HsT12dT/akHwzAMwzAMwzAMdwV7I3e4xSc/+cnLH/pDf+j6zMc9dPf1iXvnnmGAe3I6PUPwrAJJ65tZzdn0fIIenZ4n7dobhvcPv7snesMwDMMwPNRAkCG9XnrppetbjMgvH8CRg0YCzTtGoiHNjBGl4bwZSC/CMdLRh3sf+BF4CDixiTl/xP5pE4nq2NyNhrh8R4pGYqZLJ8JPTGS1PTZqlWM3MOz9Lq5jMehHLJYD3/wCe7XxXZ7lxsZNTOAzUlMMOUVsypG+eMXhA+pTtnrkq4Mjce2RajZ2jry56pheN1ah+bn2O4GP5DwuB+ItW6N81SW2kVhTjzeN2+s6sWfUM3W07m1x14J9tRC1d41UG/LdseuHWHdNlOswDMMwDMMwDMMw3FX0zMA9sucA3cNb71kHdJ9/3kvTt9+3hvWzmuzPZwr2f/AHf/Dy2c9+9voH2MMwvPcYkTsMwzAMdwQIL9+k8Wu/9muXb3zjG1eyEHGGVIxEQ6qBEfFrdBPgA78P8kZ+kIzIt9dff/36ZiTYIxGd5my7aXADgNhDxomFbHVjIIabh3QJkg4RSp9dNxBGfuWbX3MkaySpN1z5Nka02pezmvlFzCIKgc1tvkROdN2wRFLWA/p07dGLLJa3fkR+05WDvPuL1mzFAD6KwV6O9tn6jVji+LSB86bLqKfqbK0brcZzvbUT+et83Arot/zp1JOTxCX137peuF7M5Sd/9bCXa7byUa/fyrVvzR8dVA89vdFX149zybdxGIZhGIZhGIZhGIY3v4nLfbv7aPfZjnu+4HmEvZ4ruC8nofty9/Pg3t/9vbFnAkY6vsL5J3/yJ0fkDsP7hHf8auXw3d/93ZePf/zj1wdrr7322vUf6Sc+8Ynr6OvyPLj0kG0YhmEYhgcfPrAjFn0QNzciGX1Q9wHeB/c+2PuQ70O6ubXWmydsIhpJesQeEg7hZuzGQNzycJweUpbIhZ90jWcMvsFnkAhC63TTL49GhCLfSMJ8sxHLGjt5+GyDNKQrb/6ts9EP63QdW2/tfPtWXuWuj6fYzx8gi+mzl6P1asgH0R91EPaJHNmftVq/1SNwjuc6yC2YJ+exXumRuXodm3szuL5Yq999hbVauiHsDefOOR3XA7DTC3WpgW+6YtLTL39k0DU1DMMwDMMwDMNwF7GvVh5u8alPfer61cqedYB76p4VuPfvuQxxrw09R3A/7/mQP652X94zBTb98TpYc3/vm7p+/ud//vrH6MMwvLf4lm/k+k51b494I+JXfuVXrv+Qn3rqqevDTf+T8PDuIx/5yFvawzAMwzA86PD/bkQYEswbjf5Iy1u1iDUf1BFuEbpIR2+z+oDuuBsA88jGyMlIRzcL9roRQMAh6twgEDYIO3/Vyc5xxKzYCDw3COZyTeiRYsjB5xG6YtDJ/5lHOdM5yWHxSb5OstDYvLqrr+NuZMpX7+RQPHpnzuVirm7H+WAnNznQkVPimC4/8oV0k2otNyO5F1o/dU90zG8jEZPI1blxTl1D9QhBW4/oq01PImXpqMHIh/72VrT83Sjao+u8RvxaM++cdF6I+TAMwzAMwzAMwzAMb8L9dffe7s3d9/dsoucMxLp999uBjnv723t5/BD9wA5wRZ4Z8TcMw3uLdyRy/UP0oNUbDy+++OLba/7ResAGRg/Z/GMehmEYhuHBBzLO/78TZJz/1yNzIyPphIhaRKEP6z7EE58HrHWT4POAY+vpRObx2Y0DvcZuHtiJ6QbCyDaI2U2CfT4ReKfvdMrvXKOXjT21ROKayyNhhyCkLw/2PgsRuuVtbp9uX6HsuBrpu/mpP/WEv0htMbxdqgY+2Z3k8tlL+kZ74pRbOO2C/STwceLcu8VpaxRTf5wDY3M56ge53WcjZj2kGxlrn1911i+2XZf6oy/W9EyfzIk/RNB7PoZhGIZhGIZhGIZheBPuuz0b6P68NbDWfs9BwL07Xffnni24Byfdm1vzx9dAjz7w99M//dOXj370o9fjYRjeO7wjkeuBoodv/rEG33nuH6qHlmDvWz0EHIZhGIbhwYS3cX39TWTiLTEWkUciFtv3+SBS0OhGIOKRPj2C4DRGgEZ05o+tzxXETQKCDyHoJgGxlyD6fOZwE9HbxHxHINpnwzc9NRndYLBRm9yK3w2LHMpD3nTYyAMiIMUo73InQF+N4qcrZ18zXD9Ajfqs74k8+cmvP5gj5lD/2YpTLJLfUGx79ZRAfk6bc631zmnH7fHD75mD2tT42GOPXXtjPZJW79nosb/Ofe6556752dMDYp+9r2Liy/l0ntp3PfDZW7v5FLveDcMwDMMwDMMwDMPwJnqm4N6+5wbmPfNwj+3+u+cQPQ9xr03M3d9D9/juz8GLfO7v2dPtWcEP/MAPXL/BdRiG9xbf8quV/ePtLzPA3H8EInId+0fbA89hGIZhGB4OIMO+8Y1vXH8+AVGGBERG+lDu//U+kIP/z/v/PgIVmUaskXvBh3xv+PYbKT4ruEFodKMgRjcLb7zxxlVXTDnQI+YdsyP+oIywReSdehG0/CMNgc/WG/Mnh/LwOUY/5OGYvV5EDp/6RIzewqXjZsZYHtWIqCwPxKQe8qXf+peutWz4UKObpnqsp/pEgN4t6KqNLX0xrXWDlc4p7d3KqcuPXIyJHrmJk79aXB9i6mOiHjeQ6mGjfufMSNjb00d18eHaYmddjfTF7gaSyCm/wzAMwzAMwzAMwzC8CffZ/ljavb77cPfVpHt74p77FPf39tmYez7hXt+cfvf6H/zgB69v5dpLn/jD7f6QfRiG9w7v+C/MQ1gP1Lxp4YGZ/xB4QOjNFrBmz8O4iN1hGIZhGB4O+OCNvPzKV75y+fKXv/w2UegDOvjA7kM5+ABPfDA3tn6CP6Qe6YbADUDkKbGWD58xfH6whrBjF9FnpJOffFkn4ObBDQV/JxGarjr4VAddIgd79JKIxeKc+eY3QSgiY30Wkl9+b33XI/2VI32EpT06bn68tcunY/on9FJ/InORlj53sc/GPF0SsiVqbz/pZot0fO41so1ETeQhdzdp9MTQZ7Uhwc2JXugJG8feoNbjanr66aev9p139n6rufro0Ld+1qFPctA7PRiGYRiGYRiGYRiG4U0gW92v9zzH/bV7acfdV1vruUXPFegG6/5QnZ65e3L35+7D8UP2gC9wjCPyB+7DMLx3+JZ/KvErv/Ir13/kn/zkJ6/fde5h21e/+tXrP9qPfexj171f//Vff0t7GIZhGIaHCUg2BJw3c/3/HZmGdPP/+0hQiGDrgzy5RSSsD/NINnKSodkg5xCbSNzewgQxzjhiR+YR+bi54IffbiysqSM/9ORS7kAHoWrsRiWf2UU+Gs+8ywm5yI
f6SHq3+vKjW//UUGz79OTuzVlv/vJVb8zlw9ZaseRAlw17e+d+kg8iHpEDkVPzc685YZfurSBQSfH1XF2kcy9HQocfa51D1xVdvWKvxr72mq6enLH5L7/qAucImSzOMAzDMAzDMAzDMAxvwjMD99buqd1Dewbhvtq9uDX34tD9P1jrvpuuZxJ8uHdnT8+aPUTx7b042w996EPXN3OHYXjv4Afe/vab03vDQ11fu0i8pQse2L388svXvWEYhmEYHl74UE6QbxF14MM80swH9xMnkegDO8LN6LMBP5GahC0Skq49Nw9IvchNNxL2xRH7JEXp23fDYBQjIpTc6oE4/FvjtxsPKBbwSc+NSHr5o1/eUH0Ri8W+l343N+pDUrZ2xiD06y3fQEeO9Sbf1ovBVh7pdx4INOYT9MJx0trt2L4cTrEmF399y7965CDHBNzoeWsYqoO4LvJTH5C3J5FeP6zRr4/syhHY04PejD73h2EYhmEYhmEY7hJ+5Ed+5PKLv/iLbx0Ndx0/9mM/dvnMZz7z9s88uZd3b+0e2z25e29r7qMT9/fu9XvG4J7db96ezwTY9cfdr7766vXbx7oXZ0N8w9uoGW7NAAD/9ElEQVRv/MZvXNeGYXj38S3fyB2GYRiG4dGGD+0+cL/++uuXr3/969e3c/3hFoIu8tJfXEbkgdGHdjoINW/W9oHfTUGSvg/+dMWJoLPWG5zEDQGSjp0biEi/c92Y2OsmhJRP+umUU381GsHY3qnDHxEb5Kk+b5PK+4yffrr29aF+ICmRkvTOGuqJfMurmyr6pDyS00ZOYpmTE47p+1ojeTmm6zyqOVtyO4+gJRGpchKbP77yI0fjeX34thb9bz/hW05IXnrIWteaN8Htg1z1DDFL+BFX/Gpiiywm5q5HfR2GYRiGYRiGYRiG4XIlYH2Dlfto99Tu7c9nCO69rZ/PGFo3ukd3r27fVyW77/Yso/tz6DkC5OuFF164PP/889e1YRjeG4zIHYZhGIY7DoTcF77whcuLL754JSKRccg0H/gj05CdyDhrEFGHjExOUvQE/71BieD0QR8ZR5BxbNw4kMhLIo++6tncPl176RN5+ItQeoi+iNN0T7HnhqTYEayEr25E5IxcrKZT6PJFD9zEiA9ulEj+zxjpQzdJ7PS4PsuB7zOG9W6akKrWnR/rSTCXj69Ucr7kS1c9cuJH3G6+nE97idzpy/uJJ564+nAcQduo13J0oyin1k7h2+/huvnjN6IWmeuYPXQsH71Snzj1QA31wkhHbuIOwzAMwzAMwzAMw3C5PuvwTKT7fff4RvfSxPOGe6FnK+7he9bgmYDnCv0RtecJfNDJJxv3594A3m/kDsN7ixG5wzAMw3DHEfn35S9/+fJrv/Zrl6997WvXv8JEviLkkGyINceIQOSqGwJEW0RlZJsP8t0knG+oIvro0U9Ou2z5F7u3YMWgQ9wwIAIjIsvR3D57e3DmcebCb7kQ/m9z5le9cuL31L2tMYLSevWQajQvRnagrwhMx2yN/cWreiJok5C9PumH0b6R3NqKj/D0V7niyEkcczdokbzd4LlRoy/nbuJOklbO9Pz+jZs1NelV+8S+m0f77K3Vf/b1VO8c519OYsqFX/nXt/Kuv92gDsMwDMMwDMMwDMPw5lcpu692T+7emriv9hyBmPcM4Rbuu7P13IS+5xP+uNv9NyLXvb/79XyycV/uHn0YhvcW7/gbub4i7/u+7/uur8V/13d91zvKs88+e30Q14PTYRiGYRgeTvhA7oO5D+4+pPvQ7kbAB31EG8IS7PuA/04EJ30EnQ//iDz61iM3b21IsbvZoG/fDUG60A2HkW/ohoMeOyImn3QiB7O1z+aMY0+NPtOY24805JeUKxufe+jyod500wfzs8ZEnfISJ98kff75LlYC5zH7xm7GCJzzcjj36OuRuPYTOUA9k6d8CTvnznXh+qjH9vSDWOdDXl0Hke/s6022xByqtz51fXXMb7p88d0bzcMwDMMwDMMwDHcN+43cIbiX/iN/5I9cvud7vuft++aeNdiDxuBe2rMH6z0bMPdsxB9494zCfbn1//yf//P1N3I9L+i+H9HLj5cCfuEXfmH358PwHuEdiVwP3F5++eXLSy+99Lb4R+sBrDd1vvjFL17X6NAdhmEYhuHhB+IOueYDub+sRKBF6CHPIvsi2fpgT9/NgpsA4u1LdmA/Epc+iZxj5ybAZ4n00yEn0Ug3GzcHxnO/OciFX7mYszt11GKfD6CHGEyHqC/98k3XjU325WlUozn/kG3x5H2Smrf+i08X+D8FsuGLnvqS+gLm4hjTPcVeZL3Y7NTW+Y7ANa8214Tx3Kdfj/kD+5G4fIpVffYIG+v1Qo7i1I8I47P+9InY3vQ1DsMwDMMwDMMw3DWMyB2Ce2lE7kc/+tG37897dtAzgkbovtqzAbo91+he2zHYY+ebyxC5noU45hfct4uHI8IZvfHGG9f1YRjeXbwjkXsv+Fo+D/D6WsJhGIZhGB49INSQc0Yf2n2Q9yHdB/SIvEjZbgroItUI2z74pxvxe95E8AtsfPCnH3naDUc3DcUAa8R+Qr/1xBqf3WSkZ2xNDvIljukX//RJj8jBDcy9dNM32nODY043othIx1pSfYk1fUZ2Vm97IR906ouRWDeWs96eBK5j+0ZfkWTOT31w7hzLNdtnnnnmmhNd+z4HpieGc9zv6apT7en0li5fiF0x+Kxn7Msf6mHXTvWwccxPIsfdKA7DMAzDMAzDcBcxIncAzwqeeOKJy4//+I9fXnjhhev9s/tqz1dunyNA9+A9L3C/Ttpn757e/TZ744svvnj55je/ed3nly5b9/vu1d37exbwq7/6q1edYRjeXYzIHYZhGIbhv4MP7X1g9zunvlKn31U1ItMiKOn4AO+zAfHh3c2AD/YRcebdFNAnyDxv7YpjvRsHkm/rCRv+3RggCyMez6/yJXDakVt/9G7nfNA54596CMhIavvp3eoneuizElt7+tANTvqkHAicPtQI6dwLfBKfzxDtRL/1SSw3VsZzbt85ZRfE0l/nwzwC1s9oIGjZqidytrk94py70RNXr+io76mnnrruObYvvnpcR8ZuMMUy1lP9Yi+fIAf69gN/biiHYRiGYRiGYRjuGkbkDuBe+jOf+czlD/7BP3h5+umnr/fN7qfdX5/PEuhF4ga6nhH0fALoePZhzz23+/zXX3/97echhK57dP573uCZyb/9t//26mMYhncXI3KHYRiGYfj/gw/siDcjkg1ZhyTsQ34f3H3A701ce0g2+5G4jm8Jy8hAaxF3SX4T+vz67OHGgK+ELiAe6dDtK50TNudNCR0wRjgiC6szncb0Ipz5KU/iOD37kZn0rZUroS8XPUv/tD2PCX1jtcHtTRecNh2XJ339lg/yVI3W6fDrhqs+5QPJi8B3/tiz1aeEPv+R+47FUHt+xFDreR6Avv3m9IxyUa/82OmFa851Fakrhnwd82fOzvknxRiGYRiGYRiGYbgLGJE7gPvlP/bH/tjlB3/wBy+/5/f8nut9sjX31aHnD
beg2z347b69836/ZyF0833em3se83/9X//XdX0YhncXv/2veRiGYRiG4S34EO7Dut9A8bv43/jGN65kqg/xPsCbk97ARPj58O8PvhLHfci3T5dtJGdkX0K3mw37YrGRh3zss0kcB/tuHsRJwGg9EpDIoWN2dIzyihA0RyJGysqpfIstV3kmfGbPJ51bPbbISuuOq/UUkA+7Yjku1/I9pdrEdsyWHfve1i0X9nobAUrUCOcb1/TUpF+Ef/7cGKqBL36c//pULHuOI8r5y0aN7MC6Y3nrrdwdI5LPWul1TQW+rPnaZ/vDMAzDMAzDMAzDcNfgHtq3ablXB88ZCLinDvTIOT+fSYTW3Yv3jMGx5wHd0xP34fY8K7DuecIwDO8NRuQOwzAMw/COQNZG5voKW4QdgtNfWvqtWPs+tCPYkGqktyp9qPdh340DshCpxxaJ181ERKUP/cQ6PfpIRIIIpHfKaePYTQQbJKTYjfYiZuVppCcHPtLhx7719MSWixroFru43bwAO/qRjumXZ3pAJwH+yakDelF99ruBIvaaV58ciHm67DsvbNLrfBjpiV2d1RKBS+w5r319Mz22rgcj33pOxBSfH3by12frjt3cOa5/9tmZV7NY/MrbuhzpOK42vvjxBrE9x8MwDMMwDMMwDMNw1+C+2L21e2r30O6rmxNoDI7dR5/rt/r23f+f9/Ttm4vhHt3cffwwDO8N3vGrlZ988snL933f912ef/756190EA/u/KP0FcutPfvss28/7ByGYRiG4dGD/88TH9oj6nxw94GdWPOBnZj7kN8HekDIeWvTOvIQUWqPjs8W3RyAOAhiJCCwOeW8cejmwUifb3+Bml555j/S09wNTjr5JObpFZPubXxS/epBbFtL71Y38NubzfJKrzzhds6ejj62BsXnk8iD2G+PVM+pQ6wbnTcEqTjyEoc4F3SM3nql51ifI4Eja/UdQWutc2w932Lzo16+5GgtW9eBa4SuOXsksc+j0BvBbh4j99UWfvM3f/Mal89hGIZhGIZhGIa7gH218gDus//Un/pTlyeeeOJ6n+x+271zzwSgZwvncfOeSUBr7v3B3vncItDruQKx71nHv/gX/+ItjWEY3k28I5HrIdnLL798eemll76l0BmJOwzDMAyPLpBjPpj7oI5oQ84hN819Ba6bATcJkaOOCUIWAedzAlKOHzcAyF765taQf24I3AT44M/HSZ5208AnvcQxApX/SEFjvtMx0otghH7f9cw3/UjDdG5JX6Db28lIReu/U75yQzhCemdsEvT6XKOj72rthsrIp1G+5vT9JW79BiOhk/DvD/OcC0DIRuJG0jq/H/rQh66/VSuGNSIHOkZ7/IgrZr2Tqzzo8cvemuvF/vl13PrQNcCf3tnnC6lrLxIY5GWNH/nzI4b44g3DMAzDMAzDMNwFjMgdwD30z/zMz1zvwd0fe37gOUbPDqDnC/ZPOGZvL1jLtmcW1vKRn+6/PW+g45783/27f3e9Xz9jD8PwP459B90wDMMwDL8jkHZ+J/dXfuVXrn/I5QO7D+sItMhAH9T7wI/ENUe20UPikUjcyFE3AAi67Ky333gSngSph5hF+oL99M3FRALaR5wiW8Wxn890uuEQmz5da2C/425WwIhoVLsaim88cw1uZt54442rL+tskvTUT+CMdQtEJxFPH9Rh1Ft/fevGjS0SmvAfEUrYIVO9Batm9uqImDUSMfi63a9nTz311FX4s64f5YHYRcA6pkuHLzGtOXf25eF6kAfkTzw1+OYX8eSgV6C/ncP6LNcPfvCD19jDMAzDMAzDMAzDcNfgXvmdniNAzxzu9byhZxFwu+d+3X28+/Jbe/f72dKTw0c+8pHrPfswDO8uRuQOwzAMw/A7wgd0hKTfy/3yl798JXMRk0hCJB/4AO/YOlIUIQdIN8Td+TavD/gnoRlZKA4fCDo6JMLOPmKPrpsE8ezzlzjuxiI7evaQhhHJ7fPDX38xms/80ImQNPamqLm9M3bEbDc1RrrydtPTjQ2hm56YyTuh/ezVEFmLINXXCM705GK0p279NxZbzZ2/6kcG+zpjfsEeojYSl+8I4nrTm7AIVbH41y9rIK41uuLYqw4x9UbMSF0x1AFsrNVXPp2vekAXIrj5GIZhGIZhGIZhGIa7BPfLPWOA7pVPWDt14Pb4Fu7H3cMbT11z9+XG1t23P/3009f1YRjeXYzIHYZhGIbhdwUf3BGZX/3qV98mcxG2fb0xQi+iE9kWYYfERfCdZGNkIhKPIOwIG8dGiMDjTwxEnr38NIrDJ/3s2bVGLyl+xKF8EYyO7TcSOdMpx4hZPvlJitNNDAE5y8X+Ke3foputb7VP1BuJS+RgXa6n8KMedSBU60/9jliVk/PkzVbCv71IXLps+SDt6YW4/LMHe63LUR/Zuz6MfR23PIjYSFj6zrNRPXpdDfWWb+KYDlgXn99yGIZhGIZhGIZhGIa7CPfI7qVv4V77BD0C517r1vjxzODWn3ty99/u582BjW/osjYMw7uLEbnDMAzDMHxb+OY3v3n5yle+cvniF794+frXv34lcl955ZXLa6+9diVyfXg/SdEEOUj6kO+mIOIX0kf+RcbSQf75qmY3DuwjWeki86x180A/Qrk3RfmyRycJiEM6fPOV8J+ueTcwYjq+jduNDnFcjaeO0f4JfsktWrvdv53fy588Ez08JfLUPB31eCP22Wefva7rd3r6Q9TrbV3npjV9A1+bzDaSNfLaTZ2vS5YTfWv6QZe/CFk3epHp5QXsHNdPc4StEboWIouRwUjtYRiGYRiGYRiGYbhLOJ8NuJe+lXCvtROt39qc6Lh7dbGtuc//xCc+cX1uMgzDu4sRucMwDMMwfFtAyr344ouXL33pS1cCF5GLhPOhHdHmgzzSLwIzMjNBAHqTF3mHgCM+6Cf8GBF6dHxVc8SsmwP6dPjtxoHQl4ubCPv5Ne+N0Ehbv50rB/N01EUHgclH+RPz8jKS4tM9JbBBUsoj8lH+503QOQ8RmZDP7G6F7nkczjU6atKDk6Ql3r595plnrqSrPT1ILxIVUYoklYf+JXS8BUvY8YfE1SN+2b366qvXa8O+a+P5559/27f+WTM61ns2/Dnvju13LdDhVx7yOq8DOnotz2EYhmEYhmEYhmG4i+iZxPlc4cT57KDxFj1HSBznN2TbMxHH7s3d87tPH4bh3cWI3GEYhmEYvi34gI6c8zbuL/zCL1zfzkW8Iet8yEeqAVLOh3rEHSA2e1szMpQusi5Sjk4iBhKXPT0iNh9IPjcLxDECVw7sInDJ6ddNBX2EY/HtRxZmpwa++CSO1XHm1s1K4ykn6HpL1NcR6083O0aSfse3+8np93aP3AvW5R5pq49EzsjSvvJI/9TbvjyJmH1VknXkLd1Ie2/b8uVcWCN+D8e6nhZXX60jYeVD35r4rpFIXOeHvjhyN7fGxjGf8pKPva4je/LgE5HrjwiGYRiGYRiGYRiG4S7i9rkEnM8N7vV84YRj9+FJ99y3uvwk1t2r+8Yu4zAM7y5G5A7DMAzD8G3D
h3nknzdz//2///eXL3zhC5fXX3/9baLPB3kf9JFtyDdEIP3IOYQpYi4S11of9unxwwasIxSRoecNAb9IP6M4EIEbMctvviMF+ear+OlG7Bajm5HITbmriZw3LK3drgdxi8NXN0KnnDdIHTeH5kn66r61PXXsE7mLrTZ5GOVrTT8Q5kSd+iBfI7DtrWhAAiNg+WnPOUPMOk/Arz2x9TpSXywx9IS9Prle7EXKsgXELx112JeTOV/s2fJtbnTujG4ch2EYhmEYhmEYhuEu4XwO8a1Ar+cXvxPoJLdwf36775kA38MwvLvYv6phGIZhGL5jIOX8Vu5/+A//4Urmvvzyy9ff0EX6RcbS8cYl8YEe4YagI8g7RBxiMRIPORu5l94t4UqQhJHD500Ff/mMsKRbLoR+BOGZQ7aRgpGHEZPm4YzZ3HiuBzEiIPkg6j2J1ojP9tNJHKeXbmLvPL7VE1cOeqjOetGbtPqIkO38EHWk43yK4abMG8b88W3fSBeBKk/nnT/6Z835ciwHaxHx5WWNP/BmLb/nOWLvvDi2Z404T0Q8RLO1e52HYRiGYRiGYRiGYXgU4R749j64tdbds5u7zza6Dz9hP/Tsxv01fTj3PWvIRzF6ljAMw7uLEbnDMAzDMPwP4zd+4zeuZO6v/uqvXr9y+Wtf+9rlt37rt94mQJF7fcBHwkXMOXYjQBB6CMNuFhLH6WfDlxsJNw4nQUu6geATMWhEIvNNl718rDkGvhL7xTrjAyKxfKFYt3Iv8NfXDMkrwrW8za2RYpx6J/l6q988vXQdywe5KbYcTj+IVMdsARFqrj/EvuN82GdXDnJE7nobl+6Zkzo//OEPX9ci3OuBPkfYA/9qLWf7RjpskLR8Ipvp0XdsP5RHefIxDMMwDMMwDMMwDHcB57OK3w3cM7u3JqE19/N+Iul7vud7Li+88ML12LMD9+nuyYlnBu7LQ7E9P9n9+DC8u/AnF3/7zekwDMMwDMN3Dr8nixz1xmS/v9obnAhRH/iNfai3b0T4IvoiDhFxt8QtATcUyL7/+l//65UEZE8X2Rrh6uaBHz4JPXZ82Sf80XfzQd+8G5huXJoXt9HNC7/FPvducbvmmA0fcuwGSG/0Sl58y1VekaJJN056YM5fPuzbs54eP343thgRuI3EVxg/++yzV/KTnTW5ETbOGR2wJg4dPXOenV96b7zxxjWOdbW4DvSIrutC3c8888z12PmTgzW6EEmLtLUmFjv+8qM/1Wytcy1HIjd+IuodD8MwDMMwDMMwPIr4kR/5kcsv/uIvvnU03FW4T/6Zn/mZ67MUcM98+yzixLnvnrr5ucbnd33Xd12fFbjXdt/tj+PtdS/uvpueuWcC7tPd4/v5Ld/U5p5/GIZ3ByNyh2EYhmF4V+DDuw/3CF1zBJ8P8chExJw5Ys3NQUQlXR/2IyZPoo4+cZMAbhr4d4OQj3TS68bD3H7+0m3N/qkvL75PP0ZIx3jO5Qh8tnercws1gxwQlBGQyEvHfBnlUa1yI+Z62lc+s+MHYal/6egjUpYOf/KMoCUniYtYpds5aL1eIHDlI28+rNuXg5tE/sXs/HXe+ZSPc1be8mFPB1nNRo/FQb7aY+daYYPgrz98s3nyySev14y62cm7ffNq5RuxbD4MwzAMwzAMw/AoYkTuAO6PEbn9ATb0XOP2OUU4j2/n7tnd6z/33HPXnzwy5889tvv8npV4NmDevTg7x6+//vr1Z7fc0w/D8O5g77gPwzAMw/CuwQf5V1555fJLv/RLl1/+5V++fPWrX337N3MRb24wfMBHyp1fdUzcHCD7zH34jzz0V570kXts7UV+Gvllm598WafzTnr8GN2odNMRCSiWdTrpncIP0EVA3oJtAnyTwEeEbQJuioBuNcid9Nuw1VCPjMUxr4fW+JOfHibW3eA99dRTV9JU7XQSQKh6k1csMdRJ+LV+mzMffCF3jfKwzqa86Th23qvfaN11Q8caOHYu5OpcED6N/MrJnvz6qmY41+rDMAzDMAzDMAzDMDzKcI/s/td9dc8IbiXQ7RnILayR7tfB/EMf+tDbzwhC9+h88e/Y/NOf/vT1a5mHYXj3sDdyh2EYhmF4V+GDuzcska/mkWxuKNwIIOIQdUjDyDvrieNuDiIDvXXazQRS037++C8GXXqnP7r2/3/s/Uuv7sl51/+vR5C4u3e3u93udvuQdkhsQDYgTkqEEEyQMkGMIjHDQkK/AQ8AKRIPgZknzBBMCEQCKUQQBUJIIjvEcezEcWzHpz733t1Gmf//r9v97lSWdvsQ92nv9XlLpfpW1XVddVV977X3qvuzvvd9/fBSu8KWLwGxvPVdt0WHG/k5uMA8jZ+2J/l1Xa0kUlabX8yK9bLLRp7N3T62P/rthWKvE0qth9Dqr2oJrGJ1L8Qjfur3BK058zUunwRetuWiFrenaa3buMLPxy/Lh4isj5BrHrmylaP95m8O87GVL/iyY+91wKbSehOom0dOd+7cuVy3R2OMMcYYY4xxP7Encgecx//RP/pHl0+wci52HlZcnwXO0c7fneW1cdrAewL+ANz7AzDmvO+Mr0AMRTzvH/Qeg7i/93u/d/WNb3zjYjfG+NHZE7ljjDHGeEt45plnrn73d3/36jOf+czVH/3RH1196UtfunrhhRf+3NO1DgMODg4JSgKd4jCSgEkATKStT81GEceBgYCssO8gko2Y9Sk42xV28iM4Xu8/Y8jFGszl435/GMGQb/7yFqM1VhJXxW2MyHnaGONPzLR/Dk7Wqp+/vVazcah7/PHHL8KqeMVqDmvyHTh9dBIf/ezk6SlXRT/6nluirAOep3GtqTnbI/auxXJv5KffXhojzopLyDWP+Yz5C152/DoUmq8ngsW07j6eG+wcLo1bh9zGGGOMMcYYY4z7GedmxRnbOVpBtfNzZ3Sl8379J8Y7V584i3s/oXO283f+zvn9obWzu7ExxpvHhNwxxhhjvGV4KvKP//iPr77whS9cvifFxy4T+fziT7gjzBHhHBT80k8MJOThFFTZ97Snpyx7MrNDAzFSXwcHhxKCYk9qIlt+lbPvtEMHntMuW4cUB5PiEB3PQ06+J2efa+u2vkTaijEipJK4bc/si9J1dkRUc4ulX4zEXHvA7qmnnrrke30uhZ09E6d81Imj1vz+97//cmjrXmXngOY+2m/ff6OvOeTtO3XU7PnJM8FZXPHq4yN/9u430de43Nh6HYhhHXxcu7/aXjf8IB8+xGH1GGOMMcYYY4xxP9N7Ac7mneWdmStwZq7Nhu0b4f0XZ2227MT3PoOztzM3zvdFnNmh7X0ZY2OMN499tPIYY4wx3jL80k/YI8IR+vxC75d8v9Q7GCQ4+qXfL/wgzqFDAR+HDNf6FDH0sXWgKAaBj53SYcKhI0G3OUKfIk91feV4CoEdeJTabM3ZX6Q2Fndry0eRc4Ko2lwETbX58zWHvM1jHfYjsVKO9uGMkzhLLPXXsj3Bms1Z2Mi9feFrXrZim6f9199c8je/cbm6B3LraWsxrYWdPuPiEIuDIM2X4GpebTXMXb78arc
XaN/NKw+5tXY2rl988cXLk8NjjDHGGGOMcb+xj1YecK7+u3/3717O271HAOdn5bw+z/Zwrs4mjD3xxBOXczZbZ26fhNV7B87u2vkVTw3Xv/3bv3311a9+9dIeY/zo7IncMcYYY7ylENaIaV/72tcuH7P8O7/zO5ePWvZ0LqHPL/mEPoeBDhNwCHAISRR0wNCuGO/g4OBivJKN/g4U8jCH0iHmjeCnyCv/CpqbjVpMazlpLflpy0FJSOVjfQRVwqhDkbzZECIVNuXDlrDJTluM7BR+4ohHKFe0G29eubA5Y/FNkGWjj685E2SNWYv7IbaYnoQ23vrLsdzEdE8Sn8U9++wfO/5y0W8Oe2oefY15Apm/cbW9spbutTp76/PXwuoxxhhjjDHGGON+xPnYHzE7szsfOw+r9Vfqw/lehn7jJ/qd3Z2vneuJtr7v1rXzvvcC+MP5Hc74/oBfDvyVMcabx4TcMcYYY7zlODAQAb/85S9ffe5zn7uIuZ///OcvYm4CI+HOtYNAhwK4TnSsX+1Qoc8BQ32KkkqHByVxUHyFaHkeVthcP7zIWU4OJB1uKujamMMK2+JeL2IpbK1V0ZZXoquc+TfevM1ljL1cjbeWu8VTxLtuh8RSc8IcYqsVc7V/5Y9i2G++9l+O7qu59SsOdnwJvPbFfUmIb4/Mw1Yu+hQ++tix0T735Wy7VtpTuanlJIaczUv49cTvGGOMMcYYY4xxP+Jc/Bu/8RtXzz333KV9nuNP6nOWdq5WnKPrw2kD531fk6V0Lnfm9slfp62izaa+Mcabx4TcMcYYY7ytOFwQc3/zN3/z8jFQzzzzzOVwQIwj6CVCOgQ4kCjQT/iDMQcDdvoqiXzoMFE5bYmMDhgdPJAfmtehxne1yg1nThVxCJyeOpY/m0oxslH0GSNYEjhdnzbmSsSVq3Wz5dd4hR0/Y8W7nmfjCpGVsNn33Z45hTGCqf0w1sGOSCoXT9Hqa8xc9lZcOeiTm3ntx0MPPXTx02ff9Tn4mdO1OARfudXPl1/rgD7rZS8XRR/svTHo9yRutbzOezvGGGOMMcYYY9wvODf/8R//8eXrrDr7OkfrV6Lr3h9RzvE4z8/O387b3hdxfnfed3Z31i5+8dTG1WeMMcaPzoTcMcYYY7ztEPS+9KUvXf3iL/7i1X/7b//t6gtf+MLV7du3L4LuCy+8cDl0JPARAB0G9J1iHxwOOjx0UDnpANEhwjjxT7zEv9NGYWOcXbbNr7hORE1YZKfONvv6s1EceG7duvX6X7DWf9oqxnu6VF7EVf3mbP7siJ59f00F/HoylU3xlNaiEGB9n468CKDGxfcRStmIb/8d4OQK/ezl4BruoftGhP7gBz94iaPd+uy7WOZ0EDRG6JUbW/5imqs9F6t2a2PndfTkk09e7PmLKUZitD7r5zvGGGOMMcYYY9xvOCMTcZ2RXXsfoLP0G5GQe53eFwkxeh+iP4h3xnambw5xnMmzH2O8+UzIHWOMMcbbjsOFA4CDxu/93u9dBN3//J//89UXv/jFq5dffvkiFiYSOkT4rhWHB4eCnlI1rq8inrgJfV0r/NizUfLVn03jxSquuczZuANMAmRCM+HQk5/Z8dOvFM8cPbXaX6tez4v/e97znovwyo4NP7huHoXNaZdtJR+cB7pwbb78quWjdADTd37XrDwdEq3N4U1ObO1La7Y+QmpiuX2Sg/kIr+KzU4OAnL97bk3tr1iEWP3mlovY2t0/10RbxbVYavl5StlBc4wxxhhjjDHGuB/xvkOfeNXZvvPy3WDT+xLKyenjDO+crU9sZ3h95x9aJ+QquB5vjPGjMyF3jDHGGO8oRMFvfOMbV7//+79/9ZnPfObqq1/96tW3v/3t1z/SmJjXIYRAp7h2ODBen0MLsbDDBRu1klBqTL9DB5HPQUP7HL9u55DiujmqFRBVCYvZyam8xDEHEVS5Lih2aOJHvGTjkGReh6HzAOWajRhnaexupTFr6lAX5u3A5bo9qBhrXfzaE/vp2twd6MQ2pl/uxF3+9sFf7epvfebSdq/MTYwupj1zzbZ9NL+i33ytWRxtfnJoLWzNo+1aLuYYY4wxxhhjjDHuV7y34n0UZ2VnZGd0tRLGwrUzdORT0Xa27tO9nNOdsyvGO4MbU/f+wjnPGONHZ0LuGGOMMd4VOHT8wR/8weUjlxNzn3/++csTuoTCntBUiIBEPiIega/DSdfGEluVREGHEQcMYqPicEE8VIrHP1HQYUXMxNvmb4wY7AnR87DCnxCqLzuCqANQsAH77Ixn06GnOvhdL+fhrJLteQizZ+3baWuOxq1f7iCYJohas/U3Zu/sI+wNsVZtHT29y1ZcvuqEZ7ZiQRx7c+YlhpzYGHcY5I8E4u6TcfMaN5+iP7GXrVw85SzO9f0cY4wxxhhjjDHuB/xxvPdTej/D2dhZWX2d+jojnzad452vna39YfRjjz12OY87V4f3SvhXjHmPxPxjjDcXf3LxC9+9HGOMMcZ453BYSJh16CDQJdLp7yCRLUHQYSGBsOLQQBhkL5brfDpY6BezOYiIYusnvBIIxdGvEBWzE+fRRx+9fKcsOz762JvfAUhsT+oat5brnIcka2Ajtw48+s76OvmrKyfyqcgvkTMBNBG2ceuyT/LQR1yVOx/r5ge+cnSAsyf62bhW7Ik9aG/1iddf7rbv5iL4msNfDMP91OejkI37WObHH3/8MiaOuMbFKK6YbFuH2Oqu5av20dx37ty55Hp9r8YYY4wxxhjjXuSTn/zk1Wc/+9nXWuOm49zra4s+/vGPX87Kzslw7b2FN3p/AY05Lyv+ILr3BfL3ZK5zObzf4pPVeg8m+mP4//N//s/VV77yldd6xxg/KvvziDHGGGO8ayC6OQw899xzl4MBkc81IdD1N7/5zatnnnnmcrDou2KNEQHZEes82evA4kDhsOEwQ3hUtAl/ivj8jBMNb9++/bqQqd98xlxnSwh8//vff4mTCExUNK64dogxV+Ki+vuJh+JZuzysAedh6yyofiPMZ275nsUhzGHM+NkvRzHV9rTDl5Ltq6++elkvoTW/9ob9Bz7wgcv+6bO/+RG9PRHr2trEeO9733vJ0/6a097rE9seKvIQm59DJAHeHhkjeluL/JrLfun3etBvXD4K/zHGGGOMMcYY437F+wk+0cw52zkYzsrf7z0JY9fHndGdvb1P4LzvvK7ufQNnbpjnFHLhDM9mjPHmsSdyxxhjjPGuwgGCgEeQI94R5Ih/DgMODkQ5hwYHFMJe4h4b/Q4v/MXhy97hosOGp0+JtImwDh2ERrZI2DWeUClGT9g6uOQrlrwUAqn8zNuBRznRvn5Aqn32i9tTrN+P01+xRqW89BGW5Za4rT8bdU8q2yfr7dBVDG1PwypovxW++rMzJob74iOY2BTHXO2TNdpfdg6F1stX27j9di2WfeBHCOZnHeY17n6xhfytQ1/jSmvVrx5jjDHGGGOMe509kTuu4wz+vve97/JRyN4LON+TcF6+/h4Fek8htJ2jncEVZ/XzyVtncmdz78n0Hokzd8U5/7d+67
eu/uRP/uRiP8b40dkTuWOMMcZ410Fwe/bZZy/fkeuJS2KgpzWJc9oOBg4XBNkXX3zx0ia2OkA4YBACXfNT96SrWmEv1ilsOoyYV0xzJDoSQdUOPQ4zPamr1m7eCjsHHfHEIG4q8r1+QIr682cvd/7FvRvnmBjWkXipGJO7dSq4bmMfHL7U7BM8FTEJo/a+PW0t7Kzd/jjQ2TM+1m39+tUd5lqjuYiz7KxVDDnKhZArFl/25hIjUVafazbl0Tyu5ee6uInX4vdR2B0+xxhjjDHGGGOM+wni6W/8xm9czsu9D9D52Xm5cnK2TxtP5RJr84Vzd+/DOHsbU+Dc70zf07xjjDePvZM1xhhjjHclhNdvf/vbl4ODAwEBTyFwGiPWEQR95C8REcRFhwmFD2GRcOsgwa4nRRUHETEIig4hbNh34CEE+mtW3wNjXnPwN4fDi8MQoZBIyNZ8xdVvXJ947JWuUR86KFXMJ6+eUM0Opx2Kay65Kea2B4RLxXhj4rG1djZybb8Uce2xot/6FDGsX06uG+NfbOMg/irihbh85GYvrc8+n997ay36us9ylT8/PsbM2d6KSciFPojr2vrE4StH90jRN8YYY4wxxhhj3G94X8N303r/A87GvXegrn2W69Tv/RHvlfj6qnBOF9v7LM7c6L2N3u/40pe+dMljjPHmsY9WHmOMMca7EsIgsc5BI1Hw1q1bl8OC79ElyDlYdBBx3VOyrgl/fLQ9gcsmYZNomPDIViH4iWku4qKnUIM4yJ4N4TAbomJxzefQIh+FaMhPHxtU/yA4BPFXy4vvGafDUiKqtSrwUcNy48dff3ba8uujol3nmx0bYz5y2hz20J7ZL8U+9F25+sHXoc7+EFzbDzWIqWKK1Rzya5+URNxsxC5H1wmyfNmo7UWxXMvPmHzEVJeH14fXgvExxhhjjDHGuJfZRyuP6zj7ei/g6aefvpylnZPrd+1sXRu9t3Cirb8ztbO5P3LXJuq+8MILFyG32DCm8PuP//E/Xn35y1++vD8wxnhzmJA7xhhjjHctDg0EOKIpEc+hg5DrEKEQ8LJzoPBRzH0Es0OLj9MlOjqI8CXgOXCw59tTp0888cTVBz7wgYuQaB59xsV0aCFOEpGJn0RcByMxxEtA5Ketv8ORHMztQFPfibHz0OTavB2CFPHkac5s8zMuP8W1PDxBbI+yUeSVnbXZT5z+Z7FGe0H0FEsfW3mofd+OWm4JvOz6CGZjCacoHnuiubp9FFufveLbfO7T448/flmTWGp73R6rYc6e0LY2c7Ozj9npIwCLI656jDHGGGOMMe5lJuSOu+Hs7Dz83HPPXd7DcP73voC+830CbbYKvGeh6FezcZZWXHsS19nb+wT62HTmdoYv/n/6T//pMrfrMcabw4TcMcYYY7yrcSBwYHD4UAh+RDskShLz9D/66KMXgZePw4SDQ0/dgsCnEHgJsxVCbQIgMVERT9xT9DSeIOsaDi4dePQ5BJlb27Vc5XH2ndRm4+DTQckaiI/ETiJo87KrgI9c2cnbnly3ATvjYppT/PZJDfsglnXwtbb27pxDPvyMF7d96iAHOdtD8xGBjRFRe2q3tpzEsFf65JNYLBZfgrEnhMtZTDH4d9/kbJyd+95+6ysHH/GUwDzGGGOMMcYY9yoTcsfdcP711OxXv/rVq6eeeuryB9JwjnbGdm52jmfXmVlxrnYmN9b7D+DjDE3Edf7mz04/UddZ3TU/fwz/3//7f/9zH8c8xvjRmZA7xhhjjHc1DgkOC0REYmLipkNC4uIpMhIJExsTW4mJxL0EX2JvYiLhM+GSX+gzrjjEJBJ20OmAQxysT92BJ/GwPtf1KeKdxcGnwq61Nj+u+1TEk4+13M3upJw6uCWKJqaCT2MOZQm8/ORnzflDjvY/e/1sxZMTcVa/cYIve9fEVnGI6dDH196Ll4AL87Nl072Rt4OjvzJ2zdZcxvia3750j7yOeiK33McYY4wxxhjjXmRC7rgbzs3OvP4o+yMf+cjlE8icp3u/wXm/9x2co9EZ2/m59wyyF88527m6czQb7c7W7Nl87nOfu7wmndPHGG8ef/anFWOMMcYY72JefPHFq29/+9tXL7/88uXA0IHDwcJ1oh0Ijw8//PBFvCXy+QtUT3j62GEiLkE38ZYw6MBBcOzQkgDqQCJ+Y+Zl32GGrTjZ6UvANKa/Q41DkZqNIr6aX3bFYUvILEdkX25dm0dOCaWNX8fcxh20rJefAmtLKD3zgX7rF1MfX2IorFOuxjrAiYFzz/joZ+vesJevPh+xzFYu+q3ZnMbbD33uWbmLW06uz32FQ2Nj7Np7tdeDWGOMMcYYY4wxxv3MN7/5zUtxru7c7H0BpXO/M7bzszM5G2fo3oswzpb/2ebTmR18ncOJuP5ge4zx5rIncscYY4xxT+CAQODz5KhDBUFO3ylIEuj62FyHjw4qfBw6QMzj7yCSeEgg1Ad1orDDSfHZJ0iKq7AVz0EG2RpLjBRfWzx9chZDv5iV+tgRnPmUc3nereQnrvzkwx6njTmyI67KhZ0xmK9c2KjlYp8dxE5/6/Lx1PaVn/X76KTEVTUhWj581dqE9b4jVxz3y9O4Dnzin3svjn7XipzNxa9cxZFDAq9+62ejtDdqNpCr14iP6x5jjDHGGGOMe5U9kTu+H87UztieynX+TpR1Ru69AGfm3kNQO3+DrbO/szabztadt53R1c7xzuLPP//85ftxnfmLPcZ4c5iQO8YYY4x7AgcB4qMnOB1EHCYcQHoqFA4WhD0HDgeKhEZ+DiPsHURcO2yw4UNkTJDVBltPnvJXiJPNWx8cdPSLW7tan7xd128esR16OkDJw6GKaCrnbNGBqeuz5itGRf4d1OTHTr/4Zy2+eYyLIRd+7Yk41uvp2doJsvo91awu92yIu/ZSf4c3e+Ce6Te3GMaIuuInsBJX3Re5s7P3cjSPfOX60ksvXZ601icfvmKf89lHcyjWoybadhiVq7Ynu8cYY4wxxhjjXmVC7vh+ODM7Szsn+5QyZ2vnZmfu3ldQn/29l9BZX0E2Su8BwHscPkHtt37rt64+85nPXGKMMd5cJuSOMcYY457BgYEg5xBC5HNgSGTVJu45TOhPxCPi8mPn4AFiob4EQnbiKh1U2J/irrZxc7HPH2pj+jr46JNDc6izZyu2POR3riUbPmd9oq+5Kuby/bNiujbeWoyrzUVQTejNNxt52Vt21glj1kuQ1UeAhXZ+8klAtV/6jRNXCa/i6e/J2fYRbNnJi514RGX2p8Cur73SVqyTkOu+uzYG85Qnkbr9N6/5xeJjbIwxxhhjjDHuRSbkju+Hs7CzuXPw+9///ssZ23sB3i9Q0HsVvYfQNXq/oDN16O89BPG++MUvXv3yL//y5ZO6xhhvPvuO3DHGGGPcMxDoHAw8rUmMc2hwmNDviU7iIIEPCaPGHDAcRNgmxEKbjT6xoCYKqhVjfPvYoPrE7Kla5YzzvfoU85pDjhW2DkeK+bq+zmmjiMe/J5UV43Kt2BM2xuRjbn7G1Ow7lLU+7cbNw5+Qapy9mAnn+
sVk774o8IRx9sbsl1hEX+vtQMk/Ebf7xcacfLXlo+3anPwdQvnr0zaX+Obip1iLWPqahx/Re4wxxhhjjDHGuJ/x/skf/dEfXf3mb/7m5VOunJmdnZ2xnZk7QztbO7M7k/eH28bRubxzN1/FNZ/nnnvu6utf//rFdozx5jMhd4wxxhj3DA4OxEOHD09xOjA4POj3cbkOFQTFDhuEP6Jfhw4FDiQOK/r58+HruhgOJEjUZKvwTZxtHn0daviyb47iOhRlpxi7Xprje5UTPnIh4hIojYttzeZSa8vJ06py0T7H5GaflA5hiaPsFHMQP42fT+IqYvLVJx/jYpjTd/2ax0FQTMijPeUvZx+xrE7YFVMf8jM/G7HMJb6irb94xszBTx5iaRtnL1ex7NkYY4wxxhhjjHG/c/v27av/8l/+y9WXv/zly7nbmdxZufchnKN7H8E5mo2ij01nbT7O1dXKnTt3LvHHGG8dE3LHGGOMcU9ByH3hhRcuh4vqPk7YAYJQ56DBTt3H7TqcOHywJ3o6kIQDirHE2Q4wYup3yEmANKaY05Oe+jrUqAmG1/s6DNU+i/nk2QHqLPVfH3doSmgmeFpzY81XbPl4MpaPPrkTORVt6/B0qjhsPNlMJGdXTKKnOazBHtuv+omiZ0yIab9ac0KuffGRx/q1rcG9kGMfdSxua0u0ZSNH+fprYrV7Y0+Mt+fafN73vvdd8i0v8Bebn/umNM8YY4wxxhhjjHG/4qzsk7N+7dd+7epLX/rS5UzvbOwsbcwZ+yzO2Z3bz/cXFPb8nNON/+7v/u7lo5XHGG8d3r3ad+SOMcYY457BoQFExAceeOAiPhIOfRerj10mzvWXoyBMGmeHDjA9wQrXhEAHFP5sCIIONvrFUoxpO9CwEdMBhmhozut9iYh8tAmkYhhv7mp0MOpaPDlVtPmGufoYY7nmy5aQSpy19utxrEVN4E0A7cBmTEyx+35bca3FuhVi6oMPPvj6Xtsr9moxiabie0pabLnJh+hrD8Rjq+7JWHHMKx9P8rKTD8Qwv7UTfInBYrHXL4b55CaP7hk/bXmyU2uzFd9rI7sxxhhjjDHGuJfYd+SOHwbnb+dgZ3xn6t5P0e9M3Lm4M7S2M7maTf3Qdj4n4P7v//2/r7761a9eztljjLeGCbljjDHGuOdwgCAMOoAQCftL0kRJ44RThwyF+Gc8sdMBg6+xxEvX/LTZ8FeuxzOu5pO42IFGn6Ktn9CYv/EE3UTH6wcd86IDk3Gl6wRLc4jPnq02G/GLcfZlUwGxVU7txzkP8ddcctXHp3nZP/roo5e91LYOoqy1Eo7dFzaJvg6I5pEvCLaK2OawN+6PeTwl2xO+chaz+6ZNeHXYbI72SbvcXPMR09pdJx67X+2FcXmIKcYYY4wxxhhj3EtMyB0/LM7EinO3T7JSd7ZWOz87o3fm9j5AONf3Xod+n9b1K7/yK1d/8Ad/cPlj7zHGW8c+WnmMMcYY9xyEN0/fEuh6kpMgR5w15lDhIAKHDdcOK4l9cPhQwKfDC1zXl01+Di7VicgdZlwTD7XNVZtvT4uyS9RV2HSYSixV6nMtFwcp9gRQQis/Y61N6fAF1+YkWMK8+uQgFn9tc7Zf+omrDnPs7as5jMtB3sag3R7JiTAsL33lko8ihlzYn3PI0Zi53T995iQOt09iiWmeYrnv4otl3HrMb9wajaF1N59iXFyiML8xxhhjjDHGGOMm8PWvf/3qc5/73OUpWufzzuydpZ2Znc07X6NxOF97r+D3f//3r37nd37n6qWXXrr0jzHeOibkjjHGGOOeg0jnY3uJeQmNChw2HDwcSIh0DiJqffzYOXgYVxL2Ekwju2IZJzpqwyEmIZY/mkvdgajr5iRIiiFPhyMxgo1x86jZFzMhkpiZqGusPNT8raF1WIMDFoxna15j1qSYR1xzEEbNp59QrphXn5wVojlRlo0c+Pk4ZNfdE3ZiWkf7bl36+15fdvo6JMo/Yde1uGKKwVbbmLn1WU9rsQY5wphreYvDzrrU+s1tPuKz/eiejjHGGGOMMcYY9ztf/vKXr375l3/58n6Bc3Ln987Kzt6K696TMOb87Pz9jW984+rf//t/f/kD+zHGW8+E3DHGGGPccxDtCH49MRpERx/PS7Tr8OGwkZinJBIm/hHx1D4KSE10VBMI2Ws7uICAqO0Qk+hofrH0GdNnzAHHtSdLIRd97BMmq82lJFqKp1iD75wlfIopfgWESIUNcVc/v2o+5W5NcrSmcq6Wl31jc85jjGAuj5645cOmvRdPXLb5a8uJuEvYVYyx1WeN+sqPbX32TOkjmetj5151cHTNT1y5lhe7l19++fXXgPHyUhvXb71eG+6LfMcYY4wxxhhjjJuAc7Ync//rf/2vV88999zlrNz7Es7LzthwVk7cdQb3R92+F/d//a//dfloZWfzMcZbz74jd4wxxhj3LMS/RMhE21P0M04QZENQdRghiiYaEgqNBTE0odO4ONr8HGyMJ1rqT+w1J8RywCmGNht5icVOn0JYlFMC5Cky8ikX/W9U8lPkqMitYh7rzda4tcvDOlwTPO1d4/JR+NvDW7duXfLXboy/78kl7rYuBzrziCWma/3mYJNoKicx+shnMbpXCeXWbh/E1M9HTDXBvcOk+PJi28GSvbo8W0f3W9sc3SOCtMJvjDHGGGOMMe4V9h2540fB2ZgY++CDD17azu3n+xC9R6BWnnnmmcuTuD6W2evOOXqM8fYwIXeMMcYY9ywEPQKfQtgjgOpzyCDMOZgkJuqD2qHEIUUNvsQ+hxSCn5owqN+1sWI61CQCJggqxsRji2yaA/qIt4mLiqeI2ekrL5hPMYcSbJRzPHGyOPYjAdkc2RgvJwc0/vbLnMYrbOSaIFvOCh/9YpaLtRNhfeesOfnbR9j/xF2+1sunOeTJ15hcibXlLYZr/mwJu/z0sdPX+qyjXDype4q8hH5rSNRmD08bm5c4vEPoGGOMMcYY415iQu74UXAu7nzuU63UzvSd1407XztLOzP/+q//+uVpXB/L/O1vf/u1KGOMt4MJuWOMMca4pyEQEuoIggmDCXyJfImEtRV22iBkEgYdWNgq0Af9CgiExefnGsYTOxvT17h+fWI3li0Rkp1iLDG5eHwIrx2m+Oo7bYzp72lWsbRPG8V81p7wWRy1cb7WnVCaSOvwBqIqXzmeT7yydx9gjD3f4sF8Yp1r5s9OLH3sjYthTXLlb1zbePeOnz5xtMWyDjblZox9dtlAjtbu0EokHmOMMcYYY4x7hQm540fFGd2Ttl/96lcvZ2NfheTM7g+dnZF9B+6LL754EW5/8Rd/8eoLX/jC5SneMcbby4TcMcYYY9yzEOSIdJ60JOh6UlObkOeacNfTnT3FmXiZvTEHFiJi4iIbgp/xRFe4JgiCnXj6+FezFcccbFw3pza75mBPGK1PgTnkBbX+hFzwdS1HBcUgpjZf44rx9sEeGTc/MdR1az/3JaGUqMqGn3Hx9BNC9euzjtZknK/io631FUORS4JtH2tcn7rY9lcpT/ZykKc5EtXzAfvG
9GuDrz5x5IDW4SDqL5HrH2OMMcYYY4x3OxNyx5sJAfdP/uRPrj7/+c9f/e7v/u7V//2///dStD2J+8ILL1zO5mOMt58JuWOMMca4pyHUETl916qP1CXsESWJkYRBRZvYS2zMx7Wa2AdCI1v+RD+1dsKjPsIfvwRMfYr4BMF8tAmGhE3ol49+YmIxiZL6ejI0IbHcEl1di8XW9XVhlD0x1Zi2PCvFtX77JBd9jRvjxz8RWL/1KK71+YglMfJLFLVeY66Jsgmqrc8a+MvXuLb5jIvDz5jv3NXvWpGneypXNrDX8tXW39w+Ilk+5uPTE8REZHuTqCs3/WpxxFCM6+c3xhhjjDHGGPcCE3LHm4Uzu3Oyj1D2h863b99+vXgq15nbOLsxxtvPhNwxxhhj3NM4SBAACXankKqPcEk4JOKdAqA+IiLhTj87NsbrE4fAx5aPeVwTGY3pE4MfzOlgo8jD/Pz5ESDV5ZpYma+4YhIi1fnKg4/aXNDmx0dJnGVTTmcx94MPPnjxudu4fRPDmLWLr9867IfrBF5tGGMvXwJwe0cM9WSrtjnldM6Lhx9++NJv/Xys4fHHH7+st/0zzo6NfsV1c5nHHpnDgdIazQHjYhCN5WxvEpTlIDZ7dmzEKp99RNQYY4wxxhjjXmFC7ngzcS7uvYDrRb/z9RjjnWFC7hhjjDHuaRwmiHSexEzYVBNWjflelwTKBNWExcQ/Yl8HFn61iarsiX4gPOpzwNFnHrZnHAJjfeoOPsXA6Uv4lFfiojqb1kBkLL/zAKWWi2K+c13ESyKtecTSl3+xCKPGoW0ewiZb/Y3LiZhqTNEm7lqrWPz0ufbUrntR296rrUVO4shbrY+tuRNTjRW7fNjdunXrElM8dXmzZ9OearPxNC47Y+2f0lzZu9bvL49feumlS8wxxhhjjDHGeLczIXeMMW4Gf/aO4hhjjDHGPQghjsCaCAjCXCIgwS4b/dpgexbCIZE2ITcR9oSvPgIhxHXNVuza0FctJl/iZf76ExONE08THF2zUbKRm+I6sZiteNamJOCeYnY5G289xhN5ob95iaB8eorVXPbH2tTGToFYv7wJoc0tb7GMnfsiZ7bNwVZf949NuWubj5hM7JUHH7huz+Su1oZ73r3Mjh8buRTDnOY2l7zNo4wxxhhjjDHGGGOM8W5hQu4YY4wx7nmIcp68TTxM4CPcEfMIdGoQ7xJF9SVGahMo61MTEpHIyZdQqL+CxEF25hVPH2GSkEgs5Jt9+Wmb09zstflaT3OKYTzxNaEzgfIs+tgo2uWjiAm+xTJf4+YyVq7gXx5i87OX+eqXr2t2xl3bI6K1PSSS8mWXOCs3tsbNbYx/98U1f3g6ly07/QnMMD+79iM/bfPAHPztY/fF+lzzZ8fXunzH8hhjjDHGGGOMMcYY7xYm5I4xxhjjnodI6ftNibk+qphAl1j3yiuvXMQ/T3USCfWpCXdEQiJewimxj/CnJhwaVxMm2bgm/BELCYLimsd8iYfFUsRCYmWiJDt9/Nip5dRHCjdfcbLrWiwQVPn4OGMipGtj7MRXTh/5ml+78fKwHnvku2m17VvjfPmZp/3KD/p89DHYGrNH4snLvPr6/txHHnnkEou/p3Mhvj3k39O5bNlYQ/vuvtlz1+zM/cADD1xiyIuPGPz56dNWi6Gfj5zY6lPsu9hjjDHGGGOMMcYYY7xb2HfkjjHGGOOeJ+GOcEiMIwoS6wh0iX5ET2MJjWyId+wIeoQ+QmMCJZ9XX3314l88vmIYT2hUxOHHRj8hlh/0IZER7Am3xhIagz+R0bgcjCPRNOGRz/WS6CkXtSLegw8+eFmD+es/beRDBBZXDLHaFzkQYxOs5WNf2NhTha14f/qnf3qJJ3/3gj8fMRTxH3300cv+NI8a1muO1oZii8m//mKqH3vssYug61pNGLbe4ogpD/3maE1y1bY/rZv/c889d5ljjDHGGGOMMd7N7DtyxxjjZrAncscYY4xxX0CII8QS/QiFSv36iI+3b9++9BPyPMH78ssvX4qnT7Mj6oHg56lRQh88LUpYTGgF8ZJYyEdMbcIhu4RSccUiGtaXIAn9bE57iGkewrG5CajESGKja3YVfvoUeaiJswTY1mBOduapyJMNkdd8xVFrW8sprupXrmNcHnIzr7hIGIe1mMteoHskt0RcuYuhLxG39dXHTsz2h49rfd0z+6RoJzKzAXsQmnuNsJGD+GKW4xhjjDHGGGOMMcYY7yR7l2qMMcYY9wWERk9ZEueIf0HAIwImUia8dk0YJOQhEU/piV0CH9iyq2iLQSR0ncgptnHiayKkvkRFfRX95u8jhcvJmFrezSc3RT7s+Yltfn6KNhu5s5O/IlY5shefjThitz/G1GLzg7wTkI3JmbhsXF9P4YqfEHrut3HxPPErJ+3WeIqz7aXr4rcutJ7mcm0N4rCRp1j6EpRhjI91smkNxrXNF+bwEdFsxxhjjDHGGGOMMcZ4p5mQO8YYY4z7AuIeEfd8ChTEOoIh1NqEOkIeIZNo6OlMbSKkOiGzmvCXIHiKkEh0TfxLKCQKmsv4KR4mvOanP0G1XJtDnQ2BUq7EykRN9sWWqxjZaYtRHLADXzYwXk5qMdsHY3wSx8Vvz1x7upXIa0y/p2H5i5OvufWxtzb2/OQnV/PoN7++xFi0T/m77t4WTx878/Nr/+wRzMUngVmRV3Hrcy2G7+8Ve4wxxhhjjDHGGGOMd5oJuWOMMca4LyDO9YQoMTfxkThH2HOtuO6p1QQ9NvpcExYJeT3VyZ4QqOjzMczEQUJhYigxlQB5ipjNQyQkNLLRn2Bam5085ENQ5pMAajybs8Bczc+nj1HOt3L6J5Sy0y+HcsvGk7OJuPVbgznkp+gzT6Io9Isvnn1qH97znvdcavekHNwjc9h3wq5+ey4v89hfPmytE9r8El213eOe+OUvF/NaoxjBrr3q/rJrHn323z21/gm5Y4wxxhhjjDHGGOPdwITcMcYYY9wXEAMJiITBnvok1iUs1kfsI+ypCXjq73znO5fv0CU2siVg8n322Wdftw3XxEQQ/IiG5lZcEwPNRSBkq991wm1iKBHz7GdbbKJjfsU+ryvsCaXmPOeqZHcKtdb3//7f/7vM0xgh9MEHH7x8rLD+9k2tyFe/vUj4VcxpD+yjWM3JxzztUwK37yXW9/DDD1/WWA6+u5iImxAsTvfK2hQU396ZU1tOffeunAjBrUMObPiXFz/FtRyMm8fcjbGfmDvGGGOMMcYYY4wx3ml8KdgvfPdyjDHGGOPehxBH2PNkZeIc4S/RUEncJQIqREO2BD0CH3GPPWGQL2FPHH2e3NTvmg8BUZ2oKC6MGyMIqs1bn2Iegi9f/QqMFbs55auW3xvRmPr6tSI/Re7mlmtipblOYbnCj4grJ2PyUEAwVeQI67ZGfuyJ2nLuO3HLiYhrvvJg25O4bPQpkFO5sm+P5M1WLt1fbfeRrfuVHx+vBeK1ucDPuHuJ9lafsZ7oLo8xxhhjjDHGeLfxyU9+8uqzn/3sa60xxhj3K3sid4wxxhj3FQm0hD8QEQl
yxD5iof5EQYKikqBJAGRXf0/GstXfGDuIQ+Ds6V9iYGKh69r8CI5iJWrq10c4LE7jipwqp3+214uYp3/FWGuu6BPXuuTBLgG6MUKsYu7687cH2nzZmtv6xTBGIJWvcf18tPvoZ5gT9vJuTyCblx9//fzE1q9t3+RvfrDV7glp7fIDH+MQS9EWs37XXiOeTlaPMcYYY4wxxhhjjPFOMiF3jDHGGPcVRFVPuib0EewSHxMnEwQJiNef3jVOiCQAEvOIgfwJgWzEhbZ+xROc2mLyEQcExQRD8RIlFW1zGSvOdTEz8VJMseViPHFULl3nX2ke/WwUyE2xdvHMYaw909afwKpdrPzlzpZgng/79pWNvmJZY/tN7OWjv3Wda5K7MTGK3ZziGysfbbFbHxtisRhiia0Y0ycWylO/axh3LSdCbvszxhhjjDHGGGOMMcY7xd6dGmOMMcZ9RQKj0pOyiajEOsIt8c+4tjFCocK+PiIeGwKlvkRLNr7rVU0YNAb2CYR9/LIxJTs+Zz8f+RjXrxAoG68YV/oo4OKo5VRMNJc4xSwOkdJ36vq4ZHYJosbY+Z5gORFbidPFVLO3F30fbUIrAdh+iksAdS2vYttLe8JH2x65F2IRssW1t8b0y4Mtujd8XCvlxT+BV976tPnLXz8bc/PXJzfzsbMO+dsT8+lnpxCDift8xxhjjDHGGGOMMcZ4p9h35I4xxhjjvqMnbwmfCrGOeEcoJPohkS5xVkkorU9NOEz0dM3P05qEQsKguNp8tUEMNCd/EAoJjQmMxtkmLBIcCZCJzvqKVc3ftfnNJYZ8CKjmF0OfOdjKV0yw4SPX1tK4OmGTTSKqMQJr67Kf1iS2PBW+8mWjLl77Jy7RWM7FM39isD5ttbxd8zFGLLcecfXJwffcQkz58NNXfuZ2f80nT/HkIteEW9fgY065y1W/edXZ6bfHY4wxxhhjjPFuY9+RO8YYN4M9kTvGGGOM+w6CXkIe8ZBgl8hIoFP+9E//9OqVV165CIbf+c53Lvb6CIFqvlAnMBILE2eJhYmXREHFtXHziFkeClGSmFgOxQVf18b4uy7/004fCI7mfuCBB14XORX9niRlZw42RE9zKzBmHrkqxNk+RlgxXkFxs9eGcfthPn2ezDWn2Ob1VKs90k8QZc/OfOUinjH98pQHO/uvT2xCq/W7j/KwZv7mKmdCdTmq+dsXhS9bPq1bXT6EYNf87L3anO6V9Y0xxhhjjDHGGGOM8U4xIXeMMcYY9x2Eu8RDwhzBzjVRryda6wPBj9CXfUIjO2IeYVK/p14JkeIXj/hHHCQeNqf+fMyjrTY3f5wxykse+s7+6+36xJMP6oN55U0Ulbu41yknuSjmDf2ETnvAny30EcSt0ZjCz3x82vPEVLmp5aWfjZz4nGKqIkfjMJ8xvrWN8TF/5E9wlQO082Xv3vC3Rv36ylm+hGKxrUUO/Fy3h8a7X2OMMcYYY4wxxhhjvN1MyB1jjDHGfQcBzxOcPWGZGAiCHfEvkY5Y2cf/Kvr6mGKCXmIof4UYSRQ8n/RVPHmqNpfCTixxzvlP4TMxUtucicZnf+V6W4nr/a0pEfK0LT/7Yl3yi8bQ2pFAaj+t2RhBVFz9zYv22/rOtnkSnsVR7Fd5sNdnruLXJ/YZl005EVy77l7oO0Vl8RKTWzN7rwFj2vr5db+0jStjjDHGGGOMMcYYY7wTTMgdY4wxxn0HAY/gqPSkbIIj0Y+g6KN/CXeEPKKdvoRPQp6xPmKZjb7ER9eEW+MJjErX4JeQ67o+37mayHmKoArxknhqjsTKc/xu5eRsn9dyOn20ram9qa9+ecjb/DBeLkgctQ7+xSd6ErBdW2c++uyn+2HN+vm6ztec1m2PXdu7+ti6R+9973sv90lf90FMFK/cXbPV9vHJfb9xc1uDa6U82Kv5JOxOyB1jjDHGGGOMMcYY7xQewfiF716OMcYYY9wfEOcIcb47lThHHFVcE/l8Dy4BkB3hLmGQKJmIp22cCOj7bol/IAgSMcVL+OVD9CNIEgV7qlMRkzhJONSfj9gJxGfRT2CtzU9c1Hedsy/bk7PPtWIetfxcW3PjsH55y1f8RFJP+rJvz9iIQazt+2azJ3aLm0Cqz76bo3sC98M4X3vEnp+nnvnYp0Rx/uZtL+2Pe8Iu4TWxvLWo3cMHH3zwMk+ir/kgf3Npe820Jnaun3/++YvdGGOMMcYYY7xb+OQnP3n12c9+9rXWGGOM+5U9kTvGGGOM+w7CHbGPGEfoUytEOeIigU6bKEg0JfK98sorF0HRdYUgSPgjJBIOCZme5BVTITYm4CrabBIRxeiJ0eDHh1gpxygG2ER9OG3ijfpOrOHEOCFTIYbKJ4wRNBXruH379sXOumC/oE/+9qY9aZ+g/cADD1ziJ/gW19rFJp6aj637Yg5CrHwThfWdgq2+xGHIR4ye+i0nefCRp7nl2FynUOyavTGYm40+c6ubf4wxxhhjjDHGGGOMt5O9IzXGGGOM+xJCIZGPkEecI9QR+QiBRDlCX2Kf8QRGfa5BwEvIIxK6TgQUjx3fnhI1p2JOhVicTbgWxzx8lETEhEaiJJvT7yT7k+t92mefWGcxjydirYtgWT8fOcvftTG2Coiiibl8xWg/2Fh/YmoCqHj8jMGYfn3EVIhhT8pbjPZdXmz1n+Kvvu5Vubfv4rHRZz7txtTNJQa678azt/byGmOMMcYYY4wxxhjj7WZC7hhjjDHuSwiBnvIkzIFoB6Ic0Y8ASTDtI34Jhj5W1xOhBETFeGKemi8R0LV4BFzCYuKtuZqPKJhgiYRGnAIuO3Vj7MxLMM73B6U4ZzyIeRbrUKxRjifZmFsOPkr5tNHfHvC3N+bS11rYGNd3Ur+1h/0yX0/iattP1+Un7inaimMecfRZizoRGfIqH3HYiC1ONtanj41xc7r2WuCvnEL3GGOMMcYYY4wxxhhvJxNyxxhjjHHfkgCY2AqCHCGPeEs8PIVcwh7RL4GSUOtaHD7EvkRbgp9xH8msv9iuFWKj+ITIU+RUE5jllNCrH2dt/nPsFBJd1z77r5PvSb5ylP+5L9BPuCQm//iP//ilD43L6aGHHrrY8LW/xbQ2e9lHLffkLuyteOK2/voVyNc+ywuJuvrFF8/33PaUrjnEkUv7rNy6desyJpacXBsXj233VK72uTXrl7siJz765M1mjDHGGGOMMcYYY4y3kwm5Y4wxxrhvIQASBQl4hDsin2sCHwHWd67qI9Il3Pb9uL6jtSd6+RIMn3322avnnnvu6uWXX74ImIS+xFgxEoWD2Ei4JAaKI59ESfb5K/oUsPF9veaXHxIuT5uu871bibv1i5Goqq/4avMaC33EzgTmYliXfdSfKNocrQP20Pft2vtEYOO+R1dbnFO0Jdai+2ePHnvssctc7o849lWfe6Hf3ERmc/HpvlqL8fe+972XuaxXnxjm0ic+9Lm//FtnaxpjjDHGGGOMMcYY4+1kQu4YY4wx7kuIgsQ7op5rwhyRTptAp49QSRQk6qqJjoprvsQ7T4ESdfkTBQmPRD/iXk9wEgXFZW
/M05zaxeML8xMqjZ9PlcI1QfLOnTsXn/I8RVO+UV/194JN5Ww3d7nX15z2SC7Wpy9B83zSlm/jCbTWwbYnbaHNxl4b7yloeyOGwpeNJ2DV7g8h17q13RfX7YOYidCu5cFHfNfycc/E1qdAvwLzm1dMMdw/99Q1rMETvgm9Y4wxxhhjjDHGGGO8XUzIHWOMMcZ9CSGPCEjoSyQkpBLkEgbVxD022REFoSYCgqhH+FMTH4mQibmEPoIuXzFOIdGcp0iI5gY77a7lyz5htDnKKbSv910nm7v5QvxKuSMf/Wr5nJx75lqOxZGrWNahGLM/YpxiNlt7a1xbPH72mH1z62u/tO2LOGpttM9gp02E5ycfteL7j9VsxTW3unssnrnlkgCsdr+J931M9hhjjDHGGGOMMcYYbxcTcscYY4xx30Ko66ncCnEvUTbhkECX6EikJdr5SGTjRFrXBD0+hEIiH+GvJ3z5EhjN5WnVREt2CZZ8CY2JiT0JKp4+AqJ45lROH3Opu07EjPreqMT1PrV5zJ+I7Lq5jCnlmJ/ruwmyxuUMPsatxZj1attL+5wNrN2YvRdPHsXnr26f2ZZLNrW7Fpct2PDtKV9t4/rM4/7KWZGXPqV7L47axzLLfYwxxhhjjDHGGGOMt4sJuWOMMca4byHqeXK0jwIm5BHxPGFJ0CPcufa0pkK00+ejdRXiIBEProm54hETe+rUdR/FfIqPipjESX3FkJOndsUhHiY0stGWQ8KivgRKdaVYZxtiKyfap83pV2ku65BPPvoJq/JNnNZnnNid0Kmvj4Nmb+8Ip8FGv2JP7LF5xGz9YsnBtWJMDnzM1z20r/XJS/1jP/Zjl3tTzOYwVnz3iUDbWtnp16et33xyN9bavV70P/nkk1cPPvjgpX+MMcYYY4wxxhhjjLcD70T9wncvxxhjjDHuP4h5xFQCnZooR9xLTNROdCTo9ZRoT+AmAupPaCTaKvrFJy720buJj67ZG9PHXzxiqWv9bNSJk82n5lvhf73grInKvn+W4GltCY6nTQXiyr22a6JmArOcGm8PtItt/XBtPfaSL3uxta1PnVjKl404YhoTF8aKw89+ipVwzp9wS6Tlr8946yW+Nr/7LC97Ip5xsG2f2XYf4Jqv14l863MtH/lCPN+ZPMYYY4wxxhjvJJ/85CevPvvZz77WGmOMcb8yIXeMMcYY9zWEN8KeQtAj5BHviHlIwFQId4mJhEHCYUUfMY84SFQk9IonTnEVYiBb1+zEUcTtqVLC4FnY8ZOD+PrE1V8xXjkhKiZYonXldzeKcT0We/tU7rA/EJ+9tSbEElPVrZuvvdVnLT3ZCrViH/ip+fm4Yuttj/XzNYeY2om5np4WW2ncfWBvXBxCrBzyMae2J4YJ9try4kMsZicPNubSx4+N+6Ww9cQwmwRztmOMMcYYY4zxTjEhd4wxbgb7aOUxxhhj3LcQ+whuhDkCXKIsATBhVk3MM068y5aAl7CYmEncJOgRAxMhiZfiIQFWDMKhGK758xVLfQq1+ZpbkRM7NYxXsjcutrWIL08QGsU2X0Iqm2zZNd8ZV0nwLH+++s3ZmHK2zSNPbfuhLkf99lGbrb72Qv5s7WUCachfmw17cbQTzvnLzbg59VmXPmNs+RkXu3H7oU9h1z1A61dad2tQ+Bhzrx966KFLGWOMMcYYY4wxxhjjrWZC7hhjjDHue4h4ibIJeQRGIl2iHhLuQLgjAhLvCLYJjr6P1TWhTwxiYkKiYi7Fk7LEU/ETHMUyV0KhsfyIyfwah/HQV+zsCaXylFfCpMLWOovNridU9RdfbQ2Jr2rx5c1PX/3mUPNXN6+6dvnJuzwU1/ZVzMbztXfy0rZHFWP62do3+872zMmas2Gvr3shXh+PbU4x2we4J+zUvQ7YNM6vnMpVPN+T+8gjj1xsxhhjjDHGGGOMMcZ4K5mQO8YYY4z7HgIe0Y4YRxQkRibyEQX1EfQqxnwXq2sf1Zt4m9DLvrhEQvWdO3cuT+H23bkJvxWx+mjfIDrmJyd2xEmFKAnzGSM0+n5dH+ubUFm+2SdwVvQbJ0aqxeErloLTJ5FUjsU8++WbOG28uMVR7IX1G1P0NZc64VWxd54Wti52Yuk3R4Jxc5y+7of7Uj7sPCWrz/xisVHzMe5+ytl+ars/bMUP91VMsLMPifjiuX+EXHPZf/HHGGOMMcYYY4wxxnir8E7iviN3jDHGGPc1xDxiHFHu/Ije2kRDIh2BTwEhz/elggBIuAOhj/jIRyHceoqT2GgOtgS+xOFTEFXYiy0OP2Ihu+sirhhqwivxNoHytOObbeVs43qfdYtpDeY9heV8IL7x+vgp8ia82kNx7QFBli20xWy/zNX81p0ImuBrHv7ug9gJqfbGd+KKy06+SIDnQ8QtriI/tf0SF9oQx96jMXHOa3max+tCzb6YxWeTHwFeHmOMMcYYY4zxdrPvyB1jjJvBhNwxxhhj3PcQ4IhvxEdPbRIDCXvEuIRZwiLRkRCpj3BHcHRNSDRGOEw8FBPigogrfoUf/wRC9j5uWVzin6K/PBS2Yid8yklJQMwu2+wUdH2W6/3hWi5iNO91O+PmOjEmH2t0nZBLeLVeffxavzF7x8cemas9di0Oe/vAxl7zaQ5190Ps6zH1ydE6ulfuMTt7Zy7jfMo/f3bl0ph1iFUOcuw+Ng877W9/+9uXOcYYY4wxxhjj7WZC7hhj3Awm5I4xxhjjRkCEI+B68pYwl3BIsCPkqYmFhD3FNbFOP3GQYOeaiCcOuiY4imkOtWLsjF8xJj5cnwKhwoeYyMa88igm20qCYuV6+yy43sdebAViZhvtwxkbanl2Xa72ga22dau1E0qN6+fbWGJtuehrP+TETjGP/oTe9pV/e4v2no+44sDTs4RkY0Rc97N7wUa/+dkUr3wIvtCGPJSexpbTGGOMMcYYY7ydTMgdY4ybwb4jd4wxxhg3AsIcwY2IR3w7xUAiXkJmImNi3ikIEu8IfURghRCo30fxJiDyEVfbfD3F61qfQvzt6dRExITEfORoXvbmPW1DW39jleJVzr7zmp9ciZryRjbZ2S82qF/Rr8i5ddkrbX7QboyPseJZW3kYq48wSzhV87cf/ORqvCdg7Yk22LDV576wUc7Yxl3Xx9aT2GK4F3Jk597oK7ZaPuWv9rHZvjP36aefvnrkkUcuvmOMMcYYY4wxxhhjvNlMyB1jjDHGjYAw6qONiX7EPMJgYqw2MY+IR6RTetJWIdrdunXr6sEHH7wIgIl5fImuBED9zXPnzp3LXITCU2Bk75qv+RI9Ew7FEY+dwqf5rsPvLGzOcn38LOe4eczdxz5HdgmjcsunfK3VWHbWa3+hz37pb+35nT7azZstjCe4u3a/ytG98WS17zDWZ5zYytf+2TP2cvHUrPn6DmR9cvFdveZ2zb51iUds5p94zC8R1+uh1wcR99FHH728PsYYY4wxxhhjjDHGeLOZkDvGGGOMGwNRkNhIlOsJVCJegm1CrprAR0gkDhL0CH3Z9LQuCIPPPPPM1csvv3x1+/btS3wx+St8xEhcFAdyk
I/6O9/5ztUrr7xyERwTFrMnHvKproDIiPrY3K2cftkVW2k+ORCeTztFTsRN+8HOmvQTT4mYbBM6rcGe2Cf9clT0tZ9EVPtsD/URtdmivvJqP8rDGLHVPMUm0vI3h9p+gr1xMRNuW6u9hzzF1CY88+++wLWPZbZm8fSL8573vOeyVw888MBF5B9jjDHGGGOMMcYY481m35E7xhhjjBsFoY6odwq0FRD5GiMeghBIyCQUgriXMOk6HzH4ESa1iX9EPxABm48IqYivz1g5JDQaq1zPswLjruVDwCxPPuJE9tepXy1XvmLy1dZPvCRa2gfF2uC6J47Z2gtjibhi5d++sOUDPq71uzZvOdgfvomn7VfCeH3wZC4/bb4+6to8ruVobrlBPHH0g13irJqd6+4xW+vRVtjLFz25q58Irj3GGGOMMcYYbwf7jtwxxrgZTMgdY4wxxo2C8OYpSoJchVhHNAyCnkLsI+YR6IiEp3CX8Kd4urQnNhMKxXXNXmyCJX81fzQ/YVLNTlw+YlTrV1e0y5eYXI7llb25rEEe2Vfjep+6NYEfxEx8lat+fWKbk319RE7XjanFPfNPYC3XBFNxzKOv+6LPOFtPAIutr7UScdnx07ZncmgObbHYt99sFb5swPfM6/p683ddnIRk9vb/pZdeurTHGGOMMcYY461mQu4YY9wM9tHKY4wxxrgxENwIdT7+OOEz4Q7EO+Kc71VVTruEPTXhkKBHvO37bgl72q5PATKR1He3ikd4Bf9EQfY9ado8+vhWtOsrtrzElat1iHcKrWwSj40rfIt5IrZcysE+iN9YgqjY5aNtHmtKQC0/tbH2S1sucnDd3vPjbz5FX3tpTFsc+6wYa4/qa33stV2LZV5x1GIUz9xy6Mnh1tda7Busx9O94re3bK2vvVJ8vPT73ve+y8ctizPGGGOMMcYYY4wxxpvBhNwxxhhj3BgIcoQ64icSGhMGtYl0Sv2EOoKf72H1kczEPU/06lN8ryqxL4EPRD9iYd9923e6sjFGBFSIfgmefNUwbwLjWSsgiPo+Xt/dqk+O4hZb3OYiZJpbDgRfvjBXMc/CTzz5yL8+cfhYj9ocFePlaN2EUG1jRM4EXshDv9ya07h7kp8+c4ulbX/FSejVZ+89UUscZ1cx7h7nay75WrfrhFzX4pmrfbPmxPZsEobloO4+ipeQLH+vg5/6qZ+65DXGGGOMMcYYY4wxxpvBhNwxxhhj3CiIcS+88MJFUCTWgdBHwCPGKURDwq3aGBGTQEfAI9wR/BJPCXtQE1bv3LlzqRM82fhI4Fu3bl1EQzGUSLRkL0aiov7KCcGT0Gj+s/AR9/RrLmPyT4wl6Mozwfj6HPk2VpGbuXyEsH0Ul4hpT4zbF8U6rCkRuX02Zn624mhbt1osfeYwTkS1j2px3B92xF55m98+GDem5n+KuOZiz++DH/zgpebL1hzG+BJlFX6PPPLIJY79sjYxxBK3HNkZ8xqC14wncvmyGWOMMcYYY4wxxhjjzWDfkTvGGGOMGwVhjhDnqVpPdBLsEjmJcES+hD41EQ9q4h2IfoQ9oqpCHFSLLYZCNCT6iUucVFybu9jsiYFqbX4Juc3NVnx2iZj6i8lWO9vrpf7TBurmVaJ+tdhyzgfWZQ32rDXosyfsoSbumo/gyp6d/kRQ8FMSg62nWNBnHvMnunoy1rz62OpjJz4/+wN5Q397ZVx/wjOxmcguJ4KzfI2zO0VltTjdE9foWs0mcbvXxRhjjDHGGGO8Vew7cscY42YwIXeMMcYYNw5in+8zTcBNLNRPjEt87JpgmHBI5CPink9+JuKpxUsETCRNeCxmdfOxrSQWKuzMx4aQCePEwgo7sb9Xwd36iZCJkuKeZIPyOSG+Grcn8ktEZdca1OIbh73R5oPmb//RvvRULLQVe26PxG7v2oP2yj61N8aNJfTyF1Nswq2izY5fa8nnjAHrKc/WBjmJaX4++nudjDHGGGOMMcZbwYTcMca4GUzIHWOMMcaNxJOdRLyEPYIdcY6Yd11oJeQR506xD0Q7gqYYrgl6avEUcRJE+ZwfD9wYOz7mTig0X8IlQdDcxWeTfbHZd329D9fHkDjZOvWLiWzMK0+wN67umh2b9sna5Ki/9bFlA225E1DZ6tdnD9uTcrGn1+8Fe+Xs90Rtono5EIy7lqecytMYzGfv9SnFFItNT/7af/nwb23s5cwe5/02xg4+wto8Y4wxxhhjjPFmMyF3jDFuBn/+0YoxxhhjjBtC35FLoCPWufYUJgHvrBViJIGOLYFRnyd6fY9uoqqPaSbmJfqxJ+IpxEGiXmJf4iXBkL+S4KkQFeVHjORDGFROARfZI3FVcZ1NNVwr2VTLSX6NK2cMuSRo8kmM5ceu3OzJ+aSujy6WnzWozaEWw75YH3u2+rUV14qPJzYvW7U52sMEUuI4+MlFDvKRX/nLQyz3i4174x4+/PDDFxtznTSfmMbNy8e14toaEoXlYw1E5V4f733ve68+9KEPXezKY4wxxhhjjDHGGGOMH4Y9kTvGGGOMGwkB8cd+7McuAqxrIh/BjYBLkCP4EfOIfIQ5doQ7/Ym7ij6iIYGRAEjUI94RLY1rEwSNE/nEEZPwpy+hT+FPdDwFUjaERO3sXPO/ffv2JUcx2BpDdnG9HfqKq06YjPwUeZgj5GjdRE1j/OXCVr5q6zmFXYXwyt4+G+Mvjj2xP/XduXPnsi4xxfadxvqJyuKYQ+F3xkRPPosFfo8++uhFUE8AFrt7U66JwuYzLrYitvupz+uEWOw+2Q85Q+wQSwyvE/2KHMYYY4wxxhjjzWJP5I4xxs1gQu4YY4wxbiSENaIq8S8hlPhG1CPeuSbgEQRBNCTa6a/NTxzCHbGOLd+EPEKimh2R1Bz8El0JidpsCJanEJotX31s1ERPT5fyLU9FHkRJfj2BKg8x5aa/WDjrihjlE42ZC8bDetnaJ+NyKi/zGTO/thiN20dt/oo87Y/8T2GVvz0UXz/462Pv2jqtSx8/+yOm+1ocbePWZ5yfsUTWbKyN30MPPXS5Nqf+7pdiXjb6zcuutYprrfWzfeyxx65efvnl118fY4wxxhhjjPFmMCF3jDFuBhNyxxhjjHEjIaoR5Dx968nJU7BLtHSdmFch1hHl2Gi7TjwkOBLwXBPxtBP5QOBzneiq1icmMVFM/gmBbPMlWLJnq5YbG6WczetaTmz0VYqlsMFZK3zEuJudsWKecyp85Az5wfqhba35tSdinWOEVnHsgz6wS6CtX1z9fPh7iraPtS5HQrF8+KAnqGsb4+/pXrHdP4Kzfk/btvfaxs3rtRK9ToyXv/nl4lo/f5hbPHN5qneMMcYYY4wx3gwm5I4xxs3gzx63GGOMMca4YRD2iIFEuVNE1Ec4JbzpIw6qiXEJcsYJgPqJd8Q9haBHvHOdcKkWtzkIrebQD2Ny0E4E1FbE5mM+fmiu7NnxUcQtj+xcty5FPJx+SnmyMa++8lHsgxzKrfztAz9tcxkTI5vse5pVzBM52hu03vLWNsYfCcRs9LPJjg0RV5GPfNkT640rhOFEY+Pl
4hrsi12+RF7r0DZHT/Xy5Wcu+2NMn8JeLdbTTz999eEPf/jyscxjjDHGGGOM8W7h//v//r+rX/qlX7r6t//231795b/8l1/rHWOM8Wbg31X/vvp39m7Fv8E/CBNyxxhjjHFjIcL1NCbxjziXoKeuEOQSAImG1Qq/hx9++CI8EvLqA+EQREGxFUKwmAmVat/Vy8c8iaYJj4RAwnH2lWzNmU/tBFU+Z0yCo1hyODlj8JOnPdGviCGWwkZep4/1+U7bhFWYhx1fT8wqIATr50cYJW7aT0J1c+rXx1fO9oC9p2XZaMtRfevWrUtersUloics87cX8vK0rLmM239xXBeb2Os+lFfxxRZDTDUfObaHjetzzbc9r1/fxz72sUuxP/rGGGOMMcYY453mJ3/yJ6+ee+65y1npZ3/2Z1/rHWOM8WbyzW9+8+rnfu7n/lz5lV/5lat/8A/+wQ8k5k7IHWOMMcaNhahI4CPYvfrqqxeBjQCofvDBBy+inScyiXIKMdIBl+BHtCPYEWCJfH30rpiEQsIkMVD8REDiH7HRtVJc8xH4EkG12b7yyisXX3MZU2efWHi91M9Gud4nhiI+gTNOW8KjPOWur1Le9oovH232xS+O+vbt25c49pRwysfemFutneArDju+DzzwwCUPPuys3T6z0+bDl7Brj+RjnPjr+4HZJeYq7kn7Labi2hwKG/bul7junXit9fHHH7/kUEyirxyLaczc2vJzbY97LWT/l/7SX7r6mZ/5mcvrit8YY4wxxhhjvFP8/M///NVjjz129eUvf/ly7iHqjjHGeHv4N//m31wE3h/k3969gzTGGGOMGwthjlhJnCM8JuQR34i7xDyCHIGOOGicaJeoyocvW2P8xCQuGkskJhYWR61NyGMDwiYBMjHRtTwSGxMM2asTARsv3nWbs2R3xktcLY/Tlx3KpbFiEDutvXjEycRrbeIuodV6oZ+wKQ5beyiefe0e6LfnrvUrYplfHuz0iZ3Qan4xxecHQq9x4jr7hFzrLTZ7dmIa63t29bNz31o3G/3WImfxzOvaHPz4yMs1zMHemlyzNcf73//+y3dZJVaPMcYYY4wxxjsB8cA55zOf+czVt771rcunHf29v/f3XhsdY4zxduB9o+/30fYTcscYY4xxYyHqJcIS7QhrxFhCnaI/4c84sY5YWSHwEWG75gu1WIm3CoGPkJfwCDH5yYGPefLLV5sIqOSnrl3J5gct4iZoWpcS4hlX5Ehgll/zyAvtnbZ8s7VnZ+4wJoZ+cU6K0ZO94irIX9yetu3pV37mMDfMax7jPUndvrIRw71iB9fG5NMa9PklWp9Y4tsbr4fWwAatVZ/iyVt+SmtXylNOnvT+4Ac/ePneXEL3GGOMMcYYY7zdEA38kelLL7109au/+qtXf/iHf3g5D/k6mDHGGG89/h32/pP3mH7v937vtd67MyF3jDHGGDcaAhyhTk10I9YR3hQiayJngq5fsM5C4ONLeDROsCUkEu78QtY1ca+nM83RPPzN69Cs8CcI9sQpP+MJhAqqT7K9Xs7+6zbykPu5xmzkTVyVJxu29cvVuhN59evjmx0fvtA2xqZ+dvrQk6xiQR7m0ZYXO376xNIvhnhyzFbpadf87CWaNzulj0EuDz7s61MXu31QG/P6SPC3FkItW4WfYh1i6nP/3Vsfz+1jlj/ykY9cch1jjDHGGGOMt5OPf/zjl3MJARef//znLx+v/IlPfOL7Phk2xhjjR+ef/JN/cvXQQw+9/u/w92JC7hhjjDFuNAQ6YiQhztO1ICoS4ECkS+glzBENz0L085QlH2Kdvp4GFYMY6zoB0TxqMQnB/BL9soX+YiQyqiu1weYsp91ZxFOut+Vt7vahmGyMyYWASbQ84ydU2jf+1u77g5XmIXaKKYYCYwnH+tiJk0gqljcVxGtePuKyZ6Pf/Ane2uLx0RYfnoLmJwaIqWLJy7zF1c+f0GrMHObSbx41e/dObUzOxuxR87n3Z1zjsAf6CMfW5mPL/tpf+2tXf+Wv/JXX92WMMcYYY4wx3g7+9t/+2xfh9td+7dcubU+D/c7v/M7lrELkHWOM8ebxxBNPXP3SL/3Snys//dM/ffUf/sN/uHxX7vfDu3/ffZdyjDHGGOMGQuh7/PHHrz784Q9fRDgHV0U/0Y34RrBTE/N8HC6hDkQ94p9iTJtdH6GsnziaMMiGaKefLcGR0KfmJ24ioO/ZJf4ZrzTe/Pn4rldCorZ5CYhihvzDdW3xa3cNscxH1NSvJD6rPUWqzzoIqPW1Lpjfmq1fLHmxVYitYsmdEMuGGMymPbJnYhgTx9rcHzmKwVbbNV9ziyue/WAnFjsxCLnyENd490UMuYuvLSf3nY1re+laXHOwk5+c5KffXohnfvOy86aIYtxa1XIxpibiZ/eVr3zl6n/8j/9x2bcxxhhjjDHG+EH41Kc+dfXpT3/6tdYPju/B/ef//J9fzih343Of+9zVv/pX/+q11hhjjL8oPuHgX/7Lf3l5D+hf/It/8VrvD8+eyB1jjDHGjYYgR6gjqiXQaRP+fCerX7b0EeiIlImCCXHEu1PA0+97hoiLxNhTYFUT/wiMHZr1EQxBlOTTd8Hmo4BdhXiYrXmzU8xJlDR2586dSy2fBNFinD7m0ocER/twjhNCXYtpL1xbj33LTt7msQ+Ns5VTeyg3aMsN4rNVys/8ibbEVX3ysnaoxdBvLhivbYxN9+327duX2ri1+AgbQq34bPS7L67Viry15VFs69VnDus818O+OfibX62tWAc781sr0f/JJ5+8+tmf/dmLcD7GGGOMMcYYbyW+B9d5xJNgP/dzP/fnyje/+c3Ld+fu45XHGOPdw4TcMcYYY9xoiI6EOOKbEoQ7QpuaOKcm4PakJQGSiJfAl7BLpKsv4Y9458lPgiHxjxCojwBMEEyMJPrJh1/CptpYJNLKNVvjiaDsEy8TOrtOkOTHRy7XS7HEETuR0lhz8LcX4rUmefHVhvlcK3LgY2540yBRWAx7R+wkcKM1icHOXjWHsfxOcZqdvsRfebK9fo9OkVWdn7n4lKt+90l/TyZr678+nm+054o55Ge8vBVtRRxPef/UT/3U5XtzXY8xxhhjjDHGW8VP/uRPXs60vhf3Or6r0R+8+kPTMcYY7w4m5I4xxhjjRkOEI/T1lC0S9AitxDpiHJvE04Q/4mACsMKHPUGSaEeATKzrY37BN3FPDH7QzkdJCDU/zF2e8jHXdVtxi21cPLViHn5KYqM6nzNGcc1FQM3O+ClA65Of9YubOMq3depznQAqJ7VY7MSpz3X7zFd/TzwXX7Fv9gLlLCe5go0YYrHVNq64th/o/qvNL6Z4YOu677rt3phDDXWvE/a15VO+2bmWDzvj3ReFgOu7qD70oQ/tydwxxhhjjDHGW8LP//zPXz322GNX3/rWty7fi3ud3//937+ciYi9Y4wx3h1MyB1jjDHGjScxj1iX4Eh08zRqH7+bIOma6Pbggw9erhPiCHWJeH1Hrqc+9RHuxCUEEhUTgPte2ODr+3kTJpszMbCPgJan0pzXbbt
3/v793803/6T+9whuGdC29q3/UbucMwDMMwDMM7CzbYHTr4Nm6/GrRDjzbE8cBoY5yNTXA6eEY2bbTbqLfmt410hxL0bKIbUZvtfLGJj0B8aN0I17rh1I3K21x+xg4D8OXRGtRnXXw68mvNR33LLsqfEdSdPbt4cupAgx1fYE5+9qM18NH1Mc+PmA4pul56ai5P8eVJr8OL/NLpWqZjxMfjK93uD8Sn0bfyHJCoAzpU4ac+lacRn46RXbLik4kNfLIvHpvqcxBzHoiVd/00r476jV9eDqaM/NKhaw7mbOozlD+ZvNnkC9IrpjUb19Wox2I60CZD5Y1nRPU1yKPPUdc33/LD65oWK3v+5KOH/EByPuhXA55r6Yc4yPhmI6568eqTOapfraHY1nxDMdkWD/J3XkN2rcUnFwflky8+6GRfX07b7JH5eZ8H/cPjPz/mxcEjNyfLp/jWQFZu2dLBB9eXbvc2uftXbHr44tTnes++zw+/xnInP/MCc37oupbdG9bsfF7E5dPa/diBr9jQga0c8mf02RePT2v3TnmXG37+8IorV9+2zS90T5LjI2gtFt/0+Dnn6jWyR+C+Fbc8yNXeWn1+WMlI198E14thGIZhuE3YN3KH2w5//9a3bp955pmbv/23//Z+jfLw0GIHucMwDMMwDA8h/JpJG/SPP/745QDA5ri1AwijzfAOIjpksFFug9y6jXWb+GQdEOGRd0hgM55Pm/WozXox+LC+JjjXYsa/xql3TXKxcY/kJk98cxv+HTiAuZzSA4cIciSDaudXfXpU3dbG1uKIwR6PD/ZyiWftgEEPxabP1sEKPl45OhTRN3NyfDryEUt8+r5B6/CCTjEeffTRy68UpYvwHnvssbv1IrU4uPFiiy8n5M+l+FVTaqbD73n4x1af+Cerx/TF50s8PJC3HMiM7N0fdNTAF+iFfPjBI2crRjz9qGf8da1QdZmzpcdWHnighnTYRvyD/NRKB6/a5Mk2f1Cc9PWKDR09YMs3na6BNZDzpwbgs5hi0eO/HOpR/O4p8+pLt9rFKF+88nON+aNbXu41wENsxCBnkz+2fb7x829dDeXDbzxr8Y3yxk+uB/JRN9/05ZAPKA4ZiE3ufpGXNb/uSf744a+86ZGzLz65McLHE1M+7PDlwg+ZGviwpi8Heq4lPXmy1w+6+a4H+KcfNuUvJlv5k+HpP13XgtxabfTEoksuF76MiLz8XTP+/BshB3Z06fBXPvhsxaLHtzXb/JUTWT0vrrH+s+FXX+hZs5MPnushL3ZdTzx9BXO6+lWu9UCeIOfuG7kh8RFbv165//OGYRiG4TZhB7nD8PY3b3/u537u5umnn7755Cc/efPpT3/6Qnj7lcrDw4Id5A7DMAzDMDyksGHuIMDhE7Lp7lu67373u+9u0tsc7yCCjk1xcxvibK1tntNvM91GvU14sjb600XkNu37xhbeCWuHB8CnTfgOYfgXi33+4NoH8ME/H/JwCJANvpz4wgf8DiLS61CqQwa1lj97BwoOE86c5OqgxFpc4MehRf3oMKRDJWv+2Mob1bMOSsSmS8aWjXzp6Yu1Wh955JG7PeevHreuZmNx2dHDk6NRbfyUQ7HEplM8dg5iyOlZk8m1Aybgs1/VLF+5lHe16Qd9umKBGPTIxK0/8qInNh3+ykev+aQjBzz1uA7iyVUdSFwU34jEqCfik5UPfnlCMjZs66nY8kpOn10+5GcO3StAxg9bucFpa04Hya17EPAaq0Uc/uXVgZq+ybfa+CXDI6NPpm/k9QjxnT++6qF+suk6WfMpbzmUV6ATH6oV6LHjIz2+XddiyzkbPKCLR5eOfMmq15wMzMXJBvi0Bjw6CI8un0b/HpLj6wFKB98oZ7z6SQcP+fdPX/2bS44Hrjlb+uTue/KuO99qwjPv2qDsAd/9rn/8sdG7emYUUxwyPbHO//l5IeMXVT+9RrXUNz5cc7LuDzHoycm9SocMj4x/edYf82rgs38frEFO9MjokiN86N9bPDn7cwJy5H8YhmEYbhN2kDsMb8OB7W/8xm/cfOELX7j5c3/uz12esZ966qmbv/t3/+7lOfPzn//8Hc1heGdiB7nDMAzDMAwPKWy826z3N3IdCvSrlm3AI5veNsaRuU105EWHvA13PHMgo98GvbHDDRv3bb7HC/gR2bmmx5d5G/Vy73ABrxwQnGN2RvnEUxM/KP/ZVB87eVdTNtWAf/bAOp6eAhsyvA7c6LDv0KVe4Hd4xYZf6GCETYcertV5GMKvb6Dh80MG+Hyyw5eXkQ2dfKjL2GENG2u6cqnncuCDTjbATo6t5a5eI149zQ74IrM+47FJZiQvj65518wI6gTr/PODyKoJgVrYWxu7n8S0RmKLhfAh3/WcX6P6fJ6AXfl3f0B1sqkf5V2eePVBPvh4bIqJ8o9/bZucD4gvvrj5lC9+9uRdM/2oR+KWE8KjUy/IkFyAPh32/IZidI30hj85WfMpbyhuNsbqqYf4J48tnrjVz4c5e7XVd2Nxz9roWsstFJsvvtOTA71iVf/Zi3opzklBTvTxHDzGQ/LsMBXEQXTrS7W4ltZ0+SsH9vW1PNgAPgKx+eQrXXNyPpGc1KsP+PTpQbrixhfL2rXJb3nxw597HAE9Pvx7J0dzo3W58dnngg/1yR2ffzKHtvjkvon72muvXfj1cRiGYRhuC3aQOwzfCb9q+T/8h/9wOdT17OhbuX6D1X/8j//xjsYwvDPx9lv0MAzDMAzD8NDBZrpD3Gefffbmq1/96mWz24a5AwEvNdCBgA1wG+E2y22oRx0I2LRH6QFdG+lt5hvFpMPOYaADAXO+Ankb7jbuHUx2wJwsXx0IoHIxntRhQDmqDeHz2QEBAr7FsC4/cdiyAWt8Bw90y4WOEZ9fvqotHio+uQMHNuLhmScHtZWLWNb5OPN0KKJP8gp88U8fH5m7Rii+sW+08ed6NfIhJsiJPj02RjAWt9y7X/C7jzps4hsBufxBrHLSr4hcb+tnNogN1I/io2KA+HrEH3t6yBz4rk6obnK22fApB8AzlxuwqYcoW3ZA1oEZkNMrJ/Lo1IHuldZ0oDWY5+e8vunIo37yR+fMlVzfjNVCl/zscz7NyYyAV68An/1ZC59soOuZfX20xpdfc8RX+eWDPzy+ystcnXTI5ARGdRS3HhpR+ZPlC4zFM6crV/dHfSEnk9t5zYE/uM7F/UhPPvzx2z0K/Hff57s8+cS3hq4Xv8g8O7nFr456SU8MelF9qxZzfDDil0s/vMDGWs5GxJb+2SN5I/bWXQ8jW/NkbKshPbZ84+UrsO/fUzrqcqhrPPWGYRiGYRiG4d/+239789JLL13eoX/+53/+DncY3pn49tvnMAzDMAzD8NChjfcXXnjhQv5Gqo15B7pkbagjcAhFjhyydCBk4xzhtSFvQ51+m+htvNOH/De/F/DbwG8TH/iSUwcWHUKe4JuesQMFevxk74BCDeVSnXKOR05fbdDhBz/mXvzUi9ghh9TW9Nji8W0sttGhDT/4aqNfX/muX/3t1fpoTk9deMjhj795TIagHDoA
VA8en33rjW7XiayDJPPiG+mR8UcfD8j59WuTO/DJpxz5ZQOuJZm8W2fffWNtXr709cMIYiAg79onL18yEB+Rx+NbDnjmIR3+5KJvp405HTHiIfpGeXavReWeX9+aZn/m2cEdOUp+5kxXrZBe82IFunI34p866Rm7XmLREet6TUdNrmU8fl2j9K3R2U+jNVvET/d18vj56Z4zl0P9R+mSxcu//nWf4UV0ql1sa3r8qqc6jUBe7K5b/s3pokC3nKAardlY0+8zZfTvRdc7XTmJSe7fDjz5oXomNzpILnTlRpfM/KzZ9UHlQWasj0YQ27/n4tDNvnjAFuhlx7drRefUd48a6akFX7/7THedxDKCfPjy2fDvklzUh+ee4EcO/OIVC5nTL06fEWB7/W/QMAzDMAzDcHvwq7/6qzf//t//+5tf//Vfv8P5P+EZ93/9r/91ZzUM70y8vWswDMMwDMMwPNT4xje+cfk7gv4erc1wc4e6vs1kA9xBor+9ao5s0ts8t0Fuw92vZTbabHegZ7Pd5jydNt/xO6Dgw5x/64B3Ets33njj8usx+eBPjOQdTiAb+adt/hrpIn+/ll88NsjLm5zM81/t1g4dkIMGkEe5JHdQwa+Dmvye/sQAPHoOHPIrf7EcPOijNX/gEEO/yYyuhdjs6QFbPL10/RC8613vunnve997ice268SHPPMtP38b2ShOB0v89LdA8cjELR6+66JGvoAc74knnris+WAnVwfN1d7BDltyo96Y6wkdtt1L/Lsm5GzrJ5k82MjJvdJhKT3XGw/Ekzuwc0+ywRf7RNcOsqGH5NQarNMBc3T65UtM/K6VGvLVCNm7JnTN1ah//CVH6iyv8oGua3pnLub8dvAvD311b5Dpc9eHLTvziMy3+c3BdervQgey4rmHXB/rfNHVj3SMeMnl1fok+QAZnfIzN7LF69fxdv+VmxrN01Nr+YD73Lpr1XXhRwwy0D+fJ/eQa8APHTm4//SID3W7d9nxRW7NF5vuVXn5TOK7HvLo0BePv65J9zpfIE818nd+Tvmkx75e8s2Hsc8KXTI6YuC5P63ZkiF6eOpka93/DWqqVnGzUTOf8si/fKuTr/oNZ1y2aiErZ7mx4cf6vP/FAzJ+2Hb9hmEYhmEYhtuHvnX7qU996nKoe8LaO+t//a//9Q5nGN65sHOxv5E7DMMwDMNwC2Bz3Ma3w4mnnnrqspHeYVib4R0IGG2c25Bv05wOWYcSZDb6HSxkSwY23Nnb3Mdj26HO9aY8+RkTnWgtjw4F6Kcrj+aIXz7NT59ySB6vb612yMCuAylztXXIwB4fOVCpB/Syx6OH3wGNNT94Hdy4Dvxk58DCvP7SJw/iswd6HXTwxb845PyiDjkAv2sojjlUk3h06fHt/jAnBzryAnGKJbbRwY7rzC/Co8Ov/qqFPd/4gK++esSODl25tNZPMC9f/jugLX8+6LLTG35aQ7b4YoI18IPPBz02elVsMjHUSSZvevLo/o6HXOP8VKP86yNiW97mZwy9YUuG9KPrTJdPuubkYK6/Z34oXfZyBXyfW/pqNNI7e4D6wQz2YpPxQ09/imnNH3l+3Gd0y4Ee5AePPcjLXBy2wD8d9up3TYutxvqpX/S6P3wu5ZIO38Xnhw0eGV/ikoM5H434dNUCeEjP6MhHLWKVHyITSwxx8czpWjuopMM3HpjLrc9tcjxkzYfPJhs6qM+qmpF5eZvLTZ3WeiV/4Kv864XYeNVHxo68HOo/X65XuaqLbv8+m7OnQ0aHj66FXMGaPr1zDeLi4xn5RtauiWuNXn311bv34zAMwzDcNuxv5A7DzeXv377nPe+5+ZVf+ZWbT3/603fpfe97382/+lf/6ua3f/u372gOwzsX394dGoZhGIZhGB5qOCDy93I/97nPXb6RaxO+gwQb5B0gQZv4HYbZXMfrYMCmvI12G/r0bKS3aY/o0WFngz27NtvbrOfTYQRfePcjYGsujoMA+Vu3+d+cL7odiuBVpzxQvHKVGzv1dqhAnh6ZOoFfczxgq398ZSu+HMmMekHOhn35kIvPH6KHzB2qkAN/Ypx158+abnCQyN436eqLHuP5Bm+xO8iB4nd9xFUL+dmj+luv2RjpVjv52TMkR/nzg983OK27Rvh4ZAjPQY1YYlc3PTmRlweYyx/F10swl1M5Zlu95gjIfAORvrgov/ydNnDGY1NPQnrlYp0PMFePmuMbo9Zn7Gv/Zy7pQHN5ZZMumOOfPMDTZ/3qfnRfQfGia5SnmKB/7lv+gA3/5Gd+euDz5x7IbzHonWO+jGzTQ+UrRp9VOhEdORWTDzy1Zp9v96eYxu6dsza6qHrOfvnM6Rk/1urqs9+aTG8QGd9k7gV6iD8yukafI3H6DBQ7ilcd9IDvRvbV43NlLm4+rbvvxQVrPs8YZGzw/P+B1MKebdednC2euo3q6AdLuiZyATZk6i1XekAPn27/fvA3DMMwDMMw3F7863/9ry8HuSf9g3/wD/YrlYeHBjvIHYZhGIZhuCWwke7Xo37lK1+5+d3f/d3LN5lstOPbOLeZblPcJrnNcmSjHNmIt9HfgUeb523ok9MP2eUbmbeZj+jbrOcvfhDnmkAcevnHL4eT6MF1jtYdDOBVU/U6lCg3ObERR0+ADTk7/JOX/+KRO6zA67CDTyh/cfhuXh/kZ7SOFzkskXO5F0+Pi2NdXXzFQx0g4dPp4Jfv8qoWsfPLNuDXk0DOLlvgl209j/DLQTx5uKfw5OMHBPgpRnZ4dOiDEc8B4MlrXh71o3VydVyDrG96klv3LUU55MMYQf675tXOh8+ZOk+oA+iqgT5dVM/hzBU1p8MOin2CTrxzpNt9kl3/DlwjXvmoH6oN+Dj7a8w3dI3jpY8gnhj8JrvWvV6DewbE7fNL1tg9Q68YoC5ydnI7a6eD4tFB9PnSK7749IMGRnx+UPHpVE9+AM/9VH7QDzngJQc5lEcxrM19junnH6+66YkXz1wMuReTn+rQO2idjJ01m0b+wPzsCx9sypksAjKwzjdi33VJhqpVHL6toc+SfPxbaCzGMAzDMAzDMAzDw4hv78YMwzAMwzAMtwI2wj//+c9fvp3rMNcGv41zG/Q2zG2MI4doDirabEd0bLqnEzn4suneAQLiyya8g4m+hWXdRj+5jfj+hiS0iR9a34/4OfXkaJSjuMYz/w4M5CwmXXpGfeCPnjo6mDA6cCZTPzLnw8hn9TnQYB/okIkB9aPDWzZGscUIfbtNfHJU7884/Dn4NFrT59u6gxtro1zxuh54bM5Dy65RB0lqJ/MNQb7pkCMyBHzKjz0dfDmzQfImNyfLB/DPTv3k1vHZqRUf6CU3B379GuB6QrdaIFtj1wHI70enXK/UD/lF94qhJn0FfdVHPPc4fWBDn9/sAJ8uHrvuqSiY6+F5r+Ub2NM5eYEM6RliT697r3yKVz8Br2t2Xm86cgnFKH5yusmgGNb5ds+wSzfbPrenvdH9DPTd591jiF3r7n8wkuHrY/8W0CXTd/HkIj6QqfmMafTvY2vxoV6ca37545tPFM+15leMYoP
cEN105NB9RU/eaq+2Pi/8GssZ5OnfFDL88iwPvsQqbzrWHZb2uRUPscEzL5Z/i877Go8NqE9+fBnrKTp9iMmmeuVXfDG7lvL0d5zx2A/DMAzDMAzDMDys+PYu0zAMwzAMw3Cr4Ju5L7300s1rr71289xzz928/vrrF75DAr8W1OGYTXUb89CBB5mxNdhI79eI2pA3d3BA3qa/dYcBNuL59StsbdiLgzoAgHMO1hFd6IAV0s+PUSwb/vmP5PTHf/zHF1u6ckYOBqzl1qFCBxCnrYO5DiU6yHHgAWqVFz4/7NUtljl+BxR8OSR0OFNf6CDXQzwxOlwR94knnrhcG+CXbv1F5nxWgxj0ilOtrdUMjz322MW3+umzM5cXn/IwAr+uXXXJk674HfxAfasP9RB1eMgXW/HkxSd7Ou5FI+iB+ssByMRgD431NpjTffzxxy/61ifxWf/jWTfPR0SPzSmrD2KoI5DxVS9aVxe7M+673/3uS0/o6MWZB0rfddMTczrh1C0egrM/cPbNNZIjJGdPxt5YztZiFtd9lG36za/XwK57BMRz7zgYdZ/Qj8T0K8G7zgjPWp/NjdbnPWauJj70E09M92e5GvNT/cUVR/+T4blnsy32WZ+1+6J7G8oF0XUQ6Tcj0PPvpM+hz92jjz56qdOanlzFoIPwqkk93QNikcmrWOQI+rz4LHWvlLdeGMno850eO/ERG36LKbf0ycsNr76AWq3LJ7l6ygX0gy4/YN7aqMaunbxcl3SGYRiGYRiGYRgedthh+L/fng7DMAzDMAy3CTbHbbg7PHD4BDbWbbB3mGHT3eFZ83Q6KGzT3iGMgwQHSx3Q2GwnQ23ad5jRhj2ZgwMb8mJYQ/KAj0fnlJnb2DdmC9YRmw4l8oPk20GGvOjJU1148lePwwdrPs4ajHyyUbM62JABfw64OpChp0/88UueD/WXG+C7LvzxIacO7ejJBfiSk1rY1AMj/Q53sq224rz55puXg3kknw7SigNi8ePw2IjEdV+4D8jVJVe5kPGBoPsMiWvkg1165US3Hss1XTI5If3Qr3zh6aH8UdeJnpEM8kuHffdp14Men+Ri0pcDMu8QqbrM73XNqy1foE/m1VEu9Ogj8rMW+ZH3ww7lS27et5DplCu+noTk9b4cXC+y4uq7PuDxJQ/+9AXfPKiXXb3nnw4/bKGDR6BDxkf1AV06+mDOZwd89QWs+Y8vbtfxzMWaHh6y5t89jZ9edauN3Jquw0QxyIrl+nZPQvb8saVXfnyQ4QNePUJ8uO/M5a8fxnIXTyz3GR384pH3gyfWRjWyF8e9buTD51Ifiw3VS68eu1fwkZwRG/HF45vsei5u9yY+X3qsvvqOyPlUZzb1US5Q7HypjU62fNMtXp9bh8l+0EWt+RyGYRiG24i/+Bf/4s3v/d7v3VkNwzAMDyt2kDsMwzAMw3BL0cGCTXaHADbcO9DDAxvrNtg7yLGpzs4Gexv25m3IOwgw4rMxR/ygDpDSM7KNx+5+oH+OJ9jZ4K+Wa91yLWZ8hwMdBMgDrE8d/oAupNdhA102Yp/1iBdPD9NDdPDYF4+NOYipr9b49PjTU2v2CL9DVPmQOXzir/z5IOMP6J5+OwA67R3MiCfH+il2OUF2+i6eQyJ6HciwJ2ODZ43kTb85FNtBlpEcz5gOfn3k11q803/6xbiXXv3AlzP4HKRHlh4eP3pGxyFYMv70ymeCDpIDm3IHevKpT3TY88NvcetHuRrpkJefntNjJzY/1n0+i3H6O3OpJoRHX8/LEd+cnljk7PmiLy6evMqTHjtrdnSqixzoZ19+xSHLvxz0kwwf2HS98NlWlx7Qy5dcmudPvvrDT/H4MjeWe76t5ZPcWEz+2dfn8kP4dAJbn01++UuPTjngd6jJnzV9MiQeiAd4bOWjfv7UZs6Pkcx9na9ig34Bf+4pumR0xE+v3oEayMWmn265gDj0yfCsxZCHsfzL3T1H31weiK0aqoN/sc9/38jYvvLKKzdvvPHGZS3WMAzDMNxW7CB3GIbhduDbP649DMMwDMMw3Dr4ZpO/lfuFL3zh8g0nB3g2zW2OO8Do21nmbcjbPMdvg91mfIcaNt9t0FvjW7Nvox+xOWMYHVh0IEQf2J+ID2SRWMWw2d+hwClHyTuYwROTLr61/OjhlW91s5Ef+ZmnuQMKNfFRXLwOJABPPPHJ6PKBrI14p2/64qEOcc0DX5CtfI3i4jVWpzrqudHBJH5+5FitrcVUW72jf8ob6aHiuw7s6iM7SE6Xv3LC1we68juvY7IOogL52Y/q7WCzfMGI8MRwyFRf6q1YxUdk+affwW9+k5/zk+IjueEFPH7SM8rBXL1y635pbaR31lfO/OtRPs746dKhz54vsq5dsIbsgzX78iC7zt+6nKC5vJLns5rSBTKEdxLdcw3X+aWnF+mcNnIXnw6o2by83Fvlg1/OeMgaVSu9+uY+B/eyfxc7jMTnu3m+AS89/vhyXXwm5XJeI8Cjk68zDzwE7PDolBec9uLCWQd/PnPVSZZ9OchHf63plAsf1uV7+qWDh8y7J9LFs06WvT7imccjP/8NQ8MwDMMwDMMwDA87dpA7DMMwDMNwy+FXivp7uegb3/jG5RtPHTY42HVoa+xgDbXxjoAMr013m/o2+G3kQxvvfeO3TXiUzAEBO+DrGuxO0Ik6YEDyl1d5kkViyctoLR4bczkYrYtvdOjBp7l66MF5oIVa8118/tm17kAI+MHLpwMK8w4/6JYLv/GRnFB9Q31btAPUDpTEICcrHp8d0oBY+NV65l3u9ezUrZ7yD9nxQYe9OTvofmHDjzmdfJvX5+LTEd9IhvARWKuZ72SnToR35s2/X32sR/w7CKNXTnTM/SpjY/ePPPNf/HKN372QPN0gHpz3DD38ZEYHyOLRQR3A15Niy70eA538pH/moA8+k3pR/SfodqhY/nolhpxR96G+dA/wCfjoeg5yUYexOsAol/jmYppDo1z6rF/L5Hf2AZ9fPPNsULmT6QO9csDTo/Khq97WYN5a/XLqh1zwq4ucv+K5T+nj00mu18XUX/p4cgM29dl9Ub7kHdaySy/784cQoHuqWPpJFw9Z06XHt3WQv3z1UQwyNnjqwuc3lHu9oQviAd3qT0ZP/PKqpq4Vyn4YhmEYhmEYhuFhxne+qQ/DMAzDMAy3Eg5rf+d3fufmm9/85uVbuv4eo8NbBwDnQYQNd4c0DlY6GPB3Cm3eO+BANtptvjv4MtrEt+neBj97Bzj80sVrwx7EuUayxkD3JHHk5cCA70AmHjm+2uJ1YKIHreXUgYHDETXgd+jhkIE9Pb7EE1f9fOHzh9dBBzv+9VQv2Jsb+XMowQ7hIX7FpMcPH3pfDHl1rfjkB9Gxrl9s5cK+a6Q2/A5p6hdbc374gCeeeOKSTwcuZP52q5EtX/Lhn9z1df2rzw8LiAvW+oCALTv3ER/Apxr1pPurWtjjn+v08w149FD9ysaI1Mk3eX0Bul1rSB8f6Zn68o/wu9bZnHHNT8LTazHEzZav7FGfwXyQm8tXHu5PiM+XPvB92qsHLz
069a4cI0hOv/xOfTx+3QdGeuSuJx36+SWzNkI8+mrqHrfmqx9KaA3ilVtIdvLF0RM+IBlfSFz3I761fMU/8xfLZ0oOXR/3Ot/WcsVD3SPAFoHc2OfDCHz4t8FYPLkUo89yf7NXbulY8+Oz7FrqPbk+66M49Pioj+TmYnWvs8XH82+Mz5j4/IpV7v3bB3KTd9fr7B0CPHXJpz7wL+/W1en+NZLLi45RjHIpJl0+05Mv/jAMwzAMwzAMw22At6n9jdxhGIZhGIbhApvlNu4dADiscChgU98mfBv8eAis6TnUYkfHRnsHHA4UrNukz8bGfAcZJxXLBr4R70Rxr/mAd+YlB3E7dADy5vIpZzwj3XxYdzBirjf541/+Rrb4Dh6M+Gz0xYGD0eGHfIx6wi+y5lf/yhnYyI8Mnz/kYEXv2BVXbu9+97sv6+yhXPkQk5x9dZrnOzv9KC65POqNuPTYsHfQRE8ueHTp0deD0ydb9ZqTs6ML2dDhNx0+2WRb3+tD/UNsxKsfSAz3MT0y665FsflyqFV8oN+9QU7fWDw+5CKWQyj2ZIiMLlu51iu8DtLFpseevJ6SZysfcmv64soJX01kekxGhw95AD7UA77zxZ5cHnjB503cbPiTd/dLun4Igw86eOb09K7rUB10uofAWo502CK2YomR32JXFz02/CP10DfXi+b01dW9V37W8uAb6OPJq3tdPPzuaTHyxU928jj7lJ45f+KVi3WHkvjZismumGQdbLID6+rk39y9Zs7Omp25H+QA8ej3eRHLiEePz/IorlzN2Za/fyvoyhvo10v+UNdOLeJZlyO9ctE//O4HMnblZV2d4tBLV++MdOVp3QHua6+9toPcYRiGYfgW9jdyh2EYbgfe/hHmYRiGYRiGYfgWXnnllcuvWP76179+9xthNudtyNtM7/DCxru1Tfw2/JNb46MOHVCHw2BtEz+/2drEx7fuUAPw2vQ3nrwT13p8yj9fyeTf4YS1wwKjgwkgZ2Pdt9s6aJCzujp8UFMHJXjmHVbkq8MP/vjgr17hI77w6wXwTQeRsyczl7uYDs5AXvyITU/ceA6K2VmLQcbeNQH+2OE59CqXdMWXd3H5oQf10AgOWPRcHfjs+MiPwxg8oCM/MNLJV/7SyQbkQRdPXUakBjLz4gP7fNA/1/y0RvWfzFoe+YTyo4OaF0+NePk3x6sXQQ/P+sn5gezKzVptfX58tjosy0ex3Wt08cVgG+GxzQbx4f4Qrzjui2qja05mbc5OHPdAfsqPnjys6bQGOZ7+EFsjtAaj3PIJ5vyCXPJP1xqJUWzgA/8Em+yMbFD39+nDmo/8ubd9vvDxsrXmT33AxmfTCOKwo5svaM6erc+068YO6VUHpMVLF9+aXqMayOjggf7z69pXbzpyJhcn/dMnFI8uXrXg07MGc30rB/92mrMpDmJn7P5kRx+vGN0jeHLv3zTrZMMwDMMwDMMwDLcBO8gdhmEYhmEY7sKG+YsvvnjzxS9+8UIO3fBsniOb6TbabcCfm/p9e6rNfhv1yMFDhw824W3q89PmPT5f2SIyB0TXvq4p3I/PN/Ad0iGTV/HiWZ+HBOYOP6A65I931uWAiS2egwz+rB3kqCF79ZOTQT3QYzbkYjugyT97PHP5XMcon64JVIM1O/mZu370q4sM8Zm/DrDkVn708c3rjTFdYJsMTy3swcg/P/LkJ7ApV5AfAnpyL256fCFxOuhD9BCdYl/3oFrzhcqJzFotxvoYvzrIUYfZEF9fkyN28ehAMZMjc3qtxaVvbqyf9NjKiQ6Un3zVkR0bqC8Qrz7TrY5qdV35Kpfim4vPV/nQY2NNjzwe1MPu2dblHxWLn9bFBWvx8kunUT75z4YuWbzoRD749tlgl+1ZY9cJ5I/4Sg9Zp9M6HQTs3Ifl2j1ALpbazMn5oO+eMOZDjHJAbOgDXeCz2s39e8KOT37wTz18flCft6h7t9qqy79ZIG9QC1/807U2Ivp8Gc+8Tp9qsSY3kuO7R/MF/q/p34dhGIZhGIZhGIbbgB3kDsMwDMMwDN8B36J69tlnb/77f//vd7+ZazPfZrrRgQcdG+s2/VGb/W3k47Wh3+Z9Ojbs24h3AIFn3UY+9GtiW/OBQut7UTCXgwMEvgM+nkOL85Agfbrp03FQIf9qQ30TDg/qQzng8+0gN/9qtTZa8+0Ajb8ONbLtEINtByDmYnTohic3Byr06YG41WZEek9+9h/h8yE2Pp/lhuQGbMUtNzJxO3wCduSuXb9CF/gEMv7wy9VYDKhv1cPWt0PFwNODrg2Zfpz3CqJnrE/0+dJ7vtlY0wH6CF9u5nKSS5RfedQjPvo7ufVBrPRDcyO/KB5d/vJpLY4xefmAefVAvZC7OT+Qf73jo7jlfMZpZJ8dFINP9YlRjme9qINHvlD3RL6tu39Of+TFSbecz3yM1vjmqFqBf/4gfrpkePei9MT2WaBfn+IX270DaqXDLz3ys1f46mPfYWc50Os+I+e3b5qS0VN/PSCjc8byGZDLGZc9ff8uqYM+3qkvrl+NTaYuIC+mzwcf1kYEamBfzUFe1cQPPTy+xSyPrmv19e9f/sGaXvWIR85/9ub8+uGirsUwDMMwDMMwDMNtgB2B/Y3cYRiGYRiG4Ttg49ymfId1yEa/DX88m/FtsDsccEhjg93Bg814cpvt7M7DBGsywHc4wNYBRIcg7M3FA3GgzfzQ/OTBuWaDyrmDAjyQg5yN5QV44spNTuw7iJBzeeN1wGHsUEadeIgfvuXVAYTDRzlY86GHetSvcfY3b83ZOoBhS/7oo49echEHT4/ouy5iumYOUPh/66237taPTy43NqhrZM4flAd/RkQPH4rhW3GPPfbY5ZCfXC9ALojfahWz60tXPfpNzwjm/JLxJQ6ctvyJq6f8VBuZPnWN8suH/PHp4espW/rWYnWdrfWOzSOPPHLXR9cQXz7mRj2iI6dyUANb8+4J0D88euXtGp3XBfCylU8+ydm4TvrgPrCWU7WRyUkMa73MBq/YZHrSPVBe/XBCefNFdt2D8gG51Cd8xId6xWJDt1zY0xWXbvcqG+vidR3KxxqRAb13vetdlzk9EIcPa/NqLhcy4FPernk8c/2wZsNeDH9/Vn16gScHucvDmh/9tZanOOb0zdnSJzN3/4lllGe+qh9PXDm4r/DpGIEPenzzSc/a9TTKCamZX/mS0dUvo+vs3+qum1zFUMubb755icMvXXJz8nyz63OPyKvbddYPNchZbPWSVR8b/O5luvXGHMWXP3668kb4bIZhGIbhtmN/I3cYhuF2YAe5wzAMwzAMwz3R5vrjjz9+OQRo095mfQczbbrbWLdR7zCkQw+Eb7SR72DApj6/bczzR853MfEihwHiOACwDucc6MA1v/X1gQicsvLDQ3jWKJ0OF/Dk2QEjmXocMOgLGcLXv/MQS49OO/4catRL/ZEnPTrW7KEa+ZavNf905YbI8PEc5oiJzycZf/h8y5dcHg7T1GVt7kCHHzKjHPgxp1cvjeUiJvDtuuOlpz/dB/jiWBurR250ER5/2RrlkA5bO
mCud8UQn8y82qtfXvzQt+arXPC6j+l0sAl47m2gF/iXE3tUnHJWK3054Rn5Mqcbjx5ffJw8vuRiLh9wffDdN/rGD8gD1EOfj+qVizX/ZGyqvVyyJ6eXvDrkJ271moO+8K1O/PKlL78+L10LOnStjXSM4vBN19r89OW+5YsMj536uh/ZyVc+xURkes6HGOKb882X3Pki48/YNfLvHh9AD+XDnB4So5jk+Si+kT+5OaC1Vl81iU8f2Lq2/NPvurAD+oie/NVMV0746gL3i3rI6OF3CCqedbXg8S8e2VlH/8aYdy3kRK++VR+f5l0jsdXftS5H/sgQ0O3fRURenxB7eeD5ARX54g/DMAzDbccOcodhGG4HdpA7DMMwDMMw3Bc2323Q26x3gOTXsp4b8TbybdR3KNIBR7Jzc97GewcQrW3Q47HLFj8bBEb+vx+cNuec3w4A5AYOEIBvNaklpK/+9Okh6w6T5I3nQKVDF77w1MZH6w5d2JGBHviGm0OUDkuM5HJgiycOe2vAIy9nNgiPP/ZdG3zxxcZLLx7degRyYldt5lC/8MGBUznRwy+Pc05uzb9+WouJl541vjzqlVxPHYRvTQfM+WWP8Plhqw7yfFYTP0b15M91snYN6afHV/0Qpzzwz4N6hK8+eZjzYyw/63yfY3PIDplDnyM1iaPv5nyyy1YtbKzFpFfeQFbv+YHyYsNen+L12Tfno+uRP3r5xLeGYuCJwycb971RP0Fu1VlMsvPzwba4/AI5yPfMwdr1Kz96+aRnLR96YuePvUNCMVD3EB0oRzKjvhQjWTmAWOJUM5jz5z5TjzzZ0MvfGYMO4rODXbbk1aEu/Hogbzr45nTIxVIzX9mzMaeLikmfHnuwNmdjlCvQtWYrDns1mOMj/6aJ27+BZKDX5QZ0rPkqf774sHb/szF/44037sYchmEYhtuOHeQOwzDcDnz7KwnDMAzDMAzDcA+88sorN1/+8pdvvvSlL928/PLLl811m/IdDNiMt6neQYJN9si6zX1wINGcPmqTP14HB5G1Q4MOEvKXn3MO9E8qDttyjhfRw5cf/9Z8Vic4+MDnp8MQczpkHZjx10FJOmBOJ1l5myN6HVDwb8TjE5ULvjzpdphChs741ub5oiu+uTo6OEbZykMMMrpnbkCnnPgJ8QE/O3wQ75qAjvzyaUTkZCAP6+TuF3MkFsKnx5d6im1kqz46DrT0rRzw608xUYexxSlH4D/7+PWCbvHxWp88MHcN2UPxHVjRzbYcgQ24PnSqlU58SE+d5vwEfvPfvdY9VJ3InCxf5u4pZA7FZ2uka6xma7qIbuv6aI3Sr9bzmsiRXFzjaUfPyK781dIa2AEZ3fy6D6CagB15/SxXPMRHuVUnsGdDPyJL35qcTXWyMSd3H7inreUvT3NxXFP3ojzKn32+i48vjn6h4uKzMy/39IHf0w/giYHA50o++OLii1EexaNnLRaw4RMPyq/c0jNmJ0Y66elB8Ywon8MwDMMwDMMwDLcBO8gdhmEYhmEYvie++c1v3nz+85+/kF9taZPdBrtDCIdP8dqUN3cA0WZ9BwpsHFoYyRxctDFvbbP+PFA4+R0U4BcneQSNwM6BglF8ZM4OvwOTZL5hKR8+8DtYoQ/m+Nb80FUPfflVl4Miv2K6vFv7VaodNnVYwae/ecpWbR2a6F+HIXy2Zm8th3J2HRAf8vPrsMXlp4Nydv2dzGoB+vXavFrEkqf46oTiGtlUk/rqCxjZiAv8IDnUA/7T4yvfqDrLq17RgfQQHXXHZysffLlbZ2fkhwyZd1+Jh8o7ubz5wy9nfvj2LXU6QAYOwc845nrd+swZz/rkGU899nzKg7x1MlQP5W9Nj73rA64NAnkXA9EDdnqRHF+coAZ8enypt3X3krU86ymiJy9+6QL9Mw4yR92D9YY/oxrZkScTh7y46i2GNSRrTu43C0DXXI4+G/SQGGLRJSsnORj1hTxdZE5WPXIsT8j3qUfuWrGv92TdB11HMvH6nMkDyd0a6APd+Ej+/m1gX850xfYtVyOd+OVLl59yrvZ6DuyADf1yMadHjk6ZGNaoNcgD6IkjHr/1QEz/1pXfMAzDMAzDMAzDbYG3qf1q5WEYhmEYhuF7wmHAH//xH989cHAgZ26j3SEj2JzvcMSG/Lnhz4bcwS95BzkdBNq8R+zIHUBmmx850OkAwZwsOZzzQC84CHBIIOYJOmRiyKFDFXTy6DgAkaN6OhAp5w4o9EUM+tmRO2zSL/IOOeoffflb07fmH9FjS+4AscM8MrrgepDR6UAkyoeY1nrQwY28zLtmHRLR83c1jQ66jHTkpSfqKhbf/JiTITI5yRsf8NRu7V4w50de4hrlwt7ILx1zvjpcxJMPfX7kdNahh2xb97dl5cKGLh/qFFdedFF/G5VMbnonhjm5uMDO4RJbMnHI6434/Jz5mNN337PHQ3ziiZUvtmom4xPkz46/cmbjcJK+ezE99bGnh/JJB1991nrHT/7YycnanE7XJ3/1ofukHlqfPpD8rKutPl77g64TXbbnfV7+5vji8steDdb6Um309I1P+VvzRcYOL39yco3F4CsdOdCzRnQC/jmKLQc24siLX6M4+Nf3QbGs5ep+am0k734gq29ydr9Us1HdfIM5nvvcnD2wU5d81MJX9fGdjdzw6OLza43fdSPjj29rPs0Rn0Y/5EOPX366P8nx+CJPv/zUrB/F8O/QN77xjYv+MAzDMAxvY79aeRiG4XZgB7nDMAzDMAzD9w0b6r6d+8QTT1zWNu4dkjzyyCN3DxDxjGBj3mY9O4cDNubb8Me3We8Qgl4b9jb0ET4/5kDfQYKNfMQG6JCh0DpKBxxIdLATD4plFIf/+/FaG9XigEJtDl06QMGr1g4y2DsMN+8Ao4MJB1P1iV85dtjIroMNB3TsydiWF31zPsR1YHKdp1rY6a21gyC8Dorw2Do0YVPtfJOnw95hp9zyC2LyJWYHSubk8qLLnzrIxTLmgz9yvTOvNj7pqbtazsMrdeCb02XvkIp9PrpG/OgB2w4J6bCvF2RqsRZLLnpgzlf10OlgmA3qHucPxCs2Hv/046WTnrzpmbu++A7w6qscjHyA0Vo+HQIiuaRfzub03KfW+oDqL6gV4cmDXC7WCPioFj77PPJBxkZsOtbs9YQOWJcfsL/2d/ZEPuzrGyLDLy4762oT1z2SbTHI6cqxfKzFzl/86nBN9Na6PMWkpw6HlWIg/Gp2b1jTAb7kwb57Rczk+O5JeubyF5M/8cUjK5ZcyLsHyPH4ZVOu5Sm2WqH6fXb0BOiICdnVa7bk8fgRl2099e8Gefr61po/ObKxNpYTPfKuWbHefPPNy1yu/u63HiQbhmEYhmEHucMwDLcFO8gdhmEYhmEYvm/YQLep7gDLBn2Hhzbl2+BHbbQ7SEiGzCEeuU19MIcODk5/1uT5O+XfD+QTBfZw8oBPBwr45eaQAg8cOJAVWz7W5A5XjPT5Nzfyky8HEQ4+OiiBDm/YqZEfcrrpOOAg56f86DrccBhUTwJ9OmyAT3r1sfrEVYvrWt5AjtSH
nx/Apy9HfGtzBF1fo1yN8ulaIjngmxv5YFMcvk59fL7M8ekWA6+DKX6zI0PWell+gA/yOnWBbjz+inPKxdPTk18N8crF56Xri8g6vMoWHMCdcZH86J1+xZGDOb4Y+UvPyD6bMzY+e2vxgL6a6p1RnHyl6xrktzyKSSd76+7TciHDR9bQ/UEHTn9s2dQnulDO+pVuetbm7nWjzxY5v2KyJTt1+XV98PGQ2PqBX6/YoOqnl89yBLb5N6/veHzRxzc3ytGIytX1lFcxsoPiyY/sjIV36sTvWlSvOA6a1ccHnvrBfY3ou3eNfNFlZy0fa3CPZpuOOPUM8MSRn1hQTDkCGb/s+OygWy86yK2OYRiGYRh2kDsMw3Bb4I1pB7nDMAzDMAzDDwSb6jbcbcA7lLTpb+O+DXab9jbwbbrblMdHNvlPnTb3IyC/PgAwz05MhxT4cMa9H8gjOSE5Q3HP+ObFMZeP0bqDEQcM1WBdHsZypmNkh++ggr0+GNPTQ/LiGq3TEb+DE7JIPIcb/Jsj+n2bFMpdHPHx65+1PtC5PiDBqyay5sXOF7I2yk/NbOWOzPOH+FAvPcjeoU31sZNjoCMHNvKlkxyf/MwtOf34kI21XMrBOh/8Ad18If2nY0R48oXsjXKP1zUH/QcxutZ47OLLiU+8/NGNB4346eUzf+VhtCajA3jqxqNTXkCHfn2iZ14vwhnTnB89ySeeuulYo/pKxhdZI7na+TAXr/uFzmlvzofcyPJnZCcHMrbW7rXTVjy54ZsjtteEz4/+5L/cyik+nfKpjvxb02FXHUYUny49uRuRzwN/2aVLp5zNxSbHs0bul+4Fvqq/GHTFRHKtX4gtqn62/NAtPkrX6NBVruWSTzxUHxB/5WPMlk/Xpfwc3vKBpxf+nfPNZz6GYRiGYXgbO8gdhmG4HXh7F2AYhmEYhmEYfkA899xzN7//+79/80d/9EeXg12b7DbwwWa7DX2b8jbjbcTbmLd2GNChgw38NvXxHBg4DMBj1yEHPt/80gO2HXDh3Y+yQWKePoAfPiLxxSwPcnk4QOwggx5+foGswwvyekDuMMKBhRqqv151oHz2gX6xy9e39ujyiUAucvX3UcXkE7Ev5/Lmp/z5M3dd5AXWxRPb2ByfnfoQvt6fh03i0kHlIY749PUzPT7SM0Jj+VYzsAexgC8onhzxyNl1mIRfHuTm2dVj8cjTJWNfTDL8ZPlgl255dQ2MwCedvnEpB/rxgC7iwzckjfGQb73TZ3/WVw+s9as+00Hm7Nlmby0/ebGxbg6uqTlb/vghL3d8scE8Ph39LLfuE/75qHdd13xWB1/08a3LkW3+2Jvnj7zDRXGM1Vle5PT4P2ujD/zwl17+2OObJ+8+KSewDuXIj7k41iAeGzz2wLdr6z7AJ8+vWOUbXz1ITP7lbs03nrjiyZVt8eoVHUSfTnH7N8A8GYiB6Ilz+hJHjPpYr878+KR39sJcXuziIf838I2HxKiPzREd/odhGIZhGIZhGG4TvCnuG7nDMAzDMAzDnwi++fnyyy9fNuMfe+yxC9mk71uiNvsdDODZhEcdNNj8t2lv3cGjTX6+8PqVnmzx+DRHbDsQcXhAbqP/Xmjj34jE9nch7wc5yUd8hxEO18ChggMHsv4uqphi84nXIQi7bOnTcTgjrrnDE7momR+1OTChi9TkcBZfz/jH8zco+Sg/ePe7332JJbb+kT355JOX/mSHX8/FVWN9wWPTIYu/fSsn+SL26pAfHfnAmZc+8MEOrwMY9fFvXb1i6CN/dORSb+jQ1yP+6fPPr1GN+NbiQTx2+QP9cVCmN+Timhcf1EXGt5jlyA8962qha1QfWdfdKB/EF9/kiJw/uZrLjZ56+SXrOuTzmsfG/QJk7PHEkTcdvtwrctQHyJ+csjPni561vFB+gE656S0fiJye2Ob51BP3peupRv70vvtIz/lzP/NjDfWDvVrk7XpZq0UsVE/lDPzj8cdeHvLJF7/6W/302VqT47nX6Mulay5H8eiAufrp1fN6KddrKiYdVK+M+OIg/q1ROeDxUe5swBqR6UF5GKubT/GMdNmmxy/Cc91dIz3Ov7/XTVdMMfTKvJzULw7f4pPTw8erb/1bnR8gkw+Ip+9ykBfiKx/i0cHnzxqJCe6vV1555VLnMAzDMAzfxr6ROwzDcDuwg9xhGIZhGIbhh4IN/9dff/2yie9QAHUgQQYdSNi8xzPatHcQYW3TvoMHG/kdFhgdABj5M7f5b92BgZEfBwHGNvvpnRTE6aDl2k8QxwGDwyL87Bvl0qEDHl01ddicbzV3SIFf3A5IxMGzJs9Xh1b6IRYiq2/1AVk3snVY48CET/bm7FDxxSITh5wdHw5OxFJXcegVi62ajMUBa73v2lnTF5Nvfvkis05uLhcyo16lg+RgXY0gpljZ5ocPfDz68iOTb/WqzZh/crmwLUd8fsjkn49kfEMxjaB2VF/kzVa8roU8gA9xjXhnHXhsEf98ydu14wMfTxx21uz4Q8Wg634UG+SG5yCPjV4DuTh4dEC9xaJHrifgUA0f9KcDc3G7xzqUZyOnekW3XOnpufji0Y3Hnj/54Pkc8tf9pnZ94q8e0LXmj2719lniB/SSLXn3Mv2zT+TyrCaQPx/xu9dCPcHjF8qv+4kP8dM1ylE9bPhnQ1cfqkPd4tbPyLrPjDUd4INfiM+/XPLpOuaTTP1yU2815K/7mRzhq+XsVTmIywc5n+TW+PWrnrBJrg6jNXLITEcfzI1dw2EYhmEY3sYOcodhGG4HdpA7DMMwDMMw/FDoYMChkc3/DpjARr7N9zboyc8DBzI8Pk4+f+b5cShgjhwI8HXCgQBqTp9PaAxkHTKcdulZn7byMEZs5V1uDhvSOfPuQIO8mObqNHdw0eGGHrAV25weeT2hz1e51kcx+cCvJ2zN8eRpjkcX8pEOe6CjJ8WUV/lan4cofKXDB8JzgNQ6mbzpyhmvfNQLajPHy6YR37w6+KFXDDz54znowbv2A/HpJ0su79bk8lFzB2SnL/JTVz6gxuzUCfwidmRswhk35JMM1KVuPutDvvDKIx/qAz7oo3pujp+dvE/7emNOv7F7C+ozvtzh7LU5yjY+O1TsZOXApnsrHnm+89P1qL8OF82zV6NDRvp01dOanG3Xy5qdtX7Kj259Juv6nDxE33gN8c5rKj+26qYvpntUHHro9Etf3iefrXl1W4tfrHJTB/14Rmv8s2ZQa3MyfvlzD7OLj6zlJAYdY/nLA8q/+1J8MaqBHZkaWvd5QXKhnz+jmHpF97XXXrv8oFDrYRiGYRi+jR3kDsMw3A7sIHcYhmEYhmH4keD8FpjDAQe6Nu7bqDf3zT2HADb1bfSb25x3aEDe5j99fOv0kDmchwIn4ufXoUDxEOSrHOLDOQdr/oANf3LDd2gB4lRDhxFnTHw9wHMIUl4OM8zPw5nyRdaAx97IF7A58xLnrIXcYUhzuqh+0mPXIavDF/DtRXHrIT1zvPIDPqzZmtMt9/SrHV8OzcUsfn7Bmh5/rdGpa9TDDrxcC/6u9fnAr9boxMkzlj8
qVj07ce0H2NKTmzH7dM3DGfca9Nj3mbGOqgX0CU5f2UF99TnElx+9rhdS22nbeM75qAdy6OAU2OJXO37EDo9OuYh92kK25HD6Q9XOTj+6j4DeWR89/Xcv89d9714Rz9yYf37Y0MUvpzNPSIbHjj9zduyTtw7mxSGTP1xfu3x1H9djPPZGxI/cI/6MfWaKgUc3vjziIyA/ryW5viE/zFHO/bslTyN9fulblyM+HuCRAT/6hVe9fOa3OsjLTR98W9iarb+/jsyHYRiGYfhO7CB3GIbhdsCb2w5yh2EYhmEYhh8J3nrrrcuBrgOVRx555O6Gvw39fn2ojXob+Tb1rftmHT3kYMBhAT06bDvYQObxbPbjnciGrMMBuh1qBDJx6MdPBxWnXPHy2SG1ucMPOg5AklcbHjs58iWeb5ad9dLVp2p2YCEPPfQt5w49gF5x8fgEPvCr0ygvPhEeXTbm9OVERy5i1NsOVk4Sjx9jubElyx/ffKHs+KNbvsb6yR8d8/oMbNLNV3XqD7kepSNfPsTBo2+sJrZypMvWeOaMR5dvfNfMPekap2PMlzWw63pE6XcNT7vyTIe8EZrL5eyRz1P3EdQT8eLR5b982ImpViNK35x9uUD+gjzw+AF6+lsddOnwr29k6SG2QC/d61qzFSN+PDXyow94/JcLyMOajK0Y9PnV+3J1jyL9g64B/+XAni+xqgP4rAf5RnxUD4JzjeiXg7U44AdZzMvXvFEc+tbkPvv4EX73FeCpg66c5F+++Sn2mTO5PNhZ6zc7NvTOX+vONr5RPNBTwBcnHb74BfmI2TdpywffaI1fnK61+x3R6QCXX+MwDMMwDN+JHeQOwzDcDuwgdxiGYRiGYfiRwib8yy+/fNmUf/TRRy8b++Y27TuQsWlvY95mvQNfsOlP7zxE60CiAwo2HQokpx8Pkjl0cIAj5hnXocAZ1/xecMDgEMLhNF8doIADBr7FZ4/olLt4YrFPrwMOPPL6gh577LG7c7rngQk9JB+/ZtQhjD5Y05ULnnggvkMgPvUFxHNd2NFjC+9617su/XB4Q0c8f4+0w3U10+WLfT030qdXzvrLnswc+CZTc3ry41vP1NUBE39dm3qXX3XQy5d86eLLl63a6rE89ER+xUH1Hg9Vn28AiiM3MejgqwXMXceuB348dVnzh1dNdIstDz7///bu5NW67L7v/7Gttvq+UakslaySZDUlS7Iay7ITYRk5kBhDGgIhBA+iQSCQcUY/yJ/gmeYZBJMQPMnAgcwSCBhiQQgkQZZsy5Kq1Nqymiol/vl1VJ/K1vUjW7bVnLr3/YLF3nut7/qutfe5zwP1fGufq8+e96wcxSBOLs/ZHuQ2bp59OxdjDX1it5fjz9psH9uTnIt1bl3nmnl7HjgXszVc2y/2gVzOteN9+1w8zxUEzdM8W/e+P8Puwc+Bc8/n2Ld1nTvKv/tH7O7HuTWt4TOXA9fm+XNu/54Pu1f3Lac84uzLtbXlRIw1NWvhvuR2r1vfuXnyOm6/7kusNfzeX2P2cXXu7se4fRjb2varGdszNUff9i/W2q6X27r60e/nymewXJq/25bLc/P8WD7r78+wufLIa5496zNPrOZ+xVjfPE0O89lz3Z9L1+7XMxHj3Jw9B+t84QtfOP99trlJkuT/qZCbJDdDhdwkSZJ8z/mHeP8A73cbKhQoGPpHf/8g7x/5/WO/4oF/vFf0UfgQt4KJosOKfZoxMeLldi4OfQoPmjjxR8shXqzCg+Z6OTZnsdZYMUMhyL4UEqy7ApH7Me5crGLEctsHxtwHChSKEyvEItZzMOaesZb1Xctjj66t7zl6hnK6F/MUy+VbYcea+ozZq3maPZq3+15edt97fhtHn2au/WIN+7Te9mY/K/TIYdw8z8VcMWJ3T5o4e1fYES+H3PrN2XPaXvc8Hbcnx11vbf0rXBnTj/273r3Zk3nHXPrsBbHy7Vkf73fnYl2LO+7FGo57DrtH/XJtb641n4/PdkU1c9YndvTvmWKu/cnteTlq9rP1nFtTk99+xTrKs88FsebYm/nGlss97J4w39ril9OzXtFSrHFH18fP2b25tv5ijs9ATvn2rDbfOp694/bjea3P5y6fPkd9e1765Nka9mJN9+rP5e5TTsw3hjh70WctsXLhfHmM26/nYL3tw5j7UYy1xj6P4/1ruxa7PXsWi8c9WU+/+92+N+a5sL06yiXGuDx7JsY3V07NmP3Kb2z35Si3c/2Om6df33JtT/uzd8xrD+szLo9z9yxGk8+Y/1Fl95MkSb5dhdwkuRn8V1aF3CRJknxP+Ud8/wjvH+S/+MUvns+9OamIoThg3HHFC+P+gZ8VDFbAUCxxrShwtXBifG2FhcXoOzIH/dbcteOtmnyKPYodrhUfHM1XqFkRQ9yKJPLal9jtRdseNTlXvDBHrHmKK8ZXaHHPrs237p7T9oNn5PxYpNEUZ8w3Rz/6XK9hD9ZYsce5teWVx7Vz6+7+tO3rmGufh2tz7Wtz9gzYvlwfm3tgcXKYv+eJsX0GjnsOrF+s9eTbWnvW7m370xa/Z4S+3RuLFaMfaznfZzzbgz5HTZ+5jvbjaG+eOXIjlv3cc8yxfcuxz2px7B6sfVzz+Odkdm/r29zFWWM/j5o8u19tjNmTApycx2cm58a1zdN3jEXs8RrXy7H7cb6c/m5ZEdJ+NdfLL8ae1yfOteexPy9+rsWJMUdOxz3f7UPMPgPrmLv+5fDn+vh85NKHNc3buq7ls+7ybm8b2zPdGvaztfcMnGvyauLN2/1qW3/3sHXY89O3+9jn7j62vtzmWl+fOebKI16M/uP96Wd7sIYccmtiVqw137418zTx/keg/Y8+SZLkz6qQmyQ3g/+qqpCbJEmS7wv/WO+NKv8Y7x/rvVG6QgD+YV/BwJhz/cciiuOKD2KOfWyunMd51l282GO7an1Xj3LZz4oZm69tbFxvHys6rEhzNY970ac5X4HH8XifzrcX8zVj7hPzrXE8juLInseet1zLYS3XW/e4P/3Huc71bX/rV4RZTn3H81ku8zC29ZfHnOP+9SFOY/O3F4zp19a/Oc63lns0fhzbublbj+Xe0ZhzcQqs9n3MsbijPeON24fj+uWUi30OOB6fw+Ze3bs+BcJdsznbn2vrOO7a+tsDznd/xpcDfXLtc9qY+M2Zxdqn/Rt3bY/mbZ/6rTnOj5/nxrdHcxfj2vjR1tx+NPF7hsf7vDrfz4S/I/b5m7P7uLpXuRQW9Ytj+9MQ6zPdnK29Nc0Ve9ybWPvf+LGZZy9iXIs3134dl9d9GJfT0Zw9F2Pbr78Plsd9O9//jCKnOY7GjR3n7z6NOdrTntP+nt3zEm9sxdnFbw19W2NF3I0dC7mOCuFPP/30C88+SZL8WRVyk+Rm8F+aFXKTJEnyfeUf5Z955pnzP/h7K3dvHe4f+10rAvhHf9eaf+Rnff7hX4FhhYMVThQCEIMY//jPCh4rHqxxvF4fYuXU9K9Y4npjK5TqV8xQeHC0p62J892XueZ5FnIqXpiz+9VWZDHH0RxzrSWXc/PM12cvnqf7NU
c/ru1FDszduvaw5yPf9rB73P3uea7PPGuvX76dy7/P0LOQV5/9b9/yINeKwOYfY7cP9NnnPnMN1/bB9qzPXHt0tIavnPVs9pzcp/7lNFe8guMx35751kPO7dUccXK41z3n9S1eTn07x/hijNmL+3Qu5uq6zs0XY10U4PQZW17j7nH3Jo+ce7bO2byda8b0icW1ObtXeV37bK23PV2NPc4Xe1zX+e5v661/cctlzD2sb7n1737l1+zNXnbPxp3rk8u5eT574xuT17Py8yDP1nEt5+5Zn7ka5ot3vf3u594626Ox/bncnOO9OWd733pbU2Oxi7Ouo3G5jPmztHWNu4flWB759YvZHvZV0j5jfdYwf28WH3O4D+POzfFzL5f15RUvB9uP/uVcs5aj/cgpB/pc+ztFbn92/T5wvyNXbJIkubUKuUlyM1TITZIkyQ+Ef8T/xCc+cXrggQfOBQAUOzTFA/9gv4IG/pF//9CvEKKI4B/4HRVPHOVURNAUGMx3VERYgUU+RQKM7Xi1KT6Yv6KDHL4OWjHCuDFr2q/c+u1vxa777rvvhcKFYoaChPs6FkEcxSLOtT36nZ3WsqZ+BQ3PiRVI8DXVd9999ws5/C7S41ePbh1FGjns07Nxbp+7D3Gei3nW3zNYQUaxcIUV8eaK22dlvrniHO15n9vxd3aao+1z2udm/j4T8zT9ci3P9iGfufo9U7k9p92HmM211tbY2sfCkmfl2TiX07hY7FGcfntzlHPPRR7z7EO/GG+YG9vzdNy9ymuPYj0z9yFG029ck2f3ZH/2bx/69pzks9etZb4i1/ZvDfOdm8v6Ha0pn1zW2Gfoedqva/O2P7HmWU+M9bYH18dYn70xfcasIZ+1PTO5zEOfn3Wx+sQ62os+157TPkNzFytGO/ZZ397YtfH12Zc+Y7hXzc+MPrFybv+LtU8/+/tM9Jmz+7U/9ybX9mSOMU0u+3ev5mBPnovjnoE8x+cnp/vfZ27MPjxXebaeWH1+BsTZn9z7u8m+5bGO9YyL0y/O0Z6/9KUvvbA/a+r3d4oc1vJtCua7Fw1rI1Zb//a6Z7j92odnM+Jc75nt7xhriXdfnvlnPvOZb5uXJEn+rAq5SXIzVMhNkiTJD5Svy1QMUJBUVFNMUAzwj//4h/wVWBQdHP1jv3NWJPCP/4oTi5dDrGKII8uzYoej4oAY8fYhjz7nihiKEwoZ4s2VYxYvj/7ll2vFEZwrqhhTuLDfnZu/go51zbWuIoZiiljj7lm/tfS7Viy29xVrzLGm692/o5gVXLYnMcb3LKyjz3F9Yj1PRSvkcm4/ctqzHOZYf/tdHgUY8cb2vO3RUbMnc3YP+3zFbG/br2v5PEfravLKL8fu11xcL85eXHtmYhC3z/e4hs9ErDnbM44b17auONyL/SuWidN8nnLs2Yt1LsZ8MdbWv3vYvu3PnwfzxemTz36dbw35js8Tc8Tok/s4V4zc1nUtZns4rr8jYsz3/J3vc/KzYS1j9r+9uEYOuc3bWiwO6+45OzdXbnMd9e1nVUOMz3J7tob15dG3GPcp73JZ1z3tWYt31GePzrevHf1sym8P5jm6l+UT5+fBUZ8Ya2vr9zOr3xFx+sXY79ZyLb+8i7FH62m7N0efwe7dtXli9+dNLs/IvSnwyrn7Fa/f83HUFFj3TI55xPp5Fbu92Ndxvms/C8vtvo3teeqzH33uy/Wen37PeHM0fdbcz44irntxnSRJvrMKuUlyM1TITZIkyQ+UAod/pN+bWt5qVERY0WXFDP/Qv6KFa/2I0xQNNDFizXWtYCB+ccb14aiJE7PCw7EthhUStjf7da6osj4NhRXx6zO+fbje/ZiLXNYRwwrS7nf7cy3OUZ9n51k5l9OYdR3t/VhUMV+8OGOe+Qo+8hm35vZln8vDii/s8zGOueaJ2b3uuVpDn2ZcM66xOXKiX589m7/43YM15ZZ31+K2Z/HHOOf6VmgyD2sqrMm7+zJfzJ6dHNruQdzaPt/9rOnbnsWbvz0ZE+PaOObrN751jZt3XINdG7eG/TnX1m++c7ms5b4Rv3EWN+KWy9qLc99z3N/uzRp7RvIt756z/uM8P+ficW3c9frE7r6cY2z353icr+1ndffK5hzZK8cxR+ts3RGzPbuX9e149Xnts18u5z43z0le8fJt7T2n5TNnYyyvI/pXxPRZWm/Pd/sztvvYfhzF6NO2p+3H3wWY5zmi3/U+u60tlz3Jo89xz16/893v9rX/eUROcftzYl05jDkXh3HNPSreGpfH/9ijyZskSf58FXKT5GaokJskSZIfOP+Y7x/v9w/4K3IpAGiKBPr9Y77CwYp+7B/4V8wxrmCAwgLHwggKBqwQsQKFc4WHta2vWV9zrgCxedr2Z2yFDu04R7+2vc3W2frLv+sd3Z9+881xLbejPk2cPkcxzhVGjveCnM63niM+h42Za97evmTz3a+8nr2x5ddvH87d//Lrk2859fkMttd9dsuPc/3ym4/P3Tz92jFejFjr6t98+e3TWsefqz0f545sL2I0/Zu3WDGaazmOP38+H8Vh/cthX9vT1vGc5DCOsZ2LMWbOLJe29dgcnO85O5eDPbvl07/94bg/D9uTo3n2haPrPRvXa3suI25/HsTu52B5N9+ajq7ZsxGz3Jrrjcmtbf/OHfc8F+e4/S6POM2YPTnfmHxijxQRt/7m+jnQJ8eOGLMeYrTjvWp7/s5nY4g93sP65d3fN/vzJpd+1/p37jnssxS3+9rP/frFmmPcfMTJa74x+xC3P29y48899me+Jt7fE472qm1v2595cum3B+eesTHXxs3zd5CmXwH3C1/4Qm/jJknyXaqQmyQ3Q4XcJEmS/FD4h33/cP+7v/u752vFgxUdFCIUGVbkMOYf9v1jv7aCxbF4YO4KBDtf//rkM39FAser7RhjvqMChP0oLG4va/oVPeRfTvO2P/cpxrXc9rKCijnWkn851swzJk4zx+/RVfzWL5+8ih6+kne5zVVkUSRZjDFfKyx2z3ax28ee6cbtnd2HYsv24h73tbHsftzHPhPcg9zot2/cq/1tzP48W/MWY1157cVRv3zmWkesMTn0r5i2Z77Pyn3KL4d59m1c396Odr5nIaeja/24tt/1b1/Oret5LHbjYvVZyz6c6zMmRr9r92VM/3IgRpNb7Pa1PPrtiRXV5Fh+ceY4HnO5Pn6Wy7dzDTHuzbV55ltPTsc9Dz97u9+t43w/P+tHn3Px7Blo1nAPy2Fdn505+5zE7bkbdzRPnFx+NlxvTfk0c9BnbTn17VmZ6379bOxefWYbc7Te9mI9zN+9izOuWdN8/a63V+e4xrj55lp3bfe4e9k+5US8NXw+jtvH7gmfz/azv7/MX9zWMN9Rv/sSI69rcz2X7Umf/Wj7fLaOPn/m9Osztnz67MG+l9eYb2XQ8NXKn/vc586/K1dckiT5i1XITZKboUJukiRJfmgUCPyj/ic/+cnTF7/4xXMx4N577z23FYoUAsSxAsaKEgoQzpHHHEUIfeJWpFDg0aeYoC2f47GJt4Yig7X3tqUxBQ398
rhWnBCvz7rWOPZpij7yLbemGOu4AolCmLXsSw73JZ/ChvXtH2Pmybc48/yuYcWQPS9ryruCJc6PBZIVbjyrz3/+8+d8ru3B/hyxhn75lxt5Vhj2PLZ//VvXXuXV3Is4hR75/A5PuXxmnplzcXLsfj2/fb7uTZz97Hcqi3ef4uR1br7jCkpi7M1+xLk3OYzJodmbPvs1V594fa7Ntwfcr3H99rTfYYxrn6nPc5/F1ld837NDbvcmn3NWSLRPczTn5sop19YW61rDc5Tfvs077nExchkXZ8/OPQfn5jvak7jdmzz69mfLc93/SOBnZ5+ncRZnzDzn+9zQ53rP2bV8zvcZ7Gdj4/r3vDbfHv1MWmfPavvYZ7Q5xuQVb+7m6DMu1vXWWS5j1vFc3Afu1/h+PuTbnwHMM2be/hwZs5bPzH35eZPbM1gejIvzeRm3X89GjFjXcjnf3sXaw472gtitYe/m2be4PSNj8ouzBzH2t89OjDzmGHNv27P7co+adfdMxKzfHse9yWUvcjvqs4ZcfPaznz0/G3mOc5MkyXdWITdJboYKuUmSJLkI/lH/U5/61Lk9/PDD5+KBf/T3j/r+cZ+9eYoCg7EVNTCmGLHCixwoGihCKDSYt+KEtuLTChDWUFATg3nHQo9rceas8KjAYx/rP/bZ+/Kbywo84vQrmlnDXjTnCirmbp/iPKPdqxj9CrTWWTEGe1c40eeezZNLTjHmyqOQoxBsr+Y4ivMcl8u6xuQy5h7kQZ/9rghlvjEx8lvH2uZrx7VXwNHvWhHH+T5PnwFyuzZ3z9NnvOfk83Iun3Hc834OzBVnT/o8F/Y52vOe6/ZizH62V3H77HcPiN8a+teM7+fU9X7u1m+/8rnWv3nrE7O1Nnf73Z7FiDWO+zef/by53+1DrOYZLa9nKdaz3vM87kXs5uGZYH253bccjvJp1hNnvmt5rbGfGRaP3Ltv9O8+xLlv96bf9YiXU5/m+ni0L20x5huznxUo8fMlv/vxzBajT8x+rsy3F3/ePB+x+uV37WfIfdq75+MZ6Nfnmcgvxz4nY9ZaXnZtv2LMWX6N/Ww42rt+Oe3ZuXnHz9I9aPrtxZj85orR9rO1vRoTZ85i3KsYDUfz7Nc6ju5Z3PanidvfP/r3nMSuz/+c8cwzz5zHrJskSb47FXKT5GaokJskSZKLoACgIKHI4is2FRE0RQKFBP/4v2LIigv+0V9b3wogcrleUcS188UrHujnWIxYIcTcWeziV0hZvGa+Pvt3rnAizrXzYz755Vpe8Ss67R62hjGcY3z9zrfG9qwtn+cl1r1rizG+AsuKXJ6JPmOut47mXN+KLmJ2z3JYX7/7lE8RyHr65DXfmPj1mW+uPvQ5t0dH68mhsIMcrs0xVz5H/Zjj2ryd2y9yL795u5bPfHv2rDZfv/M9j+XbHlw7N77PTD7Xm7d9bS3r6p/NFT/yHp+3tcz1TPTr047P0LU8nsviNt+5Ji/6tsbitk/X24s+cXs+xszFOv4M2v/uc3tw7Vyfc/e8c1xvbbk1ezZn+bZXY8u1ePe8PsfNW/Fxz8P40fo2Tz6sze5vz889Wste5NyfI9dyuFf3Ik7/7lPccQ3ncoo/xu2zc+6+Nk/M7tNajpr5YjS59bnfraNo7FwOa5grds9DTmP6zMW1MfH2KJ91NndzxOh3r/rEWdO18z0j5/7eNrY15Vm/dfXt2erz94YYBd19pbIxOZIkyXenQm6S3AwVcpMkSXJR/GP+008/ff5Hf//Yj2KbgoIiwZrCgD6FhBVZ0M8KGQoDxlzLrbleoWLFicXgWr9cmuv17bi40WfPy2vM9QpG2Of27igWBUvxK4xsXyv+GFtO/etz/67NOfZZV/+KNxvXdk/yyb/97NnoM3f3bVxObfuTR6yirRj36DPSv0Iu5mp7rubILX75XaNAaL743YeCj1jrij/uYXG7f7aePrH69yycH+/H+fKOuM01pu25bb1xLbf7Nsf11ti1893zrh217WG5xvk+C6w924ux5ZHbPdjr+rZ3Y9bYvO2HxdmzuPXN9sbW2F72zDbX9eaap38/98vt58qYJl7/YpbPfnC92K3pWcu7GOvJ4Tk6Xy7na+aKOfatyX28tqb4fS7WwrrGj8/UUd/2Zs7Wmf2dtf2Zs3O5Ha2pz7U1fTb63Iu/E+RcjLHdz9ba8xDj2vhyyLfY9TvuvozvfvYzYV3X+jUx/g7Whz7rLOf24+8Zca63X82zcxTj6O8GsXsr1zcC+Gp3X6tv3HpJkuS7VyE3SW6GCrlJkiS5SN7S8nsTFfMUHxT6FAZmxYq9patYoE+RQEFCUWHFjBVOXOtXZFmfIsaKDZtzLIbsyMY2RzHCuHmuHTd355vDiqSKHiuWiDW+Aos+ccuxfbpesWP70oxp7tm44ohxefa8PKPls2c8A8/KG33bq/nWM0+O9W+/e65y+Vzci71q9uBa/Nayhmvncu9crPz6lmP3L8aYGHvduq53z+Lt0fme4fbI7kMe+fVrYvTrk09zHyu66bc/bwfqc65v+9G2P2M4yqktRi6s6VoBy563H3OMuT/nW2dH+xTrvvcsx/n24dy6jp7H1tM2x16ca3Ivbja2nwvEiXG0D3uwnnvYM/R8jJnnWrw4621sc+Uxd/Rz3Ic972jcPs2TG327112z/e++j/3rswd2jX3u2jrHnw30H/8c7Nku1/biej975opxr47+LOrbXD8Hzs1Dbtea8621tffnbTkWg5j9uRa75+zzsP5+npdDs2fz9Nvz1pFT3PaiH3HymHd89mKMWdN6+znGPfpZMe7vF7HGnRvz5824N3G/8IUvnJv+3VeSJPnuVchNkpuhQm6SJEkulgLAZz7zmdPv/d7vnR555JFzgUHhUaEIhQx9CgorZChMrJChYLAChWKBYsOunYtxFL9+BQtFDrn8/tWN6V8Bwr7uvPPO8xgrZhx/t67rFWM0zD/+Plx7N8e6K5g4N2YP7tO5PGLNxR4UPu66667z+Apn4lxrcnk+9rPnpjjuaD+Kle5xxGkKLnLZo2dqHxs3T27rG5Nfn/xyiVW8sob1PQPPcL/Hd/dnDXu2hnnG5PcZyXXst8aej+Z5WVNuexG3fPIbc75+fXKjb/dlHf07uue1rWu97du5fYjfXsRYQ59mP2KsYVwu+9Enhxh9+3zWxOt3T673nD1Pz8/nKmZr7xlv3eX3uexcnOv9rmH59Ru3Z/k0z0S/Zr3d7/biKMae/C5T+fZ87Us+18537Xnao71tTW37cG+aea61zddnD9Y1f5+Nc3vYs1ou7MUc4+vDnF1bz/n2Ycwau1+M+Rlcv+fnd0ibt/mekT15hq59PvbkZ2H5jcuhye1av3tTyNyz3/1t/9uDa/3ol3fn9uE+t678ro15fv5O2HMypllnBVT71+zFmH1vf/iz6mdncXId97I9bNy1/NazvnvWv59/R2/cepbmicG3LijiGne/SZLkL69CbpLcDBVykyRJcvH8Y79irgLBiiUKDIoCCgjGNQWFFbgUBxwVK1YcEavwoFiDa2OavIoNmLe+tRUb
Fi+PosXWtI5ChT7xCiTy69sbZ/rs2bUcxlfwMSbXrlfkclyhRvHIWgorYvUp7lrDmD2Kd1RoRg7r6DPXuuJ3vXuVy/PVbz3PE4U7fda0L+tgHXnMt8aev7ni9a0A5J71GzdnRaQVdYytsLz72hzn9q+NGLFyGb/33ntf+FnAXuRHn/vb2u7RtT04yiPWfuUw13Ox9uY6br7jLMYc+3AtxhxNnzH9uzfNWvaxdXFubPdhDn4m7XXrbk+erfM9+xVt9SFGTgU85+bb2+ZZZ3s2X5x4ay3v9mdcvOdi3L1o24d7kXtr24u8mj6xcuzeNf27R/TZj5+vzds9YMw6jvrFsyP2sALyMQbn+jX7WR78edSsufuwB3McxbJ497zn6c+E+C9/+csv3J996pNzfw+4f/ey/OL23M1zbt7mits9yyMGe/dZbN39ucccsduDnHJ4JtvD9mFfxpZPHp/b9rJ9Ylzc7tdR0ydeE+NetqZx+1DENabPGP4u9z8FLFeSJPmrqZCbJDdDhdwkSZJcPAUDBQrFBm9xKVIopqwQoZCgaOBasUXhQIFgRQbz9CtgrCDhXKFi146u9WviXZu/AsgKGgpmy739GTNvffZy7BNjjwphjpp+6xiTf3vcuHN7k8e1e3aUG/F7Bjhqx2KbePehcMPW2FyFKmuLXX5rGtf0m69f4cUc18vl2tgKX+Zq7s06mhzGNXNdu6flWDNnn8Vym79nL/+udw9it8cV3cS6lz1Tdk/bk3NzFm9tDQVLc11b07nc1pFH33F/yLW12DPavRozj43tObnevbE4186PhVzshT2r5VsRdpZPzDG/vcjnuDV2j7sHeeTf3uTwXBa/fXsGmvPdq6Oft61nfH9m2fyt5Xp9Yvas5HS+/dsj9rk5xvaZbP+aXIthn7NY43Jp+sXKs7yL1bc/r/o0e9LYvsyT1z0uv/swvnlsP9vv1j3mX/NnTb7tw7PUh/XsYfdgvT3/rb8+uR3Nl8ua+3lSeLWWfrkXa+zY/N27fTqK19DnXBHYmHz2oM//nOG46/397W1c9ybW/CRJ8ldTITdJboYKuUmSJHnRUFDw5ps3zBQGFB1QpHC+IoiCAooQrs1bkdK1IseKIYoM+pyLWZFlhQt9K2Cuz3rOVzAxZp5+8duLPufW2l62jmu2l2OsJr9rBQ/FGufym2dMw/WxGOJ8ucU43/NZzuUQa32W/7gX7Fm8fYjffEdt6zkX63p55XOu3/Xm6lPUYfdkzPlysGdqr/oRu/vY3vf5bj3Py1Hbs5RLAQs5lscRubZHc1aA2nNyNL5rtoY5GotZbv32s7XF67fX5Wd7Mn/5txfHPZfF7R43Z89RzmPc7l+fPPowZ2uZs/27ZvcwYuU6Mm6unObZkz73unW1ja2AqM/9HOfu891XQYuxJ+sujxy7J3P0uV6Tf8/B+Nrma66NL9/xeeFoL1vL+qzP2uYsF/qM78+IIrafR+d+jozJ41wzV+7tQz5/x8jvWh7X2jHeuTy7x43pc41zz83a1t1cxGPcGq7FiXHUr22eJsa+WOyehWt7P+ZytG9FYk28P+vOFXb93nPXciRJkr+eCrlJcjNUyE2SJMmLisKAt7+eeeaZc+FgRZIV8RQRFFb0iV0RwrUig74VgkahYwUQRWLFCXk1BRlzjK2AIqdcrjUczbtVn9zWNFfRUaFHH/axApf1HI2voGI+4sXK6dye7GH7E2eOa2uscCifOWK3hnPxew7H52VtfVt7556rGDn0W0f/5q4ItfvXt37x5m2fxo+FYW33sn24Rox+62BNMdubc7GO1tD026951nItXox+19uLvJ6VMf3abA97psf92Yfrxez+dr7nIX7PyH3Kb65zfebrk0v8YpZr+5LP8Xg/1ln84tyfudZcPmPsGYjbs7IXn4W3bY2bY4193vrY/nZtnj3JIZ/7MC5uP3ty7PMSsxz6NOPH+9FnrmtxyM/Wcz+7N32u5dBn3hrLI87RPsS5X3b/8trbfjZwlNfYrsXK4WfB/brGHuxfzHE/+rQ9a+v6u8u4a+vJ46i4uWtFz/Wvbf9Yd5/D1tz5YrZHfeabo7k2117M07Y/jJvrvsX7+/Dquo72o8kjvzzmOXePK+K6lsNXKWuKudZIkiR/fRVyk+Rm8F9kFXKTJEnyoqPQ8Ad/8AenT3ziE+fCiDf5FAgUGVYMWrFDUUW8fucKDooNCh0KGOavoKHwoHChsKNvRQp9jmti5ZfL0e+q1W+d9d1+++3nPufiFcvkUfxQ5LAXzRr6rLlx8fZrb+5FvPkKIebsd9d+7nOfO/8+XPPEitneNld+e7E3BSN9Crb2uQIWcurz1rO5cm5v8prv3DNGDmuJW5FOvH7zxe+5yGvuyKF/59a2N7ncn7w+H3kW63lYY/ew+daxpjhN3H53sBhNv89cnLz2aS2x+3xwrgi1z8L69rPzEa9f7j0T44pW+9mRy1rWML5nhTnma+bpN+7+7NPRPPe2GNz31pVbnPX2eRsTY2zP5Liu3OLFaPodrbX524vPzPk+292b3NYzZh+el3jrGbemz8iYfMbMkX8/Az4fayL39qK55vj8jOuXU3Pu2dij8923xuI1Oay7+eb53a3Wsid91vAzYd/iN3/5zDFuT5o/I3Ka63ox4ncfrpdrz0CfvOZ6nppxz8y4z0fB0335sy7OuDW2ljhHa3jeyCm3+daST7wY54vd2J69GPdlH8asiXjkxBzNtRix9ra9eG6+Nnl/zsWJcS/25Hk5T5Ik3zsVcpPkZqiQmyRJkhc1BYzPf/7z58KMooxiEiswrMChCOEaBY0VRVbsQMFB0UQefca0kUfRSqFiha+NW0+feQoYxz4x9uHa3BVbnSuiKMQuRr859qsoYk1j1hKrYKVP4US8Aq0j7sec9cmjcdyzMdfWcL8rtsm7vblP+Vyv0OTe9lzsWbxY48e5zsWLUyDavq1tzHO2l+XdfDGuxdujz8kerLnPZJ8djitkOW7Mc/Z8zDF/e3eU18+I9Z17Xsc4e3Af4u1LPueIdX/LZY09O/OtqyCtzzz9jq4xzzWOPoPld//bi2aOo3Frbd09K+eae9jPmfOtyfaHeZtz3POeu2c++q1tXUd5jXturtl+dm1NzTruwxxjm791js9Uv2Y91/tZFePZYE3Xfh7N9xm7NmefubnIZcy+7VfMMefufeuuf89s/5OEOM90z1Ee97Q/c9urezPf0T71L7ec9iCP+ft50ed8zbh1/ZlQCPVNA4qen/nMZ86FzxV79/Mst3zLyfLo82d/7MH+PHtztz6e5467V/HOzdk6+h19HppzrOlaPs0ezTHf0bi/k3cfnp+8SZLke6dCbpLcDBVykyRJ8qKmOKB4oEihYPD000+fixaKEiiuHAs4K3goUKzIsYKIsRU/tBUwzF0+xBu3ztbanBVCzHG0phh7YHHW3x7EaOvbuuYZt0d5V6xbkVURa3tfXuzH+ojT5BK75yHP5rJ7UHDZs3JU1GTx4sx3rg999u3ake1HrONiHM3dXtn14nDPilt79mv65XQ/8pnjuTguj7E98+W
zN3PlWxEOcxar4Vkv/+7bPEfPErHGVjx2rnlejtsvW2u2jr3K57g+187NsZ7Pw/XuffdgDetubcU0c67G2R9idh/Wm92TPJvP7k8eMa7Nddx8c4xj3uIdPeNjjuW3lljnxjT5rKGt3/lxzu5H3/a8vDg/PoPl2/O0xjHf+jfX9eZb3/k+F/OsvZ8b456rczHminH0syOffp+dpk8+f279TK8p2Cp2OirgKnr6HbKOvm1An/9Jxd9rux+52T6s62jt3e+xrc9+7EMszjHOfm5cizHu7yB9+/tGO96bcUd/925v+hSmfVOA/bvP47pJkuR7p0JuktwM/su1Qm6SJEle9BQRFES81eZccUHBYUW+FXNWqFCIEedav+Z6hZHF6VtRypgmVlED5+IUeszZPPHHQitbV5/CyeYejyPGWtiDYsjeTnRtvjXNs5ftDTHWPxa0jIm9SuwaKyBrcrD8jnJ6HvazOfqMO2I9exTvfuXRZ31xro0fC3MbR5/c9uINwxXMNm6d4545jsvvWblev6Zfk+/4bHa9e7A381wjxn0Y2+e+a/fA1rbu8h5ziN1eHO1j+cTI5+ja+OyZa8u5nwP073ke78u4t4MX4/qY21Ez5rP0me45Lcf2wz7HXbP7FGeesV2LXz6sZX+7n51r4jGX/XyLMX8FVuuL2RxxxuV2NCYG19uPz0iMc3M0Y2KX35iY4704ei72ItYeNtf5np1Cp9gVN/WJUcRc8XZv1irMKnQq4Cp0aoqeirf+JxR/fxkTY67m7zI599arveyZYU/2vz3sfM31+vYcNrZnJP/mOtf2rJZfrHP36J4cPTtHuezdHu3ffe65JEmS748KuUlyM1TITZIkybWi4KAgojiiwKDYoiii8OHcUSFEcUcxYkWaFSmcrzC0Mc25PvmPMSt0KPjIra1YsgKw8xWLFmeuYok+RS9cm7vioLyul9ec5dwa5sq9ferH/bg2b/cm7/bsaEwOXGvuU7wx9nWs+4pl4/KsELc4Y9aTw3149sYRax39+tyDuBXnzLWG3M7lVKy0jraYPT9z9zzFymuufBg/7sWarhWFFyufftfLifXkVNRylHNHllucPCwfxlzbnz7zPG/9mG/f5iPWmDXW7MvcfdbmI9f2p8klToyfA3mXT6xzMY4aPpcVWTd3+/Qsdn+LM7aczt2Xtdl9afs8tq5Y9+HaZ+nnyN6P+ay1mD3PfV7WFmMNn8WK0tYR4+fRehpybI/acsspt2tz5drPAMuh6du94dpnIM/25bmMvcm5oqbcmnvVFDUVNzWFzb11u//hxNu3e3PV0d9T+3PjHsbeFUUVSO3PvuzXuX1prs2xJ/G41r9n4WhcPM6td2x7Tnt2YuQxpl+zz51r8rnXT3/60+d7s9etkSRJvn8q5CbJzVAhN0mSJNeSQoKC7v/8n//z9IlPfOL0yCOPnIs9K5IocCi2rMihYLFikaKFvhVKVvxYoUacsRWbxMqlYLVCFvoUgtZnXfMVoY591pNz+1MIUSBZcc5aipBirbFii1zGzd253Nu/OPvW5DLHGpoYxSPxxs0VI48x19bbM/A7cc1z7r7uvvvu89gKPe5FkWpx1t5+3I+YPes77rjjfL25K2pirjme2wpJ5su1gpyj565v+fTbt+P2KYc9bm3359xzMFec831uYjXk3Tr24Yg8YrZnz8L1ioj2unlyaubbz/Ivn3jk3H6Mace55pi/Z+WZGJPDuXvwzLaOfMYUGD0DuTf3uPftZ2uvz9yNyWWumOXbvha/ubiW33OwJz+bi93z3/253p72DPZ5rm/ri9u1+a79rNqPczHW2t6RT4HR5yuvGGN+Tqwtj2v94vTtWh753IdnrN86ux/Pec9aM38/qyva+kpkX5G8/7Hk4x//+Pl8fQqifn6W57uhUGqOPewz2f1uT8aWdzH4OdHv/u3VmHV3lGf3qm9/p+ztWnP1iUFe/e7dPXnG1th4kiT5/qqQmyQ3Q4XcJEmSXHsKC7//+79/LugqhCjIrCijYKNws+KMQoXzFavEKXIoUohVUFXAMIa52ooj5otXFEJOBRB9ik4szrwVGxVAxpwVJbc/42IdFU/0rbirX6yiC9aWw96dG5MPa2vmOR7nbT372f6NWUese95+vB2oQOZazNaTAzGemX7P0jMQ55m7d5bT/sUbX1FKHvn1I8fyewbH6xXmVoCyruvds3Px5jk3b+spGmIv4pd76zi6R2N7NvqsIQ797mN7ld+1WE2/+c73vBS/Frc9b1/Yj/X3M7NnIwbPFfnwM+ne5JPLfIXU7dF8c4zrc3RtX56zc/nF+Xyss2v72HOxxu7BGuLk8EzEy+uzRp+4rWlcrP3h51ifa/nF2vOuPQvrsf1ZW97te/dhjjHzrL984uR0T2LkMOZ896Atr1z6xbk+PivPWD4FWEVaR5+j/4HhU5/61PmN1E9+8pOn3/3d3z3vwd85CrYKof68bA/7jP+q7Nfa3ur1/OWU295c71nZt3txH9bU73zPAMc9P/PlcZ+ae/AZ6TPfus7Fu1f35Dk47pnKnyRJfjAq5CbJzVAhN0mSJNeeIoQChKKEAojiioKLwgOKOIoeiFUMWQFHEUiRQoFCUUlT0FhhyrnxxZuviV/Ba205HI/91tsaru1LnHM59SuejHFr25u1XDvqW7+5mG8fYpbDmHOFsK2xvbsP92RPuz+Mbx3sd+vpE6fJ67j7UAwSg6O9sHvY/VlXHvO29/WLc67Z12LWfK67z8XqP6619fSLdY58W1e/9baXneN+xYoxtnHz5NL0ufcVH625fWzvzsW53uexPZsr556JuWKOfdrWlZ/lV/QV59pa4vzcb654/dY53of9GtO3vOLc8/Jr2Ks97b6Oz2TP4fizIcY+Nt88jGEvI8d+Ju13+Y971+dazNWf/92bezZuX9urIqVzzf5WpFyh0jz9a/pW0FSsVTT1lq2ipSKmtq9K9j+IeON2b97u64X9XbN15bdH7a9rz8M+V3zdfe5etd3fnsnuzdiOu3/Pw33Ku3zO9bF1xHkGK0zL8728tyRJ8t2rkJskN4N/req/tpIkSXIjvfa1rz09+uijp/vvv/903333nR588MFzUev49cD33HPPuSCjgKRw5KiA4VzMCkN7c1LxY0Wku+6663y+Qo4+hSpFrBVijHsr1HHFEE2Moon1VrzSv7cn5fJ2q377QZw9afJbU597UrBRjDEmjzHztwf5FMIw15ztU6x7WQFJrHPNHPlW/HGOdZHfs1oRzvojv7Xl8QzEaoi3zvYhztrL616s5dqY496w3vOWU8zucTmXw7r2tnu0js9xa1l7+fbsdr9i5NLM0xBnH2LMs4Zr+c0Xt/W0raHPM/SzJ+f2sPnyiXftDWlj2ta21gqbYhzlVYizBuaba4/uAzn8TFlbvLni5HKUez8zYoxvXc9KbnvV9Mmx573npW9rmX/cn0Lp8TM2x/2LRczuz3o71/Yc3I+58vg5l1N+ufwZWrx9iXVfK3Ae9yT31pdPDjnFuXfzxfn7YPdovvUQp+8HzXPxLDTP28+Q56a53u
fniHP36h6w5+PejbtPfe5Nnj0rxz07cUmS5IfnIx/5yOmjH/3o81dJkuuqQm6SJEluPIUyBd3Xv/7159/9qrCrkKWAce+9954LGGIUTBQ4VnBTJHGtALKingKHPnNXtFU8UkhCrhXcxGryyG+eHAouiizWQTFJjOsVGleYWRFOnl1bb4Ua+awnZvPYcXGO9uBcHvvEfZir0L17N7bCqzzyur/dt+PmGrf3NevYqz0hh/mKUPrlsefFunb/cq0YZ2zP3PNYcXtx2481xOxafp+bPbl2NNd+jVlvz9e18+13Oay9NfSbjzjP7di3efbhaC9Yz33Yr3muPVOfm+cgzrX+PQd71efoc3IuThNnrnVdy21tuXxecmDO8TPePszbfsw31xzn1lszXz799qTtWTjK6Yhn5XzX1lJUtQbGxW9/cspvH/a454v9id/n4dyYa+fymoc3RRUa5dr97c1Y/Qrb1jTHuHXEbL617Nn92pO/D8y13vbo2txLZI8PPfTQ+dl6PvvZ22dlfPfmPnavu3/PRp/7VcR1dM/7GbzU+06S5CaqkJskN0OF3CRJkuQKhdzHH3/89JrXvOb08MMPn99GVeBRGFmhB8USBQ7FjRWYFDvEKFj5+mZv+YoXs4KJwogYBSxFFgWVFVzEWEcxS/8KZ9ZaIcxclkecIpV8KzztrTlrm6OZb451NHtSkF1RdcUxBRxjzrUV2+xdXkcFUeREfmP6jXseConO5d8a9ia3teTEnszf3sy1rjnmr9i0OfqMy+/+9a95TuvzeXiO5qz4Z3+e5/bgXj2zFa+sb47xxdjX3pp2j+Zb27jrfZYrKsrnHtyruX5+jO1e7N25cXP1udZWyN3+NW+Fu3/njvKLFbOC3fLLZ7519VvD3uxdjL2IM08un63m/sTvnu3Jvflc0G8fcsgv3+Yq8suJtXZfiGU/N/LKqV+c892XMfl87vavz3PUZ09y28dx/77GWJPLnpz7GmRNAdfnYcy8m8qz9tk6ep773FcwX4y+JEny4lEhN0luhgq5SZIkyRWKRApMmuKRApO33F71qledi1bGFQQVR4xpClcrSJmnGOXoWlEKRRQxrldgdK4pqIhfocV8BSgFFtcKUSvYrYjl3BzrK3SJ31oKZ/rklcP+FLXMEy+HnLtH64qV07W55onzFqN5xs1xjjxi1sRq5q+Jldu+xO8ejMklp3H7U0gytj7PwBxxruU2pt96cju61r9in2vne67rcy5ePs249RyNy2+eMdcY33Nc4Wv7db5xc3dv5lpHXrmOseZ65uY4R5zr/bzsuZpj/nKxnxHx4jSMr1+8e7aenwnrL4dC8faM+eYdi6PWdQ8r7G2f+q2/fa1f/uXbZ+5nbZ+dMc/Gz6Qx5+IU0c3dz6V+R+taA9cKsvLYozHz/Ey6D2P7nbTmyGU997p9ajfdPqs5fn7Tc0qS5MWlQm6S3AwVcpMkSZLvggKYtxYVnhRAFJ2cK+4p6voKZgWk9TkqjM2Kawpk4laQUzxRdJJP/IpvK3aZJ0ZBa8U4zTVyrd+8FcEUvVawc73cYpfTcYU8Vjxz1OQ0R3HMXuQ2z73hmbiPPQtrrM+53ObJ4Xxr6dtaY7717FfMnqO52597di6/c01Oz3/PEWvbk1zHfGJ2X/rNl2tr2NfxOW2P1jjGuEYO8+WTe+vu83AUj3HPT58Y+TbHPnxeyIe1rHP8HMQen5M8+lxru+c9I3MUPf3MyWU9MfZhzZFTnxh5Vvzd+vJpfg680bv13Yt1xMjt2n70Wde5frGeg3sUo+nf9XJZwz7sW59xBVpFW2vLqXCr6bOuWM2e5NR3vLckSZLrqkJuktwMFXKTJEmSvyLFKUUyBV7FREUkBTB9iliunaNI5VycQtOKhvoVzMxTzFoRSzFMjOKUeHHGXCtqyc3evFV8M65fAUxhy/ka5srjaNz5sbBmHXuRx/mKaWLtxxri7HX34x4UB7d/xVc5jvcln+MKe2JWLFy/vu1Fn4Kw/OI8h+1RnPtZsc7Yiou7V7HHgqN59iLW9Z4xPg9xmnnu03Fx9mYfu949O9fEu8ae7N164j0D19uXe5H7aiFX3PYqv/N9FsbEbr+Ld+1ck9Pn5HxzECOXfqwnv58ZffuMPevll8fPl33b556zNeTSt/XM2Vd57/nJa31rmbv7MuboM2A/V/uZwvW+Jtkenfudtwq4CrzH5pkkSZLcZBVyk+RmqJCbJEmSfB8p8io6KW45X0FRQUxTCFScXMFMnGKYpvCluIV5zhUEFdrkdK0gpu0NRXGuL4F7UTjE/djbin2Kd4qtCqnuXzGRFXH1HZ/Rnpl+975c2n7vq2eoWXfP1DPZM/T8Xe+ZGbfW1Tj5VjyVy7piXK9wu3hx1tK/PCtM2uMxVr98+yzlXcFT/47HeM9jn7X1xbgP15r88ih+mud5y+H+jNuboxy4X7H2sHVWbLbe5snjWn62F3PlF2M/fm+uMf32Jq9Cq3Uc9RsXr/kczTdXsVazN3F763a/3/ZLX/rSC4XdJEmSfLsKuUlyM1TITZIkSX7IFCP/MhTMrpOr969gqECoPfzww+dipDc/XSs4Khbef//95+KpgqBxxUL9GoqGCpy7Zm9Nm6NoqIioqKjoaA/LcXybdwXQY5/ipjXxWaytYClGIXRF3OVxX/I71+znWARVoFVINU/BU6z52yfiN2/Xa56H+zBHPtfOreV6xXDraXKKsabCqedgTfdg/wqx24tYx821jljPcc91z0AB1leNG5PLXPnxLLanp59++ryGt26feeaZ0+c///nzverb/SVJkuTWKuQmyc1QITdJkiTJRVH4UyTUVkR0rqioWKiI+Oijj54eeOCBc3FUIdGYQqjCJK6XY/MVX1fQXDFRgXjFTzF7g1gRckVHucQpSmrmi9ucvXnK3oBViDTPflzLZa/6Vsi1X/tRSN1aexNW3hVZ5bama/NZ/PakybcxeRVmt9YKseLkVjC1jj3J6371y6kpxFpXLnM1b8t6DsY9MxRhzTeuT7xCrj7zra1vv+fW0bhmzBu3i9naFXGTJEn+YhVyk+RmqJCbJEmS5EVHkVLRVHFSU0hUPFSMZNfeQBWnyOmt2l2LU3jUpziq8LkxBUjXiorGFFdXBNWcr5C7QqxirvMVOhUmF+dcQz57W3F1hVnX8uxtW83+jBuTUz73JNfms2v3Yg/Occ/Lu8LwCs7yi5XDPSuybp44ucTuvsw3Lqf1PAd7Usi1x92zmBVtFYudawq2G5fTnrbXCrdJkiR/eRVyk+RmqJCbJEmS5Fpa4VMxVeFXU6zVFB81fffdd98LMY4rqCqkyuGt1RUbVxDVrwipoHksih4Lt/Kb69z4CqoKp+bq31u6K3KukOva8Wox1t5dK7Kaq8+YPrFbx9H97K1Ycfa8IrV7cLT/7dE5xqyjGCsX7k28gq0c+jVfmbz9KNx6a1ehdscVdB2TJEnyvVMhN0luhgq5SZIkSW6kvY362GOPnYu5CqGKmPhqYQVOxU+FVkVVsXur1/WKnIqkYo8FUU1++Zw7il+xV/FTUdQc+cXo2
1u18rA3YM3dOs6PhVznmsKtnPLYn30qpuoTZ20FVfekLXb3aNw+rCdeMVYM9iR2v8d2a8rnrVzn4r15u+eSJEmS758KuUlyM1TITZIkSXKj7fe9XqW4++CDD56/LnnX99xzz7koa47CrqYQqk8h1NcIK6B6sxaFVNeKrFtHsdSbrAq6K7BqCqj7nb1ikEcBdYVcuVbYdW7eCqqu9Svibk/W3373hqw8yCuXgrA8CrRirC3G2N6m1a9Q+9nPfvZczHWf5o9Cb5IkSX5wKuQmyc1QITdJkiRJbsFbqtoKn7teQVa/wu7DDz98uv/++8/FWgVRb9E6XwFVgXVvxSreansDFzHmKQoriCrMKqJaS+ze0NWc61OwNUfeFV+tJ0afMbEKrs4Vd+VdLmvqswfNfLGKtQq3+uTVnIvVzBVrDbmSJEnyw1EhN0luhgq5SZIkSfJX5G1WRVvF1b2V621YRVkFXW/Y7uuYFXIdxSqKKoTqM8dRLgXSFU4VW8VozlfMFa+Yuj4FWsxZkXXn3qgVoyCrQMvi5T3OWX5N3zFXkiRJLkuF3CS5GSrkJkmSJMn32Iq5vhpZQdf5CraaIqqirmKspk+Bd0XWq2/Ryufam7h7G9a8FWBX1BVjTP/GFIblMRfXYpIkSfLiVSE3SW6GCrlJkiRJ8kOgkLu3XhVyFX33Vci+VnmFW7ztq/iqMJskSZJUyE2Sm+FHnz8mSZIkSX6A9nYtjl/60pdeeIvWVyGviIu3aCviJkmSJElys1TITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTAVcpMkSZIkSZIkSZLkwlTITZIkSZIkSZIkSZILUyE3SZIkSZIkSZIkSS5MhdwkSZIkSZIkSZIkuTA/8qftT751miRJkiRJkiRJLt1HPvKR00c/+tHnr757/+gf/aPT3/27f/f0kpe85Pmeb/ebv/mbp1/7tV97/ipJ8te1v3f/03/6T3+lv197IzdJkiRJkiRJkhtEwfaXf/mXv6399m//9umDH/zgueiQJLkMFXKTJEmSJEmSJLnhfv3Xf/305S9/+fT+97//+Z4kyQ9bhdwkSZIkSZIkSW64j33sY6evfvWrp9tuu+301FNPPd+bJPlhqpCbJEmSJEmSJMkNp3iriKuYq6ibJPnhq5CbJEmSJEmSJMkN9/f//t8/3X333af//J//8/M9SZIftgq5SZIkSZIkSZLcIL/4i794+o3f+I1va295y1tO//bf/tvTv/7X//r5qCTJD1uF3CRJkiRJkiRJbpDf/M3fPP3yL//yuTnnv//3/14RN0kuTIXcJEmSJEmSJEluqF/7tV87/fZv//bp7W9/++mf//N//nxvkuQSVMhNkiRJkiRJkuQG+/Vf//XT5z//+dMHPvCB0wc/+MHne5MkP2wVcpMkSZIkSZIkucE+9rGPnf7jf/yPp5e+9KWnv/f3/t7zvUmSH7YKuUmSJEmSJEmS3HB+P67fk/v444+f/tW/+lfP9yZJvhd+8Rd/8fQbv/Eb39b+zb/5N3/htyD8yJ+2P/nWaZIkSZIkSZIkuXQf+chHTh/96Eefv0qSXFe9kZskSZIkSZIkSZIkF6ZCbpIkSZIkSZIkSZJcmAq5SZIkSZIkSZIkSXJhKuQmSZIkSZIkSZIkyYWpkJskSZIkSZIkSZIkF6ZCbpIkSZIkSZIkSZJcmAq5SZIkSZIkSZIkSXJhKuQmSZIkSZIkSZIkyYWpkJskSZIkSZIkSZLv2ite8YrTK1/5ytNLX/rS53uSfD9UyE2SJEmSJEmS
JMl35Ud+5EdO73nPe04f+tCHTm9605ue703y/fBjf9r+v2+dJkmSJEmSJEmSS/eud73r9Fu/9VvPXyXff48//vjpda973QvH97///acnn3zydM8995zfzn3sscdO99133+nZZ589ff3rX39+VpK/rh/50/Yn3zpNkiRJkiRJkiSX7iMf+cjpox/96PNXyfeer02+6667zl+d/KM/+qOn9773vac3vOEN56LtHXfccR572ctednruuedOf/RHf3Qu4H7uc587/Zf/8l9OH//4x8/FXNdJ/noq5CZJkiRJkiRJ8iJSITffL4q2vjrZ27bve9/7Tg888MC5qPvII4+c7r777vPYS17ykvORP/mTb5WYHBVzP/vZz56eeeaZczH33//7f3/6P//n/7wQk+Qvr69WTpIkSZIkSZLkRaSvVs73g+LsG9/4xtPb3/7207vf/e7T2972ttOrX/3qczH3zjvvfKGAqzCr/d//+39fONf/Yz/2Y+e3dcVvzqc+9am+ajn5a6iQmyRJkiRJkiTJi0iF3Hyv+crk17/+9af3vOc9p6eeeur8e3Dvv//+89cnK9CucKt985vfvGUxV/NGrzm33377uZjrLV3FXYXer3zlK8+vluS7VSE3SZIkSZIkSZIXkQq5+V7y1cmPP/746f3vf/+5iOstXL8DV1FWodbXI6+Ieyzczq43pmjr7V1v5Mrz0EMPnXN96UtfOn31q199flaS78aPPn9MkiRJkiRJkiTJDaLg6vffvuMd7zh/nfKP//iPv/AGrQLusV0t4u782MQ999xz5yb+DW94w+md73znC2/63nbbbeeibpLvTn9akiRJkiRJkiRJbqB777339Na3vvX8Nu6jjz56evnLX/5CQXaF26sUeY8Ws37zFHJ9rfI3vvGN89c2v/nNbz596EMfOn99s+sk350KuUmSJEmSJEmSJDeQt2Tf+973nt/K9abs17/+9XMBViF3b9leLeZevT4Wdo/n4vw+XfnkfuKJJ06/+qu/enrta197Lhgn+YtVyE2SJEmSJEmSJLlhFF0fe+yxc/MVy/tKZMVXb9Xe6o1cc47F2rnav3kr5sr7Yz/2Y+ffv/srv/IrpyeffPKWeZJ8uwq5SZIkSZIkSZIkN4g3ZL2Jq6B6++23v/B1yCvernG14LrrWxVvj3Ocy+ft3hVzedvb3nb6mZ/5mdPrXve683WS76xCbpIkSZIkSZIkyQ3xspe97PT444+f/s7f+Tvn31mr6KrQqq0Qywq1xzbfaex4vVwr6Mrvd+Z6M/dd73rX6Wd/9mdPDz300Dkmya1VyE2SJEmSJEmSJLkhFE//4T/8h6fXvOY1569U9sbs1a9RVoj11q6269G3Yq1zORZ7bAq2O0d+a/mduXfffffpPe95z+mXfumXzvOP+ZP8PxVykyRJkiRJkiRJboBXvvKVp0cfffT05je/+fSKV7ziha8+RjFVsXVv0Gor2B6LvPpn49qKtgqzx6ZvOVbMdbz33ntPTz311Onnfu7nTnfcccfzGZMcVchNkiRJkiRJkiS5AbyN+5a3vOV01113na/3Jq52LMreyuLWZkVcb+C+9KUvPTdf36w5XzF3zFXM1ffggw+ePvCBD1TITb6DCrlJkiRJkiRJkiQ3wMMPP3wu5Cqi7q3bFWX1KboqyGrHt2iPbtW/uYq3L3/5y7/tqH2nYq5C75NPPnl64oknTnfeeefzo0mmQm6SJEmSJEmSJMk1p2jqDdjHH3/8hSLu1ULu3qZ1VMzlWLS9+sbuxsSuaHtsx2Lu8s3Wv/3220/ve9/7Tq997WvPcUn+nwq5SZIkSZIkSZIk19x99913/mplvxvX27Df/OY3XyjQokCrgLviq/Nj0Xb2Zu1xzBu34jXnx3Y13xr28fWvf/389crvfve7z/tL8v9UyE2SJEmSJEmSJLnm3va2t53e/OY3nwuxK+KuqLqvRtZWzF3b1yyv+OotWsxZM0ec88Wx3Aq5CsjijvOwF+1d73rX6T3vec95D0m+pUJukiRJkiRJkiTJNef34z7wwAPf9nXKV79aWTFW25u0t9122/m44qpYc1agPca7vlrERf/yXc2147PPPnt+Y/g1r3nN+eufk3xLhdwkSZIkSZIkSZJr7o477jgXZn2d8Qq4a4quayvQKrh6k/aVr3zlC1+LzOJxFHu1iHvVirneynW+dXH0Rq75P/7jP37+iuUk31IhN0mSJEmSJEmS5JpSIPW26913330uyF4t5I64q03R9eqbtPpnBVrHq465EbOvVt7ax1yKud4afu9733vLfMlN1J+EJEmSJEmSJEmSa0qx9H3ve9/p8ccfPxdIFXJXQF2x9ju9VavYqt+btN7MVbRdjH7X5q4wqyFGu9q/tY7rONcvxpg3h++8887zeXLTVchNkiRJkiRJkiS5phRJ3/CGN5zuvffeP1NUXWHVm7J7s3YF2Ku/P9dbuYq5K7Bu7DgPx7Wrlmdf1by29byVe9ddd53+yT/5J+e3iJObrkJukiRJkiRJkiTJNfbggw+ei7CKpleLrq4VY/e1xxtbodZRnxhv5h6/Hnk2B/1rx3xj/tW3chevmOv38r7zne88ve1tbzsXn5ObrEJukiRJkiRJkiTJNaVgevvtt58LpAqrV7+yeIXcY3HV8WoBdm/ueqN2BVrFV3bkeH4r5n6n+Qq5eCtXMfeRRx45Xyc3VYXcJEmSJEmSJEmSa0ixVPFVu1qsxbk+bcXVW7URp5Ar1/qPX8F8PO58dm2drYd+uRzl8jt8n3322dNP/uRPnn+vr7eAk5uqQm6SJEmSJEmSJMk1pOB6zz33vFB4vVqY3duxx6LqCq6LO85xPBaGXW/O4o45jucjZsXjXWuI3e/Kvf/++8+/21cxd+PJTVMhN0mSJEmSJEmS5Bry9uwTTzxx/lrlWVF0BdwVVVdE3Ru2VwuwY/5+V65zcX9eofVWY9Zdv+OxzTe+8Y3TT//0T5/+xt/4G/2u3NxYFXKTJEmSJEmSJEmuIQXaO+6443yuQMuKpYqpe7vW+Qq3G79aWGV9t3ob9ztZzPKzAvLy4ah/RV5fsWzvb33rW08f+MAHzjHJTVMhN0mSJEmSJEmS5BryRu6rX/3qc9F0v3/W1xazQu7xa5f/PMZXjDWXv6iIO1fzH69X5F2f8+NeH3roodNP/dRPnWOTm6ZCbpIkSZIkSZIkyTWkkOt3zCrYKpAqjK6Qq2iqiHu1kCvuqo3Nrh1X1B19xxyu1+Z4fvXrnLdPhdznnnvu/LXQitFvfOMbz1/pnNwkFXKTJEmSJEmSJEmuIUVRRdIVW1c0XSH1WGAVox3HWN8xlsVrV12NZdcr1h7Hna+f41Ex9+677z79i3/xL06PPvroLddLrqt+2pMkSZIkSZIkSa4hRU9vsfpqZW+5Kop601WB1NixgKq5Xjv2Hx1jrxZVj/HGj455jmNby3H9O+q3b8XnRx555PSOd7zj9OCDD57HkpugQm6SJEmSJEmSJMk1pNB62223nY8KuCviciym6jsWU6+2xRzJqUB8q3F9y3m1/2pbnuN
xzFXE3VctK+R6Kze5KSrkJkmSJEmSJEmSXEMKo3fccce5IKoQuq9VXgHV8c8rtopZYdX10eYfbR5X8x4d4xy3ztX1jrmeffbZ8+/79btyH3vssfPvzk2uuwq5SZIkSZIkSZIk14wi6Etf+tJvK+SyYulLXvKSF4qmtyq6rpB6LKg6Hguttyq6zjHfMf/i1nd1rnN593aups9bud4ufuqpp04//dM/3Vcs50aokJskSZIkSZIkSXLNKNS+8pWvPLe9jYsCqTFF3hVJWWF1jY0dC6prx+vZ9XIcz9nxSJ+9ac7ltcdj7p0r5j755JOnn/u5nzu9/e1vfyEuua4q5CZJkiRJkiRJklwzvnrY27gKnSugriCqiLs3Xo9WTOVYYGVzV2hdO+bH+XG9FXyP/rz45bWuAvQaCtD6/Z7cD37wg6ef+ImfOL385S8/jyXXUYXcJEmSJEmSJEmSa0aBc1+rfLVYqiC6Auz6cb7i7cZ2Ln4FWcXZY4F2R45xR1tnOfeW8NaZrS/H1tic49FXK//qr/7q6VWvetW58JtcRxVykyRJkiRJkiRJrpmHH3749KY3ven03HPPnYufozC6Qu7RiqmOx7bC6dqs0KoppN6qsHurefIdv+p5867OWe5jXnM231vFb33rW09vectbTvfdd995PLluKuQmSZIkSZIkSZJcM4899tjpqaeeOj377LPn4ueKpZpC7rFAOhufFWCPxde1ORZc167m2fX6lmvx+zpl+9L2+3vX7/w4z74cFanf+c53nr9qObmOKuQmSZIkSZIkSZJcM75a+c4773yh8KqhmHostq6tWLrrq+crnmo7d1yxd7HHAuzmclxzedbn6Hf63nbbbec933XXXefrYxHXOgq5W89RIddbx75mObmOKuQmSZIkSZIkSZJcMyuo3qrg6nzF1I1t/HjO1fE5Xssj7wq4K74eiVGI/eY3v3mO9datYvMrX/nK89H13sZ1rahrbG/ncnwj17n2ile84jwvuY4q5CZJkiRJkiRJklwzK5weC64rtjr+eYzfKma55F5bUXXMWxF5OcRo4lGcVaTVbr/99j9TjDVfjLdyjSnsyre1tN2fecaS66hCbpIkSZIkSZIkyTXy0EMPne67777z26+sMLsC65/nagH36vUci7MrrK6Pree4AqyjPgVaxVnNuaYgK3bFXuf2umKu49W9b+2HH3749Mgjjzzfm1wfFXKTJEmSJEmSJEmukSeffPL02te+9oXi6oqxK47u+mgF2FsRr60wu/mb43hsV4uxW1NTsNX2Vcpy7q1a45vvuPkKvivmmqfPPEXcb3zjG6fXv/71p5/6qZ863X333ed1k+uiQm6SJEmSJEmSJMk18hM/8ROn17zmNS+8BcsKowqg6zu62ne83vmKsbdqezv22PT7+uQ777zzha9QPhZ1teV2PDb5VhC+WsxdAVjc17/+9fO9/szP/MzpqaeeOo/Jm1wHFXKTJEmSJEmSJEmuEcVThc+92To7P/bNCqtHV+Ncr2/xmqIq1vN1zs8999y5ib3nnntOr3rVq87tgQceON11113nt2qXR8F3+zw2Vsx1rZi7r1l2b3uLdzHeQP5bf+tvnd70pjedY5LroEJukiRJkiRJkiTJNbIiJ1eLs6xQyrFYeuxnc3fc+DHueH6MU9DVFFXvv//+8++xffWrX31+e9a1ouze3BW/grDjMedxfyvm3nbbbS+84asobI6xxx577PSP//E/Pq8hLnmxq5CbJEmSJEmSJElyjShsriDqeLQiqeOtCri7vtrPsX9z5zi2t3L9/lrre4PWW8J33HHH+ffYKuo6it0buWyvyzO7VuhVpFa8VcRVzJV7X7OseOuN3J/92Z89F3Wv3nvyYlMhN0mSJEmSJEmS5BpRwLxVEXP9G1ux1PWKpVfb7PxW444r7CrMrpDruJgjX6/84IMPnn937uasoOv6VnOwT0Vbb98q6Crm7muk9ZmnePy+973v9I53vOP8Vc7Ji1mF3CRJkiRJkiRJkmvq+LbrsYi7ouiuV0A9tqPj/DWOcSvIrm8xt3LfffedXv/6158LsayQy63mKdTa73HPjuYrCPu6Zet+7WtfO7/t663c9773vedxBV/zkhebfmqTJEmSJEmSJEmuEQXOtRUwd70iq6N2q7gVTFFcvVVbHlyPgquvPvbG7J9Hfl+1/OY3v/lciEUxd66uMfqPRV+sua9vtn9vAnvrVzH3Ix/5yOkf/IN/cLr33nufj05ePCrkJkmSJEmSJEmSXCOKnCt0KoauKLp+58ciqeIn6zvOuWqF36NjPmP76uOrcVeJueeee06PP/74ufCKIu3W2HxHhd/j9c63rjX3e3OR+/777z89+eSTp7e+9a2nD3/4w6fXvva157HkxaJCbpIkSZIkSZIkyTWyN1ZX7GTFVv3frWOBlqvna8d1nCu6/kVv5CJWnN9l+/DDD5/f0LX3vZm79cRtjZ3vejFb01csv+xlLzufezPYtfzvfve7T7/0S790/t25yYtFhdwkSZIkSZIkSZJr5Nlnnz1/vbC3VFfw5PhGLiuI7prjmPOrbTmOubbG8mn7auarX4OMvdnjc889d772lcgPPfTQ6cEHHzwXX/VvnvzsOK973etOP/mTP3n+Wubjmoq43sx13Fu8vnZZ7p//+Z8//cIv/ML5DeDkxaBCbpIkSZIkSZIkyTXy1a9+9fT1r3/9hUKmhsLo3nY99s/Vvp2vgOto/q5XoF3c1fkoyiraHn3lK185ff7znz99+ctffiGHr1Z+9NFHzwXdFXKvtsUeXV3T+X5frmLuxjbfVy3/7b/9t79tTnKpKuQmSZIkSZIkSZJcIwq53/jGN85v5O7NWIVLBVhvw96K8RV+j8xZW/HWOYu9VYEV+f74j//43I4+97nPnX7nd37n9Hu/93vn872Ze/fdd5+eeOKJ07333nu+llcB1p637ta+yl52j9ZdMddXLB+LwZ7Jfo9ucukq5CZJkiRJkiRJklwj//W//tfTf/tv/+2WhdmrBVHji7taKN31sW/5HNeuxqBfEfVrX/vaubA84v7oj/7o9PTTT58+9alPnf7H//gfpy9+8YvnYq59+Krkt771rec3dOWwX8350dY+NrYPBVtv5CrmyqtfIVeft3L/6T/9p+e3f5NLViE3SZIkSZIkSZLkGvn0pz99LpQqYB4pZq6Qu+s5nnP1GsXSFX2PxVOunq95M9jXPI8+e/B1y75i+Zlnnjl94hOfOBdzvXmr+PvAAw+cHnnkkXMRVuzR8n6n/R3H5PL7crUVczVv5L7zne883Xbbbee45FJVyE2SJEmSJEmSJLlmFDRXvBznCqPeTJ0VNzd2JMfRCqXyrki8/N8p1lretj1+pbO3ZbXt5Q/+4A/OxWe/M3dfs+z35d5zzz3nYuz2trVu5Vb7sEdv4CrYKgq/9KUvPedRRD7+/tzkUlXITZIkSZIkSZIkuWaOxc+dK5o6Houqx5ir7Wqh8+qY5vzqGMZW8FWc9fbt+P21CqlsfcXcT37yk+evXeaOO+44v5nrK5a358WyvV
295uq5YrA83sRVzNXnXN/2kVyiCrlJkiRJkiRJkiTXjLdO//iP//hc6FxBVXPtq45XGEXfCqLH8+Obu4qf5jteLZSyeWtyb44irkLt+N203rhVRFVktY49+Zrl3//93z/PN1fcww8/fM6xYu6K0VfXOrYjebz9q4DrzVzNuefzz/7ZPzu9733vez4yuTwVcpMkSZIkSZIkSa4ZBU9vwipkrjCqIIqCqLGrRc8Ru/i19bHiqeNViznGK5r62uQ//MM/PO/L27a+NtnXHSvksmLu5z73udP//t//+/y7db25q5j7+OOPn/crZm1rHwu5zrWNbw9r+525irni7ePDH/7w6Rd+4RfOuZJLUyE3SZIkSZIkSZLkmllB09uoR+vXVgDV5lj4vFURd8ejxR7HVlRFfkXap59++lxE9kbsnXfeebr33ntfKOSaK85bxJ/61KfOxV85FVsfeeSRc1F3exe33Ftn1xyv5T3ua8VcbwOLUST+m3/zb54+9KEPfVtccgkq5CZJkiRJkiRJklwz3mz9nd/5nXPBUoHyWNxc4XZ961/R89jWv+NfNLZ81ljD8bOf/ewLX/fsd9T6Hbj7nbVr3rz1e3K/9rWvnYu2K/o++OCD56L0ca/znfa/fcziFHO9lbsi9xNPPHH6lV/5ldM73vGOc39yKSrkJkmSJEmSJEmSXDMf+9jHTv/u3/27c9HSm61HCqQKncf+FTm/kxVGd3606xVO1/b27Ob6euXPfOYz52Kut2Lvu+++89crbx9iNMVcheivfvWr537F6Ne85jXn2Ktrb09s/pH117dxOayvKeaKuf/++0//8l/+y9Mb3vCGc5HZmskPm//V4P/71mmSJEmSJEmSJLl073rXu06/9Vu/9fxV8p0plP78z//8txUzV1T1VcXedj0WUY2trW+O8+d4bvwYs+tjPgVaxVlv2GrW9hXKCrv7nb36zHPta5UVVRWjFXHN9btzfT3zl770pdMzzzxzvl7+3edxzV07Hllna63g7Pqd73zn6Y1vfOP5+pOf/OTz0ckPR2/kJkmSJEmSJEmSXEMKlH/4h394LkpeLXauIOr8WOS8en3ViqTHtpwbR1GU5Vuh1O/K/eIXv3gu3iomv/rVrz4XbBVrl8+58S984Qunr3zlK+c+b87++I//+Omee+4557X33dexsTWPbWO4Rk5v5fo6ZWvKqcBsTw899NA5Jvlh6o3cJEmSJEmSJEleRHojN38ZCra+lljRdAXXFT0VL48F1BVfWd9sfP3H8RVUN85xfIxb31u4iqjeGLYvX6O834k74hRWfcXx3XfffZ7r3P0o8u5N3K2rbd4Ktez8uJfjuX1o+qyvubaW4yc+8YnnI5MfvN7ITZIkSZIkSZIkuYa8+fof/sN/OBdJFSpX1FT0VHxVtNS3QugKnI7HNsdzjjErhq4dx4/rivO7cj/96U+f37jVp6DrK5Sdi92bwuI++9nPnr9GefPF3nvvvedx96BxXPfqmOu1xR3n+Yppb+UqKuP6TW960+nDH/7wec3kh6WfviRJkiRJkiRJkmtsRVaFTFbw3BuoK25yPGfnx+Lo2grB3upV8DzOOzrmEKdQ6iuf/9f/+l/n/ieeeOL06KOPnn8P7iy3Yu/HP/7x53tP569WfuCBB85F1+1f4Xfxsz2if+OLuXqP9mV9BV3Mtc+77rrr/PySH7zT6f8HR/XuFdThZ64AAAAASUVORK5CYII=) **Task 2**: Load the data using pydicom as a 3D volume and then reslice it! [35 Points]
###Code
# TODO: Please upload ct.zip using the file panel on the left.
# Then use the following snippet to extract the data.
import zipfile
with zipfile.ZipFile('ct.zip', 'r') as zip_ref:
zip_ref.extractall('.')
# 1) Now loop through all the DICOM files and store them in a 3D numpy array.
# Hint: You can either store them in a list first or read the dimensions of a
# single image slice to properly create the 3D numpy array.
# Hint 2: os.listdir(DIR) gives a list of filenames in a directory.
# Hint 2b: This list is not sorted - make sure you sort it.
# Hint 3: The dcmread function loads a single DICOM file.
# Hint 4: You can then use .pixel_array to access the image data.
from pydicom import dcmread
# TODO: YOUR CODE FOR LOADING THE VOLUME AS A 3D NUMPY ARRAY
from os import listdir
from os.path import join
last_Images = listdir("ct")
last_Images.sort()
last_Slices = [dcmread(join("ct", image)) for image in last_Images]
image_Data = np.array([slice.pixel_array for slice in last_Slices])
print(image_Data.shape)
# 2) Now create and show axial, sagittal, and coronal slices from the 3D volume.
# Hint: Please use imshow(XX, cmap='gray') to show the image.
# TODO: YOUR CODE FOR AXIAL
imshow(image_Data[100,:,:], cmap='gray')
# TODO: YOUR CODE FOR SAGITTAL
imshow(image_Data[:,:,100], cmap='gray')
# TODO: YOUR CODE FOR CORONAL
imshow(image_Data[:,100,:], cmap='gray')
###Output
_____no_output_____
###Markdown
**Task 3**: Use the Window/Level-technique to visualize the data! [45 Points]
###Code
# We will now enhance the visualization from above by performing
# Window/Level adjustment.
# Here is one way of doing that:
# vmin = level - window/2
# vmax = level + window/2
# plt.imshow(hu_pixels + rescale, cmap='gray', vmin=vmin, vmax=vmax)
# plt.show()
# 1) Please load the Window/Level values from the DICOM file,
# print these values, and then visualize one slice with window/level adjustment.
# Hint: The DICOM header has the following tags.
# (0028, 1050) Window Center
# (0028, 1051) Window Width
# Hint 2: You can use slice[key].value to access DICOM tag values.
# Hint 3: (0028, 1052) Rescale Intercept might be important.
# TODO: YOUR CODE
wc = last_Slices[200].WindowCenter
ww = last_Slices[200].WindowWidth
rescale_intercept = last_Slices[200].RescaleIntercept
vmin = wc - ww/2
vmax = wc + ww/2
plt.imshow(last_Slices[200].pixel_array + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
print("WindowCenter",wc)
print("WindowWidth",ww)
print("Rescale_Intercept",rescale_intercept)
print("Vmin",vmin)
print("Vmax",vmax)
# 2) Play around with different Window/Level values that enhance
# the visualization.
# TODO: YOUR CODE
vmin = (wc-15) - (ww-70)/2
vmax = (wc-15) + (ww-70)/2
plt.imshow(last_Slices[200].pixel_array + (rescale_intercept-60), cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
vmin = (wc-100) - (ww-150)/2
vmax = (wc-100) + (ww-150)/2
plt.imshow(last_Slices[200].pixel_array + (rescale_intercept-60), cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
vmin = (wc-400) - (ww+250)/2
vmax = (wc-400) + (ww+250)/2
plt.imshow(last_Slices[200].pixel_array + (rescale_intercept-60), cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
vmin = (wc+200) - (ww+450)/2
vmax = (wc+200) + (ww+450)/2
plt.imshow(last_Slices[200].pixel_array + (rescale_intercept-60), cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# Which values make sense and why?
# TODO: YOUR ANSWER
#-175.0
#235.0
# Since the extreme values have a better level or window image quality compare to the images with much nearer vmin, vmax values.
###Output
_____no_output_____
###Markdown
**Bonus**: Create segmentations (label maps) for the volume using thresholding HU! [33 Points]
###Code
# Similar to Window/Level adjustment for visualization, we can threshold
# the volume to highlight the following components using the Hounsfield Units:
# 1) Fat
# 2) Soft Tissue
# 3) Bones
#
# Please create 3 segmentation masks for these structures.
# Then, please visualize each 3 slices per structure to showcase the segmentation.
# Hint: As a reminder, the following code allows thresholding of a numpy array.
# new_mask = imagevolume.copy()
# new_mask[new_mask < XXX] = 0
# Hint2: You might need to cast new_mask to int16 not uint16.
# TODO: YOUR CODE TO SEGMENT FAT
vmin = (wc-80) - (ww-375)/2
vmax = (wc-80) + (ww-375)/2
plt.imshow(last_Slices[200].pixel_array + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# TODO: YOUR CODE TO SEGMENT SOFT TISSUE
vmin = (wc+50) - (ww-375)/2
vmax = (wc+50) + (ww-375)/2
plt.imshow(last_Slices[200].pixel_array + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# TODO: YOUR CODE TO SEGMENT BONES
vmin = (wc+650) - (ww)/2
vmax = (wc+650) + (ww)/2
plt.imshow(last_Slices[200].pixel_array + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
# Are the segmentations good?
# TODO: YOUR ANSWER
#Yes segmentations are vey good. Accuracy can be much improved by using eachtime different value.
#
# Thank you and Great job!!
#
# _.---._
# .' `.
# :) (:
# \ (@) (@) /
# \ A /
# ) (
# \"""""/
# `._.'
# .=.
# .---._.-.=.-._.---.
# / ':-(_.-: :-._)-:` \
# / /' (__.-: :-.__) `\ \
# / / (___.-` '-.___) \ \
# / / (___.-'^`-.___) \ \
# / / (___.-'=`-.___) \ \
# / / (____.'=`.____) \ \
# / / (___.'=`.___) \ \
# (_.; `---'.=.`---' ;._)
# ;|| __ _.=._ __ ||;
# ;|| ( `.-.=.-.' ) ||;
# ;|| \ `.=.' / ||;
# ;|| \ .=. / ||;
# ;|| .-`.`-._.-'.'-. ||;
# .:::\ ( ,): O O :(, ) /:::.
# |||| ` / /'`--'--'`\ \ ' ||||
# '''' / / \ \ ''''
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# /.' `.\
# (_)' `(_)
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# jgs \\. .//
# ///) (\\\
# ,///' `\\\,
# ///' `\\\
# ""' '""
###Output
_____no_output_____
###Markdown
![CS480_w.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAADtCAYAAAAvOMSOAAAf83pUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjarZtpklu5lYX/YxW9BFzMWA7GCO+gl9/fASmVVC7b5YhWhnJgknx4dzjDBdKd//3Hdf/Dv5pCcynXVnopnn+ppx4G3zT/+dffZ/PpfX7/+FX4Pvrb4+7s74sCD0W+xs+PdXy+2uDx/NsbfR6fvz/u2vc3oX3f6PuLH28YdWWtYf+6SB4Pn8ctfd+on883pbf661Ln5wb8+j7xLeX738JnYeH7rvrZ/fpAqkRpZy4UQzjRon+f22cFUf8tjvdVnyvPs1j53mJw70v7roSA/HZ7P756/2uAfgvyj+/cn6Ofz18HP4zvM+KfYlm+MeKbv/yF5b8O/gvxLxeOP1cUfv/F2iT2z7fz/X/vbveez92NVIho+VaUdz+io9fwRN4kxfeywkflf+b7+j46H80Pv0j59stPPpZ1C2TlOku2bdi1874uWywxhRPISQhhkSg91shRDysqT0kfdkONPe7YyOEKx8XIw+HnWuxdt7/rLWtceRtPDcabkeR//eH+3S//mw9371KITMEk9fZJcFBdswxlTp95Fgmx+81bfgH+8fFNv/+lsChVMphfmBs3OPz8vMXM9kdtxZfnyPMyXz8tZK7u7xsQIq6dWYxFMuCLxWzFfA2hmhHHRoIGKw8xhUkGLOewWWRIMZbgamhB1+Y11d5zQw4l6GGwiUTkWOinRoYGyUopUz81NWpo5JhTzrnkmpvLPY8SSyq5lFKLQG7UWFPNtdRaW+11tNhSy6202lrrbfTQIxiYe+m1t977GMENLjR4r8HzB4/MMONMM88y62yzz7Eon5VWXmXV1VZfY4cdNzCxy6677b7HMXdAipNOPuXU004/41JrN9508y233nb7HT+z9s3qP338F1mzb9bCy5SeV39mjUddrT/ewgQnWTkjYyEZGa/KAAUdlDPfLKWgzClnvgeaIgcWmZUbt00ZI4XpWMjXfubuj8z9rby53P5W3sJ/ypxT6v4/MudI3T/n7S+ytsVz62Xs04WKqY90H88ZMC3/vefTODWuYifWbXVBFtXPsfOowfaNJyff1+J+7LR4ByTHvcPS6+5wzwaPwKR1ez5gT9mTHOZzrj+pXl7f1uZua7N7QjotnzgJaVnG+uZqu8JfpcfiY3N2BvdKKFj6WWXMzS3tUXovNeSTb16pllvI7pqL6HmyZDePafmEfSf11GftztAQN3UDKFnN3lyXmzrc2fB3nhhK83GDr+22Bh7nzduuTCy58z3SrbxbLdH52/guLa5B01AXeYKuZeVzaw5ztkJeejxcLEzW1cnC2JXbauTGH5upEa7pNqVZ+im2Ey9JviTwZ/GNAdxpGvmst+9ErdZB7eR9jTeJccU92lpFpdKCuVHhluZzg//vIVz+k5MxSuPOxvWLEhxnKW518GWrnGibQM6ogbPi3OU4wrKXUdmVi+21AEDeP4Ub4iS/PlKhNA50VVfgVne8OVMhOV1qnlA2qGtTkKzOSiWdhHFOf9eqffFZC6s5Xpszbyqfq3MLbQajfsEHyj5nKpUADx+Td7bXaVz09HpYRChz7VC0njHptmidNidFlC2JI1Y5Uj6LsNq6fqfTfRl3TJdnHmtR0GGVcy+NvqudnlM9RpgaQb2XNkgshiUXSJTrpntrv6fEu85cfS547djdca46L5RKFHlZF7lXUIDFrAq3i76bP5m+uIVf2qFayhm9xEFNpVo3Wcust+YyQyVmJZS9Rpm+0Mkh3hbPFiDteOquh247hIak9R5bppRDHejieV3zJOiglFk/qLokMfpJjUqzNkqdSIWFfLG5Ew0NnlbeirKOasGU5rxNxeJuo6nARJKau+3Dy6bd7sMcA+kctwdSuiprJ95+Uj+8CVkrVGwyyiX0MpvxRuFyIavEZtzUWDsBRW3SioB3OaQf9PQjPuK/i1o6Q3Ja2PPHV/fnB/7jV8q93DbjOBT47EJ5UtzdSBD4nqlf7jAnyiPnqsKk0m9Ju65OVQ7KOlPInlYtANZdvcZicHDt29Ly1S0a/O6TQ4VkCEdTODq3Q2cC21QigVq5Kts8SPfUpkzvFjfXlYa7Db3hGh2QL41HiPYhkaqlAiCU6wkPBRbGmlWNcXLj6sBBuhPgqfOlYGSbFxYxmhQgpXsmt0zSWHYroDYci1ZG6Ojr9qRmdyqVrI12GokLvCAeAJwind7BWxFuA/wy3AMieJafjT41gdg6QQjZKcgTAPrZ4JUGiO2z1Q5llnEmoOlKHrBSp6DPMVFo3I1yAZciZcVVLxcmNy0Q+A4W7lhhJZgT6WSdtgmgOuB/fFswR6rjxn52A7d0Scjuwv2g08pQDNXUJZkjKhuYm8QS3xLoAnpW8Bddp8RYPElk6USD1K3Qd73p6Nrcb4ZoMlRDxecEqlYSv9AbgTiOw03gsk51kCDdMsA83qCAhAQ67ENCS9gFoD636J6rTagjT6EDhVNJ4iWy17LWPIqDWvzLP9B+W4G8KuawZwqiIxfI7sX2XLkRIT/PaUIqmDyTbIq2NuAoTkeoW55Af1gVyNwo1As/0w0NxRAQtTKhiIDG1TJygpdCHbvq1dwvYYhhde9ufwIrqv72pRm4XBDCV0H+KtAQtUX3CplzQWwNAGyIz1k2igsaL4CwQ5ycCUsZ/VTbalxuzsFyKViS1NATq+wxUqfr4X2Ye+QVLvwNY66KskMN0WuN2lC2qEsiO2HYuhLckyLgxJVCAuwM4UC50yaXknl5DqSIMgKegUAQEeWvmoVdNUgIAPqG++n6fbhZgB+PBCbOjrJCpOiSq491uDeExWujVNoYAJsYgqqictS5xjv6abN0lU6CpHvZzTJsh8aZGw14STh5b5vmRhRE0deKVHaFDY4ZcBlRfwdKG9xPiWJcYAIK3iBMo7Ey3T6RYf6tpAfkREFH1Uyno7NvKsAPFE4Etj8AUQfn9kbUIq9IOwV5wB9UASE4IHHuUb3HXSEWBqk6frtCO1CYKueNMQLmOvdDzYCBJSpCRCdGiAYfJJlBm8DeUBqsMQc6IKM623GdBMG4+YxEhp+eJksgTcdJ0FjQuqmeCWE1JADQQLHtshblhMBsbV3FxTXTKAKb0Ez6DLGwaHTe5iCbAREwYJKXmmba1AwkWNRy6GzBLiIN1VZ7305GOEMD3PKGgi7hhmAn2h3wKggzgHoAViuOQZzoM+gY/cblNMhBD6iCLbvM4uCphmSrIlGgvKguuW30iS0gblZkMt3bseVVvoLajgUgg4wAqAydtuzwK7wKiqrjPN0J69PW5cXSVgz4j3qrNK2wkIx6FC3LC1pH3gTldvjAGUVOt031OW7nqMa6vwUxBgmjV/AEdAxYnPESu1BDFakhrckT0Z5ZXzNZQ0NiiVO2v/M1Bvo20ku8GRImQXpGOHlIxo/Md+GMuI92pr+oVTgxDslulGRH56HlDekBrgvkCuQ15kE5reIBoII8xqa9Sq
bJ+Ibkz45lxznhJ+CHgLW6NJekuCQWPX1F34cGN0QuOv9ACMHRoRurAg1fqXSQjDeLr6vXABgAQXB5TKEVhqlhu0KnGKjHASdQoAVhRrBHoloCvi/es84Wc17FdAviOhVN/1A/hiCa9cpnqHaNoPXVomQXVpTX4kUgKV/vEc/Ygzj6mozR6tl31lmFtKlXlMFIlPTKME3k8YwhGOBZoP2Kk2Y5Ha7wSPCBl1r6odGlwECiWmH/UzIduMFowAhhjQrmPogy3A0MAeQDTzspoui5EAZINYt2v8NA9PEihUnbR+Oa4fterNwHtNRRMcC9M0p2YA+CW5Sx6YKJ7sUBI2fCQjKDG7pjxFVUfUs/8I/bTLgaowlNXU9GAQ+wZznuAUtEcUXafdKVtwl6LsC/QRJ0Al8gzgZUBr5c6ofK91QQOpkgtqIenw4PcAkqJr3LC1NSBx5E2ybIBfNDiT4lIsNE4+TjdcMAaUOPYKfT3IZp2C7QYiSlDUFhw8kAmVfDAEwkJoOcY/wA/631k/0wWL/U3lhGSVc1rd+3uUV/4JOsoNQO/GezY0x0W82zVD4v3FohhkiddvE4KPOXBmCEItF3dHxxe2MFuL1ZyRIfdDOOK0rViKS4IwCGJpTPmqgjxM6jqtypyLHe3UC/w+Ght/QbbLQNQKOJPD2hSgIxMPSGqQib6kUaoBoOxkXjCLipJWkPeYU7sqsVzbsKZEOPIpLRYdUQ1Ej9Qu2UjsAdcqBjz9NfvC5aDNhUh364Fwt33DAYEy6iDJsWhQyBlhYqirbDkcek8kaWoCoG+hhfjRmH0ODPasSXMriBXoOAYXk8TkS/nQ5QgO68ACu8H9zSrdzqFMhHnNxKAwVI8fuxCPwyDTG4oKNaplpPnq6HG1EQqFIgPZA4rBEBxFGi+lA3l5tflPbSzWPkk4YapGDQgk7rIyD+7ShouLVkXGKR9jjcSYFoz5hEiubsoyUsOiQwqJ+IZUGkA2GUhBtAFMwJ3Im/unRByAc3XEf3WBFk40QVooMB1It+xX9YfJKCxFCgvZ09QnFTLjp3j5akb5GE5DbKTw2wDBnI3dGmvGq3KU4H3eD3epDTWRKzU8B0s0SEACaAHLBbU17IMjXHjyNBiHRLoESJB4uRBwclduL3ZrGK3/Dq2FtzWbIRnAU7rzqZAAbcBatAHaGWsvjd1mqatyB8d4c3AIj2mAAj1ijzg2IzrZvUYue485VgdDi4AwieRZDNjiELmhb1K8mnkRatgnOBb3UNRIRHVDveFm2J8uv9I+Su0cebCEH3tDGMC+thwuh73CZSmBbmJzzmiCA31TVAzOz2s1GkdiyVNFg8kWfYFhAkvTEAUH/x9zRaxw0USS4AGwAkmARyUvvUgqbHGnICShK9OPmcIOSGfUAzQI/cBuIWQsBTRrlt9B/Rhao1gfM+YgIPOOXwgtlLYjeBiz38wyZDvDi7LnbD6cYlRYsqjajJTAtpF0uC4I+vrhIUmuTp1NVSlEa+VC588QHWfH484eevaYNfnqDs0f2aHOI9aDAeO0PSqNcFX4FOGDB8ikgYRRCL2gvi920/XPQ0RuHHKOT0DqjnlZAkuQGKW08dkEEJ1p4hGL1FjWgUNaomRZKlUAyNu6i8iz56nJdcRmWwlCmTia4wVH/AwWJCEZO4Lg26FmsoCI+CTM3vPyiKtTzVXtKB5OgmIoA0b01Gcnt2o6hnJbnnaY9vUAqJmFKA8SEicuSC6C3JMRoxASpcPRAU4CzZlIgMCE7NU3iwXJG6b5d2ReQQ8qx5nmamsusT3gYUp6aT9Q4nJY77wBGapnKUCr4okCQMbiIulOk+kg9dlw+AjQwFZY8lQRQggWhhTIirBee8KdyMq0URNbgaS4epoc5QBzIy0fAYTZJdYyfsB4SI4wdOqYuyaLuA78eA0CoeGQ2kp1o1MJgNK4/8eeVJiuTs7quf+30Mr6kp3VB/FYkl1+7V1P15/k0wSSkdH6ZmNJrM0aLoJUNC+ZddT7fkF3LQJGCcgPeOt3BiDzxjHYgylApQiPk6KvervZ8PJXsIkWoBrDVakj8nPlTB0cCUyEHnjshBkfj0jiyQE5O2RxVpSK7dCRZWR9WEndeMBboSJQ2mPNhoGs8SHDSI09BlbfRsNhCgor9SlAIGkqRZ+33DoIv+wvKhZgtGsOPsm2+jlATgWsNhRGwWmAa2yjFkxap5HCq9CakALxutPFrQoB1/plbFs/gOoIIRhrHoaBleWtwdZwPWyOC61dz8DK1i94emDLgC00y203zg6KtpRJRqItAq2gTlXWSjoGykSK8RXRdhFVoL8F6jd2JGp0Ju4sAJ1vHshB19F81S1dzqTECqIHk5uIqSn5bKWKJ3cE/qtVNHNDpFz/OkK6OkEFqO68LubcesCfmA5w4rzNERTHKfCxlESh30931zoSNeHzsj+gNYj2KAj4LOKOTsIUCI7cZNBSCfyehy1PymIqrEDZGUPkN78Q4AFeookiC0PnpSU78kw4stlWRZW24AiKJmWbtgpHN1GZD92A03R7ASJowKbUFD6KjZjKcygM6KR/8YDSqfip1EDPRZ7tAMmBeijQKRm4OB8Gg45REBY0g14QseLVAILFR9Coq8rYRDRiYiBeHkpPS5IxQluo+Sh9yvXAP3oxFpLxoVzPHGnyAWAoGkkIGmSYKmtJXyhFgd9Z/LGVrwLhdIuev1FxIBTMaxD+6M6HGn5F+0FQJaI1AfafYs0bW1t+cuWZiqKMApAsOmERZM/VI2ROV81QzebmmX92jYHkgdTI8XzCNxpkmB80pu1ZCis6g3J6XWsJWy+tDBwqkSfpzM6lYpR7RGAGdk7dHFVg4mIJXGrWmn5CZWjVLogh/ellIGbbAY2GCxNtgCiaPXrJ6J2GOFiG8oKS5tDJY6HXZSEnCV0MpGLqJr4DTDT0h61Pm22D6vIRjUStWksWFboECNxkHjmEbB+GmHKQ9B0VoYqoVN0BaBJADusqlPkjahkG9kEKKCBMFbND7Uw1egC6HpVsEEYXOBaopckp/U4VWXfo+AB4XWuPhhSMe8uGHBLEQb/1y7tgc+I05XM+9NCyBngV7Ci8URtChAQB7QQdqo0I2Eabqz8YobywpJocAN+Y3Ub67jxydPEQCi/Kge1KynEEte+o56832dMNFEIYL1B5MgRSczrD0YhIq2PRyxycTUNFPlUqtRrHxCDk3KVDmmjqh+7Td+BgtWKJzeUY81o8VyMO7Xo/w9ziLxIm0IaoaI5ntTXXQdXmf3tGvYfmn3C5/ew+Iz7++jmGGjIFkuqhbNSJ66trkurIDSrXJUfHATr56BoIaqpfRAtP06/5BzeIKqAtqTuiq6bZjrq4tGbhFGQ6NLneToEUvLayyuIc6nlD2GAnCbQk8CjTWFFLTZHR2e+8hX0cUo8jCM9QeRztICREYaTPQ1NfUR6TeQCnCv5VH4UGFbSEBtKT6RgQ7uiV/82zm4eZwhL0ipNk2UjvZyCJSoGpcBGK6aDSxAfj34x0Eu4Jwn4gV1TWxmRHePI+BjeX1InlO1F76VpsyzNfwb2
qQBoOdzLsywovXJh6qNWaTflsR4eiMAulZr9dp31+ZgNCCYSk+roTG6th8mulF7o+QK7g+aYGhcywMikKXRpXQuLiyFlKZppyPF3aQZsQewX3lb8mPSC40Ig1hYUe4Rj+kBUBQ6nK9hS9BeMy+Tv/5DlP87Te7+tihXQ7xB1UWxmLbVEJe0wQXZLOMgCS4ClbRqhgza+amNooX9hAM0bjxWABJ4Bei/UdOehozG4byxDpIiJtbhuvafr584PLTCBJia9hMyrkxDi/jhpaGzOWQSQUcrBC1atIg7xSNRs2RNzdlQcIjoBl5I+i7YCTb7qKuLTICTpg5MABnAukQkq8RLgpRTw2T6ZGFFgdAOrHnNq3RoDtyChmglBM7VWGfnugx0DFSW4F3bniyYUsGrweFmcw0H5mU0JCIE3YMCGJQIr9b0e0h0XWz60PwYv5q0dO36csNIz03AsSoTqwpBxltZCs0M5eLdNCbiaUjjgiVBqZUR0Hc0Y6k5mk5paLaHqYifUxQoRXRxRLE9Vv1U93twaJMW3T8Olfl+O5LcqHYkyy/5oIDe/iQYAFZ4B8rz8x8ZaTppIntDE0T8FLLtsxtJ/TxvmwauRxOmBV9py5TF7lhd0imRZ77BAm1yaudYOU2IDEFR4DHCoxHpV9xHleLv0r7qIBMlQU9WORFkZX8WMslUor4BemIkjoUVNPcvp2jTd8Q3P0Crwn0l+Ellc/cIbTBCGwUV9DgYId14LNom96iG25OOAoCiYNXoMh80hdeuI6wiRK7Dpf1G/ryNdl9OWHuT5M3iC/izNBw+2nt+jVp1woJvazmfTZ+tkXqIqCdX4KalM1KIL50LaVdnXAj41s3nO8dnw26hC/CCkfhxTeI5Iq8MmkVPQBw6QiTZnppZYBW0pam5NME5q2AFCWgfMjW4tKMBhzYOI8HBJAJ9epNWtavkEB1oIiA8xEBln3/1fixR7sw+UnRlXIoGj/A0WEF/ulgJMRyRwLCuEYumrUhpdHuBudB7lOU2HO/2OnqjCZSAyutQEOhY8Lt8gzs6YvnUEhSRZIGxiwDVkuB6jpqyb1oS7f7WhQ4Ruzfsc4vokK5HNc8WhGYcEHQ55PhgekBMmz06dDSoVegmICNbx6IhAN8h3+d8kfFzq8h5KzcDtZ4h8F4RBwAuin4gPA7GnHaGlpEkmuXAnzKa+Ca7474u1zEPar5qLOs2GUmII5k8AAelgxrVjn3FKaUA9TY4ovqF74aljygfkofpgKy09fKiYDgdaSPUkosqFa/DN10nSgG7CAXmt3cpxDioCvSMpO4jAXFG7trKMQrPAZTNuqQwDgKshzwQpJiArsNBOrCUKSPNxLkU1LZRuZSzUVJ+8ib9M8xrMK0GOxk7l5pP9nYj0QgdMugRM5d1MK5RMsmifK28OOXmU9VUk46H3oHU4Lgo3S2ITxpxIbL0QdfBtKwMm4BP2xgs7brYm93YG1MfCjBqBoK2lxfh1iQqACcEaJG19BJ1Z3NBjVpEB8Ch13FrQF+YJGVgGkOh7DXan8Y9OnERMlbHJNAf7TM31dmaFnWmq2lgWqUFkaLrA73gBj21S9HZH+0g4iCOyynhMOghndTBcyC1dc5Fh+OQyFHHE6pm2nCDgd1TWDZLfDM+DbaztTd5dpJEVBJgjSCRhFcfFVpqAYOIrByW3L3w62LZMSr9db+CbZDNQFohh5p7942OfOOwKjBqB4EJOuoMxMZF1cRtdQ3dAEXCizdCjJ4gFTzim5b4W7EQmGOZBcReDtzNngnXT2N0QkqhahqFErwaXjUV0W0BW+dJro5MRJ1BUWUvZTFy6xbwQAdU2dp97p9DcYhUpO/UTsXNKi4a50j5Y6W15RyW7CAiFum35dHpWRZ6NdrT0Wt0tX+nELXjDQ/LT7HwBeIjQVOMUTynMwQyYJrBX1ZENWkbg1ZAWQIKWafOLsBRtywc6aeyAbf9jhXRr5oyalNLvlsKx2C+66A0fM2iOlfqkp7YMNsAQqALpHKE7iOs0exGFQqe4bMT9DUaMQJKY7iMzJSIR1zjrDCWwAZgoAMY8KHmBvHClYt6aAJ/FnmPzshC7+AUMYzam+iOiE1YFcc8pLz6G+4/Jq6laQhAYYd+oPUi6fGxwzDubdiCZHQJkaBvERHkVNQS6cJ3jhMZ/dmMA8VNgiTzXLxgEQHjWsj4pGaR+Aq8RjHUNA5yvkNBOLxKMor2e0j7FSoJzTWfaOgb2JK0a7MGJB8sgyd0qAKPr7mN366mhLLioVVQQvLQrzEzPbXf1LcCSiiWCwvQhrzD7BgRnU4F0zrNRgE02+9PGC66SWOXraEtt/6ZM9Zh4lnK/tDBAHwAvxDDEtYzSl8LgZF7pkMSmBoN4nW2RRNRIPveiVqoBdiEJa/OR0DNsjZbG4s1UElD0wAEW40K3cA3XUeHQnrveGT9zM2jobAWLFzQmiu+I5IBH4YWvtpzRldoiL0gxwi6wQFwW3Xh4Ajq0HbDpoqG79g4fITXZmEbHUQHD5YOp0UNnkGHiyLAGGqCCqPWjxZwtGH/OpbnCYEVRFV99Jw0q/8cU9aBS+pQs7e3bmT+85lj6RQ1XOkG/BMwpohS7YMg5Bcij06emkCajqBunSCoOuBDJ38kiA4iaFL6Dj8W1EZxucH4KSUdXmksXGc6n3qXuGm9T7TrE3tAkE4weERyScSfoKbhaXxb9Nh0OgwlY/cQMkuE1bd47QiPq3TyPiizob9DAMtBR5KnmaN2B0mrdlKlIW/JFCb+IIywdSIOTfz+FqGDvpVQJEgYs8xrMKoZe/gcs0cbLR27MslhDLPDHRBsgD7g8LSnUSp2vki1d2rqm6f6F3nKHwXwcjOvu5/UqJJfamSLxzyrfYoFDyMZTCFb0AFsgFcDL1ngonMB2j3zAeZ2mgfwL4vRDHJFbuKQgC48nfYUqKalDRR4AeOtv7gAy1Cx71gMCscbTIzTc/5dGch826kT04Iv1y5k0k5hvHhRAAzo1TSA29bRBB30kFbB++eJAdtRbxQo8CDfL4/SSdNB0dIhKIfzLhKOwDIP3h61qqVDHOU5DC4Wtf8BtLtMtBOv1581HB2g29ql1Vng7uHZi681GlVbkpvk42/vRNikohHvmw4PXS64FrUhDHdh+OjMWV5jzxmwJDrV2jX3pHWBfkABzvUKn44Gag8Tzeznw1OHNEKiDhk+HIIsRJcJ35/YBY0p4PT6djC1s6kN4NMLkiFglQwxRMkBcQ5aRUkdzbU0qoQcIVKNWXFMMIemrdI0ptm6/p4haTqDHjhNO5FAxWDZgKQTcGmnyPj11q8pJB2PfBtMmjjprDxcj1pGicHc6m80kTZQjYqJgAx3yq0diPEUep2XaRMDZxHkTWB8ygNzGezpoZDQ6yjwtwFPBb397tF1hAcgSe7Kz3dcPxSeCjGRj+f2gMmI+gHvTtRBfB2Dh0GvTs12RB6dR7XQnnctnaV1ZIQVTh3/PcC/zn++dDbgagQdyDjJC4jnkBO6Oi0wtT1ZkFkeNU792MrJoUAFNyDueWcQlvaN6bT6TgxxEW3NXumY
t0dbyrulXBA1YvCM35o60emS9sSRT7ED9KCQprw0Q5DxBIKwKAOQuIOSTFofKmlTIqNG7LeOPAgasBhOk0/79BtikiIznZqjU27gx6n4I4Bb1NC1FZ/hAsLq9ccWKOmIYkF9lZ0drYzlx0V1noYx1p/yDFkamKNI+GovmybWJYgLfJMQrKsvHYk/UCqLQjYcJ2GBgChIthcm8u83xQiMqRJ0ZoEGRF2WfAMCiiJIhS5UCyS1bEAomP7y4GiegRmc79RsmUbc21YFavRJILBvKIWa6UMdAl1Uc3zgwv0hBJYGIXu6zB3SNheQ1V+RafQE4VOp/wdi1canzzj4UAAAAYNpQ0NQSUNDIHByb2ZpbGUAAHicfZE9SMNAHMVfU6VSKg4WEXHIUJ0siIo4ahWKUCHUCq06mFz6BU0akhQXR8G14ODHYtXBxVlXB1dBEPwAcXNzUnSREv+XFFrEeHDcj3f3HnfvAKFRYZrVNQ5oum2mkwkxm1sVQ68QEMIAIgjLzDLmJCkF3/F1jwBf7+I8y//cn6NXzVsMCIjEs8wwbeIN4ulN2+C8TxxlJVklPiceM+mCxI9cVzx+41x0WeCZUTOTnieOEovFDlY6mJVMjXiKOKZqOuULWY9VzluctUqNte7JXxjJ6yvLXKc5jCQWsQQJIhTUUEYFNuK06qRYSNN+wsc/5PolcinkKoORYwFVaJBdP/gf/O7WKkxOeEmRBND94jgfI0BoF2jWHef72HGaJ0DwGbjS2/5qA5j5JL3e1mJHQN82cHHd1pQ94HIHGHwyZFN2pSBNoVAA3s/om3JA/y0QXvN6a+3j9AHIUFepG+DgEBgtUva6z7t7Onv790yrvx/xInJz/ZaLfwAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAATrwAAE68BY+aOwwAAAAd0SU1FB+UCBxYME13qmlgAACAASURBVHja7J13eBRV+/e/W7PpBQLplFCSQCAgHUMRCFWkCYIYioCCBRF5kEceiCAiRYooiBKKIEhTeqSFAAm9EwhFSiCV9Lqbbef9wzf5EXZ2dzbZTb0/1zUX4cyZM2fOnNn5zjnnvm8BY4yBIAiCIAiCqDUIqQkIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBEECkCAIgiAIgiABSBAEQRAEQZAAJAiCIAiCIEgAEgRBEARBECQACYIgCIIgCBKABEEQBEEQBAlAgiAIgiAIggQgQRAEQRBErUVMTUBUNtnZ2Th+/DgiIyORmpqKlJQUJCcnQyAQwM3NDfXr14enpydCQkLwxhtvwM7OrsLqlpCQgD179uD+/ftIS0tDcnIykpKSIBQK4enpCXd3d7i5uWHAgAHo0aMHrKysKrTdIiMjeeXt0KEDvLy8LFIPrVaLq1evYvfu3Xj+/DkSExPx7NkzWFlZoUGDBvDw8IC/vz9Gjx4NHx8fi7ZJUVERzp07h8OHD+Off/5BcnIykpOTIZVK4eHhATc3N7Rt2xaDBg1CQEAAhEL6BiYIopbCCKKSOHnyJOvTpw8TiUQMAK9NIpGwt956i924ccNi9VIoFGzlypWsS5cuvOsFgDk4OLCxY8ey2NjYCmm/devW8a7boUOHzH7+wsJCtnDhQtaoUSNedRAIBOz1119nf/zxB9NqtWatS0pKCvv888+Zra0t7zZp2LAhW7t2LSsqKqKHkSCIWgcJQKLCuXbtGuvdu7dJ4opLTIwZM4alpaWZtW7Hjx9nfn5+5aqbUChk77//PktKSrJoOw4bNoxXfaytrVlhYaFZz33s2DHWpEmTMrdRv3792JMnT8pdD7VazRYtWsSsra3LXBdvb2925MgRejAJgiABSBCWYtu2bUwmk5VLYL28NW7cmN2+fbvc9VKpVOyDDz4wW70AMGdnZ3b06FGLtKNCoeA92vXWW2+Z9dwLFiwwS/vY2tqyU6dOlbkemZmZrH///mapi1AoZAsXLmQajYYeUoIgSAAShDmZPXu2WQVW8ebo6MiioqLKJf7GjBljkbqJRCL2ww8/mL0tT548ybsO69atM9t5Fy9ebNb2sbOzY9HR0SbXIy8vj7Vu3drs9+uTTz6hB5UgCBKABGEu1q5daxGBVby5urqyZ8+emVwvrVZrMfH38vbjjz+atT1nzJjB+9yJiYlmOefGjRst0jYODg7s3r17Jgn2AQMGWOxerVmzhh5YgiBqPALGGCNTGMKSREVFoU+fPlCr1RY9T/v27REdHQ2pVMr7mC1btmD8+PGWN7cXi3H8+HH06NHDLOUFBAQgLi7OaL527drh8uXL5T7fs2fP0LJlS+Tl5VmkfTp27Ijo6GiIxcYdEyxduhSzZ8+22L0SiUS4cuUKgoKC6OElCKLGQj4QCIuiVqvx4YcfWlz8AcDly5exevVq3vkTEhIwffr0CmuHUaNGITs7u9xlxcfH8xJ/ADBgwACz1H/KlCkWE38AcPHiRaxYscJovhcvXmDRokUWvVcajQYzZ86kh5cgCBKABFFWtm7divv371fY+ZYtW4aCggJeeWfNmoWcnJwKq5u5xMvBgwd55x06dKhZhPXRo0ct3j6LFy82eu8WLlyI3Nxci9clMjISERER9AATBEECkCBMRaPRYMGCBRV6zrS0NKxZs8ZovqSkJOzZs6fC2+THH3/E8+fPy1XG8ePHeeXz8PBAq1atyl1nPu1pDrKzsxEeHq53v0qlwvbt2yvsXv3+++/0EBMEQQKQIEzlxo0bePr0Ke/8Li4umDlzJo4dO4Zbt27h0qVLWL16NVq0aGHSebdt22Y0T3h4uEnT0l5eXggLC8OJEydw+/ZtnD9/HuvWrUNISIhJdVMoFNi0aVOZ21Qul/MWgIMGDSp3pIucnBzs3LnTpHZatmwZzp49i8OHD2PmzJmQSCS8j//ll1/07jt//jwyMzN5l9W8eXOsWbMGMTEx+PvvvzFv3jyTosgcPHgQSqWSHmSCIGomZAdDWIqwsDDelpetW7dmycnJnOUUFRWxqVOnmmTJacgiWKvVMh8fH95ljRo1iuXn5+stb//+/SY5Iu7YsWOZ2/To0aMVGv3j8OHDJjl3zs7O1inj1q1bzNvbm3c5+vrBrFmzeJcxevRoTufXjx8/ZgH+AbzLOXv2LD3IBEHUSGgEkLAYfOPUAsAff/wBNzc3zn1SqRRr1qzBm2++ybu8Q4cO6d337NkzPHv2jFc5/fr1w++//w5bW1u9eQYPHozNmzfzrtulS5fw4sWLMrWpoet6GWtra7zxxhvlvoeXLl3iPfK3Y8cOODo66uwLDAzEpk2bIBAIeJV1584dzvSkpCRexzdq1AgbNmyAtbU1576NmzbyHhl98uQJPcgEQdRISAASFuPRo0e88gUHB8PPz89gHpFIhLVr1/KeTtQnIgDg3LlzvMoQi8VYt24dRCKR0bwjRoxA69at+Y66Iz4+vkxtytcwISQkhFMAmQrfen744YdwcnLSu79Xr17o3r07r7L++ecfzvSUlBRex0+ePBk2NjZ693fs2BE9e/b
kVVZ512sSBEFUVcTUBOWHMQaVSgW1Wo2CggLcuXMHt27dwv379xEXF4fnz58jJycHOTk5EEAAewd7ODs7w8vLC/7+/vDz80Pr1q3h5+cHOzs7iMViiMXicq/fqkw0Gg3vF/bIkSN55fPy8sKwYcN4rUlLT08vtwAcOHAgGjZsyO9LSijEZ599hgkTJvDKz3c061VhpE8cvUq/fv3Mch/5jlR26tTJaJ7WrVsjKiqqzOfk25/atWtnNE9AQABOnjxpNF9GRgb9wBEEQQKQKI1KpYJKpUJGRgaioqJw6NAhnD93Hrl5uVCpVNBqtVCr1WD/RlxBsc/tzIxMZGVl4enTpzh37lyJ4HNwcECXLl0waNAg9OjRA3Xq1IFEIuHlHLeqkZubC41GwytvcHAw73J79OjBSwCmpqbq3cd3ZJLviFUxo0aNwvvvvw+tVms0b3Jyssltaor7l8GDB5vlPmZlZfHKp2/6/mVcnF14laXPFYxCoeB1vIeHh9E8devW5f2MEwRBkAAkSl4KRUVFuHv3LjZt2oRDhw4hOzsbSqUSKpUKxoKrMDBAgxKBVFRUVCKa/vzzTxw+fBhOTk4YPHgwJkyYAH9/f1hZWVUrIWiK9WS9evV45zU2VVyMoRFAvqM6zZo1M+mara2t4e3tzWvatCyOsU+cOMErX7t27XiJoPKIMUs/XwRBEAQJwCqDVquFQqFAbGwsfvjhBxw9ehR5eXlQKpVGRR8fiqeSVSoVCgoKEB4ejl27dqF///749NNP4e/vD2tr62o9NcyFi4sL77wODg6875U+0tLSeJVRv359k6+ladOmZV7fZ0yI8ZmyBMwX/cNUIW/O54wgCIIgAVglUCqVSE1Nxbp167Bx40ZkZWVZ9OXIGENRURGUSiV27tyJY8eOYfLkyfjggw9Qv359k3yrVWVkMhmsrKx45+drSWoIvlOJrq6uJpdtrpG3Vzl9+nTJSLExhgwZYrbzVsbHBt+lAwRBEEQ5ft+pCYwLsYKCAkRHR2PIkCFYvXo1UlNTK2xkhDEGpVKJFy9e4Pvvv8eQIUMQExODwsJCujllhG9MW1OEaTF8LIbLAl/3Lx4eHrytkflQGdOx5hD5BEEQhGFoBNCI+MrNzcXGjRuxZMkSpKenV9roBGMMcrkcN27cwOjRozFnzhyMGzcODg4OteqFWa9ePXz55Ze88unD3t4e+fn5Rssoi8jv1q0br9FZU6KbMMZ4u38xR/SPUl+IlTACWNOWOBAEQZAArEZotVpkZGRg0aJF2LJlC3Kyc/413qhkNBoNUlNTMX/+fMTHx+O///0vnJ2da81L09PTE4sXLy5XGfb29ryscHNyckwue/z48Rg/frxZr/nBgwe8Q+qZ4iybD5Ya0SQBSBAEUbnQL60e8ffixQvMmjULGzZsQHZ2dpUQf8UwxpCTk4NffvkFs2bNQlpamlmMUGoLfK2O+bqLsTR83b+YK/rHy8hksor/KhXTdylBEAQJwEoQf1lZWfjqq6+wZ8+eSnGDwVcEFhQUYNeuXZg3b96/IpVEIC+aNGnCK58poewsybFjx3jl69Onj8EIGNVFAEqlUuqkBEEQJAArVlTl5+fju+++w+7du6us+Hu5voWFhdi+fTuWLFnCa10bAfj4+PDKt23btkqPBJGXl8cregZgvugfL8PXRQ+ftbEarcas5yQIgiBIAJoFuVyOPXv2IDw8nLelaFUQgQUFBfj111/x559/Qi6X0400QteuXXnly8rKwpgxYyq1TSMjI3lb4por+sfL8PWFePfuXaN5+PpH9Pb2pk5KEARBArBiUKlUiI2NRVhYGLKzs6tV3RljyMrKwvz583Hnzp0yRZmoTXTp0oW3H8Vjx44hODgYV69erZS6Hj58mFe+1157DZ6enmY/v6+vLz+hetLwdLlSqcTZs2d5lWWKhTRBEARBArBcAio7OxtffvklkpOTq+VaOsYYEhMTMWfOHOTk5NB6QAPY2dmhZ8+evPNfvXoVHTp0wPDhwxEdHV1hbavVanm7fxk4cKBF6tCjRw9e+cI3hhsUeN988w0eP35stBxHR0eTQ/ARBEEQJADLhEKhwM6dO3HlypVqPXqm0Whw8eJF7Nmzh3fUiNrKxIkTTRZjf/75J4KDg9GqVSusWrUKmZmZFq1jXFwcEhISeOU1Z/SPl2nfvj0aNGjAq32GDRuGP/74Q+fZmj17NhYtWsTrfKNGjaoU1zMEQRAkAGsZGo0GiYmJWLlyZbU3oig2Ylm+fDmSk5MppqoBhg4dWuawbbGxsZgxYwY8PT0RGhpqsVHBAwcO8Mrn7u5u1ugfLyMWi/HFF1/wypueno7Ro0ejVatWmDBhAkaOHAlfX18sXbqUV1+USCSYMWMGdU6CIAgSgJZHpVJh69at1Xbql0sEJiUmYceOHRUWrq46IpVKsXTp0nKVoVAosHXrVgQHByMgIAArVqxAVlaW2ep4/PhxXvlejf4xZswYNGvWzOh2/vx5XuV/+OGH6Ny5M+963759G5s3b8bu3buRlJTE+7j//e9/8PPzo85JEARBAtCyFEfV+O2336BQKGrMdckVcmzcuBFpaWk0CmiA0aNHY+jQoWYp6969e5g5cya8vb0xffp03hav+sjOzsaZM2d45X3V+vf58+d4+PCh0Y2vdbNYLMb+/ft5+08sC++88w6++uor6pQEQRAkAC2PWq3GgQMHalwkDcYYUlJScOjQId4uRGpl5xcK8fvvv6NLly5mK7OgoAA//PADfH198dlnn5XZhcyJEyd4+daTyWRmj/7BhaurK06dOmURC92xY8fit99+oxBwBEEQJAArhqKiIvzxxx81avSvmGLDFpoGNoy1tTWOHz+Ot956y6zlajQarF69GkFBQbh48aLJx/O1/g0JCTF79A99eHl5YdiwYWYvd+rUqbzd8hAEQRAkAMuFSqXCvXv3cO/ePV4jLdUNrVaL27dv48GDB+QX0Ag2NjbYu3cv1v601uxi6sGDB+jduzdvH3jF4vHIkSO88loi+oc+vvzySyxcuNDs5fbu3Rt///03dUSCIAgSgBUjkM6ePVtjI2cwxqBQKHD27NkaKXDNjUgkwtRpU/HgwQNMmDDBrK5I8vPzMXDgQN7OpGNjY5GSksIrryWif3Dxww8/YMmSJRYpWy6X4+233640Z9sEQRAkAGsRGo0GZ06fqdFr5FQqFc6cOUOGICbg6emJjRs34vHjx5gxYwacnJzMUm5eXh7Gjx/Pa0r+4MGDvMq0VPSPV3nw4AFvVzDlEckTJ06k0WqCIAgSgJajOH7u5SuXa/QLR61W49KlS8jPz6fIICbi4+ODFStWIDExEb/++ivatWtX7jJjY2N5jaIdO3aMV3kDBgyokLaYP38+7w+l1q1bY+/evYiPj8edO3ewZs0aODs78zr21q1b2Lp1K3U+giCICkBcGy+62PlzYWFhrRC6ycnJcHZ2hlgsph5vIjY2Npg0aRImTZqEK1euYO3atdixY0eZDYfWrVuHOXPm6L0XGRkZiImJ4VWWpaJ/vExOTg7++usvXnnHjRuHX3
75BVKptCQtICAAI0aMQO/evXHnzh2jZezYsQMTJkyosf0pPj4eW7ZswYULF5CUlAShUAgvLy907doVkyZNQp06dYyWcffuXWzbtg3Xr19HSkoKRCIRvL290b17d0yYMAGOjo4m1enZs2f47rvvSqUJBAKsXr2as5/m5ORg27ZtOHnyJJ49ewYAaNCgAfr27Yt3330Xtra2Osds3LgRV65cMale3bp1wzvvvEM/QgRhQZFQ6ygqKmL79u1jdnZ2DECN3uzt7dmBAweYUqms0DZOSUnhVT+ZTFbt+k9mZib7/vvvWaNGjcp0T44fP6637B07dvAqw93dnanVas4yXn/9dV5lnDx50ui1HjlyhFdZ/v7+rLCwUG85169fZxKJxGg5YrGYKRQKzjJ8fX151SU2NtbodX399de8yvrkk0/M0mfUajWbN2+ewTZwcHBg27dvN/i7NXXqVCYQCPSW4eXlxSIiIkyq27x583TK6dmzJ2feY8eOMTc3N73n9/HxYWfPntU5bvTo0SY/J2PGjGEEQViOWjsFHB8fXyuMI7QaLeLj42kdoBlxdnbG559/jvv372Pbtm1o3LixSccbWuPH1/3LoEGDKiRm7uXLl3nlmzhxIqytrfXuDwoKQq9evYyWo1arcf/+/Rr3ezN58mQsWLDA4FR6bm4uxowZwzkNrtFo8Pbbb2PdunUGl3MkJCSgf//+CAsL4z0bwnW+8ePH66SdP38egwYNMmig9OzZM/Tr1w83btygHwqCqOLUWgGYlZVVK9bFaZkWmZmZ1NMtgEQiwbvvvovbt29j1KhRvI+7efOm3pcxXwH45ptvVsg1Pn/+nFe+jh07Gs0TEBDAq6x//vmnRvWTXbt2YdOmTbzzf/DBB0hISCiVtnbtWt6xoQHg66+/5pU/OjoaT548KZXm6OiIt99+u1SaSqVCaGgoLyOmgoIChIaGkvcBgiABWDXJy8urFQKQMUZGIBbGxsamJCYwHy5fvsz5crxx4wbS0tKMHi+TyXiNppmDjIwMXvlcXFyM5nF04Lc2ja8LnOrCt99+y5nu5uYGe3t7nXS5XI7vv/++5P9KpRKLFi3iLKN+/fp6fVd++eWXRp/7zZs366SNHj1aZzT39OnTnMK8Tp06nOsWb9++jaioKL3n9fb2RqdOnUq2Vq1a0Q8JQZAArBhRJJfLa40ArOnGLlUBiUSCuXPn8spbWFjIKaz4jvD06dOnwqJ/VMboMR8RXF14/vw5bt26VSrN1tYWkZGRSE5ORmZmJj777DOd4/bv31/y96lTp5Camlpqv4eHB65cuYKUlBTk5ORg1apVOmXExcXh3r17euuWn5+P3bt366RPmTJFJ+3cuXM6aZMmTUJycjKSkpLw/vvv6+w/efKk3nN/+OGHOH/+fMm2cuVKnTweHh70w0IQJADNj1QqhUAgqDXXSliebt268V6XxyVy+Lp/qcjoH5URJrGoqKjG9InHjx/rpI0aNQo9e/YEAIjFYixbtgzNmzcvlefJkyfIz88HAJw5c0anjKVLl+K1114rKWP69OkYNGgQpwjUx549e1BQUFAqrU2bNmjTpo1O3hcvXuikLVmyBBKJBFKplNO9UU5ODu924loXW1HLHAiCBGAtQiAQwM7OrlYIQIFAAHt7+1ojdisTmUwGPz8/XnlfneZMS0vjHTPY3HGLq5oYq0lrx7KysnTS2rdvX+r/YrEYb7zxhl4BlZiYyEsccfULQyKMy/hDnwuevLw8nbSXp/25lgCYMvPw6tpXOzs7dOrUiX5UCMKC1FrHcA4ODrVKABIVg5ubGy9/d9nZ2aX+//fff/NaktC2bdsKif5RDJ9F/+ampkcDqV+/Pufv0asUW+7n5uaWSpdIJJz5GzZsyLsOT58+xanIU6XSrKysMHbsWLPdk5eP+fzzz0sZSnXo0KHk78ePH+tYfvft25dmLgiCBKBlRJGnp2eFuNGobIqdxNIIYMXAN+rFq1OrR48e5XVcRUX/eFWE1PRzViRCoe7Ey8CBA1GvXr1SacVhCO3s7EqlazQaMMZ0nukmTZpg/vz5pdKCgoI467BlyxYwlP7gGDFihN7+W5b10i/fx3bt2umNpsM1/VtRRk4EQQKwliEUCtGwYcNaIwB9fHw4XzqE+eE7avGyAFSr1Thy5Aiv44YOHVrjxVhttFgPDg7Wa0Vet25dnXvyzz//oGnTpqXSGzZsyMv/n0ajwW+//aaTPmnSJIMfzeYQulxwrX3lWs9IEAQJQLMIQC8vL0gkkhp/rRKJBF5eXjVGAGZmZmLdunVG83l6enI6s3327BkvP3MeHh681/OVeqB4htt7eW3drVu3ONeKcYn5mzdvIjY21mC+Vy1G9b54jx7T8TcXGhpqOQHIU0OQy6LSNGjQQCdt+vTp2LdvX5mmSc+dO6djnOLr64tu3boZ7Htl+e0xRkFBgY61cIsWLeDt7U03niBIAFpGALq4uKBJkybIzMyssVNOIpEIzZs3h7Ozc40RgPHx8bzcrQQEBHAKwF27dmHWrFlGj58yZQrWr19vcv34ipeX/ay9usZLHxqNBhMnTjRbWy5Zqmu5+aoANCuk68rEsGHDdFzFREREoFmzZpg9ezbee+89nWliQ3A5pR43bhyUSiUkEgmn2OMSmkqlsiSda60onzpFRUXpGBqFhITQTSeIitBCtfGiBQIBxGIxunXrVqNHASUSCbp37857VIooP3wXyzs6OlaL66mMvkPLFUrj7e3NaSUcHx+PadOmwc3NDZMnT8bVq1eNllVQUMDp+2/hwoWwtraGtbU13njjDZw6VdpAxNbWVueYv//+u5QgfRVXV1ej9Tl8+LBOGk3/EgQJQIu/ZLp16wYrK6sae41SqRSvv/56lV7rWFWn+8paL76uL6qLAKyMvlMb1uaayoYNG0qMQrhE3YYNG9CuXTsEBwcjOjpabzl79uwp8S/4MsUxilUqFU6dOoXevXtjx44dJfu5nDKPHj0aM2bMwIwZMzBmzBid/a+//rrRZ+xVAWhjY4OuXbvSDScIEoCWQyKRoF27dqhXr16NtJAVCoVwc3NDu3btqvQIYFFREecLSR9c/shMbRc+JCUllal8vs5vudx4VEVkMlmFn5NGrHVp1KgRoqOjjcZTjo6ORvfu3bFlyxbO/du2beN1Pq1Wi6lTp5YsT+DyyVdYWIhVq1Zh1apVkMvlpfbZ29ujS5cuBs8RFxeHZ8+elUrr06dPjf4oJwgSgFUAgUAAR0dHDB06tEb6m5JKpRg6dGilObw2pU1Nif3KN68+gcXlg40LPr78uDAUeutlKtKXX3moDB+Sr8ahJf6lRYsWuHHjBjZu3Ah/f3+D4m3KlCmIiYkplZ6cnGwwPBvXx0zxNG/Xrl3RokUL3seOGDHCqJAj9y8EQQKw0hCJRBgxYoRJC6irC3Z2dhgxYkSlrXE0ZeTo7t27vPOePXuWVz53d3fOdC8vL17Hx8fH4/nz5yZdc3JyMi+Bam9vDzc3t2rRj/iOVPIZMU3PSOdVlj5fdHyF4auWzVzwjTdc1T4OJRIJJkyYgNjYW
Jw6dQojR47kHDFVKpVYvHhxqbQTJ07oLG0ICgpCTEwMYmJiOH0GFq8rFIvF2LNnD6dFMhf6Ioq8DJfvSwr/RhAkACsEsViMgIAAhISE1ChjEKlUin79+sHPz6/SptNkMhlv8RAeHs4rn1wux59//skrr76RPn3C8FUYY1i4cKFJ18xlXclFUFBQtTF04LOQHwAuXLhgNM+NGzd4laVvdJTvvbty5YrRPMZc6RRTt07dqvnDLRSiR48e2LlzJx49eoR33nlHJ09kZGQpf5Nc17xjxw506dIFXbp0wfbt23X2v7w8w8/PD7du3cKKFSswevRoDB48GN27d9c5pnHjxkbX8WVnZ+vEOG7evLlJ0UwIgiABWGYEAgGsrKzw4YcfVptF+Xyuyd7eHh9++GGlrN96uR58/egdOnQIDx8+NJpv8eLFvNfm6Zsia9iwIe9oHZs2beItFB49eoTly5eXq27VWQD+9NNPSE/XP8IXERHBe/RWnw84vtP3P//8s8G1omfOnMHpqNO8yvLy9qry98jHxwe///47Bg4cqPPB9PLoOtf61GbNmnH+XcyrEWscHBwwY8YMbN++Hfv370eTJk10jgkNDTX6gXP8+HGdmM/k/oUgSABWKGKxGG3btq0xawGlUinefvtttGrVqtIX0+sLQ/UqWq0Ww4cPR3JyMud+tVqNJUuWmDQiN2zYML3tM3jwYF5lqNVq9OvXD5cvXzaY79q1axgwYAAvZ85V+UXHNQrOV6ympqZixIgRnFOrFy9eNMl/YePGjTnT+a6bTEhIQGhoKKcIjIuLw/vvv68TBk0fvr6+1eOHXCjknD7NzMws+ZvLQv1locYl2oqtg7nIz8/Hzp07dT78uPxvcn0QvMqrApYgCAvrn9reAAKBANbW1pg5cyZOnjyJJ0+eVNtIBMURTj7//HPY2NhUen169OiBX375hVfe27dvw8/PD6GhoXjjjTfg5uaGxMRE3LhxA7t27eI1QlhMu3btDK71Gzp0qF4ryVdJTExEx44d8eabb6JPnz7w9/eHvb09MjMz8ejRIxw9ehQRERG8/f/Z2Nigf//+pdI6d+6sV/yWhcGDBxsVrcC/8WBfFqNcArB9+/a8z3v69Gm0aNECH330Edq3b4+8vDxERkZiw4YNvJ2t+/j46IQ+e/m6lixZwqucffv2ITAwEFOmTEHbtm2Rl5eH8+fPY+3atTqOh/Xh8Hyl7gAAIABJREFU7OysN35tVYRr+jQ7O7vk71dH3Ph+nOnjjz/+0LHg79mzp9F1ghqNRsf9i0wmMxiJhCAIEoAWE04NGjTAnDlzMGvWrFI/mtVJyDo4OGDu3Lnw8fGpEr7U3nzzTdjZ2fF285Kbm4sff/wRP/74Y7nOO3XqVIP7+/fvjyZNmvAKCQf8ux7wwIEDOHDggFna5FVxbmVlZVajEL5uNLy8vIyet1mzZnB3d+ctUNPS0njFo9VHnz599O7r0KEDXF1deRtwxMfH46uvviqXkK6otcG//fabzvrWTZs2wdnZGdOnT0dBQUFJeseOHTF58mSdMrjq+vKoX1k+bA0ds3nzZp20sWPHGi3z5s2bePHiRam0Xr16kfU3QVS09qEm+L+1gCNHjsTIkSMrde1cWZHJZHjnnXcwbNiwKuNHy87Ozqyhy/jg7+9vNJyZVCrVsZCsKN5///1q1a9EIhGmTJlSYeczdC6xWIz33nuvwupiqXNxjardvXsX+/fvL7UVfzht3boV4eHhJdv+/fs5y311vR5Q2nCmLIZH+j4k4+LidNzM2NraYuTIkUbLJPcvBEECsMqJQDs7O8ydOxcdOnSoVlbBEokEHTt2xNy5c6ucS5uvv/6a9+J9c7B48WJeax+HDRtmNFKBuSmeRq5uzJw5k7cFbnl4++230aFDB4N55s6dy9uIpzz079/fLKKE63eEy5CJa4S1WHy9+kH39OlTznM9fvxYJ+3lpRBcdXlZjHIJU32/g7/++ivn/eMKGfcqL4eQe/nZIAiCBGDlNcb/j56xbt06tGzZslpEJBCJRAgMDMTatWtRv379KudexMnJCd9//32FnOujjz7CW2+9xfte79ixo8L88UmlUqxYsaJaPhf29vZYtmyZRc9ha2vLq584OzuXa4qZD2KxmLdFtzHq1aunk3b8+PFS/8/Pz0dkZKTOB2mx0H3VKvrOnTu4ePFiqTSNRoN9+/bpnOvl/s0lnO/fv1/yN5cTcy5H4EqlElu3btVJ5+P778WLFzp1b9y4Mac1MUEQFoYRpdBqtayoqIhdvnyZBQQEMJFIxABUyU0sFrMWLVqwK1eusKKioirbphqNho0dO9aibREcHMyUSqXJdYuJiWFWVlYWrZtAIGDh4eEV1t6vv/46r3qdPHnSpOdiypQpFmufv/76i3dd1Go1Gz58uMXu18aNG812L9LS0phQKNQ5x6xZs9iNGzdYVFQU6969u87+zp07l5Tx+eef6+x3dXVlmzZtYnfv3mVXr15l77zzjk6eBg0aMK1WW1LOqlWrdPK0atWKnT59mp0+fZq1atVKZ/93332nc0179uzRydeoUSOm0WiMtseWLVt0jp06dSq9eAiiEiABqOdlp1Ao2PXr11n79u2ZVCqtcuJPKpWyjh07suvXrzOFQlHl21SlUlnspd2tWzeWlZVV5rpFREQwJycni92rtWvXVmhbW0IAMsaYUqlkvXv3Nnv7LF++3ORrLCwsZB06dDB7XebMmWP2+9GrVy+T6/Hrr7+WHB8XF1emD9FZs2aVqsedO3dMLiM2NlbnegYMGKCTb968ebzaYvTo0TrHHjp0iF46BEECsGqJQKVSyeLi4lhISAizsbGpEsJPIBAwW1tb1r9/f/bgwYMqPfL3KkVFRWzixIlmbY8xY8aYpQ3u3bvH/Pz8zFo3Gxsbtn79+gpvZ0sJQMYYy8vLY6NGjTJL+0gkErZq1aoyX2deXh4bMWKE2UbTV61aVWrEzFycO3eOCQQC3nVp3749U6lUpcqYPXu2Sdfj5OTE0tPTderCNVKob3vrrbd0jn/+/DnntTx+/JjXB4Sjo6POh2x+fj69cAiCBGDVE4EqlYqlpKSw//znP6xOnTqc0zkVtQmFQla3Tl02Z84clpqaytRqdbVs1yNHjjAvL69ytYW3tzfbvn07r2knUwRFWFgYs7e3L/e9GjhwIHvy5EmltK8lBWDxc/HLL78wmUxW5vZp3Lgxu3TpUrmvVaPRsNWrV+sIC1O25s2bs9OnT1v0nvzwww+865KQkMA5gs5XvMlkMnbixAnOeuTm5rLAwECjZTRp0oRlZmbqHL9gwQKdvD179uTVBjExMTrH9u3bl140BEECsOqi0WhYbm4u27dvH3vttdcqfDRQIBAwGxsb1q5dO3bw4EGWl5dnVuFTGRQUFLBNmzZxrn8ytDVt0pQtWrSIFRYWWqxuGRkZbM6cOczX19fkUZf33nuPRUREVGrbWloAFvPo0SM2ceJEZiXlv4ayfv367LvvvjP7qE9WVhYLCwtjderU4V2Xli1bsq1bt+qMtlmKAwcOsCZNmuj9uJs4
caLBpQwajYb9/PPPzN3d3eBa2Bs3bhisR25uLhs/fjzntLJQKGTvvPMOp/hTq9WsUaNGOsfwXd/KNYq5dOlSesEQRCUhYKyahr0om8ELNBoNGGPQarUlTk6L/xUIBBCLxZzWv4wxqFQqpKenIzw8HL/++ivS09Mhl8stVl+BQACZTAbXuq6Y8sEUTJw4ES4uLnr9/KnV6lKhmwQCQYnFa/G/IpGoJL2qkJSUhMuXL+PChQt49OgRMjMzkZGRARsbGzg4OMDd3R0+Pj4YNmwYAgMDK6z+jDE8fPgQBw4cwMOHD5GWloaUlBTk5OSgbt26qFevHlxdXeHi4oIePXqgW7duVSKc4M2bN3k5S+7YsSOnlaep5Ofn4++//8b58+fx8OFDpKenIycnB2KxGA4ODvD09ETz5s3Ru3dvdO7c2aLW9Wq1Gjdu3MDhw4fx8OFDpKamIjU1FVKpFPXr10f9+vXRtm1bDBo0iDNyhqXRaDSIjY1FXFwc7t+/D5VKhUaNGmHgwIG8LdKVSiUuXbqECxcuIC0tDTY2NvD29kbPnj3RqFEj3nVJTk7G0aNHS6IfNWrUCCEhIXpD7mk0GkRFRek4h+7atSsvJ85xcXFITEwslda2bVu4uLiQNSZBVAI1WgAyxqBWq6HVaqHRaJCXl4eEhAQ8f/4cCQkJSEpKQl5eHgoKCiCXy2FlZYWpU6eiVatWen/QtFotioqKkJiYiG3btmHz5s3ISM+AokjBOxyYMSQSCaysrFC3bl1MmDABY8eOhbu7O6ysrDjdvDDGUFRUhLt37+Knn35CYWEhZDIZ7OzsYGdnBw8PD/j4+MDT0xM+Pj6wtbWFSCSCSCSqVv4OCYIgCIIgAagXlUoFtVqNgoIC3Lp1C9HR0Th//jxu3rwJuVwOjUYDjUZTMgpYvAkEAnh5emHpsqXo27cvbG1tOUebikcQVSoVMjIyEBkZid27d+PChQuQy+VQq9Ulm9EbIBBAKBRCLBZDKpVCJpOha9euGDFiBLp37446depAKpXq9cjPGENhYSFOnjyJmTNn4vnz59BqtRAIBCVb8cifUCiEtbU1AgMD0bVrV3Tp0gVBQUGws7ODRCKpFn4PCYIgCIIgAVhKCKnVaigUCty8eRN//fUXDh06hNTUVBQVFZkkyFxcXPDxxx/jo48+gpOTk8FRMo1GUzL1+uLFC9y8eRPnzp3DlStX8PDBQxTKC0sJzeJzFAszW1tbNGnSBO3atUOXLl3Qpk0buLq6lggyQ46d1Wo1srOz8csvv2DlypXIzMw0GLz9ZcEplUohkUhQr149DBgwAEOHDkWbNm1gbW1dJaYxCYIgCIIgAWhQ+KlUKuTm5uLEiRMIDw/H1atXIZfLoVQqjQoifSLJxsYGXbt2xaJFi+Dv7w9ra2uDYqx4VPDl0UWlUon09HRkZWWhsLAQBQUFYIzB1tYWtra2cHFxQd26dSGVSiEUCktGAo1F82CMQS6X4/79+/jf//6H06dPl8QNLcu1SqVS2NjYoHXr1nj//fcREhICJycnEoIEQRAEQQKw6lE8zXvy5EksX74cd+7cQWFhoVnW4gkEAohEIri6umLatGmYMGEC6tatq9cAw5AoLBahL48AAv9nlGGq2M3IyMCWLVvw048/IfVFainDj/Jcb/GIpL+/P2bOnImQkBDY2trS1DBBEARBkACsfF4eAfv6669x+vRp5OXlQaPRmCTuii1iBQIBBBAAAu681tbW8PX1xaeffooBAwbA0dGxwi1ptVptibXl6lWr8eDhAygUCr0jnMVTzsUC9GUhagyRSAQ7Ozt07doVX3/9NVq0aMHLyo8gCIIgCBKAFkGj0SAnJwe///47li1bhpSUFKMjYMXTnMXr3lxdXeHr64tGjRqhTp06sLW1hUwmMyjqioWgt7c3OnfubBb3GaYI3oKCAly8eBHx8fGQy+UGxVyxpXJhYSEyMjLw9OlTPHr0CC9evChZD2lserxYJNerVw8zZ87EuHHj4OTkZNKIJUEQBEEQJADLjUqlQkJCAv475784EnEE+fn5ekXMy2vbfHx8MGDAAHTu3BkBAQGoV69eqdG/4vy8Guz/+wo0tk7PEiJQpVLBlNv1soWzRqNBWloa4uLicOHCBRw+fBhPnjyBXC5HUVGRweu1s7NDSEgIlixZAm9vb1obSBAEQRAkACsGhUKB2NhYfPLJJ7h+/bpe0VI8UlenTh0MHz4cI0eOhL+/P2QyGW8ji5rIy9PBxSOAcXFx2Lt3L3bv2o209DTI5XJOgVkspgMDA/HDDz+gbdu2Jq2FJAiCIAiCBGCZxN/ly5cxefJkPHr0iNPIQyAQwMrKCu7u7vjwww8xatQo1KtXD2KxmJwd6xGExe5r0tPTsXv3bqxduxZJSUlQKBScx4hEIjRs2BDr169Hly5daF0gQRAEQZAAtJz4O3/+PCZPnoynT59yGnqIRCI4Oztj3LhxmDZtmsGoGYQuGo0GKpUKKSkpWL9+PcLDw5GZmcnZ1kKhED4+Pvj555/RvXt3yGQyakCCIAiCIAFoPpRKJa5fv46xY8fi8ePHnOv9rKysEBQUhEWLFqFjx46wtrYmQ4VyCEG5XI4rV67gq6++wrVr1zhHA4tF4NatW9GhQwdaE0gQBEEQJADNJ0b++ecfjBkzBjdv3tQZjSo2ThgyZAgWLlwINzc3WpdmBoqNTVJTUzF//nzs3bsXubm5OvlEIhFatGiBHTt2oFmzZuQrkCAIgiBIAJYPrVaLjIwMjB8/HidOnIBSqdQRf46Ojvjss8/w6aefwsHBgUb9LCDAc3NzsW7dOixfvhxZWVk6eSRiCbr36I6tW7eifv36Fe4bkSAIgiCIsiEKCwsLq2qVKiwsxJo1a7B9+3bI5XId8efk6IS5c+fio48+gqOjI631swBCoRAymQxt2rSBs7MzLl68qHMvGGNITU2FUChEx44daSqYIAiCIKrLe76qVUilUuHixYtYs2aNTnxbgUAABwcHzPrPLEyZMgX29vY06mRBiqfZx48fjzlz5sDR0VFHABYWFmL9+vWIiYkxS0g6giAIgiBqmQBkjCEnJwcLFixAZmamzn5ra2uMHz8e06ZNg52dHYm/ChSBU6ZMwaRJk2BjY6Nzz7KysrBgwQJkZWWhGoeWJgiCIAgSgJWBUqnEn3/+iWvXrukYfYjFYnTq1AlffvklHBwcSPxVggicNWsWgoODdQw+tFotbt26hV27dhmMKkIQBEEQRBV5t1cVIxCtVouUlBT06tULDx48KOXyRSAQwNPTE/v370dgYCA5dq4k1Go17ty5g7feegvPnj0rNdonEAjg6+uLyMhIeHp60rpMgiAIgqjCVJm3tEqlwqFDh5CQkKDj78/GxgbTp0+Hn58fib9KRCQSoVmzZvjss89ga2tbah9jDMnJyfjrr79oLSBBEARBkAA0DmMMBQUF+PXXX1FYWKgjOvz9/fHee+9R6LFKRiAQQCaTYcyYMQgMDNQZ5ZPL5diwYQNyc3NpLSBBEARBkAA0jFqtxuXLl/Hw4UPO0b9PP/0UTk5OtO6
viohAJycnfPrppzqjgFqtFs+ePcP58+c54zUTBEEQBFE1qBLhG9RqNQ4ePKgTdkwoFKJxo8bo168f+ZirQkgkEvTu3RvNmzfHtWvXSol2uVyOgwcPonfv3lVuul6lUmH+/Pk6jsW5sLW1hbu7O7y9vdG5c2e4uLiYdK6oqCgcOnSIc9/YsWMRFBREHamKcunSJezatYtz33vvvYfWrVvX2nu9evVqPH/+XHckQSjE0qVLLXbenJwcHDx4EH///Tfi4+ORlJSE3Nzckme0S5cuGDFiBJo3b07PZy3rG0Q5YJWMVqtlqamprHHjxkwgEDAAJZuNjQ1bsmQJk8vljKhaKBQKtmLFCmZtbV3qngkEAubt7c2Sk5OZVqutUnXOzs4uVVe+m0gkYj169GDbtm1jGo2G17mWLVumt7ydO3dSB6rCLFmyRO+92717d62+161ateK8TqlUapHzyeVyFhYWxqysrHg9qwMHDmR37tyh57MW9A2i/FT6FLBarcb9+/eRmZGps27M2toaAwYMoDizVXHoWCxGv379OI1BcnJyEBsbW2OmgTUaDaKiojB27Fh06tQJt2/fpg5AEBYmISEBbdu2RVhYGG/3UocPH0ZQUBA2bNhADUgQRqh0AajVanH16lUoVaWn5UQiEQICAtCgQQMSgFWx4wiF8Pb2RqtWrXTiMKtUKly5ckVnPWdN4PLly+jevTvOnDlDnYAgLERiYiK6d++OuLg4k49VqVSYPHkyFixYQA1JEFVdAF66dEnHdYhEIkFwcDCt/auiCAQCiMVidOvWTWetn1qtxoULF3ScedcUsrKy8Oabb+LJkyfUEQjCAu+E8ePH4/Hjx+UqJywsDHv27KEGJYiqLABv376tM10okUjQoUMHcihclTuPUIgOHTroCECNRoM7d+7UWAEIALm5uQgNDSV3NwRhZjZv3owTJ06UuxzGGKZOnYrc3FxqVIKwlABkjJVp02q1KCwsREpKis6LVCQSoWHDhuUqnzbLbkKhEA0aNNCZAtZqtcjKykJubi40Gk2Zy69IbGxs0KxZMzRr1gwNGjTQiXnMRXR0NGJiYuhXhCDMBGMMK1euNJrP0dERrq6uRvOlp6djxYoV1LAEwYG4rA+pUqmEVquFUqmEQqEo0wtbq9Xi6dOnUCl1I0cUTzFmZGTQXarKXxBCIecorVqtxoMHDyAUCsvkv1EoFEImk0EsFpdslhwN7t69O44cOVKqbz5+/BgbN27E8uXL9UY3Wb9+PV5//XWd9HHjxqFr166cx7Ro0YI6Tg2C7rX5uHDhAmJjY/Xuf+2117B582a0bNkSAJCSkoKlS5caFI07duxAWFgY3TOCKI8AZIxBpVIhLS0NR48exYEDB3D16lXI5fIyf+1pNBrk5ukO0WdnZyM4OJimgKs4Wq0W2dnZOum5ubkYMmRIuXwB2traok2bNhgwYAAGDBgAV1dXWFlZVZiwbdKkCb799lu89tprGDFiBGe+kydPcqa7urryGqEgqj90r83H33//rXdfmzZtEBkZCQcHh5I0Nzc3rFixAmq1GmvWrOE87sGDB3j+/Dm8vb3pnhFEWQQgYwyFhYWIiIhAWFgY4uPjoVAoSqb4zA1jDFlZWXSHqimMsXKvvcnMzERycjJOnjyJ1atX4+uvv0b//v11XM9YmqFDh6J9+/a4fPmyzr7k5GQkJSXBw8ODbjpBlJMrV67o3Tdz5sxS4u9l5syZg59//lnvSH1CQkIpAUgQBM81gMXiLzw8HNOmTUNcXBzy8/OhVqtpETxhURGpUqmQn5+Pe/fuYerUqdi4cSMKCgoq9iERCjFp0iS9+5OSkuhmEYQZuHv3Lme6RCLBsGHD9B7n7u6Ovn376t2flpZGjUsQZRGAKpUKUVFRWLRoEdLT0mukfzeiaqPVapGRkYFvFn6DU6dO6f3StxR+fn5692VmZtINIggz8OLFC850Dw8PWFtbGzy22GhQ38ckQRAmCsDiqbwFCxYgPT0dDPQgEZUDYwwZmRn4+uuvkZ2dXaE/6l5eXnr3lXUNLEEQ/4dSqURhYSHnPjc3N6PH05o+gjANo2sAVSoVzp8/j/v379PIX1kUtlAIe3t7HUtYxhjy8/NrtK88S6DRaPDw4UOcP38e/fv3L5eRiSmYGtbu2rVrOHv2LOe+AQMGoGnTpkbLKCoqwr59+3D+/HmkpKQgISEBVlZW8Pb2hpubG4YPH4727dubtT7Xr1/Hpk2b8OzZM6SkpMDDwwNNmzbFtGnT0KBBA51jHz16hE2bNuH+/ftISEiAUCiEp6cnOnbsiNDQ0DK9lLmuWywWo379+mjevDmGDBmCoKAgkw3E1Go19u3bh4iICDx9+hT5+flwc3ND48aNMX78eLRu3bpMfcMc9xoAFAoF9uzZgytXriA5ObnkftevXx9+fn4YOnQoAgMDTbaqz83NxdGjRxEZGYnExESkpaVBJpPB1dUVAQEB6NevHzp27Fgma31zIpFIIJPJoFAoOH9HjdG0aVMEBgZy7rO3tzfbPXu1HxUUFKB+/frw8fHB2LFj0bFjRwD/Gp9ERERwljFw4EA0adKk3M9ocnIyXFxc0KJFC4SGhqJVq1Ymt3t16R+EZUZVjAbjnjVrFu9g3LT93yYUCpm/vz+7ePEiu3XrVqnt4sWLzN/fnwmFQmorEzeZTMa++OILJpfLTQp8nZ2drbfM/v37Gzw2KipK77ERERFmDTavUqnYwoULmaurq9G2aNOmDTt06JDRazdWn8LCQjZq1Ci9eSQSCfvmm2+YVqtljDGmVqvZ7NmzmUAg0HuMtbU1+/HHH0uOMYYp192hQwcWFRXF+94nJiayLl26GCxzzJgxLD8/ny1ZskRvnt27d5v1XjPGWFFREZs7dy5zcXExet1dunRhMTExvK65sLCQLViwgDk4OBgtNygoiB08eNBoma1ateI8XiqVmiU4va+vL2f5TZs2ZeakrPeMTz8aMmQIy8vLM3s/MvaMAmCTJk1iRUVFldI/LN03CPNjVAAWFBSwIUOGMLFYTOLDxE0kErH27duznJwcplKpSm05OTmsXbt2JADLsEkkEjZ06FBWWFhYYQJw3bp1eo+Njo422wsmLS2N9ejRw6T2EAgELCwsjGk0mjK98LZs2WL0pVa8TZ48mRUUFLBhw4bxrt93331n9N6U9bq//fZbowIzNzeXBQQE8Cqzffv2LCwsrMIEYFJSEuvatavJH5YrV640WO6zZ89YmzZtTH62Zs6cabAfWfol361bN733OikpqVIFoCn9KCgoiH311Vdm60emPKPvvvuu0eu3RP8gAVhDBWDfvn1JAJZDAObn5+u0a35+Pmvfvj0JwDJsYrGY9e3blxUUFFSYAOzUqZPeYxMSEszygikqKmIdOnQoc7t88803ZXrheXl5mXSeOnXqmCzUrl+/bnAErDzXvXz5coP37j//+Y9J5VlbW1eIAJTL5ax169Zlvu61a9dylpuVlcWaNm1qkX5k6Ze8oRGuWbNmVaoANLUfiUQis/UjU5/R/fv36712S/UPEoDVD95uYMiKiq
itnDhxAhcuXODc16xZM7i7u5vlPPPmzcOlS5fKfPz8+fP1riEyREJCgkn5TY3OwxjDwoULLXbdc+bMwY0bN/Su11q/fr1J5VWUUc8XX3yBmzdvlvn4zz//HA8ePNBJHz9+PB4+fFiufnjt2rVKedZ69Oihd9+yZcuwYMGCCvcAUNZ+ZM713aY+o6tWrdK7rzr3D8K8UJgNgtBDUlISli5diqFDhxr8MTVHtJqkpCR8//335SpDo9Fg7ty5VbItIyIiOBf3m+O6VSqVXoH54MED5OTkVLn2ePLkCdauXVuuMhQKBRYvXlwqLSoqCvv37y9XuVqtFvPmzauUdhk+fLjB52n+/Pnw8/PDqlWrKjRQQFXtR/o4c+YMpyP+6t4/CBKAOggEAshkMtjY2EAqlZa5HIlEAmtr63KVUVnXb21tDVtbW8hksjJZawkEAlhZWcHGxgYymazWheCLiIhAs2bN0KxZMzRu3BgODg7w9PTE7NmzkZ+fz3mMl5cXpkyZYpbzb9u2zWRLY30//E+fPq1y7SuXy3H//n2LXfdff/2F5ORknfRHjx5Vyf62ZcsWs8yqbN++vZQw+fHHH81SvyNHjuj1yWdJXF1d0adPH4N5Hj9+jBkzZsDDwwOhoaGIjo62+AxVVe1Hhj4GuUaHq3v/IEgAlkIkEqFJkyb4z3/+g5UrV2LSpElwdXU1SQQJBAI4OTlh6NChmDt3LiZOnAgPD49qIQRFIhEaN26MOXPm4JdffsHs2bPRoEEDkwScQCBA3bp1MWnSJKxcuRKzZs1C48aNIRKJatXD8PDhQzx8+BBPnjxBXl6ewbw2Njb466+/UKdOHbOc++LFiwZfijt27EBGRgaeP3+O2bNnG+zfZ86cMfn8ISEhiI2NRXZ2Ni5evIg2bdoYPaZv376IjY1FTk4OLl68aNSNClc0BnNdN2MMkZGROunGnHQPHjwYd+/eRXZ2NiIjI9G8efMK6WuGrtvd3R179uxBVlYW4uPjMX36dL15lUplyfIEuVyOI0eO6M3brFkz7Nu3D9nZ2YiPj0dYWJjevIwxHDt2rFKew//+97+88ikUCmzduhXBwcFo2bIlVq1apfdjrbwY60ft2rXDtWvXkJmZiaNHj1ok7Jypz2h6errOR1hN6B+EGeFjBBISEmJwQSsq0ciiVatW7M6dO6ywsJApFAqWm5vLDh8+bNJCdWdnZ7Zz506Wk5PD5HI5y8/PZ7du3WLvvvsuc3Z2LrMBjCWNQAQCAZPJZOy1115jN2/eZAUFBayoqIjl5+ezS5cuMR8fH4MuOl4up27duuzIkSMsLy+PKRQKVlBQwGJjY1lgYGCVNFKxhBGIKVtAQAC7ceOGWReZt23b1qQF3aGhoXrz/+9//zOpPu7u7jquI9LS0pidnZ3eYzw9PZlSqSx1zIsXL5itra3eY/bu3WvR654xY4ZO/p9++klvfl9fX6ZSqUrlT0hIYPb29hY3AmnevLneY47j7ijwAAAgAElEQVQdO6aT35DV9dy5cxljjF2/ft2gEQGXFe3EiRMNWnxW1kL/RYsWlenZdHZ2ZgsXLtTpm+W9Z4b6kYODA8vOzi6VPy4uzuBvp6n9qCzP6KvnsHT/ICOQGmoEUlWRyWT4+OOP0ahRI1hbW8PKygp2dnYIDg7GoEGDeDkJlkqlGDhwIPr37w8HB4eSqWR/f3+sW7cO27dvR8+ePeHo6AixWFzp1ywQCCCRSFCnTh2MGzcOu3btgp+fX8n0t42NDQICAvDuu+/yGsGUSCQYNGgQXn/9ddjZ2cHKygrW1tbw9fXFxx9/DJlMRl9JrzBw4EAEBASYtcxXv9aLcXJywuDBg3XSDcUmNjZ6+SojRozQ6St169bFiBEj9B4zfPhwnefL1dUVw4cPr7Tr5loTplQqDV73q8+0p6cnRo4cafE+pC82raenJ+cU6Pvvv6+3rOzs7JIRbH188sknnMZKoaGhRsutDIpnNKysrEw6LisrC//73//QoUMHxMfHm60+hvrRqFGj4OjoWCrNz88PQ4YMMdv5y/KMcs1w1JT+QZgHcbWuvFiMgICAUi8igUAAqVSKwMBAiMVioxZjYrEYQUFBpR4ugUAAsVgMOzs79OrVC507d0Z0dDQ2bNiA6Oho5OfnQ6lUVmhkFIlEAqlECnsHe/Tr1w+TJ09Gq1atYG1tXWqqtrjuTZo0gVgsRlFRkdHrDwwM1Ll+kUgEf3//KiF6qxrLli1DVFQUTp06BVtbW4sKgmbNmnGmG4p7aupi9UaNGnGm+/j46D2mcePGJh9j6evmWvRuyBJT7zkaNLRo/1Gr1XoNGPRFoGjevLneKT8nJycAQL9+/XDu3DnOPC1btjS5PSvzBS8QCDB58mS0a9cOX375pcnTjTdu3EDPnj0RHR0NDw+PctenLP2IbwQYSz2jr1KT+gdBAhCA/hBBUqmU1zpAjUaDpKQkqNVqna/N4tE2BwcHhISEoHv37nj06BEOHDiAvXv3Ij4+HkqlEkql0iwL2YsRCUUQioQQi8UQi8WwtbFFx04dMXDgQLzxxhtwc3ODVCrVO8JZHC6NrxsCfeVIJBIK/6OHy5cvY/To0di3b1+5DWaKior0uh6xtrbmTDc0MsslhAyhL86qIXGr7xgbG5tKu24uDBkH2NnZcf92WFl27W9hYaHeeum7bl9fX6OuN+zt7dG5c2eT6mLoflUF119t2rTB0aNHERMTg2XLluHgwYO8P7yfPHmCiRMnIiIioty/Y4bawtPTkzPdxcXFbO1Qlme0NvQPopYLwPI+2EqlEnv37sW4cePQrFkzzhdMsRAsHnH08/PDp59+igcPHiAmJgbR0dG4evUqcnJyoNFooFarodVqS0bj9NWxeMqaaRmEQiGEIiEkEgm8vb3RokULtG7dGq1bt4a/vz8cHR0hkUggEokMGmcolUrcu3cP27dvNzr6R/wfwcHBCA8PLxmhycnJwc2bN7Fnzx6cOHGC85iDBw8iIiICAwcOtNjLpSLKK4uxjzkMhGrrC4RenKbTtWtXdO3aFc+fP8f69esRHh6OlJQUo8cdPXoUZ86cQffu3S1Wt4owlqttBnkECcAK+zFOSEjA2LFj8e2336JHjx4606qvCkHg3xHGoKAgtGrVCh988AGUSiWSk5Px7NkzJCYmIjU1FVlZWXBxceEsSywWY9q0aVAqlXB0dETdunVLNplMBpFIVDIVKxKJjI4yMcYgl8sRGxuLDz74AImJifSiMQE7OzudKZtOnTrhgw8+wA8//KDXEnP37t3lFoAEYWlUKhXS09ORl5eHnJwcqNVqODk5mdVZcUXg7e2Nb775BvPmzcNff/2F1atX4/z58waP2bZ1m0UFIPUPggRgNUaj0eDu3bsYN24chg0bhs8++wyNGjWClZWV3i+vl8Ug8O/UlJ2dHXx9fUumKIoFGJcxhlQqxbBhw0rKKt6KhZ+pD29+fj7+/PNPLFiwAImJifTgmpFPPvkEv/76K2JjY3X27du3DyqVipfBEcGfM2fOcK6tMudSi5pObm4ufv75Zxw/fhwxMTEVFuGkI
pBKpRg1ahRGjhyJAwcOYOzYsXpdwJw5e4Y6Qy3rHwQJQJNFYFZWFn777TccOXIEoaGhGDduHHx8fCCVSo0aQ7w8WseHYsfLZYUxBrVaDYVCgWvXrmHZsmU4e/Ys8vPzK9Q4pTYgEAgwevRofPXVVzr7cnJykJ6ebrZwcMT/9e/yhKuq7Wzfvh3Tpk2rVtEryvpsvvXWWzhy5Ai6devGmefhw4fIy8uDvb09dYxa1j8Iw1AouFdeOsVTuatWrULPnj3xxRdf4MKFC8jJyTG7sUdZ6qdSqaBQKJCRkYGIiAiEhoZi2LBhOHr0KHJzc0n8WYhevXrp3afPlQlBVAbr16/Hu+++W6te7sHBwejatave302KWlG7+wfBDY0A6vnBUCgUSE1NxcaNG/HHH38gwD8Aw0cMR69evdCoUaNSBhmWCptWPMqn1Wqh0WhQWFiI2NhYHDt2DPv37UdCYgLkcnmlBEevbdSvX1/vPnq5EFWFhw8f4rPPPquV1x4YGIiYmBi9z6ivry/1j1rcPwgSgCYLsKKion/DLV28gOs3ruObb75B06ZN0bNnT3Tt2hX+/v6oV69eibWvUCgstaav+P9caDSaUusFX940Gg0yMzPx6NEj3L59GxcuXEBMTAxycnJK6kSjfRWHIfcjlgo/RRCmsmzZMigUilp57a86Y36ZwsJC6hy1vH8QJADLLASLR+DkcjmuXLmCmzdv4qeffoJEIoG7uzsCAwPh7+8PHx8fuLu7w9XVFXZ2dnBwcEC9evV0RgnVajUePXqEtLQ05OTkICsrC8nJyUhISMDTp09x7949ZGRkQKVSQa1WQ61WQ6VSkWVvJUFiu2Lx9vbGmDFjTDrG3NFZqhsqlQp79+41mOe1117DpEmT0LJlSzg5OSEuLq5CIp9UBIbWVJPoof5BkAA0mxjUaDQlPyrZ2dl48OABxGLxv/78hMKS6eGgoCAcOHBAx6FmUVERpk6diuvXr0Oj0YAxBq1WC61WWzLtS6Kj6kAjCBVL48aN8d1331FDmMCjR4+QmZmpd394eDgmTpxYKs3Q0oaKJjo6Gh988AHnvs8//9xgODzAsE9Y+nCu/v2DIAFYJSkWa6+uxROJRMjNzdX745Ofn0+GGzVAAJorHBxBlIf79+/r3TdkyBCdl3tVIyUlBXfv3uXcl5GRYfR4Q47vHRwcqH9U8/5BkAC0GCKRCDY2NhCLxSWje4YCgFclJBIJbGxsIBKJSqaqyTDE/F/P+nB2di5X2eY2IqouUQMsZTzFF33PiKVHiyx13Y8fP9a7b9CgQdX6+cvLyzOax9DvNQnAmt0/CBKA5RJQnTp1wuzZs+Hn54fk5GSEh4fjwIEDJeHdqiICgQC2trYYNmwYpk+fDk9PTzx9+hTLly/HwYMHKRScGdEXDs4cAlAqlcLOzo7TmERfXN+CggK95RlaDF+VMHTd+p65vLw87N69m3Ofv7+/TqxTQ9OC8fHxnOmWtuq2sbGBUCjkHPnXNxuQnZ2NP//8k3Nfy5Yt0aFDB4MiycPDgzPdXG6tzCGaDT1HT548MXp8YmKi3n3lnco01I+ysrI406vaspHK6h80/U4CsEqPQjRu3BgbN26Et7c3xGIxfHx80Lp1awwfPhyLFy/G7du3UVBQUKWmakUiEZycnPDee+/hq6++gpOTE0QiEVxcXLB69WokJyfjwoULFBHETF/Ov/32G+c+W1tb+Pj4lPscrq6unELowYMH0Gg0OqN6hpwkV6fRDn3X/c8//3DmP336tN61YKGhoToCkCsKTzF37tzhTL99+7bFn906deogLS1NZ5++keajR4/qve4pU6agQ4cOBgVHQkICZ/rz589N/r3kQqVSobCwUGetsykYcqZ+4MABZGRkoE6dOpz7FQoFjh8/rveDyM3NrdwfK/rQN21d1RyZW7p/WLJvEBbSP7W9AaRSKYYPHw5PT8+S0G8SiQR2dnYICQnBvn37sHr1agQFBcHBwcFoRBBLI5FI4OTohM6dO2Pz5s1YuHAhXFxcStzQSCQSuLi4YPjw4QZ/tAh+XL16FQMGDNDr6qVLly5m6ROurq6c6QUFBdi8ebPOF/Xq1av1llWdIh7ou+6UlBQcOXKkVJpWq8XatWtNGkEy5L5n7969OqOAZ8+e1SskKuK6Hz16hFOnTpVK02g0+Pnnn/WW5eTk9O/XvIF+ePLkSc70yMhIk+ptaHQ5Li6uXG3SqFEjvSIhLy8PH3/8Mec0L2MMc+bM0Wvg0LZtW5PDa5rSj3bu3Knz+xAbG6t3pLrSRnss3D8s2TcIC/WJWq+AhUI4OzvrfL0IBAJIpVLUrVsXY8aMwaBBgxAVFYXNmzfj0qVLKCgogFKptPgIm0AggFgsLpku69atG959910EBwfDzs6OU+QJBALY2NiU+0evNnHv3j3897//LXnhZmdn4/r167h8+bLB40JCQsxy/iZNmuDSpUuc+z766CMkJSVhwIAByMvLw+rVq3H48GG9ZbVo0aLatLuh6x49ejTm/b/2zjwsqvL9/+9ZmI19RxQREdTUEBJMrVzSBHJBMyvNrbS6KjOXxMqPueZWfV1aTDOz/GqJuRGamYiSmi1gfsR9AUQQFYad2Z/fH/1mviJzhplhZhjgfl3XXMpZnnPOc9/znPc8y30vWIBBgwahvLwcX331FQ4ePMhZVlRUVL1tPj4+nMcrlUr0798fy5cvR+fOnXHixAmDD9ibyMhIoz1HjDE888wz+OCDD9C/f3+Ulpbiiy++QEZGBmdZjzzyCADAz8+P85iUlBRMnToVgwcPrtNDtXr1aovu29Qw7RtvvIFt27ahU6dOVtWJVCrF8OHD8cMPPxjd//333+P69euYNm0aevbsCZlMhosXLzboF0OHDm20vUz5UXFxMfr3748FCxYgNDQUv//+O95//32nm4dtb/+wp28QJADtglqtxokTJzBt2jSjcaT0OXv9/PwwYsQIxMfHIzc3F4cOHUJaWhrOnTsHhUJhiNWnD+lijdC7P3yMUCiEUCiEv78/+vXrh0GDBqFv374IDAyEi4sLXFxcjAo8fRaTo0eP0kIQC7hx4waWL19u8QurodAU5tK3b19s376dU6gsWLAACxYsMMuPuHKiOiOmnruiogJz5swx+/tj7EUfEhJi8ry8vDyL4w3a6rn37t1rdJ9cLjc7W4NAIDCkKWzXrh3ncTqdDgkJCZgyZQq6dOmCvLw8fP311xYHMW/bti3nvtOnTyMiIgIdOnRAZmamyfvh4pVXXuEUgADwxx9/cP5g4MIWcewa8qOsrCwkJSU59XfN3v5hb98gSADaHI1Gg/T0dGzduhUTJ06Eu7u70VWUeiEoEonQtWtXREZG4rXXXkNxcTH++9//Ijs7G//88w/Onz+P0tJSaLVawyINY0KNx+PBzc3N8MvSz88P7dq1Q3h4OMLDwxEZGYmIiAgEBwdDJBIZBKGpFYSMMVRXV2PXrl04dOgQCUA789ZbbzV6AYie8ePHY+7cuY2eOD5ixIhmFbvLVs/97LPPGp3IHhkZCR6P53QT0SdNmoT58+c3
OtLAhAkTDPPiHnvssQbbuk2bNjXqegMGDMBnn31m8pjc3FzU1tZaVf6gQYPw9NNPm+zhtlT8hYWFNbocZ/UjS7C3f9jbNwgSgDaHMYbKykr85z//wR9//IH33nsPHTp0gEQiMSq29EOy+mHZDh06ICQkBPHx8YZ4gFVVVZDL5aioqDAc9yAikQibN2+GUCiETCaDWCyuk0ZOH1Da3PllGo0GlZWV2Lx5M1asWEGJvu1MdHQ0Fi9ebLPyvLy8sHz5csyYMcPqMlxdXZtd8GRbPLebmxuWLFnCWX5cXBxOnz7tVM8dEBCARYsW4d1337W6DG9vbyxcuNDwd1BQEGJjYxucttAYhgwZAk9PT7u2Lxs3bkRcXJzJVb3mIJPJLB7iNuWnzuhHlmBv/3CEbxC2hU9V8H8iMCUlBfHx8Vi7di1u3bqF2tpakyt/eTweBAIBRCIRJBIJZDIZ3NzcEBgYiMjISMTExODhhx82KuIEAgHat2+Ptm3bwsfHB25ubpDJZJBKpZBIJIZev4bQx/07d+4cpk6dikWLFqGkpISW3tuRqKgopKam2nyRzZtvvonhw4dbff5nn32GLl26NLv6bMxzCwQCbNu2DZGRkZzHvPbaaxYLSkfwzjvv1JlzZdEvd6EQO3bsQGhoaJ3t5kwTuJ/ExESLjvf09GyUaDWH4OBg7Nmzx+SctYYQi8XYs2ePTVboW+tHplY1NxX29A9H+AZBAtBuIlClUqGgoABLly7FwIEDsXbtWuTm5qKmpsaieEh6Yaifq2fqGH2vn6X3qlarUV1djatXr2Lx4sVITExEamqqxXN6CPMRiUR4/fXX8dtvv5mc72L1l5HPx86dOy2OyC+TybBt2zZMmjSpeTZCVj63u7s79u/fj5EjR5o8bsKECUhISDCrzGHDhiE5Odkhzy0QCLBv3z6L5yB6eXnh4MGDRuc8Dhs2DLNmzTKrnMFPDsbGjRstvu85c+bY3ddiY2Pxxx9/oGfPnhaf27ZtW+zfv99mC7Ss8aOkpCRMnz7d6b5r9vYPR/gGQQLQrkKwpqYG169fx5IlS/D4449jzpw5OH78OORyuWHBR1Pcl1qtRm1tLeRyOTIyMjBz5kwMGDDAEPeP5vzZ5yXdv39/LFq0CJcvX8Znn31m1x4iiUSCzZs3Y/v27YiNjW2wF2js2LH4/fffMX78+GZdz5Y8t0AgwKRJk5CTk2NWD4VAIMCuXbswZcoUk+I+OTkZe/bscWj4JL1437p1K6Kjoxu099SpU3H+/HmTPYerVq3CmjVrOEOq8Hg8TJkyBWkH0qx6VoFAgM2bN2PTxk1GV17birCwMPz555/YsmULIiIiGjw+ICAAc+bMwYULF2wu/sz1I7FYjEWLFmHXrl1Om5HHnv7hKN8gbAOPNTBWWFNTg1GjRuHIkSNOF1TY09MTP//8M3r16lVnuFSpVGLjxo2YN29eoyeX68PBSCQStG/fHsOHD8eTTz6J7t27w83NzdCLp//XFmi1Wuh0OjDGoNPpoNFoUFpain/++QdHjx7FwYMHUVRUhNra2kZPIpfJZFixYgVeeeWVOqug1Wo1/v77b8THxzvdnA6hUIgnn3wSu3fvtii4qFqtxrFjx8xqxNzd3REcHAx/f3/OXlxTFBUVcQYajomJMRlW4n6ys7Px22+/4ebNm7hz5w7EYjECAwPRsWNHJCQkmL3gw5r7yc3N5QzI3KNHD6PXNnXOww8/jICAAKueWx/fMioqCvHx8WbX34PcuHEDO3fuREFBAeRyOTw9PdGpUye88MILhmDBlj63rWzNGENWVhZOnjyJ/Px83L17FyKRCN7e3oiJiUF8fLxFWV7KysqQlpaGnJwc3Lp1CxKJBB06dEBSUhK6du1qaGsyMjKMThnx9/c36yV+7tw5nDhxAleuXEFlZSVUKhU++eQTmy2Q0tfNpUuXcPDgQRQUFKC0tBR8Ph8+Pj7w9/fHY489ht69e5stuhprs/v9qLy8HL6+voiIiMDzzz9vOHfVqlWcvckpKSkYM2ZMk3xHHekfjvANggSg3QTgg8LAxcUFYrEYHh4e6N27N2JjY9GzZ0+EhYUhICDAEJ7l/o+xX1H6IWfGmOHLpdPpoFarUVJSgps3b+L69ev4559/8Pfff+PSpUtQKpVQq9VQqVQ2y0rSmgQgQRCEI3vaLBGABOHwdylVgWU9c1qtFgqFAhUVFbh9+zbS0tIMq4I9PDwQFhZmWNzh7e2Njh07Yvjw4fVEoEqlwnfffYfLly+jpKQEJSUlyM/PR1FREVQqFTQajaH3T/8hCIIgCIIgAdiE6Ofk3T/vrqysDAUFBXWGhWNiYpCQkFBPAGo0GmzduhVZWVkGsaf/EARBEM6DXC5Hfn6+0X0hISFGh2dpTjZBArAZoV+Za60QY4wZegmBf4eMFQoFZ0gWpVIJhUJhE9GnzyCiF5MEQRCEbUhLS8OECROM7nvvvfewbNmyetuLioo4y6M87QQJQCcSfhKJBG3atEFERAQKCwuRl5uHquoqpxdTPB4PHh4eiI6ORnh4OC5evIizZ8+isrKSDEsA+Dd91s6dO43umzBhgk1W6y1fvhwlJSX1tkulUs4gzUTLR61W44MPPjBrsZpIJIKPjw+Cg4PRp08fdOjQwWnymevzLRtj8+bNmDNnTp2FDUqlEnv27OE8x9zFUETjyMjIwE8//WR034svvmhVmCESgC0MmUyG6dOn46233oKnpyeUSiV++eUXLFu2DNeuXbPpQhJbwufzERAQgE8++QSJiYkQiURQKpVISUnB3LlzUVZWRsYlkJGRgY8//tjovkcffdQmAnDz5s24du1ave0+Pj4kAFsxNTU1FufY1tOuXTu89dZbeP311+Hq6tqkzxEeHg6pVGo0jVlxcTEef/xxrF+/Hj169MCdO3cwe/ZsFBYWcv5o79y5MzmHA/jrr7842764uLhWLwBbfRxAoVCIPn364J133kFgYCBkMhm8vLyQlJSEAwcO4N1330X79u0hlUqd5p71OYajoqKwZcsWjBw5Ep6enpBKpfD09MRzzz2HkSNHWhW+hCAIwhkoKCjA3LlzERkZ2eQp2EQiEcaOHcu5PycnB4MGDYK/vz+6deuGn3/+mfPYmJgYCoNCkAB0BlxcXDB48GC4uroa4vjxeDyIxWIEBwdj9uzZOHLkCJKTkxEWFgZXV1ez8/Pa417d3NzQqVMnfPDBB0hNTcXAgQPrhELR33u/fv1onglBEM2ewsJCDBo0CAcPHmzS+3j11VdtUs60adPIqAQJQGeBK+cvn8+HRCJBWFgYkpOTkZmZifXr12PgwIHw9fWFTCazqxgUCAQQi8Vwd3dHYGAgRo4ciS+//BLHjx/Hm2++iTZt2tSJ3aeHMYaysjJaDEIQRIugpqYG48aNQ15eXpPdQ58+fSxO2/cgXbt2pVRphNPQ6ucAqlQqpKamYtq0aQgKCqoXSV6/MlggEKBNmzYYN24cxowZg5s3b+LYsWM4cuQITp8+jaqqKkO8Pv0q4vuDQXMJPJFIBMYY+Hy+4SMUCiGVSvHQQw8hLi4
OvXv3RnR0NHx8fODi4gKhUMiZdUSn06GkpAQpKSlQKpXk4QRBtAjKysowYcIEHD9+vMnu4dNPP0VWVhYuXrxo8bkeHh7YsWMHJBIJGZMgAegMaLVa5OTkYMaMGVi2bBlCQ0M5v6B8Ph9isRgikQidO3dGeHg4Jk+ejKqqKuTn5+Py5cu4evUq8vLyUFhYiNLSUoSGhhoVgHw+H7169UJYWBi8vb3Rpk0btGnTBu3atUO7du0QFBQEqVRaRxQ2lGpOo9Hgzp07mDt3Ls6ePUs9gARBOC0ymQzt2rUz/F1aWoqSkhKYSk6VmZmJv/76C7169WqSe/b29saxY8eQkJCArKwss8/z9/dHWloa5cclSAA6G/pewH/++Qdz587F6NGj4e7uzjmH7v5eQeDfRPZeXl7o3r27IbWb/qMPMfMgEokEq1atMhxz/0cfRNrcEAiMMSgUCly8eBHz5s1DZmYm9f4RBOHU9O/fHwcOHKizrba2FqmpqZg1axZu3bpl9LwNGzbgq6++arL7DggIwMmTJ7F48WKsW7cOVVVVnMfy+Xy89NJL+PDDD+Hv709GdzCTJk1Cv379jO7r1q0bCUBykf/Ly3vt2jXMnj0b3333HWbOnIkBAwYYFn2Y6n3j8XgWr7jl8XiNXlnMGINSqYRcLsf27duxZs0aFN8uhlrT8iPQN9QbShBE80MqlWLs2LGIi4vDww8/bDSe6aFDh5r8PsViMZYtW4bk5GTs2bMHp06dQkFBASorK+Hr6wt/f3/06tULSUlJJPyaEH9/f6p/EoDmC6qqqiqcPHkSZ8+eRY8ePfDKK69gyJAh8PT0NMy/a+p71Gg0UKvVKCkpQWpqKjZs2IDr169zLmYhCIJoTnTo0AFvvPEGVqxYUW9fQUEBSktLjaZfczQeHh6YNGkSLewgSAC2FBGo1WpRUVGBU6dO4cyZMwgMDMTo0aMxfPhwdO/e3TA3z5x5eba6H51OB61Wi6qqKmRnZ2Pfvn1ITU1FaWkpCT+CIFocI0aMMCoAAeDmzZtOIQAJggRgCxaCVVVVqK6uxvr167Fp0ya0bdsW8fHxeOKJJxAVFQVfX1+DELx/Dp8lwpAxBp1OZ5g3qP+/TqdDbW0t8vLycObMGWRmZuLYsWMoKSmBUqmEWqWGjpHwIwii5dGhQwfOfXfu3KEKIggSgI4RgwqFAgqFAhUVFbh69So2btwIkUiEjh07IjY2Fg899BA6deqE4OBg+Pn5wc3NzbCYQywW11vQoS9Tq9WipqYGlZWVuHfvHoqLi3Hz5k1cuXIFFy5cQE5ODqqrq6HRaKBSqQxhZgiCIFoypuZIy+VyqiCCIAHoWHQ6HZRKJZRKJXg8HsrKynD27FlDL6D+X5lMBl9fX8TGxmLt2rX1GrPa2lpMnjwZJ0+ehEKhqDfUq9PpoNFooNVqTYZFIBpHRUUFDh06hPT0dNy6dQt3796FRCKBv78/HnroIcTHx6N3795mrcjOyspCZmam0X2JiYmIiIgAAGRnZ2PLli3Iz89HUVERfHx80K1bN0ycOBEPP/yw2feu0Wiwd+9eHDx4ELm5uaiqqkJQUBA6duyIyZMnO3XICWvq6vbt2wgODkZERARef/11hIaG1jv32rVr2LJlCy5duoSCggLw+Xy0bdsWvXv3xsSJE62aEG5LH+GyXXV1NQIDA9G+fXu8+OKL6N27NwDg8uXLnBkwnn76aXTq1KnBaymVSuzduxenTp3C7du3UVBQAKFQiMDAQHTu3BlJSdhyo+IAACAASURBVEno2bOn0y2sMjXf+sEoB+b6EwAoFAr89NNPSE9Px507d3D79m1kZmYatZ+xuhOLxQgJCUFQUBCeeeYZxMbGNuo5FQoFdu3ahb/++gtFRUWGawQGBqJLly4YNWoUevToYZF/OcL2xcXF2L9/P06ePImioiKUlZXB19cXbdu2xZAhQ5CQkAA3Nzer6sRWZVviF87QhtuzHeDq3TJJdXU1e+qpp5hAIGAAnOrj6enJTp06xdRqdZ17VigUbN26dUwmkzXZvfF4PObi4sJ69+7Nqqqq6tVrVVUVi4uLa/J6lclkbN26dUyhUNS5P5VKxU6dOsU8PT2dzu5CoZAlJCSw6upqZi01NTVs8eLFzMPDo8Hr9ezZk6WmpjZY5urVqznL+OGHH1hNTQ177rnnTF5r6tSpTKlUNnitW7dusb59+5osa9y4cayqqoqtXLmS85iUlBRmC8LDw42W7+PjY5e6cnFxYUuXLmU6nY4xxphGo2HJycmMx+NxniOVStmnn35qOKcpfMRc2yUlJbHKyspG2U6tVrMlS5Ywf3//Bu8/Li6OZWRkMFtTVlbGec2EhIQG3z1c53799dcW+RNjjCmVSrZs2TKj9tRqtVbXXXR0NPvpp58srhulUsnmz5/PfHx8GrxG37592YkTJ8wu2562l8vl7M0332Qikchkub6+vuyTTz5hGo2myco2xy+cuQ23RTtgChKADwgLqVTKJBIJ4/P5jb4/gUDAYmNjOQVgbGysTa7D4/GYVCplMpmswS8OCUDG8vPzWXR0tMXXnT17dr0XhbmNx9atWxv8sus/48ePN3n/FRUV7KGHHjKrrNjYWLZw4cJmJQAtqatp06ax6upqNnr0aLPtuGLFiibzEUts17NnT/b+++9bZbu7d++yAQMGWNyOfPjhh2YLZHsLwPLycs5zt2/fbtGL++zZs6xbt26cx9xvM2vrbuHChSZtfz+FhYWsX79+Fl2Dz+ez//mf/2mwbHva/sKFCywyMtKisocNG2ZWW22Psm0pAJuqDW9MO0AC0Ezn9/DwYMOGDWPLli1js2bNYuHh4UwsFju9AHRxcWHdunVjixYtYp9//jmbOHEi8/LyIgFo4hdmRESE1ddeunSpVY1Nu3btLLrOvn37OK8zd+5ci8qSSqXNSgBaWle+vr4Wv+yys7ObxEcstZ2pdpfLdkqlksXFxVl9/x999JFTCMC7d+9ynrt7926z/WnUqFHMzc3N5DPrhVtj686U7fXU1tayqKgoq6/x+eefm+xVtJftCwsLWUhIiFXljhkzxqS4tFfZthSATdmGW9MOkAA0UwC6u7uzdevWsbKyMlZbW8uqq6vZlStX2PTp05mfn5/FvWqOEIA8Ho+5urqy+Ph4dvXqVVZTU8MUCgUrLy9n33zzjVnDVq1RAI4cObJR1+bz+ezvv/+2uLGx9DNw4EDOoR1b2sQZBaAjPqNHj3a4jzjKdsnJyY3+UWlKIDtKAObl5XGe+8svv9jUn/QCsLF1JxAI2PHjx00+1xtvvNGoa0gkEnbp0iWH2l6n01ncq/jg55tvvjF6z/Ys25YCsCW24a0+nYJAIEB0dDRefPFFeHh4QCKRQCqVIiwsDB9++CEOHDiAiRMnIjAw0BD/rynh8XiGHJrvvvsutmzZgtDQUEilUojFYri7uyMpKQmPPvpokwetdjYyMjKwb9++Ri8CWrBggd3v9fjx46ioqKi3/fLlyygvLydjNpKDBw9CoVA41EccYbvCwkJ8/PHHjSpDrVZjyZIlTW6ja9euce
...)Assignment 5
###Code
# In this assignment, we will visualize and explore a CT scan!
# load numpy and matplotlib
import numpy as np
import matplotlib
# we are using pydicom, so let's install it!
!pip install pydicom
###Output
Requirement already satisfied: pydicom in c:\python38\lib\site-packages (2.1.2)
###Markdown
**Task 1**: Download and visualize data with SliceDrop! [20 Points]
###Code
# Please download https://cs480.org/data/ct.zip and extract it on your computer!
# This is a CT scan of an arm in DICOM format.
# 1) Let's explore the data without loading it.
# TODO: Without loading the data, how many slices are there?
# There are 220 slices, because there are 220 .dcm files inside the CT folder.
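# --- Added sketch (not part of the original assignment): one way to verify the
# --- slice count without loading any pixel data is to count the .dcm files on
# --- disk. The folder name 'ct' is an assumption about where ct.zip was extracted.
import glob
dcm_files = sorted(glob.glob('ct/*.dcm'))
print('number of slices (.dcm files):', len(dcm_files))  # expected: 220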
# 2) Let's visualize the data with SliceDrop!
# Go to https://slicedrop.com and drag'n'drop all .dcm files into the browser.
# Please use the 2D sliders to show axial, sagittal, and coronal slices in 3D.
# TODO Please post a screenshot of SliceDrop's 3D View in the text box below by
# using the Upload image button after double-clicking.
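# --- Added sketch: before uploading everything to SliceDrop, it can help to
# --- sanity-check a single slice header with pydicom (installed above). The
# --- 'ct' folder path is an assumption about where ct.zip was extracted;
# --- stop_before_pixels=True skips the heavy pixel data so only metadata is read.
import glob
import pydicom
first_slice = sorted(glob.glob('ct/*.dcm'))[0]
ds = pydicom.dcmread(first_slice, stop_before_pixels=True)
print(ds.Modality, ds.Rows, 'x', ds.Columns)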
###Output
_____no_output_____
###Markdown
![arm.png](data:image/png;base64,...)
l2eZ6CWN59zDnTKhJn1km4KRwLPfA8VwrwSz7aUc/hMmEtuynLWEzx9E/+6nL6GZCXo5lZDDXwX2yj6B30aJFdYpt3hXMNtdNn/SVa+uWQJfnllA3zyvrucfsR9senJfr5v7Yx8+RuoT8ac+SPnNN7OPnScl+cD7WCYB5f7QkSZLUHxgAS5IkSZKkzd6+++5bg1vCQILPBI6MgiVMJaAk0GSbAJBAEAlOCT05noCQkJRwE4SehI3UEzLus88+9XjC4/YYRuASOBKYMqI05581a1Y9R1sSdib8JShmVOu4ceNqPX0x/XLC1IS4XPPs2bNrHSWjfblGQtncf8Jb9lEIhemfZ8CS43Is/bCfgDf3xPEJeXkvMKE3ATGjhRNKcwzvRaaO6+Y4CmEqfXK9rYSuCVx5liy5pzYEDvYhzzH90iajjllyTzw/romfA/vbgDd47tTThn2UjAbmHPw3cMwxx9RtSZIkaVMzAJYkSZIkSZs1pismqCRUJOBjHYR8bBP0Ef6xn8CQwDJBIvsIO3faaafahvAzYSnBJ8dQj7lz59aAl0CZenAO9jPql1CUbfqmZB/hI4FoAt0EmbwLmAA1+yiEkZS0JchlSWBLWEpfCW8JbOfNm1eDX6510qRJzwh+uVZCXEYoM2qWkDTHEtYm5GX/IYccUu+dZ5JAlfukPX3SP/dHOM4xhOK8P5h+mKKa++DYBNxtsJsQN0vqeS48e+oSxrOeY5D2ef48T66dnw0BMD8zAml+XtzfhAkT6s+b4+g7Pwu203f+G6FP9uca2Cbc32+//eq5JEmSpE3JAFiSJEmSJG3WmMKYQDLBH4EkQWQCX+ooCf4ICgn/KISeBIkEnISdHMPIVuoPOOCAnnCQKZ1pQ8jJcfRFQd7RizZwBOflWtoQOEHvHnvs0RMId+/jnbsErxSuK0Es18W90i/BLyEs9fTBOQ477LA6ephQlCWBKceyj+dEYJrnQshLfzlPppAmOGZUMevcN1M8M3KaYJj7Zx9B8F133VW3CeC5bs7DdYDtPIsW2wS+lASwqWc76+C5J6il5BmCeyf0JcznPsePH18DcJb8XGifYxMqs2Sbe2cf507fnJOf4cyZM2tbSZIkaVMyAJYkSZIkSZstQj+CScJBEEIm4MuUyAkEQehJPe0IIQlTGUXLlM+0JTBlBDFteA8wS0bCsgRhL/3TH/uZOpnzEyYyKpWQsS04+uije8LdlIyQzTbBacJe7mX33XfvCWapI4zlfIS0hJQJbtlH4boTJDMql9HBBLW0Y5sQl/skuGWbegptCHkzGph1zsV98dwIjLkvng+jfGnD/RP8EpzTF+emL47huXNd3FtG9oLthK3/P3v/FrrtduZ1vi8tC9p10rBoNJuamftdMmdmtrMq20piVbnEQsGzQhGk6fJAPBA86QOhoGmaFrFPWqGraQrBpk888kAWNrSKUaRITGUmszI3lapsamNhKaKiYp+s+jx5v68jj/+ZZFalkrzv//eDi3HfY1zjGtcY9/3M+X+f7zPum5o/VX8Ni/NVisvfHPnJzboDwKCv6yNXMFpe9WFULmK4Vt0jYHhjq3/nO995WftpmqZpmqZp+n5qAHiapmmapmmapmmaplsrUBLIe+aZZy7gFAwFJQE/kBCwBHA7t7v3lVdeucBDbQCoNgZk8gECf+u3fusCPYFUAn7tjLXbFNDVDiISeCgHj3QOfDL5OGcn6K1U/6EPfehyzAK+TP8ewwy2mqcdyPpqs1s1X3nzA3fl6THGgVng1zz5g+XeSaxee5DXWoC8dhMDqeqA8aDuV7/61cv4wKo1AIkDxc7BY/DVGPoAzc1ZmaxTYNZxbeqYddfWOTk/Td714y9PYxfXXNsVfD2edSN1wK96x+arFNccX3zxxYvfNE3TNE3TNH2/NAA8TdM0TdM0TdM0TdOtlUcdg34eRwzggZl2yYKz4C64Z0cvwAcOBxL5OQdewU+QFVAENQFScUFC/cUBQAOUyqBvdRnoywK+QVCPZj5Br1I9cy6XswQrPbIZyDQ+k1dtjz322D2QC/KaLwCrjzH1A0ZBZH17n69jj54O8qoHea2fdetdwNbBGlkHIBzwBnbBZdCXgeni8BFHHsYEUZ3LUy4UiD2PzzK1rlQbM45zMc3hJvFzPeQDArt+1rf6fMRxHgTuGhrbOACwncTTNE3TNE3T9P3SAPA0TdM0TdM0TdM0TbdWwCdoB6iCj6Dm17/+9Uud9+iCmHYHtxuUPDI4IKqPGIAmoKo/P7ATBOXrPHDI7LIFCIFDcQO5joO+J+yt3XltwWdA84TDTB9jg67grFzVsQCxHbh264KYYK/cgVsGCgPW7dIN8lqXdvKqs7vZ/NtdLK7xQWAQ1Zp49685A+Sgql3G1tbc9QVjgWBrKK7x/tk/+2eXPuIFxK3NadrlHmTv2lRSa0rqjSmWuG8kfuKB2XY0G4fK+YzXvcNHmzrtQLprMk3TNE3TNE3fLw0AT9M0TdM0TdM0TdN0KwVUAndAHugJDIKSoCI4Ck4ChkFAQBEstWMY6AR7QU5QVX3wUD87Z8XlEygkjzvW9sQTT1zgbbtcje34Ix/5yOU4M452YDHAW5324DBYDeIGdp0/9NBD9+LJUXuPdLZrGaQM/Jo/OAvygp9gLLAL5AZ57eQtL2vHr3cFM/BWDO8atobmHQhWZw3kZVzrpa0d1z1+Wp3cxAWdza35B18Du613ENZ5uj5mrjXJ81tJLGsIaiup/sVwzi8Arb77xLy9C9g8pmmapmmapun7oQHgaZqmaZqmaZqmaZpunezSBDWDikAfA0eBPDA24OccdNRmVy1oCoqCqoAln3b1AqH8Gdjr8dFklyugGSQEENvNm4HNAc8gb6U8vX/39K/NblyQFYwEUwO+2sQEnM1VzgBsu5e1qzdvENMuXZC3NgAZGAZjQdNAr3nY2QvaajO2XbvWQTxzlgfYHAjufcSBaHAUrLZGgXK5iWX3tDjWW/wgeRDYGrVW1tNatqYsOHyuddeEzEG8byVzsV52LRez/uVB/ALALB/9nnvuucvxNE3TNE3TNH2v9Y2/SqdpmqZpmqZpmqZpmm6RHnnkkQtkBO8IBAUiQU2AEswL/BGQCWACj4AkOAr2qQdHQU+7PtUBm8Cp3a3O+Yul1J+JC0JeA131H/rQh+6d56Mv4AmSArxyBEvlrO6DH/zgvXpmfPmqs5NVjvzBT/VB3Pe85z0XyKrP+Qjo2q2LvgzkNV8w1+ORPRo7AAoe62MHMZhrh6/SWhg3UAwKi2NttIOs+gHkYqt/+9vffrkOADBZL2vQullTa2FNWNeIOrbeZ0ldT/OXzxupGPIxTzA7iVEc5tga8A0Ek3vLTmc7qadpmqZpmqbpe60B4Gmapmmapmmapmmabp3AR0ASuAPxQEWw07njQB/ACObZuQpAgq38QEEgkdUH7HSuPThJ4PA17AUwA73aMvXA4rU/kGt8O3TL0+5jeWnXJs927AK95ge29hjmwKe58/V+YuOYEwCsTyDWDmngWBuwyUBd/T3OGbjlIxZYa/7BaEDXuM
4DwMAvOArwgqLqQWM+QK946vQzv3YM2yFsjtbFGsg3+HoC4NbtGs6ypI6sl7mm1o/Es8tbLvrKy27lwG73xinn1sdczvbHHnvsArOnaZqmaZqm6XutAeBpmqZpmqZpmqZpmm6dQE7gEYQEQ4N4jABFbfzAQudgJzgJsoKYoCDgB3jaOcsfQAQTK0HHp5566nIMNAKYgUwxldemL2jMn4GzgKQducCz8UHRHqfsHCjtkcl26wZVwUsxXnzxxcs8mfkw/fQP8poryKu/xzCbrzYAmT94y4Bfc/bYa485Bj61y7E1Na7ds48//vhljcSSG8DrXB7iGA8cFsv4cgaU+YmlHZA1nrUJ1rpe1kmddTwVgA34nucdG4/0tY7GNAZYbV3MxTq4xtYAkK6v0hzEKybjax3zs1af/OQnL8fTNE3TNE3T9L3UAPA0TdM0TdM0TdM0TbdKAU8C8OyqDeqpf/jhhy8wDwAM8IF5QCF4CwJ/7Wtfu4BP4BfcBCPpySefvABKBi4y/YFNsFL/wK42ELRz1k5fOYCzoCQw2o5d43i0sFzaIQuUmsOjjz56yROA1bdHOjNgM8hpXkFesa2HR0EDnyfkFV8+HncNPIvRO3vNx3tuzcnjp4PLQV7rCN4C5dbH2jKxwVUA9qtf/eolV/mr10+d9RJLrtZZDnxaV2O6LgHg1p7Uk7XtOHUtmfUDnknMoLh1E89amK9juclH/tdqnOZnbdotbBzvhX722Wcv59M0TdM0TdP0vdIA8DRN0zRN0zRN0zRNt0reXQsAApNA5quvvnqBjIAd0An4BW6VjD9YCvIBh4AriJpf9vTTT38TkHQMEH74wx++B38DycCjdxEHftUDkCAq0AokAqTqsyAouGpnbqAW5FXvsdLBXxATVJW3HawvvPDCJWd5iq2On7GZsYzdLmIGwooBbFovIFS92MY1VnDZOPysoRh2LAO6zz///D0f62dsuYPDILQ1sgbWydqb+5e//OVLbO9VtpsYNLbu1pt/6+64NXZ+k7o2xu4YOJY/gdXWzlrIg/iYIygsX+vsXjljdGzsxldnHRK4/rGPfezu2TRN0zRN0zR9b+QniT/zjcM7l0cS+aP3pZdeulszTdM0TdM0TdM0TdP04AjIA11BOsCznabgJlgHLPpuRB2YCVQCpXai8gUbg8TqQUJ97Qq2S9XOVoDUcbtBxQMHX3vttcsxqx4wBB3FAWHlYBx52nkLoPIJNhI/fQOi1ZP8jK/dHOUPbgKyQKpdwl/5ylfugUv+wKc+6oBacFRd0NN8xTI3ueqj1EdMc/W4Z+0BbHBXXwbeMjECryCvHO2YFsN6G19s7801tutjDCBZPO3Gtd785E/WUuxzjSqTc1Yf66YPkC8m0KsukKtsnV1rvgH6YlyrfkrzFE9uzHU0x2mapu+l3v/+99/57Gc/e/dsmqZpuk36fX/lr/yVn/mJn/iJO8wfs//gH/yDyx/q0zRN0zRN0zRN0zRND5p+5Ed+5PIoZRASHAU3QT7gDugDcp0DlCAkGMsXdAQxAc2Asf789GHagUqPbwYtxQn0KoFBgDUoqB4oVAKugKdjftoJdNQmP8cAsXH5/sqv/MrFlwBREPOtb33rBWr+8i//8gVA9ghmO4YDlNr4g5niiaUErH0nJAfz1F9MgNbaKNsRGxyXkz7nDmC+1sw66SO+dxr3SGWPRXYe4AWygWBxAq18rb11bc1dKzkBzubimFpfJQhM2lub85j0k4dxPMaazCuwy1ccPqz1UCev/MjxdXx+jaG+e+dLX/rSXY9pmqbvjQaAp2mabq9+39/9u3/3Z37b7rDPf/7zg7/TNE3TNE3TNE3TND2Qeuyxx+48/vjjF6AH5CnBUrAWDAVJwUUAz/cjdoA6BytBWMAQsAWGg8X6igX0gYT8wUiPNhafBSf1e/311y/n4CA4C3ICm2AuH2OzoKKY4CQwCpiq018JXoK58r6GvHbRmpPdv8YibXYaGw+gFY/U2cHrXEyPX7YuYqqzDuYI8oK14lkH+VsDILh1MwYIzOQO8OoHTBvDubzEs4M6kHyCVfNTb72BU+vvXM5imae1In0cWx/95HAtOVFtQVtrE0y3xubU2mZ2B+tnnsZwH1gPMYpT3o4rmXja9LcWX/jCFy5zmKZp+l5pAHiapun2au8AnqZpmqZpmqZpmqbpVsjOX/AyYAkqgodAJvD3W7/1WxdwBzgCdaAdHzDXTlbwUrvdncwxiFiZaQMOgUIlqx5UBY9B2NrlZGcy4MlARjtef+3Xfu0CO8FJ9cCpUm5ALPgsP/WZvuC1ncli6KseOAUxzcOjpcURG/wEpvWTC2ipXgzrIT4fQPmJJ564vMtYuzUEZ3vHsDqx7STWT27GA7jB5N/8zd+8PEbbehfTHKwtQKp/O675BnWdF8+YILIxg9p0Au6Aq2MK1HasjTnWD3h27eXjPP98tQHk1lze8hS7ONRYVA5y51+de8ou52mapmmapmn6Xuib3gE8TdM0TdM0TdM0TdP0oAmU/MQnPnEPgIKvoCLoB3iCneAfEAzwAb3AIAAMiqp3bLetGBmI7J266gE/FrgEWUFLAFFdMNHjj0FPx0y9dgIZQVs7Y8FO42rvXcAEgALIfMxLvNrKVV/gFPh1zN9cKUAJygKp+gS0AWlrwcfc6qPNOlkjQFm7vKwBoC2G9QJ0gWIx9AeV1QO71o9v0FRpnfg1b7mDpvrL3Vj6qtPfdQG27VIGsMWTGwWWiy9HdpNaA3MC5u1OVicPc6xfsdWJa7yA8DUsLmZ9K41B5moudgFbl2mapu+FtgN4mqbp9moAeJqmaZqmaZqmaZqmB1ovvvjiBdwRAAzy2mUKjoKYzz333J1f//Vfv0BTEA8QBC4BQfA0AxlBSaBRv6Dva6+9dim1M8fG+/rXv34P9DJtwOBZDyyCmeDms88+ewHT6mpTEjgrP/2Di9rlC5aCtSApMJmAbhA6yEvimBuQ/Morr1z6a7cugeXWJyisHZQFMNvBbHdyu40D5NYTyBVT3taBP2BqB7IxWjf9A7zOxQwEkzVmgWA7aBtD/sYkY1Hr39qo7/hU62du1sr8XG8g2nVwnviIE+w1FyYfealr/PM4GUdfkLn1sAvaDwCmaZq+FxoAnqZpur0aAJ6maZqmaZqmaZqm6YEVuGvHKLAHkoKEdqmCcGAi+Gcn7csvv3xpBz+BRjAPjHSc2TX7S7/0SxcwCeYFKYE+j0NWF4hkwClQCP5l6j2K2vjAICgqD0BTHLuJ8w1WMmBWPoFL+QHAzkHV87HI4gLK8jU/7xfWBvqah/nqLw/rAmrqw8RTmhdIy9ecANoTBMvbGAFeuRnPGspHbmAncMtHPX+AW18Ql5/1CSKDsPIztv7WSl91zckcxAqsiiHnrkdQ3JrdpNaTX3N2bHwAmNWfOWbGcG4d+LveSvXar6VOOxnDeTn+43/8jy/10zRNv9caAJ6mabq9GgCepmmapmmapmmapumB1QsvvHCBoOAlcAi2gnZ2pAKZAGQ7ZbUBfMyje/k4BiHBSbDRrtlAb0APPHz99
dcv56BlbeKDqCf8VZJ8gE25UMDX457bIQwgZsAkyCuvwKSdqOYmjvHFAlfNEzAmdfqaGwUzmf5ANAjsPMgK5Joz2GpndJBWvTh2/p47da2RNQSCPaIZYJcTPzHVi2X99BereRlHnfVobPHsdg5qWxN+6uSpjunDXBuxAs+nArmZ/tU71t9au4bmYk3UW6euVePp070gV3MjvqT9LEkMffN1Xeyu3i7gaZq+FxoAnqZpur0aAJ6maZqmaZqmaZqm6YHUe97znsvuX/DRjk1AFqgEdtuRCRgCi0AtuAc+AqaA4Fe+8pV74BccVD7zzDN3vvzlL1/qmXpgF/gE+YLA6sE/gFG84C8DRpV2wRrfcaW8vNPXOdMX5AVJgVU5OQ/y8gEsvQvYWMFHcRwrm7P8AGTwEigGae0CNoZj8xEX8O0x0Xy1iaG+9/xaM3VgpvmIZ/3kJB9jydl6WDc+YroexeMLvD/88MOXccUES/Vndh/zq65SHgC03MVunq35NYg91+SUtbNm1sRc5e/Yehin66KfeqU6ObWOXbNkrMYj/Z0HgPm7xz796U9fzqdpmn4vNQA8TdN0ezUAPE3TNE3TNE3TNE3TAyew9/HHH79ATTAPrAP5wEfgEcgEJz2iGKi0IxPU4+8cDOQDOgKLAWC7Uu1gdRzsdQxiAsPqTggMYrYLuPfNGt+jpgO9gUYlgZJyaMcyiUdBXmZe4GOQEuQ1B7BR3qTeu3Xt5AW1zcvYQUsxtMmNH5/WSAzlCXLBT32DyeqsGWjKX71YYC/obl30B3eNW27GZdanYya29rOsTVzg145gedWmJGskT2tTXTC2kq7rzEN+cjEGwCxP7daW5MKMYQ1dK/MzDj95pMaurfZiWg/g3H0xTdP0e6kB4GmapturAeBpmqZpmqZpmqZpmh4oPfbYY3c+9rGP3duZascqOArgAb7gHeALsDJQFTgEMcFMUJM9+uijl/fnVideu4XBXX2Cw2K++uqr9+BvbeoBRuMET7UDi0Bju4EZOKg0BvisHwWHtQevtYGKAUaPWebjHbvGYIAuWAo4Kq1B0t5O2ne9612X/sYlcZn18khkwFueAVnrpa6dyACyNjnZldsOXxC+OGcJhDYGO8c8zVhK45mL6xj4rU1J1sC6MsepY37BXFJfm7zlZI3EBIHV1Yc5VucYALaLF+SWW+3lQmI7bwyl/uQ6ifGP/tE/upxP0zT9XmkAeJqm6fZqAHiapmmapmmapmmapgdGoOPzzz9/gWxgIAjrscMgJYhrlycfYBeQBOKAT++e/drXvnbx5xfYVQcIAniBYLtaPboY5M349hjo+qonx8FXeZWbPOwCdh4oBDrlCyar0wYwOgao7WS2C9k5A2aBUbtrzY0P+Ar6Oi8usAkck/no005fCiyDltbFOAFesc1N/SOPPHKJ3S5iZhz92QluWdD3rK/fTUb5M7laE+Pe5EuOrZG1suatFym1V55txTCOenO1JtZUm7GpvvzMx7Vzzzi2C7iYxXP8RuftAjaWe8tO4Gmapt8rDQBP0zTdXv2nnz5O0zRN0zRN0zRN0zTd5wIKQTzwzi5NgBe8BN1AO5DQud2Y2gJzSrs51SsBYjs8CegD7pT5dp6p+8QnPnHpz+yuNY4dx3bFyqc28fUHFD1yGbQFT4FUu1CpWPrZbSt3QNZjrT/1qU9d+onNv7zEe/LJJy/9xG+8xtLXu4LtbD77OFbaOS0eAO1Ye/b0009fYvOrrrmrO616xq8+laes2yk+Sd5Mn1deeeVe/GIoO1af7xnzPL9p/MCxvnY7OwZngWT9zEEbaRPDtbWurkmQn/g3llJsKge+Yjh2b37wgx+8tE/TNE3TNE3Td1sDwNM0TdM0TdM0TdM0PRCyoxZIBUfJbl3QDdRsl6bdnYCe84BioBKUA2ft1nWsDqQLZurHn6pjxREfFGz3LdCafzBW/ECz+ECrdxOLzcQLUqpnAHJ5pnzPfqRvdcw4xTyNxDzPK9PZlpyLV8wzRnN9I1kfdh4Xvxi1d8xcq2Bquu7XuflSfU85P31rt6NXHQBsXnZsG0+9c+vY3NS5fqC8ewQsbi1S45xSp28Q2fl73vOeO88888xdj2mapmmapmn67mkAeJqmaZqmaZqmaZqm+17gL6DWo4lBORAPELbT107a4Cto92M/9mP3IKk6/h/5yEcuj/R1HPATIz8WuOV71jP1wB7oDPaJUf25O/j0p85ZY4tj163zLH/ioy5QyZ/qJxYf7WdbdpPO+tP3tddeuzyq+pd/+Zcvj6Y+8zj7/NIv/dK9trN/PpXldJMlx2DpacZO+Z59b1or9ecxXZ/Lh7wH2X0SAD5jnutJ/ZAA7FdfTHmeY1aSvucuYD8S+NCHPnS3dZqmaZqmaZq+exoAnqZpmqZpmqZpmqbpvpf3HAK9gJxHQINsQJ7HOLcr1LtkwTwC4NSDv0Ce44BfCsTWHgRMzvkHAL/+9a/feeGFF+7F0Q4o1m4scTI+6u0CzqdSjOIoA4fk3NzMFcwOAJ/WWtgFrR18ZMHHTB1w+6u/+quX/D36+vXXX7/UObcbGlw3nr5iAcL1B4a9x/bXfu3XLjnqq748qbGdN37nldd1Gf/zPN90fX5eH7KWp/JXX5tzsclcjGm++XWNlYxcV8d+YFBfOnNJZw79OKC69773vZdHc0/TNE3TNE3Td1MDwNM0TdM0TdM0TdM03df68Ic/fAFrrPf+Ap8AnR3BQTnQDcgL5Coz5yAo+Bf0C/Y99dRT3wQJtQcjjQl+AqXAszagF2xuRynYS3/wD/7By/EJgvmLY3yWxLfj1SOl/6v/6r+6zCd4Kk+Phg50Z/rYpSsXu0v/y//yv7zkpw10zOfLX/7yna985SuXOL/1W791eWS2dQM9lT1G27jqWlvHDHT2fmOPQearTfxMjow6BpTBZfBYDtl1/kr+Z3u5M2tSTO2nrJ/1dD1P8U3nGrf2xXK9XRP9rTtfber5dk80jmtsjd1jxVUv1jmmukzbCZddJ/fvNE3TNE3TNH03NQA8TdM0TdM0TdM0TdN9q2efffYC60C4AGUwDzAE2TzWGcQN9oJ8gN0TTzxxD/LZKQx4/uE//Ifv+Wmzw/Utb3nLvVjq2D/7Z//szvPPP38BoeAsf48FBl8B6OBvO4z1KYdAonjBR8dAZ3MAS41rbgFWO3KDrnyql5s8jf3Wt771AmjPNiXga3cr0x94VAKYYpkDoGzXMAisT/2Dv+ZljfQLPstde+OxdgGD0b/+679+WSP+2vhpY/o3/6zz08e5suPabpJr267r71RiyY1cQ9dOntbCeF1z6t5hfImf60eV+p1lMlbrJYZ2u9dd62mapmmapmn6bmkAeJqmaZqmaZqmaZqm+1YeT/xf/9f/9WUnKyAXpAUqwdJ/82/+zQWyecxyuzu1A3HAnfZ//a//9T2oCASKATaCwmKDpEE/u2YBZUCTxPsP/+E/XAC0XB5++OEL6AV+xRFTv8BgoE98YynBQFCVga8grXEBV+AVRJVjuYC1X/va1y71fM1T3B75LB6AzOc3fuM3Lvna
0QsM85F7fiwwy6zVCVvLkX/5qM+vvudcjON6yDfwm/GxG7j4mXUiZecd882cW8vWkzq3zq4HA4HVFffU2fe8NuQecU/oFwB2rN4x6UPqXHuQXm7lUMzGVjZG9dZTPL5gvB8yTNM0TdM0TdN3SwPA0zRN0zRN0zRN0zTdl3r3u999AZsA3AnqgErwEWxrp23wjg8oCpoCfJQPaQM5QUAws758QE1tQUYyPquOAZxALeBJAUGQEJgOwGr3+Gg7TsFfu3iNwQcItstYHsFg1qOX1QGHTDx52eX7z//5P7/EUA9IA9NB42voG5ANrCrPOsfqM2vXcT5MPV/jWNNAdO2NeY5bvNOKTZ1TY1T3yiuvXOqTtc2sNfgOzLpm1xLjW0kf4B9Att7dI4FdMg45z8816D6qvZyvx3RuLawRX/P7yZ/8ycs1naZpmqZpmqbvhgaAp2mapmmapmmapmm672RHb3CT2XUKggbgPN6ZAMN20PJXgnUEvIF2fEDJYKsds2IEbunjH//4PcB77iR+9NFHLzGcK+0QFuPc0Qn42Y3rnbGgLPgH8BrPI5vlAzw71u5RztqAZW2BYbm3S9h8gUcQscc/e8eweqafNmvCTggbmJWXuWfBSgDbe4I7P33qe/ZxLm7j5Hf6qw/+siBuY5xW7Naeqq+krk/XKqutXdjp+vimNv1cX+tp/ay1eal3ffORh/Lf//t/f5kPaF/9dWxSf8qauG+Lc33PTNM0TdM0TdPvRv/F448/fifzDwV/fE7TNE3TNE3TNE3TNP2gCix773vfewFzdnoCdR/72MfuATrA165aAk0BUdDwQx/60AW2gXPOgbu3ve1tFzDLP1j5x//4H7/4UTCPP6BoDG3FCeCBsHbnAobBUH18zwLmGkcb0Gs8kNE87DC14/Ttb3/7JVdtwK967/z9l//yX975oR/6oQv4BXwD3uKDytr/wB/4A/8Z8GUndJVL5t288gV5nZuDY7n1vl59e5cvKa2NfsYV44zJtCsbr/OOr3MpNjk+z68Bav5Kqr1rcZp7glwv56c6P8frGhcTzO8dzoC8deXb/aUsF/GM43qZmxjFpfOYznb+Yqtz/KM/+qOX+mmapmmapmn63eq/8Ad+5h8afrE4TdM0TdM0TdM0TdP0g6p3vetd9yClXb3gonfhEoCqTltAkEA3foFIoNPuTY+RPtscA8h8SH9A0K5YkBb0A+yCgZ23G7fSTmAQUV8/uHfuexf97BoGboOM8pA3IAxWB36NZy7AsZgZAGuXMvALBqsL+AZ95ZV96UtfugBbOdhJbAx++nkEtcdGt9u4/sXyLmH5WC99A9wMIBbfmjVWa1h9Vltl66utsuNT1pZRPkyda1OZ5R+st/71T/kk8eRF6sVxXVwveYK7ci4+iauf0jjtFD7jOi7fa7UOroFjvt4f7YcN0zRN0zRN0/S71Tc9Ato/OPwjYJqmaZqmaZqmaZqm6QdR4KndsKAc4Aae/bt/9+8udQCd86Bb5wwgBgXVg63eGwzAec9uEA5Y9ehf0FXfACIACpB6RK+6QKBYACFw6527cvC9CjjLx9PW7O41HukjV9+/eIS1d/n22Olzxy/w6wf67fhtRy+AKx5/9YFYOZgLa+7eB8wfzLaLtXcF89FHbGtgXLHEYI2lTnvvEeZ/jteYxmrNr8ubTFv26quvXtZF/SnrxNJ53NrlY53pPGfWmey0zudUMeWRyoO/XcSujetvvbtHmkcxlXKyZt03xU7FrW/iZz3FJmv+/ve//3I8TdM0TdM0Tb8bfRMAnqZpmqZpmqZpmqZp+kGWxyEDmXZcAnJAnRI8BSWDgHa2fvKTn7wAtwCxdrAucAm2Apz62AUrLonXzk5AGJjjrx9fcUA7u2KNDyzbjWscY+sPPOqfnnzyyUsdKAwoAtKgMsgK1ILGHhPt3Hjig4oAoXawGMQN0AZgmfFee+21y25dQFvO4rRO4uXLgo7XMFepjn998znbq1PaBUyBzbPMkjxPE9N6npb0O+vtlraT2jrS6V9p7U9zrXsMdD6n1NOZM/F17ewAdk2th7HlTMrGF8O9og5Mp9qy1oFPY1B11rFzAPid73zn5XyapmmapmmafqcaAJ6maZqmaZqmaZqm6b7Qpz71qQuMY+AkCAma2akJwIKegNsJfAHa/OyeBVHBVLtyQVAQMlALxgXpvI/VI4+JD7ND2A5fsYFF4A/UVebnUdHFkMtTTz11yU9OPWZZXEDRLl/g+a1vfeslbo+PPsFvOZ/gtzzlY4cvaweyGPIoBv/yr99Zl511+inzP43fdVxzTeZ8k/iw4la2C/haQVvrBmh7PDKdYxEflv95nnmcc77lQUp1qb4d62f9ydr2AwH9xE3uBf4AsHU545Djxm1sMn/nXStyX+8x0NM0TdM0TdPvVhcA7BFG0zRN0zRN0zRN0zRNP6j66Ec/eoG8IBxwZnfnQw89dAG8ABqQZncoaOcYPLVrlC+IByQGjfmrJ+UTTzxxAXAnnPMIZXV8wUq7fcFeO5DBP9BYXyam3bfKZ5555l4uIK9+wenf+I3fuOQEBH/1q1+97AI+d/wGVu1eljPoGBwMttrpC/javdwuX/BYf33zk1flmWcmZuN5P/B1n+KcMczrut3x+SjnDAi3Jmcd0+fMk10LMAXVrRVQby1PoCrOtbR3XfQF3a2rumB9OZy6jnuaWH4s4B5z7wD1zZkFgfk5dq1A4GKK4bjclMVOraVrod2xXcDumWmapmmapmn6neq/eN/733fn9/+/f//d02mapmmapmmapmmaph8svfDCC5f34np8MpgHuIF7QCKBdAAvGAq0gYYgq92wjvkCbKApuBaUY3bgAq1UHVBrPPE9mtl4tQG3Abtzty7wC0oD0MYGZe30fctb3nKB0e36BRLBZY+B9s5Y54FYYNdcAr8sWAqoAsbigYy111fJz3F9wEVw1/jGVJePR2aD02CzOB7lHNhkIOWXv/zlS792NVd/01ggsLHEE9e10CZG/ZTlcNq1XEOw3Xrr88orr3wTNFV/fc5cJ+Dc2r7jHe+42/oNQFu8TP/zPBmvc6V+Hqvt2HUBZvU1NyWTL1PnXjv70zmW41OtiWug5GM39zZrTNM0TdM0Tb8b7RHQ0zRN0zRN0zRN0zT9QMt7egFFcBVgA8qAUjAOMAPPgDWQNTjMBwwMAoOzYKv62uwSbeesOEAfXwAOjAU6azeGY/H4OdYO4NkZej7WWV9x5G0XbO/61UffIG4xAGfvmwUY5R7Ylaf+YveYaG1MTny0Gy/IKx8gVkxrBIbKWVx+Hqds3nIzPms8u4vra0w5qmfWV2xlYyv1N2fXwlhyrK2+AHL9mbauAzNu4sfU882ntuR6Vbrm7az2IwF1WQoAp7P9up7Kwz3n3mHW0Dzdi/mYX/nJl0+xz7h8z3NqPkpr4jqQeB5BPk3TNE3TNE2/Uw0AT9M0TdM0TdM0TdP0A6v3vOc990Ab2AiO9djngCLQCdQRkAamOvfY34Ap2Z0KFAKg4gQYxQ6KApigqv5AH3hq5616x3IwBgV87RIGaNv
pC0QaE5AFDO301cYP5A28gsFy1+5cH2PbnasNpAaLjV07QGsXcu1ykCfwCNiqs3vZXABv9fo7D0S3JtrEVaoXS77B6fysj528rkNrrt662FUtFzGsZeup3Tpl+ujP+FRfn1PVZfrSNVS169Z1cu3Y2d5YHQPA7gftxav9jNmx2B2738wRaLamb3vb2y7r0/0QEBbXWte3+NdqLfjwrc4a1ubx5nsX8DRN0zRN0/Q71e9769ve+jMO/LqT/MrVP0imaZqmaZqmaZqmaZq+n/Ju3qeffvoCynxf0Y5a4A04A1+BOWAPwAQv7TYFFoE6YPWpp566AFXwzg5VcBNA1dexHbFKkA/MBIgffvjhe4AySAmeGsejpuWhj8dHB1iBWTkGa8FRfeRq/B73rE3O5gQcBks9NtnjrfUXW1sQVb7tRNUmd7GCs6w8xJdvffORn+975AQyBlaV+ujPvzrH14BUf+sjF+vFJwHb/PUPYib9zc+6UfCTBUutl+vTWLWd7fUn11Mevd+X3xm/8jyWb3NsnHzyI7GoHGp37jpaz66R68CsHx95uS+U5zq80XhKbZ3rZ0x9Qet/+k//6aV+mqbpdyKvPfjsZz9792yapmm6TdoO4GmapmmapmmapmmafuAEBnqvLmAJbJLdrWAlQKYONLPbF4xTD+AGRLUFMkFbIJTZcfvjP/7jF2AHHgK0n/vc5y6+dnaCuGAmoByw1SaenbX8jQeEAs5iArTyBGY9Zlme4sjDeWC4xzKDiMYXW1/AUEw+oKJ6fe0a5t8uYsavvsZWyqnHQAPk+gKIYLgxwdfWyLt+zYfxE8vOaJC6vtcmvrHlYS7OA8sZwFxcFmw96+SUdX76WuPsup+6U2eMrL507Q+yAugB1tOXArSpc77MveK6upfk6jp1vaxH41sL7fqQuvK7VjlkfFwjcvz8889f7rlpmqZpmqZperP6fb/9D4Gf8Ue8RxSRP+S3A3iapmmapmmapmmapu+nwC9AFgADScE2cCz4anek7zDsDAVqQTfQ2Lt3wTR+vt/QZpcvoCcGiNkjngnAUwfiBVU9Ctmu0sy5eF/72tcuvuLl79jO00Bqu3N7H2/v/gVog4WNY16+k5FXQBXUFMccmrM2Zo7ald6ba67tgNVuPG0gZeujf/BRTALK9dXPTl4xk7nwV+exz+08Btq1BSqVLFCqPHfg8gmeamPtEibt1efXrulU30o7p11jEkc9oBvUve5P+lFr0BqeuXdcblSb0jq5NmQs6/pGkoe1Au6NVczGaO0a73pcx66dcfR3L7iXpmmafifaDuBpmqbbK3/h/0zwlwaAp2mapmmapmmapmn6fuvZZ5+9fEcBiAF7YCiACJjZiQl4gqvaCHgF3gBP0BJA09e7YQFM7+gFO7/0pS/decc73nE5Bm0fffTRCxDMADxQFmQG8YDfIDIQLD546hygk4Nc7LSVH187boFK8dsdqx8AXB1/83MuDlMn54Awa0fpq6++esmZmY8cGaAcOOavrxyMp05fMZlzOZzQWT1ZVzLnwDIffZLvj4LAGenLmve1zvazf20BXqU1cXy2naa/+kyda91xEJpOPz7NxXqdOdwkfQiwBWSVLBhsjW+SHNyfvlsz3huNI/5Z33jk2Jja3dd//+///bst0zRNb04DwNM0TbdXewT0NE3TNE3TNE3TNE0/UPrkJz95AaOAG4gGuDkGeO3ABcd67G7w065X8NE7gwFSIA78BEKBW49Hdtw7hcUBbz36uBj6AKzgL0iq3q5TUBSIC9I619e53cRgH3BpR6+dvsbRpr/yhK7yMI/iMxDbmGLoy8BZ4Fh+YPMjjzxymZ9+QLNY+TJA0jh2KcvJuOWgzq5feRtLbG3mHGS2nuZoDKWY6mtn+gRC9W0Mx52DlvnU57Ta+NWPFfNUdaedEuMcj7m2zLU8TR14695RVpeJo0wdN4Z++rg/rKHyWvzk6BoVL6s9dXxdp3/Xx/lb3vKWO+9973vvekzTNE3TNE3Td6YB4GmapmmapmmapmmafmD0Iz/yI5cduoAbgXrgpF297XYFxkBUu1Sfe+65Sxkws+O3PnbvgnZ2ZAJpdoECnHayagfbwDn14Cg/8BOA8+jdE9R6DK92O0yNxQeYBZLB10CpscFTPh7TbCyPJebTLuF8A7/qArmAs52y+tm1HKjWJufe13u+cxfg5c+AYfBcnVjN3VqoN7Z3A8tTX3mKLT/xnWe1B3KV9bXWZ4z8zbk2dsZhdjITH/Wnadf/jJ85Z3ZwU1D1pvbagr9ZEFcZmNWHOj7rlSR+8Ni9AviD/TdJP+vkOhjrjFd7OtvOeuvgOhnXNbODb5qmaZqmaZrejC6PgP7G4Tfk16Z7BPQ0TdM0TdM0TdM0Td9rPfzww5fduj/0Qz90AZd2tIKjds06Bt9AWW0gG1gLyAKiAJqdvz1GGRQGjcFT0Fd/kBEkBeYAOrs4vWMY/Hz99dcv8ZgxfD+iBE6VxQCCjem7EzFA0x7jLB5gLB74CrxqB03lHoQFeEk8oM+O3SBlIFapzXt4zQt8DHQDg6Ct8fQJEJu3ddHXPMQJ3moLaCrNQR0zvjHMqXcaU+2BSmORtaGzvfwda1efaq+0lqfUWZ9i6M83abupPas9H+1Kqjzjyc26nOC4fK/zds7PsdI9JGbXpLU45TpZS9D4Ov6ps67j0zdQbQz3gWszTdP0ZrRHQE/TNN1ebQfwNE3TNE3TNE3TNE0/EAqQgmzgJ/gFvoLAYC7wZkcsEAeeAptgMD9Az25aj4IGQ8V55plnLnVArXbyzl/vuNXPeMbp3bR2DzOxQVE7d+0MbmcsuPy2t73tXr/gb3AO2LTz9p//839+AcHyAK/t5DUHu3f5gbP69WhmEPqsszOY8dVXHsYEds1FbGOJKQfj6McfeASfzTeTa+YR1erkZQzrIM7pB4a3K5dpM3/m3C7eIGU+2vid7a7T2S+j+gdVM+fpBLCZc/XZ2VZ7fd0v+VVXf/dT9dWl6xjOzcE5cw7Oup7W8JR2bXysZbGzU2c9a1yltWSOXfvtAp6maZqmaZrejAaAp2mapmmapmmapmn6gVHQDERjoCgoa3cnMAt2AmPALjgHzPGzIxNEA0gBTUAXCAUV9QX8gFbt4oCpQC0YrC1QbHxQ76GHHrr3+GeAmB/oanz9HAPCAK4dy3bh2h0MUBvf2GCsXcDGAwv1AXpBWGAZTA78agdj1dnxKX9tzZnJTd52PDN9Gse4AXTWI6LtPuYrzx4RbWzx+Ztf8R0HcoOpyiBupr025XU7q42dcJe98sorl2tFxdGe7/mY6MqsuClwerafsck5c7/YNa3e7tx+VFA7K07SXt7V5yPWF7/4xcv9oC8p3Uti58fO9mupy5LxXJP6v+9977vcP9M0TdM0TdP0nWiPgJ6maZqmaZqmaZqm6fsuAPORRx65gFQGogGe5BjUBMKAWCDTzldA7NOf/vQFZNopC9YBm6ApgOZdwuAnqAjm8vOdB7AG3jFAVZsxfCcCJIsrPrhqPBBXTmKBm3baKuVsLHnylYPctIklhjZ18g
IK5QIQm488QFzzkkM7iu3ODUoDt8YBctUFwhmYyx+QZWKTeMY1vwCztszayMnO4uBmsoYEglsHbeKBk9oy86Pr9sraE+Cqvkc1aw96MnV8zvbiFbM2udU/n9qca7feZ9+Os9ZK/h2TubQm4uXbPCtJux3n7inXzjUB8QF+P1ogvq3PeZ6lsz059uMG47hX3Hd2m0/TNH2n2iOgp2mabq8GgKdpmqZpmqZpmqZp+r4KqH322WcvkAtsAwIrwUsQMwgMbIJrgCx4qu4973nP5fsM4BRI1ReMBXwBVG1ALmAKombeN8wHvAXyQDy+zgFVMBj8BUuN63HUcgRkQT6AFVzka1etNnAXVP7qV796AXf6+Z5FKbfmYAyg2NyAZbHMF2CUp5iO5aQe0GyHsnHs8LUOIK44jL+5a9dPDBBRHbuGjuYa0GQgpPkETLUn55n2HptdvOqDsOan3THVVru1qI+yttqt4znetWlX5lO/Ysg935tiWB/15p+lc62sfXVUPTWua89cD/cKUKtffsXuvJIcy6fjax9juFedu8af+9znLlB4mqbpO9EA8DRN0+3VHgE9TdM0TdM0TdM0TdP3VSCm3ZNgGoF7gGygzq5KxyAcCAuuAbsg4sMPP3zpD3qCZdpAVf34eqRy0E9bBv6CdcCyft4N7Di4ps4xX8f6B+P4qSf18uYLAAKB2qvnByTLl/Hh+x//43+8+Dn3aGqPkTYP8Jj12GnxgG3gF/S1uxVgbBexOmDQ2vAxb/6g8tkGBlsjPo6ZXcXBTz4sf5a089d2GihpTZT8i1PMlE/1rMc8k3Z1p0/vEdZGjk+rvuuijqz5k08+eak7fZg1dy/58cB1/2szj9aJqj/9jSlXvoC39Va6rsZy7eXDN5VLddfltVwva20cO+T9WGKapmmapmmavp0GgKdpmqZpmqZpmqZp+r4KZAMuA2oEnjkHQZXAq5204CcQ9vjjj18grr5BNo8sbrfv66+/fomjPuj22muvXUpAVvxgozoKXgbcykdZe6Y+GYPJ4zwndSwY2HEQODj8ta997bKrGJRWB4CDv+ZrXo7BbrtqAeJ2EjNzteMYNBbLrubazCV4e10yap7Oza22AHHt5/xZENdca1d2nGq/thOEOi9u7afkcJp+9VfaQQ2kuy/obLPO1tHObaBfvevQvPicduZkDU+d4+qvHVi3XsAvK3bXm385K4tzltUn50wO4pPSjvRpmqZpmqZp+nYaAJ6maZqmaZqmaZqm6fsq8BeUBNOAXu/KBTnBMwAM2OTjPAgKFIJ+QGpgFwCk4BkBbIG3LMDZmKCpMT1i+Stf+cq9NnWgrFxOaAq2nmPIMbtWPuVhDgCwHb9gtd2oSsDXfBgADFba5WuuwHdQF/i1yxQolrc2j7vmL1a7fLMT4poD099O4t/4jd+4rB2d63JafZXOryHtCTBrywDi2qk+xbtWbbXX9xzD/QHiknrrZR0eeuihe37uBQJ+7QK3Nk888cSl/QS8wdnqq3POmod555dv+Su1u0ccux+1W3v5Un2dV1LxUvXJuZiuqVIuP/qjP3q3dZqmaZqmaZreWAPA0zRN0zRN0zRN0zR93/TCCy9c4G4gjTzCGRBlICwICvQBYB777LHOvc83oJaJEyz7lV/5lUtMwNMjpsHTzLtU9X/llVfuvZsXHLVT1A7il1566TIG+BY4DYAWn6ljjUtnPgzwZXaHmou4DLyVA1Dp0b4AtsdAg73WANCVNzMHoBPQBIxPnwC6PE74W24eTeyx0ObmEchgpXZ9zKE8HduJDA4bT98exUzN8Zx/bVQdO9cjNQ7T1i5tur6O1WXWzo5vO3ydWzN15lNdfUBcEN3jtV3Lsy05Nk6l69Ox/gTcNh8qL+JrDuqso/vEupJ+6t1T/JyfsFkbc/ztxK/rajyw/xOf+MTd1mmapmmapmm6WQPA0zRN0zRN0zRN0zR9X+RRznbCAmME6oFdQCXoBqwBnXZvOm+XKxAaPAty2pXrvbf8AmYemwwi20lrF7FjwFdfsYBU8JQv0AwWqgeEA6TF//KXv3zv3BgBzmAeBfeUAT9zAnbBWnMxN+DSTlFQW04gpt24cmHAYcd2+mr3GGjQWBvQGMRlcpMTA70BXPOtr8cGmytIeZq+QK91Mz5ffsbQZvezmObBzM2cm7/SDuLmX3vrUpluaifr1fUkbUxs62MedvgSP2sHnIO2Zwyy7gF3um4/xwr6Kuvn+pajNnmou1YxtFuHILBSm+uunvI1huPislPVd0zFd03k5J54//vffymnaZqmaZqm6Y00ADxN0zRN0zRN0zRN0/dF4C+gC3KBY3bHAo3ALRAJvAF/wCcoC9DyA8IASyU4BlYGLe2qBTL52S3ZMT/nYDAYC6AGWcWWh3HEYIHe65LRCegcG+M0cBtMBn3FBa3BzKAvs9vX7t5yAQ+Zx1ADxXawyk2b8QPQ5QjeWgcxQF9jiGk8Zmxt+pT/CYC1iQ8um792cRuHWePmSdXxrTR/1y2AWT073xNM2rLW7rTGCZ675oDvtepvDKp/9cpyAP2LS+d4YgeLHas75ygPx8xxqr+4jaefa+lcTPE6rn/rRGdOqXjJsb7dk449Bt19PE3TNE3TNE1vpAHgaZqmaZqmaZqmaZq+5wKwAC2wMThmZyzoZ8eqHazBUXAXCAuABSiBYj5gJ1DoMcrgmnbnYvIDYvnwDfqeINT5NfgE/4yVGV/sIF4G7jEw2zhgKqgqZ49TtnsV7LULGID2GGNzb9xyMU+7kM3JI64BWTG0yc1jnO30tVuXH9gL8PIDix3zNQcxA+LOvQfYfJpb47W251xPP30B5t4TTNpP0+emx0QHLvkEPE9gWrvY6vNRR87rz866s16cIDM5r+204jdW189OYmXgHqAvN/N33LhK56xYqTWz5sw94V7mU+zyOPsVixrrbE/GF5fcx08++eTleJqmaZqmaZpu0psCwH6J+573vOfyqJmbTBufaZqmaZqmaZqmaZqmbyXAMgW9vN82SNouVjtbgbUA2K/92q9d6gA1u4Q98hhYA0295xZ4I31O8At0Ap693xYkBfgYXzEAW+fGSuV2Al+7e8E9YwK+4hrjfPSyuYDCvicBac2XX2OaCxDLAF07l8/dvsFcfvqZq3VRBo4DvXz1YfVh51jgonm1jrXpAy6rD26ebRkFK51bL30qyTqd43R8rdqZ/o1LSnVkp6vjztMZ+3p8UlfeZ/+uZdY1dS2VYrYjWLu4YjguP3J+qjbjtHbUO6rdq47L+Yx11vG9jt25dte5MT7+8Y9f6qdpmqZpmqbpJvkL+Ge+cfgN+YeEf3TcpH//7//9vccLZf5I9o8Yf6z6o9Q/cPhN0zRN0zRN0zRN0zS9kYAsu2KBUzt/QUlAU73vJnzfEDAFcoFVsBOc08fjkT3KGRT1/QQQa/csSAbMkjjg28svv3zpC6IBceIaC0z2PQYf53bq6i8XO2x91yEfZhx1JAY/j5e2YxQ8JHkFnIOs4Cp/O5L5260rjjmr15+fsbR5hHNAUAlAGouPPNRlp6yPNTjbfE8jN
2Xt4gYba1OCz46TdbYuTL6+69E/6cO0K+1q1j+rLzO2a0GNUT+lvo7JergvrOMZx/dRxq9/42fgu7Wjc36Z/mC8+ix1jYlv8Ni6i3XG0a81pEoSIz85W8PqnPPtGtaveHTGOqU+P4BaLPerHyyY9zRN0xvJpq3Pfvazd8+maZqm26Q3BYDJH8tPP/305Y9x/6Dyjy1/bHpkjz+mB3+naZqmaZqmaZqmafp2et/73neBsmAWsAf8gVx2y5J6EBCEA0vtkgVMgS8/RFcCpsCsPr6TsDMXuLMDVx/gMRjLxw5h8YBidSCyOOBqoFmbGKAkKMp8TwLciRHYbVduj3tW5xHNILKxzQcA9L2JPvqLq5+68lIXEDT/QKZzfTy+WTwQ1ncy6k/ISwAj1c4oGMnE1k76nm3MWplzsv6nyTeIK74+6iutv/ZA5RmbnYBZWb2+xnatwXiP8T7jnyb+TXPLTkBN+qivPCE1dazdfeFY/K6Ha+r66NvjnJnrkm9ynGkLoOvrXEnFvFb9Ok7VF5vcW3L0HdznP//5S900TdNNGgCepmm6vXpTANg/FB566KFL+5e+9KV7u4D9g2mapmmapmmapmmapuk7EVDr8b6ALngK6PpuASQEAkFZAK1j3z2Ar+Ap8KVNP6BUm9K5H6eLpd2P1vmCfuBdMUFCYE5sx8YEZO3OBeeMw9e7dvVnYgKEX/nKV+6BPfDN9yPMsTpx5Cg2YBrsY47lKI5zEDEjkC8DocU0ln4J2JYzECgv5QkYweMgaOBQfT7mGkisbyZ/7VQe6uTAzOcExGc/5jjAWvvZFgBmzs/4gKZ1ePzxx+/1r704ygB0Kh6r3byo+ZUDOwGwtuSYvzWVh2Ol6wTSk3tIjDN+Rvom8TLza8zid03V6V95xuq4kvLxuRHbvfoLv/ALl93l0zRNN2kAeJqm6fbqTT8C2qNm/Lq2HcCO1Wmbpmmapmmapmmapmn6dnr22WcvkBTE8r0CcAoAA1pgq+8lgLMek2xnqHMwDjwGzOwabfevXbLB1wCvc9DOOABjZTt3nYO8ATnn4KzHQoNsdgM7tqM34OaH8eBwUFeu6gLE8mPeS6y/2Jk6scxNjuoCgklfeRQvn2Te5y5fpi7YyIKgYLjxrKsx7SA2jx6TLK6+wVF9AWT91ZM1zwDgHqPM1xjqHSv1OSEvFZu1w5g/8fF9Uu9K5mN88bWJX199ag9wy199bcra668sRv2bH9OnkgH01Loz10Qs9xIj56zj1HExrUtjk7j9KKAY1/2Z/un0qU08seXmHvYo6Gmapps0ADxN03R79aYfAQ30+oO/3b/+QQIEDwZP0zRN0zRN0zRN0/Sd6KmnnrrAQIANsAM8wVbfK/R4W6ALDAUw/Qj9rW996wWq2e0IpOn31a9+9d7OYN9ngGJiAXWApVhKIBTc4+Mx0OIYw3g9Wtr3GQRUAm3OxQaSlfoDbt7pK74+chNDPiCcPqC12B5PDW7KwXjAsXpmJy/Q2bmcxNTXTmRzqy0AaD1YkFOddrHBZd/lmF/Q3LzlpR0wVJqHddcXNLcudhuDw/z5nRA3eGldzcfanrt8qdgswOqYj1JfJT9962NtPGVOTo3DT//k/NrO9sZg9Q8gZ8Vmcim/0/jJpx3S1k1JAVtr3fdd53XJr5LOsd0TjcFHPHb6U33OmKnzs9794ty9/+lPf/pu7TRN0zdrAHiapun26k0/Avp8/y/zh2byh6o/mAeAp2mapmmapmmapmm6Sc8999xlFy8Y6DsEUBUkDZSBjKCsenXOgUptgJrzX//1X798fwFaBvTUgYlgKh/AVT8/XDcO6Q+eit8uTJCZ+ZE7sOe7D3H1952HPDz62Xcd+hsDwAMJQeF2B+ujXgxjism3HcMnVOQTqAUl+QcVSX526uqjnnznEkgEY8FnOQHifPg6BsqdF691U1orPtbVmNq1WSdr1vz4MpKfemaOPSaa5FJbFiAu37PNGMYHzYPBjZXvCaDL/7T6pfrVbo7VN79zDuLnb/3dG77fAvpPBX35ugdIHCa3ro1jJXX9yt8Y/JVJ3ABw/m8k7eKcaizXUJt70I8S/KhgmqbpWgPA023XJz/5yTv/0//0P93503/6T9/5qZ/6qXv2Ez/xE5cf0Pnx2jQ9qHrTj4A+d/9em7bB32mapmmapmmapmma3kjvete7LmAVdASzQEEA2PcJ4CNA6LsJkDB4B3IBqcAZmAYC2rkK5vk+Qj8xAozBWJBVyU8bXwAOXOUDgtp5LA8x5aXOY3UBNjFBQv0BXSBQu5hyUuccDBSX5aNUfxrpa0y7X/kYW9sJA4G9dgFrCyg6Nq65BJYDgsHCAGg6+4LGHk1dnfUBP8ULboLPQVwKnDJr5ovSdtmS63H6BHDVB0CtpR2rcuj6pnz0dVz8AOkZ/2w/81Of9ZjqctDe3FiPBgd+bXAIKBePjOs6Nb5S7s0lWfvyZKfEY/oUw7k43RskxvXY17FuOhe3NbWb248UpmmarjUAPN12Pfroo3c+8IEP3Pn7f//v3/kLf+Ev3Pk//8//887LL79850Mf+tCdF1544c7f+Tt/567nND14+k8vkpmmaZqmaZqmaZqmafo91Mc+9rELbARagcBKP0QPsIKDIJ3HOwNngVnviQW+QFkgki9IFwTVBqQ9+eSTF0jHQGRwGDQEnN/xjndcACWICgYbGwQ0njHUAYNvectbLuPy1Z8/qHvCXzuNmd2X5SEncNduZPm//vrrl/ovf/nLlzqPWrZT01jqmb4BY8eZd/iaFxNLPzt/5ai9tmIwx2BgbcHG4mu3TmC7dfalaH7l45hPpo4PFTe4yfhU71jerlvt1LHHZBcr6WPMYjim+pz1HdfGzvpyMX7GJzl/6aWXLvfbww8/fKk7YzExmGOxCGgN/Baz+OT8WvrKrbUvv8BtY51j3qR8UsfFNMZHP/rRS900TdM0Td9e/hb4p//0n15+bGeH8DQ9qBoAnqZpmqZpmqZpmqbp91S+YPujf/SPXsAnIAag2pnZk8bAXztagV2AC+z0KGPwFaDlC2yCsvqCX4G/BMYF1+zw1df7fIHf3jVrDOOpE79x1dkNa3esHIyhPvBrPIAVwBSz3cfazAncBWe9v9c46vWXr3r5AN/lzjx20Byah/jqAV4mN6WY+hunPIBl/YK26pVBYPECjvk4t7Zf+MIXLpDbMdNWHvorg5HagU+7hAFTMbTXlxqrNiXVrq4csldfffVejPqeebzyyiv3wKeSnWOkxjh96ueeYfl0v7kXqHpynJlvY1kPpb7uKwr8XsPf6jqXR3PvujnnY4zGyzd1fNadan7FJnN697vffTmepmmapmmaJnpTj4D2CJ3rdwCf5pe1/ojdY6CnaZqmaZqmaZqmaUq99zdwCqiBsHbz+q7B9wkAGWAKboGddvwCswCq7xz0sVNXH3BUf34eawy4grfgqzr9igfEOgZTATMxADm7agFjQK3dx/wYP5DVWMz3HHzbKRys1AcwNobjoBwLHuebBfbME0z2HYydyAAjMMtHDuYvZjCyPoFH398US7u2TFzr
mtT5vqZ3Dpvv+Rhmsr5Mux3T5tRO2cbNR3xrm842Zp30KT91+bie5/h8tGX5Fr85ne3GP/M/25jrr71+Pera/XH69KhodkpOwXrXTwzrop+SP5+uZ/NM5/nZLo6+4ojNTuV3luVWHalzzcvLsXv085///F2PaZqmb2iPgJ5uu3oEtB/x/fzP//ylzo+m/sSf+BOXv0v/2l/7a5e6aXoQ9aYAsH/k+MWqd+F4fFH/EPEHpn+oaPMPrwHgaZqmaZqmaZqmaZrI9wy+fANoQVuwE8wCOe3CBQvtEFYPaNnN6LsGvr534Asag5fAIVgM6NWub+AXuFTHH+AFb8E2oM67bUFQdaCZfuBtwNajnPtOo3YxgVAG5gZxmT7G0s/czjZzs8PX9yvinG2Jj/mYv/iBYnNn8gUozT/4F0BUgqBi89WuvjZ2Alr1gU9zM28AVP9Um3W2tmI88cQTl77iay928RvzjK9N7iegpeIrfX905if+GZtPAPe6XZ3SNW5sanym3T3hngKcn3rqqXtxa2fXALh6ebr2XXNrXH0x1FHXtGt0qjolv3In/d0Dxbnu37k+jq/bSV2Pk3Yvfe5zn7tcu2mapjQAPN12BYBtbPypn/qpi33qU5+6/L33v/wv/8vlb8dpelD1n56VNE3TNE3TNE3TNE3T9F3UY489dud973vfZTelnRcAGugIxAJ8ALD37Xo3LtgIyAF3DBQFyIBRkMwXeMAi+AtSAsX56w+Egbt20drR0dPKQEBQzDjGrA94CxIDhPyBZT7tJAb/vvKVr1x2DnvXrx/CB2n9MN5jqn1pCKaCvUAwP/XgbI+SVq+PvubDR77G5aMuMybr3Lyz+is7Vg8Ciu+8cZRBQ1afzHuK86HGCIACitqTtjM2867fdLbl29i158P4eAx0qs/p1zuQxTjbHbNz/HzyB2i7B975zndefK5jKE8FhoO07jN1/KxtP0ZgrVN2k/JlZwxm/MBtOo+pc7503U7FNYb7Guiepmmapuk/1//1f/1fd/7YH/tj9+zP/Jk/c3kX8DQ9yBoAnqZpmqZpmqZpmqbpuy6PfH722WcvEI3s+ASpPNoYzPV6KUAX6AVetdt1CRSDWgFFcYBbULXdu2CtXaRAKhhsFwfQq97OULHazev9uUBru15BW5BWuxz4Bp2BXeODvnbvAtS9u1c7aNs4dlzKXVzz4ivPdg0DuWCfUtxi8tGuDWA2xyxAyqyB9x6DeyDgWd/xCRlPwKmtvqSsn5LxBxWz+udjrepL6s8x2Kn6Msfn2Kx65p3Jxqjtup05P/tXf+ZwSj0Bsu45a+tdws3vuq9j7XSug77MfeaHCsRXvu6D+p67get7LXVyJ326J8TQ5n4OIJ/9i3fWnSomietcfj5v0zRN0zRN00QDwNM0TdM0TdM0TdM0fdcFvAJY4C5wBug9//zzF1hld6Y675cNmgJjIBY4FuACaIFTuzGBMjuK7SDWH/gN/gZk7a79V//qX13qQNmAMlAM3Nrdq7+2AK7dsNqZdrH4ALv6azeeuOCyvIE8EFMdqCtH+Wfa+YHHxhKvdw/Xv/l61zCIF2S0TmcZ4FOqO9tPyGvdas+sv3Zl/fOT/wlA6yN28e2yDULqr+9pYmfaTx87n892dWds53YBn+2sPMS67n/Gd3yq8U8fdqp+mbhkjoFj5l6xq9v9BfTy4y93105+/IPEjotFHSvr2zUQw7WXS33zTR3rw25qUzJxOvaubZ+3aZqmaZqmafKX8Hf8DmD/gPJLWSX5B5Bf69J12zRN0zRN0zRN0zRNt0+g7kc+8pELvAU/QbR3v/vdF2jmGKgCQ4Eqx+Cpc/AWMLUrEpy12xYoA3+BWIDWuTiOgbTen2uHLbgG4AK3IB3wK34gWJ1dob63YHIB14K67doVn592cE4uxhJfLvIDCcUMvmmzq9j4+mflqz34qEzGJzkWRwme+s7Fetj5rF0bk9Np57t0xQtiMn19z9M4/NUDj0prpb8xmXZQs92t1tn1TGd8vuKf45cT4+v6kGPxlbX77sn85Nf4tRff90zmrr446uV33Z6d+Ysvv7O9+PnU7hzQd9/a7d01CVhb+9QYYrGuLX+qbMyOqThn3+6jU85Pu1b1YjQXubrv2TRNE+0dwNNtV+8A9v/Gn//5n79bO023Q28KAE/TNE3TNE3TNE3TNN0kwOwTn/jEnSeffPKy4xVMBdPAVTDVo5wdg2lgqx+Ug6+AKlBn16XSo5m1B36d6wcYg7H6+e4CvASNwbMe82xMdSBbYBeEVQeWAZLGAPjEDu4ByHIEhsFRfcUL4Cr56xskDgqfcC5Qa+eseZmD71jkLEdtwb7gIIDHp0dWe18xP23qzTkYqd6OYWsFVsrRo5qtdwosBlHN5VSwUBkApgDl2a70ruXaqPjZdXx1/PUF8QPMpDz7mtsJkEk/lk+Alop7U7tj5Zm/+EHixE9bfu4Fay9X79A9Y/Nx7V1rsv5Uf/dK15LPOU91WfVUDPXG0Na9RGeM4pz9T9Ue0Hdv+ex8+tOfvusxTdNt1wDwdNvlR31/62/9rcHf6VZqAHiapmmapmmapmmapt+xwLUPfvCDd97znvdcIOc73vGOC4QCtIAp5475AaEAp+8e/uN//I8XoOocXAU/AckT/IKtPd7ZF3ji6AvqgbBiBmo9qlld4BdU85hjUAz4VScvu2uNAaLKgeSh3bjG1DfIy8T/8pe/fPm+pF2+6gE4jwqWqx27/MQ0Xu1J3vJngTvHxjUHuTWuNgbqAZP8rRPoKL8ApR261kZsKq5+Sj7WCiB2rl1bkFN/cQOktQdHla5L8a1lY2ftUKbGV19Z3+be2ErjA+X686fi5gdwF5/O/sz11jcrZ++azq8cnOfj3uRnTv/6X//rO+985zsv7afpy981cl3Mrzm0Q7xxxeFDfEh9UudcDOpYLvrVfipfOo+v45iLOPJxP7jHe2LfNE23WwPA0zRNt1dvGgD7R9Ezzzxz+QeYfyS89a1vvfPEE09cSn98+kfPNE3TNE3TNE3TNE0Pvj75yU9edoiCvB4VHHyjwK/vGUDCdmLy930CgAZWAbHAKwgHhAZ+gVbtwKi+b3vb2y4wFCQVUz2/3r9rJ6dzMA00BOSq04dfgIxpC7rq79GA8rKzVp76g76Arno+vvcALQFk+WszHl/tdikH864hYCBWDDBbX2vVeOr1EUsf5lx+YKb4zvkFJ5XysdbqmX7qM+ftwq1d3+zcBUzaXUdtSvFdW32tB2DrGvX+Y2sZYOVD5aAEIpvfTePr3/hnu/6VxddW7Np7DLVz18Va+e7KefMAiZ3X39rz9V1Wu2flqLw218h9cq6/0j0hfhK3e+GU+tT6VDYH5/pVLz45Z2cM6jx/eZiHenma2xe+8IVL2zRNt1sDwNM0TbdXbxoAe5STPyb9ge0PZv+44e8PYn39Aax+mqZpmqZpmqZpmqYHU48//vidH//xH798DwAOAmkALsAJpIGWoBzQafek7xGAORBUG1jluwNgzQ/MvT/X9wvgol3EAJ423z0AuHbX6gfkBeO0qbPzF/D
yvQSYBp6BimDcL/3SL90DbO0UFs8OSRBTH7kAcer1BeKK1XjVmwtwLH6wWeyA3fWjnglY9r2JeZqfseTCB7DT3/ytnTrnp5G4wWM+YgQPmXyvIStY65oEWItF9WOguu9yzl3CcrY++rsWXTOg3rwau7lYp8ZufFaeAdx05g4wy9/46ezr+ATUVFsxXAtA2nv+SJ/a+LWL2LkfHLhfzSsfANUcznPzBfnde/I3b+tgfo0t79Tc3WOdyyNdn5PzTPzuo1P10f9ajclHrkrn8vy//+//+67XNE23WQPA0zRNt1dvCgD7Y9ljjLwbxz+0/MHsHz9+ResPX/8I8o+G8w/gaZqmaZqmaZqmaZoeDPnO4KMf/eidd73rXRfQ5HuCdu0CXwCnnbsAGSAHaPmugB8I5/sC575D8J0COOn7BVDO9wu+cxAP8BRPH5DVuNqBWwK4QFPjiAfeBWv58wPDejewWGBwUM+4xlLqIx4Ax48/0OlYrHa9yt08A7xMnwBcBhj243jzKm47Tp2f/cxBee6CDQQWk881xA1CMjlbD8BWrsbny+d8zDKprx8DO8WWq7XhW1+yrsHnrLHP+CegJm3lL0bS3pwy1+HM72xj7pHai2l8a2yurpMfJeibj3k1R9fd/eP7riBxc9em9Lhw66DeteLrxw3uE+O4R4zT/PQx7/KRp7bT51Rrx7fzJJbz7gsqRv06vqkkOcqd3G9f/epXL+s2TdPt1gDwNE3T7dWbAsD+AeQfVv1K1q9X/bH5q7/6q/9Z2zRN0zRN0zRN0zRND44+9rGP3Xnqqacu3wWAvAw8Be/O3bNAHRAFiHlsM/gHigJUjL/vHuwyBap8hyAW0AZiAm2+X/Djc1CRv/fsim9s53YSg3XXsJaf7ynkUD0YLB9j8gcCmXG0B2SDvUAokBrElQtfcZkcg8DGAwF9F9L3IR7HLL544vKrr7w6DvBZJ1CwXb6t4wkWlQHg+qoL+MnZuOZcrvpl1kx/0nbGlaf+dmCXK2ljQD4wfPYn/bvO1tX6ltvrr79+bx19x6S/djI3KjcxQORHHnnkUp/KL7/6lxdw6/oo7WK29ta3dnED/trk5v4942rPQFj3c4+8dt8Wo3bXtfvFubjFI2N03VsnOo9TddZLLDG6dqR8oxh8r8/lYT0cy8H9+9JLL931mKbptmoAeJqm6fbqTQFgf0D6g98/kMg/TPwx749M/9Dwh7FfGE7TNE3TNE3TNE3T9GAIDPOuX5D3+eefv4A6uyWBK+/zBa0SKOV7AyAKUARFQTXnQNzb3/72C7z1vYJdltp8t+CH5fUF2TxpTH3jAL8gI0ALcIFz/AK/vovQ33cawV2Pn87XOPlqC+KJbWcwiOu7DWPw9Xjg4ufHSB3wBwoCrgE/JR851EcZqJOLHZngsTptgTsl01dbBl773gWUtH4AZ3H1Ky++Z9/afU/DANJ26VJj8yW+1qlYrP5K66I9aa+N+R5JHegLZloXKob1dG2Lf+an1Kf8mhO1PqzHOFevr36VQW6+pN59J64SuJef9W9c9a6j6+l+lJudxNoDv9YfnHcPyrs5yEE/vqe0uxf43KT61t65XPQ1xtl26qZ+p497xLzEcQ0+97nPXUDwNE23VwPA0zRNt1dvCgD7Y9Qf1f4g5+fcrzo9wsgflv5h5o/faZqmaZqmaZqmaZruf334wx++89xzz12+A/DDb98DgE6AHhDa+1FBJ/VKbaATgOr7BeAYSLOT1LFdwY6VoJ2dwKCcnbZAMAAG1oFovn/wfYOxvOu3HcDOgVx9wS5wUmx91IFi4qnjF6RlwDAoCAaaj/wBTn5ZvgG5jsULJlfPgnF0vq9XG7U+7NwFTOqAbWvJDzSXHytu61pfudS/2OCyudSuDpzM2qWrjdVPm9Ia6lt/7cz3QK6X73vO8fVhzq2Z62ldtKk7Y7cL+ITfZ3x+rvN1/AC4+yGI3Lj1rb/5aT/XxH2nvdK9Yw34s3YHm593VPOzE129nOVbn8YJ9JP7R5/my7SZX+tA6jtO13XiqLOW1V/3cZ51fqp5knv11VdfvYDzaZpurwaAp2mabq/eFAAmfxj749Gvcf2jhvwjpUUHg6sAAP/0SURBVHf5TNM0TdM0TdM0TdN0f8t7Uj3y+bHHHrvs+PXvf4AMEDvhJ9AGHAbHAnHqAEkQE7hT+vG4Y7tRffcApNoNDOqKD6zZ6QqABX5BLOBXW0DX+EAxwKbOufbqjKEukAsi2t0J6pH8xKk84XAmzmnVByTPnar6UwCQTtBKtVkv359YR7EARLmJLQ6Ial3KoT7s+l27fKw3oA1gmrdxqXG1d216tHHQsNiVXRft9b++rgBr7ez01f8EuHTmD6Rqr5+24ivlpy4ADtBTPvq7r4pXWwYS62cO2osPilpna+sc4FX6Dsv3WXalu4/46e86iGWtjKueuX/UlwPVbk7GY82/e0Zd0vZG4idP47d+dB63btUVu3p5yadx5PwLv/ALl+Npmm6nBoCnaZpur940AJ6maZqmaZqmaZqm6cHVJz7xiXuwF/ALqoFTYO0JnQCn4Bmo5l2+QKTvFYJmdgKDbnbmAmJigcMgLT+gNmAMbgJ9wGyPf9Y38FsdH+fi+YG6XNSJxbw/2Ph2IquXI7AKruUToBOPvfbaa5e56iMvsI8BvmBhcJDUt5tV3wBcBmzX7hHTvQ+XBSIBVXnwkYfyhKiZNWZBULAXJLW24hmvNuNaQ9JXfRb0rN2Ydh4D0AHkAG86+wO0doG3XvzOObP6F+PM3Zoa37rw5SMnRtYWjG1Nam9869W7nxtPbD6utXXl515SajOeuYG5jdV9aCyx+FpL191aaHe/aQvQuy5+wCCH5tTY5S5fddQadV3lSo7P8pQ6ufAtHolz6uzr+IxtfDmr0889//nPf/4eTJ+m6fZpAHiapun2yl+N/+mniL8t7zrxD4BpmqZpmqZpmqZpmm6PgMePfOQjF0ALugJxYBoFp+wGBsoAM/ALFAWXvO+XL1AGEoOFwBmIBkwGd3u3L0ALBNrlaxxgD4AErEBesQDeIK9dwGIBY84ZP2D3rAO97AQubvXyr+Sj9N2HY/1BatAvqBfYK0bzB9fMXWl3dALcwMYgMoCun7ikTR+g0hryz9Tpk9l9LQ9txrSO1rwYTz755KWdtf6Mn7UtL/21g8XalNrN2bysrbiBTzG0+15IX/GLm7ku2q0HvfLKKxdfMfSVo/zKvfjGFss9Vv58+MvxLOWvL3MOHPthgWNm/fR3jVwLgNe9pM1YILWx1D/11FOXuOUAmNv1K9d3vvOdlxjWv3o+7n/+gLIY53hMXu5p+VsHZi3VG0s86+t+cv3dX/qf0qc17tyx/sbQV4zG6J51npwnfZ27P1wnccg8/tf/9X+9vAt4mqbbqZ/+6Z++87M/+7N3z6bpwdJf+kt/6c5P/MRPXP5+/E7l74W/+3f/7p3//r//7+/WTNODqze1A9gf6k8//fTlV5L+EL42/8jzh64P0TRN0zRN0zRN0zRN94c+8IEPXICYXY7AJZAEfJ2PI2Z2voKugBMYBVY99N
BDF3AGuirtkgSeQDngFsgCpZQAInDr+wUQzW5LUt8x+ByQff311y9jtQtYDLDY2MYB3tSBbHx9JyFuvnI/DfSVM3BoDDFYvsFk/VnngeJToHagO/CWn+9N9G/cAB5Zw+tdvmRO7HyHcP3OY2t1KlgamATKE3/11lB/1+XcZUvamHU5+2uXz6/8yq9cgKr1AkrNx/c+5ixO/X2XpL/vlepP4jI+7pE3yl+70viO3Uty9cOAfJh7QWm9fRdlTOdd169+9auX/s8888ylZOpB0eC9ay4Pfdwz6sXzvZY1Mk9tYHTt+ql33cvRuNbA2lhf59ZMaZ2sQaX61qSSHJ/1/MTufjvvAbour2VsOZO+rt1LL710OZ+m6fZpO4CnB1n/w//wP9x5/vnnLz+eu2ZVb2T+jvC3xc/93M/djTJND678tfg73gHsETr+0H755Zfv1kzTNE3TNE3TNE3TdD/pD/2hP3T5IgwQ8yWaXaztrAWBnYO8wBLwBygBTOCcY7szgWK7f0E7EBkQA3TtqgTVArh2ABuHP5DmGMQF7fgEdY3t0clAm+8d1Hl3bjt7tZ9w1Xtw9Q/kqgu8Ofa+3doDapUZKQE4OsvazRtIrE48cg4QsmCjLyNrAxPPNutaG1NvbSuBR/Wknz7qM9/dBDedg5cZeGyXrfxqr7/SdznBS3m5hsy1099ubn2tuTUqLh/guv5Mf8C0GOBvO2abt/rrGOcuYXmZtzhM/kDrE088cW9t5F27Nj7tlDaG2PLX5rgNDLUbtzaldv2trXvUF8LaHDM+5sLH+OA2H7HkzNzDSuvkPrVmxuGvnvhod5/IpfvIupqbOqpeP/eYcbqXtXWvpu4NdY4rmfWUhzjO/eDgL//lv3z54cQ0TbdPdgD7//h/+6f+1J3/12//v32aHgT1/zx/K/iRw5uVH0X4O9P/P8//v07Tg6L/57f/lv3f/ubfHACepmmapmmapmmaptsogIwRmAv6AmxgFmgFTgFvoJRHGiu1B7mAXr4gl52wwT2QFqwFzYA6cBkMduw7BDtKwTX1QLFHNoOzjkEvcFed2M7BXbldP9YZFNMGIlev7rTA8nVbX/jd9MWfc482lgcQ5zHCyRoE8nzxGMBzrM26BCx9Z9IXlKSuNiUIStqBSOAuGKp0bRqjNqb/CVn5W/sgqGtwPqZZbNftjOELU/2ZNn2VfOWtr/kwsVl+4geoz3si0158PsbTj2l3XfU/888HeGUf+9jHLrlrdw0aIzhrjBMiF9v8zzVoDtZMP23uB77uR4945tcYxefD133aY6ABVbGCwO4P97P7yf3pcyAGa219jrqvk7XVR+4sdSw+c+5HE/mf0tb1VdbXmPpaD3N27pr+jb/xN+58+tOfvvhM03S7BAD/6Isv3vn//o//453/z177OD0g8n89P7X6/Gc+8zsGwC984AOXx+MO/04Pov7lb/99+//77/67O9/8F+Q0TdM0TdM0TdM0TQ+8fviHf/jOc889dwG/dvcCRUAV0BuYBa9Yu20Dp+Q9qyCb3YUAEyBmp68dvsEnscFZjzQGi0Ey7/61Y1g9yBskBnfBLkC3d7o6DwQDbc4DxOLw0/eszzx6OlDYPK592AmG/RhePzuZjVnc2s1dH1BZXkD2GetcL/3sOhY/U5fxES874+Sv3rjFO31qY2dc44Pvr7766jfFNi87QF0ffvoZQ5t5Wn/XXWlty1c89wKfc36Nza/6cii/cixOfeVQOzVX4s/HbrWkTnvQM3/n7ltldazc1J/AFph1Hzp3f9rM0COeGRDtnnEfO+b/xS9+8XKvO2egsJ3rdqo3T+P4DMilPBxTsY1PzUGZHOdPrcEZ52ynM8YZS70cWOtm/V988cW7HtM03UbZ+Tv4Oz2o6v/zb6acpgdd/pvvv/0DwNM0TdM0TdM0TdN0i/RjP/ZjFxBmpycQC3SBfMAsuAXQnVAVSAJmwSTSD/htp6Mv0oBYfdWBayAaaMvPLkrATLvH6ALBxgB51TdWu4TlYlzA0vn5WGgg0y5NMYOO6hnoJa72dg9f++THjOd9xPqA1PKx8zPYWX8gF/SVD+jcY6sZaCyWuI2jrO4Ectd5BDk9glAecjCG4/q25vUvhpy0uTbqyqfcrRlIbc0Bbedn/2Lrf8Ld2o2n7bo9H/Xn+Kw2ANrc8qn99DkBb2t1mjpgM7hZLrW5D2p3D5ZLY7IvfelLFzBrd3qPc/bFL/+AKghsN7B1cv0Dwn1JTL/4i7948XFtXHv3sHvMdeJjLCUzNzmWt3HEazz5Xat8GTUP86wfFZPUn+ep+Vtn/R2r83ln0zRN0/Sgyf8TT7j7nZxP023RAPA0TdM0TdM0TdM03RL95E/+5AUOgXq+BAO1wDGwFrgL0AWyWHAKcAKR+IG84BoYpq844G+Pkgbo2qELsNpZGdw9x5SHscA0u3XVOe4xvPJhADSYDMCVXzmCsGL2eN/6BAvzEcPuVrDPzmO+wKgxe1drfcSWmzZ5l5v64pdDwM4a6c/K7fXXX7+suzag17pYPwZSg5OB8DO2vnbxehQ12Gt+8tYvQCzPQKFcmfW1/s1PHd/iMv2tR3nr35jlcLbVvza5nP2rV2b6dN8Uo3qle+IUn/yUAV5f0vrCtrYzL+tTe1/m6mPHrnM7dkH1zgOmleo8etq62gUfqNWe+Yy4r83ZvQ2mW2P3rz4+A/rxVZJ2OZaT8jrH7JTz1qw5kr7XffjQGUNdMeqb3BN290/TNE3Tg6bgbvadnE/TbZG/CH/mG4ffkD9i/UPhJvkHj3fQ+KPXP/I8Gsgf4I6ZX/j6g9cf0NM0TdM0TdM0TdM0/eAI/AUEgUcwCCAEikDVoBUp1fmSzL/xfVHm3/v8gTD/5vcdAB+7aH0v0DtVQVKQzG5iUBPIC57aRVtseQCB4GZ1ICI4GbgMGPYe394BXD0ICaqZD4BbPTMvJh5Ip698Tr98Og66BdAaq7balQS2GR+UNUfSV12PslbvkdhyMH5xjAn8FiugZ831B8DBdvOWM3/1mfUELq25vsWp5OP7HTpj9yWoXLRrq10fj7WWt3zP/uaqX6a/a1Y7FVsJbAOvtVH3UuaeMYZx6Wxj7jPXtnUW+4zhmgKb2sSSNyjrxwnOle4ja1je+qt3TeR/+mir3dh+UMBHLOuqv+/DjJ1vAFgu1P1TjgHZ7jHH55pQ5/meOs9rr+4sr/s6lpectbmPfb7+yT/5J3c9pmm6LfJ+1Id++/8Vb3/55Tu//w2+85+m+1F+AvWbP/3Tl78l/L/uzZi/Vd/ysz972R35zf/nnaYHQ94B/NqLL17u72/6yeHjv93gH1HTNE3TNE3TNE3TNN3/8u98j78Fx/zgGzTzvldQ1xdgHulMdkwGjQBiEMwOSnCMgV3ApC/aAEgwuB2s4tldq018UM0uT/3AZZDU8QlVwVm+2u2UBdvaZRswA5D51IeBlI1RHQPpGPhlt61YYosVGNV2WnVnSY4Dk+TLwuTYWgGFSuvEX45gbW3Wy7qLo4/S+mrPxyOHa7PuPUJbXwBVXf2NA1wCksy6P/HEE5exy4kPE
+fsD0bWTwzt4LR1kzc4qF8+wLL7otz0148Bo0pw9sknn7znU3vmXnjqqacu7c1N37P/+Vhm89ZWjtofeeSRSxsfa1Lf2q2BezI/c/B45sZxT5mH/OvvPlTy05eP+z2gCyTLXenz4j7jy69HRBtH39bYPQaKW0vmWpivNvee+xAktt7nfUady5+6noyK4VybGFQ7qS9O6921rJ/P4F/9q3/18rmbpun26Kd/+qcvj93/b//Un7q8D3KaHgT5f5v/b/o7xP+b+3/nd1r6AaK/Pfu7cZoeNP0/v/136v/2N//mAPA0TdM0TdM0TdM0Pah697vffQG/gJ4SpAWrwLOgKDgGoAUeAa63vOUtF6gLwIJddqNqB8HAQrtPQTRxwS3vmw0MOwbm1PuCLbALpAVjwWHxAVP+fIK8fOwsFq86BurK+ybwW0kAMRgHuKljfbmnvD7uC8GbpO1U50FcXz7qK09tzPpZqxPy8tMGzLXOysBl7fXJznZxg3oAqLX3HQ7p6zqCjq4Xc3wCaHX1Nw4waq2LX1z9lAFm0h54Da66FuZWu/jqixMwrd2Y2s8YH/7why9t1uWEzEwc94Acznb9wFf3FEj7sY997F4MY2grvhz1cT/zlQ+f4C8DhPmYrx9FiKPeDxzM4fTTLg5gLB85Gsd1cr+Te6H5umbuD+vcjxu6T1P3oT5Mu+tVm3w6Vt/97JjV//RhzbP+ruv//r//73d+/ud//nI+TdPtEAD8sz/7s3fPpunB0mc+85l7P2R8M/J0mQ984AN3z6bpwdU3/9U5TdM0TdM0TdM0TdMDoeeee+7yGqfezwo6glQgFkAakA3KEmAFugGzdveeYA0MA2YBMOCr3b3ezQsQ25Xr2OOfwVe+IKV+/OQACItr9ynwawey43JkdirKoTr5eaQymCeOelbu5Q9+6WtM8zsh8Wl8M32U3icLMAfTapevLwnPd/mSvKwRqKc848unvMutcZ2XO5/epXu2Z9WVU331U4Lo8ibt/OVqDezydF5fcVxv6ydnZv3VZ61r8Y2nb/GbU37GOaXtnDeAql+QsvrTrOsJMuWsvrkrtQVHyX3sflOv/Utf+tIFxrJ8wHM+7mdg2n0JhvNxP/NTDwyLB466b93X8gaDfaEsJ77BXj98ENc97B5379oFbByfESoP+ZGyz5Dj6tN1XXMg9XJI5zFdn+fPrA2r3vV/3/vedzmfpmmapgdB/l/u/6/+P9v/479dyV+/aboNelPvAJ6maZqmaZqmaZqm6QdfzzzzzAUAB8IAO7t/gSEAGGRLvgwDc8EsX4rZ7QjmAmN2EDoGDH1XYOejOAFiQDEQDJw5BsZ6P63jYKI6kE3f3usL1NYOwMrFzuDgNIBlPDnoF2AMboGEHm0JwAFn17uD82GBsY6NZ318CciPgEI7OtWLaawAne9L9CP+4DBffcy1+Iy/fJjHaFtb9cY+2xwD26dqsxbWrL6M1OdjDV0vcNK1E1+7Lzlds5v619fY4Hz1ysZlrp34tbHaGsPaiZ/PdTuwWjtVnxlDDsn41r1219y1V2ee3j2tj3tTu3o+7hnzd4+63/nIjbmWxXHuvgV+3bPyy/wYQbxAcWC4z4M1Jj7nu4Ar7SKWhzlaWzKfU+4ndq2uQf1S9dQ1OH2u2yvP8eWW3Ld2AJvbNE23Q94B/NnPfvbu2TQ9WPKDNz/Y8v9wf/N8J+bpNn/lr/yVPQV3uhXy1+E3/XW5R0BP0zRN0zRN0zRN0/0rj7kFgEFJUMwORRDu5ZdfvoCwHssLXoJXoKwdj748A7wAM4ALQARq+QFv7eS18xQYBgf5+PINHAVze5wzEBbs8h0DyBkMBnT1PXcgG0e/E+Cej3zOrzaQSzswJ6fqKk/gy0Bic69NTDCZgpLaGodANPDMGgYcxTcHfbWpV3oEMH/ALehmHfVltZM46rRn57t0awMrz75MrkBkwNc4vsfRT5t+PT7ZtXRdut76yzcomtWuvzbXW19xrL9HMJf7F7/4xUv+2gPEgGn5iae/vsw17THUzPi1V/7wD//wvfzNp75iKwHY5sHHvNXpz9yD7m/w1zryCdzmlw/wayxzUO8+1g8Udn+8+OKLl/7a7XSXg5Lxcd+ok0vrIMfeBey+YNbCfWa+xiP3jjF8TtxrN0m//G/yUddaO86c155a7x4DzUe+f/2v//U7L7300l2vaZoedO0R0NM0TbdX2wE8TdM0TdM0TdM0TQ+I3vve9152QtipC44GiOyKtFMVvPTvfruAQVfgFyQDpYA1wEgbgGtHL2AU/AVu9XcMMIJqoJgYwJZ2cRwzIMyjm4PDjuUS/NXeY5fVBVaBMrszgr/VZYAyuNVO4bONAW+Z9wGDX8UHixnIm48+xtDuuDXTRkptza18klzsKDFP0pcCdbUD8qRdnbyU7KZdwGdf8PI6J+Kjr5L41E9818j1Un/m1bisseurX2aNgVJgGPC0bvo4BhYBW++WTmJo01fp/nAPyb8ctemnnYnt+hTX/QjUgsnamOssT3356e9+1WZMP0J44YUX7uUtjntZDPcqH2tmHG3lUHx+xjC2a+veN6/yMwfjufb8mB9MyNN9IRYzlnuktVbKl5Ri90OGZF1O/3TWZak+1/WOa1Oy1pqMrc07E6dpuh3aDuBpmqbbq70DeJqmaZqmaZqmaZoeAIG/7dwFuMA3cIoBWuAPmAukgXoAFz+QKPgGaPG1qxE44gMgeoyxvmL3HmCwDFADVAOx4JedoUCZd8QCzyCiXcNAcbuAtQPQoGhQNTCmPshaqQ3As8NY/sbLP9MObinB5t4vzFddYzjO/7p/Vhx95HeCX6b9jNVx/Zyf/s61kbK62quv72nFaazWj2kH0YunTnvmGgTZGZ98M3Gzs69rBfIC26679uAn42M88ROf8i0/x8HI5n626eNeM5brZUfzKe36uUcz/u5h96t29sorr9yDv3wAW/ece1B/kFgbA4eVwK8YzDnQ676xazmf/PwwwhzE9XnwIwvH2vSzmcJ6q+MnFzJ2ErPPHNVW2Vycn23neWpNzfU851cplnWuzrkfiPhMTtM0TdM0TQ+2BoCnaZqmaZqmaZqm6T7Xe97znjvvfve7L4AHRPNY5+AvA+xALrDqBEngESAFTOljt6kdjyAWfxAQ7AJdO/a4W0BQTPAXCASF9ecH0gYOgT7gDSgGUYOGdgsDy8HiE0aCggFGx0FCcQLI1Z/tTFzgWx7inj4Bx9Pqd5r1qe30U5/le/p5zHR+1TUvx3T2V5fZ1Vzfs19z0JbVVjs726prrdU19uuvv36BlHaD96OAxq6/tbfOzHHjl5e4xWbinOKnPn/lKeeZnMBc8BRQdT8GKhtTyU+bHbbGA3U9mjo/7SRWP0wQTxvV7hq51/2AwT3Cx/3PAqnl5EcQPg/Gcs/3WWHucXF8nqylz5j73Y8krC8f89CnsZXq9LupnvSjm9pO1U6nb3LMzL9r4Vws8+6x39M0TdM0TdODqwHgaZqmaZqmaZqmabqP5R2x7VgEYZnHP4NYQBg4FQgEggJKAS3Qih+4C/461x/UBXLt
0gW17Lh0LA5/IAnUBcDaHQyMAWHAoccvB4/PXb4AHkh91gXyeocw+Fi+4gBx5yOf82fAFrAHEPPR7zT+N/V5I/t27aeKnZ1+zs/xwdfaqj/zrM34zlsb5dn3uh8758X/BLTB3h7X7TrV5rrqX2x2gmOl9S+2sevLz25ZftY/Nd9yM4b8KfApXtLu2mnrnsynce3C/YVf+IXL/QdeuofVU6X7XT7uf/5iJW1Arh829OhmdaePOrHcg9bKfeyzFcxl5ebYZ0Pudgb7bDh2T8tRbGq+GYkhv2Keyic1FilrP9fvrL+pv+vkehBfP9rwY5FpmqZpmqbpwdYA8DRN0zRN0zRN0zTdpwKpnnzyyQsYbWehnYuAGvAK/AFAQFwwzSN7gSC7KYFeQBDA0h/IBWBBst4jLBZwCCR7PLMdlgCu9/TaScnHGACYen7G7V3CAcXAmvGCg0G+X/7lX763c9dc+INWHh1tjqBVYJH/eWwHKyDdbuLaajfnb2XlkJ1t30p8zeEcj+pbXXNlSXt9azt3EJtH7Vlq3LP/+ZhndQFaa+36uA6tDX/HjcGMre8ZOzvXhM62M0/3FLuOoe1sz6f6TA7uX+3UeGAqYAqcag+gkjikj/uxdz5rtzsXpHXvuq/FYe5z7Y794MG93+Od9WXuKT58mUc8A8sgcvFfffXVS5173f3n8ev5+wwGl5sPdaytdjp9qPWu/uyXrNl5XfI/Y6nreuSr3zvf+c4LrJ6maZqm2yA/fPq5n/u5O3/7b//te/bn//yfv9s6TQ+uBoCnaZqmaZqmaZqm6T4UaOXRz8DT008/fQG3oB+ACvopg7/BH7KDElTlE/i1MxLcAlw9jlc7oGr3qHGAxHb6qveoZT7qgV1Q2DkAB4i1WzcIyMd4QG51wBQD7oBjbQFFAK4dlfkDV4FDpfnYpVyuAc6znZ3HNxldH6fzOKkrprLcWDtdtdVePsqzr34nhGXqgVCg3a5na+maaDt32Z59snM8173Hc2s7d/E6V29sVnxtTP9i6nO2seoyMeR56ozB9DkhZnFam9MnOArOAqx+iJAf6BqAvQad7l9+L7/88uUHDKCsHbynnxh8xeZjt67d83zyc7/z8ZnwWXAv+4zJKUDsWA4vvfTSpQ1MBpx9ltoZrE4ccZVJf3WVlF/q/Kwj9c2h845Pnf34u975yd1n5vpdy9M0TdP0IAr8/Qt/4S9cfsz1x/7YH7vY//w//893PvrRjw4CTw+8BoCnaZqmaZqmaZqm6T4TaPW+973vsmMW6AOTlIBZ7/wF5k44BFgBrWApSBXMAroAISAWROMD6PmiTD0YCf4CuGJ6rLPH5YK/IKUdlsb7yle+cukb6AXjgFw+PQY6ICgvYAp8BNj4q9dHHDFugrpZO4ZBPj4BxJvshGTnepy6CbRR9cVgxay+/FhzS81Jyc6dutUxgC7I7hqeO6e1uzbi6PetAPEZW9/66yvv8qwtKzajfOtrvROfs6/roDQ3smbnWAwQdS+Aj62p+9H9FuA1F3XuTdffe3fVl5eYTIxgrFjAqzjanPcY8yBt/cTQ7n7/whe+cBm3XcEE3rqX+YgN7oLB2vVRJ57PhPucn/xqB4vds6Dyww8/fIlH/DI5lH91Sdup5n1df/ah6zh0fd51IPHk6Ucj0zRN0/Sgq799X3nllbs1d+78vb/39y5/Nz3zzDN3a6bpwdQA8DRN0zRN0zRN0zTdRwKgPMIVdAO6wEJwDQhr1ydQBUAFmjyq2ZdfysCvx9QCqMAsiAe4OQ/qigU8gr0ALtCoj2NjAMH1B/jAshNcqpMfmFddQA/A7JHPwWJmjBMWqwvgdWzXr5j1OX2ya2iWrsFYgI2dbR0X5zo269z4Z652qeZTG9OmrK/SuUdsA7/WDkgvpvXKrJ/23nF7rikTo9iNmzV+bY179rfuvght3vXJxKH6l5Py9CMxmkPjK9W5Hz163DUGSeunXb8vfvGLF5jLzz1qLP0y59oCrtbCTlb1SSxz6T4Xl+kDzpafz0hx7J53r2qrDwV37ZTv3cHX+dhx7DOjTV9x9fP5E5tfOSQ+5sFuEt/mVN+zjs56Vqz8qqfr68Pe9a53Xc6naZqm6UGW/3/7UeM17P1Lf+kv3flzf+7P3T2bpgdT/oL/mW8cfkP+IekP1GmapmmapmmapmmafrBkF8OP/MiPXGAV+AYuPfHEE5cvt4BU/6YHouyiBcL4gW2AECAFTAHBwK0vwzy62bl+4COoG4D13QBgJm5gli8obFwQEowFngK/zG7R6s7HQOvjUcTgE+DWLmV5AsLgWTA4KJjpyzxK2k5Zfa/bOqYTlFHnytNOXfu80fFp5mJMZUCPuU71uW4D4a27naqugf7qQWBrnPRVT3x6ny01vvago3WxrrXTObb4rjOVl76Z/tqvYzPH3Rv6kfoe0+wY0He/1UbBUBaADLQSP/elHy8Ara5hP0wo72CqUg7uS/V2wSvF0p6P9RWvHb7Wul3qzo3D5Gt8j3CWJ9PPnNzb4llz8X2m5MSfD7Nerh/5QUa5qpOjH0SYc+tJYrmWzpVZ7d9KZxylWNd1Z0mtM8ldfvXzWfZZtabTND24ev/733/ns5/97N2zabp98oQR/89+8cUX7/zJP/kn7/zUT/3U5f/bdgFP04Ou7QCepmmapmmapmmapvtA3mfqyys7HsEcgLUdu0AqsAbgAV5AGngLRIFdBFDZXelLMNAHDALJQEM7SIE3fcBeMdplChwCaMAvuFYMMBbolAdwC7ABuWLoCyLLi6kPinlENTAW/PU+4QCy+hP+BsgAbXmIq98Jz9gJv9gJvqq79vG+XjtFk7qbYlVf23U9KcuZnXJuTtbH9XKNWtuzz3V85+YaWG/e+baG55qZU3Gqq52d8fU9rXaq/2li1W4XL3AKwp4+gOK59uVK7j8/GKjdee3l6R6xc1ybe1zZfABa95a6+heLT+O4P/mDt770BW/5aVeyYKjPRpAa1LXTt8dAi68d0M0nSMzHGOVu3X1+fBZ9Jh566KGLf8a3mJXloFSXzmP9TquOz1l2fOr63DU+5TO7R19O0zRNt0H/x//xf9z5E3/iT1ze//v5z3/+zgsvvHDnb//tv713AE8PvAaAp2mapmmapmmapukHXO9+97vvPP/88xdQCjqBZY6BWGAR9AKrgCBACpwDmQJDwBUQBvza4egc/AUk7R4O6gLBIJvY4BnYBWiBvXZ68gEjtfWuWiBJHXBnDHBTToCTfkAcX0A4kMlAM7BZDucjn69NXDtP9XWeX9AvM9fk+GyrzntqjcnkLVa+9Vee9q3qKs98xTofA+3cfI2nrL6+2l2PHmWcqZdfVuyzn3oxrY05Ob9uB1216e9R4epr6zoxPr2jmPg3LlPv3nG/AJzn+Bk/91vwsbHyZa776dMY51ja3L9BWMDVPZqPeHzAU8bHjwv4BFddAz+ayOdaYpjTCW4B7eavbCzx/fjBjyU8Kl1bRj4nwL7HWp/g1w8p/PhBTql5BX9d+1PFTOc4N6l2Zk3O8rTWmFr7J5988lJO0zRN022RRz8Hgj/
60Y/e+eQnP3m3ZZoePA0AT9M0TdM0TdM0TdMPsOxSAGqAUoAKTAWQQNGAbGAM6CFlgAxoAnWDv2CWGB4BC+aBWuBkIBiw5QeoAYR2/vLnB8YCZmAXaKsdDK4uGAw0BtVO6FuevVtY/oHDAJUSdAMjjddO4ny0nWau18fKjEA4O4jlI+fGUQKS6exXX/EoaHaT+JZbJblON/VTx6ybdQB/qXGbQzkqWQA3v3Nd87ML145pMN81bY0DxOcYzs8Y+l/nwQKWr7zyyr1c5X+OzVxzOdbO3G9+bFB8FgTVXh7qi+Pa23nbDt76ZXKTj3vVjxDEV5eKKV+Ala/xfB58Ftzbxj6BtjZ1xXbPO3avely0z4b6YLLS/SSuz5acjeXR0e41O92Ba1CYj5yoeZePPo5PnXMh/qe0V3fdtzlUFsv4rld1zLz9N2WapmmaHlTZ5ftzP/dzlx9TnvI3yzQ96BoAnqZpmqZpmqZpmqYfUH3wgx+889RTT112MgJaYFo7YsGxdsWCO0rwD9gBf8Al0AogA7n0tTO4nb4edwtg+QLMI30zdUCu9/V6JG+g1ri1Mf3ECAark6Mc1AFgwd+gnhigHZit7gSIjgN8HgttrOBl81Myc7ypvK6zA9kcjKd/bY3nuPrshG03naezns4Y4tau7FgbYAdugoZ2k6br3KtrTeR8zkFZvblYf6AS4HR/APzqmetw3ivFaO2L4b6wc7ac+VBj8Tkfm10MbeWiDM4CrX68cPoA0cVvDGviPmPGcq+aC9isjY++Su2u6xe+8IWLfxC1tvw61+a+dr/KR52YDKjl59iYdvmCtoAylaPxAsnu+SCyWI3VI9ID0vyZ/MQm/sU0Zj7slPZT9Tul7lrNje85Vm3WpjbyYwj/TZimaZqmB1Vf/OIXL38T/dk/+2fv1nzjyTrve9/7Ln8H7F3A04MsP+38mW8cfkP+keYfR9M0TdM0TdM0TdM0ff/0h/7QH7rs0APQ2gnbI5cBNdAJ6AOYQB3gB6ACXtsZ6Yst/QAwYAtQ9W9+uxvFEddjasHBHt0MvgJE/H1hpg2sNJZYYhrPOPxBwfIByoDb4CPgBAjLsf7qagsM6qsEncUO/KrLAlgBt9O02Xlq3gCdR/JaC/lpy/JlZCwGDoJ6VBtpq+z4WgG3jpn5AvTWor7M7lC7TtkTTzxx6VNf43Z8reIy63iTmpd2cTp3LB8Gqva44nzKN1jpul63Bym1u7Z2g2tn2q219kCnOjtLlXy056M0f/eb+9KPE4Bwa6O//NyjcgDuG6f42vV3f9hBLZfmJ4Z2dXzdt/J1v4sln8YxtrVyP7vf5OGHFo0lTr7uO/m2E9hY5qGNWTM5AcDgb2um5FspZzKGe7HjPgfW+410trUmVL0yeyM1f+LX5+Gf/JN/cqmbpunB0/vf//47n/3sZ++eTdPtk6ei+PHaxz/+8Tv/zX/z39z5qZ/6qTuf+tSnLk+A+Yt/8S/e9ZqmB1PbATxN0zRN0zRN0zRNP0ACvn78x3/8Aq6AKaDHMUAJoAZGKeATWAOMQS2wyi7ToBkAdu6+BXUBL9DKTkYAyw5G8NSOwGCw3ZpAmZzAXX20yUO7OOCV98y2o1i9OiAZyBPfPPKVPwt6Mb7gmTzFri3Ay67PzRtcBm9BPHmZmxLcPvtl9VMaI9OmngJr6fo8qQfUTnU9GuuEdI5Z7b54zP/aqFz5nmt1/RhobeeaWpPGV1r36zWn+levbIxT2jJt1zt46yMWga52El/7yCcDQ8FU19sPHNSd4zAAXd/gqfj8lG9729vujanNfe5+b9dx4+rrRwX5+Iy4n32WzvcF251ut7FjRmI3J352Rvt8FUd8kFgsfvKRt/ri8DvHFYdvPkrSru07lRitxbXUnZYct/6OG9s1sJN6mqZpmh5UvfTSS3f+zJ/5M5d3/2beBTxND7oGgKdpmqZpmqZpmqbpB0gf+MAHLrtDgVi7eMFfoCmQZwcfA3AYkGYXr12PQK7jAFQwF7RrFyRg2s5FddqAM0AMqG1nMKAGpgGy7ewNrpZLj5K2W1gdA5LF46/vCSAZSAZCAWb1B9H4BQqVpwWuglfe6dsu0eIHnm+KcfatPNuU1wqQ8T3Lm9S1KF7xzz7Oy68y8dNfaZdK/SrF1CdTz8r9bGPqM+fn2pdXsZ3XpnQ/gJ1ne3EzwLQ5G6P+XQvHgGZWrsUEPM97mJ3xM5C8e5l/70w+Y/U4bffb+S7l5i5/97r7xf1th279ldqZdTcOYCsfpZjuzWLx8aMJnzn3nvf7nuvjWM7mXCxzrU9rQa0NnVCZWpM30tn+7fzImI7Ng5UHM8c9BnqapmmapunBk39t7BHQ0zRN0zRN0zRN0/QDoB/90R+9ACr/LgdnHnvssTsvv/zyBbra2Qt+2W0L4gC2/g3vHKgFucA7ELh3kToO0PIj50AdGAbWAkB8wVq7LsV2DqiCeT3+OfAL/gFWBE4H/MBcYMmY1Z1wzM5VcNpOyHY8GjO4FpzSp+MglWPwzRzPfqffeX7WZ3SWDBQTj5m79UwgqLWy69kaXesm8CYm8FdMsF3fc7xzTKAdGDQOSGh9+FnzM5frXItbG53j+jHAKXWuGTMf6+iakv7BykCke0N7c6x/fq57Y+QT1FW6/q7z9SOca3d/ygF4FDuf2ovhHmT83e/iNY7+1s9n5V3vetclTmugjY8fTpifHeJPP/30pS3LhzWWebsG1t4ca+drHPedMYBfMcpZe/OybuKYbz9u0M+PJ/R1r5O+531KPl+Omfbqr1VbfnT6dpxPx3zlxYzrXD7m8pnPfObiN03Tg6U9Anqapun2agB4mqZpmqZpmqZpmn4ABP56FyrQBSj1HlMGCoO4gFaPSrbzsR3CQR2PqFWnD/gEPInj/Wf1CW6BxSAiWAUoAq+BXyAK0AWJAF3nHqULFAFa+gNHwFlAWJwT/DLQVxtIJhc+TLz8lIGwjORsDkCi+YB/csunMTpWglyV38qoMmgIhllb48kX8Cb1J3ClM04Ajhwz8RhZ79O/emtlbYFB6yHv+jMg8Xqc4tZePdXGAshnm+sQwHUNArz5qM8nwHvdHpz1vVH3Uu2BWX7W0b0K8Govr3yU1tdaG6d2fV1rPkC1Hb+uuXfzdp30FZs5tnY+M+XO9LcL3dryc3801uljDtbBmNZZfLHKhw9rPJDefVic8tFfu3p58wH2fR7zax3IPd+6GVd+1D1dG3X9T519k+OzH3Xu3iLnzb2+SmvnhybWY5qmB0sDwNM0TbdXewT0NE3TNE3TNE3TNH2f9WM/9mMXIAaSgjAAEjjo3PtOQSYQh4HBCdAJQoFxYgBPzvUDwOxsBZYBKj7AbhAZCLQ7EgQzFtgGTILDYJZzj1sGtew+BoiDvMz7ZsU0Dt/qzz7GkBeoePpkzYuBzOCl/KwBE1tu1/2CvpXfiYFdGakTS37may7yvR6HpTPGGavj+rVOHgmcxFFvPq1HuWmrLwPPzzHONr7qkmN1xak9n2KfpR3O7h/WOtTXPXLmfZ1fflSM2k+/60dFawMbe4evOvdvVs5+7MC02+
UbtGXFqT+zVvoDse4dfX1OykWduPobH1jmA/Y2B58Tu+TF4aMUWz+lz4tYfgxQvuIZ0w783qMrlnmD1nyc55t/82xtMuN2fJOq15fO88Zixb/2a91Of8dn/tM0TdM0TdODoQHgaZqmaZqmaZqmafo+6uMf//gFBoIwIKTdecHggCzwBLYGpAI7gBKAy08JIAXYAC07aNudCG6qA3pA1a9//euXnZzq2+VrDADZblc5OQa+tJ+5gEjAHIAGmKpX513BYoJrQVtt9asvO+Ea8Kafcc2df7A4/+v+mf7XVtyOW6+OTzNWY/IXU339GwcwvY7JwL6O+cnx3OFcvFRdsc4xmmNW3PzOmOe4TJ051H5KezG1uU7FT+Vx+gUUWfVnO9V+9mXlQn6QAMK69x599NF7Pu7H4Kjdtfnop39+vQuYL7Ue7j++2vrRQO+zVX+a+xX4NabPmrmL0dpl5dNnSV7u59PHfDwq2g8ibnqnsJyD0sXyuZaje00df+vWnKg5ntflW0l/vqd/MbsudMaUW/MkpZzsVp6maZqmaZoeHA0AT9M0TdM0TdM0TdP3SR/60IfuPPvssxdABHzZiQs4Aa+gTLAmS6ANf2AWnNIfPAJ7PaLZDk7g1mNvA7h2LmoDreyyBa4As+CnOjuFwWFAFlBul2pAkwFpwLK+zvUFu3rscKBYnyDeOYfgnT7y9Khe4Lv4p+9pJ1w7z8/2zHnAK0Cm/rqOXfet3XE5MXNRB7yC50AikFr9aWLWzzF4nPKx29ljrvNnrVf92oV75nraddsZC8w/deZlHMcBQmXj5qc8Vd9ys0v83CUsh9qbh3vF9XVfAL9n7Epr048OTjhcHP3FpmCqz0gQV+7eZe26+FzwUSd2RuqMZYxA6+mTuS/9YMIOdjnZyStX1ni/+Iu/ePn8icHOnJkfM4jFV64+Gz5XPV4aDNZOxRBbKTfHzVmM1Pi150PFI2tEZwyxlfzM5ZR1fuGFFy7/LZimaZqmaZoeDPmLb+8AnqZpmqZpmqZpmqbvsT7ykY9cgCtgCryAM3YvAq/OgVvQJgFDIBIgBuIGaHs3KVhlNy7QGzRzDo6BPiCrY/U93hmEDAqBxEAQH98NiB3Iazcj3/NxzEAS0CivdhGfbRlQKE9Aunfs6iMH7UEp5ZuxIBg761Jtpzqv7bR01pmz0tzMoXmSNuZaWP8kD/WtmdLuTztQQXql+Wqz1ufY1/20n1IXwHStay/X2pSuc7FIHbOD1TxAbP211xc8ZACo+Z7vCq6v9mK4jo2h1Ob6mp/70k5Z97m2THs704nvdR58jMH4ahfL58KPD4yjPj/3alBWm9K9Jj/3I9+bfNSL6/qJ1fgPP/zwpZ2pd83EkrN4zs+1kQO/zNg+K36gId9iZfrIx7Fr1/1fvbrui/P+oDeq17eSiXnTubmzJI62z3zmM5d5TdP04GjvAJ6mabq9+uafc07TNE3TNE3TNE3T9HuuT3ziExeIaxcjmOgYTAMSQTvgKCAELIE14DCQFIQCjtQDdYCx3bSgE8DlXAw/8K4fsAtyGcsOX/1qA2LtRgUp7RLWH4QCfvmdj3pmAV4xA3Lt/D0BMMAMUspJ3mIYX3zt+SnN1TFz3PlNx+kagKXXXnvtP9udem3Fuq4/c5CXXF0bZn7qtWtrPVgx6187kG7dXevrdy0zAL2xG/Ncj3Tmlk9+9a89u35MtHvGzm9wszinnFffcQogNqZj5bXcm643v34QEPRkZ1/mHnUepCR9rM2Zw5e+9KXL/QrOXsdRMmvpM2GHus8Rf/XFy0eMdtq7LgB16+MzSI599sBhn4keNy2WnPMXS97Nh7ne2tz/yvwyOSrF4H/G6fhU60JdS3bWJ3XyFEMs5+Wav3bjnnGs13PPPXdpn6ZpmqZpmu5/DQBP0zRN0zRN0zRN0/dQn/zkJy+ACDgloAYYBGK8uzdQw+wetCMPYPXIXYBXPR9wC5QEbv7dv/t3l3d4AqxArtgAl12TgGPvPQVhAVnHfAE6YExsffgG3kBi4wOY/IJ54BHwC7KdsDjAxuerX/3qBa5pFzMAV/8TlmXalMXoOAtWMW1Kqg7YMw+PHW7M2ljxQFFgu77qM+3Frk6c8s5uqj9BLjNfa2edlflnrQdrvErxapOr+lQ949d4pCy/4qgDZYFNoLPYtdePqjsNSO9+5MensR2bN1DqxwPuoSeffPLefcUAVHMoBpV/a+CerJ2Vh9KYjeu+z9TxKRcmDz9YcH/zOXNlcvRDiR45DYQX+/Tt0dU+N2B2Y2X8PNZZrj6/gC7z+RT3jMWnnPNV+swqxSbtzV+9kmo/VRt1rGwe+rB2NHcuLuWT6uvHAdM0TdM0TdODIX/p7xHQ0zRN0zRN0zRN0/Q90Ac/+MELdAK+7CgEXYCkYJR/jwOr4G6PvAUzgRzgCPAF87R5/yzQBiQBtUAjyAcYqw8aA1HAL1ALaLUDN6CrTnu7UuUE8ALLgcvAFzAGHLUbtjZ9mJxAJu3arttZMMpxcTs+/ZgdlOYH6lknQJlAR4DbWoFuHuMrVzmJHRQDFvXlx0cs45A1DIKVk53DIKH1rO40ktdZkvGYmMS3/I3tupR7sQJ+Snm6B5w3TsBQXT8QOPvWrm+PIq4fA0PNmb8fFohP/PS77n/GdI/pE6x0L+lXX2sPsram+nm3LhkvIOq+lUfx9OerrI11H7vnxNdef/ez+xWQ1Vcuxcin6+pau5ev48iTnzzIZ4XPdRzt5mYOxnv++ecvPuycD3Ovyc3nBlA2P3H55JdPOYshL/mK736RtxyM6Z5RMp8d5anzXJ/6nfUdK+UhZnH5k1y0KfPnY509Brp1mqbp/tceAT1N03R7NQA8TdM0TdM0TdM0Td8Dvfe9772AXeC3xyqDcuCXXYbAkJ2zwBoAA1CBNOqDWGCwekAR+AQ9xQBeARx17e4Fa7Xx+frXv36BP4FgYMpY4A8gHAxutylgVZ0ds8anczcvcJXZVSlP7c4Dv2IFoIJQHV+327kMljLzN6Zc8wHagDP1xpGDHJXa+eVL4pu7OQestZO1sIas8cSWt7YT5H4ra6zAGnjsGojneskBZNMOsiZ9SdtN7VQbCxCTvuqsd2UAtzzAR/eOHKxh90Lj6tN9pQxQgpXtLD99AsDm5l61rmI3T9fjzF+sQGkgVF/3rjhMXKY9iGpsfcV0XUBzbQFV4/nclJtr5lqJf/q0k56PUpt6pWt8HYePH1uYp3ZxlHy0Zfrl45geeeSRS5045mWMcnE/NJY60NcuaetrvfSRe3I/MWN1X6eubdfwWuqZPnwdi2+N3YcsNRflKWN61Lb7bZqmB0MDwNM0TbdXA8DTNE3TNE3TNE3T9Husj3zkI5fHqz722GOXnazgD0ADdAE2QBHYBQyBN8CMnblALnAGHvUeXfAY+AK5nIO/fMAbgE0MYNO/789j0C5gCvAYLxgMvhI/wA3g6v2lcg38ZmCSXbgBPHHU1RbIygJTHQPNwdJgmXGNw+R5xtNmDPXFUmbnuWP99AminW3WV0zxwG/rwk97arfx2a9juq4vR7Fav3xTO
72TdusbgHS9rf/ZdlqAVf4suKcEIGvTtzbmfnEfGVs7ac+sv3X1XVCPRDZeMVwb66PdY8a1i6M+H/BWGzVG15W51uYWSGb61m58pTkGSd3Lpw+TB1CrFM+85HqOZe3dV8Zq7cBOnxVtfPPx+RCvR53rr50Zg497RGmOgVzxfHatibmILVdjtWbay8c47id+fK7Vfcn4n/fVKXWs43TWnxJPPtrETXKQn5z5UD7A+xe+8IVL3TRN978GgKdpmm6v/tNPCadpmqZpmqZpmqZp+q7r4x//+AUAgVyAC/AYqA1SMrCHBXM8UjcgBhyBXXZogkpgMDAFQHmvq3ZxgV1wGOzzGGfQCggDVY0F9AKR/J3zEb9dnfKwW1g/O4nLuxwBIjuHQUXQTNxyD1xlzae2+gHS5gWIKRu3tTj7idP4Z2zrc47h2BhZcc5650rrZC0bt7bGqX9jgdXWyS5nc3BerPoFp52zcuOTHyvvyvyy2thN9QTYXfeXE519r8cO9CmLBw4CmgCnHyfUzl+7Y/ebR2PzS+Kea6XsXcEkvvUoBz7uu8RPfDFBVf7qXnnllW96VHS5i6Ek4BIg1s6ab2vBt3g+C8Z1fxdD6XNjbPei8Xwu9Tlj+Ay45u5Z96r1OX0y4L5cWD7M51BdP7BofUi9cc2ntWauCZ1wlvhQdWfbtfI1D8euYXOszTGd8dT578k0TdM0TdN0/2sAeJqmaZqmaZqmaZp+j/Tiiy9edkYCvoAt8Aj4gI9BwwAa+ALCMICLD1Dl2HtJARwgB5gFtUC7dvGKzwdMA229+1c/8LdxgF11oCuwZedj/eUAUoFeoFXAuPwYCNpOzmtwfcK1wJj5KI1rHuIa63qXb35n39O0n6aOb/ZGfvkqQa/qyrk4HTcfeVl7YM96WKMTFiuLxRqjOGdbfTKP06YgXH2yU87LTwnCkr6s+saunpyfxufVV1+9d3/lp9Tm2gZwWW35FifpY+xzfrUDm/o5157xsWsceHUf+QEBqFouxQSzg6na9G0O7iNmHY3Bh7SXg3b3f/eyRzST/sUhn6Eg7BlHu3Gbv3zLh5pv45Uz32LZIWx+2pt/1/70UwK+/Kkx+OTX+lSXyrH5nOJXP/mJGwQ+4xQjOfbjEP8dmaZpmqZpmu5vDQBP0zRN0zRN0zRN0++Bnn/++QusBVMAGLsQgS8wMbAKEDHiw4AjOxMBG6BGHXADHIO3wKS4gDLQBQSDVIFKOxaBXrDZOIFd46sDf8XRVh4gbbtxTzDNvD/WLmGQ2XltJwhjwTPHYKJ+5lu/fE7fzjvOnF+b+nbfnn43+Z/t53l1ZxsrB2DbGoBgQLq8a2fNA8yrbxDuPD7j17fxg27MuZjK67jFybSfOv0Ydb8o1TVG74i+bmfnGOoZ1XZeu8Tn7KfdvVj/2tXX3z3oPnV/+lHE6ZO5X9zfjUGnj1jMTvcgKr/mYr4+B+q+9rWvXdozcfLz+RFPHL7axaud8W+sfJgxai8v5yBuP5AAnZu30ufn3ClsrPJXWhc6x+lYSd0zndPZdpPUy4GfMfw35Ix5Sp1crJ9d0dM0TdM0TdP9rQHgaZqmaZqmaZqmafou6+mnn75ALiDFzk2wFgj1COcgFoAEyDCwl4FHdjACMXYGgjXKgK42McE0wCmoox/ABDTZ9QtkgrqA1Nvf/vbL+Or4BHoDucCdnZL5qPP+Xzske/x0sJgFvZg5gJYejWw8cBm8Br3lGyRTZsGzE7SxAFf1TGy7U0E4sY1fW/7kHHS+KRZofPrXX70dqfJzrp/jxihW+XfdnLNiFbc1EPccp/6V7ofar9tY4lNb+ZhjbecYzFqBeFnt7qHe7VubumKeY5z9q8/HfSL3sz2fTA5JvNYrX/cGFYPO/lnt8mwdmONi1s58dvzAAdjll49rkQ9/n5fgLx++tTNS8umx0fzMK5/W1Zj8nLs3fdYC29r1vbYzZ5/rILDPslJ79R03pvNrqb9J6mtTGltM/62wBo5rO2MYww9Annjiibs10zRN0zRN0/0q/7L4mW8cfkP9Y3OapmmapmmapmmapjevF1544QJAvUsTuAVcwN9grccfO7fTzs5MAIY/0Aq4gj7MDlq+/MBYYAZkAmlBXMc9rhmcC+wCpmL2+GcAq3Ha1crskASq2imsDjCj3hEcuArAnWZnMTgFHBvH2HIR5/QzRiaHjmurDty0mxkkZ9ZKHmLz5UdgsO8u5Aqc8bNu2tWDdd79CtBVb/1A83/zb/7Nxd84YpI+fG4yOnMkkIxZf/GM4/HbfGpzXVL1AT5mzKT+NNex8VmQ8GwvTnGDhq65cznJzY8FXF9trB8AXPdlduZaG/cmaden2GK6f7Rro8Zi/NwDcih28cXlY81dsx/6oR+61863/kr3j+sjTnmbizXTxsd1Ve++dV2Lp83njPF1D7t/+IO11oJPfuqNr94957PmuBiNJWc/pFC67j6v4ppX5h7gc9PasHNe59xJyeSrdN2755Xn/U/5fCtd+8jxHNf6dcwv38Z6+eWXL5+jaZrub73//e+/89nPfvbu2TRN03SbNAA8TdM0TdM0TdM0Td8lvfOd77zz3ve+9wJNwTKABaRqdy1IBMoBTY7BJcfAEP9gERjGDwC1a1I74ATeAbcATXAUuFMvfseBWAb6gXZBXmbHojr9qwN01YnjPPh7AmAQ1C5X8wL7Tr98TuN/bXK389acgD0l2NYasfLnX+xAlZytkfnzK6b5AXQnNCb95Zq/c31ITOsL2IqR1X6en8fiu17GdF67eNYGpD4hsPpMu2tUvX6BuJv6qmPuCWUAllGQlJmf9fQYcfO9bmcnwFVaS/Xim1MAN1Offbt2MNZ9fUJm9cFUa++ev56H8bvvAVaw0ufCe6rNxZhnHO2ArfV/9NFH78XRlvlui4/7yY8xjEnaiqO0XtYteHsdh7m+fHqvNslHW6X71We2GOz0MRd5X/uQUi7uU/cDnfe9svuLHOeXtJ11+WTNxTrwPcv8yVwJMHItpmm6vzUAPE3TdHu1R0BP0zRN0zRN0zRN03dBoJ1Hp4JSQA6gCS4FNEFDkAmEAUBBHeeAi2N1ARmgSX+wDFADwcQEB/UB2OxqFR9IA4XBt3bjyqHHOAdE1YFK7QKVUwDXDmFxjJFfAIo5tssYEOKn3wl/T99rMy9wGTg2Nugb1BNLftZIDGM3vr7GqAziBnIbX19g0s5N9eVTH/XKM9/GYeIHca2ZedpFbU3tJq6N6Wstjem8vuBZPupYdcrGrc1u59qdn22Ok/uhuMU6VQwCGb/0pS9d7omkvbjZKX0bW+m8x0xT9fX1w4Gb2lvb/OTNmnvtrr1j41z7XPd3r51QtrGas3Ofh9rzYXwy11NbwPPah873BfMpF+PwUeceVmba8zMnn9XzM6yf9uauZO6xxNcuWz/cCHJX778TxVLvmM68T5VnfqfKpfbTqJIc++9IoHuapmmapmm6PzUAPE3TNE3TNE3TNE3fBXn0M2AFxILBAIodf8AQaAjAnCAIXAJFg0yPPfbYxRestCvVjkC7IEFNj4MG9jzGVjuwC+KK
q837SsHUQC9QZ6dhdcYDNQE4/YKsYBT4xC94GiBjQWQwDhQSpzk0n9OfgabehwvO6WctPG2Mgbf6AqjsjFOsswz0KfmyxlFXe31OU1f8s6762sBjYFqucjxhsTLYpmxMx9d2js3Otvqd+Z5xqlOqf/XVVy9jUv1r6zHd1wCPaWfVU3X6s/M9vWIbt7HBUqX65Ly1yPdsr28lO+X8XG+le6oYAGc5uufdL47lEpTNp3FOM9d8xCyH3vWbz/Ua5VdetV/HYT4vdvCecdTrp39xzOtU7Y3BjG3Hs8+iz4MfQ3hHs3FPANyu6I6V8mrdnH+n0scclGe/jqvP5Om/Z9M0TdM0TdP9qz0CepqmaZqmaZqmaZp+l/qJn/iJy7+nQc0AjfeEgorqwKjqASCwBywCduxQbYevetC488AvKMYPWCVw2a5dYKkdtACPXYpgVKCVAYbGbuduEKrHQOunPqClNA4QBPqKre6NDCQG7uxUtpvR2HLVT7zyuO4XiPtOLP/XXnvtAsmthzll2irP2I5v8qmUV4C8ekbm33UC4klbkIzO49r0UQLg7gm69ucDxDcWuRey2uvnvHqQ0PUvjlKbehbQtkZkjOvYrnuxqb7arx/zTNfxXePGJ/VgpbLHQBtDezl7PLe4/Ky3+9q5e8c19Rlxz/BV597Jp1wahwGy/MSXj9K5etcrv8bKh5kPX6ZdH/XGUhqLTzHkxy+f8nGuXV7MZ6m5Jz61+++Bz4h5em9xcU65Xu5F9Y6tA/W5SfU776Gbjs86uZiz+NT4fKqjPjd/7+/9vbs10zTdr9ojoKdpmm6vtgN4mqZpmqZpmqZpmn6HAtk+9alPXeCJxzY/8sgjF/gJkoG3oBHYY6fvNcCx6w+oAnrtdgSGQCIx7dYFpYJiwBIQaYcxAMVfX/CydmPoG9Bk4CwIee7uBXbkFtzlp40ZF+wqtrjqi5epA6D9gBysbs7Kxi9mVt11G0Dd2jjv2KOGtYHaYLVd0YHt60czM334n3XmWryznoljfo6v28/8tInDSMmnMnNen2LUr/PiKcHsawB3xgrw0dnG6lff4jLX8MyZNWY+Z5t45ZbPTY+obr3ce+1CpuLnV4zyA37tUHfvisHcY+IAomCpd/Tqo62x+PSI56BsYzQeP58LP5po572d9OXBjCEOFce8AN5zl7A45dxY/PjIv7FIO+mnvs+JnPUhPufatJbqzL0Y16ofBd7VOa5PPjfFqG9lvkyuytah9sqMrKWnCEzTNE3TNE33p7YDeJqmaZqmaZqmaZp+h3rnO995ASUgFvAJ7AC0wVPwxjH40zt6wRewGDAFW8ArBtICnGCVdjDVu0nbRdwjn3uHb48qBpUAUnViqAOjANoT/soBJKbzcc4MEJZXMYvLtHcMWtrxKO9zfCZ+Vt3ZNx8x7Mg0Z5AbkALWrE/1SvGDysbR9xQgDLjZcamP42KB0kEvRqBm9Wes06dj7UEypfU5dwFflxmd/VwrY5L26t0Xynb51gfkY87PnaZUm77m+EZ9tVuzdqFqM3ZxlfVP6vXT5jsh637uYi1+Pq5Nu4Az9Vk/LHDv2ulK+rp37EAFhfURp3kUQ3s7b8UAX8tb6d7Qx/3kuuvHr8coF6exlO5D/ayLRy+DusZuHLmJx8e6+zGG+8ku/mLwcb+K39q1rtqVfOSkH7/yaRzWZw5c1XYtccwn9Xki/brPrnXWfyuf8uwer75STkrtfkDSfzOmabo/tR3A0zRNt1cDwNM0TdM0TdM0TdP0OxD4+8wzz1xgDcgHiAErwBIDcgFbMIy0gcIAE3gJrABS6vUHagNz9QekwGCxAV1QBrwV17ge1QwOiSO2OtAYwOGnTnt1xpaPOgYcg0CBYxa0tcsTCAPM2p0MCsotiBWcUtYvcw729mjof/tv/+29x+MCjHzImAFHseXcXMQIRrGOtZuzfo7VBdOU1g58tH6AnGMCZIFccbJrVS8OWBbE833J2Y+Byn4AcC3+TN/rdnVBOGBZXL6kvjHdC9ak8etXe4A4qQ9EWpvmSnI9+7MAr3o626yr+KSv9eUnZ+1gPYh7E0S23nIHS59//vlvWgv5aQdVXX/3g53B5VCMoKt7wDWUj/vQfMxNOxOrOICq/lnAlZ/7iOTx0EMP3Rvv9AF8jaMNwG1u+chLacygNolZG3M/ut9bv3Pu/MzDsbquAbVO1lsp5+qsQ/dcnwO6Lk8Vq+M+I8z4p/LNn6/xzfUzn/nMpW6apvtTA8DTNE23VwPA0zRN0zRN0zRN0/Qm9cQTT9x59NFHL3AW7AGxwMsezQwkAVeBTO0gE6gUAFMHOgFaHtvLDxACZQEYcYFhccG8ACmAC870Dt92G4sB4LS7l6kTS139ADN14KOx261ZG1hMAJbY5hKYPf3EOEuPZTbnDGQ686vkXx/xGr/608znPK+POI0duOpYm/maW+OBbcEv33sk/qk4p/QDPPUDNV0v18U17LsTsDnIWwz+jRlIdEwgYKD1Jgiojo8yyOuY6qOs7bTittv8nGvt+rN28NKZMzM392j9tTdu+bsn5F4M7a65fkprD/qbfz4nKAVC+YrFR3+qnVlv95J7p3fmkjj6MqC5OOXDTh/3gDXRfpOP68mM1bo1VvNi/M3r3OFLzUspZ3A8IH2OlQVX/Tegdjqvg3GcJ/c9ue+rP/udZXJ+3aaUq/U6dbaLaxzrAQDLeZqm+1MDwNM0TbdXewfwNE3TNE3TNE3TNL0JPf744/fgL3AVaASp2L/4F//iUoJowBM4COCCQqALA4f0A9EALO9ABeQA23bKglDiALKB0iAueAtk6Q9Agk7GkQcznrhiVCcGqGPXcDBMfPUMUNYHdFZvrIBzkLYYv/zLv3zJy65kYxkfFDOeNRG7R0+z+rEA8nX9TZavdWTVd3zWmaN8meP6AFr5sFdfffUe7PKIW/M2FwD7VP7mDgpal3agqi83Jl5WXe12QVNjlpe28kvXfc+c6pO5BqlxW1PmOJ3tld6tfMY0z+4Vpq5cWXGLrV3crLhZfgFOPmeMTL98Ao+NXe4+M7VT8U/zOTh9qherY/cq5VPOjemYzxnn7M/4WLvmLc/yUXZcez7qxPT4acc+a3bZ56MtYKz03wty3n831CtJn7Ms31Q9dSwP8+nzcfrU/4TW/pviEdzTNE3TNE3T/aftAJ6maZqmaZqmaZqm71AAoN004Ih3jtohB/jYzQfsgDrgClhoZyF4os2OO+fAD5gIOuoH9joHS4Fjjyp2DmL2WGYw2G5P5+djoEFLeTgO0IKCANFZx0AfsJP/NfgFk9Xb6ev8GmaBkACvHZu9u1V/OYKhJ+hlxpJj1rnybAOfKrOzjdFZf7a/UclOqSd5a5N7u3itkfqgV7t1k76txRnXsT5nv+txa2fa+TFqTHa9g1id9oCf65Wqr63HDJNcz7juJ3P0PU/ttTG+YqvP1BffjwTce3LjS9rbxeq7I+1i1F4bcy+75693uWrr8+B+cE/5nGgvr9qVfMyjXbeZtvy69/g0P+aelT8f5rOZjzmecVifiXP3rmuirXn5jIt1jlW
cfMSwNn7UwVce/vvgfjv9tLU+ZywyrjjdV+bXdbIm1VdS+Va+kbQbW5lv/s6N4Vy+fjACeE/TdH9qO4CnaZpurwaAp2mapmmapmmapuk71Kc+9akLuAG9QJggD6gIvoKoQA34xQ8cVQJc+YJBoA9fIJIvP21AK6inDXwB8fw73bH3BgOJxgJ/HQOw2oAtkEYd+Os8KMYXzKkvqMQ3iBjAVV8fOxPlDbwFepn+2otvDa4NPAKNwfKAFbMT1lzVB5wya2euwDmpqzytWB2fbTeper5ykzdzfPYJvnnMczCWTp/zGCSjgJ1+gdZUTAayn3M725hrc0rdCXkDdbU5Vron5HvOU1t9XTP3qnbmurvn3Gc95rn++jYuAypde7mJWd7ui7P9+lHU2pl2a31CYqavcQOjSnHFUV77uC9BV/WNxY9pN5Z246mTk3OfOZ8F9ZlYfnRxjlUc7cVRb97ayPl1HOt4zqt1UVpfYNvn2HU3Homnf77iXMPvVPt5bd235LPXNU03nb+Rznw7p+s+xvO5/PznP3+3Zpqm+00DwNM0TbdXA8DTNE3TNE3TNE3T9G0Ekn3oQx+6QCHQBLQBbB2DO8AoWAp4ATV2B9utC+DoCwB6ZHQ7MkEq8A4kAkRBWLuC+YB2HkMLvgBIdu6CQ+3qbceoPs6NwV9cdXJQ533Ecjl3A8uJ5F4dC/za7WdOwFZgOT/5MDGBImXmXP7mA/I6B44ANXWBPDJvgAxAC94Fnqyv44zA6GBy9Y4BZWNYCwArf8rv9K9M+gS+OmblUP1ZnuITtFOeALi2jM/ZXh9rrbTe6Yyr3XrVfuaVFdfcWGPqa51d1wCna0na9HWvme8p1yQ4aBfwQw89dLflG1Kfj/bezcuu27t/roGrXMpJfu6RdtTm0/uD+TCfnXNnrnFqE8vc9THeI488ci8neTSWe/PcTVysYvhc8JePH2mk4vA758W/cYpTLNcK4LY+6TqOe1/duT7sPPa5UVIA2Fw7/lbqfknFUZbrKf5Me58V6/W5z33uP/Odpun+0ADwNE3T7dUA8DRN0zRN0zRN0zR9C/l38gc/+MELqAIb29UHtgKQQBEgBPbwAZcAHef8etwwSAxogbX6glViOwbqxObT+30deyewGCAuwArKgHZB2a9//esXWKPdOQOMCZwKBjNx5VVfJl+lOCS2PuqzwG8mh8zO3UCSuPmIIVa51s9Y1q8dzupPSGUnbXAYRAsOqwcbrZm1tLbiAWUnwAxgMe3VVZ4AjE5/daxdwPncpLNPsO58lHPSVrs8lcWtjdVXTHnzATi1Kd0PjcmKyeTbrur6sq5D75yl1kxMZk3BzuZbbHBSu9K1OOF1fbWxdvgm/d0P2gBX1/F8XDJp46Ot+0ye6lx/x+6R4ijNod2yrHkY3/3vvpC/XJh2fny0iVEcj273WdBuftZP/TkWH/NuvNqYHOV+5pOPej7mZP1Ba3lpV/I5zfrzOeMwkrv/Dpz9u6/l6JzyP/1qo7M9qbMuymJeyz3kc/rSSy/tu8Jpuk81ADxN03R7NQA8TdM0TdM0TdM0Td9Cdv6CIME9UIZASIBTPdgTuAKOAKl2BQfA1NnN2S5F5wAvKCMOf0DZv8vtxAVlQEOA2G5eUMvO4aCu3bzqtDtnPRqan3N5eRyznI1X36CbuEAZ2OZcG+hTuxyCtyywZBz9yofP2a+6+tR++p6x+Wi7hsYknjnKXz1/Aq7kELBVn4lHlcn5G5l4xTyhMuWTrs+Dja5dbayY1r9d2kmfjM85Zn1qb45iNn/iR417zr/x9RX77Cum+My1dx9pP9vASaVrfFNu+bg2YGpttQdCXVeQ1Rg3tYP8Pid81L/jHe+4FyvgytybPUadn8+ZH0eIf8aigGrrdx2n+xQAt1ufzCUfn2VzDhJT+bQuxi2ffLTJS1/H5XOun3y0ZfLx343yPcfr2FjkXN43XWu6LvnXzopH5WEu+af8ul9eeeWVy3+rpmm6/zQAPE3TdHs1ADxN0zRN0zRN0zRNb6DnnnvuAqSAkieeeOKyoxGMAc2AGwImgRKgjPiqA94AIrsLQSJwt12F+gOC2gBPvuKJYTcu2KceXAONASSAOHgV1Dvr7PAVW50c1bWDOCDM7BCWIwB09q8M0AZnwWjQGvAG6vSVl/b8+V33q3wjO/3LrbaAVG3KE2QloIq1A5bqf/qe54xP9ed4xevaUH3oOjarD9nJWy58rTHApmSum3vIfWAtAUKw0fcwdoJqC4jy0cbcC+4Dbe4pvvXVZlx92yGtr3Z12gO85S8n5lqCoO5xc2hO6muX2/kY4/prq11O7sfWIrgImIL2fM7HNyu1ydVnxXyCxMUh44C7YpmLcUBZ8Xo0tbYAMHOsH7/W3Xlt+lofn4nHH3/8EoPKlzluZ245s+bNx31pbj7bfF0L/j4bfBrTHD0loBjnWErXSb045LgxxWCuYeoaKn3+yHlyrG91123neTnQ2Vaenbu/fvEXf/FyPE3T/aUB4GmapturAeBpmqZpmqZpmqZpukGPPfbYnXe9610XQAJKea+oHbPgLPjkGIABfvxbGnwDVHpks12yfAEeoDXQBebw1QcsAobBKjAJeAz+gi4gD1gM6HrccmDnBLr68DvrzsdF66tOrj1e2ljOTwNZ9WHeuxsckiN/8cUK2vK7Lk8jpXyv284YLNCk7Lj6+lPn+gap6Bq85kfXfTu/ricxM9dHvbUAJwHwdmhfx7qpn53X9XMPuC9cD+Z+0Lc+jsFDENB6u161iSUOCKuva6GuNvcSwOge0te1rC946Dse9925PuoDo64xsOq+o/LSro2P8X0GUm2Zcc/dsvp0/wC37psAr76OrYfrGLx1LI/eKVwcMfgAqfIX793vfvc9SMr0L478r+GtUns+1vccK59yZvnIOTUv5nOv3efCDl7z77rkw1+c1k9bcRqHn7l5nLQcGH/3jLjWTj/rI37XUNlnyDG9UXmTxGw+xDd/bR2Lz+cf/aN/dDmfpun+0gDwNE3T7dUA8DRN0zRN0zRN0zRd6dlnn73z5JNPXh4PC4aAVWAJsATUKO0iBOPAYHDHzt3+TR3EA3aANxAXfAXrQBugjz/IAhwBtkAP2AQO2ika0AOS+YkDNhnzK1/5yt1M71zqQD+AWI7UDmWmDYwEm06Ie4LfzG5fACpwrL0Yp18GDlVe23X99XnAKdB0nrP8zrb6VZLrwwKcZC2AV7tmXZPrfjcdkzhkzuBeayb3JCZYesaonzXTD2jsuvBRAoZydJ7pK7Z2YFNc9edaFSOwXPt57J47Aa/SvcBcT7mcOZO5aW+OrRN7/fXX74Fr92O7hEkMCnAq+binuxa1Z+5Za2Ndzx3Hxg7KGst953Hp2oKh2n2OlNZCjHzyO+PwYT1WmRGfgC7f6zitGb/yOUEy49ecxLBWAXg6x+Kj7AciJyAXqzh85Ou/J64lX9fj9DeOtuS8a9m9kBzXj862lI88zDGJ1zzPsdX575AfQEzTdH9pAHiapun2agB4mqZpmqZpmqZpmg7ZXWhnYBCm3Y2grF28YBfoYpfeH/
gDf+Dy72hgiR+YA1YBsECXGHYKg18gsTaQSxtAxc/uVXDOMQBLxvOOX+MAVuKIAfyCRWLJA8QK2qrLTz3TH7iRY1BYf/UZwONR0XIPXKsPpF37Zuf5dRsDYQGjE3oCTwGpazAFOoJu5nH6XfftWElAlfEBS9fBGptDOncHs+CmaxAUPeNav9a2NvGVyb3g2uujzRoDaeJWX9vZr/VQZ21du3Z7urdcJ31ad/cAwAgiAo4B4nJN1kBfdbW7Jxio6XueE+BqD3byEV89CGkca6BeOzBtPaytMYzFV1sAU3zzd9/qx4e/uYrlmrgO7k8764OLAWTtSmPrf8bhY43U8ZGreufKfPQVx1o6zkebNfR5A6K1yVkc8vnia07l09zcAyckJm1iaHedjKfNfftGceQFJF/H0cassRyff/75ez6nr3jdj9Q11i4Hx6f4aau+OPVPzltzOuPoo72xPvOZz1zu+2ma7i8NAE/TNN1eDQBP0zRN0zRN0zRN013Z8Wv3LxD66KOPXnbmgiMgEAgFGgU0wSzwJjAMBAE9AKDdkAAPoAb8gL4gK9ADQrXTt/f7grOOe19vj5p2zB/8AQP9mz04KR5A02OD82OgUDuH5aYtqBjQ8WhjObKzXXn6dixWVgz1r7322mVdmPlaEzBLOwU8qf4grO8eTmhrHNDpeoctCw6fALT8zLf5n3lTEExf109u/Gp37a7H8o5kc7HWzsVjp4xpjuAZc94cu476yEcMoNdcxdTPfFv3+hlTHyA1oOncHNyD7pHgePlrE9s1vn4Mtj7iGEdpnZsrU6e/+0heAKl8U/cGc8+xICedsa2rdve5e+D8DJgr42Nu525ZOarnp5S3PH1m1FkzeYl9+ol3PuK5HJi1ULre8nEt5EL1z4rTnJT5KMHr4LM25rq4n6xZvuZ1PrpaXm8UJ5WzeErXWp3/NjSvaxOnY/5dy+4zx1RJ+XRcyVp/lrp3yDjO3d/+++NzOE3T/aUB4GmapturAeBpmqZpmqZpmqZp+m3597AdeAFWMBjoAd4AYDtaATsQjYAbcKf3rwJxYgCA/AJu7fgFxsAsMMrjoI2hHwBHwJWxgbxz1y8/QLg6fUlc7cbiE/zkC9qIp119gKj30gI7gWF9vpXpxwCjV1999TIXYA48swbgK7NO4pVHAlmBb+NaLwDUPE//4suLjGHtgTVmftrFAvQc61N+5hWYp+r1KzdrUT01VrsvjeOa8RG/HcLnOErrC/aZQ3Pln/GzPs1XzK6POsCx66Ivlau24jIC45jvagDCE5o7Jm2up/tPDuKan/H1Yebj3Lrqb771tz7Wtt2wVH8mJ/3d6+rzqd190Jj5yEPefU5ARvO2BkBykFFb95J85MJcF5/B4gQqmbXyeWqHr/4AuusljnM5+Lycc+J7HUc++bByZWKK5bHv/KyzR77LLz9l165xSH3WvWxtmvf1WK6363L9eOvT13Wg814jfR2ru1Z1Z9t5bE3klsRqXBKXyeFzn/vcJddpmu4fDQBP0zTdXg0AT9M0TdM0TdM0TdNv62Mf+9gF7gAt/m0MegBWzPlv/uZvXkAPeAU2eocv4AQeBTXBInBQqQ9YG9gBLoEj/+bWD5i0w9gxeOeYb2DWWL0nWJ3HPwMzALL2ctMHnA7aAMP6qgeXwR2wTE6AGSimDbB7IzN3pd29+hZDXzGMETwWiwWK6quUhzk3J3V81Z9+zDyV1lF8Y/HjcwIroBJQa04gnX7tHC5WebFis86VroHcjGkty4cAL9cKlDQOUAYC6qfe9eWf6WeOrrn7SOyuQ+3Gdf90Xi6ZR0SbQzHVdV35goNybV1IO5OTxzwXS3ttzvU586qveekbLE1BR9bOdfex/JP+1sb9IXfr005YRkFOpXHFcQ9rb3ztwC0LbvsxBhUnn2CpOOIFpt2XwV/mHqHA7Bkn4+M66psP01berpMxvBOcr7U8fVhjvRFsbu6uH93k4z5TWpPycZyvsuuoTN1X1Z9tdH2e1ItpTOtW/EpjK8XvXvz5n//5y+dumqb7RwPA05uV/0f7m4D5m8H/B/y/Ypqm+08DwNM0TdM0TdM0TdOt14//+I9fQCBABnYATHYfAj+O28EHJgF0YA1gCDTpB6C0ixSYAoKAQLAEsHOuXjxwR51/ewPJvlwDL/17PLjXLt5gb7uLjRX45aeeQLnAnkc7A0ugky/v9A9wmov53WQe7yp/AE7eYJ5ciytOMeT2rYwP4y/22eYcTAouZfyNw5xrr2++crAGoLI5FYuAK2tkHnIPWltncBKAF0Mfdo5VnRxIad7Gcg2Mpa5cjOWatbM5kOceECeIq48y6RfkBe2ttTzdT2IB3+agj0dRa2fiAt3GCHIXF6gLpIrV2H4wIDf9xTeXE/BS90n3SmA2CFgb6z4KcjLjyilzva25OVAxtPmMmKPPjM9ReZO2TO585HaOJU4+rqk5ycmj2sXho4++5cNXm89HcfioLy9rVj75nP1ba5CZtJ/zysQ5d0gzPpm1cS1O2GxMbcYRwzjlo73xMuOaMzlm+ijl2f1J+VPlTeq+abzr/mK6j+X68ssvX55QME3T/aMB4OnN6o/8kT9y54Mf/OCd9773vfdei+Jvkmma7j8NAE/TNE3TNE3TNE23VgDshz/84QtQBAiBXlDQTkogC7ABvUAQgAyoAXDAGuCE6avOl2PgG2gFdAF26vUDC0EUgM1OYtDGMUgHtAR6wRXQpR22oBH/QLFzZmevnEA9fnL2zl+xjKnOeNcWzAn2miNzrF+guFKemb7Z9fm3qw8kfTvTP9+Olay5n2M4Jn7mDP615nyLRb7vcAy8mi9zjYLDYtlx7dq6zqCr70e0WbN2U6u3Zna6AotdO7GNRydwdl3cR/ro6/24xuja6QOukXZrDwo2B9c0OOlec3+J27zUuw/5GNMa1J9PbX58oO2EwOoDlOYqz6Ar6StXJi/zOiEnBTCV8nWfnjGMoT3j048L+Gjv/pMvHyUfa6ydn1zkz1fpesmXDxWr66d03fifOTdn7eVsrDPn2pn/Jii1tXaOW5t8xfGo6MZxXfloZ+LIK7jbvNwX4sqVn8+yR8Vrz9TnR2J3r6Xzc5HyURqLznYS33rKtbb8nTvuc+a/G4MA03R/aQB4ejPyt8kf/+N//M7TTz99+SGkvyv8P+4zn/nMXY9pmu4nDQBP0zRN0zRN0zRNt1Y/8iM/cvmyC8gD5vx7GMQBYTySGaAL7gElvgTjD8iAJv4NDdbqF0gC3sAswAZ0A86AYWAMzK2PXb7GdGwsoCWYGBgm0E9sOemvXh/n5alOXs6BpKzz4BAYCfQ0Dgv2nv3432RiVGZ2IZurNWh3qrkARdUD4Oaqzft6gTs7ONsNe9YzfXrfb2OoB2ytKSBeTqAtsBZcVdfc5cEcB7lANPMObLku+p9raEwxXEvX3LV3HVof6yUX4zWWtXX9gT71xhOzfsXULn/nzaFcWHOuXYxAoHUR15rpw0c7iW1Njd28yZwDkf1YQZ/iqs+u20m9NTgfA82H+IgdcG2Hb8BVW58bxs/aiWOt+4zYe
Z8Ps0Zy8chk/c844ivNl527d+mMk8/5Xt1rn/LxmciHNW85u74nSKbTRylnOiFxOednrP4b4Vq51o5rZ+LoJx++rrn/JvnMl5s5ketfqY/689qfx28k8VpTuu7vHiyuz/Ev/MIvXNqmabo/NAA8vRk99thjl1ei+H+Z/2f5/5P///3Df/gP73pM03Q/aQB4mqZpmqZpmqZpupX64R/+4XuwDNgB1cAoX3SpBx37AgyAsQOUgBn/dgZNwBpgRp92EQJX4CW4pR9/cBBI0e9Xf/VXL8AmEAkEA9D6aZOPc2AIeAoGAzzq5APIgMHq9HN+WtDGOCxIXEztyus+mT7nOfNoaXNqByww5otBADlQpM1aqDdW4wcO1QN/xtNHqR58sh7FYsCsNQb/xBJffRBNHznwK1/XD1QT1/zA4b68dC1aO3MptrnYcameWW9tYpuPNrFrMzZ4B+KWuzHElodSP+Od6yj3AB6QdkJsbaTU5j6przp5ZtZCu3rt4pG5uhfdO6S9vvLrmrhv+NRPfT7W0v3qvipX85Ora+jYNXKt+OSnPuNnTfPRLrb11O662OkuF2C2fOXQumfqwObGEkcMba6Hz5/4J7Q+89Gu1PfMp1gMyBfP9aJ83DP6uJ7l7dqfayc/bcWS//XjpPNhrlf9QO/iNK9K41hHOV0DeX3dG8z1Jcfa+2w7J3V86kv1SWd+dLY3jrhi+G/gdoFN0/2lAeDpzegP/+E/fOe55567/K3m/0P+n8L8P8L/s/394e+IaZruDw0AT9M0TdM0TdM0TbdO4C9QAxYG0XyxBSD6wgv08wXX/5+9e+3Z9arqv78SXsX/fgDF7velN4W2UKBgIwU0gJsQ/oZdkMTwwFdg5JHvQ7YqhoABNRiVREvZhdqWlhbwgZIoD3wRt59DviuD875aWmCtXmt1/JKZeZxzjt3cHMeY1xjncV6SL5Jf/k6WTJKQkQDypmMJGwlYiSJJG/wSWyUOBdC85YqvBK9Es0SdpLDkir/DJZslXsiRYJNwUSSS0UvMsUubhDF76YmuAJ1ClgQle9gnGYW2BGcFrbd02cpG9rJHu59JLtkryUmGxCA7yMrGKU+bfgmz7Jrt7E+vGp15iKf2+UavedNWQUc/Wa614TOX7LU2JZrNEdn6jUu/pJ6xmHP85lwxV/rAmtWHtz585DU2bfbMtEOhFy8dJdBqZ6e1oaM3n/WBfmunL15QS9LRb1z2os+tMTskMfHrk4TEH9DiU9qfvcEL+NAofQHCPjSP9rmfgMTbfkdD70yoaqfXmNBZ8xLJFWtKpvGitb6+eJEMNV50Cjr05jgatllj849W0dbPKlfYkxxr01rpA/Iak2IN6TL37PJccL/Wj9Ycm1fjnnKiUdhln5W0Rmde9ZkzYzJ/+pNjr6DJXkV78+OarORV6MIbXJ+2qyt9PgvdM5M+WjabQ7I9ex5//PGDdrFYXBnYBPDixeAP//APD3/pfMY3Ovfw55LC/j2KM/Gjjz76U+rFYnHesQngxWKxWCwWi8VisVi8rHD77bcfASwJnuuvv/5IRglwSdxIsgh4SQBJGkmKSt5I0Eg6SpQIhOHR729oQTJvberTJsGmXRJMYlWiR5tkLr3oXZNNr6SgpBZ7SiZKwLIF7Uy2/uQnPznaSgZrU5MhQSN5RJdE1pSneHtVUkkiiv2NSYJRvz789OIlh66SnsmREFLwxZsN/SxyNOmVrJQcnO0ln/WVgDLX2aTMJCkeCVI81ioe4zBONhiXpJk5yy56yNXnrcvmhNzeEPZ/ZI03Hvok681H8uYc4DXf1r0vESiBnXQ2NjayWVLN/EoCavOlAv1sM3dQEtdeaR7wKEC/hKd1yRZ95CnWruSidjVbjFMxzhLA+pT60BknCACbfzbPfjXdJTnZby+bo/qNocQtm+w54yEbTXqspb2PJjsV827u7D82oHNP2ddk6p9yfFGgsSQnmtaUjfrwAzu7h9G2Nqdv5jZuxbqcvuEbjWI9o8kOthtDY0dH789LfrO7pLX+WcyXfWztS9S6hni11zcx21yTlW5IzpRLHjvcS34CGv1isbgysAngxQvBu971rgtvfOMbL9xzzz3HOcGz33Nf3bmM73cG5jf5dGfVxWJxvrEJ4MVisVgsFovFYrFYvGzw6le/+sJdd911JKsknvwUrWBWCT7JGYkVbz5KjAiCSYRo91mSRFIKr0SPRJ6AmASXBJ+/qfVL1AqWScR5a07wjB6JQXCtTR8eSTv6FT+zKqko2YVGWz8DTXbJTqX/HVx7fRI2iqSshBG7jaXEcHLRuMav1K7QKaE1E7qKn0/WLrFlDtilJsuYJTLNhzmjl01sNKfmCK25ZIdiLtiW/WSgJ5csPBJOxoFOiWeOQ8F3ylOiFKZtinXHYx1L1uJjY3YLcpZUS0dgoxhKc4GfHDZJArJz6kePX7+ks/E3Zn0l+KyFfQGSbuQaU0nR3gwnSzGf+JpXeskL2hsze/DPN1n1G7fEpLF6+92bPvor8aMz78ZkTdFNGdGw1zjN7TXXXHPQ6LMn9ON1H0kkp8MY9KFRzCk5N9xww8GPBqJRW1NzkBwy1Prsd7X9qh0NO62XfW3u6VDYSVfjhuxpTMaDxtyFOW4FjX5rJemczlMaSWvyFUiHwl6f8ZVoV9hlL+inY+7H7hule3WiPiBnXhu/+ay9PjIr9D/55JMXnn766cO2xWJxZWATwIufB774D/7gDy686U1vOnwLH+CZzwfxJc4NPjtf8El8sjPYY489dvikxWJxfrEJ4MVisVgsFovFYrFYvGzwwAMPHIEtASxBLgEtQSzBLUmvEluSc5Kz3uBFK1lUYhafdkGv3vjV5jNaiR/0AmUSzBJ15M/ErrdL6XFd8lkyV6CNjBKbJYzJj05CkN0gGaiNrFnYbpwlkfFKdk0auvAmswSo5I5kUAnQErclsIyXXna6RpccY5DsK7GZHuP1U8L4soVdam+QSGhJzNJrTiXo1N7GNAbzh755mePwWYLKNdmSnyVe6dZHD5vMYzaU2CKTDjxofNZu7Iq1tm4S3+ZHsox9ijc0k4mPPoUt5q3kcXaQl97eCO+z5Jt1VZNNnvk2Dv0FYyUq6aM3xItePxv1a1fwKfrFfKyb9Zu8JR/ZbX7cA9A8tC/IZjs6X6bAD+mPBvR5yz479NOBJrrelm3ss59e+0DfHE+2Jo+c3gK2RiVf61e0W+fThPSUZZ9kjzJp1PYpXf00c0V/42EzHTfffPPFMSn4K5LWYA3QVPDbh/YLOmM3LrLx9CwxDvT2WXuKDjVoI0Ot6FNP9Jmc7I9u7kk1Wa4lfwX8F4vFlYNNAC9+Hj760Y8e+8QZCPILfFJ+xhkNXCvOERLH3/zmN4/2xWJxPrEJ4MVisVgsFovFYrFYXPWQaPN/fyVOJDokaCVfvKlbUkXSTTJRwkWCSxJJoke/v5P1oRH0kkxSyCUTffzkCqJJHOJFj9/f2/RIknYt4SRJLMki8eMzGeySdNFW0lMSFZ2AW4lXgbneyBWck+SDEpn4FHTKj370oyNxx1aBPUE+SSZjYLda
8i8+NmpnI1pJqRJsxjTl0I/HuCWr0LHHfLMFvVIyWaG7ZCabp27jR9+bmeQo6Ta3JWXTwx59JWzJpyebtZMr6Zw8stk733Q2z2DOzAEZbDTv0egz7yV5S5QJjEqWKeatJK+5pwsPG821fYAeX0gWvoCmJK7CDutSX/3Wp357hw0hXjTGVH9Fe8V6mTN7LeBtzswHGsnSCe32g2KM6vmTyvbJlGEPmxNj0Zcd+uwLdOaDLRLS+qNDo6C3FiVVJeXZjU4/GewAe//OO+88+mCOe9pTshmas/TZmzNpnR466OpeYwckJz3JQt/PZNu39ib9ZERjH1hLe91+QZs8tXWnK/isHZ/rua98DtFV06kk+5QvHX4VwE9ALxaLKwebAF48H37t137twoc+9KHjS1N8B//DF/BB/AJ/4LPzVOczcF5zbnr22WcPHn56sVicP2wCeLFYLBaLxWKxWCwWVzUkg+6///4j4SWRIvEisSuh4u9fQSvJF0kYATD9Ei2SZIJbAluCYIJhikSlv521kSnxiU5wTF9vUOoradtbpxLFeL0NDAJrvT3qLVxBttokg/yPWjq1kYdOuyQyWrYahzFKotGhX4BOApQt7GsM5JKDVmKJPDxqPBKj5qMElCIBFn2ySyCWuNXGRv3Z05hrx2/s+NLdeOIveSbppI0MPPTTaS4Vco1HkhQNWsA7E7Zs05YN5qvxkFGiDOhBp02ii3z0ErYlh8mKp2SZJK/+9JhDe8nc20/ms30Tn/3FbjYmLztK8vX2OOjTznalt3zZfNpPl356z+q3tvrtV/rTp0+S3FjIMI8zEarWjl8x5/aWtY6mfYZOMef9r9v0nPbbA/aMPjaVqI/OZ3Mn6ZoMpT1qHoxPm7duJ6Y91tH1ddddd9CG5KjRsKFkcwVfdGw2bvvYnDU2fZLH9hU5+Jq/5h9NuuwVzx/t5ghc06UYlz3ker6VnD7XZJk/8kNzoaZDbY+F+iDaxlbfabG3gb0SSca9WCyuDGwCePF8+OAHP3j8mgc/kS8Az3klH+Ha2c3ZJZ/Id/vXKn5Bx78cWSwW5w+bAF4sFovFYrFYLBaLxVUNwU8BK8lCiSRBLgEuyTl//0roSDhJiknYSHhIsAh2/fd///eR6BHswuONPclDyTlJIMEwPAJfZEjaqOkREKNTgAy9JIq3Y9USe2ySLJIMZhNaCR96FPr9jU4WGyXcJJDRkilBVyJVnwQy/fjUJWLxGI9gHZt729XPSzeukpTo8bGLfDXZyqSX6CrBKdGsLfmSX9rxSGBrZ09vORq/JKk2yTb2mOeSqJLmk6d2b9CiTz8d2iW5ZsJWwjD92Z0u66etZG4wp2wnA2ZS2X4wh2xHh++HP/zhxSSvdvbgVchOPn3Wzme87S2lJKy9UKKtYGt1+zGYF32KtbdegJZMc6MPnX0z+ZPbWuG3t82NNZOExJOOksjpAH3tAfcO3rOSpfoVY++N2sZNdnNl/swBHfaB+UKbHoUONT73Xnoaiz68rrWTpYZ0ZY91YMMck8/0RcNmdpChD+pX09Ve0t/82QfuxWjpnYlbpXb85s89dsstt/z/dJGZPej1GzsaJXrjAzaD9on24dxDrtHNNjrSU7v9CtrIUNj8ne9857B/sVhcGdgE8OL58L73ve/43/f5GzW45o+mz+jLc85L6Fw7R6B75JFHDr7FYnG+sAngxWKxWCwWi8VisVhctbjnnnsu3HDDDUewSkJP8kYAS+JFErLkqqSh4FYJM4kVRZJFcEugq5+GJkcCRGKSLH9DS5RJqEnmSfxKlpTsVSSI8JFPJ/3otEm0oaFX4pEcNAJrkquSQSAhU7uEp4AbG/W7lkBmnzafFfazER9dxupauySYa0kr9uhjBzpJaXLNmzo5bJNUQoOXDnGE2hXXAofGY+7IZbuaLPbRbRza4zHHzUHzQU/JaXNHhnZ8EovkCUyWsNVHHp50ka1Pu0SzeSLfZ3IknNkrkWku6iMLSoDZI9bdmJOnljg1lilTwd/PgM82c6mQYw7tH/uDzeZbUlRhd/sR4m197ZuZfIX61NbUms1+fZXmyz6e+37SkGGt6CVDbS0KElsP6zBp6lf0m1NfKtBfIVu/+TRn5kAiNBlKMhQ05qeEKkxdavNvza1HMpR0oSGHzmimHHRq82Id5tyxS190aOztmbRO16SZcho3G7NH0T7nzxpon+Nq7Ggq+n0JgR1o2ifgmix1e/K0P5DFLvLoBv14Qp99kcX/ATbuxWJxZWATwIvnw+/8zu8c5wD+iq/hE2D6zc4wzjP8Dd/Dl9X+qle96sKnP/3pg2+xWJwvbAJ4sVgsFovFYrFYLBZXJe64447j/4+W3JSIEaiSjJJcE7xSS94JcElySLxJxkiElGiRYBHsIkeySy1hh08RNJNQwuuzRGE0JRTp1uaNWHrok0SRJCopjNb//pW4KRnjrUKJPigYp10QTjJT4kbRhq+xKfSRaezegi2pZCyS2ezqrWFJsZKbxtxbwOwihwylN22TI/llDEpv7pqP2uOhm3zt2uhWJJrxmFdzqA3ffKOXrdrMmf7eKp6JV8XakodPYlQfvvrMD1gXNpKBlj36JWslh+lpPySTvNZIn3b9ykzy0qUtmEv7CZJnrRRghz2arWxs34nNGL84Tbq0twfYb07tn1C70hcT0g/4jRmfLwuYd2ML9GcfPWSULMWrbh8p5oPt7hH98dbvnjCG0zdh6XdPmC90arYYS3qMAS991syetBfqV0dDv+vkzISq9uxxb6r9z8NsgWjoOstm0KfoA3SS1hPkoFGz2ThPE7fWvLGj676gJ5vJyGb7zd7wVrt2953xmXdffMg+8sA+0dZ+0d6erV8JaNOVrICHjGRJ/v7Lv/zLYfdisbgysAngxfPBG8D8a/6WD+cLPPOV/Id2dJ7//J8+UDtHOQ8888wzh4zFYnF+sAngxWKxWCwWi8VisVhcdbj22muPxIsi4SKxJGkiWedNBwlSb7NJvAhagQCXRAc6gS4QBJNckkiTAFTjJdNnSRjXZKglSyWZtEveCpZp+6//+q8jESNohlZCx7VAmlKQrWRvyB6JHvokI0vekM2WmaQlVyJWII+tarKNn1461JI92skuYayUiEwOGcaVHIkreshRS14VLMSrXZGEEwTUzr6SU5JXZJJl3Gwx7/GwC4xHG5SwJW/yiF2Ya/LYwBZ267PO/UR0uvUZY8l4n9NR4ktSlD4y6VKjsS7iJWRXID7zOPu0G6PaPpNYtm7xdd36tqZqfIoxmbsSvPFqV9hnHMYT6NNeabzas8k8VsyluTW2kOzm2xyUmNVnL0h440Vjrt1n+rLdWkRDBh19kcJ9wa5phy9DmIeZACavxCQZ7dlo7Bf3snWccowFTeNNjn41eu1zXiaNYm+zOTmQDYo9gg7Yjcbaoanop8s+xGOvstd9kp5o6GIPu5R0tac9K9rnEvLR+Zx9bM6OCW3ugfbbKfRn7+SftOnw/Pn6179+XC8WiysDmwBePBf4Ygng6Uf5A+AD+Jl8h3ZnjvwZ5If4H1+6/NrXvnbIQLNYLM4
HNgG8WCwWi8VisVgsFourCv6u9dPPAlWCW5JOfp5OkkXyxd+8glP6SlR5m7a38CRi+v+9Al6ScJJeajIkXtX4vVmrpktCSi0wVrBMcEzyRcKI7t761UaPpBh5/Qy0hJPPJXwl0ATWJF4E4UrYavf2rnFIDCklcEoMkxEtGb0FbMySSNrYQo42YyKH3eTQryQHLVnq//zP/zx4/N+45LPf27lsMt/Gq00CFY8kuHay0Cv64rnmmmsu8pg/tXHHQ482xTW7zB8bk4VPAWtagph862Md7A+0FTC3dJNnDaxjfWAd+z/AU495UvTZR9p+8IMfHHoV62o9rBtkGx5B0+SyiQ1KAVUFP/vtRTz49Vsj/fayfvwhXjTGVKITH2g3H/aCfnNrzNM2NPoV/SVv6bNO+qNRl8A0x6cJXrrMGf0lL6G+avvJPai/kgx99pCaHnZ4c/rUVsW9OuXAtJUt6Wpe1PWzxX46nTs12eaCLWh97g1pSE6FLrbiZ++pLsWepMc+0U+mvWM/2es+s9c633XXXQeNYo2ryXNP0DdtCdaWrAL5pyCDjnjQVKB2+/HZZ589bFksFlcGNgG8eC7cd999Fx588MGLZwY1ePbzL6HrfAXaScNH8DG+5ORM5tciFovF+cAmgBeLxWKxWCwWi8VicVXh3nvvPRI0EiiCUhJSElQSKf7m9VnSRfJSQlbyC70knaSh5J9EiSCWxIoar4SjxIfP5KklvNSSL64ljcgs0SIhI5nUm78SeRI9dEh8okOjDS8aydWSPgXZJJ30Kd7M1WYsEm30K/hLrJLNHnZKaJkH9pdA1ac92fjZMOWYE0ljySj07BHsU+tnt3k0J+ycSSiJIglD48CDhjxvi+JhFx79eMxDfWgFGdHEY/6Sx272mGfBRrwlt8C1+ZVUMzbXxhlmstZPTRsPmXTbG9pbl2D+FOtXn/8hzSa2aW+PmUM0xq2Y975QAOQbn7nFW4JYu0LPDMaSN/vJ1KewW/98U3X2lyAukanoN3/mzppZq5KpyShBSb6x4pHotOeyXx8aY540ySHD3Op3X5krPzeNrtJ6orXWbLVm+si29+i0pmjJNabbbrvtoPFZnb1qtNbCmKNhrz669NNlDuhKBhr20tfcSRJrR2tv2PfkVMjqJ56T05woZJDJFmsUsqeCRr/94Geu2dUcN8/2lDb7RZ2M9iAdxmV9gD3tp/YOe+sPc37UIR4lGcb/yCOPHPOxWCyuDGwCePFc8P9/b7rppsPP5FfA854/6xryQdNP8A3V/ItzNT9kv/Hni8XipccmgBeLxWKxWCwWi8VicdVA8tf/95QwkfgrKaf2Jqlkl8SXhImEn4SU5Jo3TSWpJOn8ZK8EjLYSiGRItpIjoScB4rPki36JEdcCX/6ulqQpoSPBSa+g2UzmSEJKDJaALJBGfkUyVlDtRz/60RGMk2hjU0lab+8KstFNTsE5SUH87GAnG9CrvYnrLU52s1e7ZGZvrZJXYgovOXQmJx46JJ3U2amgQR8PWuPFR5Z5UaPTjoctbCXPtXHqm3qaj/pAMtd6avuP//iPY53YXzIXnVIAU00vOvPJRnY19+2B+Mw7WYr1su4l5fCRx37z76d+06W2DnRYe3uMjObWNZ2tlf5swK/dPlGbD3sKotFXws6cGIe+SrzdB/rRW19JWutSv3Z2ZQOb9TWX5sS4JYlBPzp8aOy75qFEtEJGc9V89QZwhYxK+1Nx35hP685Geshgv/nq7V0F8OtPjjnBq3/aaxzkm38yySHf+tlH+nxuXtDhn2/51le/duNOj/45bvNijjxrsqex62cHiMX5n8LJURo3WnucHHZ6Dpkje9x9Yazm276jb8LakAlkKNom0hWdz+1l19V43XOeF4vF4srAJoAXz4U/+qM/Os4Inu/5bM9+n3vmz5JfyIf0edbOG//2b/92fMltsVi89NgE8GKxWCwWi8VisVgsrgr4/2MSIhIZElwgGSR5I6glceJvXkEpyWDBLEkvP9nb/zCVhESrJkdyRXBM4kUtIUa2RFS1pJHkliSQNm/f+qxI0qjpIVNgTAKlAFttakkrsnorVbJKLSnDdm8GaysJiZ99EmJk0M1GMpLDvpLgElOuJYgkJckp6UNGiSR1MthkDo3BZ7xda/dZkpQsSSr20imuIIEnCGjO2Iumt3bN/fwJ6xJwBRn7P8YluvTRpZg7QGfM9DYGbUFy3X7Qhse6k8kOiT9tSoFM8+mzpJ5xmK8SzvTis0+MP5mT1/oXAFW0KXjxWZvs126/Kd5wpk+CORgP/fqtlXmY/dpLRJrrErihfoW9dJpHX45gI/14lcZZwjTbrE00+o3f/aMftJtPdGS77g1fNOopw34h4zRZqg+dcehH5wsK6WFLMtAp+Ke9yVGyxZu06Fzbk/YQ2WjJsIb2DL0zuZ0OawLdv9mTnnSiKyHNVjLsZzx40aij0Z8ctintbTZHo46GPe4dusj2P87d8+mrxmOM6tB6J7c9qL0+7Y2ndogvmDNrtMmkxeLKwSaAF2fBm7/eAIbOJfyA6+Dac58P4zv4hvyHWrv+zkTgvMBvOYM5vywWi5cWmwBeLBaLxWKxWCwWi8UVDwkRiV5JIYkyiRLJv96yve666443en2WvFT/5Cc/ORJAAln4JFkkYvFKgkq6STBKHKIrCaaWJCRHUqnkqwSt5IsElFpAjFyJPzzeLD5t89aq5F1J2mxhm0BcSR3XbCphTB8dbFAkUyWGyEJLFnq0SolhwTp66VCSo02tX0LXHEgGGa/EkySlsZa01c9mdclt8gsUmi/8jbWkExo8jYdOY8ZDHtmSWXjI0qd4K9dnctQliCXVmmt9MBNh7CeTLT4XoBT7INfn2vSbD7YX1JzJMjLjq0D6JPb7medsj9dbtOYpHijpZrx0SowG8rTrV/TP5Gu8irGZL3Ndv1q7Oernl++8886LfTD5ra8kdXrjn3NnbtioT2GDvvQ03zPBmwylvWc/6q/ok6DFR4790duylSnHmtgr2atMWxS63KPmuzeJT+X4bG/7OWlAq61+tGSY98YE5mfqMm72WAPjoK/93/z2pjA7yHGtjx5z5r6IZs4PXebEfU1m8yNBPmm61k9W90JobIrr9mfAyxY2hWj1QTLM7eOPP36MebFYnH9sAnhxFt7znvdcuPvuu49nPJ/hGZ8PcN3ZSJ1f4QPyL3xB7bWRhd55XH7pySefPNoXi8VLh00ALxaLxWKxWCwWi8XiioZkyM0333wkISUVJQUlWgSntCmCUxIrEnTejJWskbjRJ0GDT/JELbEh2SVpVo1e8so1PglktNol/gTBJJP00VGf5LFAmlKSFE3JYHTa2StRLbgmAKeNvWpy4lP8/Cob0SmSQ8kgj51KPOahMtvol9A1boVMSSBxgeSgQeuzwB4dzV069JvfdGSvtvhda4vH5wpZ5lmf69rNGagl1kpuS4CRqd3bJdaiRKtCp5+5lkQkM/1ozK8iKSpRX4KY7N5cNn4yKujB+Evy0kOeNv3VbKlPXZ9Sgjc7Z4LXOHoLFeqv6Dce7QFfSUYJ3P
kzxWp9FXNurMYW0h+NObC+2aytN2fRmbPe8K2Yu4q1o4eM+umwtyS/G6d9hoY+SU/3Kz4y6LRWkrvJAO3147Efe8N36tJPn/kH6zff3mVD9hqbWh+adKGZ80LeTDYrUw4d6Z9vLk+b1fatsaK1Fs2teUuXOWlcdHje2FN0JMc1XSWK0ZFn70o+028OT5FdYH7IAe3kJGvStd+0tafh+9///nEvLBaL849NAC/Owq//+q9fuOGGG45nv+d7PhS08RGe/Z3Zoqvk7/kwfQEPeu38njP3YrF46bAJ4MVisVgsFovFYrFYXLF4wxvecLzd621fxVtyAlElISVvJAAlRsDfu4JaagkUiUdJFfQCVWrJlJn4lZDRRo4kHh3e3BUAE+Aq8asvvd4G1i95Q5b+mQyWwNKmSPzCafKWPv10lUhT6GF7utToSnRWK8aq+JlriTf8amMlkyz2ncqp+FxyNnneEJaoMj+9GWys+o1Ru/ntrV19P/zhDy8mvSQS8WSjn4ku+YonWep+tlnikY3xmENBRgWfpGBjogcv25KnAD5jsobNoXG2LsA+ewkP+4Ee+0otWTihvSKxjzd5s0/CrARsfRJubFazXz+9s79izqfu+Ga/9WRj/YK5+oy5JHP95NffPeCLBOSYUwllexJNdPYOHWSTEy+71HSU5ERbgnfqaU7dVyWc06G4n7TVpzQW/NV0WMNotNHnnkuO+WxOJp1+9thLxlvyXCEXjX505q6ktX6Y9vST0+Zg6krO1MVecyuhjD4d1WiMwR4wjr54QQ4dan32Cn60xmjtWpdsNs/saF9Xk6ddvzYFX7yNUXv81Qr99rlnymKxOP/YBPDiLDz88MMXXvnKVx7Pd891PoAvArWzEX/h7FV/ZzrF2dAZSz/fgRaic+5Wf+c73zl812KxeGmwCeDFYrFYLBaLxWKxWFxxkGS77777juTS9ddffyRHJEH8j1PXJVzUAlSSrBImAlOCVIpkl58SFsTCK8mj7o1eiUSJFHIkf9BJ7AqKSXTpc01Ob69KjJTokbgls6QN/eTj8UZEiRbJSLzJU8jwFquES/Lxpic6wTW0s/aGsAQR3en3t35ySugmp598loQ1LslZcthsDCVuJcrMp0QTPrbRSUeJowKFruMx1+YDLaj1WSfrwRY8IGlHH5tnIksyt6QsfvK14zWukufaAhpzjqc5aZySa9qVxjphvqCkV3tGsZfsqZJh3qImkw3GQ3Z9k08xdwENenOqWBf2BvbXp1j/EoxAnjnSpz7tV2tXJCmtzfxpZYjXfNv31t/9lJ31V+iwlnRbW+MW5K2fnBK4JTnRkmO98YA51zftwJs+tPMtYHTRKPaAtaTb/jM+yW33UrYkxzMCf3LM+ZSjpOvUHjLMm73VuCv123/oje30DelsUICdt9xyy8/YEw154nH2fG9lTTn60dlX+NBJXOs3JgWdL2Co7UdAa6+Fxjjb482mUL9214prdj722GNH32KxON/YBPDiLPgJ6M5Vnu38gOe/4rpzUX5A3flLzf86K/GNzoTOD5AMcK7hM5566qnj82KxuPzYBPBisVgsFucQ/LEgkgBigbLFYrFYLBb/C2/9vvGNbzyCTxJVAkwScpIwEiwlKvrpWslbiZMSNQJTanzeQsQjwcP/+j/B2tDjx4NOYlfCRYBLoEufBIvkUG/2CpYVAJOsERQjMzoBM/LRsptOMrWXiJXMKglDr/ZZ0CoSaN7AkwRjq6Spa0E74y1ZrNAdb/9z+JSeHexHo18yCMhRSrCioxud5LlxSrzRoV3p/wRrx5tufejIak70mQ812hLtydMH5kPC0BxJcJFvrnrjk+3ph2Qao7FIDKYLrJO1ia9AJz3mX5K3N3n10SvZKV5ir5WkNldoSorZL+zUppBnjpXkliBWtLNPzSZj0g/126toZj+5oG/2SwhC/drrt3b4TxOv9o+izzqVVFSiyU6JePzuGXTmUJ+5IMN8kF2SUzEOvGgUvGh7GzldaKKzVuyxH/ShaxwKO4E9kqWNScEfXXJmYryxKNZUTUc0rieNhDZ7JZIhW+1Da+GabXT18834ydKHl01sLjlO1xyXfnQKHu0z4azNXraP0dj32umjy2frb8+6f+jqniar/aaAtkqfW+fZFn1IhnvV/3Y0B4vF4nxjE8CLs/D+97//8DOdf/iU0LXnfb4AHd/ic+fnzoj8jS8t6lPid9bT9+ijjx6+a7FYXH5sAnixWCwWi3MA/7dQIPmee+658JrXvOYo3r4QPLvtttsu3H777UdAp58rdPh26BbQWiwWi8Xi5YJXv/rVFx544IELN95445F44Bf5S0lNyQsJKYlDRbBKkkXASVJO8kQtkVLixOfeLOVfvQHJt0p04v3xj398+FzBK4k7tYQPH4xGMlcwzE8GF0AT7CoghkZSSIKYXolGOsnRjp/dbJMAIpsdJUbJrviZZGORYGMjPvTk0VPSFB893ngt0VtC17jNGduq0aeLXexPVm1kSTBJlpU0Zq8+usQQrAf72EWudgmp+UYvXjK0S6KaM3TZoBRg1N6cWkv2kF9yG412OP2pZ+tGF5usbbLU8YD1dv7Sps/8eDNbgtLcNncl77JNIjBb8Solz8yrLwuwJ5Cr6FeLuwSf8U7+mUCuX5/a2tibIMiqzdrqL0k4k6a1K9bL3PcWcPboM7fRlDANaEpQmg/z42yaDkWfgs6aAxn106VPocd69mZuJTvRmF/72v6OPxo2WC96zMfp/+dNjrlBY77NCWRzNApdZE570DW3avbaD+bH+Ol0r06akrva8bNZIZv+aH3O5uiMWTteNNbRMwqPfWw/sbM5VNCwCT/bzZXrZJJjfhqT/aQO6JTa6MI3aeD0MznuA/e959JisTjf2ATw4ix89KMfPXybswxfAJ73Pqv5g84hnZ/U4GzFx/NF6J3LnKl8xjvPL/yaLwz95Cc/OXgXi8XlxSaAF4vFYrF4CXDNNdccbyu8/vWvv/Dggw9euPbaay/ceuutR3BHoMYBWnBOLdjtYK5dEdzujQuHbt+0XCwWi8Xiagd/6Y1BRVCKnxRgElyStJQckZiRYJS8A0EnSRWJEckSwSfXfKifcuZf+V5/A7uWaJF0kcAjzxvGZHvTVbDLNTpBLgkQSWE1eRJ3+iSjBb1KhrFVQk8/mf4Xrn62s4WvVyS7yFLIYK9EnDoZnRHUdEUvaSkRxH705Bs7OjZnG/0SWP1kseSnwJ2AntL/4pXIolsCyRjwK8kkgyyF3dqNpTGQiQaPxFc82sHYzR39dFojxfqVkEdTEtuY2FUyV5my6GQvW9lsrNphJofxaRecVHyW2DZ3+OhAR56zVuPBkzyQ8LfmtSfPXrSXjNsecU224lryVd9MruKzvkoJu9N+7Qr5bDKn6Yx/9luP+grA6rcGrs1Vb8zOfsXcTR1Avrltfsmx18xBNGRMGmtmDrND0T5llGhmg/7GodiD7qGSsuTTac/Yd9GRp5+c9JCnr3ElR7sCk98ak+l+j1+tz9p5nhgPsNHPzOuHOXcKOs8Q19lrTtOlpgsNPWyzB
0vw6ifTPnTvuifYni70SvaYx5tuuumwS9FnH9tvZLLT+NRK+3ju2+YE6Jh7XT9Mvu4lfY8//vhh82KxOL/YBPDiFPyCBHBnKc/zeUbKL+iffqDyqle96vA//A3f49yg5s/U8aj5QD5p/23AYvHSYBPAi8VisVj8EhC8kcz1LX7BIYFJQTeHYgE8QUsQuJHsveOOOy7ce++9x89W4tMuESxYI9HrgFygtoCUwKV2QSE0+h24BYAFdl75ylceh2r0BYMWi8VisbhawNdK/kowKpI0EmmCU3wpXykIVRALJLcEoPhTvpjvlGySkJK8Qy9IxX/yrxI1Baj08el4tUnk8e8SxiBARp63TPnh6LzNCxJR2pwD+Gp9bJSkBTwlRBXJZbaxgS/n80umJoONZEiWlugt4Wmc0dGrbk7wleyNXtFnvsyjPueQ5svYnGXUbM9OhdzKabt5UVzTq11dskhpjbTTRaf5oqegomu26mMDWu1g7awFOcmmx/jxaUtXPCDOgVbRrj++1mjaqNBlH6CPz9pUStLWbz8WMJX0s4bGyCb0xtMc+4JByVP8+CTR9FsTe1Z/0F5i0LnQ/NjjQbuCxh4+fSPWfrHvklOCl95ssw/MrX42lLyNJvlKc1eSuLHXH429ys5knPbTc9b/zLVHza+a7ea3M/apHGtmvp7rLWCFLjTZa7x0sY8eNJ4H+qyb+TWPbDNX7g+2aLMuJc8hmyvsQUdHP3HtunsvGglusFdOx+U87zMavxLkuvlnJ3pzQh7b9DWf5skem+trf3c90b7Vrm6Ph9PP6EG7MZArqWTvLBaL84tNAC9O4ezzkY985PAfPec90z3f+RZ+y3PemQ7QAT+g8E3onEn4Ib7SmclZma/Kp+Rn+M4vfelLh4zFYnF5sQngxWKxWCx+ATjcvuc977lw//33H39QCUBr8ybva1/72iN4JDEr0evN3te97nXHTzkLxgioCWYLaKEriOZNmIBGoMpB2kHcoVxbP6vjbQC8IKAnECWZTI4DON7FYrFYLK50+NKUL0/5mWe+TXJDskESWJJDMsRnvhAKYvG3/KUAFz41ulnj5WcFtyRO+GP+VJ/2/uevRKAgWElhCT1vEvvbmd/tJ+3oxMtvKyVCJYYFwPRrJ9ObrYJrChn0Vcc33+o1BgE0vp8ONXry0JKJT2GrABx6fCV02R49Grq0J1N79kXDBskodjh7mAsJK/8Pl2xFkkq7NTDO3uhldwkzshTJaOcUCTZnofRkN/3Nf+0CiBPkFYCMxnzS73MF7AfF+alk7qRRklWJDyQH8RTE1G5elWRC+tCgNVczQQutt71mLepPrj4BVsXalmAG+gRU6zdH+vHRN3kVc0xG94p1ro8cNkiYorN+rtlU0BeNNaKDbtCuX591xVvytnnRV7GPrONMREZDjj72TD0+dz+Tb+9pN0fWnQxoHOS4j9W9lVtpHNlib9nr5NLh3Gwek6PY5+wyH61PMirozko2ayc7+DcubGfHHDcaa4bPm7v6FECjPxt9Nlb221PuPesajQLmQDDeOpOFh17zZ608A9qjE+z4eWiM3QPgujH4AotfLFgsFucXmwBenOKd73znEauafsZ5ig/pS0jaeu7zKcCXoLvuuuuOtr7w5DzH35PnjEEOXrV+/vbP//zPDxmLxeLyYhPAi8VisVi8SAj2/f7v//4RrCqoJsjJhwq48qOCSv1MZUFM14KdAl3a0ONzKHZgdlB2SBZME4By6HYYdwAXMHLQdgjXjsZBWjBIgEg7kFmAWTB2sVgsFosrFW9/+9sPX8t38oW+WMUXSg5JgrguKSFZwz/yqUqJnhIn/CKfO/0vH46uRJ4kSf/zl9/mz+lgg8AVWfy8Pnz91DO/O5OxeCRI2cM+cvHqkyzR7uwQfTyK5CnfT66EHLn41ejISVY87FEka50N4kVDx1n02vTVJjFrjs2Zc4oEriAe3ebMONCVMDPfdJgrfWThp9vY6NNOl7nWZ1z4ydE3f5pZ0S6RKyE528kwZxXrQiZbnY8kv8grCd18oAU2KRL6eJ2x2EMPG+yHqQvw4pmJYwWSp+CdffhaA/siG8C1OVDMnf6AXzteNbucGwHf5FXMY/yn/eaEPDa4ZwLZBXrNnXOnNolTQF+/2ro3hsarnXxz4nomb9GlQ7EnSjQnQ3tJaXJ8pget+9T52b2lr2Kd6EvPtDUae0fbqS360JFhT1t/46UjOWjMh2Le7C3nbDKyOzmKvX067uZUKSGtr4QsGn3swKfftT5zrFbIsT/x2Y/2qfmS3GVzdPjZYB+4bv/go8dc2pvuc3sN2B3Yg2eW4Pq0v8/1V5t3er/97W8fbYvF4nxiE8CLU3z84x8/ztd8iOc4f+IMxHfyL3wQ36z2vEfDF3juo/PFTP7IeUUNfDUeZ2X0k48P++IXv3j4qMVicXmxCeDFYrFYLF4EvIX7sY997GKwSiBXoEdg1uHYm8DeAvYTzYKjDsiK4IuDrwOvQ3VBUkEegShBSQEycgRv/F9ftPQJ4gho4hMwo0/QCBy+BanUaAV30DjAs8OBHv9isVgsFlcKJL4eeuihw6/xfQJUJUsEkvhQbwILLJWMEICSSJGgwZM/lRSU+OGH/a3rbVx+Ga1kL//Jx3qLV3uJYH6ZHydHUpJufl4AS1IFL1pynAe04ZcURqevpG1JXecAsrVV6Ffj008eH06eoo9cdNWK/yNsTiSHzId5MF62TPmTX+nNXecDZxDXbEsvfmecqVvxWX+21a7gibc2uvCTVZ82feYHjNeZhh3WjS1ojEXiVZ8gJFsl5RSf7Q/ryY7oyQLz7rM5L3lGTl/Ya5ztG7BH8BXkJMt6V9sH2mdf/X69BS/or0/pZ56DNntTkRh1zpsJZP3WomI++mID4DNP+vDXjw+0k6mYZ3M/+dGZS/PSXN95551Hf+UsHRKW+rSbT2+h6veZDvNKdvabbzTJsWbo7FdnVvT62MJWsvE6QzcWduhHp7B5JpKzp2IsaCSQ9eNH05y479TW+FROdqjNm337fLrsO+PxLNBHFzr701yRgY49nl32txib55lnQuNiWzTazJvnmntFu8Iue4MO84bOGtg3xuxzvOSRb82N12d8ajTGhYfN7bf2nqIdzFE0Z6H2arRPP/30Mf7FYnE+sQngxSk+/OEPH76Mr/E85z/U/BOf0TkwX8GXuOZHFG8Aa3dW4Pf4An4KD9+jffoVhczvfOc7R9tisbh82ATwYrFYLBYvEHzke9/73ovJXMEth1gHXX19M1+f4IuAsUOxb0f++7//+0HjQKyvQ7JApMAhXsHnDskCSOgFmB3GHcwFjsj0mRxBoQJOav5bULNgnTYBrILV698Xi8Vicd7h/+L7yWcJO4kOCRFBJD5xJv185mP5OlDzd4JWErpoJUV85q/1SSqSK2nHD0sWFbjiN/lQb9GSy1dLytJFnoQK3d6UBfrzxwofjtaZQAJHG9/PLrLQ4qdLQav2hS80ycenvRqdpK3kmURagTny6CqhOeUmm35nBXzxGqe5YaPAH96pa9YVn43becNZwnxOPc4m
1qokqjL7zHE8dJlffeywLtmgHQ3bzEd95qU+sq2jfZEeZ6cCi85LgpRkk6mdLnuh5LCiHV9l9inWpFKSd/bhKfE3x+1z7WrzjRafmm0VNtobE9pLDhp7/ECmPnLV9sDsV9sb+nz5z9pLuGZv/RXzJTnJBn1sJPvUButBnjOl/Va/Ym3dR/aXPSo5b9300WE9AE8/0cye+hQynF3xZmdjnbbSYc9H0zzot9bWWDuadNiz1gcdG+wFYzaO5Mzx2Mf48EfjeuoyF+p+mcC47W9zFY1iPn25wDONLHLo0hedPWP85Dd+18mIxjjcE9bbuqPTZ43Z4F52b7pmV2MyZ+z1vKLfPAYyoH3dNbSXZx/0Ga8anTn9/ve//zP/ymaxWJwvbAJ4McFnfOhDH7roczp7+cx38CH8Rb5ArQ2ta89/X9rqfAX6+SdnG/KcodFPv8E/feELXzjoF4vF5cMmgBeLxWKxeIHo//sKYAnACD4KrAjqCDYJXgn+ONg64DoQ9wYBCN4I1jgI43NYfvWrX33hqaeeOvjIEDwRxBLQEXAWGPY2sTaH8YJQAjwO0mgc4P3Mjs/oHboFogWK0PmMhp39n8LFYrFYLM4b7rvvvuMna/ksvk5iie+SwJDIEpziWwWWJE34Yn4x38jfSbigE3jyma+uzd+5askaSRuy+F4+uMQuH46PXy2RLAjWT0PPZG482tHx1WTiBYkw+mrP9hLDbKZb/6RR6NMvWUW2eeDXyaM7ufF5s9nZwVnEWYP8EqgVfPSnw/VpkWymUyJMTSZZ+PUbq/OLvpJYZDmDeAsbHxsEAp17ChRaL33OP2Tit66Sc8klZ46p9mpgC8ykq7Wmk3wJ/mThcwareCPXmrJnytPn/KUP9LU+iuu+cKCg11aZSdz64qWTrdpBX0FU61pyORR4VZuz+VPEgC8aa2KN06nW17q4j8x394n+ZKvdA86JM/E6ZXTWxNvPRE8Z0bivfNmxRDL6kpOKdWDrqR1oyFDbz52Zp4z62/MlgPWbC33KvJ/R+tzzI13RscmXKpOjTo+aHPsJTXqyx3zaC2SUuJ1vLuPXZ17sb2tsXPqnrmjAHM//F9y4omk/dpbPnnRoQ4vHs8l+ci+YC3PGBvTmoXuDHaCmH6phtkULPs8+8vT7UsYzzzxztC8Wi/OHTQAvJvjH3/3d3z38CZ/ZmYgfUfjtfEUl36SPHyODz4lH4Rf4erz9+g3g54ecWT73uc8dnxeLxeXDJoAXi8VisXiBuPvuuy/ceOONx6HWoVcwS3DIz0A78EoQe9PFAVpQRrLVgVpQRvBHYFGfQBsZDsRqARq0gpaCND4LopGrFmjF6zNZDtqCwOhAkLg3a0r6ptdncBjn4wXwtAuALRaLxWJxXvDggw9euPXWW4+EhS8+8VUCSBIZfJlErsQg8GkCSfxgwSWJGX6ZL5V45J/x4vH3rT5+kC8tQMWf+sw386P08J98Mlr9rvlXuiRg9CcjGm38v+CYoBZfPhOZFUlafAJgeLQVZFMkX50vjI1c/OYDrWu0s5z+z1/24cOTbnaZG4miziF41RLR5so8SXiRYw7MlVohK/vIJJ/9jVs7ea7Rl3RGa6z62WX+shG9Puh/7GYPW9kjmczebFUKGJKJzh5R6CavNUHrWg3GpeCvv76Ckop94sxFtgS1zwq72Snhp99cOUfpM2Z7pCQuvdakdVHstcZLX/3kkT0TwOjtAcXepovNjR2fdgW/fvL1K2x3r5jnzqoShPUryVesr7Gmo35nyea2/0dcPxude9Eo1oOMEsDAvgLC5gqde3LOS/1o7Re2G0v9aPWzhwzXAs7NH1304rFP2Gs/G5Ok7LRFaczsJYeOxqMdv30aDdk+s926e7aYD3TmnU66jDtZeBtX1/qsgTpdZOprXNp7nhlbOkvuuibfPWGM1sTfFtYXPxrFHKL1N0myGn/3jHs0kKnYn9Whe2Tu3ejAtYLOvfvkk08e7YvF4vxhE8CLCV8ue+c733n4Bs9xvkKdP+IDlXwt5E/4azTOL3wS8Fl8GxnOE/yjX4bQ5lrBpziX+uLeYrG4fNgE8GKxWCwWLxDe/uUnHX4FpwXeHIgFhATaBFUcer09JEDkYCxg5qDrkOsNJH34BDcFrARpBJgK8BQk01ZfARgBG29GaadH8Id+wUdt9GlX6KALr3bX2tELNHpTw6HdIV8Qa7FYLBaLlwpvfetbD//Gr/FbfKhackMRhOIf+WA+Ld/m7Uq+VeKEL/bmrSAT3yYxo13iToCKz/MrGRK9/C9e/tl1ycz5/4ELTulHpx8/3fwx+Qo6vhUdW9GhL3gmqevcwF421E4OW137Ipd+9jbe04IWj//923kDvfPHqU7FWNIZr7MCn19iiK4SusmpkJHMGbhTfK7oT//soy8ZSnIad+cVc1jAkS31mdPWDo/ijevOSM416UoW2EMzGQtkKXRI1k16YJu5MFclqKHApzNb/7uWvfrJUugzf+QCuenCr9hPQb+2knLOcLMfn3ZBUzW77C3Ae9rPXvPGRnubrNnPPmteclapX7F2dODLbkUfPrJdm/MSnfVbN7X1ZQc79YG6fjU9ZESTHdGYVzLMZXaaJ30VeiQxG69Et2sy3BNq+4TdveFbSY/CFnKM2WcJVXNHfnSeN/YDeLZYe3L0oWF7467PuLI5OfYWe2biWnEvmgv96OxrNrBFPI791oWcCjn6neHZTh87jF27uXCteJ7qN+fkuSfMsfmhj2w2n0Jb94a6aziLvnuT/Y888shPWxeLxXnDJoAXE3zEQw89dPijfLGzjWvPdL5J8TnwKQofCp77Xi7wOR8M/A3f8b3vfe9o42/4n84TzlNf+cpXDtrFYnF5sAngxWKxWCxeAAQge3Ph5ptvPg65AjeCQwJADr4Oyw7FAkEO0A64ErHeusHbWyK+sY9WIFmC2LXAkJ/JEWTS5q0egaCCiiV7fWOSPvJ9Jg8crLULADms33TTTUeQGNjjgC5Qxk7BKodyh3OyBVHxsHOxWCwWi8sFfulNb3rT4ad8sYoP5SMlLbQJFhWE4rf4QokZvouPLZAkGeKndCV1+GV/06r5YP6bvyWD3/O3Lh+Khz8UlMKrv8SuhGwy+GN2AT/Ojyr8Lz8cDx+creT3M8/pqV3h9yWGBdbI6CwRf6WEJ99uTgTSsosNeOIj1xmBTMW5oHY0Jde049U3+U8Lvh/84AfHfEm2ST6ZX2eFkqz62aUPjXb2WSt96OORqLfe5CafTQobfFbMDZgXdMZuDoyJ3frJtwdcn/KY837qGfTVrtYnscwmtrFdMq69AuQnU60U0KyQVzLPeljnCe1osse8QLzGg8Yet//0Z2NrqLDr9Gegtdv79kPj6w1dSD4atTUvYZh8vMZu/q1bP1OMx5w7G+LrPkNHRnYkQ7FH1GToS082qq2d+8R9Xb/x2z/mljxj7c1cfemxTmicvZs/52o1ZAs99qFr/2IFf6VxKNbe2N0P5t19nS79JVHZVoL+LDmeRejYrC+byTZO+xedPepMrx2P+8DcslMhA8yPeW+tsoUdJXfJMDa
6kudeoCN5oA9NutrjPptDtqnZC9XgWt/pNbien+0/n+0Bb3tZq8Vicf6wCeDFxLvf/e4jXsTvec7zOc5iPfM7k2nvmY/WZ/5DAf6Fv0PDzzsP5a+eeOKJox0N8BdkkPv5z3/+aFssFpcHmwBeLBaLxeIFQEBDcE1gxsGXvxQMc9CVuO2tIvC5gJ3+6MFhWBDLAbukrMOwQI0Ds7eXOjh7e0dQiW4HawE6NuinjxyyBYwK/KjJQ3vdddcdwS2BLnTaBIkc7CWa2SBYx0btBR8FzxaLxWKxuJTgc+69994jMMTXSbR6I0G75Cr/yL/xaXwb8FF8mAQJn4ePf+S3+FQ+TZ8EH16+mK8jE41AVAlbb+76rPCr+XOf0bOrRGL9ZPX/ffnp2irs1M9GPLO9Ypz6+V06tBVMq/iJaWCrswY6hd3JJEeiTnGGINNcZBM56eyzYj6NXRDO+aLEkDMCWc4pJZ1Kfqnpby1cm39jME5y6VezRZ8vybHXfAbzSoYiAU6fBFbJVbLrx0t2464P6JJQtjb4Zrs9opDpi3DGaCzGqbRnWp/4wHiSN2UqvY08x2MOJdnMnXEbn35Fe4U9ZAfy6sPLFvOVLvTJLgmIv37t5lh7up0PAxp8ZKjNob2kHZ97oV+vicbcGLNzbm/paq+gNW/ZnW3kqa2ReWYnXshOxb60z8hnt7Op+5Vt2aB2H9OTndaLbjz6yLAns7H5wJst9oq+Eqlo1NmigHlvXgHNHBM59k9vE9PTHGZze54s9tojng362IzWfrBOfpbavUSP0pgUc09Xf2eQSZ49RUey0OlD4xlJHruSow/w3nDDDcc8G6M5tAey2zismbrxz70NtatP+6A2NrmX//mf//mwYbFYnD9sAngx8fGPf/zwH/kDxZmo5zr/5zo/AHwN36OPTwc+hS/iY/hB/g/6UhA6fh4vkIff/wFeLBaXDz+TAHZTusEd3BeLxWKxWPwsBGUclB1sBWQEThyUBT0EWRyI+dGCwt7+kRTWXoBGAEdQRgBLm0BJQcVnn332qAWABGwK1LjGIwBEryCdgCm9gjquydbuQO0tKsEzQSDtbNEmWOaQL5jGDrXPBbYc3hVjMQ7B1cVisVgsLgXe8pa3HEko/oZ/VYNrfo8P4xMlK/m+Ak9q/lObZKu3dfnLEi/8q79n+WgJIG9XSlDwiwJUePKn6PnxksFk+JuY35T8xZOvV3z5Kjm1keVNXbYp+Pli7YprxZe6gO3k1xddb++SUXJS+9Qj6Wv8Cj3oOpckRyHXmNOt9P+Fs6ExsAW/M4Kxkatdv75pY+3xTj3Jab60qVtXtXNLySu24wWBQmcY5x9JrhK21tK5JB3JKljJZm3JIde1MZDTWmlDRwZd1jmZgTznsZLRUxe5ameu2tXWq/UwHv31xVdhywQe/ApeY8ET6rNmxnNWMlOfYs7NnT0dyK9f8rEEr/lkZzJKQsItt9xyyK/Enwzrwk6y9bPPmpKJxlxnp4Ku9dRHFzn+Py06QIe3sdpn5o/MaPTN8bLXGba5OLXVutpn0WSvPnIUe5Oc3nzWn66SreSwp6RsY0JDvrlAC/r6pSCy0kOHPafWZ0xqJXvU9Fgfzz78/n5wn5KLt3UyN/4muPPOOy/aomhDD2wjw5o70+tPlz5/N/Ts6x5ATzYbmosXgu4ttvkfwOxfLBbnD5sAXkx84AMfOHwAH+MZ7to5zjUf4NkOrvMJfE60ina+hV9x/sif8Pnf/e53D7/lfKOdHmeVaD7zmc8c9WKxuDx4xf8cvj/hRlXcyA6DDoiLxWKxWCx+FoJXgoaCNAV9tLmGDsgFiPTzr9oEdgV9OkAXhIte4IQswXCHY/2CUgLb/rcKvRKygoACSXTy12QKTDl4g5+FE6Cm3wEbjQCYz2wQ/HHtYC4ITrfgOLnsFTxSBLEE1OnRv1gsFovFrwJ83/333398MUnSgp/k3/gbvkgyVcCIr9Xvb1QJK74rnyaZhoff4q/4Qm/N4uFPJfHIkNAj37WELr+Ilr8V6OqnobXRLaDFN/LBfCAaxRe6+GttfDQetHSyL1ryFf1qbxSzma8t4aKvfjUZZM/k66RRZvK4BGvllJbtvlDmrOBv+5JB5jAbZjEXycRLhvFK7JUsTa7x1G6Oo1f0WS8JPmtcu/mRuLZW3lKlL3mKebM+SnOrHR9Yvz6rzWVnJ2vKDqBDElQxbjakQwF8zQV5U2Zy6wN1fYo9iiZoa1+aW2+um69Qv9JexE9ufPH2M9DJ1y+wqt++ILvEraJdv2Le0OjHr8Rv3jsj+hcm+vCD/hKQJR4bo0JH/Qod9ql7zBz3Bm/9EvbuT/LR2A/OqNbcZ8V840vH1KPfPOHrZ68bbzqMx161ztPWxqtIfKq1u7eTgSY+/ady0DSv7CFHrd24XbNNctY+ZUtjso/pmnL047G3ybQOncmnLfjRuq+spZ/ljEZxZnfv2zfo0JNPp3Wzb8yxPjrYBGp01sh6eU6x22f3sb7kshna+xNntbHBWLs2vscff/xYv8Vicf6wCeBF4M/+7//9v8e153g+jH8IPfd7zgN/pJ3vdI7Tx8f15TDXfJBzoHMov+R8gwcv/0aPaz8BzQctFovLg1f8zyHzEw6aipt2b8DFYrFYLM6GwJYErQCPIIogkCCKQ3OBpYqATG8F+yzQUtDM4diB2BtHBVAduB2mtQkICaCkR1BFgJhM8gSkBJOSAw7UaPF2eBdkci0oC+QLlANe/WpBUod1gcO+uc9OyWT6BOoEVheLxWKx+GUgUXH33XcfgSKJB37RNV/GB/Gp/JFakEjQSIAJnb9TJZMk/aLjk/k+vsvfsnwhORK7+PHxn/xseiSYC3jhRcc/SsDxmXgKhJXg5Y/xoiupS4bAFrqSnSU2fXkLyGITPgWt4qeJnQ34WP48G6Ihp7eK03Mqw3U6yTP+Elfm1tjMxUwao02+2pkDn/OM8wNeMozVOBT95tqY2YrXnEgoOdtIOulrvU77FH1kWX/92d2YXOvPLjRg7BL98TVOsq15/1eVfjZnGz48jUHRVilxjHb2mW99JXH1aVOsl7OZOQ146NYn4Gl+6sfbGlfswanLXCvxlkA8q98e0K/9tN/6OqNKsNdPt/Wca2Du2JeOaBTjcPaTeCWjkg4yrA2Y98aCLxpy7CVjd3aMJjvQdM6sD9ihHx0atuJvLNlRvz3jGeD+ej4a4/HMyK6+ZBmNfWqfJEdpPGjcO2r3hvVBq22OCb0xuS65q02/87r5Tp75I4MtyUKLt/ErZBgffvvUdTTGZN+y3Rh767g51G+vuGanuaTDmll/+4RN2vV337hWTqGt+3Gi+0xdv+def3MsFovzhU0AL4Jf+3jHO95x0e/xAfkEmM/16Rf4GX6sc1tAw784Rzt/K/j5OHJd80Ou+TM+8R/+4R8OP7ZYLC4P3LE/8z+AF4vFYrFYPDcETQRVHXT9j11BHHDo7fArAFSAzUFZcFJQTjBGuzdrHIj1CYA5eAuWCUoJMDkYw2nwxv9GREOGoBA6wVuHcAdrh+kCuAI9AtcCQQWO2MW+3lwQVH
J4NwZtAqpqsukUJLrxxhsP3YJmbClwt1gsFovFi8Ub3/jGwydJMkmm8V/8Fl/ky0h9lpTgp/gfhd+VlJOc49dKbnpbF682fooPlTBEKwnizVnXEmclgn1Gx5/25SY+VBv99ApikVvwSrvkBj/Ib2dfBa2fWGYrmmRVouP/+Xy2m4f68UfDTpCUOktPNPl8YyfPnKBnr7Ep6abXWcQZQHFOYKs5QqdunOizS3t92aDQY26aR21si4ftap/NebDm5v8HP/jBYQebBACdN7STgV4xj0B3SV466bJeaJ1XrHs82gFv+0A7WnDuKuApAYdeQV/77IPZ17kN6tfGNrX50F8faI/GvJBXqd1a4p2JSP31qc1nCeD66+vNUXNqL5Abf0lF/datc2c2JAONuXYWnDT6SyyaS2uaHaDGj0YxdmvEjnSwJzvII6s3fJORHWo6jIUd0dSn2DN0Se6mgwx69LsX6GKLebMPtMGcc7W9RU78Ppd4bUzk01lSOtrGo2azZ5h70lkZrf3auBWQzL/++usvymnuzLv1NzfO5dbCHNGTrvra6+aI7WxgM732Ljp9aMhlGz3ssw+MhR0+k+tZhLa5dh1Or09pfO7aLw0988wzx/VisThf2ATwIvgXAm9+85uPa/6ls918vs9nfOCz0HXmqk87X8LvKD7zK/wYOshX+MznOb/7H8GLxeLyYBPAi8VisVi8CPhGo8CXwJCgimCaz36aT7CnoJPDtICMIIzAk4MuGsEdBa8DcME+B2LBZbTe+vHTmP0/QDoKhJEpMC1ogxe9glfQR5tAj4CXQCvZEr8CPoJDbBT4kegVKBKgo9sYjM3hXJ8AmD4BVDpdk0G2tsVisVgsXgwefPDBI1mX3+ED+Z6SrCVd+EA138W34eHf+C7+VTtfy6dKtvB3/KNgk0QiWglSST9y8od8ZQEpiWE2oOVj+TZJUuAH8QhyaUfLVjp9Vsiu4MPDhmQVTFPQCHQBGflS7fUbs3MDnz0T0VMGGmcCdCVe6Zs60SW3//nLNnOMzriag2TP8dRWu7f5JBetgTY0p7oq2mefOasG12ywhuaKLfWB5L6zTXtDct4ZynqzN1kKGuOCuZ9qr1hfbbWb3+aZDfUp2uonM/kw+dhJrn582hVrI6nGZvtO31m8yVa0WyPFfDjfsQtt/eTWj/+033nN3i6R5xyYPrza0ehjn3MgZAPZaNTm2RqV4FXI0u8cq889Rkb8kAylfWmc00786BT7xL1LTzKyNRn0zP+pq8SvsNW4spU8+8f+y1571/W0F20y9LHFmRwfnZKuZOqzlujsLf3ug+YlOejU1hu/L4fO+UsOe8ixj52zjcvZ3lmc3exAp5BlHT0vJp19RwZZnofW1LOAXveJz+jwAVnG7NnqC53oeuaZP/PTOtGPNrBhfg7t6wltxkXPt7/97Z+2LhaL84RNAC+CvXDfffcdz22+l3+b8Ezna/KbwTVfoX/6iOgU/oQPREdG/fnmzjTOLV/5yleO/sVicemxCeDFYrFYLF4kBJwKWHsrV8DDwVaQRjBFv2/oS5R689dhV1BPUrcgUN/ud0DGJ8gqiKNPLZAlKKPgEejRJiAkcCMYpCYHDTl0s8X/DBb8FiQUDELjDQEBtd4iFlRnE9vI9iaxMZFBD/kCSNoEiwSNBKjYRl9vKC0Wi8Vi8XzgQ7z520+x8ksFf/KlBYq8DcBP6ZOU4a/4Rf5MLXEimaqdr5KQ4RcltrTxu4JM+PkrSVPgI/lMfk/QCg96ekvw5l+1kaH4YpR2NvosWFYiUgIaZlI3PqW3ffn4EspThut8afYlQx1dNPSYu2w5pUt2byrz1ejRnNoWrWIsziDOAs4Q5tX6OAegM1/mt77OJM4FEp34fWHN2QePc4SEu7XSp9BjjlrrqbugoT5rbp3p8Fk/3SW+FPYAHvNb4vi0T7He9oKfju5swz77Dl+JWoWsCj46gdzZp5jbQI+ApnVWOjdNufUp1nrKMtfa7V17Vn/210+++baP9KPvbNf9pJTg1e+z+XSPTBpza0+TTUe07k1zTk8yKuRYzynDHGTnlGGdyZC8PR1nxR5gU3ORDPxkuxfQWa8S3qcy6GkfW1PnbXJmIrnSvzRJ16SxRo3D+flUl/1o76u1N3cKOey15vrZwua5xtEo7GWra309v1zjV9hEjjnXZ00ld13rx6t2vyn2tb8v0NnP9lD6FCDfGMyDBDH7rDVa9pFnzs0FmIuzUHtz1h5X3CfG5v8+smmxWJwvbAJ4EX77t3/7wjXXXHP4Br6Xbwr5gc4+PeuDdmUCDVpy8svRTF+Rn0fL5/zlX/7lQbNYLC49NgG8WCwWi8WLRAFQgRuBHkFZwUcBP0EgQS3wWaClgIxrgVzBFoE7tN5wEZwUiBSQERRzIBbYElAlSxCIPodpATe8fbtfLQDkME0HfnIdvgUw2VhwTEDv6aefPoLneArGG09v/bLF4VzADT/5ZBkDOrLZ7w0Jdm2QZ7FYLBbPBX6wtwz4En6HL5Ls4jf5FH18l2ARP8Yv4eNrBInU6PhICVl+Fx1/xaeSq40MvkuSTHKDX6UPLT7yyeFfJTzJBm3o8ZOl8L+CViVK9LGTzy7QxT9HX0HDxwp08Z8ldRu/QrcxQX576lbo54vpyj5FX7Jm0pUvlqxCT2+06a54K1jyVsFXgstY2ILXNX7zRYbPkk/sMB5y9JHtbNL64CnIx46ZXGWrM0NnIvwV8qyJdSOfTgVf6A3u6FsDJV311UYGmcbYnohHgpLNzlT04MVjzqMpydt4tBlrZ6T6FG36FLqei1dtjrVVtFsDST5y7Ll4ILnOc+jY6kwn6YcOT8lKRaLPPDjXkUXO7DfHtVesjfmNxt6w7mS5X83VlGGMMxGquG/tR/KTMfUYQzQliY25OWCDZwJ+n8lwnR7jtM89G9p7niNsEchuPrMFv9pc4J32ks8Oa4HGviR30uDRV7Gf6Gydspct+ulzX1l7cuNHRzY78FtDc2NO6UOH11ybL+uLzlis86233nrY2/zRgRfQ9eyQAG/e6KDX3BgnWT67xyXmm5sK3Wr6AX1723XwGdg4Ea3y9a9//RjfYrE4X9gE8CJ87GMfO/wI/wP8SM/6zkNqvjB/APNZD9M/oCWHfyc3OckCPop/0sZPfu5znzv4FovFpccmgBeLxWKx+AUgWOKAK7gsQCXYIVDjG/iCaN46UjvUStgqBdf0CdoIEjkECyAJTpPj4CxI2VsrAjaCZA7aZOGPjj7BHYdowdECdd6gEmhCp40cwSFt3kiSvKWj4JV29gmgFqhin5oOYLODugO8gJV+wUg8rgWYFovFYrGYuPfeew9/1f+ydC1RIYnBn0lSeWOVL4GCSRJsJTP4GokLCV3+hp/jj7y5y2fxxfwVf9j/9OXz0PB13tSV2JDQogvoQ88epTdgJZjo1c+H8o9kzZ+Bzgcr+hQJzoJYJXXjVSvpLkmdjGg7GwB7p55ksYEt5okvNiZBPPKiRyvRy687A5ijEj3oSvSqzY8Sb/Y2LnPb9WyfPNrrc1Zhm8/0OuNYd7QFAJ1Xn
D18js81m51XXCvJqlhj/zuYXOtkbKeJXMU5xzwav/2lLZnAHsX4nbXae2ANraX56RwW8OhX7EX2sKs++0ZhE/viRROvfrztb2Nxb7ClfvOjH59xdLZzTa7z4h133HH0Z4/7xFwrYKzmRB/or7CPLHvHnpfQNIf67BF95ozNvTmbDPapnXWd+9hJp7nuPJwe1+Yx+5yTrbs9lSx8bNVesjgdClp2okNDnv7Gioa9vW3cnGhvz9tfbGWf9mRlh0ImOeYZv7GTlRy67HW2WitnYzrR4EUTLTn2RufvU5sVNllLa0SWPWgN9OFX04XOHjZ31s21e89n82KdrCGbo+tLFo3bODujsxEdO9lnHdnaXKjJby7PAp2nYKtCri/l9KsFi8Xi/GATwIvwgQ984HjW95yf1+DaMz2cPvf1zbb8Jr/jXMfnuZ40fFtnIbUzgi8M8W2LxeLSYxPAi8VisVj8ghCEExASfLnhhhuOA+9NN910/Jyy/7Xl0OsA7YCrFtxyGAZBEkGfu+666+gTBBK0IU+7gBE+MgR5BQl9FsRBr508bz84ZAtI0Y9PAEfgUrDLAd3B2tsRfgIRXYFTAbBqvAJG6LUJCAkkCe6hp8tbTfoFkH0WLBKwwu8gz/7FYrFYLOANb3jD4Uv4HT6R3+M7+E61gBN/k//h2wSGFP38Hf8igSvZwSdJXvA7/K7P/Bw/xT+Rg14/P8gXayNL0o3/4k/5Urr7qWRBKe3k8KF08dUSsvrBGLTNQp5ER/4YH336JKIlmvhuvhLIppefJ5vvNkbFNf0zMUwOeQp5JaXYQo7+5CkSqObY/JGjmAv0xowGfbwV7enJfmOTSC2h5Kxh7rXrj9bnyeuzuafbfNBVHzSfzjnmxtwbPx3mcSZyyYJkSoqV9CbPOMmzh4zB2BU602PdJeGmPPOoOBuZc/zOOeSzA68+c2f/Av6Clu1RvGgr1rBizuOlN536SgCaAz85DPWpS8Tp93Po5DQ2fUrJw2zRRi4ac2G9p33JTU7jmW/O4rceCpvNIx36spEMdqrpIccXNKxLdqSnveY+sHfcI8lIF9nOzYAfkoFGMtw83HzzzUc/+qlHYUfnVX0KG9lgntjQlzv0SbjDWXLQNBb67VFzYL+xV7vx3HLLLYctxqONLvSKObF/jKt+Ndns6NmmnU5rrL+50Wf/kAXuPetkT9if9jN70teasYssa29Pmw/QR7c1Jdt4JH6tCf3ml036zZv7C9gHxqMYb/Vsn4VdnkO+ULNYLM4XNgG8CB/84AePZ7bnvJqf7Jkfeq6fBf5CX/4g39xZjz9zzpn8+XzQz1c99thjx1lzsVhcemwCeLFYLBaLXwKCv4JfgkQFRwRQJGAFWfzkslqbILaAzJNPPnkEY9ALNMUjUFkwXOCxb0QKLLn2loY3DgQD6dUuKCQwTF4JXdcO1QJNJXUFkAqACfo4lAskuRb8EVDS7zN7tLFbm1pwzTgFmPRr9/ODvdkg2EtPNi8Wi8Xi5Yu3vOUth38S5OG7+BA+CvhByQy+jt8AfkbwqESIBIikhARQCRE0ffFKIIm/0s6/ke2aXMlgPk578vnUElJkQF+YKlGpkC0xjI++EqcSGnQaBz8qUcOHJxeNhDPwiWwhX38y2HT69m5FfzTk0EGXBE/0yYlOMYfsNWfmlSztjSe6ST+LN2rpMcf8O73GaXz59QJ4zgk+K/icN5wFZntyfeHM+sw2cthOrrVVsxENGKf1nvKUxkJGbdnU53RBfeQpzkzaFDS1u/bGpz3ArnjMZWtYEi2Z+tqnxjChzbyd8qZPX3vIWlx77bVHP5Cp33pbA5Acjle/M1/y7QX66dSvTb/ibEdPb6jiTX9JQ+Mlg436p34ynAFdTxnZoJ18c2JNkjHtQONMS07J0kr9CjvMBxn1G5O+6Ky9Z0g6lPSwWb81dI/gZZu97JlAPhnazKsESDKAHDLIQkuWuvOxe3jOC7mu2WL+mxc0+rTpJ4M++8B94ixuvtnJluYPjbGR7V5CZ06jMX948NrH7pn2mX73rXteTadryV0yteH3DAKf2UqnJDg6zzh8bHR+N1405NATun+U5g58Du4nvOZhk0yLxfnDJoAXQQIYPM89u/mB02d+fT3zZ9tZ/gC/Pv4MnHOmj9DP52pzza/Zj740tFgsLj02AbxYLBaLxS8JwQ6BGEEaQRUBEIE9h9v5VpPgjgCVgIsgkyC1wy8IyKATrHEwJlNARgDr0UcfvXD99ddfPHQLDjlUCwYJSqu1dXiXfBb8QSsALgDFPgEdepOLR41OIEiCl34y/ARiASn87DEGuiS7BaEU39o0VsGogm+bBF4sFouXLx5++OHDb/BDfIIkm2QQP8FHSoLyjQWG1HwVH4KGX5K04I8kU/gmn/k2vpIvkzDiP0tiuO4tX0kY7RKJ7OD3FP382GzjN/lTvpSt+CVB6pNcxUMfn8cO/PShwXuaUJ6yFW/7kv1cuvni3vBV0NClzm+jnT/rLGnGRzsXsAlNsjojzM94SyibQ8kg/OYufjqNK7sU8wGdS5wlslE//eaaTeRaP9d09lPPdOujm4zeYiRbaR90VnLOAO3WxZkDj/Zosws9nCaOA5n1QX3a6zMHQLY24xOkNEZ7zl4FvPRV7Ce8ydKGT3EOMsedAck23+ZHMWfmxJji126O1O4HP5VeX/1kk2NflKyr1KfYh+SflZxVrLe1LolcqZ8uNPaMMSbDZ2MiHx0Z7ttsIMMYFP3o8BhnMshOh3VF67mgD9Bla3bMsdSfHOdUc63fM8Nzx/XpeOxJbdnbnCQHr71mTOY+XUo0bLXvzZv5Zxc+zyh8rV/7lPx+nnrOjTkhB537zn4wB2jwu88UMu0lPH7tgDyFXnPHDmNXtJHrLeC+FGOfmB862IAOjfvQlzjZTgZb3PdkWrOeAeaosVSfonb3hzkm44knnjjma7FYnB9sAngRPvShDx3Pd6VzIh9xCs/1fDPkEyrJAPzaqvNxtXWG8pmv42eco7797W8f/IvF4tJiE8CLxWKxWPySEFARkHF4di3oIaDqcCsQCgIzgjkCT4Ij3gIWEPOmj+C44KQgjEOxYA9ZIPgjeINXQIt8fQ7M+AVqJJK9qaCNboEewR39AnfaBJfw0oOfDsGhakFObygXaO3/oOmj3zgc8AWT2CkAqE1gjn6fyWUjevoXi8Vi8fLC2972tsMH8YF8Dj8E/IN2yQfXfBN/V/CIv5Fw40skL9Dym3yWX7zga8jrrTz+Ths9ErBkavelJPL4Wv5TAMoXmvhjtNoEnUpw9BPO9NBZO38I8dBV0c93s1nBpz1ehT1kA59ZIjf+Cv/J3pk8nvape2uYLeYIbfTpmoVt/L75MocSSuYbj7GwN3viry6JM2s2sFNiKZ3aFZ/JZRfZeMwnGjrYkV70gT3JUJpLvM4hzjzOVfi0QYlcn6es9pC9pi8baq8v+Gw/pNP6TNCr3/nIuJKroNcXDV7y0lefObdezkTZoM986FPINg9wyqtYP/10gr74O9OR3zjiQ2NdzP9MzppX94++ZLgfjYHuZKTD/iDD
fnGmcx60lvokJNlDxvy/ueppJ134m6dsmfNAj33aWNElQ9FvLdLTfOoroWr+JTPn28ZosxWtPWPOTu3V7zxrLtHZe315JTrteI0DvXmzN81nc0xX/GwxJvT6yfDZ+ZoeezQ69a233nqMWaHHORpftlcbp+ejMZhb8uwzwNd93PnenLp/rT1680IfGsX9xAYyyG6u2cpObcqEz+R0Xb/57dovBNgbi8Xi/GATwIvwkY985PA30LO75/rEabvr+dwH/g9O+/iYzmpo+CI0+VV0zgdf/vKXD5rFYnFpsQngxWKxWCx+BRB4UQSfHagFKgsYKa4FQxx4eztBANvhWMBGsMehWDBHewdkQRgBGT8FiMb/GhaIFdShB72gjuCOn9v0toy3DcgVxBHoEZQSxHPwJqOAoyCSoB55+shjW8E4gS21AJEDvEO6oDjbjKW3FcijS2CQPIEp9OZgsVgsFi8PvPnNbz78gGSCt9UkSPg6/kEbP8Q3CfpoLwjEp/BbEh5oJTtKXvlykmRLyTLJUDIk5fg+/drRSwSXpOLHFElYPoz+2vhcfGo68o8Sp/le+qLXp6Cng38rqamtPnaVbCYnGZNfmf/Pl1+Npv5o+ORTW4wZLV38fecMvto1mc01+9R4yMSvxmsMyrz2pq5EmOJMYR2cIVzrN+fRkjPHlRy1z3SmLx4F2Oh8MBOrivmnzxmC3MljHpR4tKtrJ7PEGujTps9c6/NZAW0Ve4HcgMfcK+YSb8CvPV4JvtpBW7wz8akkt8TnaWJUXR86a9d9oo9saxyNfvu3MXZeM/f6W3/nMuc5stDpI4cONMlIjvvNfFh/86hN8r9xpL9C7xxHY7SWbHFPnyZv9bNBYYNCRnaQ6csg9lB62GUM5KMhH489pl/tuZMOBW1zZl+7xmvM5DkTO/eyMzpy9JGtreS3Pdk9Ae4LvOSZIwV9yVpymhtnZm8J06uPDHTR0mvvOpeTad2icX8bC3n+FmAHOmvqmn3mD43xoPc/19FL7no+ul/IMwdko/X3A532DPs8K8n3bCOXXWgBfZjXYI/Me9I8PP7448d9tVgszg82AbwIH/7wh49nted557fT53zP9rOe//WBNn6Qvw3a+KAp1zXwd/wL8PF/9Vd/dVwvFotLi00ALxaLxWLxK4Kgi4CQJLBAiwOvAItAiiCKg7Igtz4BMUGeAiSCWmgdnh3EJVoFYcgUnBEo7C1b8gSh9QlKOTwLKhUUY4NgTm8I0IVWYIgMn+nTxkZ2uRZMEvQi06GdPa7xCQYWXBRsMh7/40/tQC/ApF2wVC3gxc5NAi8Wi8XVj/vvv//CzTfffPgTSQQ/neyaH/IlI76jxAPwQ/k4/oXP48sEhfJ5/Ad/yVfyLeTwSfwL+fyUa4kLvpFfkwghR6KYb9LPD/JHJXjZVUI4f4qWPWi1+9lmtvGnbChQJhmLR6FfoZsN+ukzzrNo6KGbHjT01B+NMUZjLGfJccbg69NVQY8mHrRKfMaoSFLz05JBfL45L9lj3ulWZx+wyXpYG77fvDgTOB9oty7JnwWfQr+a3YprvGTQzY50leRVyDCvFV+Qc46ZfeSp2TH7tJeYm0le/dlRsXcCWc5T+Jyl7ANyg/b66cSLB8gS3FTMoX3tfKQ9ufrU7gHjZ5d+qD/+mZxV9Jsv/a25tTT/1o1N+tCwjQ5vlzZPSvxq/PhcW4++uFGiMxv7OWr86UgGO9jZvHSOJCd7zaH9mg3RuN+aC2dQe9Ieayx4mxO07j+8aFxPO9jPRuuB13UyFHT2BbvY4LnCpjkexT5EB/R4VpBFhnlBY87ZKKmLv+K+oN8e67lBn59bjgY/Ovz0oHO/+ncq5jE76OpcjcY6mB/PV/em5K69497Sr5hnsswFe40Rj32cXXSDeQVnfQljsqyDfWNe9PfsAXMQyFGCa2NhCz7P5/2/jovF+cImgBfhwx/+8OEHPdc94z3D5zMees7P9q7rA218Wz4lGv6DX/A5Wp/zyeCc89nPfva4XiwWlxabAF4sFovF4hJAEMVPoIHAo+BwQWWHXwdeQRmBIcEsgRtvWDgsC1IXRHFAFpADQRsBMMEZAaCCi4I3+AWMBJQ6cNMr4KxPkMoBn12CkeTox0M+28gX7BJwUjusq8miQ3BIUEkQSSBKXbCaXG8ECz4VsBRAci0Qv1gsFourE/fdd9+R4OCzelu1ZCLfI/nAl/BB/JFAkS9D8V/8Cr/B1+HhV7TzjdrxkSPZIUHE/5GDDw3/4pre/COfxff4zB/yU3xqCVP2lQwGfpNM7TMBSyY5arr0kyOxwR6+m93kSNLQhUZNHtqStWROmvoVb/sWiKMr/mrF277mF91MXkenJC/ZEr1sdNbgs/l7c28OFGNsPqac+M2ja/DZHEhKmRN8+oM1MMYKe+mtHdC7JhO/dU33lGUNS+QGc6MYP5nJcqZyZnEGMZ6SXIBWad5mH1kl5BR7YwJPxXzjxYM/ufiSO+0rgectTet52k+mfrV+uuuvTyn5rD+d3RPmzZpqcy9N+fakdVJbT3QloRWy9SnW1ZjM36tf/eqfkcEWNV320RxH9qvtC/ea9XaPWocpA017zlr1Jqx2ha3GZby+REI+mN9oyGru53iTr79a0lNf65MOhQzz0V7Rr6CNxlwYL5q5NmjIr6DVbuzOxvas9dTuOac2v8bGJjKdpc0RnmjUirHYa9FZZ/zsNXfkWAvFPeD+wUOGPnT6jMc8+zvBvWS+tZNJvn60eNlvjK6Nd+4HtOTS3XMAfUBX3f2oNt9PPfXU8QxYLBbnB5sAXgT/A5hPg57vEz3fQ5/zAfP5XxtflF9x9pjnOpg+gm50fOanPvWpn1IsFotLiU0ALxaLxWJxCSH4IkAtSCngLEgkCKN2ABY4E3AWeBGwQVtgXODFYVmgyM87CwYJ6HprQjDvmWeeOYJAAjuSxw7wZAlMC6IJDjlcF3QroOqArgju0C8QVKAJr2uBOHoEgQSn2e5a8QaOYD/7BJYE/Rz01fTpg4KW3gAz/sVisVhcXXjggQcOn+HtNc97fkdAh9/hF/gpbXwVn8LHqfkYXxoSLFL4CXTa+STXkkS+qEQOH8bH8SslY9HS3Ru8/A+/pi+fo51/LRmMNjlsjBYv++Ov4FF6w5dtaCSPyGULeWjU/Kg3ZPn3U3vRlLCU5OGHwTiTMwtZ9EJy0lO/UqKXXPbRzdbWgXzzyQZ81eYmGcnTVpCuos/8NYboFEDP/7NBYkwyCw8bSuZOeemcbQpYA3zmF7STXzv9xpge/foU451v+oL2EoH65pu88el3rsFHB2hrb5q77An6K+YXH1lq84/P+rLJ2mmv6K+QrT9blPjrt898Qc/5y/lKu7Gbg+6X0+RsxZrZN3SwtfGyzRnOOpiX22+//aJ+oJ/8KeM0YVoS1Nw51950002Hfv3TDjStE/vrn3aora1xWgd9aFpnxfiN1dvI2XHWeO3D5tQ4yHRmtifJYwt5krLJQef+MUZzQha7+ulq9ij40JHPZtd09i9RFLLQeU7QB+4Dz0hrmT780blmp3m2R9237in22P/
o6HFGR+vs3dyygQxzwWY1HeSZK19E8Jwk298eZOtHS66abWTbr2CO7Atnf7zuf/NnfIFdStd4zB0af8M8+eSTR99isTgf2ATwIpQA9uz23P55OKXp+Z8P4FP4JP4nmfH4nF/iR9DR7bMzzte+9rXDNy0Wi0uLTQAvFovFYnGZIHAlUOZb8U888cSFb33rW0fwBdSCLAI/AkaCvg7JDsa33HLLwdtBWV+BZz+zJvCER/BGYEcQxkEbDzpBI4fw+T/DBJYKOAnG+0x/dIJTAvXsckjv7WTtBeXpMR59Akh0CbyRJYgkIOjtYvyCVezfA/5isVhcHfCzz3wSHyQRxe9I8JRs5cP4IYkWfoY/4aP4EYX/4CskfLTzGfwSP0Wufj6FD5Nw4fMkJPgeRUKIz9KHX39v+/KlZOD1JSbAI1HCNiVaSRa8tbNb4av5LePCR1YlGrLZzM78JV8aPRp+mhxjNl6+UmGffjrZofTzymgB/bRp0hVso4ssMtEbCxr68Ro3eokiNrKVLy6Brl9imh/nv81rSVtyzAOb+oKYtvoU+qZehV0VZwoy008H3ZJy5BQsRGueFGtPl75ojKdEdG2KOVfINBfkQn3kRWOvTT7znE77ALK7fcp2c/N8yeOZHDW/zjxsOatfm7VTrJvzVP3pTQa92iQiS2yjQ4Pf2c/eL+GZfO36zZV5KZGpkG1u7Y3k0ENGa6CdDIUMc+DLiclQ3Nvs1o/e52Q0DveGNWeDukSp/uyctp41lsaZLXMs5Mx+Y2qP2OOeO+4LNGxB0xm1+bWvPaPo1o9Wcb+hMQ571/yTqxiLYh+yNx3R2c/RkG+d6Wg87idzh58NrTP7QR8ec2ruyWd/8+ls7XlJt/Gmx7iSZZ3JsNbmHy35dOM1XrTG6bP1tVc903zplE487l/3Nj3GNeE+AuOokOkNYF/+WSwW5webAF6EEsD8qOf2zwM/ETpDAZ8w/bFzjz6+Kn+GVz9/xo/wqfnu/P7Xv/71g3axWFw6bAJ4sVgsFouXEJKsArMOw4I1DuMOzbfddtvxM5MCXQ7Q/V821w7RApEO3Q7TgjmCTt4wQOegLeAjUC4QI9hV4FAwSJvaIZw8QSPyBJnwP/bYYxcDnYJKAmh4yHRQFxCjm142CRyhu+666y4GxozHHxQCcMbgjwG0CjsXi8ViceXi9a9//cVkry8B8V++HOR5z6/xF/yEYBCfJYmgv4CTwA9aCQbJCUlSfomvws+XlKgiozd3+Sb0riUo8ivkayPPZ76tBC+ZAkz6JToLXuFHVzufpvBhIJGILxqyFL6NPUAGu9mophstvy6ZFI12YzJf6uQZK5/Pl6vZap6Sg6aaXn7d/LGNPn3sS6bi7EA3ecbDf5sXNpbEUehAw1/rw2te1OZen3UyB80Z/+1MwBYy6M62WdCq2UW2MZGjTeBPsWbWM3rQbnxqY0wvHebUmaLzTEBvDPaWBJjkGeBLXkWftjB5zVF9au3Gr88YZmJZv/aKtXAOMzd01D5lTzvssZKLCh7zXSKOPDSKpFz/g7fCrop5Nb/aya7fHnC+s+bOgeaw/ZVtybAXtE8b7RvrE409ah6Msy9p1EdXdsRPR/swOfajeaSbndmAhp0lmvEmJx0KGnaYTzSK9hKv5Jgv45lvCmdLBR1b/EzyXBuyorEvzR/eks7JwW9+ulfsu5loJweNYs6sq2t7p+eKsVtf4+mZY51uuOGGozYOZ3K06VPMG52ev+jck/aSe9Fn9NGpjcN6mVtj9VlB6/40JvPHbm8C24vuTXK0ofUsxYu+e1KBrpNr7BLA/RucxWJxPrAJ4EWQAOYjFPDsfj44P8ApvTrfyO/ygfwhP6DwDfwkPn6MD+H78PCl+tB/6UtfOuQtFotLh00ALxaLxWJxDiDY4jDsYCyII6D+7LPPHsFvQSwBGYdkgV3BI8EfgVABGm8V4HGgFsRRHKbJK4gsMEWWuqCi4JeAmcM7+a59879ApEAmuegE7wSLyFCj9ceA4JLgdnq9NaAdL3sEAwWxCoCRo994F4vFYnHl4d577z18lIAPn8Bv8TOe93xTiSaBH36oWqKCvxAIkpCQ/ETL30j66EMjIcK38XUlfsnTpr8ESvx8GB8jOSSRC9r5OzaSJxAFfBBevs1PJ7ML8NPP/7nOF5OjSGYYG3r2S35FEx07/c9f/Wzkh6PRV+Fn2cUmctjNJnVz2c86VwTKyM22KS/dkujkWgfy0FUmrc90TX3J8Hnaol2xhvSDpBh/z/87H/jMp9sL1kLB09yQi1/RDmQp1hX0FUhsjp1xyLc/1Hi1m9tsice4nTk641jn1oyt2q2v84g+oLNApGJvWf9ka9OvWEt9E3Ral5Jt3m4NPuvPLmvimi3ObOTVx162SOKlXzvZBVHtYeNubrILjcQeG0pk6jdu4zT/5Gu/8cYbj75KshUyBG5ds7HEY/3WgI343PvJoD87nTfNMV3GxgZ7wl5KDpkzScxudtpr0eAzD42l9Xeu1E+e+8CcOFeym1x0FWOeSVs1O7WbezR0a6dr0sRvTtyf9m/rp8/84LEfm1t2+LWe9Gl3Vsbnmi5rcf311x/rZhxqY7DPjad9ZKz9XHRzq4889uAxB/ay/9tMbl+msEb49aNT6Ger+WNnieB0sTlbzA1byPHs9UzxbLa2zQd97jvofkdPh2vz4+efPcMWi8X5wSaAF0ECuOc4X9D1c6HnPH8Brqvx8yHO9c4Z/D7f5ddN+AuFf+DLgAzFZ+38/1/8xV8cfYvF4tJhE8CLxWKxWJwTCKIKvgi0CCA5UDtMC8QJugjw6PO/tQQQHZ4FkgXjHMDxdNAWVBX8FNBxEA8O8HQU2KVTIEhwSZBIcMi1QKQgjoAROu3kCAAJMukr2CsoiYZtgu8CTwLz7Eq/Wjs6Muk3jsVisVhcOZD8lfzoDUBJFr6Kz+EDChLxLSVWtKkFeyQXtHujjK/xGS9ZknSCRnweP4WG/0DT27z8DV/Iz7mW+OEH8eVf+CvXki/kSqTk9yRz/BsFNtKrnW9S9PuSlQSURA+ZbFTze+ShobukqeLNVOMjk65kTTo0xs9vG8/sr6DjO80nXcaG1hjM7aTzs8zGyVb2kWssZE698czPePFJPJlf7dZIXTEWbbX7jDeb1PS03myWkLIv4lUr0HWfAx42QH30Ga9xd27QR4ciUWvtolcLPrYG9o/1w89m7ebH/jJH9k285OmP394M9SkzsRzw0KPod86Z/cm1Pv0vZ+ciIFs7XnzmciZw8erHb98YS0k7NIp+fWr9Sl/Us0ba2UwH+fQYH15IttL5Tx8bpw597gPrYN9mp8LOZDgfOhuiMw/uBfbWr1hPe0fikZ0lVieNfrz2KJnsMTZ97LFntHtTds5J/Yrx0mG8xkkWm9gWjT1qXP00c+NpTclT00EenfZWiVnzojbn5tt46LQXnKHt4WgUsthiLCWRew74bNzNNd32qvbO3ew1DnTa8OG3n9Xa2ArWWhtZ3f9s6H5Hy2ZjRssmthmL+WRHXwKQCPbZPWd89CrksAnUCnn2keed58xisTg/2A
TwIvz+7//+4SM6W3l+u65M9Hm2o8cHav7Fr1LwE/kGPkU7f5F/QKs9PjKdCz772c8ebYvF4tJhE8CLxWKxWJwjSIo6DDuUC84KZgv4CM4IJjlECyIJ4jhMC9wI5KgFaQSyBHXwOHQ7VAvEkyMwK1COv2AY4HdgF+AR7KFHEAh9NrAJfW8u4e/tK3q9+cIGgTHf+KTbWLydIGBEvjYBLfrpEfwiY7FYLBbnH/41gTfT8gme5SCoU0LHtT6BHYmQEsGe/73lK6nB30geS1hI6PIJkhV8BX8niUCPvpIb/J9Ehmu1JAl96OjSBiV18fExfBPZgk5so5OfQ9NPKhuLIpmiPxnsUpPN36Hn/xQyjTOb0WTjpEdTsrq++hVvDfOrCr1kKOxPnsQ0+xV6+XnzqjaWKa+Cx9jNt/XBRwce/ebCvFk3184UCkjglMxFO+2euqIH89i5Auh3lkhH8uNBb27sEXStk2tt5pX+bEOLRzGfE/qiZYM6oLdXGrt5AzygHY26BFrymvN406tfG7n6S9ixObna7SljUqzT7MdTMUf2FPmNMbmK/SVZWp/iHjKv9om5Y4+zmD46pnzziV6/vmjcl9rZZ13tdTZMG9Eo7km07uNsoFNf42ytGqdiHNFI3rLlpptuOuRnS3aisa+0Oc+SA80l/Wq2Tjsai0KO+6Y9Tob5yxb86NrDkrWNRbEO9oFx0Of+MS+C2/qTg87zjA7PJPeYn5TWljx6rE1n32S5t9C3z9mBjp102it0tL7mvr1pnsmgAz9ae0Obe4ctbCDPPKDT7jzueWr/mz/X+nxGSx/7wD003/5lu/O/9Wt/nD4Hul/IffTRR4+/BRaLxfnBJoAX4f3vf//hbzy/+SrP71k844PnO18R9GsLaPmtm2+++fBDQDb/w0flI9DRxUcng1xn40996lMH32KxuHR4xf/8Ef8Jh07F4dIN6eC2WCwWi8XipYHgCQj2CLTw0YJFDtIKPy1whE5QyU9EO0gL0EjYzv97xrc7eAseOYyDYA45BZQE2QSgHcgFlugS2BFEEljX51pd8F4RcKLPgd43/ekjk342OtALFtHvM7n+QDAmgVT2ssPbPIvFYrE4v7jjjjuOny/1nJcw6Jku2OMZzwfxVf6WFNRRK+j5CokMXxbyzOdrvF3Gf+CXSOEv+LSSnnxMiQ80+PkfNPTwIYrP6PkyMvD0hi/fxEfiU+qXmGQbGrZpV+jRz//5NQv+SmKEfRI5xkKeQjefmr14yDUm/hk9oKu/Yr78QkdBMLbllyeNYizGYc6zf9oZrTmv4DG+5OJjazoUeoMAHZsbrzG0nuT5KVft1kSisgSrPrrB3LAFDTnGxkZ0SjwKHmNSs818No/JAmeFgA/0Kc4W7KsdjJlce2LyatNnz9IlMYUX8JPXXu0nxvEo2vAZjzm0DvVBfWr99tPk1Tf7T2XrK2mp3z7Wrp9Ns4/d5FsrZzJnLeco863YF9YcfwV/MsyxdWwM6SdDjd966J8y9FXIoD8acpwbJRidF9FMGY2DffQoZLinrAEawNc4rB87O8tma+NQJ2Paao7TYW9ay9M5RdOc2Ofksc/YffacwUNGNtunxugLK9rcL8Zqv2dz9PRYo+bEGCcdsJ0d7gn3JDo22ov4FHPsfmCbPWNO3VvWF9A0t+S79qzpeWCvSPY392r8ntl+TcFb1em0l4zfnLOrcUh4SwK71/rfwGQ779NvHEr3Mx1k6JdkMq7FYnF+sAngRfi93/u947nPZ3jud5byLOc/59nKNV/JL3jen6Lnv59/nv35Buf4aMhQ8298Fh+ibxPAi8Wlxyv+5wD7CYdYxc3oIOgwvVgsFovF4qVDQW4/cScIVmDHodyBXQ0FoR2eHc7RFKD1Bo8gkmvBI0EdgRyBG0Eu0F9AiRwHcgd1wS3XAmLOBf44EDgSNNPnwC7Q480JugXzyaJH0K1AGfsEgfzxgM55gwzBMsGk/vjYJPBisVicT0j+erPN810ywt+LAjuSaSUcPOO1C+x41pdoKXErycD34NHOh/BHfABfUSJETaZrNX14JCLo4RclkvOBZJPBX/WFI3aQo91ba3wV/8TvAJtKOipk6+Pr1MYgicMnVtKBXnKYfWQaE3o1W/g1vPSzpfmp9D+CjYNc/eQaTzTkkwkFyKJTosNTIdec47NOk17x+dQWn9Gymy30JBePuSCzPrDO/LVzAKDVpp+s1q92YwU8ziK1Z0OfK4BHwWP9Z1/BQ7W+EI8+xR5INxiH4nxkTicvWcmNF5LpbBWvdcZbn3b9Cnv1l9isPxrzMxPEinshfv3ZnT147V99ZGv3ViYbXMersA+tPYiXHO10KPZwOvTjR+9+sH5o3af9rHtjmDLsC2tsDu1/+8C9pDROMno7Nz3JYDc6MkreTh32HBqfyW+ck0bNXjLMJx0VSU485KDzbOlfppCBhgx05kE/PeSVMEWLRjFOtf1Hnz1srGiMgx5Fm9r86jOneMmLRmED3d1P5gAvHWyxxkp0/gbwrDFOsq2XMzqe6JzTJWj93eAXBwTh7UF8ztzxocWjnwzjAWOzZsZmTO058+LZ736W1DUW680WfSWCPR/iYYO3fx9//PFD9mKxOD/YBPAivPvd7z78iHOY5zfwBfyEtuAzoOED+IPOY4HPc5bzJdEJ5wWynM3xkqW4Rs83pf/v/u7vDh+yWCwuHX7mJ6DdeAK4DoqLxWKxWCxeWviGvsCVYJyAsGCQIJG3eyRjHaIFt/ltQZkO5oJaAk38uoCfwI5DOfoCZwJGBZ7QCdoIPglaOZQLJJYYdkB3iBccp0fgB5025wY62UZeAXz82tmu1scm/AWM8JBTIE1AarFYLBbnB37SzXPcs5vf8IUkyUbPbV8A4i8U8NznbyQ+0Hvm45Ew4Lv4H89+CRr/IoDPQavmqyQg+AE+ih/je/g7PoSvIZvfwa8Pj+StYJRCVolab8Bp48cUPLPfW618psIPksc2/fSjUdOtGDPaxpBc1+j4vWQLaHkb2Lj4TQWfOWPDlIvW/Km9mcdm80UeWdFU+Hw+lB8m17wml370k2/yp6eSrQq9XbNvjkVpjQvgmc/mvX7F59MaPRslpUK8oc94oj/lAW3WQe1ckT1461PMYX3JE1yU5HPewDsRn2LvQbzarB8+e0U/ebPPOtSfXVC/4v6wJ/RrJ0Op315yX5BvjZ2nJFLtTbZrszbPJV9Bm30KHucs66lfjcb5y/1mD7N7ysAzZWhjd4nV5rqfk4ZTGexNhmtj8byonw3WSD9ectF4bkTjHkhG4+zc6Np8ei6wzXPDeZlca9Hzxv7Xjt89g44d5hFf42GLuaALneeW+UGjAN3JYnPyzEt02qwjeWjSSzY9aMwZ+bfffvvR395xP+OFqfOaa6455kYQne3uS/xozS0Z5sJnX9RxbY90//VMRUeeeWVzCWM/DW1f6EPjfjZ39oEvNbBL0tg4nnrqqYPWvBuTZ/Kzzz572Or6G9/4xoW///u/Pz4vFovzhU0AL8JDDz10+Lj8a7488B8BDfBL/M/s42Pw873iVqfgT/gvv
gqtwufRx8+oyeO3nn766Z9yLRaLS4GfSQCDP7I2AbxYLBaLxUsPARrBdgdlQUABXgdvgaKCog7UAo5qweOCRQ7YDuyCPq4drPUJ2JChTbBGsEgAXbAKraAS2YJwAu9kkC3g5hCvX5/DvCR0NTg/CNahJ0twScBIkFAAiVx9AlP4BKv8f0YBNXahF3BaLBaLxUsPb3/dcssthy+SeOB7+CLPb8EevgDyN/pc8wNoCu6UOJI0kJgt+cK38WuCUPwYn8RP8A2SHegkLwo48ROSD9olJrTzJ2zRVjIYSrRKdvTGrsIOYGNJ1opk0SwSG8ZsDHRHz4azePjI/CafahyKseGJzptzfCmfzCcKngFaNqORoGarfsU1ueww3sqUq7ChYs6Urqv738D8Mr9tLvnxaBTz6LO6a2CD+bCOzgXNd/0QDzlo8SiTxy+UWA/67Y9TxGMf2BPkzPbZF7SZS0WfvWEP0uMcYu6iYYt9l1x1vL15jFZ7iTyyrFGJS231V/SXtIy/ZGT8s58M/dbX/nJfmB9fumDDKT+a9Fe0R9O+sLfcn8ac7XSYF/eZL3aQrSSjgp8O1+ZuJkwV50l7R7AXP6Sj0j6myznWWkRjnshwPkTnuvWp33iMVRtd7hdzYv6aL7aQT64kptpY9EdjD9pj2p012XFKY+/SQRdb6LFG+ntOoTEedlifnjueM81L+pxj7Uu01hCdcWlHB+RpM0bPOPqaY3x40LPLWhof/c7J7iH8zV907mn63cvd1+wEMqyVOcJHD5ns94sK5pzN+ulnl/GQiY8se+K66647ZD/55JOHTnPqOegs757+7ne/e/AsFovzh00AL4IvIPnlCP5Q8Zw/PccF/cBndc6c4Df4peJBfCZfxb+g5V98JpMOn/GQl15+5ZFHHjn4F4vFpcEmgBeLxWKxOKcQdCvxyz8L9HRwFohxaHZ4duAusCMYLGgjMCMgI3jnUC7wjlfQU/BH4NNB3IFcsMZhXbAQDZ6Cw2ryBY/w6I9O4O173/ve8b8M8fgsiCWIhlZQSBCLHYJJgkzGQxb72YFG7Q8K4/R202KxWCxeWkj8luCRwJQY8VzncwSDPNs9uyULPPP5Cf6F3yjZU2CJX/Dcj0cSRwKCX0IrKcUXqAWL+DpytdFZAtXbanwPGr6LDMkPurSVlMGfL+Ev+RY26Kebv1HIL5iFnu3sIxMf3eijq6BXZ0s/v2ys6OnXP/kkfQt6oTFnbDpNDksgm3M+0lgU9GimPHor2aGWhPG3vKSSsZTEMjbjsn7mg7wZ4DPWgnPPPPPM4dOteQlb7Wq2ta58vnVyTrC+PltDZ4R4gms62eKM4kwA5EkuOePgUYD89lBJrHDaR0ZFm3kuGccmSavJVynxPMelHT+5Piv6tJk762GM1k4f4NGHxlq5D/BrT2ZnNTTxurZG7oPmxlxa5zmudCvWxDpO+dqts2Ke8UioQuMyHxXyu0/rT3795sQ+8gzQnww60NnjznP1kZMNxgFk+JKhPmgeum9rsz7JMdbk2Bfucz+DbM7IQc+OZFhje6k5RdM40LDBnLmfjJd8dKc0+OwVzz06FDa4D8hGq9/c+EUd/WRlDzo6XJOFzvxE17zYH9HgcS8763eGdh9aQ+tjXMZIlqJPbV2MBeq3b3oukD/P6Z4xxmyv2b9syS42ZacvkdqL5oN+8swTHWgVX0xxr/r/wOR5I1jS95/+6Z8u/q/wxWJxPrEJ4EXgT17/+tcfz3X+wXO/AtWgH/it/Fv9+hQ+wbkWnAf5Bi8kAL/ni5h4Ohfmh9R8DT/NjywWi0uHIwHsG6AO17AJ4MVisVgszgf4ZD+7JtAm2FPA1oFZoAlcC+44kDuAC+441PvsUI3XodobGBK1AlkFkASxyASBJgf2AqLJQiuQLJikFgwWHBbg9EeDfgd8QSc0nSfUgkoCUWQKEgskCYL7A4HdbCFDMLpAnkCYYPpisVgsXhp4M0xSxttdgjWe4QI0ntl8huc2/yA5xCfxRwL/nuuA3jU/whd55ksoCALxM2rJMD6FvOq+bMSv0EWHdv5GG1/GT9BJZold8vTzd75ohIbPjF8pKSsIxS/xOWzml9hZwpVM8qInyxuzbGQbXkXyQ42XLXwoG/DEZ+5K6GYPmmmPIvlsvvLnjUWfOrlkJleil19ll7nl742F7cZgDbKLjOzDmwxgm/k0NnNCBlptMBO6+NCr6SG/MU04u5BfYfscczrV7RHzow7tH7U1nmiu6sMXr73Z+tInARnQTN7OO5NXu3rKZYc282x9TvXWT6+5n/zskNwryaZItJkH+92+Q4fX3FtHc6qd3Ir2ucazn0x6jIcc9wl92aaOV229rLE5x4sm+xVr45677bbbLupQpgxzaz7IqD8Zkor6JR+dPelX6MKLRm0OsgM/GvKToaDxRm782aEPnX1Fhr2oX2GXubBPyVPomDTa7HnrRJ57ydo54+Jnu2Qo2enLdvxkeQagO31D2vx5tlhf9NafPe4F60Q+GmPBxxa0rt3P3QtkGB999DRXavZaZzajs6botHury/q5D82Be9gcmku01olNbEDjmehZbl/5CXLz0vmdHfiSzWY6ffmTXHbtz3YuFlcGNgG8CJ7tDz/88OEH+AnPdtfKKWrDA50l+6zmQ5xdwJlWLIdf41fQ8lf8j2vgh+jkW/gY/mX/fcBicWnxirv/37s/4Qb2xzJsAnixWCwWi/OBm2666Ujm3nnnnUeAyMG6Q7jDtaCsQ7fgjaCVg7T2Am4O13y8g72AkVrgxkEbfHYYF8QRwCIbPXlk6SMXHTvIEBgSBEPrYK8NHX6BL+cINNole/tjoACffn8EsI9eb6kIohV8dw2CSovFYrG4vPCMlvjhKzy3BW/UvtQjIeK57VktOew5LUkwE2kSFugL7mgjS9IjH6IugYRe4EfNN/EF+ufbvv42dc02CTj9fB2fw2eo6eGX9JPNl/lJaP6Kj2IneyR80BiXkl6F75MYYSPbBKbIRo9PoVsxB3jpwevtNzzsoY/Ppg/v1KEYk6SyOWK3ZFKyktc1OvaYI7VxkN0YzLVrNpGhGD8ds5i/5tM8Ka7pIMd4XEfHrgqfTb+xldDU5nyAlqzWX7FHShpPedGE6M0THus76cz/7Gs/1afos//iQ2PNzS0bO5fM/uRKkjnbhPqSa26tpaSchJh1Ildxjd61dZdgsw4+Wx9zic8Zhw4y24vm0RpnV3L043WvkWlc2apdv2KtSmaS5b7UhqZindiPhg7y7R389od90hgU82SNnevSxVZ7Xn8ytKuToT8b1XQYV/NAxqkdjYMMa1RyPFv0kcMmOrIDDX73gb1njukgwxzbl2rPG3RkNGaynFNbz+YsXekzn740UIKdzuxFY0/Tzy4yWodkmPfuMXaxg17ryT5yAZ35oM840Jk397F1MAb9+JoXe0abPjw+ewbbd+j0sYUe958vn3gry/zTwx5zjpY+NpDrbwbPPjYL4rPBlxTocI2XbFCTY97J2GTSYnHlYBPAi+A5/r73ve/wI4rneWe2U3TO8/znM/gGn0Gb4ouj2sEX
hJyv+Dv+D/hHv+qRfL4Kn5p+vvlv//Zvj77FYnFp8Ir/8//8n+MnoDcBvFgsFovF+YLAkiCOwKIDsqCPg7PgkkCVQJcDuENzAXjX/HgJYcEeB2vBM7yCOgWHHMTJETwS0KLHHwAO8ALCarIKUgoQke8QL0jomhw6yESnpksfe/0BIMhWcEvASS0Aha+3lwTc8KBlr/HqWywWi8Xlw5vf/OYjYeDZ7m0wfsFzna8pgaKNf5FgkAjub0d+hC/ydyX/UtKmJIKaLLXnf2+18VclMenkewSn+AL+ji/kGyRdyObL6OFj2Fo/Pl88KrCkXzt/Rrb+aIyDvWrJDDxoyVLwsJFOBR97JHr5MX5L4UPxxqM2FjzoyTAG1/SW+OEH0WtPthotOjaZYzSncslEq8an4Dst5qnCbvPt3FDbpCnwVw3kNifNB7BfIQvwKKCd3fWBtrNqPNGbw84x2mqvzzrPPm2TL3q184ZiDs1ZcumLJ/76ILn4SvbZ83RPuWjsQ2vgLJRt+CQK7Sn3hv+hjS/byNRPhnl1f2hXsle/Wj+52VoffeT0Znlf+DuV0brF39mLzWjsBfd4iUGJP/T6FLLcMzMRqtjv5NDhvpcE1e5ekLj0bEiG0jgaJ9rmwX5Cg7exandf2j/2Oju6B50JjT870ZJlvo3BlyanHv3R2NNoBKnnvOg3N/YJOuPD37PLNV3a0bXPycgOfZ5rxkBPMhRfAOi+Mxfo6LJm6NTuM+uIjl5jNSc+e4bRyxbz65cZzIMvKJQEtw7WCg+6ZJsPY6YX2OPLBdaP3ea256C96tovBZmj5kOfBD16pTnB69+7sGmxWFwZ2ATwYuItb3nLxXNB54jOgRN9Vuezuq7wdeSAL1/yUWT1LzicN8Wg1ICfPn4FH//zla985ehbLBaXBpsAXiwWi8XinEHA0UFacEctqCTYIjBUYFZgxuFa8EVgVpBOX0E+QRsHbcE9cgSG+HeBIIdv7f18pza8aBzE0ZEjYKXfobxAkGC/4BR+wUPBLPaS2c/ACRAJquJjMz5/LHz/+98//hDAo12/4JYgoEASGWj1gTe8FovFYnF5cP/99x/P+L61LzmSDygRwVf47LldQpDf8MyXdJAU4S+0keOaz9Dv2Z98CeKSOX4qjlx+gv9R8z904vU2MKBBL9lBNx/BZ/BHbAP+kk8TUFIkytjLv7GFL6X/NKmKlkw1/RV+SSnhhod8fGSUWI63wja62GVezCXfPPWRrybfdTr4QDq0TbmnNinmRHHtbWFzYU7MtXFbB2PXby3NuzOB6+cr6LONfG1qKODnC17J0ieYp10tke+MAPrjUXyuHa0zg2LtooHZZ06Sr7g2v2fxadOnmG/9E3iTbV/iyybzZb2sm/HTO3VaR/3mVn+Jbvz02WNoyCHDmsefTfqtr72ULQqeeOtnIzmdv8inmw32Bvl4yZ/89pi9iU/Sz3mPLP1k4Jdg9G9GzAEZEL8aPxvYbF95JmjTr5DjDIrXWTU59SvG4X6jW6GXTe79aNhqLL2ZS6f27LTP6PU2a2NtrvWrrYV9bq3T1Vg9R9T2KBprSr4xWT/3FFoFjfk21+lAh0cfGvdEcjyv3AN0lNwFMthact3Y0BsHeT6jRUcWXuPTbvzd7z57zqAzbnTksaW31O0966DfXBo7Ha71mz/87CQXrXZja57tEWOQ7PXsEsCXQLYu+oyTTnNiTN/+9rc3+btYXGHYBPBiwpd9POf5BX6EP/Cc7zwUfJ5tnSc7N/A9fDp/D/yFMyDceuutR80f+SIiXqCHr+JT8t9f/vKXj77FYnFpsAngxWKxWCxeQgi+ORy/7nWvu3Dfffcd/5f3Na95zdEm8CNgI/jjkC04UzBWUMZhWrt+B23BGIG2ApYF1hyqBbmixY9G4EwgRzCR78erjR6HcW3OBa4FvfCiFdQtMEWH4L2gFJ0FoZwr/HGARk0P+drZjk8ASmEXmWTTbSxote3/A14sFotLD35HYsGz2bNecsEz2LVnuGeyZ3eJLwkF/oJvEcDRr80zvMSCa/0Sm3hdSyy4JpsOsgSO+A7+jW+iU6KEv+BrSpzi618U8DMSJfTiqZ88CeOSQNrJU5OpH51C9mnhHxVvwvGddCn4T3l9PuVnc7bRV+GL9cdrrIpkSzrYOe1KfrQK2/o1DkkexTpYA/NND30CcWThIQPoAP64BLE14+sleayPtW4OwOfTYnxkqScPNBZzX+KohLTP1nwmRRW0ePSVkE2OcSmzb/Ip9YX68Elymh86Q3yK5Jgx4KnPecf+NU/mBy9b0GhXyLYnnGG0Z+/st56dcdBoK9FoLsi2d+Od/e1V8+atWnLqV9NxKl+bfvefcZn7G2644WJiVtFvPtDYF/Z4/dNGsuj35Tx93lA9laFYd3TNQ3ZU7ENfRrQX2SPB6J4lIzl4zLtgdHbgZaP+6PDqR6tGo539ru19so3BuOw7z5l0dS9BSdvGQ5e6+4wczyR7wzyTp81ez2b7g83OudbRmpFjXRU05NNLX2f07mufzQ/a2uiz5n1Rw/wah7Ghc7+zmz7yPW/YQE602s0BWvf4Nddcc8yZ57A5pEefNs+pniFq++buu+8+xuL/jHte02tuPFclftEsFosrD5sAXkw4O73zne88fB4f4znPZ/nM1wXX2rvuvAf8DV+Ch88CsvgLvlOcKzhXo8M/5WvjR//mb/7mpy2LxeJSYBPAi8VisVhcZgi0v/3tb7/w+te//kj8Cl4J4Pj2vyCOtywEgQTFS5zefPPNR4DGoRocnAuC8eH9JKeEqaCSQ7h+9A7agkV4+sk2+jrACzzRS59gjwBWwVKH94LpAmCCafjZ2KEfP93emEAvEEc/OYKCAlLkq9EIbrHFmx7a0AhSmQd1f4T0B4k/UBaLxWJxaeB5LsFz7bXXHs9vz/8ST+CzZBcf4HntuVzwpuSLpIREgZqvKmnl70pvB0p4SQJ57vMhJYT4D8DHT/BhZHv+8310oeG7+Cz6+BntSglTiVR+A0q+1qeUYJnFTy0LOvFDeJWSHXwYOWTEy6ZqbzKYIwVP/phtk6eCRzEGQTBzZpx0GAcaPJNOopcf5RfNvXmlp2R2ZfKlC3+10pyaY/Oana0j8LXGrW0WIKO6xJZib0Sj1scGeqypsdE7eawhaKvduJQSifXXpz7tqz2Z0w59kmBKidr64kk+XjXgszYV86Q/vVOu81lnF/2K9niba7KzVbv9lezZLwHnXrQu9pTEqbk0x9kXv4Lf/tFHr9oesSetY/vZ2PVlX/rtG/uHDdnPjpKY1k3NnjkH0wZ91rhxZIt7ypmyvYu3BK9CBtl0GSM7ZjI8O4yHbjLJQuPaXLl3zVe2tN+1e+agpafxeAbhK4GaHn2eSdro025tJU7NJf3mOnvQA162O9fSM3UZj9q4yUZnPsjzpQRryl7zhNazED0652xrg659aw7NtVoxB4p/n0I+e42L7WSy15ygUbPRfeDvBXPg7w1zhRYfmWzHL8ErWcweY9TH7ieffPKwZbFYXJnYBPBigq907ucP+BzPe6Xz3lnonBX4uHy
pvyPAmYDf4Of4nuCX4Pi75JPF56mdd7761a8e7YvF4tLgFf9zCPyEg6UDMbhJNwG8WCwWi8WvDpK5t91224WHHnrowrve9a4jmSs4JxgkACRwphbcEYARJPPtf/5Z0NtBWkBI4MnBOTh0OzQL7hQgjEbAymFcMJ0eQR4HfQFJPOgEqwR6BHb8NLM2ffSh9TYY/WTjc0h3RvCHggA2+QXlCvSp0dFnjAJIAv7oBKScN9CQL7hlzPqMFU+BPkFN4/IHiXGhXywWi8WvFvyTLyNJfkgiCOB49oIgjeevxCD/4Jnvue4ZD3wQPvSSCHyAJIPnNT/El0g2uC7pi9c1n+d5z7fo98sP2sgnA60EaH6OnyRPuy8l5e8U13wIH8NnKPSQpyaHHXxMSRm+tkQQ2ewguzp+iV6BLLzGZ6z5s/izN934KmzFl25zWEKJjvQo5ptvbf7JN2bzo9CB7pRn1l1XzJ0azxzXpANzqNgPAa+fla7NZwWid4bQXx95yTbPyqRXrNVpn3lRzGd9yuSzTtFX41GTGdiB1zq1VvaXvTXtP9WpTXE2sgZqvPROm5JbP9349JEXv71EL/np06fgb+/bG85F5GhP96l8SK+C1/yzzx51PvOGJ7n6s0F/Y1O0K/aWe74vELLP/eRMyB409NFzOsZspNO9JMhrn7v32eCeICtd7GgemkPt7gl2OA+SkWxz4tyrr7G0r5wjnU/dR9p7DnhOuWecUclprjxv2I8/euMrudv9pZ09xkSve8XckKWQpZ9t3Tfmvbkiz3lZH1n6wJg9F/CjdW2ezIf9SKZ7Ex3drslRozMmfeQlxxyaZ+tvX4Pr3vBFa/yee3TgMW/eCLcW5sq6OWsbD/mu2dI4oev+9/lisbhysQngxSk829/97ndf9K98l+vOSs+FzlkVfP2/X33OhfwdfxSeeuqpw58lH58zjtpZ82tf+9pPKReLxaWA38D5RMlf2ATwYrFYLBa/Gvh/im9961sv3HvvvccfXYIxAnECMAJSgluCgw7Mgk0OxA7QgjL9BDQ6gSUBHu0OyjAP3gI0BdbQkuUzCMgJ2tClX+BHAMlbEw797PE5XkEhB3a2CGbRWRCJDQJSBcEVtNoKGM4AmICbgz9e+gWktAn0kStQRZ8x+OanQJsxC8jRwTZ0bGbrYrFYLH61eMMb3nA8p/kENX8CvrgjUONZ7JnOT+gvacJn8EOe0/yWhI9nPB8loeC5zTe4LpGBn2+RaEDnrV0ytPNZ8fTmowASOj7N27rpxC9hg14fHvbxEyVeSoDgYZuxsJU8tPgUepRTXteSPvjoyWY8+OlUGpNEL3+F15yZR/x48JPD5nj4SD+x6u9uc4qHrafJ4eRX46uYC0Ez/vW0vfqU57RfTa/i2pmDz+bfXYMvpM3EMMSj2B8TnU2UQBdac8Lv46lfXZJNnzl4oX3PJbN269DbkTNRrd/+QCNxVoIzPn14rYVzGtn6oL76J6/S+ci6o7XvyNTnPOasZB9Zc7+64guCwHYl2QoZne8U8sgmx34iA4/zU+Of/GjsXbxswNd5LRrrbV86m/kCCHvraxzakqHWjs+culfMsbNtZ090aJKRHdrRZ4N5aEzaQDtaY3FPoWc3vdbOW6+uFfL1K2SoPcvMi3vLc8waJUcB4y3hrCTH8wq95xX5ZOnTrs0cu6fQuj/MjWL8dLn/0dkTwG7A7x42VnNmPvA3ztaRLejU+tiJ11h6xupTmy/3vi96+uKoe4Pu5hQN29mg9qaXN3ub98ZgvNaXPGPTZy6bF2PDt1gsrmxsAnhxCn7gN3/zNy+eT/kh/qPz4lnghwGtazV/yZfwW9DfDIEP62eho+djwqc//enjy5qLxeLS4UgA/+/l/8KhdhPAi8VisVj8YhCEedOb3nTht37rt47AjKCcw7CgmJ+kK/DouiCyYItDsiCLt4PRCoAL2gjsCFKhE8ASAOpA3gHaQZ0OB2nX5HUAx0MPmXR7o1cwSTCUHPLIZquDuXZBIIEicgSD/FHAPsFBASZy8OkvMYyut3vpEFAVMGKPABPZ6NEYj8CUAJM3NVzTK4CLVikgKYgmuOZ8or1g2mKxWCx+Obz2ta89nv18BH/gORu8Yefb+nyMZzYfoHj+l/Dig/gCz2V+wrPc89qz2vOeXP5ETXZJUD9DymfQze/xBXwVmdr5D5/5gRI2+AWOyFLykRIn8UpikE9PPgqtWoLX37j8H3qFvWTwnXwkXjqMId5ZjJ99zQWZeOlubCWE+K/o4lP4dDzGxUY2m0/XU8cskrz5RvazWWLG3Fo3fpV/rq/k1/w/omSYTzJqt5b61HSzX2EDWN9Kvh/wGLNxKL1diy7EN4EnPus5aZKl33zUp5z2BX3JdM3G2sH5A5+5sj71Q7zRWP/kqPGU/LI26dWXTMVaO29pV+xl6+LsF409Za/Z79abTvsPrWvrNedDW7zWg3xt1pZsiUb702els9LkJ1thP93W1/VM8Oq3D+Nz7msO6lfbX2zER5Yko/FlAxn2jP1lHObn1A42OhPaQ5Kq7EVnHvCTg8d+19c8Z6NCH1vo1lchg27rSw4aY2GvfmPQbgz2AVnoPdfY2JjYbm/3fDD35gwNm/GSlT363XeuzSu97oOegdbNmBqbtfP/EPEYIzp2uFfJRsdevNdff/3FdnPCLvdtY2czsPOmm246dNtn6IwFTfa2DvRLnjuPWwt/b6AhyzjZRAY68w9s4QcWi8WVj00AL07BVzz44IOH/+ST+BC+Us338F2n0Hfazrfzhfyq/lP4shvfx493zqCbf3J++JM/+ZOfUi4Wi0uFTQAvFovFYvErwMMPP3zhgQceON6mEjgXePJWh4COYF3BLsEVAZ8OzpKlrh2A0TlAOwhrv+WWW46Aj+CMA7NAjkBkwVlwiMZbANK1oBQ9Ha4d4NmjLqhVoEsQiD34ydUHrgW3BY30e2NBctcBnlzj8pmMEsNkPPvssxeDxAJVbCJDUExQif2C/2jZlm7BJ380oC8gpR8PHQWx/VGiLBaLxeIXh1+ekCTxvC5Z5dnKV3km+yxp5JntWQ/8gue/BG0Js5JPEgX4+RCJrgI9+sjQLklBhj7+CPgm+hTtnvNk8wNq7XSwRfI0v0amYBVfRza5SrSu+Ss28lfo2UIfPrbhU9CzJ75ZtKt7W5fP4kfNG1npnrzxGItrby/zzWw3LrrZqT7lU/BJ2NKDzxjZ3JjxJh+/frboJ5NtCjpjTwZ6MCfOAPlq7fGwUYH52Re0egu4dnKshULe5J31LNHPhCskS60v+voq+gI7asdrfeFUpmL8EtWNIVvMjdr8JWv2NXf0atOHpsSas4s5tg/sD79sgla/s48+0A7J1x9NezP99Fh7/c6AzkHgjNjc1K/YE9mX3doFY+lvvdzz5IN+MtDYQ+yfc0AO26Kz38jwrGCra/2Vs2RMG+wfdjjX1q/gRUNHMtzzjYUM/HS6Rm8eXLsXzbnP7XWy2KqPLdp8McK6u7foUVx73tDhedUzD38Fr/vJm83mtLFaD/TRsduzzf8MJtO9YE3oLYlLljnD64
uPkrQ929iLzj6bdPaM9WeXeTce7XhaZ+Nkoz5na/yS1WBvOpuzmQzjJo8+X1Z1xvdsuvPOO4/9w173CHrjeOSRR47Pi8Xi6sAmgBdngV8Rv+Jb+EZ+DfgKn/kk/vu5oB/y+2I2E9qfeOKJw2/x8cnLj9HxyU9+8qfUi8XiUmETwIvFYrFY/AK45557jp9p9r8T3/e+9x3BHolfgVTBJMEqP2VTYN2hWkAFBGQEaARrBP7iExAqEKudHMEZyVBtvuUvwC7Y1EHcodpB2jf6+XCfXQvG8+d0CBCRJxgkwOSgLdjjfw0KQCmCRvrJF8Qihy3GVTCtAL+gFBt8NhYQgGIru4ytYJw/AvCbj/6oKCjHFrKyT8BbH0g4+KPAZzpdo2eXMS0Wi8XiFwPf4u0vz1TJCeATfAnHs9jz1jM+f8F3ed5LohSwQcfnSBB43ksaeG6Tx5d49vMt3tQlgz/hk1zzB/j5F1920gd4BYcU/lNQin8gm28hTx99/Aob1fjJdc2PsJFfLNlaiSdaJduSk97epiWL3yKL3ebJvMSfjAp+c4UfHzv4MWPN3mjnNd2KL2XhaaxoJl9jzWZ9jWm2N2Z92sxafk5EAAD/9ElEQVSf0pmht3YntHe2mPTx8PfaXIO5qZgbqP+5ClryfIlgJo0V7fol0zpbQH2KBNZz9fUz0BPkOYPZw9YwXnzZbj+zR1+yasdr7axHPPqdS9xH5tcZTZukWkBn/Uu6oSefDIXcCvn2SHoVe97+w0O+NTXHaOIvsYffetqb+hV67UFrjM6enf3pUNyXbHUeO+2fZzZjzQb95ocedO5J970zr/b63UueG/rZaizs0K9oa25cJ4NMvJ4pPV+yx1mZDeiSpb3+7hXzRLexZ6t+YwH93shtnRVzZq3tY/SNz9jtIfNpvj2X1GD+9NNrT7CPXe4/dvjMBuM0fs8+cozP3nCNzmc86NLvTWB95qXzMRsBLXlsUUvwG4O/AfqVIbTsx9uz1ByQh19y2zPz6aefPmSwwf9h3HP2YnH1YRPAi7Pgy5X+XRnfxy/wEfwq36MAfxn4pwl92vDwI5078qHkPfPMMxf9JLrOMZ0nPvvZz/5U2mKxuFTYBPBisVgsFi8AgkQCKnffffeF3/iN3zg+C/TccccdxyHWgdZPqzn0unbYFQwUCNYuOCex6mAtsFKw1GcBcoEZwRtvLPkpuRlQF6jhmwWyyBUUhhloFdAqAYyPrYK+rgt6oiNfQEgbOwWlCmLqVzusO4wLwjrI08smerULTglo0ScgJniGTlALDVvIJN/4jZVO9NrIA4GmAk7ZTC55dHiro/lUkyuI5Y8RwdrFYrFYvDh4DnvjS+2Zzy/xP76I5Pnrme3ac5c/yM/E61ktUewZzE95Lnvu69OGh7/hG0oKeWaTI7gkaYJe4oOP4h+08YX8EwgKnSZcyVUKSCno+Qr281vspHvSG8fkUej1/4fx8k1qfod/48cUvoZdxsA2Jf3eBsYz+dlsbvCwPf5sOLVDkob/Y7s5pJ//ZvspPXtnmXLqN7/KpKutAnRUrBlMGnMYbUBrbqxv/hu0N2/OOPWRcVaBZE39+uicfZ2RAhrtZ/Vpq3/2kUkeGxXraj9ljz7F2qnrIwe9tS2hab9aG23OZfZywU3rri/dydCOl3z9p/L167NPSuBmr33hHNW+cs/UH019/fyvPWdPOS86s2mLpsTqHLd29rtnXNvHdLBPu7FmQ3bSgbcxOL+hwa/Qz1bPEl8GNOfZoOgjw1nWswaNe6t+94JnBx3OhOylhz36zIu97b7Klmy1n9w3xsE+dGjoR8c2n+0LPGjxkcVe50xjxG/u0LETjUSpZ4G5pQdPY0DLTtdk2h+urQuZ7lHzZOxk40Nn3vQZr3mnz/j00YOOXRLz+tx/xkW+ml34jc9n9J3VPRvZ4O8Q8+Ca/e3P5tO+6xyvLb7FYnH1YRPAi+eCZ794Ff+Wz+Yr+CN+hg/1mS/yWc3vqGvna7TxY/wV/+LsoSSLnHw9PXwpf/eFL3zhp5YsFotLhU0ALxaLxWJxBgRm7rvvvuMN37e//e1HwMUbU4IpAlp+wk2wryCRoJJAjYOvAJJATEFkh12HZQFf1yVk/VyyA3Df1NcncONg7LBc8rM3a+nSRy506Ba88XYwfvodqAUnXQvqkCm4BAI87CMb2CRgREdBLX2C42zvYM5+ttLNJod8wS1JWteCW8YswGbuBKbI0cc+c2V+1P4wECgugMY+Othtfo2R/QKu5PYWs/GxiQzj9rbPYrFYLF44JH89t/kHtYSEZzafw8cont+evXwCoFU8e/XxIZ7Jagkcz3bPcbUkAx9Bnmc8X8fn6fd2K58AtUn+CBTxZ/yUttrxezuNjXyXmh/iC4A/Yi9/oUav8FeKRC17400Ofrz0sMM40qcU8PITqfQpxqvmp/L9+KvjNf5sSE6l///LL9NPb2NIr/mZpaCaQsZp+6xnP1Trq/Cd1lJtbcAcG5/ED7/bukP0ldnns/FU9KF/Pkz95jH4rJCjf/7ktHHMvmmjMYH20z7QZr4V8y0gmZ31kau2j+wR+9i5yH7U7kyiHY/2bNVnPyj2u77sqB+vYo07Dyr2knMlm5JhH1kD9yVabfGT37/JaB60J8dZCtBY11P7FGMgFy++krf1t3+dx3q7Vns2OMv1zIjG/nVP6WdDevslmexEo9CBxniiQa+PTYp97Bd2Whdt1sS6uU/Z1H2WTSXk0bKjsaDvWWYN9PlsbGjpcJ5HZ+7tOffhlMVeZ1vrG53xk8lW9w6go9d4PRfJYV/nb/PGJmPGj5YMtfF5FpGBTpux29/NIf3uBTLNu3522UPsRcNm94C/V+j3zCHLuPSbO/ZkM3ryyTXW/X+/i8XVi00AL54LzlzOZF5G4Ds6q/BB/AN/43PnzM5ePoPP+St+hR/r3IyX7wQ+SJsa+CdvB//jP/7j8XmxWFw6bAJ4sVgsFouf4nWve93x/8ne+ta3XnjXu951BLcchCUyHWgF5wRR1AIpgk4C1wIwAkqCcIJvAjF8KTqHX7wCMn7KzeFY8EUAiQwHXwE5B2j0gjoFgQVjJFkFjKrxCPagIdcB2/9xBJ8LvpUAdlhH7zPgJ8chn062OpQrAqPGwgZ9Eq+Cg+iNTSDJoZ3t5sRY+slDushAK5CFzxzQwyb2k+1n5oxLIsDhn/3OHop5MZ+CdBLQ7CBPIK1xm096m7f9mbrFYrF4YeDf+CjPVL7Ms5a/8Oz1rOaDPFs9Y0EiKT+iSFB4jqPznPdM9owWOMLHT3hue46T7xqtfj6FDjr5ipLB/AO5+Q/P9Hyb5z45nvV04Y2fD0Q/fyqaPyu4xM/wK3wfGyp42KpI8OKVJC5IpVboNi/04auOl/5qxfiMRz2v2UculPQ+5YvevM+iTSHDfPC9vVUYjeStMUioaQftZIZoFfMNdDdOc0tPZwjrFGqvlFwEtbb2x+T7ecBrX1ijs+S51jehTR8afcYTsk8psQ14zL9S4qxzCX70+uyX3kB37tOvmB/7qrOSPVFfvBVr1Pwo9iL+5tivvVj7k
p949NHtviJPMlKNv/5J515Q2wvOqOT6rNjb7MOvmKv0s0W/dXc2mwnexgf0lpidNijGZw7Nz9Sjr3vOvjNP7Gxf0KEPjf3k+eCLKPUr9KMhi532NFtLfLuP9aEjpzn2awVszE7PAeuLhh73m7O6drI8o8jXnxz3FBu9aasmS5/xuu9d41f0k8c2a2Cc9hHbzYVniT50nl10oXPv60+nMaNTk2HOjNn+1Ea3ui/jeDPL/WLezaFr/cbasy8+e8hzAQ/Z+o3RuuElT00/+/Cw/xvf+MaxBxaLxdWJTQAvng9eCnjggQcOv8JH8GOKa/7FtTMMH6ZN4UN87jzGzzkHaOtzZ3ZtfA2fBNrIkPz93ve+d7QtFotLh00ALxaLxeJlC8Fw36i/6667Ljz00EPH2778oOC4w6tAjIOqw6mAT2+0CvA5APOX+AWLHGIFeQR8BHgEmQSOyHA4hoKZgs6CMgI25AgMOTgL2JDRmwN0aSejb+UL7OATsHGoBgd11wWV2OMQX+JaUFPAR0FbwFAwzDVb2OFg75DewZ8e9qSTfPRoBZOMh53f//73D3kCmMavTZCQfjr6qU3zJohnTOYT0NMhmUAG29EK+JFhHcyzIB/7zA2a1qLE9GKxWCyeGwJ//JUv7XimSox4jnume77zW57HM3Aj2eB5W+GLPPc91z3D+Qr/w9dzGC2/g9/zuf/9C577fAvfV3AIPRu04wN9JYPZVdBI8ZznYwSO+Aq07GYT+fj4XP4WnzpeCVS8kin8i0SRko/mA9V8G974fU4Gm6srfJBfzzCf2cUmushno3HmN+NJjmv/i998Thn4FdfZiR/w6qMnn82n0mVtXCtkg+uJ9GcPREOGM0Rv0U5e7faFpNrUE49S4j++yR89/eTgU+yh9hvMvpnIxUvH5Ktd0VZ/fWQlb665NaldbS6bT/1TLxrtinWyf4w/XveFvmTbS/rYos25y7kFn/Vlqy8DRkNv+8b9wLb60k2OfuuGv3tv9qvdz35RZvLH6/5uT0jwNv5sQGcPout/ARtb/e4d43ZtLOnXj067cdiP5kmCuT7j7suE7FTIMFfp8Nm8G6Pr7PO80q+wsXMsfXS599GzPR3s0GYs6NFZA7Tk4NXnPgruB/10sLc3jicdGvNrnvuyC3o0SjbTaY3RsdX975yLHg3bQJsvcLLd88vY6fbZXkLHXvcpmcbnizOe443FFzfZwg7zap3YTg+bnP/9QhD5nqGey3itj3lgIz3f+ta3DpsWi8XVi00AL54PfAI/58tC/Bb/zJd0xuJjOkdA5+zOkvrIQAvxkFlf/hIf38wHffGLXzzOnYvF4tJiE8CLxWKxuOohICnY4dvw999///F/e719IPHrZ9IEhhxCJR0F1gRHBFsEwRxaBZN87uAqWOLAKinJZ+JFR4+DcsFkQWcHWnK1C+gI2ODXT57DcwlgcCDmi0NBYj9lKcCDj10CPoJlBToD+WSRnRz2sxUEg/QJJPXZIV+y1biNg30CU9rZ4zp6AUryFTIFq+g0B4p5Mg9sNkdkGLdSoIwsNkgS+OOit1j0G5M+7QJZ5s042GeuBLvUAphsNdfkCdCxZbFYLBY/C0kUCQn+RIKJ31B7dnvue3brK+nh+Stw4xmu37PZc9Z1X+7pmn/zNhpfwS9pc+1Z7rnued3bvvj4MPokTunIJr4FrS8MCRgVJKKbnfj4EXrJ9fxHr1borJDpC1Xk4OVj8cwksZLvnryKNjIq/UQy/8YvsYvcfDk/lF3kqpOdLONX850SMcbFbxk3uyY/3vinHVN24yTPPPKV1sH8s5NsvtsaWgt0MOsCe31OjhrPhDZjVs8zCmiffWRVoHoCrVKyGdClX+kN4ck/+Uo2Tz524GO/ttrbR+ZPorrx6bff7RXF/jjlnf3WuyRt+shGQ7a1LIEoWWhdnWkUdGhOk6faydbnDfjZr697wXpb9/oBDV50ij15VhLavefaftPf2E757TF7xxg7+3ZGjiYZ8RqvtZg09p4975lgDtCag3QZq9KZ0bxqN0567X3y6DEmZ2120E0GWdbResWfPc2XNjX9irkmm23m0L4hx5yy1Wf3S3rQ0Q/ksEORUG0cybPuaMwrO4zLGdn9SLexmFfjYBc55t/zxD6pnR2KJDo72KygQ+/vGHuELH+vGL97wZ41luCaHfrR+6IOevxs9sUTbV//+tePpPBisbj6sQngxc+DL/V7C9iZoXOGmm/j96AzGT/ZOVe/M0pxIX4sv5+P5ff4ST4ND34///z5z3/+kLdYLC4tNgG8WCwWi6sOfJkEr4D3vffee/wvMT/vLHHqQHvbbbcdQZ2+RX/DDTccQWwBF4dbh1RBE4dTn9E4vKJxCBbIURecEUQR7JFMFjxzrV8ADX+HaP/zV6BK8EXgS4DQ4VfgiE40DtUFnMBnhVxBHnTkqtkX8CoO4B3SzYNrQSoy9Wunmx1ksB+da+NgW8leQXvXBQIF2wSdBLQEm8yXIJoDfm8hOPw7R5CfXHT4zQObBbL0q9H4Y0Gtj/wSxGxWs9vc+8OB7QJ0iqAbHkEtevWTtVgsFov/hWc1/+fZ6Ysz/I6aj+Nb+LcSifyFZzq/kB8pweI5ToaaP/Ls5etc8wk+e957bvNr3vYlg2w+RBvaksH8hYQHf4pXEiL/px09felSK3yxwl+xjx+hn42+KFUShk788arxsScZrvusZjNfxQ/xJa75N3PEHnZlG55pU7LSU5uxGpeEN19sbrPr1LZTPiW5ldqTWxsZbMtW7Z0LSqSG1loduu4cUYI0JEvh7813POjrs3/YA1M++FyJ3vxOXbNP0TflpEvdmFwDWeZXbf21R6/dfrF/zRO5s18fGvvd+OZ86bN2amvXeWryOqs4ozn/3HjjjT+TpG3d1fjZpk9hK/5kq7sHXdvfMwHL/vmWr0L3tM8eKNno3GtM6cDvrNTZbdpvTtT2jrHRO8+O6eiedQZzH6PJDvdM96BEaTrU9BqPsyD7tLPPWNPR/ee+sG+NpyQymWwoGavNudQZcM6XZ5z9mT2N13OCbusfnXby2Kc4f5ojNd2eceiBPHNpnvCjo8dc+Oy+I8tYtLkP6KPHHHVvk4mWXdrQkWn/+DUEb1/RaX6N2VyYE/qN198axurLMn1R1Vobq/F4duFjCz76u4+eeOKJY16doR977LFjXIvF4uWBTQAvXgj4M36Gv+NL+Bt+LWjzme/iN9X8Ex/D7/DT0blWo+OjyOIH+VefP/nJT+6XkBaLy4RNAC8Wi8XiqgD/5c3e1772tRfuu+++42fVBI38zLPAiyCNQ6mfMxZ86dAqaCSQjMZB14FUcIQvdJB1CEaPX0CmwA4IxAi2OMwK5pDpmpwCsHQI4tAhiIPeYVi7wJADM33ayMUjUIyHziAgWTAHLTvpKVjqcK2A2rgKyrFJkhmdImhGluAVOejoJZeNBYoczNnh7TF8ZBbAEqgylwJYAkkCgoJOxqS/AJzaXBgb27XpNw9gnsnwDVA2mWv26UcvUEqua7b6I0FQkX1o8LLLmK2P+dwk8GKxWPwvfJPfc90z
03M6H+S5zTdIdPATkH/wTNXm1xl85gc8n/kcz3C+wzPd85pPyD96RvN12vgCfoIfUcj07OdvJDn4Vu35JfxkslU/ffwDP57f8eZs/g298fC7fIyx8F1sJANPfIprvpVPSjceviX7OgeQZyzkJ4fMfPSUTX86FElkMs0NPcZhLvlgMibtlHNWmTqsX3XXlWhdBzSdC8zvTGieAm1162HtzGftUJ9izbPDZ7940l4xdnNg7JJu9sup/uSYG3sye7NBu8+dFSBdlZK8oMbT3sUXHdSnWAtrPWXZCwpb7S1j19781W9vOI/VX1/nH/eWsc9+OrXH31u+bEJrziTy0mHfaZPYRKctfsW5h/3kkq9Pm3vFnGvD2/ygmzLYED96+xSv+8BnsJ+yMRnOV2iskV+P8eVKutAo9Sud+cyl/WBu6HUP0IHWtT2Dlw3GLOmMFw055tXcoGE/OnNrzxsLGnw+s9f96951nxs/W82Nde3n7/GjYQ89jdlYnGvZVbK4cRiDsWj32RdIzZHP5Nmb9DS+xojGM5Y9nrlozSd5xkoWGWj93WJvOueab2sIxscO4wV/M/jiqjZJYGP3JUmf6WCDcZOJhw3sMQfm0Zx95zvfOWQtFouXDzYBvHghcAaXAOYD+Q/+iO/ofAY+d+7i//gaNT/N96CF/BYZ+vA74/BTzst/9md/dvQvFotLj00ALxaLxeKKheC0pO+DDz544e677z7+h6+AioCRg6VgTwFYQRXBHkEYgRJBIgHO22+//QjOCKjgc2gVkHI4FXxxUHX4Reugi65glsOtgJMDLXm10Y0HBGQEW9gi0MMOOgANnYI72tEqZNErQEenIjj33e9+96h720EwKqCpxuvAXWDNteAsXn0C9MbsWvDI22BPP/30EdCl35w5C7BDYElASR85xgYO+d5CIV8xX3jNL1p95JQAoA+PfgEwNTvMnX7rxnZ9gmHmufkRqCMLHdn66DJv/X9i55f68ZjrxWKxeLmCP/JlKD/5yV9IovAJPccV/sqzUz/wCXyR56dnsS85ofOc9gzPb+r3XJ4JF37Cc1ntWZ8Po1PBB/Rrp5euGQjCww9oL6lJF/1ADlp69Kvx53/QK/wdn5H/56MkZ/jMbGIzWer4p4wKPYoxKa77SWg+h3w28kPNn7OGeVHIju+5SrJnoYMf7lxRu+s+q6G2U2hrTq0lWaeYMgCt+cJnfSH52hTz4hxUksu1fvPgXGbMaLSRp/Dbp0ng9Ngb9U0e/fr49dmXHcZkn7oGNR77uT5IlnbFuswkrn7t9pnanphv6Sr6FEk0/PaRsTuHuLe0xe/6lB+tfrJdmx/3ijMMO/SZT/uUPdrjZbv7RkmGPWz/OUuZ85nMtI87PyYjXrrRouk8zFb92WjfkW1c6VfcT9YpW6w3XmPW3xi04QftzoNoyEKTLjbYx/QZw7Sjkp1gH7i/0HTvKeQ6Y9uH5tSYzFE0zSlZJdrZo1+f9VKTY7+VBGaXe4CN7lNrTS46eiSBs89825/NP35j12et28OS/xK97hE69aNrDxoz/WTRYbz0mXt2st3nvhQqSE+O54U5INccazOfdJNJnpoue5jsxWLx8sImgBcvFM5QfIrYEf+Rn4c+80XafD7tU/NrCj+kPxp+1PVnPvOZ44udi8Xi8mATwIvFYrG44uBA+ta3vvX4Q4bfEqARUAGBF8lEh0uJRIEuwRvBkJKMAjUCOQJogiWCQAIrAkMOqZKiAqX4HYAFk7QL0gm44CkwW5BJsEegxoEWLZ0OxoI1vs2PFl30aNjCdnwCaeQKMrkWWOrgrJCnsKnDNZu16YfqDtnGUdCJ3QV/u8YrIMQm9kjIss08sdvcAT1sErgS9DZ2cyGIpKarpDLbBNC0mXO8xiV4hVY/GQJUdNNl7ugtKW0dmkttbPFZoAwPu8kV3DIfeBuH8RoLffQvFovFyw38on+D0HPec9SzUZLVF4j4yXwHH+QZ61ofv+g53HPVs5Y8CQfPcs9V8jyT+YSSmJ7FavLweB77uWcgj1w+RSGHDRK1fAG/gA8Pf1ACRcKWHjye69rV9Eh4sIWdfIpr4yAPX4XM5Kkr5MzSOJR+hUOZ/ooONV/GLrLZps5/K8k0x1Nun6v9ZDX5iuQVP0u+MeEH/jTfCuqu4fRz0EaPuVesL7vrO0tGJZ4Ss9qy29w13mzUD+mYsrQp5KU/1M7XTz7IhvjoRl9fwUj7BtKjraIvGWB/KOw/7SfP/Dvv6bNfp0z91qY3Zdnj/EIGGv3Jl1zrZ5rjs77uL3uIHuN0bk2/PaaQzT7nVO10k1+fvaAGtkSXbnTOZs55zkj4tTsToXcfsAH8j9r0o8sGBR073cvug744aTz6ybT3zZPPxke+/dEeZof5yY50sE0xB87H3iQmtzks0WzPoWOfX36Zc8w2Z0Kyjcde9W9fzEey2MAuNrpGZ83YhzcdZNHJHrp8WYGd1gu/Zxs9+tFaD3TuAbLNlXOxvxesqX52ty/wmUN99jKZ5sbziRw20KfNFzL78gJ+z13J8exkO3nNpXH4ko/nkUC6Z+5dd911rJUxG1dzwYZvfvObm/xdLF6m2ATw4oWCP+L3gC/snADOP67zhXySawX4aO0KWmdF/kq/az7zW9/61oWvfvWrh69cLBaXB5sAXiwWi8UVA8GTd7zjHcdbTYJgAkuCJQIfAhuCO4JPAh36BWYcSgVCHDYdZgXEHTYdagVfBE8EYxTBLIEXQR3fwHdoLcBCXgEVdPrQFtCjV38HYcGaDsYCMII5ePEVMPOzx/GjwY+XfQX1OkxL5tIhYOzQbdwCQcYVOnx3KKePX3dNX0HUrgs6FdhybWw+Cxj5CSDzSpfP+gtyN39sNw/sKBjnWpCt4H7BvJLmxuGPA7KMoaCccZvnbPe5ebE+aNnvWj/o1ya5IYhObnNcUmCxWCxeTnjNa15zPF89uz0DPRN9scYz2DPWZ/6Fr/Cs9EzlT/lICYT8j6RPvtGzVmLKsxyd57hrfkBCgU/J10qg8Cn8QclC/oRMz2jPcj4FP/+tnzyBoYpktWd4vh2f575rfoW9dOOlQ6Eb76kshd+qrvCr5Bmb5Aq55sQ4FPOQDrLNg+uzZJpTpevZxhdmO118m/GY+2Sy33WyzR+YM8kta1eyuC+n9QUvtBLi5so6AL3An5KhWBdy0Bmv2tpZByBHiUdNXu2Nx3XoGq0y5YH+qZ/NyUJvvvUZT19S01d7ffHFY/+o7TF1cF3Rhz451sCZq37zby2cXSRs7cX66LTHrJM1k4SzRmQo9ljy04G2Yn7tGfNNNlp89phif2U7XnV97i97RT9Z7kF22yfaFfuL/Y1t8uOxl+g3Nvexvaxdv/uPDmNKf2c8dls/toI+Ccb0pEM/G8DamDvzQA75k45+uvWZb+PxHIrGnjY2c4WWLHwV9wMa898eNzaFvGxBY5zG0Lkdr35zpTZutjhzoqfTOttb+JpLX8K0X0uao7MeEJ370D7wbJOgtn/Zx3791smY8bKFXM+TnhlssQ/QpdfckCOZ6+8Q43H/k2MMzSOojcl8e5ZJ7pg
bX2DpeWB+PX/+9V//9UgmLxaLly82Abx4MfD3AP/2tre97fBZ/A8/D/PsB3yYazQKf8ZH8dt48+V8Pfzpn/7p4RvjXywWlx6bAF4sFovFuYcglcSvnzpz7ZApeOPQKPgjWFJwTuBFQEYQ0mFTvyAPehAQB4Ed/QIs4EBasA+/gItAis/6fBaY8qarIAwa+vEICgmyCOR0AHYwdvjFL1gkiEOfPrwCigJLDsOCNA7Z9DkkOzCj71BMlkCTIBT5bC/AxDY6yIkWH91sYh+gKcjlmvzaBaSAXrb5jJ+9BanYRh4bBGedFSQU8JAx2wU7BaToKgiZDPYKRuFDYw60mQNr8dhjj10MsgEdbFFbS3SCjYJk5Dff/kARRLMHjMFaGC/91m6xWCxeDrjnnnsuJnw8Wz0DPdM9dz0fPT8lIyS40Hkue5bmrzyb+RfX+MnxvOVHPVv5BM9ocvmtPnuTjxygr2e/pIN2NJ7LCjv0kSfRy9fwkwq9nulsIId+NXr2KPj4vFnYcVp8YUoiiFz+hg/hJ/gyvoOOzg3sIpssuqbMU/l8XbVki/nhm8zRPHeozSf7G7OanimvQl6yzZlr88g+BR+YHzYZF33GZk7A2ccaQ+sB+ptTMunojOFM4RyFPp58a0lbttQXH2SnNgWfeMKUVXt9E9rsPTTxweSJLx3oFXPLPnu4vvr12XP2k3mSQLWXyNJm3iTZ/C/VzknpwmuPmCNnGl8+ZJd2vIp5tpfoji+51h3Y0huhSrxkW4MSuI1Hu+JcRI55tS7sxl+/4kxlXMbILvydK+0zfXiceU/HVWEn2Wr3iTnCS7di/Pac+Zk2ojf29ioZZDcW+xF/+9L9oXaO7Rde5v1obvH6dZ/06DcGusnSb+6c9elS0kGG+4E9eD3f3Gf04teORiFHctf6mVvr4N61FoAG9DuTqu296Nw/ZLi/2cOOW2655eJbwujotW88U8izDs2na89eMtnV3xbkGC+gtW7+3rCexqIfn8/G7DNZarrJpuvZZ5899kH7Rdv3vve9Q+5isXh5YxPAixcDvkZ57Wtfe5x1gG/hAxWohvr4RtfgDOFaW2fYv/7rv77w6KOP/gzvYrG49HhRCWB//N14443Hwd3h+7QIrrux+6NnsVgsFotfFv6/7/33338ESPggwRz+yGFSEEnQRwBQUIb/EpQR/AG+SXBEQEYfPyXYIujj0OkgSpaAkeAJGdELzAgIOfgKiAlQaZfc5Of0axOgIRedQHDy2EeW4JEAoH60AkR4BYYExukV0BEEJ8dnthnbBLsLIAu0oyO/txT0sUehWwDIWxuuFTYJGGon+8c//vERbOot395OMJ9kuRY8oo8uAb4O7+wnk03Gw95+5hrINV76BBTxs0sgin7FeppP18Yg+EiGuaLbOtIhQEuHz2QIipHX2lpX/ew2pwJv6UBnbtkloFZgb7FYLK5GeH5KaHlue/Z5NnoOejZ6nnvWetb7n++ezXwZP+RZjRaN5zwfINjDZ0nceH7yT3yiPs9qz1z+Di8dfLM+PoJ/8bzOl2lT8JHvWUymZzw+9qErOUk2vXxqPuS08ENqb7rxD579ar4nHew1juxWyGdLOhRyfFam7K4Vvohv41vINld8VXLZrzb3ZNLhOpnpSPZZ9SzakuVz/r12uvQlX1/+voQuJCs5s0A8JaTSY+2MT83Pml/jLrnVWQrQA1ooaQzZpU+xz6KvLz1sqA+06ePr7ROfs7d2tb7a67M+9i95fupYu/HZG/aIvV+drWj0d14wZ9ddd93FPoXc7hP71njoU7SZH/LItkZs09dYtON3NqTb2Uq7feUes2/oVqwt+XgnPz1ktJfp7P7UrxgD/fFnAz591o4O8Kxgh/7mRzF+OkrwKu4x66SPLMlH92i89LhP3Hdous89m9gSjbEniy620sOm3t7F31xYR/vefUYGXmdX/Oap5Ck76NNufdicDDT62Cy5q2Yn3eaQbECLzn1jnjzjrCP91smc04OOje6F5JHRfe4+ITs645LkZpOztzkxZrr1G5c+n8n0VrHnM7vZYS0kgclU2NL9Qof5MLcV62Oe3I+LxWKxCeDFiwX/LJ6k8IGdBfhAUPNDimtFf214+CPt/BZ5f/zHf3zwLhaLy4sXlQAuSOD/PSndzL5V6IDpjw4HUXSLxWKxWPyyeO9733sEVSSABbcERQRYBFYENQqKOGC6FlwSRBHskCBEL2CiCMhoFwjk5wSGtJMjEEOu4Di/Jhkr+KSff+Pv6BR40U4XnYJDwCc61LLVoRePWkCGTNfkS17SJ7AkKCZIKODtMMy/CjDRA37OjS6gi260DuAC0YKHd9xxx8HL97KVPWTTX8CzA7p5UYC9+snFj5f/R+stMbaaV3YXyKJbcEpAlT5BM3NoHGwpCIlPcpcuc2kOzLd2YBtZ+Og0bkEr8gr+mUdBfeMpiY2e3CeffPKwm03sKeCppt/aNBa69KHFS4exLxaLxdUGz3RfkvJc9gyU1CmByj+VfOm5+/+xd7e72lVX/cfvkzG2PJdAgVQsjbUa06gxHoOnZOIRmPii79QXVgJUCoUUbh4KbU/mn8/SL5n/HdBunu4b9m8kM2utOcfzGHPOtce8rmu7WqOt85r113ppr4XrsMLfdPYHa6d90tprbbcfWfM9o4NDlrXaeqvPuIbOeqx1wEtHOtlb8bPv2But4WTSy9WYwxeHIfYd+4B+6zxdyetw1zX++CbfHoaP/UTz7ErPrvYb+569CG97h2v39MGbnK54oyerfbJrsuJ/9t2877l7jV/b50DPGtzGTvxw+ZlfxOzEA93XjAP4YmqfPvv5TawcavEr2QA+OXIoXNf6k38CHxoTS3SALLiNyUs66MNPf82Yd6b41+9ZvuoDrvJcrsgR+nu3agx+B22uxtOVzPo17zbeScg9aeWExid803uO9wz8TnofnEeXvtF7v6Mjn8rxvqkbb6281k+vxtFp+cg7k7HkwCEHvXce9vOF+WVdkLPmEb3F2HyNVoOrwaMb+3u/k/90IQOOKz7sNkfD0U8GPc0L85stcK0tfQgwWfxAh1NGdmrNYTbTyTpFpjkr/sbwxgMOWemYbABHLMWGz8WNf8xn8906IKbu8cZXrPQnxxgb+Jc8uurnJ/w6SKYHPLqSKyZ0f+yxxy596AAXD/6lFzvxR2vcms5+66v3Yfjs8mEaeUtv9vElenoAcSTbO/NgMBiAHQAPPg/Yg7wneJ8B9hdgv7JXeZezB9mPelfRF7S3oXv55Zfvvf322/8zMhgMvk641QHwTfCy7IXei6wXcIvCDoAHg8Fg8GXAT3/603vf//73r73G/uLlU1HIC6SCtpdJL5r2IN/Q8KLpW0eKWxV30CqSKKwowHghdTCseAJXQca+h7dCFxwFGsUUfBWoKrzg68XVASh+FZPI1d+3fDUHzQ5+FWq8CGtkATop7tARH3J/8YtfXLLo5xnQQ0GWHgpifUMDnQNgRR4v2/DYb+/mGzy8jHeADF8D3dOnAqx7fqyASk986Im34r2iFllw2Wec7oqObCcTDzpVFOPD4q
WfH317Gj/+pxtavOOBJzlwKwLSBw6/481W7xx0EUtFOTTeR9jg229iA0dMjPMdfPwU08iuQFdcBoPB4JsM/je+tdLaB+xBDjCsf9ZIYG9wAGe9tV5ae63zmjXRvmDfsQ9Z6+1L1lPrqHs49lQ89ePrZ6Dtg9bV1nwNH/32LXh4asZc7avk23fwQksn+ttPrOlwPdOrMbw6gE2W5hDXHm6/qZF/Xu2L9iZ7jbW/PQ1feiaDfvhr7umSPp/G+6ac/2vcT1PzYWMV0Fxv3n9Wu4mvAT7VOgDWHLD7G10c5chJ33uB5j1CzvhbXnPvHeSkAfIHvveEZGj6NfKNBXAb09Ituems9W7SmBg1hk5f4/V75zKGd/1yWmzlVh94MJZ+3kPgiPen0daMsyVZ8fWeIifQiKXciT/e8Zdn0aPhU3nKv3KRT/vpY7Tsjd57LPrmamPeIelMD3NB7qKNXr9mrpDJ1+Zn75DG0k+Om9fxd6WXdzC05Msb30Y1Vkt/uQGHbO9k8cE/evOGnXC8v9MDnnF8OvA0N6wD+b4PG6KFhx4dW+Rqaw57jOMnJs1f74x8554efM9W8vCCi0d45Gue8U13fJNJd7oCePSDhxc95Jm4eWf9oz/6oyt/+Mk4uWxB198xcpO/5BCdjOHR3wnG5bDCu3d/661/h0NfNtMJPv78hUazxvmXKoPBYBDsAHjwecA7lr3Mh5Bc7VX2MnstcO99w15r77YH2UuBe+P2Te/g//7v/74P4w8GDwh2ADwYDAaDhw6efvrpqymSVeRQgFK8UShT2FAEURDxUtk3SiqGKKR40YzWXqUgo1DlpVO/4pSXVff4KOB4wVVoce/FVeEFKKT/7ne/u+gVeBTCKpKDijT2QnpoCjo1/OCTRxd48N0rDCnmsEdhC6CBo7CHtwITcM8uPnjttdeuohQ96cwn9mA8FLiM0SMazT2Aw27PZLEDjecKaJ694CtC8TufotPnBR8/MUkG2/oDgK/5na/oBUdhUIHePXr94tv/A1Zcgw/Y5A8IcsUJr4qGnuGKBzn+GGE/GnF0wKzwRhe68iF8Noof/eApbjrAdqhBN/bKo8FgMPimgf/P5cDC3mC9tFZa/xxe2H+su4H11loOxzoL39poT/A3nWfrIRrFGmupMWupNdd6ar8C1l1j7W0+WAUf2MOsu/o1uA4+yYTj78hwanD8Agcd4Nl/HIjYf/BwaKzP/mHvtF/ZEzzjaQ+Hr1+Dw372spueZLLvpuwOd099u2r4a31bmBy6kEN++yEc+2OHrvYxV/tbeyIc+w0aY3TUz/94oAf+n2f9YqQ//l27B/Zijb504xuNfQE+8GuA7fxhLxVnduMDXE+dgNzS37tXY/qM8XeHsvqM66+J/U1+aDRjIF5aOXuOgWiMJ0s8+PT8EB4c467x5Rdj7PU+1rgxuZfv8ktyXMngX37zbufDbae+Jz3f86d3U+8ebCgXjMtJeuKbDPzNM7p5Jr888y6EJv5wjCeXXuYLufLKM/nkZh+eyfc+xobey9DSh7z0ZKe5pJ9v5aP3JjTwzFfy6IE3mXixoTVIvOmBBz3xhSMf9eODHz7mjHh0CGuuWa/cixX7xRcfDS29rEVkZyNb8KKve/OCzPiSxQfeReFZ38pn/MjBi9/gsQeOD8H4MCLfk4ePfEeHjw8i0t/8LnfgkY9fOhmHS1fvrX7FIV+jYRt89uPv7yI6WYPMPX87yE+6GseHfT5USsfBYDA4YQfAg88L9hl7XP9mpndoe5S9zTuQ/as+9+e7s33vH//xH68PMtmzB4PB1w9f6AC4l1dgAdgB8GAwGAy+DHjxxRev4g5QAFHw8jLppbJCqL3GPuQADyj8VNCBo1CjmGRPU6Tx8qnPC6oClCKJKxq0+MGBay/Dh2xFHmN0qFiq+K1fQQfoV3ghC6+KZnijJVcRSBGn4hgb9CnSkEMfh5KKavRX5KlAiif7Aza89957V4FIwcgYOjoqTrGbHHt6+EFFSoV9BTH3CtUKmO7pwh4FMvy9pHuBZxt+cBT5KyIrKuLZN6196xYtoAfaYsUf+PEHeQpeeMLjd+8SfMN3+r1X8AmZdFEUU4TDC336o1EUVABTSHQojB+f4OnQHw49yIeDju50hVdB0QEKP8Kj72AwGDzM4GdMHQRYL61jmsKLdVWzj7Q3AXuDZv9xSGGts6Z6bq/Dy3pqbbR+WjPxtDe5t4bbP/F3YGsP0E+GD+Hgh69123revfU8OvwUgvRb1+HYf/Cwbhu3x7AHD3hore2ucOJjn4FnTdcac4Ufjn2dnppnjYyu3fuAkfXfnqbZP/iMjvjxFR+5x5OP0aFhCzuM0SGdk02Ge/3tQfQD+HRoTR46+MAhHb8DeGerDy4Z5CYv2wIHR+IQfr4J5+Qrrho9G4PXe4QmhxrTL041Y/qD9nK2eIeAb1w/e12N9e4C4BtzlRvpBKLjb/EXKznbu0p6wOk9qiZOmncmeWIcbjyNecdwL1bG5CEZ5gh58TBv4g+vfvmS/d5LTv54aeSjl1NiL8bing7xcFjYTz03pom19y8xRCsv0zUc/PDXj1brPRR/98bOX3VBb4xdrgCddy/5EE5yyiN6igE6cQlHPrJVTngXJlvus9n65LmDT37nZ7nqHZ+v+UjDq3ljnpAnn+U6+njKEfId7tINHhl8yV4+scbRC56x8PgQ5JfGrW2K3+7ZIC/4gTzy+YGvxIS/9bGBLmjC5Ucg78izhruX+2SxzxU+/fD0jFfzF7+PP/74kw/MkP3KK6/s4HcwGHwm7AB48Hmh92B7rD3avux90B4J7Ff2UPsePO8D9lt7l73fL1L867/+6yfvE4PB4OuHL3QAfIIXTy+troPBYDAYfF74kz/5k3svvfTSVWTyEqmQohjiBbNiqOKMF8iKK15E4Sms9EKqOKIgohDlpRWuF9D6FVzw8HKqiKOQ5MVVU6DCy8ur4rdDQUUnfHvZdQjqwBDvipFAEcfhqm960LfijcIRm+yzdLbXOmiuEOubPvA1hSXXXqrZW9Gz5v//vv7665/4hN10V5jEG092hB/oB3TiC2NsRwPfS7uCEl3da/iSYRzAV+DvoFQBi3z+gs9XbOQbBcm+iQPgoamQh5eiI5n+qNDEil50FTPxoiseimoBPnjAJbvDZ/34OqhnF/58SLYirMIivvijZZdiJTvQkyEujzzyyIXfHzr62T0YDAYPC/hWmH3HnmNtsyZac62J1m5gnXN4ay+1RnfIa82zvirQtO7hYd+y1tlXrX/WUX3o29ccNNgX0ODtnmx8rNeucK2d0dDHt3jpAOAY18gybv3vqi/a7NHIMq7phwfn7Ot6s6F1wMt2trZf2PPtS/Zddqa3fccVP7LTM376yT5tqP+mLvHIF/Yg+jSWLxrTD8SvGNKZvprnDoTwOVu8NXw8K9ABf7OjFWN2e+fo/Si68LXGArrwkzGHaB3YotHfmH31hPi17wbxqxmDB1zr73AYfnzET+M/8eOLAE3vMfI9+Zqc50PviMbloqv3MoeGYu5ZTst1tvWrNOibN65w8YdPpn6+5
TPjYtQ7VTrp53vvSN6V8ISTfsbZo7Gt/xGMnt5iRh/36MW4dzh4dKB7+sljfqIXOnnmGX9Xz3wAX46Z9+SGw355yX46pKNxOsgZ4EOJdMhG/qQXPHqad96r6GGeNcezA9DdGkUfspLDX+TLAzT8i54vyKAjHmSWI3jJCXMOHnpj8NicrvDow0a68RfZeJFvDpChj1748ZN+uHQjDy96sIf/+YMfe08mx7MrfH1nzrHT3wv+XUn20Ck/051M8w5dsfC3ijk9GAwGnwU7AB58EbDnffTRR/eeffbZa78C9iQN2LOAfal7NL608M///M/Xu8RgMHhw8KUdAA8Gg8Fg8GWAl0qHcAphXh4VWSrMKZTo89KpoKMYAio+KgYpiCiuKJBUxFXMUSRSoFI4UqhTMFewwR+/ikGKLJ7x7EVVYUURBw75moIQHHwrfnsBJstBJPkKPop7xshTJHIoqbClCMUu8vT7BqsXZuDA2Rj+dAUKPDVy6KCIXVEMHnqFIvs4/fijl3KQjumvmAXYyQ8V9thZQQ1evqMnWvievdQng+/wEBf+F68Kc/yJDxvZDj9+aPhX8VI/2WLf//xVnGUTvfDU557N8PBAy1+KbfRJZt8EUmijM97yg0zFNXkmLgppZODhsLhY8a17ecQ35Clc4luxd3/MDAaDBwXPP//8J2uqNQxYsxze2Gush+0b1k3rljXa+mmNtMZZx6yd1kNrqcMqayRavDr4gO/DTYCs9j17grXTHtA+YNwvS3QAZe11T894wdHsFxrZGp6u6O3x9lG61O/qJ1Dtc/ZbazA7rPt0bc/1c9PWeziu5LOVLtZ3etCbLvYqV/rTBb0r/dLr1NP4qc/Na/enXeeYxm9nO8d71tzjIz72IVf68mWtd6B48Q9/iHV9AG/xYX8xI+Oz+Ogrd+yDJ9Tvig4umjPfOhzWT/ZJE78TX3xcO6Sr6W8MXXpp9ndN7PlGXPXDgW8MrfcPtHKKb3xgQn4blxv0k1M+PJFMeQVfzogB3ngaRye3NbnDp3Dx8I5Al3TT8GCXK/nmrTiWv2LRO1j8k2HMO5FYmZ8Og+E2v1zpIKbo6Y4Wb3PH1aGisb5F7D790bMPmNfh4EE34+xy34F5/jG/zDk4aK09bDRv2cjH+bH3JXnhHQwPeqA15p2SHmx0IEq+tYq/5BH78WanQ3m84dLJHKejGMk3eNmp6edHMuiGB5x8Le7051fzHg4+/AeH/nTzHmhMvPOZvys6+KYHea0x8s66K7/oipdYsUujA1x68Yv/G8xWdPzCRjzJ0ooLPnim2/379y9dBoPB4LNgB8CDLwL2NvuN90i/4GP/sp/Zi4CrfdB7hf1Nrmm/+c1vrnfywWDwYGEHwIPBYDB4aEChxbcuFY06SNQUbbxQOvBUXPJSWVFEQUjRpKKogotxhRv9Cn6KU+EqlpCjwOPgUREIXd/C0A+3wpJnxSt8fevXiy7eQEENHhkKOPD9FKfiKxkKOoo1CjmeFZfgenFWnPcijbd+MtNbsYiuvVDTuUKW++TTzScxFQPJR08/tpAHD3405zN/Vmz0os7PfGGMTnQ0bsxLvncD+rj2ss9v9NaneKcQ6A8DxSm88FTEA8bQ4YtWQQ+Nn9KGq1+c0Yibe37Aj10VB/GWA8b4BF82wJEzdEFLN+PpxXY5orDpWSzRee/Rrxisnwz28yG9+JaN9JIf5Rn/ohFvxUd9fEfeYDAYfNVg7bGeWXdaE62t1llrlXvrm7XeWuhqn7BWWeesZQ4YrPH2XPTWXmuf+35W1p7lgMN+Y421duu3P7Qe2gtBh0mtreRZj13pZR3VFIKst3hZu9FZ67Xo8WYDaF3Wb401RkfNHkwfazxd0POJ9ZtM1/Rgg71GSxe0Xcn7rKt9na5826F0+6rmm832H/6DfzbgCq//DWzf56P6tZOmls6ucOgTPjvh8A/fibGrPmMnf3TZmU0nH807THYBfXJHkxvGgHF94qHJkROiKQ8C/KLxXuS9Sh84+fVNX33pwDZjYh5/jb1+JlhO9F4TDXzvDPJL3tBbrsMBxuQVHt4H+Zkt8UYXvTxD1xje5Tu/ek/yPgPHWPQaGfytj67xwZvOcMrVeKMxR8xt+sk9+vsJYjjRo3WV3+g9ywd05gM+bHeFwz780z/78aajD7nla1f21VoD6GIO9OsuxpKj3yGmfvT5GA394PCXd0Pye4c0b/gfribOeOmnN1/IF3r2Loa/nJS32YsGLYAHxzpZPxnyn0w60ku+4OFdTl7Sywcx2QtP7IsLPO9+chSeA2E+hYc3HLie6UqG980+2Jnv3dOtePGJQ2RrKZvIpj8+YoKGr9Cwnc/1sevtt9++7B0MBoP/DXYAPPgywL8fePfdd689yHuAvc/7A7Cneafwjvsv//Iv915++eWr5jUYDB483OoA2MuoT2t66fWCfLP1SU0v9oPBYDAY3BYUURTQ7Dea4omfBra/KAgpAHmhNKYgpECtaAZP0Ubx5+wHCj4KKwrNAK5CpqKMopDCi+IPWsUlMuCQ1yGggo+XV4fT5KNRnKmIg0ahxh4JDx/9cLwEK9gomuHpG1SuFYIUAuEpStKLrRV66OSFumJmLTv4wb5LPzx885g8L+LGtXDPKyCjAppCElmKXPor9PVMX/7yjgCff+lHnkIanfib7xWw6Cwe/EQ/PCqu8alvsvCRuBijlyIWH+JHL7RsYSOZ5PMZPeHJC5Bv0ZDlmXw6AzYAPiSL3u4VUfEkg+z87cCXHPniG0V8WT/dFRn1K+Lirc+VPQqB8pc+Z84NBoPBlw2PPvroJwca1lb7kDXNOmwdBdbs1mA49ht/5+mzzvmbzZpmTcbHeta6Z6+zD1jLugfoyNCs89Zve3d7gIYevkNRa7FDFs3eYv21XtPVuuk+umjpT65mfc0uPI3H/8RrD4veuGbMs2ZvOO9rDnftAXxo76GzIhad7RHu6dB7BZvh8x/fuuIJ+KqxbLdnsZ3fe2cB9nz7BR30ne3Ur/GbfWzDj93spwM8Po5/71LxqIHuz3cMOdM4qF8Tr6A9VTvlaPWjkRvhe7ZHa42dshtDa8w1Oe3vcl48+Nj7Ugeu5bmcRIufZ7hiIY581Fhyi0uxOXUiD71mTO7HN1rxlXNw3Ke3cTzJ1vhFPp7yb/L3bgXHHPQuRV65GE6Hxxp64/KLbX3LF63ryV+TJ/yXL+kr3mTq690GrWdXutNZrrn3vuT9CR884NFBPOCV88b53HuQNQa9Mf2u+KD37stO+hUnz9Yk7+jWK7nVeyE6cticD4zT3dU6Qab5ysfw5KcDWLwc3NLHXPHMJ3TPV9/5zncuXctpeOTiC4+94s4HYgqPTcD6QaZYwKUfOrUz9rDPOoyOvuYTmWTziT7vmv5OEEsf8mG3+3RAzyZ01hr/V3EwGAz+ENgB8ODLBPuhdy7f8PUrFA6F7Umu77///rWX2fcGg8HDAbc6ADZ5TXCfkP20ZmwTfDAYDAafFxR9FIMq6imeKIoocrhXGLFHKVLbbxy4KbhUeO2bCQok8BRZKgYq0OCvX4GmwguaCr6K
MHAdVirwKOwotJCjEEQHtGgqpKHR6FGB0LOCmgIQPlrFMM04PGBMYZYcL9AKRwpwClTZBgcoxgVsA74VTX968Rk78DCeX07QDxcv30Sy7/ODl3RFM34hj/5sZLPmmc5o4XtWiOMLPBWm2C8u+HjWFPAUR9nMRr6lIxk+NarQ9sEHH1x9/OzZmKKXe0AuX/Cdwia5QFENbzGnh1jThQ7sIIud/Ek2nuWNq4Ic34s5X8k7upAD5Bt64wqzbDfm2xgAP+P8QS4d8CNb8y0YuchXgMzBYDD4ovDYY49da4v1xjpknbWe2Rutu9YjYE+w9tpPrIfGPGvojDt0sK4Z6+DDHoKXQxR07uFb433IiVzroXG41jhrpLXQWmsNt29YO9HAQ08GXDTWbvI0+w8d7M+to9ZkY/Ypa771WV8tHFcfCCK394MTjy9c7ZVkWIftGfDpaw3PNnsNXelJv/Q8+dHfOHzj+ujpqt9eHw+46LOVrq6A/4D9kj585soGPs8PDtG9S8ATG/ZlUzw9u9bw7n3FXmRcw8teKubt3fBBNA7IjAFj+vhIky8n1A+HHJBd0d38Nm80Cof4nbo2Jj/5sT7jfGPPF0M2yPlkRYOnK77R6RNnV+80ahanPo3zv3E5YCxdvJvIPXzZgrcc8t5lDsqDk57e0eKtX5Mr2skfHT5iyi420skHyeCkP3q4cop8upQTdMCXTni4Fwv80aM79TNOphjLU7zSER4e8PD33ss+Y/hrcpqe7DCH6MA/+PZuJzfJJgM9Xr0v4q+x02Fr/sdbXOmOr4bGexgZ7MbLXDJHPMPJz3zobwX0cMiER0/60ps9ctr8kgPyH2/6srkYkxmeeAK6AXbC877pXZKd/aKPWHimhzW3d3PP3gfZaT3rG1N4wiefDe77EADevmXlm+V+6p+97LBWWutff/31K5cHg8HgD4UdAA++TLAn+YUL+5r3Sx+mdPWs374+GAweHrjVAfBgMBgMBl8lKO4o1jgEVphSSFIEUSRROFH8cpj329/+9irkKMLYt9q7FEMUa9ApPGsKSAo7xvFQRCFDsaUilD5FF3SAbM8KNviR6xP4CkIKXApGikT09LKrqFUxSrFG4UcxSLEWLjwFIHbopzv5ZNABKADhT45DTroo0iqs4WM8/eiscAUUQX3qko5wvGwrZsOFow9+NJoik0ZfeueLiov8pMhEX0U1uIpV+LrndwUqxWd+1dDjV+GYz8UTrn73Ysm3gCw46ekZf/7hM39U8CO5eMBVeKMTfHEhE64Ya/r5mpz33nvv+oYGOjT404EMdOLovqIlWxTV8OXT9FNErIjPT8b9UUOnCuv8JXZ0B3xIFv2TgadvTKOv8DcYDAa3BfudAwprqbXFs4Mxa6D12noVdPhhLbPuebZewUNvX7L+ObgwZk8y5t5eZP/A35oHPNtXrY9kw7En2F/wa910bY9wOGsddRhiLyCfTPq4t66ixds6Ctc4nnD0A7qyJVq87B/WXetseNZWY/o7tCKDHelofdasxXSka/riczY6na2+E+fkcdKc9zcbGr5KJ7TAmGe22WfYZp9pH2eH58A9mq4aPHHBMz/xNZnAuPeQE+DzmSsZILn6Ne8ixvSBxtB0ABxEo8mZdNLYpd97FDpjjes3bh8Va3njPcjeyQ6xNB7P6Opvn06O+KPnR88ameSEwzeucsLBnvyXh/ZwMQpH3vGv94xk6M+/4kZONujvgNd4OWzumK8OAvGHFw86pJ/GHmPiiE5OofWeKBbG0dXQ04189HT2XoYWDzpbL+INj4/SMxxrQjzwpQOfu/IBHH6Vv/DOeXnmK1voJJ7kwNPkIpmu/JKvvdPRH498RS/zWr74G8C8JUcjg459oND7KJnNPfzSkb3lm7XOAbR4hQfYhQ8dgL8r4Is5oHM+wdNVLFz7qWc6Zxt/8QvbvQfS3YEx/9IxvfgHDVnsdvjMB+ylgw+IGhf7/b/fwWDweWAHwIPBYHB3YQfAg8FgMHhoQJFEsUOxSKGpQ94KSvYnxRpFJ0US+IokipyKOUBxB75CkUKLKxz8FE8UWSrUGlPQhE+uPdDhsmffsDKmqKMQWPEKjgKRArz/v6topXjj8FBRC/6bb7556ePbpArkDlMdDOOn0KWfDM/sooeCkSKPRjcy/fybopSDYMUtRSM6aHwUKFjSAY3iEx8pmLEXL8BfgDz68yv/sUuBSmHKs0IZXmwhQ+EQPv07YPDMt8bwVwyjv3sxUqir0EcHxSz2VgxjNxxFN7SKrQqn+gH94dKFXEA3xUFy2Yk/XdgltuKCDi9FOGNksU/86Ucfdiu6sZEdcgmgw1NRDp544kcvz+7pjJadfAOfbXwqD4zRU2MXm9kAlzy64iOW8kcRVr7ix0+DwWDwf4ECnr3R+tteea451pTWG+uaPQee9dD6Y620llvb4ONlbdVnHbOfWMusjfYe92ThZR31oSe8rd+t3dZlB71k4Ekfe2FrnmYtdoVfQ2f9s9670iMccmv62NUYe1zpgzZ643RqrwgnGeHh6Z4NmufzqvkWg/XdHsxn1nmtcX6w31m77QVs9my/sf/Agc8v9g5+jpac9OhZo9Opc/iALzXxbI/VyBQn/E8d0ePDD3jrB/Y9TZzFNRp9cshVTp1QvyYO8aJPdN5j8NNnXJ/91rXD0pr+6PDLNn1yVx7Zx/GxV4J41rwroT151sS/A1z5cHM8ffBxlatiLa/krz6Heq4afcSXv8VILuuPn3E8xI5e/C4ufbDCmIYHO3w4jY/xYHfjGh06sCz/zEVxpBv+TzzxxKU7nPQ7ddDYLw/x0u89RpOf+JgrcMx7eqIxTkey6J0O+bJxNsgpPHpfNA/ME/1w6AWvw2h4+uWuuIXDDrz5S/7wHXl4o2cvfbyj50txYAOZch0fdhlzUIuXedK8hZef8PbhTvOFb+GV78bog5e40NkvvpABjy/xYYN7NrDFHPcvYOgHT/6hJVc8T/l8zUb5jY93dvzpLAagPONXOqLnwx3+DgaDLwI7AB4MBoO7C7c6APaivP8BPBgMBoOvEhRIFHsUjRRR7CkKOgoyCiL2IAUhBRMFEcUaBSp7lHEN6FcgUliqD37fZMJX8QZvxbyKpACecUUaOK4Vmxz6GnvllVeuA8u33nrrav7XyS9/+ct7P/vZz64CjT5FWbop8ChC4aNAVeGefYpICl320ApHZPlmQsUyRTyFIDiKTQq3CkV4KlLZuxWjAXsUsfBByy6Fp3Bd+YZuipBkdZBKRgU9euLBN8bgKShXrOVHxagKvvRxT55ntL6JK46es9GBeDY7CFU4K4748zH/8pmfEKKTdwwFOvExhhauuFbopG+2ySF0vnHr0Jz96Bxyk013svFgw8mLTLklb+jFbvTuxQ+eKznkyUcFOr6mh6IxGXIULV7484XioJjiVU7yr8KmuPGtvMBvMBgMboI1zdplv7NuW2+tK/ZHa4d10weMrJHWFYdy1kLrjPXIGmc9sla5R+dZs4ZZ26yJ1ifN+o2vfh+KseZZ563vrZvWOkAGXOufcfyt2XRMRnw1Y9ZHV8/
Grb+tpfS2lttD/W3KXrj2jPqs0/q0+PdcIyM5PdsvrbOaNZksV3zd20/YgR+gkzH6WL/hspXN8NiKL2CvcTzgkwns7+QlA0++Fku0fHvq6/m8auIg3vxLTmCPtX+A8Dugxv8E+xl8zbsDQOPZvqadh7mgfs1eXD84x+JnvH1Tv3cW+umrRdNBaI3fvAfYM9FnFzjpjPG/e/J6b+F7IN/ZfuoCx1VeoxMLcbCPiyfa3v2yE228NXhikG2u8lUuou+dwVx0jR5f+SCn4LALvfF4wxFXeSA36EWPU7579NlOPjo+SwfAf8bDIRuduLrqj797vOVl9p868Ff6wWmuuTY3rTn8UM6Rh4+5wcfk8B37zAM86Uq2Q/54ocWbLC2fen/EE6/w5K0YgmylZ+/99O8dFF02mj9yMjzvw+YxejbwMRq2mwsdJFvrvOOhaT3wd4BYuMcDHd54ibG4AL5oXSNPTY2PrXn4kWGMvfTjz+KNF1/s4GYwGHwR2AHwYDAY3F241QGwl1F/8Cic+lS0F3h/YCp0e0E15sXWC+pgMBgMBp8HFD/sKQoiilYKNA7xHN7ZoxRZFEYUY/oGraKJIo8iDDpFGoVyBWtFogplcOxleCjKOFhUdCLP+O9///tPiumKReTj5VvBvtHr8Fcx6Oc///n1v7neeOONaz/sgJKOjzzyyPX/2374wx/ee/LJJ68/tvDxLQJy+4azVrGUbHuq4pGiraJQhSB2uFfIUkDSpxBmr6b3zQIRWn3sZCMaBUZFKXu2e4UrxUk0Cmv86Z5+9KKvpnBXQYsOfEUPsivuV5zla/0KgWJiPB2Lh3s86FRx1TOZipWu+lzFgV6ez0Jg9w5B2MN2cYWLL53cJ58f2EAOXcXRlQ/khRjwAfsqSqIhQ1FS7tGHf9Ap4uPFP2wlo+Ifn8oh9uMh5vRAw3Y89Csq0lG+NO6ZzXSVP33bhf3sHgwGdxusVQ4NgLXG2mRN8eER65U1w/psjbEn6bM+tkZa56w39hp/r7VGuto3rX+u9kzrkHWvX2CwruIB1/rngzzA+mTtwlODb23FnzzrJD3wdujh8KkP9hiH696+BO9cB9Org09rLFvg0Mc6a5/GRz9e7PFeYG22Fls7jdsL3dMHLt5ktN7jX2MHG8kgXx+Z/OFen3HNPbwTF+/wtPQnD5/soj+gL5/D07wDsKs+eMnQwquJAcgX+RYusG/hocEH/K21fwf65NQ5ho6MxtIX6A/fFY17/VrvEpp9Mj7RiUXvZ+IjfvZFOaAPnXeFZMUTXbTJdpUj9toO9Ohj7NQFjtjJSe8dcIyjq4m13Iiv6znmnUYc8TD/yOJ3DY6xbNCSy0b0mnH98l5+eq+jl9h5n3r88cc/oacDvmxOB3lrnrHVoaTndCjf4MJJT7lNHj3cs5EOnuWP9+10kCv45mt86Sn28B1y0sNcQytOdGA7XHIA/n0wka5kkI0HHLbpJ0M8kmdMo6Mre+CRZ/6wqZyib3HCwzs8PflBTpl3xsl0ZaO4y2XrqufWTvqiw8s4fa1r1lw0+BmnozUHHl+RZ0229uIj3mpm/iYwznflHfv5z9yC7x0QrjXc+yGe3lVd2fbaa69dOTEYDAZfBHYAPBgMBncXbn0A7CXcC7wXcS/6XmC9ZN8cGwwGg8Hg84K9RBFJ0aRil4LL7373u6tgolADpwKdqyKJ/UhxRQFFIcYBngNb94praOBXdFMgda9YBU8RRpFG4UaRCJ3iEp4KR+7Jwt+3Z59//vl7P/nJT64Cjp+Mpq/i1LPPPnvxUDyDa1yBTLHK3pk9ZCsi2Xvpwwa4ZAEFQAUmOikM2WsViey98fLs28Jsff311y+eeLATnWKq4hUa+pHHDsVHNsJ3iK14Zcy9Ipd7eooDfHhwFN7cFx++YZtnxS0FM3LJZzt8MdT4hI/5+4wZGxXoPCuKKRa6p4N+urt3pTff6adH8cKPDnTBj+7845k8sfd+I+Z4V/RVZON7tmnG+B9dh+7iSB7dPIsxHHzJQs+XroqU/A/fIUl5QB/y5CGd+Q0tezRFS/F1sK3IqY9Nnvu2smd0g8Hg7sGjjz56rSXWQWuCtcW60gGUNai12v7g2tpr7XCQ4eDKWmL9spY5kLUu6usAB0/7obXbmqiPTAeTZFj38UOPzoFusq178K1f7sPR6AiHbmRocNo7auR22OnZeDjGwksOPp7hG9dv/yHHvUZ2ePFwPWWd92y82Xe2xl3P+/NaS9+zT+tg1Zh4FCvPwN4oPvBqeHcfHzrycz4KB28NH+8Y0QHx4iNxFmN49dfkwwnRNBZ/Tb9WPxygTx66yhn9jctf7wj2TXuifZMuIFlo6Uj/aPVp3gniCddVnz3cfnrKrNmzNfs+f3kP0I8vWv53NSa38Ig3vu399m79PuQVfbSu6MmGA9/7h3mq37j3JLLt597t+C3aGjr9yecvfOSEMX38Fw6fwKGnfPD+QS845gIc4xofmNNyx3sO38PR3zhdNbbC6R3UuxHb5JnYyDky8M8G+sGDU8ysP3wKhwxxZ095iY/1wJh+duknX7/3ceufd0h5Qn8y8OSLfGIcnrzHAx4/0904fuymp7xiA3vMJe9eAC4dioUP3uCnn03WTPb2rkxna6uflqYnXO/l3ve8N8IhQ56zG09+wcu6yrcOmOWFD5zSCX9jH3744aXTYDAYfFHYAfBgMBjcXdgB8GAwGAweSlAIsb/41LuClqviiYKP4olijcKKwpSfhLT/KKigge8gU5FPUUmxx7d3XfsGgWIwHgo3ikMKOfrtYYpErgp2ClcKRoo5ClgKa4p+cBWeFJroo/ikmOMgFi3eikGKQopYCmSK0OF6pptCm2dFPXsqWsUfBX7PbGEzufRAx2b98Q36tlbFUUUleiju8Qtd+UxhyR4Ozz1ePXePXgGRzxXb+MT/MvaegMbBM/vYYEyhkE+8DwD+xUcxjJ8UwtgQLn+zjxzFPb4VY/TwxNL7CH/wKzvEib/wYo9+eeIwHo1CGX94H8GHr/FGw/f8yCdwyKIjPH1o4MEni82acQVFfuNvcUKHF1+xn+xAztBbnuk/C9Fo3ONDd7jkwffMZjzFngx60gXQg48c6IsjH8Lnn8Fg8O0Ga6v11vpp/bOGWBusE9Yk/dYQ64tmzbWe299a3+wr9hprCZrWNmu6e2s1HvZJ66v103qEB3nWQ+uVcYe+xuFb561H1vManPbe1kj9dNbsz3jby6xr+uC64hmePQeeNZLO9cMN/3ymB1mnbLLsHck5+Z+NLO8Rre3sY7tnevITWrbzo72R/8RGf428m32t+V3Jox9d+d41e4C1XbNPonEQZP9nh9jwhX4NjesJPcuL+Jygv8YuQK/kyiH5IuYAP33wXR0WhqvPPq7fPklW/Zo81OQgHHaIhT3ZnqvxdXTRRqcZQ6t5Ji+ecO2DcqRvD0fXeLToNPLIbzx5xdy8Yr/YiDN9e5fQ4BjLJvTxNV6ui5f5Z47AQ0dXzZgPDqYbHsbx0PCno1yUZ94H6IU/ejnDVrRw8LM2kC0/NflPR7ahk8tyU66Jg7jyGR
5wync46L1TWnfw5F9+MMfITw94bERr/pgryfBcvp/vv2SIqTF8XDUgt8hr3tMfDX/wsZzkj/DYjZZucodd7JbX3sPQ0yU8cwdv9no3drX2eXfnTzy8V/K3MU1ee1/zPk9fY3wHTz+9QIfuDnPdGytmvTMbZz/fuYojPzn45TtAJhv9isFgMBh8WbAD4MFgMLi7cKsDYC/PXta9bHsp9UeGPwK89Hqpd6/fy/1gMBgMBl8UFEgURBRUf/Ob31z/Z1eRSWHlzTffvL7x6t8QwLF3OdRTtHPYq9BTUc9+5VBNAcgehq8CjkKRYkz7mGKMwhAZClr6FZ0UiRTIFIjwUMyy1+GLh3tFJfugbwooftkPyVbwcRCNF9n0whMfRSH7aoUyz4pYCkJoFY4UvhwiKkzZo+noqqCIB5kKUfh+97vfvffrX//6Exnw+AYfugJ6ouWbCo9kKKaR6f4sGOKPD+A3Y3zmns4VZhW36Ek2fRTK8PRszKGlYhZ9FMXIwUPM+IwP4dKVPIVHV77IJ0Bhjzx68b240QFv8vR59qEAQFf+FVM+dJUrZBljo1gr/uNLPhvp5hk/fb3/4KEYCIcvyaSLfrbJH7awVwwUjX1z3RU9v8snNHKVfP7gMzlCTv5VeMafzYq0+NHVs3xGJ4/xVogcDAbfTvBrE9Yha7f10PpsfbSmWM+sPcasIdatDlus+/Yi66e1zJoBz9pjHXHfh1VcrZXWRv14WH/gWpOsnRr+1ilrHvn4oLMvatY7e5NGJhrrfXurNdCaBde6Z53rQMa1NbQ9sD2l/ZmN+OJPZ/7wjmDNNI4fefBc6YcebvssPfBAQ45mTSavv2nRpSP/oIFH93gaJxNPYx14kaXRmRw4YiRmxmvoz2cNX1e+xZcvrP1wg/ZkuJ8G9fO1Zn84D43FszGxrg+Ib02cT5r67YF0QH/2R1O/qzjwnb2eXd4TAvj85irP8ESnFRdj9lu83LvyiVxun7Snpk88NXqyLz3EAS5/Gsf31AE/72/lEjp7uTE64UG2JkeiZ18HxXJD3rHXt/bTSUNX/sGTG/RLvnF9dEh+B8jZgBaeOeddWD+b4mM8nOanNQAfc5Z89sFhA1+QCYdPzQHjzVl57F94mBf4Z4Nx6wPc3nfJMFesSQ5qyU5XsTcH2EZG7zdyC6/85NmHHPiy9Y3edMwHbBVbMh22kgPP3Cwf2Ye/d1E08Mwh6wb55mYfBJST+NHVuxV58Fo39bPTv+fA14dAfBCUPLaTRwfzH/CbvPFeia93XPOCPHjyBh9+0ejCP/Qsxt7lB4PB4MuEHQAPBoPB3YVbHQB7+fXiDMcfGp69pCoueHn2x6WX4MFgMBgMvixQRFIseeKJJ66fRLbnKP5onp966qnrDxqFIsUYzwoziloORBWtFJTsWQowiiyKV4o7ijaKUPY1hRoFHgdqCjIKTaACr6JVB3wKO/jY9/r2qUPeDuMcVvtmB570r8imoNX/91MQorPikH609lLFNrrpQ6sYBR8uO+hAP/tvhSw66gPvvvvuhavwpRCm6KUopegE9LGtAis+/AGfjxSe+Fu/cf2KcwpyZBlDC4+cCqB4kENvxXgFNAVnfsKXX/ig4pZCoUPaCrFwyCALLp/xjfjgpZ+/xb9CoWffzMZTDDpMxpc+4oQ3fH4jkxy8vevwDVm+OYyfA3580MOjmzwjm85ygAw4/Mk2OVPBka/woy+djOEjv4zjwSd4GAd0YqNxcUZLZ/HEl0/xIAuO/KNT8tjG52zRGoM3GAy++WCfs/bYI6xb/gazLujrYE4z960V5r99z7rVWmcts59YY6xf+qxh9g5/u1lvrFPWHzQOOOJrjddXs+5rcKxxDkg6JLEeWQPR2G/pYd9AR4fWzlq4jeHrSp/2NQ19+0J6R89ePMhBi5f7Uw466yX7khV+/JJlrfXcNXvDC/eUFR/xcNVPJ/oZC+zv7Yv4wD9b/Xic+mjGi4nDUvHU92kQP/GowQ/w4Df98QLRGNPkBIAPjNmfNPnmOZ2i4eeTv7yw92l81b5Viw5P8s5++6Rr+2J5z7/2eDzxP3lGV4Mr7+21dManMYeDeNuD8ZWv8ZXb5U280ZKniatvjvKZPVi8z3HNHESvn176zBU54B5/dPbr5rY+sjV5l+01/fiS616utO9r6Q4HiIV3m+hPHchll/eq5PALejj44sPv9OQbvpIv8jQ5eHjf8g4XL3rAx6d8INO7mLVBLuDLbvj4wKGDWOAlH9gnfmIDH1/P/rcuHfpwg3v9/EdHeHg7hDeP4OHHb96d+RaeZzp5lwTmA33YhJ7udGCL5u8LMvrQozyAS1969D6HJz5s9e1eMbAesMHfBvLRM53h4832V1999cqrwWAw+LJhB8CDwWBwd+FWB8DAS6s/ljQFA61nY4PBYDAYfBFQWHryySfvfe9737v3p3/6p9fVp+6ffvrpq7BkXFNMUdjzzUgFLeDAVyFGoVwxRb9ilqKaZ8UqBRqFH4UvrWKiIo5ijMIQfIePCj9w0ChiKewoiCnY2C8Va4B9s4K7cUUfBVtFo4qM9kgFLzTGFILho0OvGFSRCD7ZdKNrxSqgCEffDooVoioSK2S5KmDR2zP+9KYLnoCPNEUnYw4OK/zxGZoKcWwPl/7GKuaRy2cArW+PKLqRW6GVbfRVDKMTevETD2BMXNkHnwy+hss2DQ96GUPHV4pvZNOlAh5ebOcDRVe6yAWH82ICjx/kgXENvcKheFYYpaMx/IB4oGMzWv5Q+NOPJ70BH+DnKj70o0t6OFj2rPEVPnDJDIcd9CBPTsJlLyCHP9DIHT7T8FFkBOJFd988lz/w8RwMBt88sC6Zy9ZMa08HPea9PuuN+W9tsDdYO9xbe+D2oRprWYcgaK0T+ozjA99aYa205pBrHK61yFrXuuMK8G9P0Ny3JmnWVa2fTLYOOSijh3443fsFB2u+Bo9+rZXkWzutZ3DR6KOvNdJ9/ee1lj7haQ5g7MlsJ8M6njzjXW/e104ZJ43+2kmHv1jpI5edYmX/Za+4sC89XGug/uKtidE5DpIB6gu/PUK/pk8unWPglGE/FQt9AL78Kt/CM14/GrnRGBo5Y4+TJ3KJ7mhOOng36fSjs9/T2QfiALrG8bbnRafxLTp+5g/vifFs3LuE8fg2lq5w5DQ7kyV2/CGX5DR6seY/4/RCV5OjbC6vvTfgaYwMuYG3d6BsOOXLb/6ig35rgJyhQzzM08bzl3dUue0eD+8/6eDdQx/9NfHyzoves/dK85SN6MVKP8CT/uTQRcuHeDuUxYdu/IuXmKIv1vJcP3tbA7zjpSN/OEzmG3UmMuGwm2wxgOMd3WGs+HrfKofh4QuPPLStJ/DIpQs8fXDo7N4Y3v0kND5iAh8fNvnWtXvvZWKjD3+4vffRne/YzUd4WeP8OoK/ZfgXrrz2/ofHK6+88sm/cRkMBoOvAnYAPBgMBncXbn0APBgMBoPBlw0vvPDC9Q3f55577t4zzzxzNT85p4CiwKOw416RTlFHYUrBSpFFQUjhRpGtwhhcRRhFFoUkxR1FKPgKX547p
K0wpNCEt+IUngo8FXLsjYpQilzojSne6KtoV7FcMU0xhww6VmBCA1c//RWP9Ke/hg9bFZjIdA+XbgqJ5CtEAcUvhTwFKEUz42Sw55133rn0rvCoCEUnV/wAPsb41H0FTvopJrqnDz8Zd0+vfGxcEV1BM73ojD9bHCR0cMknDjfh+farYqx7eAptZPA9W3yjmi2N+UYvfdjDv+LT/1jkT3zYxh++6aEPHlq8+t+JinAV9yqC6uczz2LJPvwAe42zk37ia0xOODD383x0YpcYkg/04Un/fOwDDL5lLAf5F40rvyiU0kWs6IY/fkAe87VcYyPfswGUW/TkL7QafmjYixeZHSBVkBwMBt8MsCda/8zx1tj2IPuDOW8NcLVONf+tJw6BPFuHPVujrJvwHDxYq6wL1srW42S1hgJyW5e6Wuc1h7vWbmuTtYU8a5V1z3rTmoY3GXDhWVetUe7hWdfwTQ94xtFbh+lqH6R/a6wrXK37fmHDAVM8yMDD1bpLR7qTZe0/dUerj5/j7VAGffJvyqyle/eNu2pk8oW9RAz15V8xFE90oCs4+YbLL/YPsXawxFa62yvoHl34GnzxbgyPmn2ifhCNMXrFR9xq8qkxTZ/Ye2cLX2z5zp4nB6ONDpChz1X+Jfukt0eKHT2TZ1xfuhjLHnEXV3KNn2NafB28uk+usXLE1VzwzsL3fAcv+hOnd4vo8RYbeUVX+sMhgz78BAdfh//2c7TGNPw1vF3lTh8UpAPZNThyCj+5i5e5hI4cMsj1Pmzu409HYxo9rQdAfsvR5oFncxlv+SqG+YdtdMEfHzqiMzfNFfLkuTjAw6eYGYPrAyju0ZqzZJPFJ94T8WK3+MlNPNgLBy908h0effmYv+GRDQ8Ofcvx3tXEBC79+B7Qyd8MrZf6xd2VHvmGbHPNux0/eCf1Sw2nfwG/kO19mN7mhl8IchCMTozwfu211y78wWAw+CphB8CDwWBwd+FWB8BegBVi/THlj4ubzR9RXnj90TUYDAaDwf8FL7744r0f//jH197i59cUQvomhqKSIqE9RYFIgUqxyMGwwo5ij/1IkVoRRTFHgUg/fEUdxR+4Ci0KpO1x+hWbKnYZU/BR4LKXKdooluIBRyEIT0UfxUB7HlkKQIpF9FVwg4OPIpBij8IxeuMKYHSjLzwF9op3ilfoPaNRQMJHwcoYu41X9AR8o48tdHDFE47+ipb6+Na9ghq72Wm/xouurhpfO9DFg8766FvRmF/oS7YxRTb+ZJfGJ3j0jRV+VQjkY0VGco2TjT8fs9MYO8URjiu67CGLD/gHX8VOvvdcDpABV6OLZ8VAV7LQiBvdyFQUdKVjjW5wxYy/FPoqyLId74qlcPCnKz3kCv74yjl+Fme2GtfkVsV5Byd9GwiwlWwy8ORbPkGXv/Cni3v+IdO4PEHPd+zgL3HyLFb4mB/oFSDNMfqjoedgMHj4wC9a+PCGeWrdsCY116217q0F5rt1wVpn7bP+2Edad+0FaKxHfQPYPVxrgzUCrn7rEsDf3oCXfh/2sQbaS62T5JGLzjpqLXS17qCJDn9rohautdhYDY61ylWzJnZIis5YYC86vy1s3+sghT7oyLFPuKZP12SkU/iNsdv13KeM6dfsCeRad6219j97Ez+LD//1fiFmaPRnn6vn+Gn0tu6LDfvqZyf+eHs+AS+604O/8MRHE08QDd7s0Ojkuf7i2OFwY/qMaWeenf2N8T2/2FfkAVy5os+BJZyTznh95Lnq429x8xxPe5acMYY2/jf1MC5f+d7e6ENaeDVu3yeTXmKDL/+dtPhqYg/Hntm7o7jxT7LR8D095RZ6dNmszzN58owNaPGzb4up3I+HnEpfz2R6XyjPgW+RZg86tuKDJz/Cj4dx8uHQ3Tssveitnw7skUf4k1fsWw+yiQ10Mg+0clvexUtc6ME3fI+v9SU76YEnWfxAD2sQ3cSruUIPcYGPP754keke4MG/cPDGyxg/wyuWxunLH2zAm27sI8s7ZHHxDAdvfnDv3RJv66d3bHrgwUbjdOBX/+4FT++h/EEX/mcX++mEBq155pmM4qS/Q+nBYDD4KmEHwIPBYHB34VYHwF7IvVQrfPqko5duf3Dev3//eoE15iXWy+9gMBgMBp8Fftb5z//8z6+ferZ3tL8oqCiOKM65KsYoEtmXHPwq8OhTVLH/KDxFD18BDA9FG4UwBSS0+hWC4CtSKsYoPCnGKOoo1CjcVRRXzMHDPtc3ahR5FcwUwsiCi4+imgKSvbGCZUVe+yFexhVz3SuSoe8A0ZhvLaGzl7KFPnSHQz+y6K5IBb+iHNsUKRW67L8VgvnHN1QVmeinj9/wRqOw5RsJ9OQ/dihGsb0isD5XOlUA5hMx4FvAF/zJt/mCLHQON/nPPbkKXPRWcMsuNGj5hH4KhPDYRoZYsl1Rne/wgQcfLp/Ri0w6gvjgiw/9FebowUcavfH9+OOPL1qHovzAv/Tj63iyt8Iie/DT+Ev8+NE4OvLogw/5jbGD7uSKFXr2eRYzfPXzL9sA/5CBX/krX8lwz0a8+fmjjz66eKIhRxzw5At6A7LQ6JcfzQ8/2SiH0IHkDwaDBwv2R/PVfLaWmOvmsLXJ2n0eYFg7jFkrzGFrvLlvrbAv4WO96B6P1lg41l08O9iBa1+yBgFrnTXCOtn6ar3By7Xm4Jl8+4x1pn78/O1IP3LaI7OlZ7jWe3uEPnT63Le32wvo4Jn9rtl34td6jv/ZV3964JNP4ESDL7+Ql3+ih29d5is+QmO/OQ+Ga8D+cDa4AE9rtrWcD/E2xl4Qvv50izY+9lT7luf6298bgxed3KmJPRzgytfo5AYd9KEr39hMFzGNL3y6s8N+H8/ojLvKPX3GNP1o+M/+i6eco7+xdHGQmy7pYW8jC645QuYpjz+Ng/gmE71xMfVuIGYOWsnAl41w6EVnPNHqY6f3JbzJ804BJ9744oGneReOOYSP8Zp8k3vmjfcaOGRreJTrfBSOPrLpIXeMpxcfkoUn/vDoAMytcPXjqdGPD/nHGBu8l+DFTv6hN7762OXdgWy808+65KCVbGsQWnlCr95Hsr93Qf3FGw/xIJ8vfTBC3Mj0jsrHcNhfjF3JxwuOnPVepx8dP+Lv2VpajsCTM/rZJ85sFn8/O+/vDnzlHZ/Ic1e07CGPHWjMdbLQ+fUZMWUX261X7s85i4+1zreBB4PB4OuAHQAPBoPB3YVbHwB76faC7A8GL8Fe7v0BcHNsMBgMBoNPg5/+9Kf3nn/++auoonCjmKTIo6CjyKOYUlFGsUTxR0Gun/KFo8CkuGM8XIUYuL415WDPXuReIckn+BWFFGVcFY7ga4pTCjHtZ/ZB+xk99ClIKey40hlfBR24aMl3hY+vgpYCj6aPrg7ujOGncKdwpAilsF+RDZ4ClSIRnRUAPbNRc4ipqKaffhpdHRazj17sIQP4P2N+dlhhizy62Kvphx+d8aJnBTR4ZOoTkwqZ8Fz5SVGLH/hDHz0A+gp46NF0r3gmhsBBuVhrdMejWPMlPP7lM/rgT6YxhdC+icFmgL9YGONP
P+Psng/x8U5DX/nCfjY5tCZP4ZKedKcHPdHo8z7DXrrRRUGwnCHPN/PYghaecb5nF/l01mcMb/4nG/CLZ+PyoHHxhCNX2I63+QEnPgqu0fMffcwJttHVuAIk2+iT3XKHXgqhjRVjenin43sHR3KEf+mzd7rB4OuHZ5999pP/b2+Om7Pmv73IWgDMXWuANdIaYJ2Ah8Ya4jCiPckeY86b09YD/eZ4+4u1z9oFzxphXQ3XWoavtdV4f+vRw7X9xbODDmsonvGiJ97tm3SOBxzNfXsQWvu0QyV0eBv3bD2MvwNlfeyrz9X62P3Z3/2nNT4O58R1bd0/+6PhG37ln8ba9zT7jziwiy3s8p7SO0W88OkdIT7o+a53DaD/s6D1HL7YoQeufKvZy86x+rUOcQE++uScZo/QFy9x1egrd/AExtqrTp4a+sboYUxc+ci+I9f4yT5lLHu09NA6yEs/uWTfw5sufBUdXnhqfBytMfhsMOaDC3zrHbFxeaf1PuE9wr13NPNNvNCzwfsJG0694BrDX6zlCz+e/MMxrs+cRwvHuJyRK658SG7vY9EXB+8jwByhq/Hkm2Plm/UCfb7ja/mLD5vYykYx9WzMMzx80NGDPfKZfnLWmPnvKsfheSeT/+Y82+F5Zmt+Iof+5jLb2EnnxsnFT1zRu/c+A49fxJEfNPfe7+DSTU7hQye0/KHPO5J3OL7ADw496MYP+IiFvGADu/jTmtmHNeHKJ1d+cpDr35Dwn7WJPt6p6Ck/+J88erDVOz+8wWAw+LpgB8CDwWBwd2EHwIPBYDD42uAnP/nJVZxRUFGkcj2LIgolCjGKQBV2FZ8qLNlzFHUUd+xVij/2H7gKgAAfPODDrTCov3uFbMUXfOAp+JADFKkUcBRvFLgUzOxz+hV5yNQP7Jl0pq/+s8AI6AsUgeyPcOydeNNHUQh+xSnfNiDn/fffv4pPHWICviBL4ayrxnf8SDeH5A69yaKjQ2M+wEfBThEPnqKa8fh4VtDS8Iu3xhbFRDh856dA8eFLYwppdDPu2wzkAPcKg2TwL1+LNTzFu/QQPzTwyeFTupFljCz2uGcnXoqJ/CaX+BgfRTljZIgpnuJc3NiGJ5/Dpxfe+PCxe3EUQ2N8Rl4H1o3RhZ7yj2z3HUBU9FMI9kwX/s+3/Im39yRxEmd2oaGvfrbLTf4lj2wytIqQdC2m8laf/0Xnf8sBY4DPyVNMNUfEA0909EPr0Ng9ea5slgO+pe8qD/EhA5/BYPDVgkME65m1yfpg7bEPmYvWGuuDtcd6As8ap8/6Z50wV81z/eY9XHPZumCdtS7pc/DRgSIZmv2pvdb6YA1tP2sdwguOdVUjC761C41G73A8W1vgoKeXqzE6ae7Ra+GxP10AHtZD6yUb9bOF7dZEY3zQgZP1jn1aP6esdcgD37P9nV/h2UOt8fhFT0Ytfhp96nN/jrGj2Llmg/VY4wNw8jzlaOHyv30xOMe08EH7dvwDPOx7jUXr2ZhmbyRHH4BfE4vwXe2lGt+J0ckzGjyji1Y/Onsdm+07aI3pb/yUd9K5GivO8l+u6A9HDmeT5/JFnonHiSvO9kMxSl/XxsmRB9U25JX3OTrBi7cmn9Cj8yyP0Ip98s0dvNONDewnxxxgC1nykm/QltfmUbZmv7zVD0eTh2SkFx3EFI4+c1Urv+l0vh94F8WfT9D2LiKf4ZFZPLzD0w8vPhdPOPSA4/3MGoOefO9C/CNXycnPcPXTBX8+Fqdw2OHZu5gDenrSFx5/oCEXDlvJxI/O8hN4h0EDT6zpZG3zgUL+kmts5Rt209FhL53YQxc+YS895Dz7W3fEjC4+hIPe+52+Dz744Hp26CK+dCP3V7/61SVvMBgMvk7YAfBgMBjcXdgB8GAwGAy+FvCTz/7w8M1U+4ciCLBnKK4oyNiDFLDsKYo3iiUKPg4dFVkcrAEFIIUvBTQFGriKl/oUYSoY6Vf4gac4pThDtkKYZ4U3RRn49jOy7WkKPfTqJysVh/B3OGZcMQvQj874040d5MJXQHJ1CKtQpGBHJtvckwfoz3b7p4IWnvbhvhFDJ3w00FXRrHtFNLSKV+zHyz3ZimOKYXjB5zv82e25QmYFSbj6G9M6SIXDL4pf/GJMQa0CLpuN+xYOHAU/stjI7/yJP3x24dehLlrvEe4V1Sqw0TO/4WscPTvRs7WfeBZX8XGFg0bc4OJPF/LFj8/4XT+evpFEvjF4xsUT0CMaRTu60cu43CGLDr0Xkcf/clY+wNVnHK2DHEXEdEBDL7HnK3ob41M5zFeeyRIjfODp4wOH8GSQpY8d5PFDfMpp80HM5C3b8Wpu8BV+eNO/A3bzCh0d0wevwWDw5YK55oNAfejCvLWGO+iynviQh/XK/LV2tmbANd+tHcasR+aqe33WN2sY/tZXc97cN2aOm+8Oh41Z16wzrcmtB/CsNdYsvKwDNfzI0/q1DrL0G4dfiwYvePYudqI1Tk54dIieHsZagzR6WffoyRZ4AI51Lr+gtfbCdY0P/aydcKx9xvAF9i++t9a1L1n/xQYd3expDpb4OVvxTUc4Z2tfFQ/+qR/cvAK44du7b8KJC/BnB3w61YeHfk3c9dUas1fYy+KhyYP2lHQJvzE+O3nGS+t9Qp+rePCnJnbJO3VwtRcZSx5Z0fKrPdo8OcdPmXhockBuiT8c+5x4o5fH7Zk1z+R4r0BHRzzIpRNatqZv+eVqTF7JE7R4pXfj9mX8kk8v9PJL/uDrfQOe+2iN06X3ZrkDh87GNHzITwc45oQ5xpfmhnt6WzvgeF+ytqDpoFM88IWTDew3zi8ADzjmAhnu4bANHjB/vOtYo9hEf3bjCdLFewZd2ENHcwctPPOPba7+126Hvs1zz3xIJjo+QOf9yuGuXDBHzFv+x4df8f/Od75z2a+/eKKnsytb2cU35PlXG48//vg1D9kKR+67kg8n262B9GabfLFu+zvm5z//+ZVHg8Fg8CBgB8CDwWBwd+FWB8Bedv1B6Qr8wevFGdwcGwwGg8Eg8FNrP/rRj67CkqKS/UPRRoFG0avCkEKSYpFxBTRQccZVsUdRy37jm4uKMwqwCmnAHlahD75CoWKQApriEPmeFWYUa+xh5Bg/v72qSKiwo2gF8KED/eAoDCkq4aewht6Y4k/FrQCuAhCZ+LCVHfQDbE2nClj4Vljjn0D/2QC9+EFRiSzFMXqgVSB97733PpHtmyvuFcEqoKGvKYAp9MYbuGdfxdhs5WvPCn5o+COeQEzyOVx4cNhKPrt9w0IREC48B7x8Dk8M4Hj27S02yhfFODnCT/zIVkU+eWUMnthVtCVPzBQE6dE3huDyGRxNjIzB43u2ySV5KZ/I0Vfxn6/wIUfjVzr5yWV0in10gaOR4Rko4uqjGxp6wac3HLE6c4HPyePf8tozveCxPX3L/8YVK+nLl/zU1c8V8rGfOORvfPmEXvRDKx50YJ8xuctHcgFfMip4DgaDLw79ewTrnrWg+WbOA+uIAwrjDjXMT/NZv3XAvTXL/midtGa0v7Y+W2fMZ8/WAPMfH3KtRXiEY32xTlu
HyEBDrgMUazc6a4Dm3prQvgXPemPMGuZqLYFnfbFWwdPsCcbg2ces7XRp3PoIRx9faHShn1afe3rTgd3hZEs4NeueBi+ervqsz/zgng7AOD3pT1c8gHcJPs4OdmcD/9as0xoZcPnE3uEADL5Yg5v4Gl7nWCA2QB//h4+XsZoxrcPh+uHKNU3s4Zz4+tF08FnjA42v0emLX3T22PPAVOM3jW+jq6UHvvSPxjM/ef+LvvGTRr+84lt7lJw2Jufkr5wlp58QTiY6/MW5+XOO1bI1mfh5J5AXYomn95TT1nDIx5MNcgs9nZtDZPOz9yG0+tHCl2dwPLunA1rNfMFff/Z555FT+unMNnmJB0DnQ4bZQjfj/NYcEZ/05xP+pqNndrpnBxy6ovUuCPCCwy906IMTYkMvvAFe5o41jYwOi8n3TPdsope8tfaIa3PSvBEvfmCH+SoG8MihByDHGgmP/cC49x08/PICvvD4hC3mMds8OzCGS4fi4f1JnumDWz6SWVy9U5L5+uuvXzIHg8HgQcEOgAeDweDuwq0OgAeDwWAw+Dzw/e9//yqSKL4ovCisKAoqGCkgKdQCfcYVaxWCFIwUZPQp6gB9iisK14pXCiuKS4pGaBTMFaR8u8hhnGKOopMDr74RoRhGBnDIRb5imE/t4wPSDR+6k6mYSXdyjevDn0540KPCqoKVYpACED5kKmApjJHhqjgGl35kwPVTvm+//fZ1ZTOazyr4klHjJzZVyKMjWj8zDPi2AqTimCIcnfiKHgpgDowD8sjR+B+9wlZ+IY99ZFX0S0f9xgF6Bwh8X4GTrYBs93joR8OX8OimiOmKh0Id39IV/4qXgHx4dMNDnsHHR3zhibv4iRN+3nXYk+0Kwr/97W+vsQ4O+IutCnn4Kij+8R//8cWbTfgXHzqIFx37NrCCqZjwORnu86s+8VZ07BvM7HJIT1dFQ/zJVmyUI/SFo9GFXcbEh9/kYv7Bmw7spK8+MTAP8MWPHIVT4/kCrw7a+Z8M/RrdNbrjjZ8Pd/AR36JTBB0MBrcHa4H5Zq0xN805c9B8dQ/0OxAxZ81V89baY95Zt8xbe6K1yBpfn/lqzuPTtwitOWTBhde6Z02BY57DIasxDY21zRVPLR7WQw2edYE90cM7+Wh0M86G9AOu9gX2sbN+66g9g09af7Xu6e7DXPYC61Pj+rvXwo9PTR9Z8M9+z3Q/7WwMvrjky3TlO/uatdm6CRegh58v9Lf+W8PbH8+9ojW3NTgwfvMKFy/4nhvTVxO/xsLna/GqX2vPccWvnHCVh/Zh49Z/e597Y/Br6PSLo33JO1r5ex4QR3fmtSs5DkWjaTw67xFyHg6fmgvnAS7Z9lH55Mr3p0z87PWaOMM5bcFfn1b8yZBfdDLu/UMzJg/wMv/MXzTx7x2VXvDpLd7ygB768BZj/PGqXyMrHfBPB7zZx7/Fmg/Iid4Yn8I179zjZ654T6CHfuPlf+/W5iZ5/GYMP7juvYvwF73Eis5yhDy2wMHb3O2diMwOctGw3zwxr3r3d08m2XihEwM84Dh89VPLeMDDgz/d052u8gue92lxZyfd+I9c436VyDM8+cwOfutdla7ZI8esL9bHJ5988vINO+hDBr+Tz+f4iInxt95664rLYDAYPEjYAfBgMBjcXdgB8GAwGAy+UlCg8v9EFaM0hRYHXg66gEKKIpNCi+KnIo1Cj4KLQo8iimJKBSmFlopzCl8KLApF6BXNHAwr/ChQ2c8UeeigMITW4aAxvNApgNGlQpgiFXoyNTp3MKwYqBiFjmx6u0ej2I2PwhE8+uNJP7iKSa7GyaYbX7hXMDJGVwd/njvExN/BJN0UqxTU2O/n6NgMyOMPxVvFKbIV9/gRDz7mP/rA1c8nimx4KKB5RleBmV6e6YsODt0cUigQihOZCnLG/MRdPDS4xQQOWXi651/3dEHDRkBXdmp4sscYHp7RGnP46sC2nPCs0ArXga9+eos3OvqKORy8xIT/+1/E9OVTvqUz36PzrTM8xYkPxJjdfExveB1ckAvwhhs+XPrAy68KgnTBi9/o5/CUbDmHRwVtPqKjZ/7Giz6KnfSGr2gpN/iPnXDJlpfsQOODDGwmBz1w1QcXFHP6uMdbrPiBHnyh0YtO9NHQm+Ns6mre+Pl23+xRGO2nEI3JBbbz92Aw+G944YUXrrXE2mLumSOtBdYGc1Cztlkb4Wjmn3XRnLS220fMbXzMMXPUugAXWCuspfDwx8+Yee9qnbJexc/6Yd5bczy7thZYGxyMkO/a2kdnawu94dkPyLP22UutL/jidfLTPJPNhmS3Nml44M8G+pNLd3boQw/f+sd+4/Dcd8CKJzv7CWd9nluju5561X/i6T91rAG6ipe95aRpHNQHTvxAX+us/Sw9A+Ndow/fOwde4dQPp4Ne9658F433rnBrxuWEPOIv67v1vDENT/y0+uDbl8TL2h+OPDvp8KkfDf7iRpY9Kz2Ml2Pyw7tSfMVew9N+TD/3+MLHV+7pY0u6uxpDa86RYbwxetiz5L57/OiXTvDiLw/4U5zMy8bTTTy8/4rhiWNMDstTgE860Ccd5LZ5hMaeaiyfweEHfPiG3/HlQ2uBPPVMhns6eIf1HpMv6ScfxYsMOppH555Nr3DI7hCUv/MRXmxrLSKXDnjRkXw84HlH8d6Olhx6hefe2pJf2Wld8W1c4/SnjzH64OEaD+8g1lTrj/dz6wqfmkv8A88Yv3qvcU9/OYWXtZJ+5POb9zk8vNd8/PHHl3wyjHtP54ennnrqso0M76Z+hWcwGAweBtgB8GAwGNxduNUBsJdy33xRaPTp1ZvNy7I/BPzhMRgMBoMBcMjmf3fZJ+wjCiX2CsUZe47CkGKQvcW4ZziaYqS9pT54QFFGkbJvBuhXiFGIUghSeLIXKeoo8MBVvFGUUYRSlHJvLH6KOnS0B8InEw+8FJT6pi+dFIbwUzDCi3yFMvooBCoQKp65ZxdbO3h2mKzwlK10BK76FMLYwV6FrQ8//PAT3ujIopcr3RSd2MQedvCtb/LCYRNeFekV4hSrFKbgoWELn2n8Qa5rNij4KTSyU7GOLuxih3t0eOOjsKkPsK1iInzFQXzJZZd7/NkAByja8pcxvBTq+JnP8csv7uNlrMIkPfCWMwp1/Ikfv6HjH3YUOzw8uycLH76hk+Ievyr0KTSiF0dy6M2/bMZDzqDNn3JFDKKhp3ExpqNY6uMX+BWq6StW8OiMhj1iQSey86c+BU2ymwN0oiv90AL82CsXFNL5jl7shs+n9CVfn2fy6S3eZPkmvSIoncgim4/hibF+uY0HYAc+9CMv/7GFDvxtPlgb8PYtHofE3eMNn1/gyTG6DgbfVnjppZeufadmXWnNMOfNKfOrddlcag2BZ9xaYJ6bx+iAdcP8tObYI/E1H/WRY9wYOld7hLUEvbXD3ucAxvwjz1qikUuPeFsf4hO9ZzprdCRbo5/1Ar17hybWcWuGfQV/Mq1L7W3WA1d8rAvkkZXermTW2AEnvehEtjH8851nYN23vrYuks9f/Jre9lpron566aNT4z
ebWGlsO/mAE68+/oDPPlB/fDQxA8bat/V3r+FTg3+O128vtHbrM66vWJGfPD7iEweernxkb4sOPl+6ykl8xBau9ym+d8/n6RJdtOjI4XM0YiwGGl7R4Q1Pv32ID+zP2RVfOOlj/6GzPQffZMr7bMZPnpMN3zi96eN9Ro7rp4sm1+RV9NlMJ/kg3u75ER37xb892jM88zB68tkJh3zvBeT0zpVP+NE1HdzDad+Of3ktV4tdvMjwzF+ezTP07MRfM86v9IPjfYB+5hVdrTXsc09f8wH/eJmv/E7vYscX9na0zS26oCs2YkcO2d4HxZj+wDoRDvv4jx/1iyc5+bY8IVOO4O/dWj/92IGOzfSknzXAOwge5rr/90tGfqdXPOkPjL/zzjsXj2eeeeay24f6fEDzjTfeuPwxGAwGDwvsAHgwGAzuLtzqANhLrz8YvZB70fYS/cEHH1zP7vX5QwDeYDAYDAZAQcUhnKtvoiomVeBSfFEoU8zySXlFOntIxV5gj1H0UcDRr/CjOKQY0+FZhTcFHIUoxTt8orE/wYHfIaDioGIzfIUjex98xSjFHXpWWFS0qngGFPjIVZxSTMJbMUkBkVz24angZF/Vp+CkiESOYhpe6Y5PxTL2+VaSwpJDUIevaDXFsvNKDjsUKt2jpwe+9mUFQvz00RkNP6F3EIcuf+rnK+CZ/oqafMEG/Onqnk8qiMHVABx93dOje7Z4R+ATV35TZOseKKzxC13EXD50L6Z8RwfxFBM/Z6yI6N44XHbynWexZh96PnfgyF/68Pa/hdlRnOnC357Ryz05Wn7IYTYZpyc/KvSxTw7LLzz4nRyH0PD5BG56sBdPNGLAzw4+fUsXb3h4sqsDYbFhlxi6Ohg1n/I9O8jEn3/gke9eAVXc8CATDt7mAdmBecpn8oXO7NDnKv/lKb3ZgtYzOXKEbB8uoFv28g3/8YH+J5544prn7KGT2MlZY3Tjc3a7p5+DBvMTnWf89SnM+qaxZgyO2OqHo8DxyCOPXPMRPhv8VLWfbPTNIYfN/KffuGYMrQ+riC394Gn4ds9OdA638TSP2Gu+8a1nvMv93onhyDk6ybX6B4Pnnnvuyjdz0Rpjzppb5kNrsrkix4A5Yk56tpbAN2fRm2/muit66y1e1gl91ihz1s+d2nOsV9Zda441xL38BOYmOeYnWnSaPDeHNfysM3iZo/WF69laED4+6QbPmq2fHHLNL/2A7fRjF5utOeR0cGT/1mdt4gf4+tHj2TMZrvQh19WYZgw++WQbR8tfxlsjyUfHl4AO7tG71n/2WStrYhJeuEF9ZML1HhM+Xvr4xrX9p3W/a3LiAV9MT3ytfldj8dXql09ygV/th/wi1/ggumTFzzg/ik/rH3zNuGYNTDcNDd/yM1nW1Majc5V/8bD/947n2R598tWHr/WV79r3sy877EP0ta9bz9monz5ksI3taE6+5oc8sE/yk2d7uNzHDw998k385Il8Jx9vzXj5Xz7DiX97A5nWBTzTzZjcEB/g3nzEhy18I3+zwV7mEFJM6IveWOP0SgbZZIgFn8Mxbsz86r0A/2SYnyeOWMIrf80fAKeY8xX/iA058PAxTj/+tCeThR/9xYe++OGFnszWSu99gP34sMGcpiM/4stO8r03yTU2wMOX7eIqn7wLiIf3cLzlB1xrqZzAq2au0kUM0MJh1/379y99BoPB4GGCHQAPBoPB3YVbHQCf4I8V4CUXKBJ4OfcC74V4MBgMBgOgIONwSyFKIcZeocikoKIIpt++oVingKLPvcKPworCjGKQQo4+Y5qCj6KLoo2iENz6FZi0U479CW7FKgUehSqFs3SDa3+ztylmKSQptJHDDgVn/cB+Se9sULRSqFJ0UhxUWHKYVFFNsc/Py7HFPqvwRWd6wAVsJNu4fVXRzjj5Cpb4sJWN7FDMokNFPzLYzRbQwQC90OIPp8Im29Hla/6rKMh29+xnqz73inL1s0G/Z4UyPPV5dk8+wJ8drvrJ5y9xwI9udISjoMZG93KFToDf+YEc/jXOFjkDvwNd/MRaH13QOexgA3vZ5cDSWEVKh3LiSD/yxQwPhT/y8RIPOrONT+nGdjQVxPEWPweDnunoOTvwIYev+Z0cOHjTyRjeipP6HZCzES1fiTd5dGYLHLI7mCiv6K4QyV9sa54ofBqTL+jlKp5oHYzyH3769clruUY+On34GKejMfrBIwPIZXrgL//FhX3o2YCW3Roc8cHTs3wRE4VaPOHyo9xoHjsgU9DXB9Djb97iA8cc4Eegz6EsOrqwAYgBGeTxmbkmPuTxjxgaQ4tf8eUfsoyRxb+Azhq6rn41xyE1XDmPb4fHvi2Et3vfUHJgLQZ+PhItfclFZ80w7gpfPvE3ezQ8m+snkLt38ocb/KS+3PPhgtZg80KfZ2DN6UCttdXcl4diLhesoeaC3LDPyHO8zJP2CXMMvVw2t8plOHjW5Lmr/YIu1iZXPNqDXOHhaW6SRRfz3Jj9wTqAvzXLOHt8yMU8cK/f+mGO4qHv7G9tNK5Pg4c//dgAH2+ykq1Zh+htjhvPH9qpj5bsxtyTn+zsw4PvNPuMZ2umfSE/sRfA52s+cuXjdNTc22vCx6vYaOZuUD9e9r5oAmNwavGA33588ijGjekXR3pZb1w7hDOejPLjJs/yy4e8+ItvsjsaTW7qTw9xIss6Sn50xtvvXNs/5I11Ds7JNzr3+NCltY8Nxmpk6pfbYmQvQWcMrThad8m+Kcs4ermBh5i1T0WvX+4D/PkqevMGf3a4R2OvcNXI1M9P+JBDBzLRmd9yEX/P5Gvyzh5AFlzj7Ct29he8i5O5kX/I4H/+7b1Xn/iT3Z6IlgzxYCcZxtiOtpjxh/kjv8UFpMvpx/CsGeUFmeSwgU5yvflLt/wsx/CBhzd+bDTmvcc9P+FnDK61zgdV5APe3qfZY76TiZY/6MS35qsPhLn3QTZ7uTz0vskW/uMDuqFlLx1fe+21C2cwGAweRtgB8GAwGNxd2AHwYDAYDL5SUKxR3K4YZL9wcGTPUECp+KTopJDiUERhSWGlApdijaKOfs+KNgAfBzCKOwpBijfGFa4UxStw6XNQgt6zYpUxhymKYQpy+tzTRb/CrmIYnRSS6G2PVPzRyGaTAhYbFA3tiQ5v2GafVFiilyIWHoqrinUdGrVn4qN4p3CEDi6/GSeX/ngqiDvEU4xiYz/RyR+KYfxEN0Uv+Pi4KmLRhX7swdOVHuj5Cg9yso8/NLz4t4Kb+/xHZ3wAGvfkRU8//tH4AH84eCSnfvf83UEiO8XLO4liHD8alxdnodG9Qh778HFwIh7wFBDpiCfb+V/8AZ3oIo78kX/Enm/xIkPRkGx0iooVPeHSGR+0dHDwYoyv0PORvOBbeqKR8+wH4sKn6QKPLsbxkrPygVw2iBVZ8OHq86wfDTw5gEauskPjDx9eEG/8xVQf3ypAK5qyi6/o7coOB+f8SWf8jJHJZ9lRn2fz3Lwh05ziH3bTlSyNvvJYn8Ovd99998ITG37jZ1d95qMrHfiXXWzHk98AvfGSG3RmS/ORHmII8
UD8U5SD+xAXA953Pz5oB5PiCTLsmDhz8d+bL5+KApRs3DF6PbNOHzs3X+Ejc+MY8OPh4nf/NwxI0fxQZvzwb5yYDfwAuPBlz+5Cc0zaevgUbcyJGLdG3dPfxial+Kg70BnBHkwrEv5CUctOSRi2/xQ2fIVevkoIODzrNcoY88kT/2Dvnw5EWfleSTvPNZAA5ZZJgTT3PlJhzrfCgGeOCNn2e+R9N8fOgIh110czbaE674n01a7w/+kKP9EtiewUcsXdnRfiKrPcB+fpSfaM3TyfvWL27J9k6llz0uZ8lFax7IK3zEnJ0+DzmDyBYPX35ybvGDM9znFff8mg+dl/iYI5P/nAnsd88msfbO9aeeyacj3Z1V3jlso/9//dd/Xbr5zMsPePPdY489dtnhM4EvoDz55JPX56fBYDAYvL1gDeDBYDC4u7AG8GAwGAzuFGhYfOpTn7oKhpqLCraKrAqLimOKagp2Cn8KggpjFdUVChX0KpgqiirUKc6hdVXgA+gq0uGt4EYmcFWoM4ePARTyFO4UeStin8VoxToFP/fk9ucAAToFWEU8eHjAJd8zvAq0rgqQaBQB3SsaarS5Vxy2VlGYPAVGhUjFRw1f/vH/xrkqosKjPxrNDMVNnykUPtGY50++pE/FZH6yxh/mDUVcBUtFZcVXRcyKyezCh+9cxU+hlAzPbCPjwQcfvOIkBtZdK5wqjMJxxaeGhjn0ngF+aNPPsGZ0f14bQLGVfed8a6548jFb5Q274ePF3+IixvAMehp8wN90RcvX7AKexQgev7OP/nzIl/DkKRniZa2hWE4P+VKhuUYsPo0a6+KAn+YAWfGRA+JeruHJTvFjV7qzVf7KNwV0RXH82GdOXD3zlVxBb9ir5pOXjXQgVz5aJ98+BujoYNCHXhXmPaODwy+t8yNb+YK99GNrOcgWesonxX82ecYHjXW8zOMP0LMHTo2i8hK+Z35no2e2lOf8xXfm6U5He4S/NRask4t/fOUGMG8v4N0vcWt8yLHihxYuW+HJyWit40tvfODwUXrzN5nyzBzgX7Rw04s99Be38PgJzxo3ZDpb0WqEm4cP6GwdpFtnIH5sIUMsrJvDH7/iRo/m4u/8o0PnOb/ykTn+txfo4Hw13x7gF3nDfnTkWmej/SjWndX8QWa85FJnh3eCfd4cP/N/zSmD7oa4dy6Yx4uP0wc/83KCXuEBOsnHcPIHv9GZvezofMGTPICGXmicH+4Dvjbwkbd4h8+XzqDee4BM88U8MGfEP5vjXTMu2fgY9LyNLxZo+KZ5uOLj/QRfg9DViBcZbIgGH7Gju32naWf+pHGu0uuUQwf2uZenzjFnXWtyjhzPdOJze8o+8j7qnRq+XBJrcRLbeIuXNbE3xFDu2P/OHGvyjr7ozCXTmiHv7Jvo8I6OPeb5ii8M9tCTT+SodXufr9v37KE/mfzGVvqR5dm8HJMXZLs3J3fwRitX8XaNjl8AHHagh8P/+OMTDr9Zx9O51z4TC7rixw/lpr1OT8DP1uRQX0bDl6zOEvT8z6eauD5fwrEmj9DChydv+osXfGzNZxt7lK/p5CzjP+9jOPyCL/50pA+b2M0/ZDir+cavfOmIL3mG841N3hF8iM8Pf/jDi49GNj7f//73L3nf/e53r5wcDAaDwdsT1gAeDAaDuwtrAA8Gg8HgTsFnPvOZq1CpgK/gp+imAaNY6VpxXhGtgqjCm8Kiwp0iKDpFfeuaRIq1iqueFQ/d41FREa0CpGKdImSNkorkFY0V9NAqNiqEKs5VaDTI1xhU0FO8VAgkW4GQDEVXxViAt0KtgiIe/hwgW6wrFio8ssGaYiC78OUDck957FCUVDxUSKSjArE5flK8dEVjnk8qxpKJJ9v4GG++91mD/mTC5Ts+w0OhVMMku8WELHrUwGBD8xWS8TSvAI4Xfyi4k6upVtHamjmF1xokhoKvZzHhP7rBpZvn4tV69wa/kEsffmMver7h6+Jy0vMP39CZX9hlXvNYHrEJP3kjX+gBF457vqnQba6COVusswUt/7DNvLzJJkN86UOe2BniDLdGmKtYySc87ZnmXOnOTr6Vj74gIKf4Xl5kJ7nd051M9qBjJ37iogliXlFarsBDA8QQLXmGe7LQ0Qsfepqjlyv+eJJvb+HJTr7gAzjshC+GNRT7Qgj/5IsaIOSKM94K6WJMf7zx5UPy3EeHl1FzlVzxEtv8Q5a8MWcPkAsHbraSmY748hW/0B/wgXyzZq/yU3bRhe3sJkfe4AkPXznX/wluX2pYxIt8MuBZZyM6a+SLU7wA/8gHMuHTSx6eeNbQ2sun/vDoWT6xgS/aK/mCjPTAy7PRlwrkHxr2ygF+loN87ezDS9zMy21yPJfbbNWIwRsuWj5zjtORrvRgAzx87Z3AHF3pQg/Ar3iEZ72z2RxfmSOTHOcaXcTQPVpy6WiuMxFdPNH27oJv4Gnwv2f4ycKndx8e/MlevoUP0ORnepEZmMuP8PE98Y3b+Pg7n5rPZvNi755fz1/BmnPm4Rtv8/Qx79m8OXlJH/uYTvibd1awFS458EG5YJhPH752huIHR/wAOs9wXJORfHnh3cwX9h4708GQa3LOPrQuR9AYzhNysg8uX/Cp/KafeTqJnXe5dWeGJiBd0LY3yZZD5Xy5jQ5PPOipgWndQCff5Dyd6GfOOWtfyme+RC/P4XTWoJN71unBf+jojU6OsRuOeVfz6OwDukTLD3KZDfwFpxyzB/kBfflOXzhksdE6WvjlLZ3hsEneNA/HfiqX8CQfD6N8ER8gN9mNTk7Bd+bA4QtQ7tCVXDx8XiBbbvm89O53v/s6o5y3zingS3e+yAZfzPGjl9gZ7sny2c47iE9//OMfXznvPSTXXnnllesXvu0D++0973nPxeull166Gr5+8Zs9g8FgMHj7whrAg8FgcHdhDeDBYDAY3Bn45Cc/efO+973vKtop2CnoKXArQHr3KRZqhPolhYKbX08oiCuSoVFgqyFaQV8Bz7tTsa2iokIbnslR7MNPgZBMRUw0ioQG6ApfQbHiL10UEPFX9KSLYrHin2Ku5ghauHDoRI4mIpkKe5pJ8K3RzXCv8Oj/knPPdoVHNIqLCsCKiGytiKr4S7argiJcMhU3rbOZreRbV/xWvFSgtG7wryK1dTLYS0fF4Aqz5PG1dX62Rn9zfEFuNijWmud3uroqtOILTzFYQdpVQVmh2pWt7KjhAR8vz3TwrMhNF/Ey1/x5bwByyWgdHzYq6nrmU/oowNIzHiAaOPLJs9xR3DXPzgrr7OYD83xfg84zf+UnhWh5wX/8wbean9ZqHrVO1wZZBlsMtrtaEzd6iCHdGuG6Twc2lDfyuVxx5SsxpYc1OWUfkmWgkc/0UJjHR47xD1yy+NZIb/T2DHvcG+kGL3s9k80nZBv5OjvYQD4e4sdmz+bpQgcy4ZLX3qSvPSBO5LHPczqzjW/izUfihM4cGr6xf4C9Y49apyc5fCAv6QLIIE/jRozlR3mNXzT4omGrPDAnb+KTbPY4V+jG5+yPP33YANxrHnkmS/6Rm0x823fm2Ffs5a
CGhdjWoOo8ECPP+KO3V9jl3HPlB/zgsV3MnW90wANvIMbFIfvhkEEXa2ITrXMUP2ce+8WnfdD55ursQOtLKmTLm84POcVG+SJP2CC+ZPAjIJvd9MEHPjqxcE8/V/RkeQeJCdvIwEd+8Du5/MDffdEjXcwZnanNkd8ZZA7EQw4D+OboQn9+qSkH8DDkMjzzrvzTvj7553P8yQGu5o34AHhizE5+0cgH6S4O/Oo5vfFo3hz/8VHnEX5izOf2Ij1O2Xg04sXf4sNufMM30JNjpFO8PDeHnh7kn2v8JCet87UYJx+dgYZcuPDseTp5ll/iX/7KVz43j4798lcOyYlyGI788J7DT17Bjxaewe9ymzw+pKs9yd72PBy8+RWtvWKOneY887c8oAeb3JtzRtEbjjV0bLdujl3lj3W+lcdyy7VYkhUO++hTI5eOcNiIv3U2wYfjnj1wil3zYuS80UjnW/z5wvCZxt7v3LeXksHv/OUzkM8ZaPnGMxpy2GfY2+zx+Uuc4fR5sV8J40U/dpl3L3a+QCRv0OApZj6XiqV3M5185tJU5iv/py8byKLzCy+8cPPFL37xep+TNRgMBoO7AWsADwaDwd2FNYAHg8FgcCfALyk+9KEPXcVbhTIFO0U494r+ioGKZQZQQFT89l5UvISviKeAqmimGFgh1ZpinKKdAqmiHDyFuJ4VtBUX8VIAPIvIJyjQ0akCo+JrdIqJ3tEaOGTTkXw60oce5LDL3EMPPXQVd2u0GHRSDFQ0pJu1fv2Br4I1uzVI4dc8Ma8Qjb9isHs41uhAjiu7yCcH8Cf/5j+0itMKrOxko3m0ipoGmYqwmtgKs4rFfEvPCqHwXdlJhmd0ZCmcKohW1M5eevGZa8/40r2CsdiwMV3obY3cRmvnHFp6nDjd4ycfzLGLf+nqqtBtPrk1cOjPJvN0tWag51v+lwPo6c8WBeWK+vJBgZfuYkw3uOb5B0+84bCRr/Ckl5i4F//yTg4rVNOVLGvkFitxpY/ivTmjRjGZBl3IVRinC11B/MpveZVuit1yAC3Z+AJxY+cp20gnc3jCwVNxHQ885Zs8rQnLVniGXEm+XyDjRwe68r95euYjPofv11pw04m/yNVw4kODnWJhL2nsskFx3xp+nuHUyMKPzfDJrylJR/YUQz4m03BPTw0I+IacAppp+PEnOvL4iCw2iQt98DHHh2ygjyvb4PMzncUUP/d8AV9jV37ip3nGN3Sga7lND1d6ZAPefJCf8JU/8Pgyfc9f9lrjC3GlbzExNNWcDc4ZdPYS3dlGlpynmzl09OA38Wk/wGNTvi4HDbbRydA8Fmf+4xNz9OEzZxcZBrvoKzZsh08mfmSS4zk/AXI8G9mogebsRmeOTebtqWg9mxcLPOQy+fYn/9UMO3HxhQvM87t4iB9fphNca/DJBPSAexvfCF8swk+mefnlOVv4Hm8xpmcQPpvMw4+Gf/mOz2saw7WHxJ8evsQQjbV0wgsPOoi58wOOPKLvqSv7xM6cYY5s8+xAK5Y1AM81uPiLg3xknzVnEf6GZzx639prcssaWraYZ493HZ6t4Wv/upqnA37xlMvozKc3X8tJcbdunuyTjjw+gkcWGWJsnT7k8QXfyz/x6bylP1o48O0HvsjPZPInn+Ehb+wfvPhFjpNLL2vOW/P2Y7kM0LOHHb1X6YKPOLRePvXZTpxrpNPPuewvINCR7viTTS4byIWHJ335zxU9m/iHDj4jsq897fOh84y/yKCH0dkBR3MZP/Ouclee+FzmXHKm4a+5S0cxoKM5Zxkb4dLR+96eZ5tnvPzp5y9/+cuXzMFgMBjcPVgDeDAYDO4urAE8GAwGgzsBGhP+9J3ipgKiwp7iouKYYrP3n+eKtgr5iqXwFOMUJRU2K9ZWyNb4URBVJIRjXYFQ8ROOAp1CHd6u8BTnKlwC/BQiAVryK4or3MF1xY8scuimaIifQqOit+ZWhWbyFR3xU2BVkFRY1MyiI1y6Ke7Cd1+Dm09qfikq4m+djBqTPiso3iqKutILL7jWFD4VO/kTL/pUiIWnSOpeUbTmRAVZdsHX5KA3/4uROU1gA/7DDz98yVPER6PYyTf48x3eiqp0Icu6ONCLz+HSWwFWkbkiajEgj901dosxHsCcARSI+Y3/rJu/1xVPuQWPjmLRWsMzHejWHPxiI4/oBPiI7WgAv8JT5FWU5nMgB8QcLp/VBJEvdIBnxJuOZLnvWgwNc3A0McKRp3iZo7O84GexEj9x8P9Ge5Yj9DDEqcaZKxr+p6tCPXz+EFc5mA/JRWPQW37TIR3pES56fD3X5KsJIf5wrBlkwzUH4NoL8tA9HcimH3l8yb982fWUa50ctJpQ4t6ZwLa+tIAfGnFBZ01e0ZPt9OJT+4Q+QC7zpT1JJ3GXy2Raw8fVOrnyBC3e/M5nfEUX+okjGjqQxRayNRzEEC2b5R0fpUN2kItGDto75LGdbPzwsg6yB72zB1824MGG8NhPN3o6A50XaOlCj3xMZ/ztY7oVQ341z/flhDl88UGfbmwggy5soD9d6Fgc6IIvm3o3sLW9p8GDH9/yO1lwAH7yr3m24G/fkNteZw/+eKCBay675IW408dVTtCRPuUpvpp99KYvPnzENut05EP3+LWH4Bnk8A2/wyELvnPUvgw/CJ8s+wAks3Eb35y8Ms++4miIFb8AfmmeDPNwgWd6eQ/I33gB8WW/efmUDDTxoitd+Dv89iFe5k+9+AO+wdf8ws/2gFh4x8LLH/iJf/uU/5Nt4GePWHdm01UsjZq87RU5RjfP/EO2WDhb0eFjyHH20o9e9ol1dN6pcgydz0PiiIY8+69ncUZHd3L4Ao51NtFHXtDJOj9G47MSW+hhTn7SBS0ebEUnrmjpihefkUOm5+LPBr6yRm+y4s2f8gxP/nJvj8HtXCPXlf3m+ZQf+pUvnmSSAYc+dPF5R+z515UP2UYf9nh3yD2NVw1afsGXDvTzvvvN3/zNa/86F+QJ39OPbvzizHZmATo6S/oz/N7ZdKQLW8lD508704Ve4uBzjs+3csJ+oqMvANLH50D/zy9dBoPBYHB3YQ3gwWAwuLuwBvBgMBgM3vag0KiIpmGoiKeBqODmXpMNKKYpuLkq+CreAQVRRTTzQLFRkbACt0Kpd6eiIRqFRsU694pxCo2Kgwq/nvFW8NPwOIvE5l0VRxX5KhArLuKpIFlxEW1NUwVchUvFwd/+7d++ruxSBMQbHwXDd77znVcRkv4Khu4fffTRC0/Rkb74Kz66KlCiN+gNX4G9YrJnflTs5iMNBIVctuLHBnRo3KMjl310VpTkQzb22UMRVhOEL/mDHnDIwYsvNN8q/sNBY418Miocizl8jX8+cE9XhVL4+Fa0V1Annx14KxCz3zNb+Z7fyKiRwqeuDfTWbs97Dpqjuyv9xVNuROfayF9idDaDDUVkgJ6OnsVK4Zet5ujDXr7gb3IUu9kv3nxlKDKLA3l8z+/8Ct+fIBdX9pl3D49P+ZleePgiBBxNJ/LhKMbbN9b5j918mY/zF574ufKxuNCPXornYpWuePMbXAV4d
GKIjj7Jtt5+sicUyvG0bxTq6UA38unMbrR4OBvIgqtpCNceFB85Aa+ccuX3Gib2D9n4ueYDdPzenrQGl230Tz4fsh++mJJJB/6SB+j4DR9+QCvWeAI2sindDDhku+cTPPhLPO1FOcJP7CxvyCcbbb+clDvm2AkHL3rA6Rd0nsnmUzqzy1z5FR7dyYTHB+SKt5jCEwP6xo9MsuWPfHYmoKdnMmp25R8DLz6Vf3igYbd4kQFXfJxn/IZ3ueYsI1ujnO18KyYGWjSGZ3zogTd+7ONjfuNvfFzll1yEa5ADF3/50P5hB33xZxe+5qyLGXvITJ/msyNwtvKbOVdAD/qXK/zE13JEDgDzdE9/vOHjAV8MO0fjyw7zzg7+dZZbM18uxh+E37xn8vDnG/zFmQ3mTz7ywHNxzp/2abwA/nyXTtbiZc3gy3wAt/2UnPCT450m78THuw1PdAZ+ntNL7OQcf5JvT5y8rPGR2Ntn6ZHefN77jG3W4omPM5o88/jJHX63Tm/3cgtfa+blEXA2kYGObmREx+dks9G8e7jW5ZvcJpsf7C/48kQe0p0PnVc1tPG3F+kMV56i60sPaNuj5LPJ3mC7HAN40xW9eJovDmiBdT7mU+9wuPiTJZ/Qsd/nArGND73g0IufnYl9KcUaXu1RwAdks8t+xpu9rvzEBkAX70a+Bt4p/OFMEQ+DDGcffvwllnh5T9EFDf/jrYnLRp+D4HjfaxaD559//vKR94ccUeT/+te/fjWeyRkMBoPB3YY1gAeDweDuwhrAg8FgMHjbg4KbApyimaIfUHhU2FNkVCxTJPP+U4BTLFWQg2NdQU5xTiGxImyFQbSKlYqeinSK8tbRKwwqvCn2KeIpFuOtiKpwp/BYAdacezR4GQqbinmKj/iisa7w5/9zU4imk6uCpGK5AiAbFSr9IoRu5g1FW3z5gl1s4BsyDD7Aix803sjEH52mtcKpIjV94fEFXRQlFUL5C61mAd2todFsULzEhxxFXb7Dg8181a8R8VY85Q9+dFVM5mexUURmo1iSiQ4NHfhEIRmuQTa/uCcXL3L5EB7b4bjiKX7srXgs5sU9P8DFE445umquKg5Ha4Cu4PZ9g43iSg/+UlRXhBYfA085U2z5tWYwW9jumU70F3s6umcD36PhOzEwr6krP9DSQR6KpQEHvSu58gdPg43mw6Gf+LHBGlw5xWfkiI98ozc94CuG1+gSF3GoScpu9O0RNinQizk5YpgN8JPjygafYemBh2s85TGe9qJn/paDnvGWQ3CsoWEP3+JNPv+xhb7Z64oHndKDbnKYLmzFjx18gB6fGmnWAB3wox9+/JQMOcFP/OqZHHT4wYcHh9xygT3mfNmBHtGIPxvZV3OC3cCXCMShvUcn/PCRj2QbbLC/6WMP8XH82/f4s09e0UUTg7/pqpEEzzlIF3iAHGcHvvaSe77OL/jIP75Jd3LkB52cSfaOezlAZ7zp1h7Fw5mGD3vJIhcNu9nP1/SCz8diCOjtfOEXeGjxS0d8xNY5VdwB/eiMJ1080wceHLjmyMNXPPiU7vAMtpw8gX3NR+GIFR5s53dznvmOX+gPrME15CmInh1ynT/gwwXW8JFLIB78Iwb40yUwz59ykj4B/tacY/DTz7whLvg2D5c+/EFX8+kSH3Fo3pxBbrpaw5s+6WTtNo34iCe7ybOWvumVrnKAj8RDg9B8+GRE4yov2nfWTjvR4SPn6GWdnaccOvG3K1tbQ+PMt6fo62rOuWoP4uvcoaOzEI1cdb7IPbmIxhWdeNBRDsKRk3Tkf3v11IdN1sj3PsQPrfwv//Bw/nifkEsfvNlOHjvYSj/nQbmMN33ZQQab5T4ctOKGN1+hoV845POFdb7Hny72SnkCn230wgNPujgPXO0/V59L5DY+eGY/ncTTc/mHF3x6s1Xj2HnEv+bZBc9/Z+G/IRFrvhAfPmSz4R1YDH22IePHP/7x1ajmX3bg54t8dKEzG635P37ZZv5rX/vazTPPPHP9mWh7czAYDAaDYA3gwWAwuLuwBvBgMBgM3vagAOcXwDV6QMU/hX2FPcVcBU6FNGuKlt6HCpyKhX4NVvNCcU4jS8FYoQ++K94VV/FStOvPQSr64a1QaK1CpKKfoqKCY7SKmAqcioXWFQr9KULyzOGnMInXSy+9dMlRBKVrxU3rmrbmgWKhwiJc8tnChvSsCffII49ctAqXCors5ifFWsVIdBWY2WPNL4/pgp4vFCo1dRU2NYDIoQf7AP3pyDbr6DSJxIH9Cq9kwKFzBVYFZ3IVdt2bx0Nxl25k8gHdycAPL4VZTRW0NYLpRq4rvdhFR3Fhk8YN3tbIM9iLD194BmwTEzJB866358C5TpacETP35sRDrrBN3PEgk/1w2XQ2eujHV3KGD6MRe3gK4+WNBj85miDyS5zRG2w3yCpWruzDm078yO98wwdwwoen+RAtm8jkY3NoK6jTS54kA29x52+8zeGLXt7Sl030wEdeiKErnujExtW6azz5U7MSP4PMeHrGFy5e8RZ/uY+PM8P5oeBvj2QX+8UDjfzDg2807PiebLTw+SB/mSdD3uEv//kinLOJGtTQ1FzAUy7gy09ozvOj/LIPxERuaCqYd5+/yOcDtqKVV/TIfnryN9vEBG+y7CvnhSse7O+8yhftL3sNL3zRykV4fGuePvDYRD7ZbBKz7KQb3eHxOzyy+Ts8NlmDx1YxpT/c9pNzJBl8SUdnuXwUR/qRZb0zg0y+4ic5YN3+wY9t6DwDvOU12Xg1Z0+KhTlr/NM+bc8D89GfPDVx0Idnju/4EW7zfCKvzSUH3qnXOU8HPKJnMx7m+Iz95bRcE8MAD36y56zd5mONb/js1I9c6/TEwzBviLVn+rrGR1z5P3w8oil/6WvfwEXHZ2THqz0nfuzIL+bkIpAP0RjJpxd89/IhOeTjBTe75LO9Qh9+K6+tGfTgLznFZ3LPaN09vvxuTzm3yEVn/8ltuQDPHHnOdDRw+cO92Bn2klxCJ99OOrbQDT/6yBExszfoj06O04tv6OM9Yp5+bPYsB/CgX01gPDQo+bs5+Y8vn5FHNn3Ne/fyPaAXPeUIH8JnB5xykC70wpd8fOgFR7zI5Lf84Usl/SUJ+SB+eJNlXZyAM77cpBd/oMVL3Ps8hBce7SHz/M3HwLnrcxR5ZPCfuNMLHlvpKk99ac0eQcOHbPc5hs3se/nll695vNnsy1v+tLN17wODHk899dSVC4PBYDAYvBqsATwYDAZ3F9YAHgwGg8HbHhTyFPAUZxXigEKbgppipCKgopzinEIinArfCowK5xo1Cp94KOh5VhhUTFUM9P5UXPV/sykeKioq+CnOKUqiqQBPF8VQ9Aq31jR4FfkUAw3FQU1ABUa/HramoOqXJHRVZFV4fMc73nEVUxUm8WOLKzkas+xgPxlsY7OipKIiOrrhxXb6K3jiwS8aXhqr5CpUapZXOOU/dvdLXPN0Vpz1p6UVh635JQpe5vlC84h+mi8KugrF5JLJXnpoaPE9ufR1Ty+FbHbA50PFWjbwPb7mFKYVRdGKg6KuYik+
9BQDV8VteqERV1e6iBs9+cfnIb5BIyZiwA78ihO74fAtvHCNV4NzHT5AT+d48wcfuo9vuHTgJ/6QH8Aa2z3jJTfkAB+JU/nFruTwqVjC5VO+xYN/zImTOIuTK1o+tc6f5TBa/MoNuvEtHnjaB/wLRw7Jeff40U1+ujeXXDGx99DiQaZ8gkt/I1sMOWBfyBU57Yp3+vInPmLKDwru9DQn5vg28BNT9+ykD1l0YRe6bJRHcMkhUz7yl3jkj3zruWa0uIqPGJ+NWrHDm7501fwVE8/5OJ/wlXv80tOz/UMP/qBXPMMp7/H0TEey6ezXa2KNDg++Ioeu4WlswLO32Gaez/AiQ+z4oD1pzRnG1mxjk/xhHzpz5NANrdiXS3wTLf6u8Ogh39mvaYIffeOXLvLBfDnXPqJfMvKD+BT/9ina/Aj4hR/oQGf78XYcxI4NfIle3tMFrlwl64whPOcnf5BDhsHvZMFnn3t5JrbsMMJ1bvGTdwRcssJlB77lkis9AvTZyk/uAXvYwUfh0yn/2mPxB/iY51txEx9rzaODjwegh1HT1j263tVo5MOJb44+7LEffHnLurX0khvx9iwm9jM/m5O34tEvOg00+RKN4Z1AjrikR2t40ZWf6csX4mnOuzN9DHPijd6ZQUbrdLGePdad23Q2T090zqnsYT+5mq/u6Y6PnDQnhui8C7270SQHnbjIKfLlPFlG+st3tPSUU3Ibb/f0jNZzecd+tHKvBi5d7AXPcpVO/GYOb3qT5Vn8Ad70RdN50/lrLwP4PkPILfkJB3/3bJc73nm+gIcmHPh4usoHPNlMliub+N8VPVvpx3dyxb0v+Pj8xyfyo88Q9p3GL9lw5Ax8cTfkCN+T2a/H5X1fvuMv+PYgHa35/MW/1vryoSYw3cw9+eST1/pgMBgMBj8L1gAeDAaDuwtrAA8Gg8HgbQ+KkgppinwKbBUIFU4V5hTyPHv/KQoq5inKWQMKg4qTCpsK235NrBCnkKcohw4PdIp+GmuKju7hKN4qHipsktmaIh/wXIGzord1hVJFV0MBUpO3RjTdFBPpYyimKtiCiqwKhIq65KJXcGRfBWh4fAMULOHDUcwlH22yFTo/+MEPXkVoRV1z9KeDAiTfKLYqZNJNgdW1ojveNYXQ+rUKP6FVnOVPBWSFaXj4kllB2jydawIqgLJLsdQ6v4oDXmInxuQUa/poWmkyVwDGWz7gI35sNo+HeOBn3SBbMViMaiLBJZ+u5HgGXYPbzwEewPo5yOEPulYwFhNr5Jyy+Kg1sRMvtOjke8V29/SWQ6AGhsHv4savePOF0V7JB+7hwPVco8y8e3mMHxw+N2eN3JoWbJMDfC5u7hXG8YNn4OkqDxTti1+5KG5yqwYJ/4idwrhntstHeOTUdDXoxE/2M13wtQ/whcuXdDntzwZ6sJG/0JpjU3tM3tBDs498a+y2TkdnADmexYSe7DPyo3X60R2NWOJJFzLpAJ8ens1rQNClZ/zRkU8vw54QGzLg9as9gw/oiTf75Qj5/C2W+Fn3TG5NM3rglw78gh9gn3PG2YceXnrIa3P8DtI/WnjOOf8msc/50l7BrxzFlz50dp8/4DqjNKfZz8f2Lv/yK73xh6cJY1/0RYizMRstf7hHJ7esG+wgG1+68YE8Lo50AubIzfdw6c0W9qKjH3/BLb/JIN9c+xMtXp1fp56GPC+OfEFnOZmv4OOLHk97AB+DDng7j+/V6MWzBh18uObFIz70QyO2hjW8zOGPlyFW5oz4wKcz3ZzTyTY0wvDBw3w+4jt5Yo78dDWcHdkUf/4Wa7jyn97WkxNNdGR477jCPfnJBe8wc+nrPvly3P5w3z6U53QnJ5n0Kt7yo1+qo5Oj8k/s4YmZOe9SsRYTvNEDuPIM3+bJw5uv0LXXeqdYx5c9ctD7DK69SU95w7b0kGflhzX+kMvFBB0+gM/kH57m7DU5Aoee5thKDtvx5jPy8OYDPPrsAAc/+tnjbMiX4ioXiqmc4R9+wxOOd6Vn8QJ4dWaZZwc8saMf+fSjM9m+xOLMkKfu6cAnPnexnS504ksy4cAVUzb5oo8/4Qz6xS8dfUnOZydy6C1ensn2hUJ4zgH86OZXwc8999xlz2AwGAwGPy+sATwYDAZ3F9YAHgwGg8GdAEU1xUGFNYVIxTXFOg1cawqmCnUKj9YqNipiB3CtK5IrHCqK1nBS6FM0VAhFh5eCu3uFRGsaOd6x8D0r9sFX0KwIqYGgyEsf+gEFU/ooFvqlm+bGD37wg5sf/vCHN9/4xjduvvrVr948//zzV1HxhRdeuNbRKFArqOPPdrIUEOnvir+iI/3gKMx7RmNd4bXCuoIzntayoaYi2/HXONLQU6jEGz6d8y9fsNmfK3z66advfvSjH13/zx0aOpDDZ/i4mlOUxcvnE/qQXxOIv/AkH3+ystMzH8NDJ25sE1s50J9ZVLglq6YGHRRb2afRRTdxpzd8ttf0AejwNgfwsGY+UPw9r625nvfWoy3eCsx05uvwowF8xDfmxIJvyqEK+IrR1jTT5Kz7GlUN8Tf4UkG6OOPTlU/lTg3Jhl8+iTG/eT75kUNectgjRmJDFhz+F0v5UtOifUa2dfLtKfRGdmazuLHTvpTHrtbjb52O7NL80Sw2X2MUD74nh0/ta7h8i4d8oQu96GhoptCFDnDEixz8DP53pQdb+Se/A/EWD89yXkMh3eHLJbriyT/0l+/mfGGD/sVYbOC4F5/8Xa6wBT693bdn8GYXmfJdrvGjs0Pjxr5vvwH7Ai46vMijEzrxK450dM+G8PBChz8/lNf5nD5s5Nf2It/yEz/iIS7yAx47OzfwMJLBj+msiUcXuGjpJxbNsY8/zIsB+9DKB/ayRXwNQB880Thz6cImuHhkh3OLLnDR4oe2uKM3F/C3OQPwj/PHfOcCcC3GJw9yug/Xc00/OuDJbrnqPUEPuHSiK7/bn8A8fPPw5Vp84eMjl8SX7HOen+0NfmkeH8NZSgbe5uGKK/36NS855UA6mY8PGfizwR7FK9vEVPzhiUf7lJ72AJ/1GeDUCw1Z8sheKM/xY7s1wzNe4iwv6WY++YY1PpcD5KNvzbvVM13sPetkRSPPxYXPyTJPJ/zphTYf4EE+/2UzfDzKG7loXh73hRvy8gcf8Ic9KYfLbb4ji45kmw+Hz93j0ZxYl5NoAN2cDzWBPdMJb74zx3fkRFfO4MkWazVv/fll4N2Ani/line097w5/PkNPz703qM/HFe2F3fA52j5An5nTF90IYNveyd4D+HtneC8Qc/HYiSWfM8ODWk2yNH777//ykFfDuqvqsgN/uIT71w88EPjc5FfE9sT/Oezks96zn38B4PBYDB4rbAG8GAwGNxdWAN4MBgMBncCag4o8FXEVpRUiHRV2FNgVHhViFMM1Hx0r6Co+IdWEVXxUTERjl+eKBQq5imE+jWINTIU+RQO8fark5o8NRUVI8nTgFAYVEhUUFXEJAOdYiW9zqbvP/3TP12/AqkpdRvoquhpaOTAffbZZ69fjSgseta006h1/+KLL17DOvxnnnnm+r+
FzeGlQMtOtHRSFFUwds+vdGBThXz3Grzf/e53b5544omLn8Y0+n4tc4Iir6aAZopiK3p8+QNP+PjC42e+gaPIy1f5STE13HzJx2ImxnjLAetAfPEQG3LgKL6idcXP5yL2k3HO44FGjBWPFYXlBzzrNSuC8x6Ug11b92zIue7ZIJbsMA+3NbFQQG5OYVrRWKFY7ime05u/FNbd0w8OXI1Q9sllNuVTdOjZ55lt6OS4OBhiJDfh0Ysf8OB3RWpXOGRrWIm//SSG9p118ajBSBb9yDDoSw/3+KMlF2/5otiOjg34KKLjaz+ax0te4OvZIOtsOCiu4yuWaPGBxz/k8gcb2Y/GwBPApw/aGi54opfLZ6NcfNLf4APr4oc/GrrYS2JhaLjgDYev5J6cpb+YkAUPTjGUr3A9m0ej4WOe7sWd7/I73eDlG/rxt7ildzbi4czKP3LOvUa6NXuMXwBfwiEDnvxgszlnF3/JF/LY6J79ZNJP/PAGdNAcFHf4fAOXjOJ7yvBsD8BDy2/sI5v9/F9j1zMgl22endXsdc6VqwaZ6NJZLvCVPHcGJANYlzd8D4d+9jIfN+cMJkc82UZfeACeQQdzBh3xsDfQn3zFK/rmwm0O0Ms5xf/0Zov48wl9b+PbA3IzvwftDT6gI6Af/ubpUx4DtqCx3pmVPfDJ4GP4zecDuXXOxZ/f8MpWvOkphmIlJtGZRydfTvnp6x0HvJvNk2ONzs44983R076yRrcTVx7zK//ZF+bJt0YH6/jLU75Gaz4aV/qhM8RGrOCwt70oXvYJ3ekjl8VVfoaHlxjkN58/6IHGGt3xos/ZBG4fyH37B60BR46nrzU6ihl/s6c9RT9gTqzYAKd8RktPeHi7RwsXvTjg7d3nLLBn5Boc+tYExpM+YiSG7O5PLMNBwy98xF7NZDj4w8FfsxYPvuE/uOLknr1w+Jov8XLu8KnPUef/L+zc8P7hG/uZTvaUz1P+uw66o/GZQx7KefrIBXKs4WuNLZ7/4z/+45I/GAwGg8HrgTWAB4PB4O7CGsCDwWAwuDNQsVLxTrFNgU+BTrFOwVORTYFOMVNRtGKgwqUipeaCwhxcv8io4K9g6v3ZL6oUGTUfFP40OhQOFVIV+fqzfYp8ipoK+HAUHBUXDU1Zz4qJGr5f/OIXb775zW9eDa5fdSGQ3BMUUDXYFCLpSQc6+RWvhrFfHituau75c4XWNXn59hcB8vleIwk/fjf6BRCf1eATK/F0X9Gd/10N4DNNxXD+B/RTvFfwlRN4dhVLRXBF6Jq65IuhNbLFV7wVguHiW7MErjnPCsK3IbwTwrN2ewDr7GQbaN6VT9gjTwG/wZdfmghySI7Jc3oprNccg8MWOc1263yab+H27B6O4dkI3xoeeCm2uzesw7NH6GmdPyuK04tPxUr8FMP5Ek9fiOBzvPGBjw/56Mjhk7Mpgi98cRFPa3jyTXacNvKNwj968/jap3Slj9yx9+Fr1Im9xq41QwOBnXiRK3c1FOxhvOwduWwePnvoDxdkL3rx0BTIh+jRylO6oRFveVn++7KGHKV/jRF2oyHHc3L4gO58zT+ATmwVA76TX2QZgEwNI743J1+cgWTn++R5dl5GK/f40lx7+sSjH1o+LC/yC5+IJ3k1oOhRk4hv7DN7kK+LFzv4GH9zdIGHxpXtdIHH1uzHg37koiW7PCNb8zh/ioG8KKeas+e8N5oDePE1f+GZbLLEi//kD1p+5bP2Q3HiF/N0MToL5AEbzCWff2qumec/c3D56gTrbKAbfANf8uDXtE0eP1mr0QvYZ9iXdISLL5nwDWvRpA9+GmaeQfh4kZGuyTYvP+E14s+P8sdc+PzsXZxeyYUvDnyOn/mTpnhEY5CNLn7ywf6TU63RGQ+85at1uKfe4Vp3ppDHB+jEWtx9LsGj/BN/uPiXP0CutG+ixxfIVXIM+S3XxBKt/cA+ssTEc7LFn718T3f0/GGdDeyG4968PWMf4EMGPWoC08telgv2HR/D0xDtsxQfA7Tk8Scf85MzmSx5WGOVD9y3J/Cno/ed/5KDf+jPfvjyAg/84GjS0lss7HP+8JmmX+Z6hzpLzXeuunfud85ryLKNHs5rfvNrXrTssB/xN98v2a2hYTd5AL3PNvTyK19ADh865/gaf43fPi8OBoPBYPB6YQ3gwWAwuLuwBvBgMBgM7gwoWip4K6AqSCrqKUYqyClOKgwqHCp6Ko4r0GkooFOMBAqM6BX0FDg1qhT+8FJwVMjUMFXgVNhUmFWoNK84qIio0KdYSbaGqSKlqyKgf5h961vfuhq+fjXbn6q9q+AzidiIhQaWZpbCLL8qtCpyK/7yqXlzisNiKiYVr82LAzpxc29ezPEQ74rz5j3jixdaxfKu6MVaHniWUwH9rBf31sTYfKA43BWvwP05mkNfY0SzXc6hNTSc2E5f+at5IGc0IDUVzNHDUGSXh4rodEfLRmvdN+h7+/leOOaaP9fd45ucnruKjz3iao7ebOA39OIu/uLD12JiHl40J/94ijmbzcFHz2Y+4Se5Ys6aeCnel1OK/TX70gc+f4mBYj6dxUQ+4Kmx4F4u0SE9nDHwPdvvcMhln71fQ9Qzec4YPBT/6VJzhW7koK/BQWb6mw/HueNKV3PWyEFjT6Ax2AUHH00V+rHL1Rz9+FEMTv/gX4OZXeIPV9NLHHxJpL2kmVEc4ImhfETrmU5y1BmrGe/cldfkG/Qjm97wyCXTOWkvWCOD3mwSEzLg0SUZ9rPmOnvMw+UXupQ7oIZbeQP4Q2Mrv+FnjnzPcJvrHWKeH+gHj2/yAYBf8zaedIcnb9JdznbehZ+sdDBneIbfl5Dwgyu/zIubuXiY59cTzPGfXJd7gTgY/IwGPcBX7tJRznUm0cW8GLInmnTEi6/o3xx88/IFvnmQbPhsgCt326t09m7F31qy5SA6+898NqPNp2w8dUKDNhoDDTvYh4fGYHLQWNOkE0f0cgwNPgbd2ofiy7d0QCcWcs6eZht8ctiWrOzHw1ko/9GS44qvnO2scpXvzpL2aXmIL93gm+MHOHLTHsab/+lRY1juO7PIL4fxxgM+nfgRLjpyzNGPT+Qc3vzFRnGUJ67W8qdn+QXHux5/QEf5QF/28p8vwbnvrw44G8zTgZ58Libu5Yg1+8K+gsvnzgP6+/ym4cxuzVq+JlsTuLNW4/aBBx649OBbfMnll87sPqfgJQ4avI899thlK3n2lfiTjwed7I3vf//7l23uyfmXf/mX6wuA+8XvYDAYDH7ZsAbwYDAY3F1YA3gwGAwGdwoU4hQPFR0VABUFFSgVHxU4FegUIRUAFfYUABVRXc0r+ikwotOIwKdirDUFXEVWBVSFQIW8GgKKgoqMfj3sV7Rf+tKXrkavP5NsTgGwwuXg3sC/muiaZIamsCJ1xWtxVKQWC4VWxVdrYusqToqtrsBnHkXmGm/oG+bLD7QKzHjjJe4KuuasBXRRHHZVEK4JJq/gia38wAMojCuEA9cGOc25l0NoK+LLRWvm8Y3OvWYaHI01dshTPBS3PcMh30BvWG/IVX6p2K3Irb
kBP30M+cq/8MSFn/K7grvmwsnblxz4qyYZPczj656vXa25V3CvaUcuvuKGx8lfQ1ws6AOHrvFnr3i7t48N8ronw35OLv/WAGEbvqdu6YpnRXxzBtvFEy3/1bSz561pdqAhxzw98WrUPAqHnWdDOhr36SPXNGDkWPpH4xmec0i+0sG6xic6cvBytvFDjdPmyPfLOfzZIOecn/jiaWheoJNbdCcXDv7o5YUckhvwOnvFCa0mDloxINc8/dlDF8CXdEaHF/lk8Fcyyj+51R5jqxjCEwv8z8asa/rhw7f0o08xpJe96z7/4UkHdHDxQa9hREc8rLtHz39iZ05+GObav678bZ/R07OB3nCW9D5JvtjFA2+NrGyFH8Sj8yJZdHC+8UPAZ+zhe/kAB6C3J+DzARo8DPMGGvpEg495+SAX4IJkuNIpf3iODxuaJxs+2+WDOMgPsqJxzpLBN5p39nzyT14GPtbgWONXeWNvphvf5rdyoxwnpzX8rfM3v/BZOskJZwed8PceiD8aeW7/nk1euOLPZ3jQw5xcd5Y7a9DSGcDlL7TlJj+LD97yiQ5sRWNYh89Pcg8O2+DgLZfkEZvwYLt7ecMmNpBrT9QEhs92uHiTz2a+wg8tm+wF63jY94D8chCNNbxuN4HFih/oQGY4dMHT5yy/xuU3Z5VzS7x9nqIHHPf8zU6yxJAMnyHM8y8fyhE6+TxALh96pzof2MJOeUgeGXSSk2TTSdycV9534ta5hse73vWui8bnS+8wtvmvMnw2lMeDwWAwGPyqYA3gwWAwuLuwBvBgMBgM7hwo8imYK8hpACgsKjwq7ineK1oq6tUwULj07P2oCK2QqugJT3NNMRSeBpRGrj+LrKinwfuNb3zjavL6k8n+0eXPJSssamIq2g5eP/ClX0oraCveKuwqsoqfAnmNCkVYOPxeEdnVr3bMKRrDU1RW5FUktq5IrMD80EMPXXgaF+KvqA1H3sgJcuGTZ+AT+HylMEymom/6wKvRFeBnyK/APVwgV+kAktWaIrImGD8ofCs0K+CTTTc+4QMDz4bcramhGQBfQZyNaOwTaxXI7ZMaRfYHGs/w8Uar6M5ffIMGHuC7GhKK4DWEG+TRR2OCbvTG3+ArOO7jT1e6KdbDt5/5lo72Ld3h2r/8gy+eCv/uycMXHza4x6dGjBEtXe11fuBXusCHezaDPYsTucUXD7rQmQ/owC8aEvSUL+bgO5/6TI7GkGv41sg6aZxDbNYYM2+d3u41MPCEJ6/kHzvMyQ95Ys46GvrJf00N+vMjXPLtBc/h0bEGFd3YDujyG7/xG1fThe6aJ/DTC385XKPJsIfpX6OnRpk59HwND7BDw1X+iRu51tkqDvlcTqNlh3gUI/qKtTxCZ7Cv3IDDz+RrVpGDVl5EHy+05m/rSJ53Q7knZ/BjHx/yJTq2dGax9dz77vFlD96e4fEf+fxpHwE87DG5ZZ3f+ZF/6CFX0Id7+xzBl658opEltp0tYmbwGR1P/GjkUeeQOfjyStzRwI+GbPj0QMMfctKafUYvawYb8WC/X26mU2vsxFdMgPv2HrtPXviQ4wpPvNmZXoazXgzFSeNW7M2zh3/xFsuaq57p0R7Bk21sksPoPNNTHlgXE+ualWLdGU0Omt4ndHWumYePB32tm8ebLeItvvSiDzl0p1d07JB/fIeOv8y5xw8tP7QH25t4lSfk4G+tLyzYA+zGyzpd8RBH/OgBxELOwqETWiBHyim5iz95/CFf6edK33DQ8z1fOaf4An/5iZ5PyHKFw4/OJ7GtCQz4mD+dn5q27OUH63TCk2xx74tUeNGJ/c44Ojm72O2s4Dt85YPPe3j4k9DWfTa0t/j0ySefvNYGg8FgMHgjYA3gwWAwuLuwBvBgMBgM7iwo9GkCKRIq1CmOKsgp0inuaY6Ye/HFF68mo8aPP9n3ta997eYf/uEfrqvnp59++rr6k81+9aGxrKhZkXHwxoCCs+Z6hWmF25ozirEKwkBxVkFeQVaBvII5fDFTBFakVqiXDwr95zxe8D0rhCsOKxbX1FMQDtyfowK7YrnPXArn+JEd1Py5DScPjTU2NhfUGFAgZ6NnV7bTjT8M9Gy3rpiNhh6tNxT3Fc4Nz3jEx9rJs7l87b7n1szzl+I9m9HzLX3tNfcGfPHkH80LRfUabHQ5eeKRLPf4awSQVZMPvvUaOPY8+eIrlmSIn+J+MvJZg4/Q4ikGdOM/uuMhHmjhJq+BBv3ZrKwp4RkNPnKSLuY0PPKVnEBz/tliNOUvvtbRNkc3+clWfF01BPGwTh4+7JCLGinpwzfOsBqd5JvTdD11ymdihif+hqaPvGI3fubw90x/Octn+KBDH15/rpku/EwPtrGLHXDI9Wxv8oPmDH7w5RHb+IJt+NMxv7HDGaEBiy872kfF0IAH+NBztGSQ7ywgw3vEfpZL5OBnwHPlC/HJHvrgWb6Xe3B8IYJeyXFWdZ7hlU4AD88G3V3JLz70Rc9//OncCd88Pe7V6CVPnNhkjh4Gf5t3npgHeKBhu/y6zQuN/NHQax6NQUc5al+TZU58DTKS67lc4h8y8k3nBd/iYT69rKMjR86cjVlDPtyWwwY6ibcvBuFhWEOjYcsGuZwO0fCrWJvLL8UXjVwVB+t4yRsxQUt3+AZdxRaOnHA1JwfoDAete3kjX/hY/kdHrjk5ag4uPdjras/gTXd7pPwih370cua6wsW7JjAe7uGLe7R0YZ98w8efTTbvHKcDn4ZDJj3N8w0asgC95bDc5Hc+DQetOXzlN100dflPrnmvabbi7fMb2nKEn/gBjvjBoRv75JTPd2IE2EtX/0ew+PCjxrErPfAlz2dD/48vveCRyW73Plvijc693PU5UvGdzoPBYDAYvJGwBvBgMBjcXVgDeDAYDAZ3HhT1FOM1ETRw+zWvP8vnF73m/JlZv8pTAIY/eOuCYq+Cq2JvxX0FY4XthuKtIr3CreKyOUVbxV2FfPmgmaJQrBCsYF0hHF9Xz3ABegVzRW7y8CcT73To3tVAiwYfdIF5A3384Xc1FMEVzE/e8Nmg+K6QrTiv6K0BQmcFcwVvDYQafebJNtB333NzrvDP9ebP9fP5vA//fCZbsT/duhp8IjaaFJ4BuxTtxYId4hPPfGbvaijF5xxsTgb+7jUyyPF8yuBHjbBkWEdHd1eFfo1K+uFjXlOmpg96OeBZLokzXvjAPWXBd289XQH56PDFz2d0Vzzxdg5FQwe+1AjRFKkBiK+mbbbzEZ4au+Y82wcaLvhpCNUU0jCreVLjg75k2l8aSXSSa8Ux2zSFyM+vwD6iD/6u8jefZLM81hTyTAbfyWE0ZBv2HhvZJff5Aj7Z9itb6APYRhf09gx+NZLIRyNnzPMdvfibzeTQxT1aV2uGX/qRkw/5lJ74hIceDjlw8o17a/QvH5wf5vicjXzL53zGVn6GJxf5CL49wDb8DPNsM5cceHwgT9kHB5hjk2G+M8Sz/OI/cgE+8M07U4o7fMO8oUHL1uZPXsk28OIj71Jxw4ueRjT85lls5bwvLZhHm75GsuU7ffE3jzY/g
pqI1vDAl6+sOwOiac/G75Qjh+jNHjTw40UWGnrnVzR0Fw901uDLDbEVa7SGvWPPObvEyhx5QA7YY3ITTnsGD7zlniu9zLV/0MlJc+JCBp50Zze73OOFb/uBHHqKC96uaOVne8Q6WrlxNoHlZH8amf3hmMfHffuCf82Ty17N0QcffPDirVlKpn2qERyO5i3+Ykk3/uJbZ4W/4MFuZ7OmK/qz2evznXk6+JIeHZzhfMEvnvHBg4/FkH79ytcaH8tznwnZh97/2ct/zlRyfFb8rd/6rWuv0vHxxx+/viToy2liMhgMBoPBmwFrAA8Gg8HdhTWAB4PBYDAYvC1B8baibcV7RXvFXY0boHCrSNsvlPx6SGFXg0eDQDEYjgK8z0cV4hWeFXNdFa7RKrYrDivgk422poPrOYJk1LBQRPcM3J+4AC8FbMXps3ED11Dk1ljQPPSZTlOiBps/U0k/hXH6mjuv58CLnO67+mIEH7hqZPCNJoeh0cAG9zWf0fR/9JKvQVaz4Byn3J670o8dhnvz7vlATBXwxYxs+OIpRtbIo4+mAv3EhS748Ety+UTc+Afv5uQNOWx1X1OHDuJvnhzXGnro5Jx7A64YiQkd8AlfHtLFunill0EfTRI64cEWDQy86QennMYTb/long4AHjrr/CSHNdNqMPETf4m1nCcTjWf7BJ94ATrzM52A/KU7e8zjJyfYxW9k0su8ZokmC37yllz2soE+6GoO8XN2otXAI5NtaPkef7E0R1/z4ktv+0Cc+BRfzRl4+MGjE9/ynXjnX82i9og5sqNtDsjhYgDM04Xd5jzTT8z5wF4IyDfwoHc8wTnHr/QTE00wdp3zNengZ795TTc4wBxf1ritQds8HWt0nvh8369tz3mDv/ABeDXPb51JgC7iygdijMYaGvNo2Ebf5uljng/FSK6iy970dd+cvKOT2Mcr2Ya1Ym0NzblGZ2t0lDPiTQfNR7gN+DUGrbmak/vOXDR+2Ylne5F9cPCWL3KeTWTCFxP09JJ75ODbfhZXOWjO3mMHfujM09u7C1+5QzZauWwdP3rgJZb8Sp49wd/o+cw+tPfgyDH0aPneHncNh+6t5yPNTc1bOtib5smkK3x2iE/5JB/tFz6IFg2cckje8DceRuelBq9mr3v7Am8+8K73/uZnXy6oCSwm6eEd5Be74sIGvmCbe7Y5A+nNZmcO/egkNni5518NXjHVYLa3+UYjWP6Y/9u//dubZ5999pInDoPBYDAYvNmwBvBgMBjcXVgDeDAYDAaDwdsaNDIUeBVqFZbdK8oq9NZYUgx3r5mrKOwZjmK4ArImg8K24q/iMByFZIV0RWqfndBYd4XrSlbFf6CgfDZ86KQwbShmK9DXQAKK2wF+XRXdNQPcG/AVo9Eq6NNDI01DQHGafEVwhe+eG2jJcb19b5z/PzBf4aNYj7dnVw0jQ1EeT8VyeGyPxhXwmQaGNTGgq3t+9TmUfIVzvnGfjg28DbayxRWOe/L5hZ3pZ4Trnh1kipHc0DypKSKWeOHDHnKy55SHNzn8YI5eYs1OTQC2xMs6fI0G+OjlYV86aKDT6OEL+aSZgi96fjDKJ3L4TjMp3YAGhWaUv1bARk2QfAEHP3rlQyCX4Gi4yHm+4BO47tNHjvIXejoBetOBX/ksPU/bPctR8TSnUUMO+9LfPDwAj67m0OWr/Fz+ayDxg3jiZ9jDfMge/NhhH5CFH/p01BjD03yQjuSnT3HKZvrYq+jjZ3/wPTnR0dEwJ9/FQ8zoJA/kA3oDT7h4kAfwYYu4ZIM5+1Guyj88wqWDUeO2s4F/+IlPyEYrj8RUjtMlXHrAlb9iWjP55HN7no4GXvyS7M6MaORPOsK31j6CT7Y5fnLmyif2xSs5fUGADfIHj9bwwgc+n8sRfuUTvJKP3plOhrjyiwYju+DjhYZv7Et50dlhP8gPfrTXyGRjzVz87bv4sMc8Oc3jY06s6eAZfzaJtya0fSFGZJNJN9f2jpx0z16645EedBMH6+TJVXPiyid06R1lHW82kYcO0IX+/EYnVzjo2Iwf39DDlztOHGviJN/pBJ9+mr3veMc7rmdNYDnDX2IhP+D6da4Gq3y1H/DnC7/U92tcfhIb6/DhuGc33TRx0eAjP9jKFsOfdDZPb7FlL+BvfnQ2+YUxnbyDNH7xJc+fBM/n8hwv4MsmGs54fvvb375sHAwGg8HgrQZrAA8Gg8HdhTWAB4PBYDAYvO1B4VxhVuFcAVnTQPFZ0VaRWuFaQV7BWwFZQVdRWfFYcVtB2K+LFIkViDVtKnQrChsK+X2OUnBX5FZYV+THi8wK2jWJugI0aGvMWTvXFesNQA9NEvaY86soshXSa1oowLNVUZu+7HVN35pPRrL8WldDgC/w4A/8atK58on789qIf/itN89HfOHa4Mfo6W+Y0yDgDw0UV348dT7HKeu8j7crWXzgvlHznb7sT56iP/liEj9y0jM/xJtNfJ2Prcs5/tN40GCKl3X5RBY5mhhwavrgJ6ZypWarpoW8LKbpIkb85SoX4cgNMSNLU8Z88ZTnZJHLTmtwNbKs4Z+O5thDJzw1VzRUNHLQ2zf4y5dswVvOu7KfbmSwwz1bxYF9ZKGjF9xijA+/0RktPA0YjSt+MC/X6Vf+iB2etxu4NdbwRkcOvWvKWueHmloaO+Kgga7RI24ADXpzbOYzupIB+MCgJ5pw8Sz2fArEXm6RW1OXvGwVh/Y5XL7nP7rTWdzNO0/o2xlgLj3EwFzz8UDDZ2TycWdS+PDM5zd7Iz74mkfDLnMAjXk2oRGbaMxHw2dw6cMO8cPTfDrGy0i+ObzoJYfozlfO5filM9vEiq10xN+aUYNRbvbFA7kH2jenHDqag5tvmxdLzUG41sQyufRgU3tSPrEfrfcKWrnDPvTiKl/wMS93NWr5x7lS3Olmnd5k8bN7NjUnN9HhIeflknk4dOM7vDuL6Icv2XBPOoCWPZ7R1Si2X+RqDWb86SMH4OINh/7yHx9NXTGRb3D5wRrfdE75ZS8a+wh/vOHjyX/8nE+cBXKA7X3xgH+85zXExU5z1n6Bx/b+n1/60M8XRtyTUePYFyzo6RlfNtKFbb5kRQZd6U4H60899dS1NhgMBoPBWxXWAB4MBoO7C2sADwaDwWAwuDOgSKsQXBNEMVuxvsaCoSDsl1oVsSuKKyQr9rp6tq747Orzk6KwZ4VyeBoNitzWgUK7QrdnBWwy8aoxRLaiMh7pchvwaGgeKEYD8mqkKNIryLuyUyNCs4Essg0yjeQomuOp8E1+I3zjpEeTbY14Ns65E++cx6fnUx5Z7DE0IenGRj5jj6ExoTHCZ/yPTsFfbODW2NP4yM/J8iyuyeInPiJPQ4Je5snRhNCgwY9/ycRX402zwLPYk4MfXnR2ZRN5mhHwrWtCiH+/PqcHnBqU5LBBE6PmCl3kFTnh9etHcqzjrbGTbYZmCZtcyZDbNb/g0QtfMuhnzi/f5E6NG7JcNWGyAW+AJzuKk2e618yGa+6Mgzlgf6ClWz5gnwYWPEPcyfbrO77gLzrbR+VEzTe0+HgWE3qQ
i69mzulruHT0TB+8zNGn5qN5eQDvbITXqCsH2UEWHc6Y5lv21fAC5snrV5t0BfYzWnsQkA3XEI/2nCtcPsC7Zh08PjJu86AzfHzIBvDEGI8avQA+veQWPs27kssmPsYr3vCjOZu25W/+Ey95gK81NDWg8Xk1XuZrlsLnJz61lr750dlNdvKt0ReQHU/86MYetPwI34BPlr0lt+GJs+YhvfD01yLkKL+KpSah8wkv/MlCS3f05uls/7IfD/PyRj7VBI6OHp13/AMPXzz4M7uiw0POalTKWbrJaXahhY+3nGBzX5agt3Xy4JqTz8XeGl/RAW/y8XDvV7Dh4G+teMJxb+/Sm618DzRW4aDV7LU/0fM5fVw1V/vFLz74sdu7yj4Dvjjxzne+87Kdf8nA1xkrTs4S/pF3dGMjn5DHZ+j7lS97NKnFnE7ODrrTy5eo+As/utPnK1/5yqXLGr+DwWAw+HWANYAHg8Hg7sIawIPBYDAYDO4UaHoo6CrmK3YrBCvyKi7XKNAUUdxV8FXcVmBGpyiugGwdrWKzQrFiu89PrubgKRjDNafArPisAK/AbV3jwBo+gcJ8zSM6WVP4VijvCtwryteQ0yRQ/PZMX1cNl5p5itrwDE2k+PoVMb0U2dPlfxs1oLp/tQGna/fGyeOcbzRXoy65YqOgb55dNT0NzzVzaqDxMRw8NG5qpGgGuBfnmrj485P4iQ+eaMk0rCfTvXXzZJJFJ4CeDLzFsUZm+sOXa/iw82yw0ksjAh4aessPPPA3NIU0UFzxtH77F8pkk4Mn3poa/clX6/JKk4d8c/KJTuTIN/R4oe3PR5MNj03W8IYnt+ni3w7FSkNFvlljU3j8VIPKwK9mlmcNGzI9ixc6g0/ZLE75sr1QA5x+nkHxC09usyE5xRlPc+jCNcjJN/Tjm35ly8bkyB1+be60x1y+Fgs5ySfwzCdLHFzjCVe82M1XAA82iDXeAb3izRZ8nGlsk/t0xiO98DDCvc1DfNIFvlwQb7GmZ2AeDb+jwSM+5/7DqzX7jY/9lQL2syOd0OGJxjN5/Cpm1pxPfBcv63IVbnLyM/n8xH90PuWjs4fZD/+ksZ496WSe//AC7uU6efR1nstT9vh1aXRyXq7gKQ7m+oKApiy+dJGz9oV3BFutk2HdM3r7wTpcfOlOrnV4zho46Mhgq3u+1qDs/JGr/EEeun6NLr7u8U4H+vKDdfh8z2YNVLRs0pjV+BVHvOHzZzbiYT9rpobDd9bj4QpHg9eahqvckz/yhE/p7gs9mrX0YyPfs8U8Oo13+Pa9s5MtfGTNXuIH+cRPZPgrHnzrL108+uijl7/giAndnQ/FkP2vvPLKJdOZ/cILL9z88z//881LL710/ZJ7MBgMBoNfJ1gDeDAYDO4urAE8GAwGg8HgToIGr889CseaA6BnRWzFcQ2AitruFbsVixWhFbYVqRWKXdG6aiZoRCjcK6DjpyAdXs0RxWeFdXP4K6Ir+MMlR8GaXorxNYlc0QM86KCY7rMbWWjQ0kHzhIwalwZeiudkKYTTrbXWX22Q3T24Fz4cQ4Gd/1wV7tnO1n5pVUMtmpMHvv1J3NsNzu7Pwc98aPBX9+Y1FAw+qInbmiGW+YFf+bMmAj01FpLrGu0pxz0Z4sbvcM3XlMePLf0qz4AHXzPEM7htKxy60UV8rUdvXQzxJYcd/Hs2jK3LETnFx5obms7mNUTwRVOzJP5w+UKOp09x5Q/2mgP04jt24kcvPG//EpZcvtdMsY/Q0V2uorWGVrOG3fkRrVxhnz3CVjGqyUln+UwmXjVa0bJBY44c/MglB08NJXzoUSPNXDzzb4298hWY51f7nSzzng3yPac7e/kWD3rIBfuBbzSe7E88ABy+oAfeNaPSDW8APx2sZVu+dvbwj/h1VrDDEGsAL/+gpwuZ5gy60D25zZNpXszJ8JwuchUvZxAac3JCTlprvbV4sZm/+EezDyTfla/4TG6Qac5gCx5sIEduwrWmWXrK4B9r6OmLxtqpl/Waf+7RyDt7gqzsxP88N+wHcaiZS551cZL3vqhAn/YIW/CCb92VvNb52Ty/4ds9ee0xc+yVz/wmdnSnW++hmsDyQq7zu3wnp3NNE5h8OtEdL76hbzEpXmSS/+CDD145Zr/BwZt/yKYDu32ZRd7D8SwnyON/v5xmv3h6B4u5efsQD/rAcW8vahSzg6/xpz+d2Mc2ea4hy24ynAWGPyfNl/3KF39687WY+OsNzmTxY7/Gtv/Xn/1o2EXW17/+9asRTN5gMBgMBr+usAbwYDAY3F1YA3gwGAwGg8GdBYVmBV/FZUV7xWCFZ4VlxW5rNZ0Uo80pUCsw95lJw0AhHo2it+K7wrniOHyF+4rwntECcwrtmg8K2PDNKWIDxXnPRqCYrqgN4APrdFSEx18xXZHbvSt9DHwV08n0jN4w38DfnOs5b5zzCuQaHRoUivLu2aVpomjOn+ymC/vSKbloNBXQo0FvDg16cdAk4Ed2wONv8tMBn2y7PVq7rX/zrvnWsyv9+EYjJHw20Yd8DQBNi5O/K19Yb00u4MVu93DYI7/yk9hqwlgzxJStGhHw4pd+gHw5CN+vPflL84Qcc/yCnj/5TL6RQX888YKjGUI/OpGbLjW86MEHmjbmyaLP2WBNJ/Tyn3xz1szZI9lWvGqAuy+HNUDNwQMaNGibQ0uf01fywr5jR0247Cin2M8XNafSMTli4BfO5SSeNePsJXzxoQ9faDLRRdOIbDzxTparHBejs1lMH3R+qShOfF4zDb6cDujADnF3BtENH2eQe7zPhjF8vOgG0KIz6CxW5uDjYTifagwb5ugCX3xPPnKVXLaaA/DN4yNf0PGlhhpcdHzHz/yAJz3RdC401zw+ctOvPtljnhy5yLeexeqkwQsN2WJGR7TZT0Yx5Ht6mu8LMuHZQ3KLHPRylOx44ZGtzgZz7sm1F9HTSS6iMdDg5ws4dEwX+Vgeus+udMGXvuLlyk5+jZ6s6ORB+Sqn7BHxphf95RU57JVP8ggOG2qe0p+schtvfoKHBzx08pwOQIzphQ+/uAJfnIDr2f6gL1wNXn9GGS88nYdiRze+ogMcMYLPb/iziz/gslVzVpzsVXryhUY0HLLo7d1GB7To/FL3ve997+VzfjDniynsSZeXX375WjOPz3PPPXf53hnwxBNP7Je+g8FgMHjbwBrAg8FgcHdhDeDBYDAYDAZ3GhS8FaM1aHwGUoBWkAcKxQrDCuQK1IrPmhgaBYrPCtHmFZEVpxWrFeEVnTV4DI0pBW+Fdo0KBWpFaPj4AoVwshXzgUK2ObppIgC64ONqXvPP/z+okK1grQje1aAr3cjBr2I+fHN4NW/Q5byeI1x/bpN8/OmKl6s1esa3673u+c/wjBbdvejFgxy4gH80CPidbeKiqM8mfkfXn8HlH7ERCwX9YqVpwa/4n3oZp3z35Ne0ox89yDNqEsoV8xpkmoU1DzRn/GoNL/Rw8PQM4Kezeetyhbxw5FS6km+NPPLhJJON/MInGinkxcN6zVOApoYPvoGcTD84/Kc5RRa55vhO7Mn
jT7aKj+YPGeg1p+wlMvCqeawBJC5ioLF3NkDhGdGSI6b8iBavbDToZh/xH79kG+DzYgbsY3vglAPYYw59fnCfHc3Rhw548D3e8Oij6ecZLt3pbN9no3iYoyv72+uAXnynYZYNdMLHeUG3gM9r0prHB655Aw90wNUcfTXt4IHb+M17Zhf823yyVx6Sa86Q9wa90WvcpntNW/aIW/PsRVOe82s6kSNP0dUotC73omFL8muW9n/w0i05aKwBeyeb0JCDnzNLLmqMik00coZu5Des2RvsYq/Y2GPozBn4ec6W9JYLbAJylR3yxjwauM2xX7NX7tGtBqiY2wdy3ly5Rxa7O8/QwWmv4G0dvvjZF/IGDl/xi2FObvE7QFteshU//tMwvu+++y4c+4F9csb7wJ9xho8vP/A1Wvd8RUc8xZQu+KGFI7/oiQ8d+YXP+JCf+IAs8fdLf/G278jyjL/9Rif37HMe4c+f/gKFc4x/+FFD94EHHrhofHEHnga1eQ3773znO9eZRZ/BYDAYDN5OsAbwYDAY3F1QBVkDeDAYDAaDwZ0GBWSgyK4orWCsqK2Abk5huoK8Irkido1HxWS4Ct0aAgrqitg1IhSyFZgVpBWZNUfQuip0G+4Vr8klU4FbcV5joD+R6V6TAp7/g1ETQEGdborbcDRj3Ctss0mDAQ2eZPiMV9PKXIOs8/4c/hSt4n3NE3yMEzfa5u/1fHvtNm1z3YfvSi5bXPkou9wbfMgf4mNoPrDT4At0eKERI00KfuQ/jSAyNAvc08Gza3465eORHvDgWJMDYuAZ8LVYyA8NFM3FbJM/csHcT37ykytWGjnW0Is//jUTrdcAMshlm8YI+zzjKdda9yxuNUrZw0caJHhpgMhlOvODuXQnP7r0AXKYXD7Av0Z3ecxmuuJZg4tccxo0nvGjlz3gmU5yCw59NGTYRcZJ65k+aO0F+4vP2Eo/+vAzO8xlb3uNDL4Ud/lRTKzxhSZYfiAHZHexMczRQXPMM3pAHjx6sjH5dCWXP80B+WfeOp9pFhuaYnxX8xSQBZ/fa9ThYx49W8J1rQGXTHMGXKPGsMFn8OUQf9xu2tKxhiB5ycSbjeJHJ/jRyAO60gs+fs07I8XtbPTWmKUDGnzMwxMftPFKPnx0aJIfL3T2jjxvDQ8624985xef8FvrbEPDP3jIRbntHp1YoyMfjTnxTg/2iaH8wcs6v8lfPvBvbL5Ei6e9jZc9y5d80xcYkkk/MWSPPGl/0w2tNXRkyju5zmbr9KoJzCY82G7P0sc6/cyhE3v39GULmzRCNX7hwcGH39zTj534mKerLyThYb/iww454le+3k3uzcEVW7oYgM75lG+8Q9nn2rlg3ZAP7Jez3o/4+XU+/6CnH7+ghyOu5sn2q197rJz/u7/7u5sXX3zx+pXxYDAYDAZvV1gDeDAYDO4urAE8GAwGg8Fg8H9BwdqvihTQFbkVoStYK9orGJur6G8o+GsAwaup0C/i4PlMZb0GhmK3ZwV7cwrRmiwV+OFU3DeHB16gBoIit/UaAHQim574462hoNGFPz6aAIrzCvnW6GDOULBv3H7OHzW0Wo/29oju1da6nveu6XPqZdxL1+a68j0b3Tfg3n7WIIHr2pyYiqOmgmcNQH7jU/Fwz69iwu/oyIWbDM+nfM/4GhokdK6RE50h/hotcIBmhMYF2Ub/PyV+csC6fEiuES8yyi+6aprQnc54sdG8Ob+g80w/fDVKxFgu12yRZ3xBHjn0l2/N+XOr8DWNNHLOpic4bSUj++HgB89Vwz1aAK8GbLTsQpu9nulCD7HR2LN3a56yGb9Tx+TggafYalLxAx742ofo8bFeDpjjn3SiIx3Eg654pjtfks++YtLZQQ/QPBDjcjK74NaYM/ClJzzzoHm8ix+7DDzkVY3bIF3YU0zM1Xy11tln3TO5fdHAPBr8a5jSqebfKZtO/MrmU441NHIf7/KcDPKs1USFY86Qd2xBY69qlKYz+eRmnxy2BheNOXLwJMc83mgMz3JIDOhMX7kVX3ob1g05Icb8i5Y88bC35Fn7jI50JVveZHP56jxnK/3QtU4H82JBFn3Kac/oyCMHrb3B33R2DjlD0NbItV7z1jpwTuCLHi5+0YH/w96/xGyX3Xed99NvD/pVDxh1CIRAoqTssutoWz7lhJQQgoTECwpKFEhCGgksQAKUCVIPIiEx6AkzxMQTegBC6RZCeaVExCRO+ZA4MYWr7Dq4bFcFGwa09A5aYtDqwTvofHbqW71y5yniJLbree7795OW9t5r/U/rv9a+7mvt37X27Vyc5at5RK+5wL+cmgfI0+/4ju+45pa2ctM9JUdibzeua+0+O8Qoz+BekrP6rl9+7KSeD/74FYecyZUf0pAx5u5JZPO73vWuaz76DNA3+fMjD/e28fi5n/u5e88999z1920YhmEY7gJGAA/DMNxdjAAehmEYhmE44NWiHmp76O1hsYfkHlp7KO9BtQfUHmx7KO1BNpJDHfgO5cEzAgA8lPbgni0P1T3w1oZQ8PCaLB32PcD34LsH9pFlZCJYPFxXXFcnLn7sMmZD7B6CO0dYIB9ceyjPXkSW4uF4xwo4yoOH/XSSU1zfrHuz87NU71i5XzzJnXX5y3dF3hyTO/XOUi5v1tF37ihXxglhIYfyJa9IKbYRKsYZYYGAME6nvfv5zycYp3YhGl9kRjL8yzWffIN5VZ+BTMSJOWMOnjboiR1pQ5YuPwi2Mw7f9dMxfyAikp46euZP5Bd/Sv8rky9y/CHnkDHyqc49EqmkrjmLnDH/kTJySQaZ4x5zLyBnkGDqiy9dRJCY5I6OcdJ38dYPhX3yYtTmXIxySa48iI1+demLy47C7lt9ERd5cRo/968641X+oVgj6BTXxsuYRKJB9y8bgX+fNcaVH3aqN+f4lCfzrnrxle/6wCc7fGa/erJyqX/mox3dJ9FKvs8VsmInK4f0taknaw6Itc/A+kQnPX0mXx4QcfoGztlNhx3niHnHYtZ3OvIs3+I9/biWF/NTf/ilo84YyGPxi9vccJ+xabzNGbkib36ZN+XEtTyRcc4vH9r0iy02zCFy6rXLo9j4UPocNobuK/eaeMsH//pvjkf2Ns/J8WXc+TH/mht886mtH4iI3+dT9xf75NkWF9sgJ2yLkx/5ptc5Xfb4k0+6fJ2Ea/ek/pVX9vwtQgjrpz7Lm/7zp5/GsftebsuT3PHLLn3j5w0U6o2fc7pyJA6y+mZOkxWjWL1Vgb68kRODufrJT37yuh6GYRiGu4YRwMMwDHcXI4CHYRiGYRhuwMNiD7sRG5FdN889cEZWeIjtobQH2Ig013Y6edDtIbyH2R7cO3pI7kE4goGOh+vIMw/vHe3S9YDeue9jZNkTi/NIGDEoYkA8eHAvNnbZEF/kGD2+PaRn27VC9+YRHD3Av0kYJ9d55Wb7WaeE+7Vlo+IBP9LBbi3Egrj1Ty4jT+iJ62ZfFK9xlje5ra3283gWthT5Oq/PIndIGjnOr7iMpzhPQtirSMUgVv7II0IiWNUZQ68lNc50yPDTuXFuhyl5en
wXYzaMs+v8IlOKWTs7yBLtkN/sApnq+FZvbom/uECddQJb6ui5T8QQsYOQMU705Mbc1YaosjMeOaMfbKizG9k1n+w1j5FIN3URVsYhv+43fRNLfaGvH+rEgABCDpGtXj/Un/mFm/kqZ47ua3Glr9DVZ/3Ub77dMxG99NjUZ77Isg3q3dNii9AVh7j0PcI4X2TZQboZg2IWr7ZIUbKObMiRHJIHbeWVDDK/en4VOXbNLn92sbLfDlxz3ZyN6CNjXhcrPfVyop697mOfic0JedQmjuLVdsasnj1jJ3fmPj/q8qMNfI4i/Gpjg445gBRUx5546dhNqk5M+qcUW2St8fT5HqlIzxzQr/rUDyf4UkeXPFnzts8rnxXya6z10dwxlvzRE5v7SF70RbvY6RlbdWKMBHZfNA/4kQvXxqnPAPGJRbv4teuTHGbDGLBN398qPvz9QPBqo9vnnr8JXgdt7PVBXHLIjrjMZfJ+QCEPXh9tjtGVW/2XC8W1OMjTe+KJJ66Y/DhEzuTT54g8iE1Mjz/++JVrfRSz/+VrzvEjFueI38ZR3GJ27434HYZhGO46RgAPwzDcXYwAHoZhGIZhuA98H/Kg2YN6D6c9HPdg37kHzB5OO6pz9MDdQ2gPxz3Q92C/B+0ewHtYz5YH3Oxq64E9IkwdQgGZFNlB3pFtPuyaYhs5AGwgTdngw8N5Mh6UewivXYlYi4D0EN7x5nnXYmajB/bJ3Dxnu+uzHapDtojJw365khfEhnjrk3MP7vl0jnwQr6MH+fWDLCJBjpAEyAF1bNKXL2OAVNAH9fJKRju9iFvn5CJpxFp/lPP8Zp08nvlVxFMfwHfqdMpFPoAceXrGJ0KHTuSXcRcvGUSGcc5mPssB3Xw6B7nqxwrk5Ubc8q7IkRwgUMxbYyQviCNz0xzPlnjanasAe72O13gBeXHKkXnqmhzy03X918ezzwHBWP/oKfWBHD33ohgbY0UO9SNSmKz+y4/4EErk5VNe+dbf5iN980396Z+/s9/lg7xYXStikkNzPFLXDxnYFIM+8CPvxeB+VQ9sFK+5oX/8Q/URw5A8v+aGGMVKh2z9zj6oc3+xT54sO8aWLSSZcXNP8K+++5Rtn1OO6vltvkcGKvIpL3Rcmx9yyhcd9uTJ3NUXfsmxpU3M+VbErE29XKpjh31jTYeM/rClqKMjX3TKU8SsmOiIW7zq7DJ1b4jLOCOH0zNe7gWykcd+lMCGwq5Y6Oqn8Xc/sKuePTbKofgU9sRdW3PP3EqPDfnwOSYe9e6j5rDxM6/Fya8xdP+pZ0eOtcsHefeBOrbZYEuOvPHhbW9723XuM6H5YazEJ/9syJnYkauPPPLIlWMyIH/5MofkylyQC7kxZvrvM0a/+ObHD3fa+e21zMhevt0/cm1sXn755Stf9B21IZv12xsuxNDf0Wefffbexz72sUvGvTEMwzAMdx0jgIdhGO4uRgAPwzAMwzC8CTzYt8Moss/DaEcPve2a9IAdieNhtofuHkZ7CO3huIfbHko7eniOQIj49RBdvR1SbPTgXJ0jEiZ4EI4A8NDe97Qe2nvALzbn7Hm4Li4P15EyxexBv4fwYmTXNV3HzrvuyMepf8revD71g92ociV/Hv5XIgWLg/3OFW2d5zsZfVLUiU2dNjqKc0WbfBgXPuWeXjaKg6xzeROnsTGmxrFdvMaMv5v9Pusc8+/aEZGBRDG2yBO5j6zRzv8ZP3mEK9KEzKuvvnrNI7EbU3OADPLntEG3OLIRceSHARFH5gQCRhz6RF8Bts0POeMPzFvEDHl6+qA/iJ1iAGNc3HyCOOi6Fl+x6//ZX2QWOVCvjr1061NyN+2JubEVD11kJzLceCKJzGP9Qvg1J+q3Orpsss2PdnNAf4pJ/UmGK+r4a0zzX1zyFJEo/+5dNs23+uCzAtmmDdhQj1CUf/oRyewi3aqP1C0+OsbadXXsyw/77BZj9s377Kt3zbZ+s1+e+ZVP99Xpg31t3Yvmij6SUc9eO5+zl3/5Ma+NiTq2yBkTsatnw7z1oxjjQg9JKT5z8CR2tdFxpGMM2GeDjpy7F91T5Bof/eBTzOVRmz7R8RkgR3zVJ7kwb+RWn8ViXM07ueZfn8wNMmzy1WehXLFrTvOTHlv60pwSL9/6q42MucRe93gksNyyqc21fPGjnh2+2ECk6qe/T9/5nd951cuB+V0fy6O/LWTkhh2xmzfsIL/FjuD1N4yu/rHFr9jkUcx29eoDHfXIWjmXS37cV/LFNltkEMLGj3+5a1cym3zyYSzZEqecf+ITn7gebPvsFMswDMMwDP8PRgAPwzDcXYwAHoZhGIZh+D2A/PIwH5HRw2XnHth7MO5Bup1XdiR5WO1BuO9UHraT891KnQfiHoZ7uO2BdyRDD/m79sAdgeWBtzaEAH2lV+x68M2eB+liI8+unVRIhZPciizw0L5rNs/jee6BOv3q71du6lXs4BLX6fuU61y/lP9auyM795OrzvHUO+UdK9VX5CKiJ13EiZzLJyCwkC5ICETKSQLeLNlQ2ESKKPJo3OjLK1vGArlz6pBDwGh3bc6d/swJsRhbY24eZEN/HCO9ssGf+WCOkmFDn9jVzq7Y8qUuRJDKET0kDRm+5cg8NYf1sTiQL2Ikbz6bk2JwfzgXl/j1XxzuJWSWV7eyh2yiT5cfuvKG8CHLhnZ9F0NEWH2hhzBsTMVMVk6SLZdnHpIVF/JQnPzVR6SY+7ixIOvejqyjz6Zy1oHPCOMix/KkrXq29VN9QIqJt88bYJc/9WzJJxg7drSJsRjUs0MWuZd9bdk319lPh53su3/ZqF6uzQVjrV7hUz37xkR/9V2b/Gjjq36oU/hQmltyLOfGnrx8yJGxiXxU39wxj2/aS8cYF6N6On68ILb+J6z+qGNfjPKmju1iMc+QpPTEUG7MR3ra2UJ4m3Py6x5Tzw69SGvX9MTmnF7rbeMoB9rl3JEv/WPD3Kenz/yIhVz3q3rzu3mi79rN40hgcC5X4gPkKT19d6/Il3FXJwayPjf4YdMYOAf9FIs49R8hi1im757RPzb9kILdPmPZFad2O3O99tmY+XtBhr5dvvoon/yJRxzG6bnnnrvqxWPOfeQjH7le7cw/X8MwDMMwvDlGAA/DMNxdjAAehmEYhmH4KuCBtIfXHlSDh9IetnsQ7twrQz2I9tDfg2oPyT0g99Bdm6MH7jeJX+SJY9/BtHkgTteDfG0elLPrwbw6Muw7Ij+QLB7Cd+5he6QfIoE+vR7Gu1Z/s6jXBzbSrZzX6TuexW47D+hPv+mc8jd179eWjdMvnMd0yJy6lfQq5CrVJScvZxvyRF/kwjkSRA6ROIgl88E4n746P+1ny7xBejlHjJy7Svk+x4UvY5199ugqxlUdGWNlxzIUe+dk+EEyZdc1u+Zasbk2r8++sH2TKFYXGZUtpZ3vyB99I4u0kTfzzbW5Sq4Y0zVnEWtyo//mLV3X5Zy+e+H0yx9SS8zVk60fxUyWfbL6rC4b+h0prEC+mgts8iUe97r27PLVGOZf/Um4AjlElnxE1LHBf/2I0GXDfa4Y6
whQYEe98ReL+tOO8WK/ONTxW9/VnfbFw2/15OUqv9Xzq9589/nCt7bi8dknpnIkHu10tKv3uUfeZ6B5QN45+8hI8vToNI/MbfGpp6teu7jFp07M6s0bcbtfk0Xy8kWeHp8+X40vOXURx4hWcYi/GJQ+L7X7QYRcRkSLmX8/ONDP9LTTMSd81otBf/g2DuISo3Of/Xzyow/65chuczsSmD33EV15Md7ds8ZZHV2xsOmzSj+Lw1E9G8aMvHp90sYeuBaXfumTHxrd3OUrXvNBTvUDCSuv+sm+/PgfveSNI3l6dNiSH7t8vULa+Kgna8evuP0dYU9cL7744mWTPlL+X/2rf3W9oUEeh2EYhmH46jACeBiG4e5iBPAwDMMwDMNXCQ/GvXYyosuDbiSCB+E9TEdEeIjt4bwH2x6c02sHmHrXPZj3cNxDfnace/AdEYw8oM+mh+J0PLjnH1Fm1zFy2YN6D9KRKggXD8zZKk56SAJ1kRM3Czl9o49kQLzoV21nUQdnHaJADEiLU+6UOevOY+eReecxvQqkU1HfseKafrZO2ZvlZvt5XTEGciMnySBckCmOxtoYn3YrZDsqiC/5jcCBUx60I08QjO2sjWzUJ4SI8UWEIE/MOSROxMlNG/mNQCqWCDbkYTGqi4RSlx77+gjFaV7x2ViJy3qCnerY49N59pJzbU6C+jOO/CKu0gUkl3utfvDDh5J+OdIPde4NYyVfcoWULCaySCUE3EnMA/9y795xn7mHXXdvl49iPXOplEtzhz+onj31+heSdw+ba2LOp2v5RjIam+Tze5O4Zcc9yb46UB8xe8qzo16/fBbpgzrt4lQih7Mtp3bMnm106kOfjcZI3N6QwB4/5ixfbIgvm+rFRka+Tj/q+dIn42iczAPjIE8+g/SlHcVsikH++EXylq/uW/eQ+nyz23yOZPQZKx467InLkS7/4jM+Pq/NZXb5M78jgdkjo45d9uTYXNR/eTLm5iV98TSP5M54s+Fvgzb9M1ebf/6GiIsNcYrL3wf5QqyCWNhnwxySR6SqcVFPD5DddOVKjhG8/TCDnvyJx/iJDVnsVc/G2Y86jIFreROzv11s6je76vTtpZdeuu4rfdBnO4Mfe+yxq8/0ycqnPnnFs7drDMMwDMPw+8cI4GEYhruLEcDDMAzDMAy/T3iQ7qG0h/DggXUP9n2P8sDfw3GkiIfo5BEGHvYjIzwAJ+/hvAfs/j+iOsSDB+EdPfz2UN4Df3WIDg/hPRj34JyPHtzzhYiISHBEUHgor/AjturE5VzsCoiJPQ/nPdhPtvbzWH0lgqq2yinT9Xm8WeSh4prciVPP+WnvvL6pe1PnlP2vHe9XjIv866841SF8IuWS65yt024wTu0oNJYRyMBuY2sckCrGJxv8K42RdjFkQzHGijoEpzmFbDJWzs1J58ZbDBFg5qu5FmEZAS0eMfAnPv6yLyfq1SGKzbfiQhghq9It1nMHrXjN85PUArrtdiQHfDbf3ENiR4DJoXyd/dUH9517kr7Ctzr3TH2G+t04ivPN8ipWspF0/udyOYrQra9kxSbfbJNnU+zqkYHiEa9YnHcfFoM86KMxF7c2YKd4fB4lr45dJB9b6siyo7542FGvqI9gZSvb1feZYt4i9CJoa5MP+fQ5g/zzWcWHPCAZ2SpWOgoZ/ckm4lI9O/TkSx09Y+szTlxIR30pPveRNvkRlzaFHYUPdpor9PnWhuAEY6zv/IubP/1iU3xicc2XvDnyr15ejFvkMXlxkjN/kdLuETLuA2Mtbu3mjZjZpmf+OMqTdvPFeDV3yPNLzg881ItZvuRdv/2NYI9dfTL++gPt7lXn/kOcI3iNH1025cb9QFcMclQfxC4+r3HmE1nb/YHENY7NeePOllc9y4e/dfLPx9ve9rYrN+TEKZ/sInqN08c+9rHrs2fE7zAMwzD84TACeBiG4e5iBPAwDMMwDMMfAIgDu6d8b4q0QJp46O7BvofsHmIjtLwOk5yH3+CBvYf4Hpo7enCOGDiPCAukgofvHph78O6hv6KObQ/fgW8P2T2Y559v54iLiDpHD9rF4EE/gpcsX4gAcB4igOmdIFMBMSiRUxGBlVNeuV+9a34U1x1PnLKdn+WmzM3r+x3Pcj/5m/WV2sF58SLvjL3cy99JGlZAjuRK/uUYGldgk8yppx1ZgmDSHrR1HeHkumJemB+IHHEiWJA9fGs39saO3eJjx7ykY56LlX9EonmsfwiyiErrh8aPLhLJHGfbq14jCslB/VHUuSbrmm51UB174uUD+EFenTGDvkX4sXmS2WTZCI2POvpidC+TBW18K5G66oqVf7lFXJUj91gkdCDTPapv9Pkj7xxhZjy6x/kTRyRs/tho3hQPFKN+Zts1u/TMG/MinxG69VWdEjFr3I2ra2Pt84RP/QQkY3Hxo78+58wHpB6b2ulrMwZ01Rer+shhvr0a2FGb8SMvH3LATjqNLVKQPW36ow3J71zsjtp8Thon9tg4dwfTQRobE0f1+k/WveyeoC8uNsx985B/sWsr1+rNTfcNWf2SO21yLz98k7lJAjt3D+qPPLMhFv7E0+eCPPIDbOifOeUof2D+mRvGRpyuxSlvxk1f1JkrfPo8MKfEQZ4tfdMv/Sfrs8zOXkSvGN7xjndc9pG9CF59QiK3w9uPL7zeWewvvPDCvaeeeuqyLQ596/8xaxO/uOTHTuFf+IVfuEhff1uHYRiGYfjaYATwMAzD3cUI4GEYhmEYhj8EPFj38Bsx5OG6h+kejnuwjpDwQN8DdA+5EQvqPBhHqrjuf/d6gO9IzjFyGEHoQT0dpAR/iAMEgDp+ERkepEc68+n7HPuIDfAAvl2QdMTkwf1JiIkbtCONEDARlOqUE3TTjwBWkq0kp9yvTikWJZnQuTaoXUEc6DOiQs5eeeWVK/fikT9tJ2mpTSmP+nkSiad/5czJWW7KyZNSH8wFhA4gQcUQMaToLz1wRJzYFRcRq+TDUXvkjbHXB2PIZjbAHNFPY6//JwmtL+wglG7avVmnnAQpyFevghU/PWQTn/zJtdj0QQyKnCCNkI2O8mBOOsoPm6eu/hgbhV3j2A8W0tV3dScpC2ffqkM0iZcvdeKWB/WNj3p1yC/zofsPkZUv8wz5lm3yYsofyAcb+hppB3y5V8VxErfi1YawhMYnYo9f12e9+4vfXrXLNxvaxKg9kk17bfw6l2evPBZjvsVd7MbCZ4XXwauL1COrD8bQGMiddm2Nl88LbXxpS0dbpHLxyLF5os3nVW3p+EwTR3HSieTVrtBpPpiXEcFy0P1GRh1//CAZ2dTOv7piNDfKrfruHzkTg89XPpI3d+W/vpLVT+38ia0xMB/c35HA5nUksL45l1t5Fb97Qez6xp56ttz/+uXcfVmc4mlMyBgD9VB+y6XYjJt+kmdHrv0dMzdc8yuGp59++oq3Xb7mh3vEjwvYcm6uma+IW6+b5s+5XPnBkc/k7nf9Ru7S9dlgnv3sz/7sRQZvp+8wDMMwfH0wAngYhuHuYgTwMAzDMAzD1wAebHuY7UG/h/PIEISMh+iI
HA/oPWz38J2sh/qOZD1U95DdQ3dHpMB5zWaEl+JBPtLALjEyyGP2PcRHmiEQkAQe2CM5fLfTxlZAEojxrIPIrPTYCmLVfrMgDNgj6+H//WQqZDsq/FennOjakf2TcEOqygF/9B3lVJ4ijpAx6h0RFAiHYtT3/CNqkETGypjJXT7kofjOOP9rhU1HYy0m/sH37GSCc3YRMmJCNhlL5I2xTp6Mfumfvqgzn7LJZ/0rp2wiaE7ikg4iSh/BmMojciZE+KnLtjq6N0lQcyRSSx0gsiKp1NFtnsqFfoI+IpLoBvfEmXM26CKSjCldcZgP8iQesuTEHFEL9NWJh6z7AciykaxrUOccMUZeLsVu3sn7OX5yVo7o1E/1xqd5ZJekHIndWCDYssEfefXGmp1yb6z1kX112RaL+Su+PmsU88w8lku7cAMdc1u9vBavegSd8fD5YyyQvX1WqdMPPvgWKx9sqW8XbXbkUZu4kY1kfY4hTcm71mYszZdk6OhPn2l0tKdjDNwTkav1R3z6QbZYIl3BGJJNvs9FR7LGwVHsbPHLnjGTd/NSjGxqpyumfpSAiFZHz/xTtMud8TZX1JFnlx+5FGs/FGFHv8jrq/ZIYPPc59G5E9gccdQnPxQyh8jInXEz54rZeOmjsVMvXgQrfX0jIy/GR51XPLOJ6GXz0UcfvWLUv+55Mu9+97uv/njleZ9FL7/88kX8qkf2Gk+2jBt773nPey5bCGbzF/T9Ix/5yPWq6mEYhmEYvr4YATwMw3B3MQJ4GIZhGIbhawgPuT1Q9xA8gsYDe/BAHBFgt5oH9M49YI+g8nDc0bWH80gb58gMdh2RBx6aI1LYRjBE2kRwkKHrobujB/rqFKCH6ED6IBo83D8RQeUh/dl+HhX2q0PEsKfftUNykPwZW22n/AkkrDwVu/7pqz4XGzudK2ze9HWzJEcvXddyGMEExlAbIk//EDbJOoLjzZLdivGOeLWz1PftiBXQP74bO8QNIirSkj/12TeGZJBFoO7sl2PEXcQluEZ8RVBVFxmabTKRmNWxFyktlmzJEeTzZh1dstlPTl35FUt1yRUf/VOXjHsM8aUfgd+b+q7NH+ShfLoX3HOOyLL0yZ8x8Qnq3L/0z1yTK0eNEahzbyLAzFPjyYb7GFlGz9gjKh3Vsy2v5IA/nwXqEYDqi63PFraKB+jUz3IP6pF9yD8xsdP4qu//3yKN82P+K/knq/CtHpGrTR/k2jnf2hpH80Z+sxcx22df8fpM1B8/nuFDjPXTPehzz1yTH+30fAawzy4f7KhDcIvNtSPb8ogIFSN7Cn/0nBsD5Kh71LwAMdRXNswRR3Hpr3jYjQTWRl9fjWc/niErH9q1iZ0NOSXPDnly5Pk0dn4QwKd289Rnnc9iMl6/zJa/AfSMpTZ9FgsfPovNPUVf9N1nD5/mIz/mrP4bF4SwNmRt5LC4nJvL4nn7299+2TFO+ucesJNXbOYhEti5+PkybkhhY/b8889f9wlfzzzzzPX/gO3EHoZhGIbhG4MRwMMwDHcXI4CHYRiGYRi+xvDQHJngYb/vVkgAD+Z7QO4BfOSJB+nJtGPL/0+k60G+B+jaEB4e2Kv3cN5DdyREhCuyAgFAznc5D/rJavMgX52H/BFbgNAERIH46JOvsB9RAWITyylTiYhg46aM6+o6V0IxJR+QCIif/IuleG7a6job92sH53DKdc0u+8m7RoYo+qX/xu/UVeCm3fvZQqAYe+SdPp8Ea7pAh6/mTzZrBzLaTxs3Cx/mj5idp6Muu4jtyFBygZw5EYkF9E7CU4zqkFER49UZN1DHlvrI4+TYj8SUHzGSFQef8kdOKe/lRh7lk/5pk+/qsqkOWaePdNjofjxjqs+Ra0FdJFr9zK5SLuHsg3MxuSbHP/LMvQn5Mx+0kU0foadejDfr3fMITkRisZPJVnHSUSdXPh98foirmNz36stjcfJBh5925iqITJ9J7LDh3FGhw57cKhHXxcwPe0hDR58VyGC+2aOr3ng4R+gaM+3F4vNKnZjUuS/B51TkNj8+W/Xd57Cd42QVftwr2sk2Fj4bnfNB37xzT5gD7VqWNzE13uaWz2E5YIM8n/IYwSve5kYEcnOGDBvaulf8zRAnX36sQJdN9xwClr74yq3cNMZyLn4xyW3yfW6xS1asbMuZHd9yEqEO7dgVl3OxmZt2/MoR/36U8+STT17xsWW++T/3/NJh0//9NdbKL/3SL131/dhgGIZhGIZvLEYAD8Mw3F2MAB6GYRiGYfg6AInhwbgH5B6g96BeQdIgCDyUd47IQNj0ul7fxRAJEQiIAg/y+x+JCBDwwJ2udq/yRAho45s+MsAD/wgXIC8G9Wx7wO+oPcIyIAWQYV7d6+G9NnqRThVwRCpoFw+Qj+SonPZBXoLz7CEcxJ189aetCpnOg7pK9cmccmfdzXLadY5QkVNkD0LnJHiU7JVHfVecqysXjvrKXoSiUv8V0G7M23EXktGuyHs/OhDfTZLSPIzgUW+MEDnmiXMyiEZzwNxDkDk3b+ufPkReqQvV9WOG+nUSxcVRHTmklDpzXmzsk1NPTp/VAbmzD4BUpOseEqvxELt+JFs8bLoPb5K69N0P1ReTUh/ZICuv7q1k7eLmtx91hPTdV3yDfHTfs+G6uKrX55M0rl7cfBgThKlzfTbmxq846cmJOH0OsCUOtoy1wh7yUL0YInqrV6dNHdvuP/PW3DIe+qSNLfNGnn3O1Gdt6sWAVCSvHwhBxK2jcRJrP3Kh11x0X4iXr+IxPmw5R/Lmi7z57nPR5xM/1fHFpnj0IYJXP/mXG3LsqI9QjgxVb5zd5/JLjw162vlSb36aQ2zodySwWCN4I5IhEpg+H/KTjDzqu1yLxRgaH299IEfG/ahd37T7bCbrh0NkfVaUV31BXquTQ38f6LlG/Do3h9SzqR/t8kXW8is2BC57cmCXr1jcW0hgfRa3vHctLz4XP/WpT9375V/+5WvH8DAMwzAMby1GAA/DMNxdjAAehmEYhmH4OsLDdw/UEV+IBoQSIsARuYJQQFggk5A8CAsP1D3sd43w8dD93P3mYbx2OuQVD+WRBO1YU4cY8L3Og3nkBR0P7CNqI9jEgrQRj3IiEg/JIYZ2+Cr0ziKG/Lk+ZU4gSc7jTagXDztIiGyc8d6v3C8m5dTJBtyv/X7XFbEgSBSxIXHk9ZRhXyF7xn62B4RPhCjclJUH+TYPzBnjdvpjv7ngCMgahBNdvlyL03f8dBpj1xDxZL6wo56euYIoQyaJIRJUfTGc9oPr06dy1gW6xVuuxKJOXspVPiLSQB0gRcWkT3QjevVHziLFT6JWbthDmtHVbm6TdQ+pc8/BGZP7z70pF2JjI1k2k1WP0NVX1wrSjx/EW7brV3lVf8rrS2OItOND0Yb4M1Z83K++XaJ0I3p9Npgr1YvFDtbyQF/O1JE17ghAYyN3xcuW2PJDx32PTJQfdptjxpFPtmujV12x+Rzs80ce+Dl1xOO+I+c+aPewdjHqk/GJIBe78RSzeuMbCewz2Rjypd/smy9yKd980HPtM9Z84iu
CWK7FLSfk5ULO5Ikf7eLXD+3q2Dht65viXlJv3pHXJi62XDeP2OwHB476Zr7Ji/54y4PPJK9W9jpvOfAGBfNGu9h9PsvTq6++eu9973vf5cPfJvaNoXkoV+JC6CKT2dc/OUIQi5MPcnIg981rffbDiF/91V+9YhyGYRiG4cHACOBhGIa7ixHAwzAMwzAM3wB4aO6Bu4f7HrB76O86MsIDfSQCcgN5i5BA/iEP6ERoOXrQ7uG/h/eIASQIQsUOOTKIvJv65MmCh/ORDAFRQxaJEMRIDxzpRcpWl93kEARkug5kKukoofPaxeM7qTiRC2fOgMxZtKXbteMp23nHZM4Sbl5D17Xx1w5A18Xn/Mxt8ue5guxB8CBQjJf+6jewJY/GkC3XyBtkDBn61WcPyJwkMR+O5pe55siG7/yQXXIRrNkyH5B6xkBfzTsy5iuCiS3zFTnWrtv+VzLyypEMeXLIqwivm3751J9XXnnlip88ffM54qt+nbr8NO6OgAwrb+rMHUQfsjKZ9MlqQ3Sxkexpl6xS7kGbfKhz/5XP/Cn66jpZhJlcRc5WTzYyNXmxmfc+C8TGVtCWrUhD9thJR77Yrt548eFzyLzSj5OYbaenzw56+VeKjaw2ZKI5b1zZ97mlXVvka+NUfNqyV3z0I471sbbIaH00/nw5ssFXNvlSb/7wyWZ1xsO8RbjS89nInnb3gXrtjWv/75aeeRNRKzY6YuPDOJjT+Sevrz4HThKYXTLNWZ+LyfJDVh0bZMA81zf2xSxeeXVvqScLYpUfthRz3X2HsNUnu279/1059eYGu3n1h33ksHnjDQvGmjyimB/zgR19EJMdv2yYK3Lqs8r/IObT3x79NFf1B+mLWJaHYRiGYRgeLIwAHoZhuLsYATwMwzAMw/ANAqLPQ3MkggfvHuh78I4oUOeVno8++uj1sN7DeQ/aPehHhiFTEBwexvc/Vz3MR9wgBJAFiBMP6pEokadIBmQEm0gMde3mdQ7881WdcwQEn84VD/ojgJ2zxS6y4JRLpjqyHcE5dH3WV7qWE312lLtIvdrvp6Pwnf9w87zr8zzcvA7qs62A+BGftcGpr12e6mcgqy/GDMEmZ8ahnYJs3CxskOHvlIHOtSOMIqX5kDeFTjYinaA6eQZ6EVjWBoGc8bX7L5Axx8xXxFpzCsFlHvIL5hMSy/xEOCLTzDd9cR05zbf5jKSi79o9QNecNo8Rh+KIeI5AFp+41bk/1KV/ykJ9PnfqqhNnpO4py4a2M0eRdMbwzBO7xVxMZNUZa0Rb/siyIT6fBWe9e4y8PGhTn1+fHeJE7p321SPtfEYYY9fyhciM+GRDW3rqyfAj5uKne8ZgfIyr/jZm5q3+6APb5oYxp0snIpo98ualseebLF3t/CjsmbP9GIK+ev7Y1S/xaKfPh/ZIZ+Oj/+YZGfHWB/H7HJELfsw/oCMf5i4ZY8lX8bAj/9rZlSN91Qf9bp6w03zXL8SodvX6iHx/7LHH3pjL8igGfsiQJfPEE09cY12bPsqnuF37zO9//Jq/fOm/e8L/PHafI2OdywVC2H1mjP0tkRO6CF7/u1du6Mq7e895r4Tub5Y2+UX8qjOWbH384x9/418TDMMwDMPwYGIE8DAMw93FCOBhGIZhGIZvIDzAt+PKQ3sP+T3YjzRDaHig72G9esXDeXK+o9HxEB7p4wG9cw/6kRSRSx7ys+f7nCNbyA0Egwf26hT6dsUhntSzwQdSQtEeGRMQMsgLcTgiBhAe4gBH5IAjgkBxXv9AXxRI7ybU19Y5u8VeXSV0HTkanOfzRPKnbLipX2G7IjdIIPlDHCGH5E2e+NOmnH2Gs/9nG9nG//QHzoFshGf9PGURTUo2bsooEUfJsEkHuVadWMidRLFrcurOmJXk1LuWB/MpOTAnkVjnnEI2RVYDebon+aqYa8ip5pI4zAkEIT+h/ldH1zWy1Lx2H6gTY7Jn7PqIiGP37Huy6UOy2YVzDBDOZxzu0chltsVQfO5d/XY/mksIdTH4DNBPBF/55Fe9zxL3GHIyn+rdk3yQ9ZkiNr7I95ni/hWPol5RH2HLrjiMl88U+l67rI0OO0hQvvgRmzyUE/YQl9rZcU9oF5e+GEd9zU7EMV8ITnkUI39ssaHdPca/zy5HvuggW8UI9NjWF/2go00M/Lbj9ySB+TEmcsCmGMVV/f1IYEft+mC++iGEXEeW829+OyeHkDWW7Okf33JC3k5du2vpiI8dOuT4FCvy1c5e989rr712/QCIPnm5cQ/xZ5zo2eXrnC5iWZ/lyjyjJ2Y77r/jO77jyoHPBH2wk1huX3zxxetV0fTpmM/Gtx2//A7DMAzD8OBjBPAwDMPdxQjgYRiGYRiGtwAepiMQPOiPoPBwHuGCeIgoQEQgyJAMyAH1gKzzMB756KE9EgERg/DwwB6R56E9ewhnpIFzpIN2tj3kV480QDIgDSLiIoXV3w/kskUvIFnYdlTElg3xKSfy92aoXX98R0V0QPWOZ6kPyu+Fm3L301GXTcebRTxyLw9kjIn+G0/l7PPN846dk0f6IGbU3Ywn++aOOQFiSI4OG2QiiUM+QLuxT0bhN9+BXLvNT9vIrzNmtszj6sgo8gBiTF9dMZ+xnrrFBuRu2oP8IrL4hvpxU1YdUg+5105apT7Xn3zdlIXiUhe0JXszT2QVcWTDNfl2+6pX1CPT5IUfhBwbCvmTtL1Zb4enNp8Z5oR8IEx9lpiTckO+fmmjQ67+V28u+zxwzyIO6WpD2prj+fF54Ucorvmhwx4f2fM5pD92nyI2y4c2NtjTRj5iOV+KWOj7vNPOPps+D8Xvs9Dn5vnDANfG0A9U2JIfZKW5xCf/ztmVb5+H6anTHzqufdakJza54MsYyCsZY+gov/TI+9wkSyaC17F+gs9la26xymXjq9/6ZSzkxm5hYFNdn9XmB3lzTl/LK1KcDePHpji80lk/xYhAziZdMcmzc3k1pvrj//iat/Q/97nPXf7l45lnnrlskB+GYRiG4eHBCOBhGIa7ixHAwzAMwzAMbxEQGggGD9s93PcwH+GBPIiw8eDeQ30P4V1HNHrYj3xBriA56JKzcxAxgWCJnPFgP7LWQ33kAWIiX5G0yCjEAZ925NHhCzmAZCBLRp0jfXp8qKOHSHEexE1GPcIh/eAazrqQnyBOvhTQXsnvWdLtKD7Qdr8judNfdhT2b/o47Xdu3JA0dv3JmxwZX77pQPKK+lM/sgmq60iffWOBbLxpr3MyXstqDL3mVd4iJyGiDmGljh3XilhdV8fXTRKzna3qxE/ufnWnLeBXHagjS+4moXzKgTZ1N/MiFmRq5CvkW139U2deIw5v1rNxkrGKOiSd++mmrPryUZ0xdj/p/xnD2bfsuofdf+7XbOib+JBxYiweYIOOejrks4UUlEf6kXzFVH/Z0q6eHX3iR7zOrft8PpD1eWGOf9u3fdulo9CJzPW2Aa+o1ydx8aONrvHnK3lktCMfSEU+zUNkrf67NxT1fXa5Z8xZ97fPRX379m//9ssPf+wYZ3G65oNu/apv9N2DfjTic1AfjR
mb8iIGsTqn0+ck+Lyip78+//gTlxjMP3WRwOqdi0uujEckMH9i4VuMfJsnkcB220YCk9cX7eTb2Wuu0mWbnDzzL1a2+dVPr3F2bec0++JgXx/00y5ffyfo2RVsh6/7hl/j9vTTT185fOGFFy6b73nPe66xQkKL+7nnnrv3+c9//op7GIZhGIaHDyOAh2EY7i5+BwFsMW5xaTE+DMMwDMMwfP2BgPDg3gN3D/HtAENuIE0ByYCo8GAfEeCBvof+gAwAO3zpICUQEP2gD7HjyJYjQoCch/3O+dQWkcI34lCdgtxQh8zgE4Hgu2LEFQIBECi+P2rThxOICDvWEA5eQ8qGOCGSK3TtWAF2gT+6kTWgrXZH/QK6rk9b2YN04GZbyN5ZQmRbJX3xyUF9NGaIo/vZr64jf8YnQlN9tivayRlPeTcuSJyIymJGIBkTcaiPiAQ2iyuoY/skXsmYK3SLgwwy7JRTp9y0Z+z1o7xV12umgV5ykA99qq5462cQB11z9CRf1SsnIe0aeWu9U+zZbUcm2fy758zZZKuvn6esHJE9x7l69xU/6vliVwz6Ql6bQta8sTPVvX7ap4PIPHXYQvQZX/GfBDAdbfyYB66rQ+ix5ZXD6ryWmI78aEcy+gyIIFV8BtHRbs6xa/4ha52TiSSWR7nQH59t5qB+8IHMVc+XNmOpXZs5YRxrU/jUv4hYvvgRg/pIbD+AOYlj/TJuPi/YTa77gR1Ep88/7T7r9FGc8ouIZc8c5Vuf2BWbdnYd2eJbrujrBz98G0dHttnQN7LGWDzyIG8+o80RNsUVwWscyoP/2Uu+z1K2yWvzgx+7fNnRb+dPPvnkdc+KSZ/ZpcOuuP0opLEHBLGx83ppMbk2vj6vP/vZz15+hmEYhmF4eDECeBiG4e7iv/2thd4/tKhVLHItFC0Ih2EYhmEYhm8cPGRH5CJPPPxHJiBcImA9mEdWICIcERge+ntY/4EPfOAiBzzcR2J44I9siDzx4F+da2QFggMxEQEcoUoXGYxUVB/EgnBwRL4gYXxvREyIDZEjJvb4COxF4oHzfCJTFHYUsh0VuHkUN6JCrsSABGOLXiRjyE7lJu7Xnv/Ogd3OT3m5UJIP5zU936sjOLNz4n7y8iKv+ToLmcbMNWIIcXPGImbn1RVDJCeoQygaN1Avn8g/dV2LRV3jqI6uuRaqQzoFvtXXbzElJw7Ihz5URza5clBeqgP1ruma28ny29yKuK7OPeTe6j6BfFWnkM3uSZyTI3+zn+y6b91byRZD9+UZBzKOPDv1y70eyWpunzrsm/vFU8w3dfgvxvraDzrcv8hCZGP3j3Y6Pkt8TmjXb/bouFcjKOkgE40FmXYJ6496NsxJn1fa+VdnLtHlS/G5pE5f6Pk84YsPej7frEcRsM75QG5rU9jyeUMPjHufc+yKgZ5xQKSnJxZ6jnJkvpmT8sKXGJCe9OTYGDV35YG8uNjikx45NsjahUtX/+SPzHnPkbF7mi0y7NL3Y4/GlLzdvz5/ydBDGMuTz2V9Q8rKvfyCPIqLrrkgV3ZqkxcbW2wj3/WPnHNx+D/C8vjEE09c/rz62WfCJz7xiWv3L5lhGIZhGB5+jAAehmG4u/hvf2sh+A8tBhWLSwviYRiGYRiG4a2BB/Ae7iOAPMBHeCBZPOj3sB7JQQZpEGnjaEcfIiRCxfc6+ggE1+whLch4sK8OgUs3Esf3QPXknCNLIoP4ZQOhgURQp5xANvCpsIm8QnQAMgPYQH4hirJDXrtCJ9mbOGNAzkRUR2ADf/fTP+vO8+KDm+dnKa6znCBT3SlDTy7buXrKnUierKKfyWUnGYSOeWHsvM71bHOukDnlzxjkOxl1ZIAMgktddsxFdXIO6siZg9k76yLN8hthWw7JVQeub8qZO+ojz8C1cuaFntjcG4hM0EaODXVkwFFfIlH5kYdTNruOyZ59Kn7ztrwp9N/sldEKf66rY7sfeiAr3YftftWm/tTRR/eheiQfsg5piTQlr6g/SVTELZLXPSx2O0flQPxsibX+k5V/56cOopE9canXt4jjfBUbe33WmJfi0yf1dIAsX+w51+bzymcSf0hupO1JOOuXok9idM6eHEBzx1ixqU0RC7vWuOZMuXbeD57NL58d7fh1LnY5l3/j7DMH6AFd+dY3P9h5+9vffvn3mUrH2LMhfmMsJn3S5vOKntjV65+4yCJuxYzg9YMO7UE/7OyNdPcaZw9yyZufYpRH42ZM+fXKZjuNzS1t/ncvklq/X3rppSvfTz311NUXr3jW149//OMXIT0MwzAMw+3CCOBhGIa7i9/xCuhhGIZhGIbhrQeCAFmA7EAceKjvYT+iRUFCRLQ4enhPDvFBFsGCBKCP5IgojfhFdiBGEB7aES6O2tUhvNhD+CCD6aiP2EI4JKsOqeBIHpHGN3JELMgLOtoV6NyuuGyf9Z3fRHXIiwgj+oq2yL7aT1unPW3nEc5zEHso/pv9OFHdTT+uETxyerY5v1myL58RTnD6VSLH7Cg8688C57XcGOPqXZsnpx/XirHnU4msi6AF8ak33sDezTrgI4IK2CN37ijOx83XXpOr7pSLkK1fYjP/5UM9ufpGVp/JAVlzxn1SnNntNdDpkzU/kWhnn8iKrXywrY68eS+fxUZOvXuC3bPO/FKHEK2NnZPMFUd9cd+7T+j70UdxujYfugfkIOL7JjlcnOr0NaJUGzv5YY+ONp9Fxg9xSY+ONn0lqx3J6D5HQEI7b8kjH+XP+LBHj5zPJH1XR8a9ol90+EfW2sXqqC/65XNPfpGm6ckJm/yRJePzjQ96xsnnZHriVRcJ65xv46+f7Plsk186cqmdje4f5/LAnpy5X8jok3O57HONvnnMFn/mpP/HS8ZclFd96X/z0vHZaScwGXObjP7QNR/lFyGsn/z6FwJk+CAjL8ZEjHx71bNc8KEOkUzPq6D1247fEb/DMAzDcHsxAngYhuHuYgTwMAzDMAzDAwgkSK9v9dAfuQAe3HvQjyhBoHhwj2SIaEXOOEeeRPIiNhAeCAPkhnPkFh8KEoMfPhBTfCCT2EO8RPa6JlsM4kK+ICm0K/QiotKD6k8ghsiwJw4ESf7hfjohHwgMhAeSQxz6AcgTpEd9Cdm+eQw3r88YTjs3bZ4yruUJafOn/tSfuvp5tp+6XVfS1RfjdrbJv/bGAXml/n52qg9yYcxqY0O+b77yWV0EYnV80Q2u5ff077q6cpiucQKyyUXMioOcuvKUHN1It+Ru1rl2L5iL5rQ5bH47IsXOeNjUP7LZ4Kv+IO9cwymbDW1nPs5YxeBekDsQW31AsBY/0hQ55yhec1cstbOjHmHKhzqkHR33iDb3vfpiV+++ca/pM7/axc8PolKJeDUn01Ovj2T5RKD+8T/+xy9Z9u1MJauwhWT1+aINgand54zPG589ZLQbA/2SE32MBBYbWXEWnzjo6ANy1XxEiorF3BEL/z6/ioUOu93vvcpZDrJBjz/j53NGPvW3XcHsy7dz4+uzhI3iaAxrZ0OfnNPVVh/IFIc5oL045QVh6wcbci1vEfzGVj/4p
) **Task 2**: Load the data using pydicom as a 3D volume and then reslice it! [35 Points]
###Code
# TODO: Please upload ct.zip using the file panel on the left.
# Then use the following snippet to extract the data.
import zipfile
with zipfile.ZipFile('ct.zip', 'r') as zip_ref:
zip_ref.extractall('.')
# 1) Now loop through all the DICOM files and store them in a 3D numpy array.
# Hint: You can either store them in a list first or read the dimensions of a
# single image slice to properly create the 3D numpy array.
# Hint 2: os.listdir(DIR) gives a list of filenames in a directory.
# Hint 2b: This list is not sorted - make sure you sort it.
# Hint 3: The dcmread function loads a single DICOM file.
# Hint 4: You can then use .pixel_array to access the image data.
from pydicom import dcmread
import os
import numpy as np
from operator import itemgetter
from matplotlib import pyplot as plt
# TODO: YOUR CODE FOR LOADING THE VOLUME AS A 3D NUMPY ARRAY
slices = []
all_files = sorted(os.listdir('ct'))
for filename in all_files:
    # read each DICOM file and collect the datasets
    slices.append(dcmread(os.path.join('ct', filename)))
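# Added note (a sketch, not part of the original solution): sorting the
# filenames only works if the names encode the slice order. A more robust
# alternative is to sort the loaded slices by their InstanceNumber tag,
# assuming that tag is present in these files:
slices.sort(key=lambda s: int(s.InstanceNumber))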
# build the 3D array: in-plane shape of one slice plus the number of slices
imaging_shape = list(slices[0].pixel_array.shape)
imaging_shape.append(len(slices))
data = np.zeros(imaging_shape)
for i, s in enumerate(slices):
    data[:, :, i] = s.pixel_array
print(np.shape(data))
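# Added sketch (optional, under the assumption that the RescaleSlope and
# RescaleIntercept tags are present, as they usually are for CT): the stacked
# pixel values are raw stored values, not Hounsfield Units. They can be
# converted like so:
hu_data = data * float(slices[0].RescaleSlope) + float(slices[0].RescaleIntercept)
print('HU range:', hu_data.min(), 'to', hu_data.max())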
# 2) Now create and show axial, sagittal, and coronal slices from the 3D volume.
# Hint: Please use imshow(XX, cmap='gray') to show the image.
# TODO: YOUR CODE FOR AXIAL
# TODO: YOUR CODE FOR AXIAL
plt.imshow(data[:, :, 100], cmap='gray')
plt.show()
# TODO: YOUR CODE FOR SAGITTAL
plt.imshow(data[:, 100, :], cmap='gray')
plt.show()
# TODO: YOUR CODE FOR CORONAL
plt.imshow(data[100, :, :], cmap='gray')
plt.show()
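# Added sketch: the slice spacing usually differs from the in-plane pixel
# spacing, so sagittal/coronal views look squashed without an aspect
# correction. Assuming the PixelSpacing and SliceThickness tags are present:
pixel_spacing = float(slices[0].PixelSpacing[0])
slice_thickness = float(slices[0].SliceThickness)
plt.imshow(data[:, 100, :], cmap='gray', aspect=pixel_spacing / slice_thickness)
plt.show()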
###Output
_____no_output_____
###Markdown
**Task 3**: Use the Window/Level-technique to visualize the data! [45 Points]
###Code
# We will now enhance the visualization from above by performing
# Window/Level adjustment.
# Here is one way of doing that:
# vmin = level - window/2
# vmax = level + window/2
# plt.imshow(hu_pixels + rescale, cmap='gray', vmin=vmin, vmax=vmax)
# plt.show()
# 1) Please load the Window/Level values from the DICOM file,
# print these values, and then visualize one slice with window/level adjustment.
# Hint: The DICOM header has the following tags.
# (0028, 1050) Window Center
# (0028, 1051) Window Width
# Hint 2: You can use slice[key].value to access DICOM tag values.
# Hint 3: (0028, 1052) Rescale Intercept might be important.
# TODO: YOUR CODE
image_slice = dcmread('ct/' + all_files[100])
# Window Center (0028, 1050), Window Width (0028, 1051), Rescale Intercept (0028, 1052)
level = image_slice[0x0028, 0x1050].value
window = image_slice[0x0028, 0x1051].value
rescale_intercept = image_slice[0x0028, 0x1052].value
print('Window Center (level):', level)
print('Window Width:', window)
print('Rescale Intercept:', rescale_intercept)
vmin = level - window/2
vmax = level + window/2
hu_pixels = image_slice.pixel_array
plt.imshow(hu_pixels + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
level = 100
window = 100
vmin = level - window/2
vmax = level + window/2
plt.imshow(hu_pixels + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
level = 70
window = 50
vmin = level - window/2
vmax = level + window/2
plt.imshow(hu_pixels + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
level = 50
window = 30
vmin = level - window/2
vmax = level + window/2
print("\n vmin \t :", vmin)
print(" vmax \t :", vmax)
plt.imshow(hu_pixels + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
level = 30
window = 10
vmin = level - window/2
vmax = level + window/2
print("\n vmin \t :", vmin)
print(" vmax \t :", vmax)
plt.imshow(hu_pixels + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
level = 100
window = 200
vmin = level - window/2
vmax = level + window/2
print("\n vmin \t :", vmin)
print(" vmax \t :", vmax)
plt.imshow(hu_pixels + rescale_intercept, cmap='gray', vmin=vmin, vmax=vmax)
plt.show()
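# Added sketch: the repeated vmin/vmax computation above can be wrapped in a
# small helper, which makes it easier to compare several window/level presets.
def show_windowed(hu, level, window, title=None):
    vmin = level - window / 2
    vmax = level + window / 2
    plt.imshow(hu, cmap='gray', vmin=vmin, vmax=vmax)
    if title:
        plt.title(title)
    plt.show()

# Example call (the preset values here are illustrative, not taken from the assignment):
show_windowed(hu_pixels + rescale_intercept, level=40, window=400, title='soft tissue preset')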
# Which values make sense and why?
# I think a level value of 100 and a window value of 100 make the most sense.
###Output
_____no_output_____
###Markdown
**Bonus**: Create segmentations (label maps) for the volume using thresholding HU! [33 Points]
###Code
# Similar to Window/Level adjustment for visualization, we can threshold
# the volume to highlight the following components using the Hounsfield Units:
# 1) Fat
# 2) Soft Tissue
# 3) Bones
#
# Please create 3 segmentation masks for these structures.
# Then, please visualize each 3 slices per structure to showcase the segmentation.
# Hint: As a reminder, the following code allows thresholding of a numpy array.
# new_mask = imagevolume.copy()
# new_mask[new_mask < XXX] = 0
# Hint2: You might need to cast new_mask to int16 not uint16.
# TODO: YOUR CODE TO SEGMENT FAT
# Threshold in Hounsfield Units (rescale_intercept was read in the cell above).
hu = data.astype(np.int16) + rescale_intercept
fat = hu.copy()
fat[(hu < -70) | (hu > -30)] = 0    # keep only the fat range
plt.imshow(fat[:, :, 100], cmap='gray')
plt.show()
# TODO: YOUR CODE TO SEGMENT SOFT TISSUE
soft = hu.copy()
soft[(hu < 20) | (hu > 40)] = 0     # keep only the soft-tissue range
plt.imshow(soft[:, :, 100], cmap='gray')
plt.show()
# TODO: YOUR CODE TO SEGMENT BONES
bone = hu.copy()
bone[hu < 300] = 0                  # bone is roughly above ~300 HU
plt.imshow(bone[:, :, 100], cmap='gray')
plt.show()
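# Added sketch: the same idea can be expressed with a helper that returns a
# boolean mask for an HU range; the range used below is a common textbook
# value for fat, not one prescribed by the assignment.
def hu_mask(volume_hu, lo, hi):
    return (volume_hu >= lo) & (volume_hu <= hi)

plt.imshow(hu_mask(hu, -100, -50)[:, :, 100], cmap='gray')
plt.show()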
# Are the segmentations good?
# TODO: YOUR ANSWER
# My segmentation did not display the result described in the statement.
#
# Thank you and Great job!!
#
# _.---._
# .' `.
# :) (:
# \ (@) (@) /
# \ A /
# ) (
# \"""""/
# `._.'
# .=.
# .---._.-.=.-._.---.
# / ':-(_.-: :-._)-:` \
# / /' (__.-: :-.__) `\ \
# / / (___.-` '-.___) \ \
# / / (___.-'^`-.___) \ \
# / / (___.-'=`-.___) \ \
# / / (____.'=`.____) \ \
# / / (___.'=`.___) \ \
# (_.; `---'.=.`---' ;._)
# ;|| __ _.=._ __ ||;
# ;|| ( `.-.=.-.' ) ||;
# ;|| \ `.=.' / ||;
# ;|| \ .=. / ||;
# ;|| .-`.`-._.-'.'-. ||;
# .:::\ ( ,): O O :(, ) /:::.
# |||| ` / /'`--'--'`\ \ ' ||||
# '''' / / \ \ ''''
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# /.' `.\
# (_)' `(_)
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# jgs \\. .//
# ///) (\\\
# ,///' `\\\,
# ///' `\\\
# ""' '""
###Output
_____no_output_____
###Markdown
![CS480_w.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAADtCAYAAAAvOMSOAAAf83pUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjarZtpklu5lYX/YxW9BFzMWA7GCO+gl9/fASmVVC7b5YhWhnJgknx4dzjDBdKd//3Hdf/Dv5pCcynXVnopnn+ppx4G3zT/+dffZ/PpfX7/+FX4Pvrb4+7s74sCD0W+xs+PdXy+2uDx/NsbfR6fvz/u2vc3oX3f6PuLH28YdWWtYf+6SB4Pn8ctfd+on883pbf661Ln5wb8+j7xLeX738JnYeH7rvrZ/fpAqkRpZy4UQzjRon+f22cFUf8tjvdVnyvPs1j53mJw70v7roSA/HZ7P756/2uAfgvyj+/cn6Ofz18HP4zvM+KfYlm+MeKbv/yF5b8O/gvxLxeOP1cUfv/F2iT2z7fz/X/vbveez92NVIho+VaUdz+io9fwRN4kxfeywkflf+b7+j46H80Pv0j59stPPpZ1C2TlOku2bdi1874uWywxhRPISQhhkSg91shRDysqT0kfdkONPe7YyOEKx8XIw+HnWuxdt7/rLWtceRtPDcabkeR//eH+3S//mw9371KITMEk9fZJcFBdswxlTp95Fgmx+81bfgH+8fFNv/+lsChVMphfmBs3OPz8vMXM9kdtxZfnyPMyXz8tZK7u7xsQIq6dWYxFMuCLxWzFfA2hmhHHRoIGKw8xhUkGLOewWWRIMZbgamhB1+Y11d5zQw4l6GGwiUTkWOinRoYGyUopUz81NWpo5JhTzrnkmpvLPY8SSyq5lFKLQG7UWFPNtdRaW+11tNhSy6202lrrbfTQIxiYe+m1t977GMENLjR4r8HzB4/MMONMM88y62yzz7Eon5VWXmXV1VZfY4cdNzCxy6677b7HMXdAipNOPuXU004/41JrN9508y233nb7HT+z9s3qP338F1mzb9bCy5SeV39mjUddrT/ewgQnWTkjYyEZGa/KAAUdlDPfLKWgzClnvgeaIgcWmZUbt00ZI4XpWMjXfubuj8z9rby53P5W3sJ/ypxT6v4/MudI3T/n7S+ytsVz62Xs04WKqY90H88ZMC3/vefTODWuYifWbXVBFtXPsfOowfaNJyff1+J+7LR4ByTHvcPS6+5wzwaPwKR1ez5gT9mTHOZzrj+pXl7f1uZua7N7QjotnzgJaVnG+uZqu8JfpcfiY3N2BvdKKFj6WWXMzS3tUXovNeSTb16pllvI7pqL6HmyZDePafmEfSf11GftztAQN3UDKFnN3lyXmzrc2fB3nhhK83GDr+22Bh7nzduuTCy58z3SrbxbLdH52/guLa5B01AXeYKuZeVzaw5ztkJeejxcLEzW1cnC2JXbauTGH5upEa7pNqVZ+im2Ey9JviTwZ/GNAdxpGvmst+9ErdZB7eR9jTeJccU92lpFpdKCuVHhluZzg//vIVz+k5MxSuPOxvWLEhxnKW518GWrnGibQM6ogbPi3OU4wrKXUdmVi+21AEDeP4Ub4iS/PlKhNA50VVfgVne8OVMhOV1qnlA2qGtTkKzOSiWdhHFOf9eqffFZC6s5Xpszbyqfq3MLbQajfsEHyj5nKpUADx+Td7bXaVz09HpYRChz7VC0njHptmidNidFlC2JI1Y5Uj6LsNq6fqfTfRl3TJdnHmtR0GGVcy+NvqudnlM9RpgaQb2XNkgshiUXSJTrpntrv6fEu85cfS547djdca46L5RKFHlZF7lXUIDFrAq3i76bP5m+uIVf2qFayhm9xEFNpVo3Wcust+YyQyVmJZS9Rpm+0Mkh3hbPFiDteOquh247hIak9R5bppRDHejieV3zJOiglFk/qLokMfpJjUqzNkqdSIWFfLG5Ew0NnlbeirKOasGU5rxNxeJuo6nARJKau+3Dy6bd7sMcA+kctwdSuiprJ95+Uj+8CVkrVGwyyiX0MpvxRuFyIavEZtzUWDsBRW3SioB3OaQf9PQjPuK/i1o6Q3Ja2PPHV/fnB/7jV8q93DbjOBT47EJ5UtzdSBD4nqlf7jAnyiPnqsKk0m9Ju65OVQ7KOlPInlYtANZdvcZicHDt29Ly1S0a/O6TQ4VkCEdTODq3Q2cC21QigVq5Kts8SPfUpkzvFjfXlYa7Db3hGh2QL41HiPYhkaqlAiCU6wkPBRbGmlWNcXLj6sBBuhPgqfOlYGSbFxYxmhQgpXsmt0zSWHYroDYci1ZG6Ojr9qRmdyqVrI12GokLvCAeAJwind7BWxFuA/wy3AMieJafjT41gdg6QQjZKcgTAPrZ4JUGiO2z1Q5llnEmoOlKHrBSp6DPMVFo3I1yAZciZcVVLxcmNy0Q+A4W7lhhJZgT6WSdtgmgOuB/fFswR6rjxn52A7d0Scjuwv2g08pQDNXUJZkjKhuYm8QS3xLoAnpW8Bddp8RYPElk6USD1K3Qd73p6Nrcb4ZoMlRDxecEqlYSv9AbgTiOw03gsk51kCDdMsA83qCAhAQ67ENCS9gFoD636J6rTagjT6EDhVNJ4iWy17LWPIqDWvzLP9B+W4G8KuawZwqiIxfI7sX2XLkRIT/PaUIqmDyTbIq2NuAoTkeoW55Af1gVyNwo1As/0w0NxRAQtTKhiIDG1TJygpdCHbvq1dwvYYhhde9ufwIrqv72pRm4XBDCV0H+KtAQtUX3CplzQWwNAGyIz1k2igsaL4CwQ5ycCUsZ/VTbalxuzsFyKViS1NATq+wxUqfr4X2Ye+QVLvwNY66KskMN0WuN2lC2qEsiO2HYuhLckyLgxJVCAuwM4UC50yaXknl5DqSIMgKegUAQEeWvmoVdNUgIAPqG++n6fbhZgB+PBCbOjrJCpOiSq491uDeExWujVNoYAJsYgqqictS5xjv6abN0lU6CpHvZzTJsh8aZGw14STh5b5vmRhRE0deKVHaFDY4ZcBlRfwdKG9xPiWJcYAIK3iBMo7Ey3T6RYf6tpAfkREFH1Uyno7NvKsAPFE4Etj8AUQfn9kbUIq9IOwV5wB9UASE4IHHuUb3HXSEWBqk6frtCO1CYKueNMQLmOvdDzYCBJSpCRCdGiAYfJJlBm8DeUBqsMQc6IKM623GdBMG4+YxEhp+eJksgTcdJ0FjQuqmeCWE1JADQQLHtshblhMBsbV3FxTXTKAKb0Ez6DLGwaHTe5iCbAREwYJKXmmba1AwkWNRy6GzBLiIN1VZ7305GOEMD3PKGgi7hhmAn2h3wKggzgHoAViuOQZzoM+gY/cblNMhBD6iCLbvM4uCphmSrIlGgvKguuW30iS0gblZkMt3bseVVvoLajgUgg4wAqAydtuzwK7wKiqrjPN0J69PW5cXSVgz4j3qrNK2wkIx6FC3LC1pH3gTldvjAGUVOt031OW7nqMa6vwUxBgmjV/AEdAxYnPESu1BDFakhrckT0Z5ZXzNZQ0NiiVO2v/M1Bvo20ku8GRImQXpGOHlIxo/Md+GMuI92pr+oVTgxDslulGRH56HlDekBrgvkCuQ15kE5reIBoII8xqa9Sq
bJ+Ibkz45lxznhJ+CHgLW6NJekuCQWPX1F34cGN0QuOv9ACMHRoRurAg1fqXSQjDeLr6vXABgAQXB5TKEVhqlhu0KnGKjHASdQoAVhRrBHoloCvi/es84Wc17FdAviOhVN/1A/hiCa9cpnqHaNoPXVomQXVpTX4kUgKV/vEc/Ygzj6mozR6tl31lmFtKlXlMFIlPTKME3k8YwhGOBZoP2Kk2Y5Ha7wSPCBl1r6odGlwECiWmH/UzIduMFowAhhjQrmPogy3A0MAeQDTzspoui5EAZINYt2v8NA9PEihUnbR+Oa4fterNwHtNRRMcC9M0p2YA+CW5Sx6YKJ7sUBI2fCQjKDG7pjxFVUfUs/8I/bTLgaowlNXU9GAQ+wZznuAUtEcUXafdKVtwl6LsC/QRJ0Al8gzgZUBr5c6ofK91QQOpkgtqIenw4PcAkqJr3LC1NSBx5E2ybIBfNDiT4lIsNE4+TjdcMAaUOPYKfT3IZp2C7QYiSlDUFhw8kAmVfDAEwkJoOcY/wA/631k/0wWL/U3lhGSVc1rd+3uUV/4JOsoNQO/GezY0x0W82zVD4v3FohhkiddvE4KPOXBmCEItF3dHxxe2MFuL1ZyRIfdDOOK0rViKS4IwCGJpTPmqgjxM6jqtypyLHe3UC/w+Ght/QbbLQNQKOJPD2hSgIxMPSGqQib6kUaoBoOxkXjCLipJWkPeYU7sqsVzbsKZEOPIpLRYdUQ1Ej9Qu2UjsAdcqBjz9NfvC5aDNhUh364Fwt33DAYEy6iDJsWhQyBlhYqirbDkcek8kaWoCoG+hhfjRmH0ODPasSXMriBXoOAYXk8TkS/nQ5QgO68ACu8H9zSrdzqFMhHnNxKAwVI8fuxCPwyDTG4oKNaplpPnq6HG1EQqFIgPZA4rBEBxFGi+lA3l5tflPbSzWPkk4YapGDQgk7rIyD+7ShouLVkXGKR9jjcSYFoz5hEiubsoyUsOiQwqJ+IZUGkA2GUhBtAFMwJ3Im/unRByAc3XEf3WBFk40QVooMB1It+xX9YfJKCxFCgvZ09QnFTLjp3j5akb5GE5DbKTw2wDBnI3dGmvGq3KU4H3eD3epDTWRKzU8B0s0SEACaAHLBbU17IMjXHjyNBiHRLoESJB4uRBwclduL3ZrGK3/Dq2FtzWbIRnAU7rzqZAAbcBatAHaGWsvjd1mqatyB8d4c3AIj2mAAj1ijzg2IzrZvUYue485VgdDi4AwieRZDNjiELmhb1K8mnkRatgnOBb3UNRIRHVDveFm2J8uv9I+Su0cebCEH3tDGMC+thwuh73CZSmBbmJzzmiCA31TVAzOz2s1GkdiyVNFg8kWfYFhAkvTEAUH/x9zRaxw0USS4AGwAkmARyUvvUgqbHGnICShK9OPmcIOSGfUAzQI/cBuIWQsBTRrlt9B/Rhao1gfM+YgIPOOXwgtlLYjeBiz38wyZDvDi7LnbD6cYlRYsqjajJTAtpF0uC4I+vrhIUmuTp1NVSlEa+VC588QHWfH484eevaYNfnqDs0f2aHOI9aDAeO0PSqNcFX4FOGDB8ikgYRRCL2gvi920/XPQ0RuHHKOT0DqjnlZAkuQGKW08dkEEJ1p4hGL1FjWgUNaomRZKlUAyNu6i8iz56nJdcRmWwlCmTia4wVH/AwWJCEZO4Lg26FmsoCI+CTM3vPyiKtTzVXtKB5OgmIoA0b01Gcnt2o6hnJbnnaY9vUAqJmFKA8SEicuSC6C3JMRoxASpcPRAU4CzZlIgMCE7NU3iwXJG6b5d2ReQQ8qx5nmamsusT3gYUp6aT9Q4nJY77wBGapnKUCr4okCQMbiIulOk+kg9dlw+AjQwFZY8lQRQggWhhTIirBee8KdyMq0URNbgaS4epoc5QBzIy0fAYTZJdYyfsB4SI4wdOqYuyaLuA78eA0CoeGQ2kp1o1MJgNK4/8eeVJiuTs7quf+30Mr6kp3VB/FYkl1+7V1P15/k0wSSkdH6ZmNJrM0aLoJUNC+ZddT7fkF3LQJGCcgPeOt3BiDzxjHYgylApQiPk6KvervZ8PJXsIkWoBrDVakj8nPlTB0cCUyEHnjshBkfj0jiyQE5O2RxVpSK7dCRZWR9WEndeMBboSJQ2mPNhoGs8SHDSI09BlbfRsNhCgor9SlAIGkqRZ+33DoIv+wvKhZgtGsOPsm2+jlATgWsNhRGwWmAa2yjFkxap5HCq9CakALxutPFrQoB1/plbFs/gOoIIRhrHoaBleWtwdZwPWyOC61dz8DK1i94emDLgC00y203zg6KtpRJRqItAq2gTlXWSjoGykSK8RXRdhFVoL8F6jd2JGp0Ju4sAJ1vHshB19F81S1dzqTECqIHk5uIqSn5bKWKJ3cE/qtVNHNDpFz/OkK6OkEFqO68LubcesCfmA5w4rzNERTHKfCxlESh30931zoSNeHzsj+gNYj2KAj4LOKOTsIUCI7cZNBSCfyehy1PymIqrEDZGUPkN78Q4AFeookiC0PnpSU78kw4stlWRZW24AiKJmWbtgpHN1GZD92A03R7ASJowKbUFD6KjZjKcygM6KR/8YDSqfip1EDPRZ7tAMmBeijQKRm4OB8Gg45REBY0g14QseLVAILFR9Coq8rYRDRiYiBeHkpPS5IxQluo+Sh9yvXAP3oxFpLxoVzPHGnyAWAoGkkIGmSYKmtJXyhFgd9Z/LGVrwLhdIuev1FxIBTMaxD+6M6HGn5F+0FQJaI1AfafYs0bW1t+cuWZiqKMApAsOmERZM/VI2ROV81QzebmmX92jYHkgdTI8XzCNxpkmB80pu1ZCis6g3J6XWsJWy+tDBwqkSfpzM6lYpR7RGAGdk7dHFVg4mIJXGrWmn5CZWjVLogh/ellIGbbAY2GCxNtgCiaPXrJ6J2GOFiG8oKS5tDJY6HXZSEnCV0MpGLqJr4DTDT0h61Pm22D6vIRjUStWksWFboECNxkHjmEbB+GmHKQ9B0VoYqoVN0BaBJADusqlPkjahkG9kEKKCBMFbND7Uw1egC6HpVsEEYXOBaopckp/U4VWXfo+AB4XWuPhhSMe8uGHBLEQb/1y7tgc+I05XM+9NCyBngV7Ci8URtChAQB7QQdqo0I2Eabqz8YobywpJocAN+Y3Ub67jxydPEQCi/Kge1KynEEte+o56832dMNFEIYL1B5MgRSczrD0YhIq2PRyxycTUNFPlUqtRrHxCDk3KVDmmjqh+7Td+BgtWKJzeUY81o8VyMO7Xo/w9ziLxIm0IaoaI5ntTXXQdXmf3tGvYfmn3C5/ew+Iz7++jmGGjIFkuqhbNSJ66trkurIDSrXJUfHATr56BoIaqpfRAtP06/5BzeIKqAtqTuiq6bZjrq4tGbhFGQ6NLneToEUvLayyuIc6nlD2GAnCbQk8CjTWFFLTZHR2e+8hX0cUo8jCM9QeRztICREYaTPQ1NfUR6TeQCnCv5VH4UGFbSEBtKT6RgQ7uiV/82zm4eZwhL0ipNk2UjvZyCJSoGpcBGK6aDSxAfj34x0Eu4Jwn4gV1TWxmRHePI+BjeX1InlO1F76VpsyzNfwb2
qQBoOdzLsywovXJh6qNWaTflsR4eiMAulZr9dp31+ZgNCCYSk+roTG6th8mulF7o+QK7g+aYGhcywMikKXRpXQuLiyFlKZppyPF3aQZsQewX3lb8mPSC40Ig1hYUe4Rj+kBUBQ6nK9hS9BeMy+Tv/5DlP87Te7+tihXQ7xB1UWxmLbVEJe0wQXZLOMgCS4ClbRqhgza+amNooX9hAM0bjxWABJ4Bei/UdOehozG4byxDpIiJtbhuvafr584PLTCBJia9hMyrkxDi/jhpaGzOWQSQUcrBC1atIg7xSNRs2RNzdlQcIjoBl5I+i7YCTb7qKuLTICTpg5MABnAukQkq8RLgpRTw2T6ZGFFgdAOrHnNq3RoDtyChmglBM7VWGfnugx0DFSW4F3bniyYUsGrweFmcw0H5mU0JCIE3YMCGJQIr9b0e0h0XWz60PwYv5q0dO36csNIz03AsSoTqwpBxltZCs0M5eLdNCbiaUjjgiVBqZUR0Hc0Y6k5mk5paLaHqYifUxQoRXRxRLE9Vv1U93twaJMW3T8Olfl+O5LcqHYkyy/5oIDe/iQYAFZ4B8rz8x8ZaTppIntDE0T8FLLtsxtJ/TxvmwauRxOmBV9py5TF7lhd0imRZ77BAm1yaudYOU2IDEFR4DHCoxHpV9xHleLv0r7qIBMlQU9WORFkZX8WMslUor4BemIkjoUVNPcvp2jTd8Q3P0Crwn0l+Ellc/cIbTBCGwUV9DgYId14LNom96iG25OOAoCiYNXoMh80hdeuI6wiRK7Dpf1G/ryNdl9OWHuT5M3iC/izNBw+2nt+jVp1woJvazmfTZ+tkXqIqCdX4KalM1KIL50LaVdnXAj41s3nO8dnw26hC/CCkfhxTeI5Iq8MmkVPQBw6QiTZnppZYBW0pam5NME5q2AFCWgfMjW4tKMBhzYOI8HBJAJ9epNWtavkEB1oIiA8xEBln3/1fixR7sw+UnRlXIoGj/A0WEF/ulgJMRyRwLCuEYumrUhpdHuBudB7lOU2HO/2OnqjCZSAyutQEOhY8Lt8gzs6YvnUEhSRZIGxiwDVkuB6jpqyb1oS7f7WhQ4Ruzfsc4vokK5HNc8WhGYcEHQ55PhgekBMmz06dDSoVegmICNbx6IhAN8h3+d8kfFzq8h5KzcDtZ4h8F4RBwAuin4gPA7GnHaGlpEkmuXAnzKa+Ca7474u1zEPar5qLOs2GUmII5k8AAelgxrVjn3FKaUA9TY4ovqF74aljygfkofpgKy09fKiYDgdaSPUkosqFa/DN10nSgG7CAXmt3cpxDioCvSMpO4jAXFG7trKMQrPAZTNuqQwDgKshzwQpJiArsNBOrCUKSPNxLkU1LZRuZSzUVJ+8ib9M8xrMK0GOxk7l5pP9nYj0QgdMugRM5d1MK5RMsmifK28OOXmU9VUk46H3oHU4Lgo3S2ITxpxIbL0QdfBtKwMm4BP2xgs7brYm93YG1MfCjBqBoK2lxfh1iQqACcEaJG19BJ1Z3NBjVpEB8Ch13FrQF+YJGVgGkOh7DXan8Y9OnERMlbHJNAf7TM31dmaFnWmq2lgWqUFkaLrA73gBj21S9HZH+0g4iCOyynhMOghndTBcyC1dc5Fh+OQyFHHE6pm2nCDgd1TWDZLfDM+DbaztTd5dpJEVBJgjSCRhFcfFVpqAYOIrByW3L3w62LZMSr9db+CbZDNQFohh5p7942OfOOwKjBqB4EJOuoMxMZF1cRtdQ3dAEXCizdCjJ4gFTzim5b4W7EQmGOZBcReDtzNngnXT2N0QkqhahqFErwaXjUV0W0BW+dJro5MRJ1BUWUvZTFy6xbwQAdU2dp97p9DcYhUpO/UTsXNKi4a50j5Y6W15RyW7CAiFum35dHpWRZ6NdrT0Wt0tX+nELXjDQ/LT7HwBeIjQVOMUTynMwQyYJrBX1ZENWkbg1ZAWQIKWafOLsBRtywc6aeyAbf9jhXRr5oyalNLvlsKx2C+66A0fM2iOlfqkp7YMNsAQqALpHKE7iOs0exGFQqe4bMT9DUaMQJKY7iMzJSIR1zjrDCWwAZgoAMY8KHmBvHClYt6aAJ/FnmPzshC7+AUMYzam+iOiE1YFcc8pLz6G+4/Jq6laQhAYYd+oPUi6fGxwzDubdiCZHQJkaBvERHkVNQS6cJ3jhMZ/dmMA8VNgiTzXLxgEQHjWsj4pGaR+Aq8RjHUNA5yvkNBOLxKMor2e0j7FSoJzTWfaOgb2JK0a7MGJB8sgyd0qAKPr7mN366mhLLioVVQQvLQrzEzPbXf1LcCSiiWCwvQhrzD7BgRnU4F0zrNRgE02+9PGC66SWOXraEtt/6ZM9Zh4lnK/tDBAHwAvxDDEtYzSl8LgZF7pkMSmBoN4nW2RRNRIPveiVqoBdiEJa/OR0DNsjZbG4s1UElD0wAEW40K3cA3XUeHQnrveGT9zM2jobAWLFzQmiu+I5IBH4YWvtpzRldoiL0gxwi6wQFwW3Xh4Ajq0HbDpoqG79g4fITXZmEbHUQHD5YOp0UNnkGHiyLAGGqCCqPWjxZwtGH/OpbnCYEVRFV99Jw0q/8cU9aBS+pQs7e3bmT+85lj6RQ1XOkG/BMwpohS7YMg5Bcij06emkCajqBunSCoOuBDJ38kiA4iaFL6Dj8W1EZxucH4KSUdXmksXGc6n3qXuGm9T7TrE3tAkE4weERyScSfoKbhaXxb9Nh0OgwlY/cQMkuE1bd47QiPq3TyPiizob9DAMtBR5KnmaN2B0mrdlKlIW/JFCb+IIywdSIOTfz+FqGDvpVQJEgYs8xrMKoZe/gcs0cbLR27MslhDLPDHRBsgD7g8LSnUSp2vki1d2rqm6f6F3nKHwXwcjOvu5/UqJJfamSLxzyrfYoFDyMZTCFb0AFsgFcDL1ngonMB2j3zAeZ2mgfwL4vRDHJFbuKQgC48nfYUqKalDRR4AeOtv7gAy1Cx71gMCscbTIzTc/5dGch826kT04Iv1y5k0k5hvHhRAAzo1TSA29bRBB30kFbB++eJAdtRbxQo8CDfL4/SSdNB0dIhKIfzLhKOwDIP3h61qqVDHOU5DC4Wtf8BtLtMtBOv1581HB2g29ql1Vng7uHZi681GlVbkpvk42/vRNikohHvmw4PXS64FrUhDHdh+OjMWV5jzxmwJDrV2jX3pHWBfkABzvUKn44Gag8Tzeznw1OHNEKiDhk+HIIsRJcJ35/YBY0p4PT6djC1s6kN4NMLkiFglQwxRMkBcQ5aRUkdzbU0qoQcIVKNWXFMMIemrdI0ptm6/p4haTqDHjhNO5FAxWDZgKQTcGmnyPj11q8pJB2PfBtMmjjprDxcj1pGicHc6m80kTZQjYqJgAx3yq0diPEUep2XaRMDZxHkTWB8ygNzGezpoZDQ6yjwtwFPBb397tF1hAcgSe7Kz3dcPxSeCjGRj+f2gMmI+gHvTtRBfB2Dh0GvTs12RB6dR7XQnnctnaV1ZIQVTh3/PcC/zn++dDbgagQdyDjJC4jnkBO6Oi0wtT1ZkFkeNU792MrJoUAFNyDueWcQlvaN6bT6TgxxEW3NXumY
t0dbyrulXBA1YvCM35o60emS9sSRT7ED9KCQprw0Q5DxBIKwKAOQuIOSTFofKmlTIqNG7LeOPAgasBhOk0/79BtikiIznZqjU27gx6n4I4Bb1NC1FZ/hAsLq9ccWKOmIYkF9lZ0drYzlx0V1noYx1p/yDFkamKNI+GovmybWJYgLfJMQrKsvHYk/UCqLQjYcJ2GBgChIthcm8u83xQiMqRJ0ZoEGRF2WfAMCiiJIhS5UCyS1bEAomP7y4GiegRmc79RsmUbc21YFavRJILBvKIWa6UMdAl1Uc3zgwv0hBJYGIXu6zB3SNheQ1V+RafQE4VOp/wdi1canzzj4UAAAAYNpQ0NQSUNDIHByb2ZpbGUAAHicfZE9SMNAHMVfU6VSKg4WEXHIUJ0siIo4ahWKUCHUCq06mFz6BU0akhQXR8G14ODHYtXBxVlXB1dBEPwAcXNzUnSREv+XFFrEeHDcj3f3HnfvAKFRYZrVNQ5oum2mkwkxm1sVQ68QEMIAIgjLzDLmJCkF3/F1jwBf7+I8y//cn6NXzVsMCIjEs8wwbeIN4ulN2+C8TxxlJVklPiceM+mCxI9cVzx+41x0WeCZUTOTnieOEovFDlY6mJVMjXiKOKZqOuULWY9VzluctUqNte7JXxjJ6yvLXKc5jCQWsQQJIhTUUEYFNuK06qRYSNN+wsc/5PolcinkKoORYwFVaJBdP/gf/O7WKkxOeEmRBND94jgfI0BoF2jWHef72HGaJ0DwGbjS2/5qA5j5JL3e1mJHQN82cHHd1pQ94HIHGHwyZFN2pSBNoVAA3s/om3JA/y0QXvN6a+3j9AHIUFepG+DgEBgtUva6z7t7Onv790yrvx/xInJz/ZaLfwAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAATrwAAE68BY+aOwwAAAAd0SU1FB+UCBxYME13qmlgAACAASURBVHja7J13eBRV+/e/W7PpBQLplFCSQCAgHUMRCFWkCYIYioCCBRF5kEceiCAiRYooiBKKIEhTeqSFAAm9EwhFSiCV9Lqbbef9wzf5EXZ2dzbZTb0/1zUX4cyZM2fOnNn5zjnnvm8BY4yBIAiCIAiCqDUIqQkIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBEECkCAIgiAIgiABSBAEQRAEQZAAJAiCIAiCIEgAEgRBEARBECQACYIgCIIgCBKABEEQBEEQBAlAgiAIgiAIggQgQRAEQRBErUVMTUBUNtnZ2Th+/DgiIyORmpqKlJQUJCcnQyAQwM3NDfXr14enpydCQkLwxhtvwM7OrsLqlpCQgD179uD+/ftIS0tDcnIykpKSIBQK4enpCXd3d7i5uWHAgAHo0aMHrKysKrTdIiMjeeXt0KEDvLy8LFIPrVaLq1evYvfu3Xj+/DkSExPx7NkzWFlZoUGDBvDw8IC/vz9Gjx4NHx8fi7ZJUVERzp07h8OHD+Off/5BcnIykpOTIZVK4eHhATc3N7Rt2xaDBg1CQEAAhEL6BiYIopbCCKKSOHnyJOvTpw8TiUQMAK9NIpGwt956i924ccNi9VIoFGzlypWsS5cuvOsFgDk4OLCxY8ey2NjYCmm/devW8a7boUOHzH7+wsJCtnDhQtaoUSNedRAIBOz1119nf/zxB9NqtWatS0pKCvv888+Zra0t7zZp2LAhW7t2LSsqKqKHkSCIWgcJQKLCuXbtGuvdu7dJ4opLTIwZM4alpaWZtW7Hjx9nfn5+5aqbUChk77//PktKSrJoOw4bNoxXfaytrVlhYaFZz33s2DHWpEmTMrdRv3792JMnT8pdD7VazRYtWsSsra3LXBdvb2925MgRejAJgiABSBCWYtu2bUwmk5VLYL28NW7cmN2+fbvc9VKpVOyDDz4wW70AMGdnZ3b06FGLtKNCoeA92vXWW2+Z9dwLFiwwS/vY2tqyU6dOlbkemZmZrH///mapi1AoZAsXLmQajYYeUoIgSAAShDmZPXu2WQVW8ebo6MiioqLKJf7GjBljkbqJRCL2ww8/mL0tT548ybsO69atM9t5Fy9ebNb2sbOzY9HR0SbXIy8vj7Vu3drs9+uTTz6hB5UgCBKABGEu1q5daxGBVby5urqyZ8+emVwvrVZrMfH38vbjjz+atT1nzJjB+9yJiYlmOefGjRst0jYODg7s3r17Jgn2AQMGWOxerVmzhh5YgiBqPALGGCNTGMKSREVFoU+fPlCr1RY9T/v27REdHQ2pVMr7mC1btmD8+PGWN7cXi3H8+HH06NHDLOUFBAQgLi7OaL527drh8uXL5T7fs2fP0LJlS+Tl5VmkfTp27Ijo6GiIxcYdEyxduhSzZ8+22L0SiUS4cuUKgoKC6OElCKLGQj4QCIuiVqvx4YcfWlz8AcDly5exevVq3vkTEhIwffr0CmuHUaNGITs7u9xlxcfH8xJ/ADBgwACz1H/KlCkWE38AcPHiRaxYscJovhcvXmDRokUWvVcajQYzZ86kh5cgCBKABFFWtm7divv371fY+ZYtW4aCggJeeWfNmoWcnJwKq5u5xMvBgwd55x06dKhZhPXRo0ct3j6LFy82eu8WLlyI3Nxci9clMjISERER9AATBEECkCBMRaPRYMGCBRV6zrS0NKxZs8ZovqSkJOzZs6fC2+THH3/E8+fPy1XG8ePHeeXz8PBAq1atyl1nPu1pDrKzsxEeHq53v0qlwvbt2yvsXv3+++/0EBMEQQKQIEzlxo0bePr0Ke/8Li4umDlzJo4dO4Zbt27h0qVLWL16NVq0aGHSebdt22Y0T3h4uEnT0l5eXggLC8OJEydw+/ZtnD9/HuvWrUNISIhJdVMoFNi0aVOZ21Qul/MWgIMGDSp3pIucnBzs3LnTpHZatmwZzp49i8OHD2PmzJmQSCS8j//ll1/07jt//jwyMzN5l9W8eXOsWbMGMTEx+PvvvzFv3jyTosgcPHgQSqWSHmSCIGomZAdDWIqwsDDelpetW7dmycnJnOUUFRWxqVOnmmTJacgiWKvVMh8fH95ljRo1iuXn5+stb//+/SY5Iu7YsWOZ2/To0aMVGv3j8OHDJjl3zs7O1inj1q1bzNvbm3c5+vrBrFmzeJcxevRoTufXjx8/ZgH+AbzLOXv2LD3IBEHUSGgEkLAYfOPUAsAff/wBNzc3zn1SqRRr1qzBm2++ybu8Q4cO6d337NkzPHv2jFc5/fr1w++//w5bW1u9eQYPHozNmzfzrtulS5fw4sWLMrWpoet6GWtra7zxxhvlvoeXLl3iPfK3Y8cOODo66uwLDAzEpk2bIBAIeJV1584dzvSkpCRexzdq1AgbNmyAtbU1576NmzbyHhl98uQJPcgEQdRISAASFuPRo0e88gUHB8PPz89gHpFIhLVr1/KeTtQnIgDg3LlzvMoQi8VYt24dRCKR0bwjRoxA69at+Y66Iz4+vkxtytcwISQkhFMAmQrfen744YdwcnLSu79Xr17o3r07r7L++ecfzvSUlBRex0+ePBk2NjZ693fs2BE9e/b
kVVZ512sSBEFUVcTUBOWHMQaVSgW1Wo2CggLcuXMHt27dwv379xEXF4fnz58jJycHOTk5EEAAewd7ODs7w8vLC/7+/vDz80Pr1q3h5+cHOzs7iMViiMXicq/fqkw0Gg3vF/bIkSN55fPy8sKwYcN4rUlLT08vtwAcOHAgGjZsyO9LSijEZ599hgkTJvDKz3c061VhpE8cvUq/fv3Mch/5jlR26tTJaJ7WrVsjKiqqzOfk25/atWtnNE9AQABOnjxpNF9GRgb9wBEEQQKQKI1KpYJKpUJGRgaioqJw6NAhnD93Hrl5uVCpVNBqtVCr1WD/RlxBsc/tzIxMZGVl4enTpzh37lyJ4HNwcECXLl0waNAg9OjRA3Xq1IFEIuHlHLeqkZubC41GwytvcHAw73J79OjBSwCmpqbq3cd3ZJLviFUxo0aNwvvvvw+tVms0b3Jyssltaor7l8GDB5vlPmZlZfHKp2/6/mVcnF14laXPFYxCoeB1vIeHh9E8devW5f2MEwRBkAAkSl4KRUVFuHv3LjZt2oRDhw4hOzsbSqUSKpUKxoKrMDBAgxKBVFRUVCKa/vzzTxw+fBhOTk4YPHgwJkyYAH9/f1hZWVUrIWiK9WS9evV45zU2VVyMoRFAvqM6zZo1M+mara2t4e3tzWvatCyOsU+cOMErX7t27XiJoPKIMUs/XwRBEAQJwCqDVquFQqFAbGwsfvjhBxw9ehR5eXlQKpVGRR8fiqeSVSoVCgoKEB4ejl27dqF///749NNP4e/vD2tr62o9NcyFi4sL77wODg6875U+0tLSeJVRv359k6+ladOmZV7fZ0yI8ZmyBMwX/cNUIW/O54wgCIIgAVglUCqVSE1Nxbp167Bx40ZkZWVZ9OXIGENRURGUSiV27tyJY8eOYfLkyfjggw9Qv359k3yrVWVkMhmsrKx45+drSWoIvlOJrq6uJpdtrpG3Vzl9+nTJSLExhgwZYrbzVsbHBt+lAwRBEEQ5ft+pCYwLsYKCAkRHR2PIkCFYvXo1UlNTK2xkhDEGpVKJFy9e4Pvvv8eQIUMQExODwsJCujllhG9MW1OEaTF8LIbLAl/3Lx4eHrytkflQGdOx5hD5BEEQhGFoBNCI+MrNzcXGjRuxZMkSpKenV9roBGMMcrkcN27cwOjRozFnzhyMGzcODg4OteqFWa9ePXz55Ze88unD3t4e+fn5Rssoi8jv1q0br9FZU6KbMMZ4u38xR/SPUl+IlTACWNOWOBAEQZAArEZotVpkZGRg0aJF2LJlC3Kyc/413qhkNBoNUlNTMX/+fMTHx+O///0vnJ2da81L09PTE4sXLy5XGfb29ryscHNyckwue/z48Rg/frxZr/nBgwe8Q+qZ4iybD5Ya0SQBSBAEUbnQL60e8ffixQvMmjULGzZsQHZ2dpUQf8UwxpCTk4NffvkFs2bNQlpamlmMUGoLfK2O+bqLsTR83b+YK/rHy8hksor/KhXTdylBEAQJwEoQf1lZWfjqq6+wZ8+eSnGDwVcEFhQUYNeuXZg3b96/IpVEIC+aNGnCK58poewsybFjx3jl69Onj8EIGNVFAEqlUuqkBEEQJAArVlTl5+fju+++w+7du6us+Hu5voWFhdi+fTuWLFnCa10bAfj4+PDKt23btkqPBJGXl8cregZgvugfL8PXRQ+ftbEarcas5yQIgiBIAJoFuVyOPXv2IDw8nLelaFUQgQUFBfj111/x559/Qi6X0400QteuXXnly8rKwpgxYyq1TSMjI3lb4por+sfL8PWFePfuXaN5+PpH9Pb2pk5KEARBArBiUKlUiI2NRVhYGLKzs6tV3RljyMrKwvz583Hnzp0yRZmoTXTp0oW3H8Vjx44hODgYV69erZS6Hj58mFe+1157DZ6enmY/v6+vLz+hetLwdLlSqcTZs2d5lWWKhTRBEARBArBcAio7OxtffvklkpOTq+VaOsYYEhMTMWfOHOTk5NB6QAPY2dmhZ8+evPNfvXoVHTp0wPDhwxEdHV1hbavVanm7fxk4cKBF6tCjRw9e+cI3hhsUeN988w0eP35stBxHR0eTQ/ARBEEQJADLhEKhwM6dO3HlypVqPXqm0Whw8eJF7Nmzh3fUiNrKxIkTTRZjf/75J4KDg9GqVSusWrUKmZmZFq1jXFwcEhISeOU1Z/SPl2nfvj0aNGjAq32GDRuGP/74Q+fZmj17NhYtWsTrfKNGjaoU1zMEQRAkAGsZGo0GiYmJWLlyZbU3oig2Ylm+fDmSk5MppqoBhg4dWuawbbGxsZgxYwY8PT0RGhpqsVHBAwcO8Mrn7u5u1ugfLyMWi/HFF1/wypueno7Ro0ejVatWmDBhAkaOHAlfX18sXbqUV1+USCSYMWMGdU6CIAgSgJZHpVJh69at1Xbql0sEJiUmYceOHRUWrq46IpVKsXTp0nKVoVAosHXrVgQHByMgIAArVqxAVlaW2ep4/PhxXvlejf4xZswYNGvWzOh2/vx5XuV/+OGH6Ny5M+963759G5s3b8bu3buRlJTE+7j//e9/8PPzo85JEARBAtCyFEfV+O2336BQKGrMdckVcmzcuBFpaWk0CmiA0aNHY+jQoWYp6969e5g5cya8vb0xffp03hav+sjOzsaZM2d45X3V+vf58+d4+PCh0Y2vdbNYLMb+/ft5+08sC++88w6++uor6pQEQRAkAC2PWq3GgQMHalwkDcYYUlJScOjQId4uRGpl5xcK8fvvv6NLly5mK7OgoAA//PADfH198dlnn5XZhcyJEyd4+daTyWRmj/7BhaurK06dOmURC92xY8fit99+oxBwBEEQJAArhqKiIvzxxx81avSvmGLDFpoGNoy1tTWOHz+Ot956y6zlajQarF69GkFBQbh48aLJx/O1/g0JCTF79A99eHl5YdiwYWYvd+rUqbzd8hAEQRAkAMuFSqXCvXv3cO/ePV4jLdUNrVaL27dv48GDB+QX0Ag2NjbYu3cv1v601uxi6sGDB+jduzdvH3jF4vHIkSO88loi+oc+vvzySyxcuNDs5fbu3Rt///03dUSCIAgSgBUjkM6ePVtjI2cwxqBQKHD27NkaKXDNjUgkwtRpU/HgwQNMmDDBrK5I8vPzMXDgQN7OpGNjY5GSksIrryWif3Dxww8/YMmSJRYpWy6X4+233640Z9sEQRAkAGsRGo0GZ06fqdFr5FQqFc6cOUOGICbg6emJjRs34vHjx5gxYwacnJzMUm5eXh7Gjx/Pa0r+4MGDvMq0VPSPV3nw4AFvVzDlEckTJ06k0WqCIAgSgJajOH7u5SuXa/QLR61W49KlS8jPz6fIICbi4+ODFStWIDExEb/++ivatWtX7jJjY2N5jaIdO3aMV3kDBgyokLaYP38+7w+l1q1bY+/evYiPj8edO3ewZs0aODs78zr21q1b2Lp1K3U+giCICkBcGy+62PlzYWFhrRC6ycnJcHZ2hlgsph5vIjY2Npg0aRImTZqEK1euYO3atdixY0eZDYfWrVuHOXPm6L0XGRkZiImJ4VWWpaJ/vExOTg7++usvXnnHjRuHX3
75BVKptCQtICAAI0aMQO/evXHnzh2jZezYsQMTJkyosf0pPj4eW7ZswYULF5CUlAShUAgvLy907doVkyZNQp06dYyWcffuXWzbtg3Xr19HSkoKRCIRvL290b17d0yYMAGOjo4m1enZs2f47rvvSqUJBAKsXr2as5/m5ORg27ZtOHnyJJ49ewYAaNCgAfr27Yt3330Xtra2Osds3LgRV65cMale3bp1wzvvvEM/QgRhQZFQ6ygqKmL79u1jdnZ2DECN3uzt7dmBAweYUqms0DZOSUnhVT+ZTFbt+k9mZib7/vvvWaNGjcp0T44fP6637B07dvAqw93dnanVas4yXn/9dV5lnDx50ui1HjlyhFdZ/v7+rLCwUG85169fZxKJxGg5YrGYKRQKzjJ8fX151SU2NtbodX399de8yvrkk0/M0mfUajWbN2+ewTZwcHBg27dvN/i7NXXqVCYQCPSW4eXlxSIiIkyq27x583TK6dmzJ2feY8eOMTc3N73n9/HxYWfPntU5bvTo0SY/J2PGjGEEQViOWjsFHB8fXyuMI7QaLeLj42kdoBlxdnbG559/jvv372Pbtm1o3LixSccbWuPH1/3LoEGDKiRm7uXLl3nlmzhxIqytrfXuDwoKQq9evYyWo1arcf/+/Rr3ezN58mQsWLDA4FR6bm4uxowZwzkNrtFo8Pbbb2PdunUGl3MkJCSgf//+CAsL4z0bwnW+8ePH66SdP38egwYNMmig9OzZM/Tr1w83btygHwqCqOLUWgGYlZVVK9bFaZkWmZmZ1NMtgEQiwbvvvovbt29j1KhRvI+7efOm3pcxXwH45ptvVsg1Pn/+nFe+jh07Gs0TEBDAq6x//vmnRvWTXbt2YdOmTbzzf/DBB0hISCiVtnbtWt6xoQHg66+/5pU/OjoaT548KZXm6OiIt99+u1SaSqVCaGgoLyOmgoIChIaGkvcBgiABWDXJy8urFQKQMUZGIBbGxsamJCYwHy5fvsz5crxx4wbS0tKMHi+TyXiNppmDjIwMXvlcXFyM5nF04Lc2ja8LnOrCt99+y5nu5uYGe3t7nXS5XI7vv/++5P9KpRKLFi3iLKN+/fp6fVd++eWXRp/7zZs366SNHj1aZzT39OnTnMK8Tp06nOsWb9++jaioKL3n9fb2RqdOnUq2Vq1a0Q8JQZAArBhRJJfLa40ArOnGLlUBiUSCuXPn8spbWFjIKaz4jvD06dOnwqJ/VMboMR8RXF14/vw5bt26VSrN1tYWkZGRSE5ORmZmJj777DOd4/bv31/y96lTp5Camlpqv4eHB65cuYKUlBTk5ORg1apVOmXExcXh3r17euuWn5+P3bt366RPmTJFJ+3cuXM6aZMmTUJycjKSkpLw/vvv6+w/efKk3nN/+OGHOH/+fMm2cuVKnTweHh70w0IQJADNj1QqhUAgqDXXSliebt268V6XxyVy+Lp/qcjoH5URJrGoqKjG9InHjx/rpI0aNQo9e/YEAIjFYixbtgzNmzcvlefJkyfIz88HAJw5c0anjKVLl+K1114rKWP69OkYNGgQpwjUx549e1BQUFAqrU2bNmjTpo1O3hcvXuikLVmyBBKJBFKplNO9UU5ODu924loXW1HLHAiCBGAtQiAQwM7OrlYIQIFAAHt7+1ojdisTmUwGPz8/XnlfneZMS0vjHTPY3HGLq5oYq0lrx7KysnTS2rdvX+r/YrEYb7zxhl4BlZiYyEsccfULQyKMy/hDnwuevLw8nbSXp/25lgCYMvPw6tpXOzs7dOrUiX5UCMKC1FrHcA4ODrVKABIVg5ubGy9/d9nZ2aX+//fff/NaktC2bdsKif5RDJ9F/+ampkcDqV+/Pufv0asUW+7n5uaWSpdIJJz5GzZsyLsOT58+xanIU6XSrKysMHbsWLPdk5eP+fzzz0sZSnXo0KHk78ePH+tYfvft25dmLgiCBKBlRJGnp2eFuNGobIqdxNIIYMXAN+rFq1OrR48e5XVcRUX/eFWE1PRzViRCoe7Ey8CBA1GvXr1SacVhCO3s7EqlazQaMMZ0nukmTZpg/vz5pdKCgoI467BlyxYwlP7gGDFihN7+W5b10i/fx3bt2umNpsM1/VtRRk4EQQKwliEUCtGwYcNaIwB9fHw4XzqE+eE7avGyAFSr1Thy5Aiv44YOHVrjxVhttFgPDg7Wa0Vet25dnXvyzz//oGnTpqXSGzZsyMv/n0ajwW+//aaTPmnSJIMfzeYQulxwrX3lWs9IEAQJQLMIQC8vL0gkkhp/rRKJBF5eXjVGAGZmZmLdunVG83l6enI6s3327BkvP3MeHh681/OVeqB4htt7eW3drVu3ONeKcYn5mzdvIjY21mC+Vy1G9b54jx7T8TcXGhpqOQHIU0OQy6LSNGjQQCdt+vTp2LdvX5mmSc+dO6djnOLr64tu3boZ7Htl+e0xRkFBgY61cIsWLeDt7U03niBIAFpGALq4uKBJkybIzMyssVNOIpEIzZs3h7Ozc40RgPHx8bzcrQQEBHAKwF27dmHWrFlGj58yZQrWr19vcv34ipeX/ay9usZLHxqNBhMnTjRbWy5Zqmu5+aoANCuk68rEsGHDdFzFREREoFmzZpg9ezbee+89nWliQ3A5pR43bhyUSiUkEgmn2OMSmkqlsiSda60onzpFRUXpGBqFhITQTSeIitBCtfGiBQIBxGIxunXrVqNHASUSCbp37857VIooP3wXyzs6OlaL66mMvkPLFUrj7e3NaSUcHx+PadOmwc3NDZMnT8bVq1eNllVQUMDp+2/hwoWwtraGtbU13njjDZw6VdpAxNbWVueYv//+u5QgfRVXV1ej9Tl8+LBOGk3/EgQJQIu/ZLp16wYrK6sae41SqRSvv/56lV7rWFWn+8paL76uL6qLAKyMvlMb1uaayoYNG0qMQrhE3YYNG9CuXTsEBwcjOjpabzl79uwp8S/4MsUxilUqFU6dOoXevXtjx44dJfu5nDKPHj0aM2bMwIwZMzBmzBid/a+//rrRZ+xVAWhjY4OuXbvSDScIEoCWQyKRoF27dqhXr16NtJAVCoVwc3NDu3btqvQIYFFREecLSR9c/shMbRc+JCUllal8vs5vudx4VEVkMlmFn5NGrHVp1KgRoqOjjcZTjo6ORvfu3bFlyxbO/du2beN1Pq1Wi6lTp5YsT+DyyVdYWIhVq1Zh1apVkMvlpfbZ29ujS5cuBs8RFxeHZ8+elUrr06dPjf4oJwgSgFUAgUAAR0dHDB06tEb6m5JKpRg6dGilObw2pU1Nif3KN68+gcXlg40LPr78uDAUeutlKtKXX3moDB+Sr8ahJf6lRYsWuHHjBjZu3Ah/f3+D4m3KlCmIiYkplZ6cnGwwPBvXx0zxNG/Xrl3RokUL3seOGDHCqJAj9y8EQQKw0hCJRBgxYoRJC6irC3Z2dhgxYkSlrXE0ZeTo7t27vPOePXuWVz53d3fOdC8vL17Hx8fH4/nz5yZdc3JyMi+Bam9vDzc3t2rRj/iOVPIZMU3PSOdVlj5fdHyF4auWzVzwjTdc1T4OJRIJJkyYgNjYW
Jw6dQojR47kHDFVKpVYvHhxqbQTJ07oLG0ICgpCTEwMYmJiOH0GFq8rFIvF2LNnD6dFMhf6Ioq8DJfvSwr/RhAkACsEsViMgIAAhISE1ChjEKlUin79+sHPz6/SptNkMhlv8RAeHs4rn1wux59//skrr76RPn3C8FUYY1i4cKFJ18xlXclFUFBQtTF04LOQHwAuXLhgNM+NGzd4laVvdJTvvbty5YrRPMZc6RRTt07dqvnDLRSiR48e2LlzJx49eoR33nlHJ09kZGQpf5Nc17xjxw506dIFXbp0wfbt23X2v7w8w8/PD7du3cKKFSswevRoDB48GN27d9c5pnHjxkbX8WVnZ+vEOG7evLlJ0UwIgiABWGYEAgGsrKzw4YcfVptF+Xyuyd7eHh9++GGlrN96uR58/egdOnQIDx8+NJpv8eLFvNfm6Zsia9iwIe9oHZs2beItFB49eoTly5eXq27VWQD+9NNPSE/XP8IXERHBe/RWnw84vtP3P//8s8G1omfOnMHpqNO8yvLy9qry98jHxwe///47Bg4cqPPB9PLoOtf61GbNmnH+XcyrEWscHBwwY8YMbN++Hfv370eTJk10jgkNDTX6gXP8+HGdmM/k/oUgSABWKGKxGG3btq0xawGlUinefvtttGrVqtIX0+sLQ/UqWq0Ww4cPR3JyMud+tVqNJUuWmDQiN2zYML3tM3jwYF5lqNVq9OvXD5cvXzaY79q1axgwYAAvZ85V+UXHNQrOV6ympqZixIgRnFOrFy9eNMl/YePGjTnT+a6bTEhIQGhoKKcIjIuLw/vvv68TBk0fvr6+1eOHXCjknD7NzMws+ZvLQv1locYl2oqtg7nIz8/Hzp07dT78uPxvcn0QvMqrApYgCAvrn9reAAKBANbW1pg5cyZOnjyJJ0+eVNtIBMURTj7//HPY2NhUen169OiBX375hVfe27dvw8/PD6GhoXjjjTfg5uaGxMRE3LhxA7t27eI1QlhMu3btDK71Gzp0qF4ryVdJTExEx44d8eabb6JPnz7w9/eHvb09MjMz8ejRIxw9ehQRERG8/f/Z2Nigf//+pdI6d+6sV/yWhcGDBxsVrcC/8WBfFqNcArB9+/a8z3v69Gm0aNECH330Edq3b4+8vDxERkZiw4YNvJ2t+/j46IQ+e/m6lixZwqucffv2ITAwEFOmTEHbtm2Rl5eH8+fPY+3atTqOh/Xh8Hyl7gAAIABJREFU7OysN35tVYRr+jQ7O7vk71dH3Ph+nOnjjz/+0LHg79mzp9F1ghqNRsf9i0wmMxiJhCAIEoAWE04NGjTAnDlzMGvWrFI/mtVJyDo4OGDu3Lnw8fGpEr7U3nzzTdjZ2fF285Kbm4sff/wRP/74Y7nOO3XqVIP7+/fvjyZNmvAKCQf8ux7wwIEDOHDggFna5FVxbmVlZVajEL5uNLy8vIyet1mzZnB3d+ctUNPS0njFo9VHnz599O7r0KEDXF1deRtwxMfH46uvviqXkK6otcG//fabzvrWTZs2wdnZGdOnT0dBQUFJeseOHTF58mSdMrjq+vKoX1k+bA0ds3nzZp20sWPHGi3z5s2bePHiRam0Xr16kfU3QVS09qEm+L+1gCNHjsTIkSMrde1cWZHJZHjnnXcwbNiwKuNHy87Ozqyhy/jg7+9vNJyZVCrVsZCsKN5///1q1a9EIhGmTJlSYeczdC6xWIz33nuvwupiqXNxjardvXsX+/fvL7UVfzht3boV4eHhJdv+/fs5y311vR5Q2nCmLIZH+j4k4+LidNzM2NraYuTIkUbLJPcvBEECsMqJQDs7O8ydOxcdOnSoVlbBEokEHTt2xNy5c6ucS5uvv/6a9+J9c7B48WJeax+HDRtmNFKBuSmeRq5uzJw5k7cFbnl4++230aFDB4N55s6dy9uIpzz079/fLKKE63eEy5CJa4S1WHy9+kH39OlTznM9fvxYJ+3lpRBcdXlZjHIJU32/g7/++ivn/eMKGfcqL4eQe/nZIAiCBGDlNcb/j56xbt06tGzZslpEJBCJRAgMDMTatWtRv379KudexMnJCd9//32FnOujjz7CW2+9xfte79ixo8L88UmlUqxYsaJaPhf29vZYtmyZRc9ha2vLq584OzuXa4qZD2KxmLdFtzHq1aunk3b8+PFS/8/Pz0dkZKTOB2mx0H3VKvrOnTu4ePFiqTSNRoN9+/bpnOvl/s0lnO/fv1/yN5cTcy5H4EqlElu3btVJ5+P778WLFzp1b9y4Mac1MUEQFoYRpdBqtayoqIhdvnyZBQQEMJFIxABUyU0sFrMWLVqwK1eusKKioirbphqNho0dO9aibREcHMyUSqXJdYuJiWFWVlYWrZtAIGDh4eEV1t6vv/46r3qdPHnSpOdiypQpFmufv/76i3dd1Go1Gz58uMXu18aNG812L9LS0phQKNQ5x6xZs9iNGzdYVFQU6969u87+zp07l5Tx+eef6+x3dXVlmzZtYnfv3mVXr15l77zzjk6eBg0aMK1WW1LOqlWrdPK0atWKnT59mp0+fZq1atVKZ/93332nc0179uzRydeoUSOm0WiMtseWLVt0jp06dSq9eAiiEiABqOdlp1Ao2PXr11n79u2ZVCqtcuJPKpWyjh07suvXrzOFQlHl21SlUlnspd2tWzeWlZVV5rpFREQwJycni92rtWvXVmhbW0IAMsaYUqlkvXv3Nnv7LF++3ORrLCwsZB06dDB7XebMmWP2+9GrVy+T6/Hrr7+WHB8XF1emD9FZs2aVqsedO3dMLiM2NlbnegYMGKCTb968ebzaYvTo0TrHHjp0iF46BEECsGqJQKVSyeLi4lhISAizsbGpEsJPIBAwW1tb1r9/f/bgwYMqPfL3KkVFRWzixIlmbY8xY8aYpQ3u3bvH/Pz8zFo3Gxsbtn79+gpvZ0sJQMYYy8vLY6NGjTJL+0gkErZq1aoyX2deXh4bMWKE2UbTV61aVWrEzFycO3eOCQQC3nVp3749U6lUpcqYPXu2Sdfj5OTE0tPTderCNVKob3vrrbd0jn/+/DnntTx+/JjXB4Sjo6POh2x+fj69cAiCBGDVE4EqlYqlpKSw//znP6xOnTqc0zkVtQmFQla3Tl02Z84clpqaytRqdbVs1yNHjjAvL69ytYW3tzfbvn07r2knUwRFWFgYs7e3L/e9GjhwIHvy5EmltK8lBWDxc/HLL78wmUxW5vZp3Lgxu3TpUrmvVaPRsNWrV+sIC1O25s2bs9OnT1v0nvzwww+865KQkMA5gs5XvMlkMnbixAnOeuTm5rLAwECjZTRp0oRlZmbqHL9gwQKdvD179uTVBjExMTrH9u3bl140BEECsOqi0WhYbm4u27dvH3vttdcqfDRQIBAwGxsb1q5dO3bw4EGWl5dnVuFTGRQUFLBNmzZxrn8ytDVt0pQtWrSIFRYWWqxuGRkZbM6cOczX19fkUZf33nuPRUREVGrbWloAFvPo0SM2ceJEZiXlv4ayfv367LvvvjP7qE9WVhYLCwtjderU4V2Xli1bsq1bt+qMtlmKAwcOsCZNmuj9uJs4
caLBpQwajYb9/PPPzN3d3eBa2Bs3bhisR25uLhs/fjzntLJQKGTvvPMOp/hTq9WsUaNGOsfwXd/KNYq5dOlSesEQRCUhYKyahr0om8ELNBoNGGPQarUlTk6L/xUIBBCLxZzWv4wxqFQqpKenIzw8HL/++ivS09Mhl8stVl+BQACZTAbXuq6Y8sEUTJw4ES4uLnr9/KnV6lKhmwQCQYnFa/G/IpGoJL2qkJSUhMuXL+PChQt49OgRMjMzkZGRARsbGzg4OMDd3R0+Pj4YNmwYAgMDK6z+jDE8fPgQBw4cwMOHD5GWloaUlBTk5OSgbt26qFevHlxdXeHi4oIePXqgW7duVSKc4M2bN3k5S+7YsSOnlaep5Ofn4++//8b58+fx8OFDpKenIycnB2KxGA4ODvD09ETz5s3Ru3dvdO7c2aLW9Wq1Gjdu3MDhw4fx8OFDpKamIjU1FVKpFPXr10f9+vXRtm1bDBo0iDNyhqXRaDSIjY1FXFwc7t+/D5VKhUaNGmHgwIG8LdKVSiUuXbqECxcuIC0tDTY2NvD29kbPnj3RqFEj3nVJTk7G0aNHS6IfNWrUCCEhIXpD7mk0GkRFRek4h+7atSsvJ85xcXFITEwslda2bVu4uLiQNSZBVAI1WgAyxqBWq6HVaqHRaJCXl4eEhAQ8f/4cCQkJSEpKQl5eHgoKCiCXy2FlZYWpU6eiVatWen/QtFotioqKkJiYiG3btmHz5s3ISM+AokjBOxyYMSQSCaysrFC3bl1MmDABY8eOhbu7O6ysrDjdvDDGUFRUhLt37+Knn35CYWEhZDIZ7OzsYGdnBw8PD/j4+MDT0xM+Pj6wtbWFSCSCSCSqVv4OCYIgCIIgAagXlUoFtVqNgoIC3Lp1C9HR0Th//jxu3rwJuVwOjUYDjUZTMgpYvAkEAnh5emHpsqXo27cvbG1tOUebikcQVSoVMjIyEBkZid27d+PChQuQy+VQq9Ulm9EbIBBAKBRCLBZDKpVCJpOha9euGDFiBLp37446depAKpXq9cjPGENhYSFOnjyJmTNn4vnz59BqtRAIBCVb8cifUCiEtbU1AgMD0bVrV3Tp0gVBQUGws7ODRCKpFn4PCYIgCIIgAVhKCKnVaigUCty8eRN//fUXDh06hNTUVBQVFZkkyFxcXPDxxx/jo48+gpOTk8FRMo1GUzL1+uLFC9y8eRPnzp3DlStX8PDBQxTKC0sJzeJzFAszW1tbNGnSBO3atUOXLl3Qpk0buLq6lggyQ46d1Wo1srOz8csvv2DlypXIzMw0GLz9ZcEplUohkUhQr149DBgwAEOHDkWbNm1gbW1dJaYxCYIgCIIgAWhQ+KlUKuTm5uLEiRMIDw/H1atXIZfLoVQqjQoifSLJxsYGXbt2xaJFi+Dv7w9ra2uDYqx4VPDl0UWlUon09HRkZWWhsLAQBQUFYIzB1tYWtra2cHFxQd26dSGVSiEUCktGAo1F82CMQS6X4/79+/jf//6H06dPl8QNLcu1SqVS2NjYoHXr1nj//fcREhICJycnEoIEQRAEQQKw6lE8zXvy5EksX74cd+7cQWFhoVnW4gkEAohEIri6umLatGmYMGEC6tatq9cAw5AoLBahL48AAv9nlGGq2M3IyMCWLVvw048/IfVFainDj/Jcb/GIpL+/P2bOnImQkBDY2trS1DBBEARBkACsfF4eAfv6669x+vRp5OXlQaPRmCTuii1iBQIBBBAAAu681tbW8PX1xaeffooBAwbA0dGxwi1ptVptibXl6lWr8eDhAygUCr0jnMVTzsUC9GUhagyRSAQ7Ozt07doVX3/9NVq0aMHLyo8gCIIgCBKAFkGj0SAnJwe///47li1bhpSUFKMjYMXTnMXr3lxdXeHr64tGjRqhTp06sLW1hUwmMyjqioWgt7c3OnfubBb3GaYI3oKCAly8eBHx8fGQy+UGxVyxpXJhYSEyMjLw9OlTPHr0CC9evChZD2lserxYJNerVw8zZ87EuHHj4OTkZNKIJUEQBEEQJADLjUqlQkJCAv475784EnEE+fn5ekXMy2vbfHx8MGDAAHTu3BkBAQGoV69eqdG/4vy8Guz/+wo0tk7PEiJQpVLBlNv1soWzRqNBWloa4uLicOHCBRw+fBhPnjyBXC5HUVGRweu1s7NDSEgIlixZAm9vb1obSBAEQRAkACsGhUKB2NhYfPLJJ7h+/bpe0VI8UlenTh0MHz4cI0eOhL+/P2QyGW8ji5rIy9PBxSOAcXFx2Lt3L3bv2o209DTI5XJOgVkspgMDA/HDDz+gbdu2Jq2FJAiCIAiCBGCZxN/ly5cxefJkPHr0iNPIQyAQwMrKCu7u7vjwww8xatQo1KtXD2KxmJwd6xGExe5r0tPTsXv3bqxduxZJSUlQKBScx4hEIjRs2BDr169Hly5daF0gQRAEQZAAtJz4O3/+PCZPnoynT59yGnqIRCI4Oztj3LhxmDZtmsGoGYQuGo0GKpUKKSkpWL9+PcLDw5GZmcnZ1kKhED4+Pvj555/RvXt3yGQyakCCIAiCIAFoPpRKJa5fv46xY8fi8ePHnOv9rKysEBQUhEWLFqFjx46wtrYmQ4VyCEG5XI4rV67gq6++wrVr1zhHA4tF4NatW9GhQwdaE0gQBEEQJADNJ0b++ecfjBkzBjdv3tQZjSo2ThgyZAgWLlwINzc3WpdmBoqNTVJTUzF//nzs3bsXubm5OvlEIhFatGiBHTt2oFmzZuQrkCAIgiBIAJYPrVaLjIwMjB8/HidOnIBSqdQRf46Ojvjss8/w6aefwsHBgUb9LCDAc3NzsW7dOixfvhxZWVk6eSRiCbr36I6tW7eifv36Fe4bkSAIgiCIsiEKCwsLq2qVKiwsxJo1a7B9+3bI5XId8efk6IS5c+fio48+gqOjI631swBCoRAymQxt2rSBs7MzLl68qHMvGGNITU2FUChEx44daSqYIAiCIKrLe76qVUilUuHixYtYs2aNTnxbgUAABwcHzPrPLEyZMgX29vY06mRBiqfZx48fjzlz5sDR0VFHABYWFmL9+vWIiYkxS0g6giAIgiBqmQBkjCEnJwcLFixAZmamzn5ra2uMHz8e06ZNg52dHYm/ChSBU6ZMwaRJk2BjY6Nzz7KysrBgwQJkZWWhGoeWJgiCIAgSgJWBUqnEn3/+iWvXrukYfYjFYnTq1AlffvklHBwcSPxVggicNWsWgoODdQw+tFotbt26hV27dhmMKkIQBEEQRBV5t1cVIxCtVouUlBT06tULDx48KOXyRSAQwNPTE/v370dgYCA5dq4k1Go17ty5g7feegvPnj0rNdonEAjg6+uLyMhIeHp60rpMgiAIgqjCVJm3tEqlwqFDh5CQkKDj78/GxgbTp0+Hn58fib9KRCQSoVmzZvjss89ga2tbah9jDMnJyfjrr79oLSBBEARBkAA0DmMMBQUF+PXXX1FYWKgjOvz9/fHee+9R6LFKRiAQQCaTYcyYMQgMDNQZ5ZPL5diwYQNyc3NpLSBBEARBkAA0jFqtxuXLl/Hw4UPO0b9PP/0UTk5OtO6
viohAJycnfPrppzqjgFqtFs+ePcP58+c54zUTBEEQBFE1qBLhG9RqNQ4ePKgTdkwoFKJxo8bo168f+ZirQkgkEvTu3RvNmzfHtWvXSol2uVyOgwcPonfv3lVuul6lUmH+/Pk6jsW5sLW1hbu7O7y9vdG5c2e4uLiYdK6oqCgcOnSIc9/YsWMRFBREHamKcunSJezatYtz33vvvYfWrVvX2nu9evVqPH/+XHckQSjE0qVLLXbenJwcHDx4EH///Tfi4+ORlJSE3Nzckme0S5cuGDFiBJo3b07PZy3rG0Q5YJWMVqtlqamprHHjxkwgEDAAJZuNjQ1bsmQJk8vljKhaKBQKtmLFCmZtbV3qngkEAubt7c2Sk5OZVqutUnXOzs4uVVe+m0gkYj169GDbtm1jGo2G17mWLVumt7ydO3dSB6rCLFmyRO+92717d62+161ateK8TqlUapHzyeVyFhYWxqysrHg9qwMHDmR37tyh57MW9A2i/FT6FLBarcb9+/eRmZGps27M2toaAwYMoDizVXHoWCxGv379OI1BcnJyEBsbW2OmgTUaDaKiojB27Fh06tQJt2/fpg5AEBYmISEBbdu2RVhYGG/3UocPH0ZQUBA2bNhADUgQRqh0AajVanH16lUoVaWn5UQiEQICAtCgQQMSgFWx4wiF8Pb2RqtWrXTiMKtUKly5ckVnPWdN4PLly+jevTvOnDlDnYAgLERiYiK6d++OuLg4k49VqVSYPHkyFixYQA1JEFVdAF66dEnHdYhEIkFwcDCt/auiCAQCiMVidOvWTWetn1qtxoULF3ScedcUsrKy8Oabb+LJkyfUEQjCAu+E8ePH4/Hjx+UqJywsDHv27KEGJYiqLABv376tM10okUjQoUMHcihclTuPUIgOHTroCECNRoM7d+7UWAEIALm5uQgNDSV3NwRhZjZv3owTJ06UuxzGGKZOnYrc3FxqVIKwlABkjJVp02q1KCwsREpKis6LVCQSoWHDhuUqnzbLbkKhEA0aNNCZAtZqtcjKykJubi40Gk2Zy69IbGxs0KxZMzRr1gwNGjTQiXnMRXR0NGJiYuhXhCDMBGMMK1euNJrP0dERrq6uRvOlp6djxYoV1LAEwYG4rA+pUqmEVquFUqmEQqEo0wtbq9Xi6dOnUCl1I0cUTzFmZGTQXarKXxBCIecorVqtxoMHDyAUCsvkv1EoFEImk0EsFpdslhwN7t69O44cOVKqbz5+/BgbN27E8uXL9UY3Wb9+PV5//XWd9HHjxqFr166cx7Ro0YI6Tg2C7rX5uHDhAmJjY/Xuf+2117B582a0bNkSAJCSkoKlS5caFI07duxAWFgY3TOCKI8AZIxBpVIhLS0NR48exYEDB3D16lXI5fIyf+1pNBrk5ukO0WdnZyM4OJimgKs4Wq0W2dnZOum5ubkYMmRIuXwB2traok2bNhgwYAAGDBgAV1dXWFlZVZiwbdKkCb799lu89tprGDFiBGe+kydPcqa7urryGqEgqj90r83H33//rXdfmzZtEBkZCQcHh5I0Nzc3rFixAmq1GmvWrOE87sGDB3j+/Dm8vb3pnhFEWQQgYwyFhYWIiIhAWFgY4uPjoVAoSqb4zA1jDFlZWXSHqimMsXKvvcnMzERycjJOnjyJ1atX4+uvv0b//v11XM9YmqFDh6J9+/a4fPmyzr7k5GQkJSXBw8ODbjpBlJMrV67o3Tdz5sxS4u9l5syZg59//lnvSH1CQkIpAUgQBM81gMXiLzw8HNOmTUNcXBzy8/OhVqtpETxhURGpUqmQn5+Pe/fuYerUqdi4cSMKCgoq9iERCjFp0iS9+5OSkuhmEYQZuHv3Lme6RCLBsGHD9B7n7u6Ovn376t2flpZGjUsQZRGAKpUKUVFRWLRoEdLT0mukfzeiaqPVapGRkYFvFn6DU6dO6f3StxR+fn5692VmZtINIggz8OLFC850Dw8PWFtbGzy22GhQ38ckQRAmCsDiqbwFCxYgPT0dDPQgEZUDYwwZmRn4+uuvkZ2dXaE/6l5eXnr3lXUNLEEQ/4dSqURhYSHnPjc3N6PH05o+gjANo2sAVSoVzp8/j/v379PIX1kUtlAIe3t7HUtYxhjy8/NrtK88S6DRaPDw4UOcP38e/fv3L5eRiSmYGtbu2rVrOHv2LOe+AQMGoGnTpkbLKCoqwr59+3D+/HmkpKQgISEBVlZW8Pb2hpubG4YPH4727dubtT7Xr1/Hpk2b8OzZM6SkpMDDwwNNmzbFtGnT0KBBA51jHz16hE2bNuH+/ftISEiAUCiEp6cnOnbsiNDQ0DK9lLmuWywWo379+mjevDmGDBmCoKAgkw3E1Go19u3bh4iICDx9+hT5+flwc3ND48aNMX78eLRu3bpMfcMc9xoAFAoF9uzZgytXriA5ObnkftevXx9+fn4YOnQoAgMDTbaqz83NxdGjRxEZGYnExESkpaVBJpPB1dUVAQEB6NevHzp27Fgma31zIpFIIJPJoFAoOH9HjdG0aVMEBgZy7rO3tzfbPXu1HxUUFKB+/frw8fHB2LFj0bFjRwD/Gp9ERERwljFw4EA0adKk3M9ocnIyXFxc0KJFC4SGhqJVq1Ymt3t16R+EZUZVjAbjnjVrFu9g3LT93yYUCpm/vz+7ePEiu3XrVqnt4sWLzN/fnwmFQmorEzeZTMa++OILJpfLTQp8nZ2drbfM/v37Gzw2KipK77ERERFmDTavUqnYwoULmaurq9G2aNOmDTt06JDRazdWn8LCQjZq1Ci9eSQSCfvmm2+YVqtljDGmVqvZ7NmzmUAg0HuMtbU1+/HHH0uOMYYp192hQwcWFRXF+94nJiayLl26GCxzzJgxLD8/ny1ZskRvnt27d5v1XjPGWFFREZs7dy5zcXExet1dunRhMTExvK65sLCQLViwgDk4OBgtNygoiB08eNBoma1ateI8XiqVmiU4va+vL2f5TZs2ZeakrPeMTz8aMmQIy8vLM3s/MvaMAmCTJk1iRUVFldI/LN03CPNjVAAWFBSwIUOGMLFYTOLDxE0kErH27duznJwcplKpSm05OTmsXbt2JADLsEkkEjZ06FBWWFhYYQJw3bp1eo+Njo422wsmLS2N9ejRw6T2EAgELCwsjGk0mjK98LZs2WL0pVa8TZ48mRUUFLBhw4bxrt93331n9N6U9bq//fZbowIzNzeXBQQE8Cqzffv2LCwsrMIEYFJSEuvatavJH5YrV640WO6zZ89YmzZtTH62Zs6cabAfWfol361bN733OikpqVIFoCn9KCgoiH311Vdm60emPKPvvvuu0eu3RP8gAVhDBWDfvn1JAJZDAObn5+u0a35+Pmvfvj0JwDJsYrGY9e3blxUUFFSYAOzUqZPeYxMSEszygikqKmIdOnQoc7t88803ZXrheXl5mXSeOnXqmCzUrl+/bnAErDzXvXz5coP37j//+Y9J5VlbW1eIAJTL5ax169Zlvu61a9dylpuVlcWaNm1qkX5k6Ze8oRGuWbNmVaoANLUfiUQis/UjU5/R/fv36712S/UPEoDVD95uYMiKiq
itnDhxAhcuXODc16xZM7i7u5vlPPPmzcOlS5fKfPz8+fP1riEyREJCgkn5TY3OwxjDwoULLXbdc+bMwY0bN/Su11q/fr1J5VWUUc8XX3yBmzdvlvn4zz//HA8ePNBJHz9+PB4+fFiufnjt2rVKedZ69Oihd9+yZcuwYMGCCvcAUNZ+ZM713aY+o6tWrdK7rzr3D8K8UJgNgtBDUlISli5diqFDhxr8MTVHtJqkpCR8//335SpDo9Fg7ty5VbItIyIiOBf3m+O6VSqVXoH54MED5OTkVLn2ePLkCdauXVuuMhQKBRYvXlwqLSoqCvv37y9XuVqtFvPmzauUdhk+fLjB52n+/Pnw8/PDqlWrKjRQQFXtR/o4c+YMpyP+6t4/CBKAOggEAshkMtjY2EAqlZa5HIlEAmtr63KVUVnXb21tDVtbW8hksjJZawkEAlhZWcHGxgYymazWheCLiIhAs2bN0KxZMzRu3BgODg7w9PTE7NmzkZ+fz3mMl5cXpkyZYpbzb9u2zWRLY30//E+fPq1y7SuXy3H//n2LXfdff/2F5ORknfRHjx5Vyf62ZcsWs8yqbN++vZQw+fHHH81SvyNHjuj1yWdJXF1d0adPH4N5Hj9+jBkzZsDDwwOhoaGIjo62+AxVVe1Hhj4GuUaHq3v/IEgAlkIkEqFJkyb4z3/+g5UrV2LSpElwdXU1SQQJBAI4OTlh6NChmDt3LiZOnAgPD49qIQRFIhEaN26MOXPm4JdffsHs2bPRoEEDkwScQCBA3bp1MWnSJKxcuRKzZs1C48aNIRKJatXD8PDhQzx8+BBPnjxBXl6ewbw2Njb466+/UKdOHbOc++LFiwZfijt27EBGRgaeP3+O2bNnG+zfZ86cMfn8ISEhiI2NRXZ2Ni5evIg2bdoYPaZv376IjY1FTk4OLl68aNSNClc0BnNdN2MMkZGROunGnHQPHjwYd+/eRXZ2NiIjI9G8efMK6WuGrtvd3R179uxBVlYW4uPjMX36dL15lUplyfIEuVyOI0eO6M3brFkz7Nu3D9nZ2YiPj0dYWJjevIwxHDt2rFKew//+97+88ikUCmzduhXBwcFo2bIlVq1apfdjrbwY60ft2rXDtWvXkJmZiaNHj1ok7Jypz2h6errOR1hN6B+EGeFjBBISEmJwQSsq0ciiVatW7M6dO6ywsJApFAqWm5vLDh8+bNJCdWdnZ7Zz506Wk5PD5HI5y8/PZ7du3WLvvvsuc3Z2LrMBjCWNQAQCAZPJZOy1115jN2/eZAUFBayoqIjl5+ezS5cuMR8fH4MuOl4up27duuzIkSMsLy+PKRQKVlBQwGJjY1lgYGCVNFKxhBGIKVtAQAC7ceOGWReZt23b1qQF3aGhoXrz/+9//zOpPu7u7jquI9LS0pidnZ3eYzw9PZlSqSx1zIsXL5itra3eY/bu3WvR654xY4ZO/p9++klvfl9fX6ZSqUrlT0hIYPb29hY3AmnevLneY47j7ijwAAAgAElEQVQdO6aT35DV9dy5cxljjF2/ft2gEQGXFe3EiRMNWnxW1kL/RYsWlenZdHZ2ZgsXLtTpm+W9Z4b6kYODA8vOzi6VPy4uzuBvp6n9qCzP6KvnsHT/ICOQGmoEUlWRyWT4+OOP0ahRI1hbW8PKygp2dnYIDg7GoEGDeDkJlkqlGDhwIPr37w8HB4eSqWR/f3+sW7cO27dvR8+ePeHo6AixWFzp1ywQCCCRSFCnTh2MGzcOu3btgp+fX8n0t42NDQICAvDuu+/yGsGUSCQYNGgQXn/9ddjZ2cHKygrW1tbw9fXFxx9/DJlMRl9JrzBw4EAEBASYtcxXv9aLcXJywuDBg3XSDcUmNjZ6+SojRozQ6St169bFiBEj9B4zfPhwnefL1dUVw4cPr7Tr5loTplQqDV73q8+0p6cnRo4cafE+pC82raenJ+cU6Pvvv6+3rOzs7JIRbH188sknnMZKoaGhRsutDIpnNKysrEw6LisrC//73//QoUMHxMfHm60+hvrRqFGj4OjoWCrNz88PQ4YMMdv5y/KMcs1w1JT+QZgHcbWuvFiMgICAUi8igUAAqVSKwMBAiMVioxZjYrEYQUFBpR4ugUAAsVgMOzs79OrVC507d0Z0dDQ2bNiA6Oho5OfnQ6lUVmhkFIlEAqlECnsHe/Tr1w+TJ09Gq1atYG1tXWqqtrjuTZo0gVgsRlFRkdHrDwwM1Ll+kUgEf3//KiF6qxrLli1DVFQUTp06BVtbW4sKgmbNmnGmG4p7aupi9UaNGnGm+/j46D2mcePGJh9j6evmWvRuyBJT7zkaNLRo/1Gr1XoNGPRFoGjevLneKT8nJycAQL9+/XDu3DnOPC1btjS5PSvzBS8QCDB58mS0a9cOX375pcnTjTdu3EDPnj0RHR0NDw+PctenLP2IbwQYSz2jr1KT+gdBAhCA/hBBUqmU1zpAjUaDpKQkqNVqna/N4tE2BwcHhISEoHv37nj06BEOHDiAvXv3Ij4+HkqlEkql0iwL2YsRCUUQioQQi8UQi8WwtbFFx04dMXDgQLzxxhtwc3ODVCrVO8JZHC6NrxsCfeVIJBIK/6OHy5cvY/To0di3b1+5DWaKior0uh6xtrbmTDc0MsslhAyhL86qIXGr7xgbG5tKu24uDBkH2NnZcf92WFl27W9hYaHeeum7bl9fX6OuN+zt7dG5c2eT6mLoflUF119t2rTB0aNHERMTg2XLluHgwYO8P7yfPHmCiRMnIiIioty/Y4bawtPTkzPdxcXFbO1Qlme0NvQPopYLwPI+2EqlEnv37sW4cePQrFkzzhdMsRAsHnH08/PDp59+igcPHiAmJgbR0dG4evUqcnJyoNFooFarodVqS0bj9NWxeMqaaRmEQiGEIiEkEgm8vb3RokULtG7dGq1bt4a/vz8cHR0hkUggEokMGmcolUrcu3cP27dvNzr6R/wfwcHBCA8PLxmhycnJwc2bN7Fnzx6cOHGC85iDBw8iIiICAwcOtNjLpSLKK4uxjzkMhGrrC4RenKbTtWtXdO3aFc+fP8f69esRHh6OlJQUo8cdPXoUZ86cQffu3S1Wt4owlqttBnkECcAK+zFOSEjA2LFj8e2336JHjx4606qvCkHg3xHGoKAgtGrVCh988AGUSiWSk5Px7NkzJCYmIjU1FVlZWXBxceEsSywWY9q0aVAqlXB0dETdunVLNplMBpFIVDIVKxKJjI4yMcYgl8sRGxuLDz74AImJifSiMQE7OzudKZtOnTrhgw8+wA8//KDXEnP37t3lFoAEYWlUKhXS09ORl5eHnJwcqNVqODk5mdVZcUXg7e2Nb775BvPmzcNff/2F1atX4/z58waP2bZ1m0UFIPUPggRgNUaj0eDu3bsYN24chg0bhs8++wyNGjWClZWV3i+vl8Ug8O/UlJ2dHXx9fUumKIoFGJcxhlQqxbBhw0rKKt6KhZ+pD29+fj7+/PNPLFiwAImJifTgmpFPPvkEv/76K2JjY3X27du3DyqVipfBEcGfM2fOcK6tMudSi5pObm4ufv75Zxw/fhwxMTEVFuGkI
pBKpRg1ahRGjhyJAwcOYOzYsXpdwJw5e4Y6Qy3rHwQJQJNFYFZWFn777TccOXIEoaGhGDduHHx8fCCVSo0aQ7w8WseHYsfLZYUxBrVaDYVCgWvXrmHZsmU4e/Ys8vPzK9Q4pTYgEAgwevRofPXVVzr7cnJykJ6ebrZwcMT/9e/yhKuq7Wzfvh3Tpk2rVtEryvpsvvXWWzhy5Ai6devGmefhw4fIy8uDvb09dYxa1j8Iw1AouFdeOsVTuatWrULPnj3xxRdf4MKFC8jJyTG7sUdZ6qdSqaBQKJCRkYGIiAiEhoZi2LBhOHr0KHJzc0n8WYhevXrp3afPlQlBVAbr16/Hu+++W6te7sHBwejatave302KWlG7+wfBDY0A6vnBUCgUSE1NxcaNG/HHH38gwD8Aw0cMR69evdCoUaNSBhmWCptWPMqn1Wqh0WhQWFiI2NhYHDt2DPv37UdCYgLkcnmlBEevbdSvX1/vPnq5EFWFhw8f4rPPPquV1x4YGIiYmBi9z6ivry/1j1rcPwgSgCYLsKKion/DLV28gOs3ruObb75B06ZN0bNnT3Tt2hX+/v6oV69eibWvUCgstaav+P9caDSaUusFX940Gg0yMzPx6NEj3L59GxcuXEBMTAxycnJK6kSjfRWHIfcjlgo/RRCmsmzZMigUilp57a86Y36ZwsJC6hy1vH8QJADLLASLR+DkcjmuXLmCmzdv4qeffoJEIoG7uzsCAwPh7+8PHx8fuLu7w9XVFXZ2dnBwcEC9evV0RgnVajUePXqEtLQ05OTkICsrC8nJyUhISMDTp09x7949ZGRkQKVSQa1WQ61WQ6VSkWVvJUFiu2Lx9vbGmDFjTDrG3NFZqhsqlQp79+41mOe1117DpEmT0LJlSzg5OSEuLq5CIp9UBIbWVJPoof5BkAA0mxjUaDQlPyrZ2dl48OABxGLxv/78hMKS6eGgoCAcOHBAx6FmUVERpk6diuvXr0Oj0YAxBq1WC61WWzLtS6Kj6kAjCBVL48aN8d1331FDmMCjR4+QmZmpd394eDgmTpxYKs3Q0oaKJjo6Gh988AHnvs8//9xgODzAsE9Y+nCu/v2DIAFYJSkWa6+uxROJRMjNzdX745Ofn0+GGzVAAJorHBxBlIf79+/r3TdkyBCdl3tVIyUlBXfv3uXcl5GRYfR4Q47vHRwcqH9U8/5BkAC0GCKRCDY2NhCLxSWje4YCgFclJBIJbGxsIBKJSqaqyTDE/F/P+nB2di5X2eY2IqouUQMsZTzFF33PiKVHiyx13Y8fP9a7b9CgQdX6+cvLyzOax9DvNQnAmt0/CBKA5RJQnTp1wuzZs+Hn54fk5GSEh4fjwIEDJeHdqiICgQC2trYYNmwYpk+fDk9PTzx9+hTLly/HwYMHKRScGdEXDs4cAlAqlcLOzo7TmERfXN+CggK95RlaDF+VMHTd+p65vLw87N69m3Ofv7+/TqxTQ9OC8fHxnOmWtuq2sbGBUCjkHPnXNxuQnZ2NP//8k3Nfy5Yt0aFDB4MiycPDgzPdXG6tzCGaDT1HT548MXp8YmKi3n3lnco01I+ysrI406vaspHK6h80/U4CsEqPQjRu3BgbN26Et7c3xGIxfHx80Lp1awwfPhyLFy/G7du3UVBQUKWmakUiEZycnPDee+/hq6++gpOTE0QiEVxcXLB69WokJyfjwoULFBHETF/Ov/32G+c+W1tb+Pj4lPscrq6unELowYMH0Gg0OqN6hpwkV6fRDn3X/c8//3DmP336tN61YKGhoToCkCsKTzF37tzhTL99+7bFn906deogLS1NZ5++keajR4/qve4pU6agQ4cOBgVHQkICZ/rz589N/r3kQqVSobCwUGetsykYcqZ+4MABZGRkoE6dOpz7FQoFjh8/rveDyM3NrdwfK/rQN21d1RyZW7p/WLJvEBbSP7W9AaRSKYYPHw5PT8+S0G8SiQR2dnYICQnBvn37sHr1agQFBcHBwcFoRBBLI5FI4OTohM6dO2Pz5s1YuHAhXFxcStzQSCQSuLi4YPjw4QZ/tAh+XL16FQMGDNDr6qVLly5m6ROurq6c6QUFBdi8ebPOF/Xq1av1llWdIh7ou+6UlBQcOXKkVJpWq8XatWtNGkEy5L5n7969OqOAZ8+e1SskKuK6Hz16hFOnTpVK02g0+Pnnn/WW5eTk9O/XvIF+ePLkSc70yMhIk+ptaHQ5Li6uXG3SqFEjvSIhLy8PH3/8Mec0L2MMc+bM0Wvg0LZtW5PDa5rSj3bu3Knz+xAbG6t3pLrSRnss3D8s2TcIC/WJWq+AhUI4OzvrfL0IBAJIpVLUrVsXY8aMwaBBgxAVFYXNmzfj0qVLKCgogFKptPgIm0AggFgsLpku69atG959910EBwfDzs6OU+QJBALY2NiU+0evNnHv3j3897//LXnhZmdn4/r167h8+bLB40JCQsxy/iZNmuDSpUuc+z766CMkJSVhwIAByMvLw+rVq3H48GG9ZbVo0aLatLuh6x49ejTm/b/2zjwsqvL9/+9ZmI19RxQREdTUEBJMrVzSBHJBMyvNrbS6KjOXxMqPueZWfV1aTDOz/GqJuRGamYiSmi1gfsR9AUQQFYad2Z/fH/1mviJzhplhZhjgfl3XXMpZnnPOc9/znPc8y30vWIBBgwahvLwcX331FQ4ePMhZVlRUVL1tPj4+nMcrlUr0798fy5cvR+fOnXHixAmDD9ibyMhIoz1HjDE888wz+OCDD9C/f3+Ulpbiiy++QEZGBmdZjzzyCADAz8+P85iUlBRMnToVgwcPrtNDtXr1aovu29Qw7RtvvIFt27ahU6dOVtWJVCrF8OHD8cMPPxjd//333+P69euYNm0aevbsCZlMhosXLzboF0OHDm20vUz5UXFxMfr3748FCxYgNDQUv//+O95//32nm4dtb/+wp28QJADtglqtxokTJzBt2jSjcaT0OXv9/PwwYsQIxMfHIzc3F4cOHUJaWhrOnTsHhUJhiNWnD+lijdC7P3yMUCiEUCiEv78/+vXrh0GDBqFv374IDAyEi4sLXFxcjAo8fRaTo0eP0kIQC7hx4waWL19u8QurodAU5tK3b19s376dU6gsWLAACxYsMMuPuHKiOiOmnruiogJz5swx+/tj7EUfEhJi8ry8vDyL4w3a6rn37t1rdJ9cLjc7W4NAIDCkKWzXrh3ncTqdDgkJCZgyZQq6dOmCvLw8fP311xYHMW/bti3nvtOnTyMiIgIdOnRAZmamyfvh4pVXXuEUgADwxx9/cP5g4MIWcewa8qOsrCwkJSU59XfN3v5hb98gSADaHI1Gg/T0dGzduhUTJ06Eu7u70VWUeiEoEonQtWtXREZG4rXXXkNxcTH++9//Ijs7G//88w/Onz+P0tJSaLVawyINY0KNx+PBzc3N8MvSz88P7dq1Q3h4OMLDwxEZGYmIiAgEBwdDJBIZBKGpFYSMMVRXV2PXrl04dOgQCUA789ZbbzV6AYie8ePHY+7cuY2eOD5ixIhmFbvLVs/97LPPGp3IHhkZCR6P53QT0SdNmoT58+c3
OtLAhAkTDPPiHnvssQbbuk2bNjXqegMGDMBnn31m8pjc3FzU1tZaVf6gQYPw9NNPm+zhtlT8hYWFNbocZ/UjS7C3f9jbNwgSgDaHMYbKykr85z//wR9//IH33nsPHTp0gEQiMSq29EOy+mHZDh06ICQkBPHx8YZ4gFVVVZDL5aioqDAc9yAikQibN2+GUCiETCaDWCyuk0ZOH1Da3PllGo0GlZWV2Lx5M1asWEGJvu1MdHQ0Fi9ebLPyvLy8sHz5csyYMcPqMlxdXZtd8GRbPLebmxuWLFnCWX5cXBxOnz7tVM8dEBCARYsW4d1337W6DG9vbyxcuNDwd1BQEGJjYxucttAYhgwZAk9PT7u2Lxs3bkRcXJzJVb3mIJPJLB7iNuWnzuhHlmBv/3CEbxC2hU9V8H8iMCUlBfHx8Vi7di1u3bqF2tpakyt/eTweBAIBRCIRJBIJZDIZ3NzcEBgYiMjISMTExODhhx82KuIEAgHat2+Ptm3bwsfHB25ubpDJZJBKpZBIJIZev4bQx/07d+4cpk6dikWLFqGkpISW3tuRqKgopKam2nyRzZtvvonhw4dbff5nn32GLl26NLv6bMxzCwQCbNu2DZGRkZzHvPbaaxYLSkfwzjvv1JlzZdEvd6EQO3bsQGhoaJ3t5kwTuJ/ExESLjvf09GyUaDWH4OBg7Nmzx+SctYYQi8XYs2ePTVboW+tHplY1NxX29A9H+AZBAtBuIlClUqGgoABLly7FwIEDsXbtWuTm5qKmpsaieEh6Yaifq2fqGH2vn6X3qlarUV1djatXr2Lx4sVITExEamqqxXN6CPMRiUR4/fXX8dtvv5mc72L1l5HPx86dOy2OyC+TybBt2zZMmjSpeTZCVj63u7s79u/fj5EjR5o8bsKECUhISDCrzGHDhiE5Odkhzy0QCLBv3z6L5yB6eXnh4MGDRuc8Dhs2DLNmzTKrnMFPDsbGjRstvu85c+bY3ddiY2Pxxx9/oGfPnhaf27ZtW+zfv99mC7Ss8aOkpCRMnz7d6b5r9vYPR/gGQQLQrkKwpqYG169fx5IlS/D4449jzpw5OH78OORyuWHBR1Pcl1qtRm1tLeRyOTIyMjBz5kwMGDDAEPeP5vzZ5yXdv39/LFq0CJcvX8Znn31m1x4iiUSCzZs3Y/v27YiNjW2wF2js2LH4/fffMX78+GZdz5Y8t0AgwKRJk5CTk2NWD4VAIMCuXbswZcoUk+I+OTkZe/bscWj4JL1437p1K6Kjoxu099SpU3H+/HmTPYerVq3CmjVrOEOq8Hg8TJkyBWkH0qx6VoFAgM2bN2PTxk1GV17birCwMPz555/YsmULIiIiGjw+ICAAc+bMwYULF2wu/sz1I7FYjEWLFmHXrl1Om5HHnv7hKN8gbAOPNTBWWFNTg1GjRuHIkSNOF1TY09MTP//8M3r16lVnuFSpVGLjxo2YN29eoyeX68PBSCQStG/fHsOHD8eTTz6J7t27w83NzdCLp//XFmi1Wuh0OjDGoNPpoNFoUFpain/++QdHjx7FwYMHUVRUhNra2kZPIpfJZFixYgVeeeWVOqug1Wo1/v77b8THxzvdnA6hUIgnn3wSu3fvtii4qFqtxrFjx8xqxNzd3REcHAx/f3/OXlxTFBUVcQYajomJMRlW4n6ys7Px22+/4ebNm7hz5w7EYjECAwPRsWNHJCQkmL3gw5r7yc3N5QzI3KNHD6PXNnXOww8/jICAAKueWx/fMioqCvHx8WbX34PcuHEDO3fuREFBAeRyOTw9PdGpUye88MILhmDBlj63rWzNGENWVhZOnjyJ/Px83L17FyKRCN7e3oiJiUF8fLxFWV7KysqQlpaGnJwc3Lp1CxKJBB06dEBSUhK6du1qaGsyMjKMThnx9/c36yV+7tw5nDhxAleuXEFlZSVUKhU++eQTmy2Q0tfNpUuXcPDgQRQUFKC0tBR8Ph8+Pj7w9/fHY489ht69e5stuhprs/v9qLy8HL6+voiIiMDzzz9vOHfVqlWcvckpKSkYM2ZMk3xHHekfjvANggSg3QTgg8LAxcUFYrEYHh4e6N27N2JjY9GzZ0+EhYUhICDAEJ7l/o+xX1H6IWfGmOHLpdPpoFarUVJSgps3b+L69ev4559/8Pfff+PSpUtQKpVQq9VQqVQ2y0rSmgQgQRCEI3vaLBGABOHwdylVgWU9c1qtFgqFAhUVFbh9+zbS0tIMq4I9PDwQFhZmWNzh7e2Njh07Yvjw4fVEoEqlwnfffYfLly+jpKQEJSUlyM/PR1FREVQqFTQajaH3T/8hCIIgCIIgAdiE6Ofk3T/vrqysDAUFBXWGhWNiYpCQkFBPAGo0GmzduhVZWVkGsaf/EARBEM6DXC5Hfn6+0X0hISFGh2dpTjZBArAZoV+Za60QY4wZegmBf4eMFQoFZ0gWpVIJhUJhE9GnzyCiF5MEQRCEbUhLS8OECROM7nvvvfewbNmyetuLioo4y6M87QQJQCcSfhKJBG3atEFERAQKCwuRl5uHquoqpxdTPB4PHh4eiI6ORnh4OC5evIizZ8+isrKSDEsA+Dd91s6dO43umzBhgk1W6y1fvhwlJSX1tkulUs4gzUTLR61W44MPPjBrsZpIJIKPjw+Cg4PRp08fdOjQwWnymevzLRtj8+bNmDNnTp2FDUqlEnv27OE8x9zFUETjyMjIwE8//WR034svvmhVmCESgC0MmUyG6dOn46233oKnpyeUSiV++eUXLFu2DNeuXbPpQhJbwufzERAQgE8++QSJiYkQiURQKpVISUnB3LlzUVZWRsYlkJGRgY8//tjovkcffdQmAnDz5s24du1ave0+Pj4kAFsxNTU1FufY1tOuXTu89dZbeP311+Hq6tqkzxEeHg6pVGo0jVlxcTEef/xxrF+/Hj169MCdO3cwe/ZsFBYWcv5o79y5MzmHA/jrr7842764uLhWLwBbfRxAoVCIPn364J133kFgYCBkMhm8vLyQlJSEAwcO4N1330X79u0hlUqd5p71OYajoqKwZcsWjBw5Ep6enpBKpfD09MRzzz2HkSNHWhW+hCAIwhkoKCjA3LlzERkZ2eQp2EQiEcaOHcu5PycnB4MGDYK/vz+6deuGn3/+mfPYmJgYCoNCkAB0BlxcXDB48GC4uroa4vjxeDyIxWIEBwdj9uzZOHLkCJKTkxEWFgZXV1ez8/Pa417d3NzQqVMnfPDBB0hNTcXAgQPrhELR33u/fv1onglBEM2ewsJCDBo0CAcPHmzS+3j11VdtUs60adPIqAQJQGeBK+cvn8+HRCJBWFgYkpOTkZmZifXr12PgwIHw9fWFTCazqxgUCAQQi8Vwd3dHYGAgRo4ciS+//BLHjx/Hm2++iTZt2tSJ3aeHMYaysjJaDEIQRIugpqYG48aNQ15eXpPdQ58+fSxO2/cgXbt2pVRphNPQ6ucAqlQqpKamYtq0aQgKCqoXSV6/MlggEKBNmzYYN24cxowZg5s3b+LYsWM4cuQITp8+jaqqKkO8Pv0q4vuDQXMJPJFIBMYY+Hy+4SMUCiGVSvHQQw8hLi4
OvXv3RnR0NHx8fODi4gKhUMiZdUSn06GkpAQpKSlQKpXk4QRBtAjKysowYcIEHD9+vMnu4dNPP0VWVhYuXrxo8bkeHh7YsWMHJBIJGZMgAegMaLVa5OTkYMaMGVi2bBlCQ0M5v6B8Ph9isRgikQidO3dGeHg4Jk+ejKqqKuTn5+Py5cu4evUq8vLyUFhYiNLSUoSGhhoVgHw+H7169UJYWBi8vb3Rpk0btGnTBu3atUO7du0QFBQEqVRaRxQ2lGpOo9Hgzp07mDt3Ls6ePUs9gARBOC0ymQzt2rUz/F1aWoqSkhKYSk6VmZmJv/76C7169WqSe/b29saxY8eQkJCArKwss8/z9/dHWloa5cclSAA6G/pewH/++Qdz587F6NGj4e7uzjmH7v5eQeDfRPZeXl7o3r27IbWb/qMPMfMgEokEq1atMhxz/0cfRNrcEAiMMSgUCly8eBHz5s1DZmYm9f4RBOHU9O/fHwcOHKizrba2FqmpqZg1axZu3bpl9LwNGzbgq6++arL7DggIwMmTJ7F48WKsW7cOVVVVnMfy+Xy89NJL+PDDD+Hv709GdzCTJk1Cv379jO7r1q0bCUBykf/Ly3vt2jXMnj0b3333HWbOnIkBAwYYFn2Y6n3j8XgWr7jl8XiNXlnMGINSqYRcLsf27duxZs0aFN8uhlrT8iPQN9QbShBE80MqlWLs2LGIi4vDww8/bDSe6aFDh5r8PsViMZYtW4bk5GTs2bMHp06dQkFBASorK+Hr6wt/f3/06tULSUlJJPyaEH9/f6p/EoDmC6qqqiqcPHkSZ8+eRY8ePfDKK69gyJAh8PT0NMy/a+p71Gg0UKvVKCkpQWpqKjZs2IDr169zLmYhCIJoTnTo0AFvvPEGVqxYUW9fQUEBSktLjaZfczQeHh6YNGkSLewgSAC2FBGo1WpRUVGBU6dO4cyZMwgMDMTo0aMxfPhwdO/e3TA3z5x5eba6H51OB61Wi6qqKmRnZ2Pfvn1ITU1FaWkpCT+CIFocI0aMMCoAAeDmzZtOIQAJggRgCxaCVVVVqK6uxvr167Fp0ya0bdsW8fHxeOKJJxAVFQVfX1+DELx/Dp8lwpAxBp1OZ5g3qP+/TqdDbW0t8vLycObMGWRmZuLYsWMoKSmBUqmEWqWGjpHwIwii5dGhQwfOfXfu3KEKIggSgI4RgwqFAgqFAhUVFbh69So2btwIkUiEjh07IjY2Fg899BA6deqE4OBg+Pn5wc3NzbCYQywW11vQoS9Tq9WipqYGlZWVuHfvHoqLi3Hz5k1cuXIFFy5cQE5ODqqrq6HRaKBSqQxhZgiCIFoypuZIy+VyqiCCIAHoWHQ6HZRKJZRKJXg8HsrKynD27FlDL6D+X5lMBl9fX8TGxmLt2rX1GrPa2lpMnjwZJ0+ehEKhqDfUq9PpoNFooNVqTYZFIBpHRUUFDh06hPT0dNy6dQt3796FRCKBv78/HnroIcTHx6N3795mrcjOyspCZmam0X2JiYmIiIgAAGRnZ2PLli3Iz89HUVERfHx80K1bN0ycOBEPP/yw2feu0Wiwd+9eHDx4ELm5uaiqqkJQUBA6duyIyZMnO3XICWvq6vbt2wgODkZERARef/11hIaG1jv32rVr2LJlCy5duoSCggLw+Xy0bdsWvXv3xsSJE62aEG5LH+GyXXV1NQIDA9G+fXu8+OKL6N27NwDg8uXLnBkwnn76aXTq1KnBaymVSuzduxenTp3C7du3UVBQAKFQiMDAQHTu3BlJSdhyo+IAACAASURBVEno2bOn0y2sMjXf+sEoB+b6EwAoFAr89NNPSE9Px507d3D79m1kZmYatZ+xuhOLxQgJCUFQUBCeeeYZxMbGNuo5FQoFdu3ahb/++gtFRUWGawQGBqJLly4YNWoUevToYZF/OcL2xcXF2L9/P06ePImioiKUlZXB19cXbdu2xZAhQ5CQkAA3Nzer6sRWZVviF87QhtuzHeDq3TJJdXU1e+qpp5hAIGAAnOrj6enJTp06xdRqdZ17VigUbN26dUwmkzXZvfF4PObi4sJ69+7Nqqqq6tVrVVUVi4uLa/J6lclkbN26dUyhUNS5P5VKxU6dOsU8PT2dzu5CoZAlJCSw6upqZi01NTVs8eLFzMPDo8Hr9ezZk6WmpjZY5urVqznL+OGHH1hNTQ177rnnTF5r6tSpTKlUNnitW7dusb59+5osa9y4cayqqoqtXLmS85iUlBRmC8LDw42W7+PjY5e6cnFxYUuXLmU6nY4xxphGo2HJycmMx+NxniOVStmnn35qOKcpfMRc2yUlJbHKyspG2U6tVrMlS5Ywf3//Bu8/Li6OZWRkMFtTVlbGec2EhIQG3z1c53799dcW+RNjjCmVSrZs2TKj9tRqtVbXXXR0NPvpp58srhulUsnmz5/PfHx8GrxG37592YkTJ8wu2562l8vl7M0332Qikchkub6+vuyTTz5hGo2myco2xy+cuQ23RTtgChKADwgLqVTKJBIJ4/P5jb4/gUDAYmNjOQVgbGysTa7D4/GYVCplMpmswS8OCUDG8vPzWXR0tMXXnT17dr0XhbmNx9atWxv8sus/48ePN3n/FRUV7KGHHjKrrNjYWLZw4cJmJQAtqatp06ax6upqNnr0aLPtuGLFiibzEUts17NnT/b+++9bZbu7d++yAQMGWNyOfPjhh2YLZHsLwPLycs5zt2/fbtGL++zZs6xbt26cx9xvM2vrbuHChSZtfz+FhYWsX79+Fl2Dz+ez//mf/2mwbHva/sKFCywyMtKisocNG2ZWW22Psm0pAJuqDW9MO0AC0Ezn9/DwYMOGDWPLli1js2bNYuHh4UwsFju9AHRxcWHdunVjixYtYp9//jmbOHEi8/LyIgFo4hdmRESE1ddeunSpVY1Nu3btLLrOvn37OK8zd+5ci8qSSqXNSgBaWle+vr4Wv+yys7ObxEcstZ2pdpfLdkqlksXFxVl9/x999JFTCMC7d+9ynrt7926z/WnUqFHMzc3N5DPrhVtj686U7fXU1tayqKgoq6/x+eefm+xVtJftCwsLWUhIiFXljhkzxqS4tFfZthSATdmGW9MOkAA0UwC6u7uzdevWsbKyMlZbW8uqq6vZlStX2PTp05mfn5/FvWqOEIA8Ho+5urqy+Ph4dvXqVVZTU8MUCgUrLy9n33zzjVnDVq1RAI4cObJR1+bz+ezvv/+2uLGx9DNw4EDOoR1b2sQZBaAjPqNHj3a4jzjKdsnJyY3+UWlKIDtKAObl5XGe+8svv9jUn/QCsLF1JxAI2PHjx00+1xtvvNGoa0gkEnbp0iWH2l6n01ncq/jg55tvvjF6z/Ys25YCsCW24a0+nYJAIEB0dDRefPFFeHh4QCKRQCqVIiwsDB9++CEOHDiAiRMnIjAw0BD/rynh8XiGHJrvvvsutmzZgtDQUEilUojFYri7uyMpKQmPPvpokwetdjYyMjKwb9++Ri8CWrBggd3v9fjx46ioqKi3/fLlyygvLydjNpKDBw9CoVA41EccYbvCwkJ8/PHHjSpDrVZjyZIlTW6ja9euce
5r27atU9adVqvF/PnzOfffuHEDn3/+eaMXjSxfvtyhtt+3bx8yMjIaVfbixYuhVqsdWnZT0hza8FYvAIVCIXr27AmpVGpYZaXP9evq6oqePXtizZo1OHr0KObPn48ePXrAy8sLEonEYWJQIBBAKpHC09MT3bp1w/z585Geno5Zs2YhICCgjtDj8XgQiUTo0aOHIVcx8S+ffvqpTco5cOCA3eOQabVaXL582aKXImE+tbW1uHTpkkN9xBG227ZtGzQaTaPL2bNnD4qKiprURrt37+bcFxwc7LR1d/z4ceTm5hrdt3XrVptEddi+fXs9EWFP25vKvSwUCjF48GA8++yzCAoK4jzu+vXrRlfY2rPspqQ5tOGtXgDqdDoUFhYa/eLoc/zKZDJERkZi9uzZOHr0KFJTUzFv3jz06dPHEPNPIpFAJBI1ShTqrycWi+Hq6goPdw/4+fnhscceQ/K8ZOzfvx/Hjx/HzJkzERYWxtkjqdFocP36dWi1WnrT3/fCfzDx/P1ERkZi7969KCsrQ15eHhYuXMh5LGMMv/zyi8X38NRTT+HcuXMoKyvD6dOnER0dbfL4e/fu1dtWWlpq8pwRI0bg/PnzKCsrQ3p6Ojp37tws7WVpXQHA0KFDce7cOZSXl+P06dMNhsG5e/euQ32kIdv16tULWVlZKC0txaFDhxASEmJxvZ0+fZpzn7+/P3bs2IGSkhLcvHkTycnJnKFFGGNIT09vMvtnZWVh06ZNRvd17doVXl5eVpXbvn17rFy5En/++Sfkcjny8/Oxf/9+8Pl8m9WdXgRaap82bdpg165dkMvlyMvLw4wZMziPValU+P333x1i+6qqKhw+fNjosd7e3sjMzMThw4exc+dOXL16Fc899xznffz00091/rZn2c7QLlnThtuiHTCb1j4HkMfjMV9fX5aRkWHWnDKdTsdUKhVTKBSssrKSXb9+naWlpbGVK1eyiRMnsri4OBYUFMR8fHyYr68vGzBggNFyq6ur2cCBA5mPjw/z9vZmQUFBLCoqio1KGsVmz57NvvzyS3bs2DFWUFDAqqqqWG1tLVOpVCYn0up0OlZTU8MOHz7MvL29aQ7gfWRnZ5uc3FtYWFjvnJdeesnkak9L5o+0adOmXmiAu3fvmpycbmxux2effcZ5fHh4eL3vQkFBAXN3d29WcwCtqau2bdsylUpV55w7d+4wV1dXznN+/PFHh/qIKdt5eHiwsrKyeqsiTc0RNma7mJgYiyalT5w4kfP4mTNnOnQOoEajYXl5eWzVqlUmbT19+nSr5m6NHj2aVVRUcN6rLevuP//5j9FrdO7c2ex5jYwxk6vb58+f7xDbX7p0ifO41atXG323BQQEmDX31p5l23oOoCPacFu1AzQH0HwBDLlcjpemvIQff/wR5eXlJrvRH+yla9++PYYMGYIZM2Zgw4YNSE9Px4ULF5CVlYWMjAysW7cOIpGoXjkikQjr16/HiRMncPbsWVy8eBEnTpzAtv/dhqVLl2LSpEno06cP2rRpA1dXV0gkEri4uJj81VZTU4OjR4/i1VdfRVlZGXX73ceVK1c4902fPh1t2rSpt33ixImc51hav2PGjKnnB35+fhgzZoxF5ahUKpPXeHDeZ9u2bTF27NhmZStr6uqZZ56Bi4tLvV6PZ555xml8xJTtnnvuOXh6etbZ1qVLFyQlJVlUd8Z6HADAy8sLI0aMqLd96tSpnGXZO9vGr7/+Cn9/f/j7+8PX1xcuLi4IDQ3F3LlzUVVVxdn+vv766xZfa/Dgwfj+++/h7u7ukLqrrKw0q9f5/u/pkCFD6m1/+eWXzfYve9m+uLiY87gnn3yy3jaZTIahQ4fCx8en3ufBUSl7lu0M7ZKlbbit2gFzoVUC+HcY+EbuDUyfPh07d+7E7Nmz8cgjjxhElykxKBAI6s21Y4zB3d3dMNfD2GIMgUBgGJ6zNHfwg9dSqVS4d+8eNmzYgA0bNqC0tJSyhzxAfHw8Tp48aXRf9+7djW43lYvUUgEYFhbGOSRlCaYaucjISOPPEdqhWdnKmrrq2LFjo+vX3j5ije0ezFTQEFwCg9M3TNy/sQnstkStVnOKFi6ef/55dOnSxbKXnFCIL7/80mRbbuu6MzbJX6PRcIpqLjt37tyZc5jxwWFwe9neVL1x2e/bb781yzb2LNsZ2iVbteGWtgMkAK0QUvqUTydPnsTAgQMxbdo0PProo5BKpYYUb+agF4YNHdOYVbparRZqtRrl5eVITU3F2rVrcf36ddTU1JAxjeDu7o4+ffpYdI5MJjPpL5bANYHZ1dXVYj/lgis1kkgsala2sqauuM4xZUNH+4gp23GtavXx8TH7XpRKJWpra43u48qrK5FImtUPA2sW6QwePJjzB4K96s6YeK6pqeH0Aa5rhIeHIysrq0ltHxAQwLlvxYoVGDBgQIPiuinKdoZ2ydI23BbtgCW0+iHgBw2jVqshl8uxf/9+PP/880hMTMTGjRtx7do1VFdXQ6VS2WSllbX3plQqUVlZibNnz2LVqlXo378/Zs6cifPnz5P4c2JoRbZ966q5168t7r8l9/q7u7sjJSXFqhehPp+qI+vOWHn2tI89yw4NDeVcdJOeno4BAwbg8OHDVt2DPctujm24o9sx6gE0Ibb0qwnPnDmDxYsXo2/fvkhMTES/fv0QEhJiWPV7/8cWaLVa6HQ6/P9A3dBqtaisrMTFixeRmZmJQ4cO4fz586itrYVSqYROpyOjNQL9UFRlZaVhDqiXlxetoibIR5yA6OhofP/995zDYw3Rrl07qsRGipIxY8Zwhms5efIknnrqKXTv3h1Tp07FpEmTzF6lbc+yCRKAjRaCWq0WNTU1hhARv/76K8RiMYKDg/H4448jOjoaXbt2Rfv27eHt7Q2BQGAQgjwezxCXz1jZKpXKIPKAf+ciarValJSU4NatW7hx4wZycnJw5swZnD17FtXV1VCr1VCr1U3SC9mSqKiowIYNG3D48GGcOHGCc/iEIB9pCT5y/PhxowLKmduR2NhYTJ48GS+//DLEYrHV5Xh4eLRqP7aF7efMmYNvv/3W5AKGc+fO4e2338a8efPw3HPPYc6cOZxzZx1VNkEC0GZiUC++qqurUV5ejitXrkAoFBo+Pj4+CAsLQ7t27RAYGAhvb2+EhYVhxIgR9USgSqXC1q1bcfnyZZSWluLevXsoKChAUVGRYZhZp9NBo9EYPoRt2L59O15//XXKqEG0Gh9hjJlc5dwUdOrUCS+88ILhb4FAgLZt2yIiIgKdOnWyS7aP1vruaqztO3fujKVLl2Lu3LkNHqtQKLB161Z8++23GDlyJNavX2+yF9aeZRMkAO2CVquFVquFUqk0bCstLcWNGzcMw8ECgQAxMTFITEysJwA1Gg2+++47ZGVlGcSe/kPYjy+//BKvvfYaVQRBPtLEREREYPHixVQRzYTZs2dDpVKZTHX3oPDcu3cvMjMzsWPHDqNhbhxRNsENLQKx8S8tjUYDlUoFhUJh+HBNYFUqlVAoFHV6/Aj7ceXKFbz99ttUEQT5CEFYKhb4fLz//
[embedded image data omitted]
Assignment 5
###Code
# In this assignment, we will visualize and explore a CT scan!
# load numpy and matplotlib
%pylab inline
# we are using pydicom, so let's install it!
!pip install pydicom
###Output
Collecting pydicom
  Downloading https://files.pythonhosted.org/packages/f4/15/df16546bc59bfca390cf072d473fb2c8acd4231636f64356593a63137e55/pydicom-2.1.2-py3-none-any.whl (1.9MB)
Installing collected packages: pydicom
Successfully installed pydicom-2.1.2
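###Markdown
A quick sanity check (added here as a hedged sketch, not part of the original assignment): pydicom is installed above so that the CT slices can be read in Python later. The call below reads only one slice's header, skipping the pixel data; the path 'ct/000001.dcm' is an assumed example file name from the extracted ct.zip.
###Code
import pydicom
# Read only the DICOM header of a single slice (no pixel data loaded).
ds = pydicom.dcmread('ct/000001.dcm', stop_before_pixels=True)
print(ds.Modality, ds.Rows, ds.Columns)
###Output
_____no_output_____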
###Markdown
**Task 1**: Download and visualize data with SliceDrop! [20 Points]
###Code
# Please download https://cs480.org/data/ct.zip and extract it on your computer!
# This is a CT scan of an arm in DICOM format.
# 1) Let's explore the data without loading it.
# TODO: Without loading the data, how many slices are there?
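# A minimal sketch (added, not part of the original assignment code): one way to
# answer this without loading any pixel data is to count the .dcm files on disk.
# The folder name 'ct' is an assumption -- point it at wherever you extracted ct.zip.
import os
dcm_files = [f for f in os.listdir('ct') if f.lower().endswith('.dcm')]
print(len(dcm_files))  # expected: 220 for this dataset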
###Output
_____no_output_____
###Markdown
There are 220 slices, since the ct folder contains 220 .dcm files (one file per slice).
###Code
# 2) Let's visualize the data with SliceDrop!
# Go to https://slicedrop.com and drag'n'drop all .dcm files into the browser.
# Please use the 2D sliders to show axial, sagittal, and coronal slices in 3D.
# TODO: Please post a screenshot of SliceDrop's 3D View in the text box below:
# double-click the cell, then use the Upload image button.
###Output
_____no_output_____
###Markdown
[Screenshot of SliceDrop's 3D view showing axial, sagittal, and coronal slices of the CT scan; embedded image data omitted]
EEzFr1qyevxcMQ7BEZRiGYfqWmsIStRvbbLMNdF1HKpUSSdRwOIxcLicWSJE8JWRJGJROLZVKQqKapol0Oo1MJiOKaxKX8uxRSiTYti2SDatXrw4UqSMjI7BtG6qqIpPJoFKpdBzzwgsviMUEsuQkWSsnTOk4kqR0X1Bywr8gQYbuo9+r1+uwLAuxWMyTQKXWfl3XsXjx4pk73wMsURmGYRhGUViibg6LFi3CNddcI7pq5K4i13VhmqZYTEWdSKZpIpvNwrZtLFu2DJVKBfl8XlxopwvZrutibGwMxWIRN954I84555ye/70ME4SQqNH4/98SyxKVYRiG6QdqCkvU6aCh/fF4HMPDwwiHw1BVFdVqFfV63SNSaeu9LE1liTo2NiZasmi7ayaTQTabFQkEEpk0e9Tf1kXJhFqtFihRf/e736HVakHXdeRyOYyNjXUc8/bbb4vnkhOj8nN3m3fq3yBL87qCts52k6kkmWWJSsgS9fbbb5+5cz3AEpVhGIZhFIUl6nvFTjvthH/+53/GlVdeiaGhIWQyGWiaJtKndNG8Wq2K7iNaTCqPdnIcB/l8Hj/72c9w6aWX9vzvYpgNAQDK8hXLsXzFcnEjS1SGYRimH6gpLFGnI5lMIpPJIBqNComaTqfhuq5o3yeB6p91RTNT5bmjNN+KZpdms1mYpulZQCCLTHnGKM3Z0jQNhmFg2bJlgSL1gQceEEsMHnroocBjHMcRxTsV8PJzy/NM5QUI3ZYgyGnTbgJVTquOjY2hUCh4JCrNnI1GozAMA7fccsvMneuB/5OoH4D/5hiGYRiml7BEff84+eSTceaZZ+JLX/oSLrjgAvzkJz/BbbfdhsWLF+Puu+/G3XffjVtvvRU33XQTLr74Ypx99tk47rjjev66GWZTYInKMAzD9C01hSXqdESjUaiqikgkguHhYYRCIaRSKZRKJY8slSWqPA+12WwK0To6OgrLssS8LNrgSjOy/BKV2rZIQNKMLV3Xkc1mMTIyEihIAeDFF1/EI488gtdeey3wfnosOX1KrWbycik5ceq/Td4suyGBKs9JpWNliUrvryxRf/azn83cuR5gicowDMMwisISlWGYzYMlKsMwDNO31BSWqNNxzz33IJfLIRwOC4maSCRgWZYQprI09d9GEpW+m6aJfD6P0dFR2LYtUqn+ZQMkNSnBSu32NA6ARgI8++yzXUXq66+/3vU+13VRKBQ8qVdZ3k6XOJUlqpws9ctS/xgAefmUP4kajUYxNDQkfjYMA9/73vdm7lwPsERlGIZhGEVhicowzObBEpVhGIbpW2oKS9TpuP3226Fpmmg3D4VCiMViMAwD1WpVyFKagToyMiLa++UEKn2nFGmj0RAt+pZlwXEcFItFMROLkqdyCpRSo7ZtiyTr1NRUV1E63dfIyAhM04TjOJ4lUvSzLFD9rfzy3xwkVEmeyuMI/EnUbhKVWvpN08QFF1wwc+d6gCUqwzAMwygKS1SGYTYPlqgMwzBM3zKosESdjmuuuQaGYXgkaiQSQT6fR6VSEfJQnnsqL5mShePIyIiYg9poNIQopaVStGRAXiYlt79TYpSO03UdxWIRDzzwwCZL1Hq9DsMwxIIpOYFK81S7tfL7l2XJfzu9TpKoslT1z1QliRqJRBCLxTA8PCzmzpqmiTPOOGPmzvUAS1SGYRiGURSWqAzDbB4AoCiKgmg8Km5kicowDMP0A4MKS9TpuOyyy2CapkhMhkIhhMNhqKrqmR0qzwVtNBpCKvq32OdyOSFRR0ZGxIKnYrEokqgkUeXlUvT7lUpFHG+aJjKZDMrl8iZL1Ha7DU3TYFkWisVixyKrbilUEqZBElX+O+VFUv5EKh1fKBQQj8cRjUYRj8eFRI1GozBNEyeffPLMnetBlqgMwzAMoygsURmG2TyERJVhicowDMP0A4MKS9TpGBgYQKFQQCKR8Cw/ymQycBynq0SV29vldvxcLgdVVT0SlVr05VSojNwmTyMASLrqug7DMPDggw9ukkSdmppCNpuFrusoFAriuUiA+lOo9H06iSqLVFmYyiKYxgH4JWosFhNJ33g8DsMwcPzxx8/cuR5kicowDMMwisISlWGYzYMlKsMwDNO3DCosUafjiCOOQKFQQDKZFBI1Go0ilUqhVCp1pDSpnd8vE0dGRlAul5HL5ZBOp0U7v+M4QoqWSiWPPCVpKktMarsvl8twXRfFYhGqqm5yGnXVqlXIZDLQdR2WZQl5GyRRZRHcbRaqX6TKEpUSqf7jDMNALBYTEjUcDiMcDiOZTELTNMydO3fmzvUgS1SGYRiGURSWqAzDbB4sURmGYZi+ZVBhiTodu+++OwqFAlKplCeJmkwmYVmWZ1kSLZYKSmSOjo6iXC4jm80inU6jXq+LVKk8D1WWqEEb7uVWfxKzpmmiWCzioYceetcStVwueyQqCU+/UKW/zT/rlWal+v92ul8ebUDHaJqGaDQqCIfDiEQiSKfTUFUVu++++8yd60GWqAzDMAyjKCxRGYbZPFiiMgzDMH3LoMISdUOYpol0Oi0kajgcRjweh67rYi7q2NiYSJf605h0v+M4QqJSm3upVBLQPFS5jV+ehyqLVNd1Pe39pmnCtu2NlqirV69GJpOBYRiwbVs8nvw8siCV5amcOJVnv3aTqLJAlcnlch6JGolEEIlEkM1mkUgkZvY8D7JEZRiGYRhFYYnKMMzmwRKVYRiG6VsGFZaoG4Ja8MPhMIaHhxEKhRCNRpHNZlEul0X6tNFooFKpeESiLCJLpRJUVUUmkxHCkmah0jxUWWTKSVZ5ORO181Ny1HVdWJYF27axbt06/O1vf9soiZrNZkWKNWhJliw86/W6oFardXwPkqjyiAP/HNXR0VGoqopIJOIRqdFoFPl8HosXL57Z8zzIEpVhGIZhFIUlKsMwmwdLVIZhGKZvGVRYom6IdDotJGooFOpYLkXp02aziXK57BGG1OJPEpWSqI7joFKpoFAoCIlKApUezz8LlSQnzVF1XVcI1WKxKITsww8/vFHt/LlcTjy/PHvVvzBKlqiUQqV/NxoNMQM2SJT6H0cWy5lMBuFw2CNQY7EYDMPAddddN7PneZAlKsMwDMMoCktUhmE2D5aoDMMwTN8yqLBE3RD33HMPVFUVEnXp0qViARIJSBKNrut6ZGKj0RByslgsIp/Pi3mqjuOI747jeGaeNptNsZDJPxu1XC6L9n/HccS/6bbJyUk888wz00rUlStXQtM02LYt0rSUGCVB6peojUZD3CZL1KA5qd2WTtG/q9UqUqmUmINKidRYLIZCoYDvfve7M3ueB1miMgzDMIyisERlGGbzYInKMAzD9C2DCkvUDfHzn/8cuq4jEokgFAphaGgIoVAI8XjcIyFrtVpHOz+lN0dGRlAqlaBpmtg+XywWhUSlxyBhSvJVvo2Eql+auq4rHoN+rtfr00rUFStWiNdO81fllny/GPVLVP9s1G4JVH8alUSt67pIJBJColIiNR6PwzRNnHHGGTN7ngcVKLXe/7fGMAzDML2GJSrDMJsDS1SGYRimbxlUFO
AD8Do+yPzwhz+EZVmIxWJCog4PDyMajcI0Tc9yKf9iKUqj0gIowzDEeABd12GaphChsjQl4Si38dNjlsvlwJZ+edHUyMgIHnnkka4Sdfny5WIGa9DiJ78cpZmvQQui/MfLj0ev27+cynEcxONxhMNhQTQaRSqVgqZpmDt37sye50GFJSrDMAzDKCxRGYbZPFiiMgzDMH3LoMISdUN85zvfQbFYFNJveHhYtPRrmiYWPJE0pFZ4Eoj1eh2VSgXlchmFQgGqqiIajUJVVeTzeTEflaSmLB79C5/ocQj6PTquWq2K+8bGxvDSSy91lahyqlReBiUvhJIFqLw4yr88Sr5dfv1Bc1Hr9TqKxSJisZhIoZJEVVUVqqrO/HkeVFiiMgzDMIzCEpVhmM2DJSrDMAzTtwwqLFE3xGWXXQbHcUT7uZxGzWazIgXaTaKOjo4KuWnbNnK5HCKRCBKJBFRVFcupSKbKqVOSqjQrVRaofolKv0MydmxsDOvWrcObb77pEaivvfYaVqxYgWaziWaziUajEZhG9d9Gf5csWeUxAP60aZBEJRlrWRbi8biQqDQTNZ/PY2hoaObP86DCEpVhGIZhFJaoDMNsHixRGYZhmL5lUGGJuiHOOussOI6DZDIppN/Q0BCWLl2KTCaDYrEo2uJp5qcsUUmM0rxSTdMQi8UQjUaRSCRgWZZow6djSbzSHNRKpSKEaZBElWex0qKrsbExtFotPPzww3jppZfw4osvYv369XjyySexatUqTExMoN1uo9VqiZmncsu+nCilEQP+0QL+JKosSv1SlpKs9XodhUIB8Xgc0WjUs1RK13XcfPPNM3+eawpLVIZhGIZRWKIyDLN5sERlGIZh+pZBhSXqhjjssMNg2zZSqRRCoRDC4TCWLl2KJUuWIJlMwjRNkRidTqJSK75pmojH4wiFQohEItB1XQhWuZ1fTqGSRJXTqJQ4pVb+RqMhllvV63XU63U0m02sXLkS999/P1asWIE1a9Zg9erVWLFiBSYmJjA+Po7x8XG0Wi20Wi202+2O109Cl16TLHeD0qpEUBs/iVRd14VEJWKxGEzTxH/+53/O/HlmicowDMMwUBSWqAzDbB4sURmGYZi+ZVBhibohZs2aBdM0kclkMDQ0hEgkgiVLloifNU1DuVz2JDdJPpLUJBFKLf2pVArDw8MYHh6GqqoolUpiLAClPklYUtJUTrTSY9Lj0gIras1vNBqiVZ+EZrVaRbvdxsTEBCYnJzExMSF+Hh8fR7vdRrvd7jqOgG4jedptIZU8Z1Vu9Zflai6XE2lcOZVrGAbOP//8mT/PLFEZhmEYBorCEpVhmM2DJSrDMAzTtwwqLFE3hmQyiVwu55GotGAqm82iWCwKqUiSUU6i+meakpAdGhpCLBZDoVDwJFFJolL7Pj2GPB+VnoOSqLVaDa1WC7VaDc1motW4hAAAIABJREFUE+12W6RT5SVVY2NjaDabgRKVWvtlCewXqrJI3dAc1aAk6sjICDKZjJCn8XgcsVgMmUwG+Xwe8+bNm/lzzBKVYRiGYaAoLFEZhtk83jOJeuutt2LlypU9/4MYhmEYhhhUWKJuDPfccw90XRft/JREXbJkCdLpNAqFgid5Ki+FklvxSYzSAqWlS5dieHgYmqZ1LJYaGxuD4zie22VIaFJr/+joqBCnjUYDrVbLk0R1HEckZuv1OsbHxwMlarPZFGlSWcLK81Hl2aj++zYkUSuVCtLpNGKxmFiwlUgkoP0/9u49SLK6vv//WS6KgCm8RbFUKoKKFURLojHipSFaEI2oeP2l4iWRRTQmRPGuZAdFg4KiK+7u7GVm+nb6dJ8+ffr0/d4zs8stGTReYmJFc9NUmTIXyxSKfhNevz/gczjdM3uj1z2z3c+tetTM9Mz0ntMNpfXk/fl8SiXl8/l43mMiKgAAsiwiKoDJTBxR19bWtLa2piuvvDL2mwEAIGrOIqIeic985jOqVCpKpVLhBKrZFzWTyYT7ovb7/ZGIapbbmyX45vtBEIQRdmFhQfl8XpVKJVzSb6Jjo9FQo9EYiajRadBerxfG0cFgsO6gKBNE+/3+SIwdDofh0v7o3qgb/X50f9Pxv3t8CvVgE6ompi4vL6vZbMpxHGUyGSWTSdm2Ldu2ValUtGPHjnjeYyIqAACyLCIqgMk8rIh64YUXhvE07hsAAOBg5iwi6pF47WtfG54obyLqnj17tHfv3vBwqOiye2N8EtXsTVqr1bSwsKDdu3drYWFB6XRaruuqVquNRNhKpaJqtapmszkSKKMh0yz5j4ZRM0m6vLwc7mnaarVGthkwS/4PHDgQTqXecccd4b6oG0XUjaZhNwqsh4qo9Xo9jKipVEq2bctxHNVqNX3yk5+M5z0mogIAIMsiogKYzFFH1LW1Nfm+H/uFAwBwOHMWEfVIPPnJT1YQBLJte11EXVxcDAOoCanRPUyjYdWExnq9rsXFxfD3l5aWZNu2giAIl+c3Gg2VSiWVSiXV6/WRaBkNliaQLi8vh2E0ujdrdBrWfN3v90dCqZlEvf3228N9VQ82iXqwqdPDRVQzXVutVsOImk6nZdt2uK/sO97xjnjeYyIqAACyLCIqgMk8rEnUK6+8Umtra7ruuutivwEAAA5mziKiHolTTz1VruvKcRwtLCyES/H37t2rffv2ybZt+b6ver0eRlAzURoNqiY81ut1LS0thQHVTGS6rhtOn5bLZbmuK9d1FQRBGEE3mgptNBrhnqwrKyvhhKqJp9HJWPPRRM2VlZVwIvXAgQNaXV0Nv7e6uhruixoV/fuPNqJWKhXl83llMhllMhnlcjl5nqdKpaKLL744nveYiAoAgCyLiApgMhPtiXrdddexrB8AsGnNWUTUI3HyySeHE6eLi4sjEXXv3r1KpVIqFAqqVqtqNBrhRKqJqCZeRpfzJ5NJLS0tKZlMKpPJyLZtZbNZFQoFFQoF5fN52batXC6nYrGoRqOx4R6k3W5X1Wp15FCqZrOper2uWq0WbgUQnVw1P2eW2JsDpaIHS5nHzeFUUWafVfMcBwurGx0uFQSBCoVCOImay+VUKpXkuq7OPvvseN7joSVrLv5/zgAAiBsRFcAkJj5YyiCmAgA2mzmLiHqktm/frlKppKWlpZGIunv37nA5fqlUUqVSUbPZVLvdHomonU5Hw+FQ7XZblUpFS0tLWlhYUDKZVDqdVjqdViqVUjabDT9PpVJhYDXTqNEoaZbnm31TzfRpuVxWEATyff+g+6lGA+fKysoIE1HNZKqZWI1+30y+mqgaXe6/UTw13/N9f2Q5fy6XU7lcViaTie/9JaICACDLIqICmMwxi6gAAGw2cxYR9Uj9yZ/8iUqlklKplBYWFsKIumvXLi0sLCiVSslxHJVKJdVqtZGIavYlHQwGajQa8n0/jLHJZHJkKjWVSm34WD6fV7VaDaNkdFl/rVZTrVZTvV5Xo9GQ67ryPE+e54UR10RP8zvjBz5FD6MyX5vJVBNPV1dXR/ZJNXHVPG90KjUaUYfDYfgaFItF2bYdHizlOI6CIND8/Hx87y8RFQAAW
RYRFcBkiKgAgKk1ZxFRj9Sll16qUqkk27bDQ6F2796t+fn58ICoTCajfD6vSqWyYUTtdruqVCoqFApaWFgIp1hNUDWfm71S9+3bp8XFxTDS+r6/bm/Ufr+vRqOhSqWiIAgUBIFyuZzy+bxc1w2X/UejZnQy9WDhs9/vj4TTKBNOTVw10TUaYMf3UG2326rX6+F+qGb61uwDe/PNN8f3/hJRAQCQZRFRAUzmYUXUCy+8MFy+b1x44YWx3wwAAFFzFhH1SJ177rlyHEeFQiEMm/Pz89q9e7f27NmjxcXFcPl9sVhUvV5Xp9MJD3cyPM9TNpsNp1jNc5lgaiwtLYVx1iz7d1033P/UhE5zUJXv+3JdV8ViUZlMRtlsVq7rjvxcdEl/NKKaqVITPfv9vjqdzsjhUhtNo0Y/jy75H4+o/X5fzWYzDLzRrQvMFgjXX399fO8vERUAAFkWERXAZI46ol555ZUb7n26tramK6+8MvYbAgDASFhE1CP1uMc9TrfccotKpZKSyaT27dun+fl57dmzR7t379bCwkK4z6nZ59PsU2oOejKTmMlkUrt379bOnTu1sLAQHlBlgqmJqOY5zddmu4B6va5msxlOt1ar1XCZvImUmUxGvu9veOjTeFQdDAbhRKn52Xa7reFwGEZUE0oPFlHHA+t4lK3VaioWi8pms0qlUuE+skEQyHVdvf71r4/tvd0ii4gKAIBFRAUwmaOOqL7vbzh1euGFF8r3/dhvCAAAI2ERUY/UySefrPe///2qVCpKpVLh4VK7d+/W7t27w+X4e/fuVTqdVrFYVLVaVavVUq1WC5fbZzIZ7d27V/Pz85qfn9fi4qL27ds3snTfRFOzJ6r53LZtFQoF+b6vcrmsVquldrutcrmsfD6vbDYr27bDpfLVajUMo9Fl/SagRidSo3ud9no9NZvNMKKaKGpi6UbRdHy5f3Spf7/fV6VSCQ+UMnu+Oo6jarWqdDqtc889N7b3logKAMADiKgAJnHUEXWjKdQj+R4AAMdbwiKiHo3XvOY1KpfL4XJ8s69pdF/UPXv2jEyNVioVVatVlUolua6rZDKpXbt2aefOnSPTpyaemqX70YhqJjez2ezIfqeVSkXlclmu6yqbzSqTyYzsN1qv19cdQhXdSzUaUscnURuNxsiEavTgKRNXDxVRo9/r9/sql8vK5XJhRE2lUioUCqrVatq9e3es7ysRFQCABxBRAUyCSVQAwNRKWETUo/GMZzxD+XxejuOE4dNMlJqoumfPHi0sLCidTiufz8vzPAVBoEKhoGw2q4WFBe3cuVM7d+4cmT6NRlQTTc3EpgmPZq9Tx3HCkOo4zsj0qTn1PpPJqNlshmF0OByORNTodGq/3w8nTU1Erdfr6vV6IxE1unfq+LTpRodQRSdRTXw295JOp1UqlRQEgW699dZY31ciKgAADyCiApgEe6ICAKZWwiKiHq1UKiXXdTc8XMosyzdL+zOZjPL5fDiFaSLrjh07tGPHjnDZ/r59+8KQavZBjU6hmn1ETSjN5XKybTuMp+MR1UytttvtkUnU6GFS43ulji/7r9Vq6na7IwF1/ACq8WX+G1leXlav15Pv++GkbCqVUi6XU6VSked5uvHGG2N9T4moAAA8gIgKYBJHHVEt64Gp07W1tREbTacCABCnhEVEPVpbt26V67pKpVLhcn7DLM/fu3dvGFLNIVO5XC48jMpMoppYOh5RTUiNRlRzor35OpfLKZvNKplMhhHVTKCa/VM7nc5IRDVMFDXB1ERU87mJqJ1OZ12AjU6sbvR59BAq83W325XneeEkbTqdluu6ajQachxHn/jEJ2J9T4moAAA8gIgKYBIPK6ICAHAiSFhE1KP1zGc+M1yab/ZANRHVLOU3h07t3btX2WxWvu/LcRzt3btXu3btCiWTyfBgKfN74xE1OmmaSqXCxxzHUTabDYNpOp1WMpkMw2oulxuZMB0XDajjEbXb7aparardboeP9fv9cAI1urTfLNc3cXajydROp6NisahMJiPbtpXJZFQqldRsNpVOp/Wxj30s1veUiAoAwAOIqAAmQUQFAEythEVEPVpbtmzRnj175DiOlpaWtHfv3pGIakLonj17tGfPHqVSKRWLRRUKhXA/VLOPaiqV0r59+8L9VM1HM6EanTo1U6gmrppJ1Gw2K9u2Rw6fikZUEzyjS/Kjy/ijgTQaUSuVilqtlrrdbvgz489jgqnZa9Us9R/fI7XVaoWHX5ltCIIgUL1e18LCgj760Y/G+54SUQEAkGURUQFM5qgj6vgy/nFx3xAAAEbCIqIerS1btuiaa64JI2Z0GtVMn0bj6uLiovL5vEqlkpaWlrRr164wuiaTyXAP1WhETafT4Z6qtm3LcZwwoi4uLo4cMmWmYk1ENQc35XI5dbvdcOp0fG/U6KFS42HVRNRms7nhz0W3BYgeWLWyshLG1OjhUs1mU67rhgHVcRzVajVVq1XNz89vjoiaiP+fLQAA4kZEBTCJhxVR475oAACORMIioj4cL3jBC2TbtvL5fDhNaiZPzV6oJqIuLCzItm35vq90Oh1G1D179mhpaWlkGf++ffu0uLgYLts3ETWfz4fL9c2kqplSNROeJqKaAGvb9rqIGj1MKvoxGlH7/X4YURuNxsjj0XAanUgdj6jD4TCcRF1eXlaj0ZDrunIcR7Zth/uhBkGg3bt3b47l/In4/7kCACBuRFQAkyCiAgCmVsIioj4cT3nKUzQ3N6dCoSDbtsPDpKIR1YTVffv2KZlMqlAoKJPJhBHVHEJl9kA1ny8uLo7sh5rJZMLfXVpaCidRzdJ4x3HCE+9NXB2PqGbPUzONaqLn+HRpNKKa5fZmanV8Cf/4ZOt4RDXTqIPBQLVaTYVCQblcTo7jKAgCNZtN+b6v3bt36+Mf/3is7ycRFQCABxBRAUyCiAoAmFoJ64GImtgE13IiefSjH62Xvexl4SRodG/TaETdu3dv+Fg2mw0j6vz8/MghUuZ3TCA1U6gmoppJVHO4VCaTUS6XUy6XCw9qii71TyaTG0bU8UnUcebxbrcr3/dVrVbXhVYTUKOTqNGDpqI/t7Kyol6vp3K5rHw+r2w2q3w+r2azqWazKc/ztGfPHiZRAQDYJIioACZBRAUATK2ERUSdRDKZVBAE4d6oJpyOT6Lu3btXqVRK6XQ6PFQqugfq3r17R+KpCaf5fF6O4yiXyymdTocHSZnDpsy+qCaimqX+0Yg6HA7Dg6NMEI1Ol47viWo+ep6nIAjWTa1utJzfxFPz0Xx/eXlZnU5HruuG1+u6rtrtdjiJuri4qI985COxvo9EVAAAHkBEBTCJo46oAACcKBIWEXUS7373u5XNZsODn8whUyaimj1RzaRpMpkcOXAquqTfxFMzYer7vkqlkorFomzbDvdANdOpZh9UM7EajaipVEq2bavT6Wy4d2k0nG70ebfbVbFYVLlcVrfbXbekf6MDpg42kdpsNsO9UM3kbrPZVKvVUqVSUSqV0gc/+MH43scEERUAAIOICmASRFQAwNRKWETUSTzrWc/SbbfdFp48
n0qlwjAajahm4tQEUxNazc8mk0k5jiPXdeV5nvL5vGzb1rZt23TrrbfK87wwliaTSaVSqZGDpMyU60YRdaO9Szdawh/9vpkejUbUjbYAOFhYje69Wq1Ww4Cay+VULBbVaDTUarVUq9WUTqf11re+Nb73MUFEBQDAIKICmMTDiqi+72ttbW1Dcd8QAABGwiKiTurjH/+4HMdRoVBQNpvVvn37wv1OzQFSZlm/iajmsCnz0UxoVioVVatVFQoFzc/P60UvepGuvvpqVavVcPLUTLQmk8kwmJpDqMYjaqvVGplEPdQ+qNEg2mq15LqufN9Xp9M56CTq4SKq2VvVTNk6jiPP89RoNNRut9VoNJROp/WCF7wgvvcwQUQFAMAgogKYxFFH1O3bt2v79u2xXzgAAIeTsIiok3rlK1+pd73rXVpcXFQul1MymQynTvfs2aM9e/aEMdVE1MXFxXD5fzSglstl7d69W5/5zGf0iU98QpZl6dJLL9WXvvQlua6rTCYTBlQTUQ0TUc1z27atZrN52Oh5sIhaKBTk+77a7fZBf3ejfVXN/quDwUDtdlvFYjE8FCuXy6lUKoURtdlsKp1O6zGPeUx872GCiAoAgHFUEfX8rZqvVFRZZ5su2wT3AuAonb9V8xP++/uwDpa68MIL4795AAAOI2ERUSd11lln6ZxzztFtt92mfD4fHgJl9kWNHjC1uLgYRk7zM/l8XpVKJZz+/MAHPqBEIqGXv/zlsixLz3jGM/S6171OlUpF2Ww2jKfJZFKLi4thRI3GWxNRG41GGDTNNOl4DO31eiPL9fv9vlqtlvL5vEql0oYRdfy5xr+3vLysfr+vRqOhQqEQRlTbtsOI2ul01Gq1lEqldOaZZ8b3HiaIqAAAGEcfUdcHl8u2VVSZ36rzN8H9ADgKcUXU2G8cAIAjkLCIqMfKhz/8Ye3cuVOFQmHkoKnoAVLRpfaFQkG5XE6e56lcLiudTuuLX/yiXvWqV8myLJ1yyimyLEunnnqqzj77bOXzebmuG8ZX8/zRadRoRM1msyMRtdfrhcE0Gj4PFVE9z1Or1Vo3dTr+XOP7pS4vL4f7oZrXwkyi+r6vRqOhbrerVqulpaUlPfKRj4zvvUsQUQEAMI5FRD0WIQZADJhEBQDg4BIWEfVYednLXqY3vvGN2rlzp9LpdLgPaDqdDsNmNKCWy2WVSiUFQaB8Pq8PfehD+r3f+z2df/756577rLPO0s033xzujToeUc106sLCghYWFpRKpZTJZNRoNLS8vKzhcHjQ6VETRKMh1ERUcwjU+LL/aEQdP3RqeXlZy8vL6nQ6CoJAuVwujKjmvuv1unq9ntrtthYWFnTSSSfF994liKgAABhEVGCGxRFR2RMVAHCiSFhE1GPl1FNP1VlnnaUbb7xRi4uLsm1bruuGS/wdx1Eul1Mul1OhUJDrukqlUpqfn9eOHTt02WWX6dRTT91wKvOUU07Rtddeq2azKcdxlEwmRyKqOXDKRFRzWNV4RB2Pp+NR1ITQdrsdTr7WarUNo2n0c/O7w+Ew/PtarZZ835dt22FELRaLCoJAtVotnETdsWNHvO9dgogKAIBxzJbzb7ss9nsBcJTiiKiWZcn3fa2trW0o9hcFAIAHJSwi6rH29re/Xdu2bdP27duVTqfluq5KpZJKpZI8z5PjOMrn81paWtKnP/1pvf71r9cb3vAGPfvZzz7k877yla+U53kqlUpKp9NhODWHSpk9UhcXF5VMJsM9Uc1k6KH2QI0uzR8MBup0OmFELZfLarVaIxOrRxJRG42GPM9TNpsNJ2PNaxEEgdrtthqNhr74xS/G+54lHoyom+CfHQAA4nYsDpaa37p+VQ2AE0BcERUAgBNBwiKiHmsXXHCBEomErrjiCv3hH/6htm7dql27dunjH/+4Pv3pT+umm27S9u3bdfPNN+td73qXTjvtND3mMY/RGWecccjnfdKTnqRMJhMu6U+lUuEyfhNRo/uj2rater2+bjn/eESNhlXDRFQzMVuv19Xtdkd+d6OIaoJtv99XrVaT67qybVvpdFrZbFaFQkHFYjE8XKpWq+lTn/pUvO9ZgogKAIAx0STqg1F122Xx3weAhyGuPVEPxvf9+F8UAAAelLCIqL8KW7Zs0RlnnKEnPOEJOuecc/QXf/EX+q3f+i299KUv1Ste8QpdccUVuuKKK/Q7v/M7R/W8f/7nf64gCMIDpswhU+OTqWY5f7VaDadQo8FzfPJ0PIp2u10VCgXl83nl83n5vq92u33Y5fzm7zL7oZrDszKZjGzbluM4KhQK8jxPjUZDQRDommuuiff9ShBRAQAwJl7Of9k2VSrz2np+/PcC4ChtlklUE1Gvu+66+F8UAAAelLCIqMfD2972tpGvTz/9dD32sY/VYx7zmKN6nuc85zkql8sql8vhwVXRg6Wie6RmMhkFQaDhcPiwI6rjOOFBWOaAqehhVOOHTJmJ12azKd/3w31gzSSqbdvK5/NhRPU8T69+9avjfX8SRFQAAIxjtifq/FadvwnuB8BRiCuisg8qAOBEkLCIqMfD8573vGP2XH/5l3+pSqUix3HCiGr2Ql1YWBiJqL7vj0TUjQ6R2iiwdjodFQoF2bYdTpFWq1V1u92R0Dq+T+pgMFC321W9XlexWJRt2+HWAyakOo4TRtRcLqfnPve58b4/CSIqAADGsYiolnWZtlU4XAo44RyDfY4f1nL+K6+8Mv6bBwDgMBIWEfV4OPnkk4/de5ZIqFarqVgshhF1YWEhZCZTM5mMSqXSSCjd6BCp8elSE1Fd1w2X4mezWZVKJTWbzZGfGZ9sHQ6H6na7qtVqKhQKymQy4ZYDJuw6jiPf91Wr1fTVr35VZ599drzvzxwRFQAA46giKgCMmXgSdfv27bHfBAAAByOLiHqiWVxcVLlcVjabHYmoZjn/0tKSstmsXNcdmTo9koja7/fVbrfluq7y+Xy4bYDjOKpWq2q32+p2uyOTqOb5hsOhOp1OOCkbPfzKhF3HcRQEgSqVij796U/r1FNPjff1nCOiAgBgEFEBTGLiPVGvvPJK9kQFAGxasoioJ5qrrrpK9Xp9w4hqQmomk1E+n1er1QoDp4ml4wE1GlgHg4FarZaKxaIKhYLS6XS4pN/zvJGQGl3GPxwONRwO1Wq1VC6XZdt2GHTHr6larapUKumqq66K/bUkogIA8BAiKoBJPKzl/Afj+37sNwQAQJQsIuqJ5jd/8zdVq9WUz+eVSqXC/VCj06ipVEq2bYcHQo3vjbpRSDVTpY1GQ4VCIXx+M42ay+XkeZ5ardaGk6jmdz3PUzqdDqNuNKK6rqtaraZcLqcXvehFsb+WRFQAAB5CRAUwiYknUQEA2MxkEVFPNE9+8pO1fft2BUEwMo26b98+LSwsKJlMKpVKKZvNqlqtroumB5tGNVHURE7HccL4aQ6HchxHlUpF7XZ7JKL2+311u11Vq1W5rqtUKrUu6maz2fBQqcXFRT360Y+O/bUkogIA8BAiKoBJEFEBAFNNFhH1RHP66afrgx/8oFqtlhzHUTKZDCPq4uJ
iGDwzmYyCINgwog6Hw/AwqOjjZk/TbDarXC4XBlCzt2k6nVaxWBw5ZMpot9uqVqvhNgBmOjaZTCqdTsu2bfm+r0ajoVtvvTX219GyLCIqAAARRFQAkzimy/nX1tZivyEAAKJkEVFPRFdccYWCIJDneUomk+GSfjM5aiJqqVRSt9sdCam9Xk/Ly8vhx+i+qJ1OR+VyOdwH1RwKFd171Rwy1Wq11Ol0Qo1GQ5VKJTyQyvy82RLATLFWq9XNs0/8HBEVAACDiApgEg8roprPr7zySm3fvn3D7wEAsBnIIqKeiM4999zwpPtUKhVGzmhETafTKhQK4dL78Yg6PolqImqpVAojrDkcyhwQtbi4qGw2q1KppEqlomazqXa7rVarpWq1qiAI5DhOuCeqmWQ1h0rVajX5vq83vvGNsb+GlmURUQEAiCCiApgEERUAMNVkEVFPVJ/5zGdUq9XC2Lm4uKhkMhlG1OjhUmYJv4mlZjn/+L6o7XZbxWIx3MN0PKKaJf35fF6e56lWq6ler6terysIApVKJeVyuTCiRvdndV1X9XpdjuPoBS94Qeyvn2VZsuYsWcNNcB0AAGwCRFQAkyCiAgCmmiwi6onqd3/3d1Wv18O9S83Up5lCTSaTymazqtVqIxH1YHq9nhqNhgqFwkhENfHULO03cbZQKKhcLisIAgVBIN/3VSwWZdt2OB2bSqXC5yqVSqrX69q3b5/OPvvs2F8/y7KIqAAARBBRAUyCiAoAmGqyiKgnqi1btsj3fbmuq2QyGe5dakKqOdApCAJ1u92R/U8Hg0EYVc3HbrerarWqXC63bhLVPL+JombPVNd1VSwW5XmePM9ToVBQNpsNryV6qFQQBKrVatq+fbu2bNkS++tnWRYRFQCACCIqgEkcdUQFAOBEIsvS3Ca4Djw8n//85xUEgTKZjFKpVLiEPp1Oh0G1VCqp1WqtC6gmqpqP7XZb5XI5nCQ1EXU8nprP0+m0HMdRPp9XoVCQ67rhoVImoprl/7lcLtwz9dprr439dQvNWURUAAAeREQFMAkiKgBgqskiop7IrrnmGjWbzTB8Rg9zMvGzUCioXq+PHC41HA5Hlvebpfye54Wh1Oy1aqJpNKJG9zq1bVu2bctxnPBQqej0ajabVaFQULValeu6uuSSS2J/3UJzFhEVAIAHEVEBTIKICgCYarKIqCeyV7ziFfI8LzwManFxUYuLiyPTomYpfbvdVr/fX3egVL/fV6fTUa1Wk+u64aFU5nAos8dqNKJG92A1B1mZmDo+rWrbdngIVTab1VlnnRX76xYaWkRUAAAeREQFMAkiKgBgqskiop7Inva0pymVSqlSqayLmyZ+ZjIZFYtF1Wo1dbvdcCLVBNVut6tms6lyuax8Ph8G02QyqYWFhTCSjkdUM40a/V42m1U2mx35ux3Hke/7qlQq+upXv6rTTjst9tctREQFACBERAUwCSIqAGCqySKinuj+4i/+QkEQyLbtkbBpYmYqlVIul5Pv+2q32+p0Our1eup2u+p0Omo2m6pWqyoWi+GhUmZP04WFhZHnMQdWmX1XzcRpNNxms1llMpkwqubzeQVBoHK5rG3btumkk06K/TULEVEBAAgRUQFMgogKAJhqsoioJ7qXv/zlKpVKcl03nAg1AdPsT5rNZsNp1GazqU6no3a7rWazqVqtJt/3lc/nw71Vze+Z343ui2pCbXTZvplMNdOrm4KCAAAgAElEQVSnZiI1l8vJdV1Vq1V5nqetW7fG/nqNIKICABAiogKYBBEVADDVZBFRp8HevXvled7IFGgmkwknSlOplPL5vMrlsur1uprNpur1uqrVqsrlcjiFaiKqOVTKHAw1HlHNxGl0atXE1VQqFe6Pms/n5XmeyuWy9u3bp5e85CWxv1YjiKgAAISIqAAmQUQFAEw1WUTUafCWt7xFpVIp3NM0unfp4uKilpaWZNu2XNdVEASqVCoql8vyfV+u68pxnHB6NJVKKZvNjhwcFQ2k5vtm+wAzsRpd0m/bdjiFav6OW265RU984hNjf61GEFEBAAgRUQFMgogKAJhqsoio0+Cxj32sfN+X53nhcvrolOjS0lIYNz3Pk+d5KhQKIxOo0Yhqwqk5VMoE0mgkNT+7UUQ1e6F6nhfu1/qRj3wk9tdpHSIqAAAhIiqASRBRAQBTTRYRdVq8733vC/c2HV+CH42c+XxehUJBjuOE+6CagGqiqTk0KrqvajSimu0CNoqoJuKaKVTP83Tbbbfp8ssvj/01WoeICgBAiIgKYBJEVADAVJNFRJ0Wz3zmMxUEgTzPUy6XC0NoNHSaPU6j06cmiNq2HUZU83vRUBpd0j/+3MlkcuRgKTPxWi6Xlcvl9KlPfUpPfvKTY3+N1iGiAgAQIqICmAQRFQAw1WQRUafFSSedpE996lMqFotyXTecIh3ft9Qs0zfM5Gl0AtUE0+jX4/uimu+Px9VMJqNcLqdSqaQgCLSwsKD3vve9sb8+GxpasuY2wXUAALAJEFEBTIKICgCYarKIqNPipJNO0sUXX6xCoRBOgJql+OMR9VDh1EyXms9NhDUhdXzqNDq1apbym/1Qs9ms/viP/1gvf/nLY399NkREBQAgREQFMAkiKgBgqskiok6bL3/5ywqCQK7rKpPJjMTT8WX54/umRiOp+XpxcVGLi4vrHo9GVPN5NpuV4zjhNOyOHTv0rGc9S49//ONjf102REQFACBERAUwCSIqAGCqySKiTpvXvOY1KpfL8n3/kBE1+tHE0uiU6eLiopaWlrSwsKCFhYUND5GKTrOavVDNgVKO4+iGG26I/fU4JCIqAAAhIiqASRBRAQBTTRYRddo86lGPGlnSHw2l49OmJoyaaVMTSE1EjYr+bDTGmn1VzRSq53nyPE+7du3SFVdcEfvrcShbZBFRAQB4EBEVwCSIqACAqSaLiDqNrrrqKmUymZEDpsb3RjXTpNGIOj6JGg2rG0VUE0/NYVLFYlHlclmFQkE33XSTnvKUp8T+WhwKERUAgIcQUQFMgogKAJhqQ4uIOo3OOecc7dq1S+VyWfl8XtlsdmQidXwv1GgojT6+sLAwsh3A+JYAJp6aw6R831elUlEymdSf/dmfxf46HA4RFQCAhxBRAUyCiAoAmGpDi4g6rd7znvdoaWlJruuqUCiEE6lRJoqOR1Tzveh06vheqtGDpMwy/nK5LMdxdPXVV+uSSy6J/TU4HCIqAAAPIaICmAQRFQAw1YYWEXVaPec5z9GXvvQl+b6vUqmkfD6vTCYTSqfTI5Oo0VAa/V50MjU6qZrNZmXbtgqFglzXVRAE8n1fCwsLevazn63HPe5xsb8Gh0NEBQDgIURUHNb5WzVfqaiyzry2nr8Jrg+xIqICAKba0CKiTrP3v//9+uxnPyvP8+T7vmzblm3bIxE1ukTfhNJMJrNu+b/ZA9X8nomoxWJRvu+r0WjI8zzddNNN2rJlS+z3fiSIqAAAPISIisM6f6vmK9t02fjjl20jpIKICgCYbkOLiDrNLr74Yl166aUqFAqq1+vh0nsTSTeKqCaWRj+anzVTrK
lUSrZtK5fLqVQqqVQqqVgsateuXXrd614X+30fKSIqAAAPIaLisA4WUa3ztXW+om2XbYJrRGyIqACAqTa0iKjT7NRTT9UjHvEI7dq1S5VKRcViUcViUblcLpxGje55Gt0bdWlpSZlMZmQZf3Q7gFwup0KhoHK5rGKxqGQyqRtvvFFPetKTYr/vI0VEBQDgIURUHBYRFYdARAUATLWhRUSddo94xCP0pje9Sel0WuVyWeVyOTxoKpPJHDKiHmwS1Rwo5XmeKpWKHMfR5z//eW3dujX2+z0aW2TJSsR/HQAAbAZEVBzWIZfzbxRXMUuIqACAqTa0iKjT7qSTTtJjH/tYffWrX5Xv+6pUKvI8T/l8XrlcbmRv1OgeqCaiRg+TMgE1l8spn8/LdV3Ztq2vfOUrev3rX68XvehFsd/v0SCiAgDwECIqDouDpXAIRFQAwFQbWkTUWfGOd7xD11xzjYrFosrlsnzfl+u6ymazI6HUMBE1nU6PxNNCoaBCoaBisSjHcbRr1y5t27ZNT3rSk3TmmWfGfp9Hg4gKAMBDiKg4rIMu5weIqACAKTe0iKiz4hnPeIZ+4zd+Q7Ztq1arqVaryfd9OY4TLu03zARqOp1WNpuVbdtyHEeFQkGlUkme56lUKmlhYUFzc3P6oz/6o9jv7+EgogIA8BAiKg6LiIpDIKICAKba0CKizpqPfvSjuuGGG1QoFBQEgUqlklzXVS6Xk23b4aFT0QOk8vm8CoWCXNeV67pKpVLauXOn3vnOd+pVr3qVXvjCF8Z+Xw8HERUAgIcQUXFYRFQcAhEVADDVhg+K+zpw/Pz2b/+2Lr/8ci0tLalaraparYaHTeXz+XCvVMMs3y8UCnIcR+l0Wtu3b9cnP/lJPf3pT9cTnvAEnXHGGbHf11FLEFEBAIgiouKwiKg4BCIqAGCqDS0i6iw655xztG3bNu3atUv5fF7FYlGe54Vc11WxWJTruvI8T8ViUfl8XrZta/fu3br++uv1B3/wB7Hfx0QSRFQAAKKIqAAmQUQFAEy1oUVEnUWPe9zjdOmll+raa6/V7t275TiOfN9XEAQKgkDlclmlUikMpzfddJM++clP6tprr9U73vEOvfrVr9bzn//82O9jIgkiKgAAUURUAJMgogIAptrQIqLOolNOOUWnnnqqXvWqV+nTn/60vvjFL+qWW25RMplUMpnU0tKS9u3bpx07dujWW2/VK1/5Sl188cV63vOep2c84xl60pOepDPPPDP2+5hIgogKAEAUERXAJIioAICpNrSIqLPswgsv1Ktf/Wq95jWv0eWXX66bbrpJX/jCF3Trrbfqlltu0Y033qiPfexjOvnkk2O/1mMuQUQFACCKiApgEkRUAMBUG1pE1Fl21lln6XGPe5we/ehH6+STT9af/umf6gMf+IDe//736z3veY/e9ra36XWve13s1/krkSCiAgAQRUQFMAkiKgBgqg0tIioecskll+jSSy/Vy172Mr3whS/Uc57zHJ133nmxX9evRIKICgBAFBEVwCSIqACAqTa0iKh4yBOe8AQ95jGP0RlnnKFTTjkl9uv5lUoQUQEAiCKiApgEERUAMNWGFhEVMypBRAUAIIqICmASRFQAwFQbWkRUzKjEgxE17usAAGCTIKICmAQRFQAw1YYWERUzKkFEBQAgiogKYBJEVADAVBtaRFTMqAQRFQCAKCIqbrzxRt1zzz1aW1s7Yvfcc49uvPHG2K8d8SOiAgCm2tAiomJGJYioAABEEVFxzz336L777tPR/Lnvvvt0zz33xH7tiB8RFQAw1YYWERUzKkFEBQAgioiKtbW1owqo5s/a2lrs1474EVEBAFNtaBFRMaPmiKgAAERdffXVuv6SS1S0bRUKBcyYXC43UUTN5XKx3wPiUbRtXX/JJURUAMB0m7OIqJhRc0RUAACirr76ahVtW/9z9tmSZWGG3G9Z+n/WA5Oo//u//3vU1tbW9P8efJ647wXH3/+cfbaKtk1EBQBMtzmLiIoZNUdEBQAg6uqrr1ahUIg9yOD4i0bU++6776gRUVEoFIioAIDpNmcRUTGj5oioAABEEVFnVzSi3nvvvUeNiAoiKgBg6s1ZRFTMqDkiKgAAUUTU2RWNqD/96U+PGhEVRFQAwNSbs4iomFFzRFQAAKLYE3V2RSPqf/3Xfx01IupsY09UAMBMmLOIqJhRc0RUAACirr76al1/ySUq2nbsp33j+Mrn88rlclpbW9OPf/zjo7a2tqZcLqd8Ph/7veD4K9q2rr/kEiIqAGC6zVlEVMyoOSIqAABRV199dezXgHjdc889+sEPfqAf/ehHR+wHP/iB7rnnntivHfEjogIAptqcRUTFjJqzZA03wXUAALBJEFFxww03qNfrqdvtHrFer6cbbrgh9mtH/IioAICpNmcRUTGj5iwiKgAAEURUAJMgogIAptqcRUTFjJqziKgAAEQQUQFMgogKAJhqcxYRFTNqziKiAgAQQUQFMAkiKgBgqs1ZlrQJrgM47oYWERUAgAgiKoBJEFEBAFNtziKiYkYRUQEAGEFEBTAJIioAYKrNWURUzCgiKgAAI4ioACZBRAUATLU5i4iKGUVEBQBgBBEVR+Z8bZ2vqFKJ2HbZJrguxI2ICgCYanMWERUziogKAMAIIiqOxGXbxqPpg1GVkDrziKgAgKk2ZxFRMaOIqAAAjCCi4vAu07bKvLaeP/b4+Vs1X9mmy2K/PsSJiAoAmGpzFhEVM4qICgDACCIqDo+pUxwcERUAMNXmLCIqZhQRFQCAEURUHJnLtI39ULEBIioAYKrNWURUzCgiKgAAI4ioOHqRoEpMnXlEVADAVJuziKiYUUNL1twmuA4AADYJIioetvO3an6jvVIxU4ioAICpNmcRUTGjiKgAAIwgouKwLtumyoYHSD2wV+q2yzbBNSI2RFQAwFSbs4iomFFEVAAARhBRcXgPHiw1v1XnRx4/f+v8uscwe4ioAICpNmcRUTGjiKgAAIwgouJIXbYtcrBUZX1UxWwiogIAptqcRUTFjCKiAgAwgogKYBJEVADAVJuziKiYTVtkEVEBAIggogKYBBEVADDV5iwiKmYTERUAgFFEVACTIKICAKbanEVExWwiogIAMIqICmASRFQAwFRLWERUzCYiKgAAo4ioACZBRAUATLWERUTFbCKiAgAwiogKYBJEVADAVEtYRFTMJiIqAACjiKgAJkFEBQBMtYRFRMVsIqICADCKiApgEkRUAMBUS1hEVMwmIioAAKOIqAAmQUQFAEy1hEVExWzaIktWIv7rAABgsyCibj5btmwJxX0twOEQUQEAUy1hEVExm4ioAACMIqJuLqeffrquv/56feYzn9EHP/jB2K8HOBwiKgBgqiUsIipmExEVAIBRRNTN44orrtCHPvQh1Wo1tdttua6rN7zhDbFfF3AoRFQAwFRLWERUzCYiKgAAo4iom8Ov/dqv6Stf+Yr6/b4OHDigO++8UysrK9qxY0fs1wYcyrqI+oQnPEGf+9znYr8wAACOhYRFRMVsIqICADCKiLo5vOhFL1KlUtEdd
9wRRtQ77rhDxWIx9msDDkWSrFtuuUXG5z73Ob3whS+M/cIAADgWEtYDETWxCa4FOG4SRFQAAMYRUTeHz372sxoOhzpw4IBWV1d14MAB3XHHHapUKnrEIx6hU045JfZrBDay4XJ+AACmRcIiomIGJYioAACMI6JuDslkMgyo/X5f/X5fd9xxh3q9nnbu3KkPf/jDsV8jsBEiKgBgqiUsIipmUIKICgDAOCJq/N797ner1WppeXlZnU5HjUZDtVpNd955p1qtlprNporFot75znfGfq3AOCIqAGCqJSwiKmZQgogKAMA4Imq8XvKSlygIAg2HQ3U6HdVqNZVKJXmep36/r0qlosFgoH6/r3w+rwsuuCD2awaiiKgAgKmWsIiomEEJIioAAOOIqPH68pe/rAMHDqjT6aherysIAjmOo3w+r3K5rCAIdPfdd6vX62k4HOrjH/+4fv3Xfz326wYMIioAYKolLCIqZlCCiAoAwDgiaryCINDq6qpqtZqCIFCpVJLjOLJtW77vq1ar6dvf/rY6nY46nY5WVlb09re/PfbrBgwiKgBgqiUsIipmUIKICgDAOCJqvLrdrlZWVlQul+U4jorForLZrGzbVhAEqlQquvvuu1UsFhUEgdrttkqlUuzXDRhEVADAVEtYRFTMoAQRFQCAcUTUeHW7XfV6PZVKJeVyuXApfz6fV6lUUrFYVKvVkuM48n1fQRBoMBjove99b+zXDlgWERUAMOUSFhEVMyjxYESN+zoAANhEiKjx6vf7qtVq8n1fuVxO+XxehUJBxWJRpVJJruvK933l83n5vi/P89TpdFQqlfTiF7849usHiKgAgKmWsIiomEEJIioAAOOIqPF58pOfrNXVVQVBoGq1Ktu2VSwWVSgUwqlTs0+qWc5fLpdVq9XU6/V06623xn4PABEVADDVEhYRFTMoQUQFAGAcETU+V111lfbv3692u61Wq6VsNhtOoAZBoFqtplarpWq1GsbTVquler2uZrOplZUVnXnmmbHfB2YbERUAMNUSFhEVMyhBRAUAYBwRNT6pVEr79+/XcDhUs9mU67rh4VL1el3tdlvNZlOdTkftdlvD4VDD4VDdbletVkvdblef+tSnYr8PzDYiKgBgqiUsIipmUIKICgDAOCJqfOr1ulZWVrS8vKxmsxkeLuX7vqrVqhqNhhqNhrrdrg4cOKAf/OAH+u53v6vhcKher6d2u61yuazXvva1sd8LZhcRFQAw1RIWERUzaI6ICgDAOCJqfLrdrgaDgVZXV9VqtdRoNML9UMvlcrhsv9Pp6Nvf/rZ+8pOf6Cc/+YlWV1c1GAzUarXU6XS0c+fO2O8Fs4uICgCYagmLiIoZNEdEBQBgHBE1PoPBQIPBQAcOHNBgMNBwOAwDqud5qlarYUT90Y9+pH//93/Xvffeq2984xvq9/theO31enruc58b+/1gNhFRAQBTLWERUTGD5oioAACMI6LGx+yHun//fv3VX/2V7rzzTlUqFTWbTXmep1KpFEbSX/ziF/r+97+vf/7nf9ZPf/pTDQYDNZvNcLn/0tJS7PeD2UREBQBMPVlEVMyYOSIqAADjiKjxueuuu9Rut3XgwAGtra3p9ttvV61WU7/fV71eV6lUUq1WU6fT0c9+9jN961vf0t/8zd/ol7/8pb75zW8qCAJVKhW12231er3Y7weziYgKAJh6soiomDFzRFQAAMYRUeNx8cUXa//+/Wo2m7r77rt11113aTAYqN1ua2VlRZ1OR77vKwgCtdtt/fKXv9Tf/u3fam1tTf/6r/+q//mf/1G/31cQBKpWq+r3+7HfE2YTERUAMPVkEVExY+aIqAAAjCOixuOqq64K9zW96667tLy8rHa7rX6/r+XlZfX7ffm+L8/z1G63df/99+vv//7v9bWvfU3f/va3JUn/8i//olKppCAIVK/XdeWVV8Z+X5g9RFQAwNSTRUTFjJkjogIAMI6IGo+bbrpJnU5H3W5Xq6urarVaarfbGg6H6vf76vf78jxPnuepVqvp/vvv17/927+FS/p/8YtfSJIKhYLK5bJKpZIcx4n9vjB7JMk699xzZTz96U/Xox/96NgvDACAY0UWERUzZo6ICgDAOCJqPMwy/X6/r263q1qtFgbVXq+nXq8n13VVqVRUKBR077336v7779c//MM/6Nvf/rb++7//W5KUzWZVLBbl+776/b5OO+202O8Ns2XdJOqpp56qpz/96bFfGAAAx4osIipmzJwla7gJrgMAgE2EiBqPfr+vWq2marWqVqulZrOpXq+n1dVVDQaDMKI2m02Vy2XdfvvtkqR7771X//RP/6T7779f999/v1KplMrlsg4cOKBOp6MXv/jFsd8bZsuGy/nPPffc2C8MAIBjRRYRFTNmziKiAgAwhogaj8FgoGq1qkqlom63q3a7rV6vFy7n7/V6KhaLqtVq6nQ66nQ6uu+++/R///d/+vnPfy5JWl1dleM4qtfruuuuu9Rut3XZZZfFfm+YLURUAMDUk0VExYyZs4ioAACMIaLGw0TUer2uwWCgbrcb7oVqlvOXSiW1Wi0NBgO122194xvf0A9/+EP9/Oc/1z/+4z+qVCqpXq+r0WhodXVV/X5f1113Xez3htlCRAUATD1ZRFTMmDmLiAoAwBgi6vH39Kc/XcPhUPV6XZ1OJ5w+jU6h9no9VSoVDQYDDYdD9Xo9NRoN3XHHHfr+97+vTqejSqWifr+vdrutTqejXq+nhYWF2O8PsyWMqM9+9rPDB4moAIBpIouIihkzZxFRAQAYQ0Q9/rZu3ar9+/er0Wio3+9rMBhoMBiM7Idqouny8rKWl5c1GAzU6XTUarXU7XZVLBbV7XbV6/XU7/fDidRyuRz7/WG2SJL1/Iuer+df9PzwQSIqAGCayCKiYsYMLSIqAABjiKjH3w033KDV1VW1Wq0wkA6Hw5GIapbwm4hqplSbzaZWVlZGfndlZUWNRkPFYlGdTif2+8NsIaICAKaeLEtzm+A6gOOGiAoAwDpE1OMvk8mEU6UHDhwIJ1FNFB0MBur3++p0OiOTqCai7t+/f+Tx4XCoWq0mz/OIqDjuiKgAgKkni4iKGUNEBQBgHSLq8ddoNNRsNtVut3X77bePhNNoMDURdWVlJTx8qtVqaTgchtF1eXlZvV5PQRCoWCyq0WjoWc96Vuz3iNlBRAUATD1ZRFTMGCIqAADrEFGPPzNR2u12tX///pGIapb0m2i6UUQ1B0mtrKyE3wuCQJ7nqVKp6Pd///djv0fMDiIqAGDqySKiYsYQUQEAWIeIevwNh8MwokaX8Jv9Tc3nvV5Pw+Ew/BkTUev1utrttlZXV7W8vKz9+/er2WwqCAKVSiVt3bo19nvE7JAky7IsPfvZzw4fJKICAKaJLCIqZgwRFQCAdYiox9/q6qqazWZ4gFS/3x8Jqebx8YjabrfVbDZVq9XUarXCiGp+LggC5fN57d27N/Z7xOwII2oUERUAME1kEVExY4ioAACsQ0Q9/kxENfE0GlHH90QdDocaDofq9/tqNBoKgkDlcjmcYjURdXV1
VZVKRY7jqFQqxX6PmB1EVADA1JNFRMWMIaICALAOEfX4W1lZUavVWjeBOhgMwsnT4XC4bhK10WioXq+rXq+Hh1CtrKyo0+lo//79qtfrchxHrVYr9nvE7CCiAgCmniwiKmYMERUAgHWIqMefiahm2f646IFR0ajaarX0ne98R9/5znfU7/fDCdRarabl5WU1Gg05jqNutxv7PWJ2EFEBAFNPFhEVM2ZoyZrbBNcBAMAmQkQ9/paXl9XpdA4aUZeXl7W6uqrhcKiVlZVw2X673Zb5c/fdd6tWq2llZUXlcjk8cMpxHDWbTT32sY+N/T4xG4ioAICpJ4uIihlDRAUAYB0i6vHX7/d1xx13hME0umQ/GlFNPDWPt1otRf/4vq/BYKBmsynP81SpVOS6rsrlst70pjfFfp+YDURUAMDUk0VExYwhogIAsA4R9fhrNBq68847R6ZMo9G03++HB0eZnxkOh2o0GiMR9T//8z/V6/XU7Xbluq5c11W321W1WtU111wT+31iNhBRAQBTTxYRFTOGiAoAwDpE1OOvUCiEy/WNlZWV8LFut6tKpRI+ZiLqgQMHRiLq/fffr+XlZfV6PdVqNfm+r+FwqH6/r89//vOx3ydmAxEVADD1ZBFRMVu2yCKiAgAwhoh6/H3hC18Ip037/X54gJSJqJ1OR+VyeV1E/bu/+ztJ0i9/+Uv9x3/8hyTpe9/7nrrdrrrdroIgUKPRULfb1eLiYuz3idlARAUATD1ZRFTMFiIqAADrEVGPvze96U3qdrtqNpvqdDoje6GaSdRarRaGVbMv6o9//GNJ0o9//GPddddd4URqu93WYDBQEAQqFAoqlUrKZrOx3ydmAxEVADD1ZBFRMVuIqAAArEdEjUe/31e9Xler1QojalS73Va/3w8jar/f189+9jNJ0te+9jXlcrkwojabTfX7fVUqFRWLReXzeRUKhdjvEbOBiAoAmHqyiKiYLURUAADWI6LGo9frqdlsql6vq9/vazgcjkRUs9x/dXVVKysrGg6HuvfeeyVJd911lxYWFsKI2uv11G631Ww2VSqVZNu2SqVS7PeI2UBEBQBMPVlEVMwWIioAAOsRUeORz+fVbrdVq9XU6XTWRVRzQJTZJ7XX6+lHP/qRJOmHP/yhlpaWwoi6f/9+9Xo99Xo91et15XI5BUEQ+z1iNhBRAQBTb2gRUTFbiKgAAKxHRI3Hm9/8ZvV6PTUaDTWbzXXL+U1UNVOo3W5X3/zmNyVJ9913n7rd7khENb/T6XRUr9dVLpdjv0fMBiIqAGDqDS0iKmYLERUAgPWIqPExB0vVajV1u90N90YdDofhJOpgMAjD6Q9+8IPw8263q+XlZa2srKjX62llZYU9UXHcEFEBAFNvaBFRMVuIqAAArEdEjU8qlVK321WtVjvoAVOG2UP1l7/8pcb/lMvl8ACqbrerTqej+fn52O8Ps+GoI+pFF110UBdccEHsNwQAwLihRUTFbNkiS1Yi/usAAGAzIaLG5w1veIP6/b5uv/12NRoN9ft9raysHDSk9vt9HThw4KARdTAYqN1uy/M83lccN8dkEtVE1Kc+9amx3xAAAOOGFhEVs4WICgDAesS2+DzxiU9UuVzWgQMH1Gw2w6BqlvBHD5vq9/vq9/vqdDr6+te/HgbUb33rW6pUKlpeXtby8rKazaaq1aqe9rSnxX5/mA0PK6KOT6DGfRMAABzK0CKiYrYQUQEAWI+IGq+3vvWt8jxPnU5HzWYz3Pu02+2GEXV5eTkMqWa5/ve+9z1997vfHVnCX61WFQSBqtVq7PeF2fGwlvM//vGPj/3CAQA4UkOLiIrZQkQFAGA9Imr83ve+96nb7apSqcj3fbXbbVUqlfCwqQMHDujAgQNaWVlRt9tVt9tVvV6X7/saDodaXl5WuVxWOp2W53m67bbbYr8nzI6JJ1HPO++82G8CAIBDGVpEVMwWIioAAOsRUTeHXC6nZrOpYrEo3/dVKDWuMuoAACAASURBVBRUrVbV6/V011136etf/7ruvPNOtdtt1et1VatVFYtFtVotNZtNOY6jZDKpIAh0+eWXx34/mB0T74n6+Mc/nj1RAQCb2tAiomKGJIioAABshIi6OVx00UVqNpvyPE/5fF62bct1Xfm+r16vp7/+67/WHXfcoX6/r3w+L8/zVCwW5XleGFCz2ayKxaJOO+202O8Hs+NhLec/mAsuuCD2GwIAYNzQIqJihiSIqAAAbISIujmcccYZ+sIXvqB6vS7HcZROp5XL5eR5nnzfV6fT0WAw0HA4DANqsViU67rKZrNaWFiQ53l6xzveEfu9YLZMPIkKAMBmN3xQ3NcBHBcJIioAABshom4eL33pS7V79275vq9sNqtsNqtCoSDHcVQqlVSr1dRut+V5ngqFggqFQrgXaiaTkeM4Ouecc2K/D8wWIioAYOoNLSIqZkiCiAoAwEaIqJvLm9/8ZuXzebmuq0wmI9u2lU6n5TiOXNdVqVRSPp9XNpuV4ziq1WrhMv5rr7029uvH7Dmmy/kvuuii2G8IAIBxQ4uIihmSIKICALARIurm88lPflKlUkm5XE6pVEpLS0vhpKmJqktLS+GEaqFQ0NLSUuzXjdn0sCKq+fzxj3+8zjvvvA2/BwDAr9zZF633/KvXGVpEVMyQBBEVAICNEFE3nzPOOEPJZFLlclmZTEZLS0taXFxUuVwO90tNpVJyHEfFYlHlclkXXnhh7NeN2UREBQD86mwUOQ8SOq3nXy3r1bs3dtU9hxZ9jg3+vqFFRMUMSRBRAQDYCBF1c7rgggtUKBTkeZ7y+bwcx1G/31e73Q6nUkulkkqlki699FKdfvrpsV8zZhMRFQBwaNHo+SuKnNbZv9r//RhaRFTMkMSDEfX/4/+XAQAQRUTdvK644opw39Nqtap+v69+vy/XdVWpVFQsFvWWt7xFZ5xxRuzXitlFRAUAHNp4JDXh9DjEz2NlaBFRMUMSkYh6sP/wMf4fNOK+ZgAAjgMi6ub23ve+V47jqFKpqF6vq9FoyHVd5XI5XXvttXrkIx8Z+zVith11RAUAzKDoMvyNpk83eVgdWkRUzJDEgxH1YN/faHuNQ8VWgisAYEoQUTe/a6+9VjfffLO+8pWvaMeOHfr85z+v6667LvbrAiyLiAoAmMSRhFUTYGK8zqFFRMUMSRwmoh6NjfYyjv67TWwFAJxAiKib3yMf+UiddtppetSjHqVHPepROu2005hAxabxsCLqBRdcoAsuuECW9cCS/osuukgXXXSRzjzzzNhvCAAQMxNQjiSsHqfQMrSIqJghiWMYUY8UsRUAcAIgogKYxFFH1PPOO09PfepTw69NPD3zzDPDsAoAwIhDhdXjsB3A0CKiYoYkYoioR+pQsfVw+7YSXAEAEyKiApjERAdLjYdTDpYCAByx8f0Yf4VhdWgRUfH/s3fvQZKd9X3/j4QkApYMhnXMUpEVXVCJIDsgpWxiQtJyjJRYpkjsQDAuA3aQUjEpQmxDxXZg2yBjDDIWQoBWe5udW99vp0+f++nTl7nsVRcEQiCLGKkgxDI
GieAgY/vz+8Oc8+uZnZndVc/OmZ1+T9WrdqfndrrPSrV66/s8zwTJbeOIeqbYtxUAcA4QUQGMY6yIevnll58ylZr1EwIAnOfOwT6rsUFExQTJ74CIejbYSgAAcIaIqADGcdYR9frrr9euXbtO+f2uXbt0zTXXZP6EAAA70Jj7rMbGzo+ov/mbv6lisSjLstTpdBSGoQaDgRYXF3XkyBENBgP5vi/P8+T7vsIwVLfbTYVheMpjG+n1ehoMBhoOhxoMBorjWFEUqdvtpr+uZfRzfN+XaZoqFosqlUqqVqsyTVPdbleVSkVvfOMbM39dz0v5CYuoZ4rYCgATj4gKYBzP6WCp5CCpJJomh0tl/WQAABPkLPZZjY3zP6K+5jWvUaVS0fHjx3Xy5El5nqdKpaJGo6FWq6VGo6FOpyPf9xUEwYpoGQSBer2ejhw5ooceekgPPPCAjh49qmPHjmk4HCoIAtm2veLrwzA8JbYOh0MdPXpUS0tLaTj1fV+WZalSqaQR1PO8NJbGcZx+/ejvNwqta31Ocj2u66rZbKper8t1XZXLZf3zf/7PM78/20qeiHrGNgqrq/+dcgP/4Q0A5zsiKoBxPKeICgDAtrTOPqvx7huVG91D8TyYLvvpn/5pdTodVatVzc7OpoGy0+nIcZwVobTb7arf76vf76ehMoqiFVOfYRiueGz1VGmv11s3ZoZhqCAIdPz4cZ3t25/92Z/JdV35vq8oihTHseI4lud5sixLzWZTxWJRtVptRXw9E0ngDcNQjuPINE21Wi05jqPf/d3fzfweZiY/oRF1rSC61uFVq+PoWtbbczXr5wgAGAsRFcA4iKgAgB0vbxjKbfI+q+fSBz/4Qbmuq3q9rlarpXa7LdM0Zdv2itC4XvxcK0QmAXWjmDoqjuNTvn/y/mAwSJfyLywspO8PBoP0/eRnjX6/hYWFdIp19XUm3zuZtB0Oh6fE0tUTrb1eL/19GIZyXVe2badx1jRNOY6jUqmU+T3dUvkdEFHPdRBdvWQ/6+cLANgSRFQA43hOB0ttJOsnBADAanljjeX8Y+6zei7deuut6RL8MAzV6/UUBEEaBy3LOmUSdb1JzdUTqaNRcnl5WQ888ICOHz+e/qz19kVNPpZ8fDTKrhZFkb7yla+c8bTqF7/4Rfm+L9u206nV0W0FRn/uWtE4ialBEMhxnHRfWMuyZFlWGqDb7bZe/epXZ/7n8ZzLb7OIShAFAGwTRFQA4xhrEpXDpAAA54O8cYZ7op7JPqtbHGs+9alPyfM81Wo1FQoFFQqFdP9T27blOE76axIgV0fN0WnUJHKujqO2baeTrmdywFTyfUbD6ejBVY7jaDAY6Nlnnz2r5f/f+MY30j1fbdtOY3ISSkf3VQ3DMP28jbiuK9d1NTc3p3/yT/5J5n8ez7n8OY6oBFEAwHmKiApgHERUAMCOlzfGOFhqnX1W1zrAaquiz+HDhxVFkRzHUbPZVLlcVq1WU7PZTJexn00QXX3o01pbASTToWtF2kQSO1dPi47Gz16vp36/v+Jz+/1+uh3A6NcmkTQJpaPh2HXddMrUNE1ZliXHcVJJzG21WvrQhz6U+Z/BLZU/i4i6lUGUKAoAyBgRFcA4iKgAgB0vb4wRUdeyOqxuo31WL7vsMs3MzKTL7W3bTuNqs9lUq9WSZVlrTmquZtu2fN9fscTe8zy5rpuGzGQP0na7nYbb5HOS3yffY/Tx5LHR7zUq+dzk40lEtSxLrVZrxfNIIuvU1JRuuOGGzP+8ZSb5c/mbt8t4dI3T5gmiAIAJR0QFMA4iKgBgx8sbmxxR17ON91ndyJVXXqmPfvSjmpubk2maCoIgDaCdTketVkv1ej1Vq9VUr9fVaDTSX03TVKfTUafTOSWIrg6mo3uVJgF2NIh6nifTNFUsFvWpT31KV155Zeav0ZYbd0K0dTtBFACAVYioAMZBRAUA7Hh5Y4si6lq24T6ryOjPwFYtmc8bMuJt8LwBANhmiKgAxnHWEfXGG2/cUNZPCACA1fJGhhF1Ldtwn1U8h3u3XfcQzRtEVAAA1kBEBTCOsSZRAQA4H+SNbRZR17KN91ndsc6HIPpcxAYRFQCANRBRAYyDiAoA2PHyxnkQUddznu6zmtnrtNOC6HNBRAUAYE1EVADjIKICAHa8vHEeR9S17PR9Vkdj5XpRdFKC6HNBRAUAYE1EVADjIKICAHa8vGFI2+A6zqntus8qQXTrEVEBAFgTERXAOIioAIAdL29MQERdy5nus3qmAZIgen4gogIAsCYiKoBxEFEBADte3pjQiLqeMwmrBNHzFxEVAIA1EVEBjIOICgDY8fIGEfW0Vu+zShA9fxFRAQBYExEVwDiIqACAHS9v7IyIesEFF+iSSy7RpZdeqh/5kR/Rj/3Yj+nyyy/XlVdeqWuuuUaveMUrdNVVV2V+ncgYERUAgDURUQGMg4gKANjx8sbOiKivfe1r9YY3vEFvetOb9B//43/Ur/7qr+q2227T7bffrttuu03vfOc79Yu/+IuZXycyRkQFAGBNRFQA4yCiAgB2vLyxMyLqe97zHv3BH/yB7r77bh08eFAzMzOqVqsqlUo6dOiQPvWpT+kDH/iAdu/erd27d2d+vchIbMjIb4PrAABgmyGiAhgHERUAsOPljfM3or74xS/W7t27dfXVV+vuu+/WzMyMKpWKms2mms2mLMtSvV5XoVDQzMyM7r33Xr397W/Xf/gP/yHza0dGiKgAAKyJiApgHERUAMCOlzfO34h600036W1ve5v+63/9ryoWi2k4tW1bnU5Hruuq3W6r0WjINE21223Nzc3pT/7kT3TJJZfohS98YebPAVuMiAoAwJqIqADGQUQFAOx4eeP8jaj//b//d913332yLEtBEMjzPHmeJ9/3FYahgiCQaZqq1+tqtVqyLEvValUHDhzQG9/4Rv3cz/1c5s8BW4yICgDAmoioAMZBRAUA7Hh54/yMqK9+9at15513am5uTq1WS51OR+12W+12W77va2lpSVEUqVarqVgsqlKpqNVqqdFoqFwu65577tGePXsyfx7YYkRUAADWREQFMA4iKgBgx8sb52dEfe9736vZ2VnVajXVajXV63VVq1XV63V5nqcHH3xQcRyrWCxqdnZW8/PzqtVqajQaajQaqtVq2rt3r37mZ35G//gf/+PMnw+2xgUyiKgAAKyBiApgHERUAMCOlzfOr4j6/Oc/Xy960Ys0NTUl0zTVaDRUKpVUKpVUKBRUKpXU6XS0tLSkTqejubk5zc7Oam5uLp1IrVar6YTqnj179LM/+7OZPy9sDSIqAABrI6ICGAcRFQCw4+WN8yuivu51r9Ob3/xmlUqldGn+7OysZmdnNT09rdnZWTUaDYVhqGq1qpmZmfTjMzMzKhQKqlQqqtVqajabKpVK+s//+T/ryiuvzPy54dwjogIAsDYiKoBxEFEBADte3ji/Iup/+2//Tfv27ZNpmqrVapqbm9PU1JSmpqZ08OBBTU9Pq1KpyLIsFQoFzczMrFAoFNKl/8lE6p/8yZ/obW97W+bPDeceERUAgL
URUQGMg4gKANjx8sb5EVEvuugiXXXVVfrjP/5j1ev1FVOoSUSdmprS7OysSqWS6vW65ufnNTc3p5mZGU1PT2t6elqFQkGtVkvValXFYlHNZlPtdlsf//jH9SM/8iOZP0+cW0RUAADWRkQFMA4iKgBgx8sb50dE/aEf+iG9/e1v19TUlOr1usrlcjppmgTUw4cPa35+XvPz8yoWi5qfnz9lGrVQKKjdbqter6tQKKhcLqter2v//v36xV/8xcyfJ84tIioAAGsjogIYBxEVALDj5YzzI6K+5CUv0ac//Wk1m03V63XNzc2tmDI9fPhwOmlaLBZVrVbTQ6SSidTZ2VkVCgVZliXTNFUoFDQ7O6tyuaxaraZPfvKTmT9PnFtEVAAA1kZEBTCOL33pSzJWP0hEBQDsJDlj+0fUV73qVXrTm96kmZmZdAp1dnY2DalJTE2W8pfLZTUaDbVaLTWbTRWLxXQytVgsqt1upxF1enpac3NzqtVqmp6e1utf//rMny/OHSIqAABrI6ICGAcRFQCw4+WM7R9R3/rWt+rOO+9Us9lUtVpdET8LhYIKhYLm5uY0Pz+varWqRqMhy7Jk27Ysy1KpVErjarlcTqdZ5+fndfjwYU1NTalUKqlarer3fu/3Mn++OHcukCEjl/11AACw3RBRAYyDiAoA2PFyxvaNqJdccon+4T/8h/rgBz+oSqUi0zRVKpXSydO5uTmVSiVVKhUVCgWVSiWZpqlOpyPP8xSGoVzXTeNpvV5XtVpVtVpNv8/U1JQOHDigubk5NZtNHTx4UNddd13mzx3nBhEVAIC1EVEBjIOICgDY8XLG9o2oV1xxhd761rfqvvvuU61WU7VaTfc2TSZPK5WKGo2GyuWyKpWKHMeR7/uK41i9Xk+O46QRtdFoqFarpZ87Pz+v6elpHTx4UDMzM2lcfde73pX5c8e5QUQFAGBtRFQA4yCiAgB2vJyxfSPqa1/7Wt13332ybVuNRiNdxp8cEJUcIGWaphqNhjqdjuI4VhzHWlxcVBzHajab6XL+JKIWCgVVq1UVi0XNzs5qampKU1NT6X6p99xzj1760pdm/vyx+YioAACsjYgKYBxEVADAjpcztmdEveqqq/TOd75T5XJZlmWl8XN2djY9IKpcLqtWq6nT6ci2bfV6PR05ckTD4VDHjx9Xt9tVuVxWsVg8JaIm+6IWi0XNzMxoampKc3NzajQampmZ0b/5N/8m89cAm4+ICgDA2oioAMZBRAUA7Hg5Y3tG1Le85S36xCc+oXa7rWazqUqlomKxqPn5eRUKBZXLZVWrVdXr9XT/0+XlZR0/flyLi4s6duyYwjBUvV5XvV5Xo9FQq9VKl/Pbtq1Op6N6va5CoaBDhw5pZmYm3Xv1f/7P/6kLL7ww89cBmyhHRAUAYD1EVADjIKICAHa8nLH9IurFF1+sP/iDP1Cr1ZLjOKrX66pUKiumSpMwalmWer2ehsOhTpw4oRMnTmhpaUlHjhyR7/tqt9uyLEuWZck0TdVqNdVqNYVhqDAMZVmWqtWqDh8+rJmZGZVKJVmWpXvuuUdXXHFF5q8FNlGOiAoAwHqec0S9ZY9Mc69uuy775wDgLF13m/aapsxTnP0/00RUAMCOlzO2X0T9uZ/7OR04cECdTkedTke1Wk2VSmXFoVCmaardbsv3fS0vL+vo0aN68MEHdfz4cS0tLWlxcVGe58lxHLmuK9u2ZZqm6vW6Wq1WevCU7/tqtVqanZ3VzMyMCoVCGlb/xb/4F5m/FthEOSIqAADreW4R9TrdttfUnj17ZO65JfPnAOAsXXeb9pp7dMvqx5/D/xwhogIAdryc8fcRNbcNriXx4Q9/WJ1OR67ryjRNVatV1Wq1dDl+vV6XbdtyHEdRFGlxcVFHjhxJl/IPBgMNh0MFQSDf9xUEgRzHSbcG6HQ66ef0ej3Ztq1SqaRCoaBSqaR2uy3btvVrv/Zrmb8W2EQ5IioAAOt5ThE1CTDrhRgA29u6/+z+4H+Q3HLm34uICgDY8XLG9oqoV155pWZnZxWGoWzbTg+AMk1TnU5HjUZD7XY73Qe12+2q3+9rcXFRy8vLiuM4nTKNomjF5xw5ckTHjh3TwsKC4jhWFEXqdrsrlv2PhtaPfexjmb8e2EQ5IioAAOt5LhH1utv2phOot+wxtfe26zJ/HgDOAhEVAIAzlzO2V0R917veJdM007DZaDTUaDTkuq5835dpmmlA7fV6iuNY3W5Xw+FQCwsLiqJIURQpjmMFQSDP8xTHsb7yla/or//6r/Xss8/qy1/+soIgkGVZCoJAcRyn3y+KItXrdVmWpWKxqIsuuijz1wSbJEdEBQBgPWcfUW/RntHlvrfskbn3Nl23DZ4LgDO04XL+s5suJ6ICAHa8nLG9ImqhUFC325XneenG5pZlpVOljuMoCAJ1u9106rTb7arX66nf7ysMwxUTpp1OR71eT9///veVvP3N3/yNFhcX1Wq15Pt+GmOTiJqE21qtpksvvTTz1wSbJEdEBQBgPWcdUU+Jprdoj3l2k2sAMsbBUgAAnLmckX1EvfTSS/VTP/VTesMb3qBWq6UwDNPDoDzPUxRF6vf76na7aSRN4ulqYRimS/g9z9OxY8e01tu3vvWt9GCqXq+XhtQoitRsNlUul2Wapn7sx34s83uETZIjogIAsJ6zi6h/v9T31PBicsAUcD7ZxP2MiagAgB0vZ2QfUS+//HL93u/9nu6++25FUSTf99OA2u/3tbS0pIWFhTR2JpOnqwNqEkETvu/r//yf/7NmRJW04mckkbbb7arVaqler8txHN18882Z3yNsktwPImrW1wEAwDZ0VhF1vfBy3W3a+xwm2ABkhIgKAMCZyxnZRtQLLrhAN954o2q1mnq9nobDYboM3/d99ft9LSwsaDAYrFi+v9Yk6uhhUWEYajAYrBtQJeno0aOyLCv9ucn3Tw6xCsNQ73//+zO/R9gkOSIqAADrOZuIesue9SdOOWAKOI8QUQEAOHM5I9uI+oIXvEC33357GjOXlpYUBMGKiDocDtecPF0rooZhqCAIFIahvv3tb28YUf/0T/9U9XpdvV5Pi4uL6ZL+5MCpKIr0x3/8x5nfI2ySHBEVAID1nHlEXXWg1GrP4UAaABkhogIAcOZyRrYR9cUvfrHuvfdehWGofr+vI0eOKAgCWZaVRtR+v7/uHqhr7Ynquq7CMNwwoErSd77zHVUqlXQSNYmojuOo1+spCAJ99rOfzfweYZPkiKgAAKznrA+WAoARRFQAwI6XM7KNqDfeeKPq9Xp6IFSv15PnebJtW0EQpGHzTCNqEARyHEcPPfTQaSOqJJVKJcVxrH6/n067+r6vxcVFeZ6nQ4cOZX6PsElyRFQAANZDRAUwDiIqAGDHyxnZRtT/8l/+ixzHURiGsm1bjuPIcRy5rqsoik4bT1cH1iSifuc73znjiJocVJV8n+Tn27atw4cPZ36PsEnyRFQAANZDRAUwDiIqAGDHyxnZRtR9+/al8dOyLFmWJcdx5Pv+aSNqHMfq9Xor9kv1fV+O45wSSx999FE9/
vjjpzxeq9XSn5McTNVut1WtVmVZlmZmZjK/R9gkeSIqAADrIaICGAcRFQCw4+WM7CLqz/zMz6hcLiuKIvm+L8uy1Ol05HmewjA87dL9JKCujqhRFJ0SS8MwVBzHpzzebDYVRZG63a6iKFIQBGo2myoUCmq325qens78HmGT5ImoAACsh4gKYBxEVADAjpczsouov/3bv50eAuV5njqdjmzblu/7a06drl6+3+v1Vuxl2u125XmehsPhilD6+c9/XrOzsyqXy6dEVNM0FQSBwjBMp1hrtZoKhYJM09TU1FTm9wibJE9EBQBgPURUAOMgogIAdryckU1EfdGLXqR77703nUJ1HEedTifdH3X1Mv7RcJpE016vp8FgcMok6smTJ1eE0oWFBU1NTalQKJwSUdvttjzPk+/7sm1bpmmqUqlofn5epmnqwIEDmd8jbJI8ERUAgPUQUQGMQ5KM1Q8SUQEAO0nOyCai/vRP/7RKpZKiKJLjOLIsS7Zty3XdNJCuXro/egBUGIYrImoyoRqGoR599NEVodSyLO3bt08zMzMrHv/ud7+bbh+Q7MnabDZVqVRULBbVbre1d+/ezO8RNkmeiAoAwHqIqADGQUQFAEwEGVsfUd/+9reny+g7nY7a7bYcx5HneSsmT9eaRE0mTpOImizpTz72Z3/2ZytiaavV0oEDB9RoNFY8/vjjj8t1XQVBoCiK1Gq1VKvVVK1WVS6XZVmWPv3pT2d+f7BJ8kRUAADWQ0QFMA4iKgBgIsjY+oj6gQ98QEEQyPd9dTodWZaV7o96usOkwjCU67qnRNQ4jhVFkb761a+esmR/amrqlAOnFhcX04Oout2uGo2GqtVqynEc3XXXXZnfH2ySPBEVAID1EFEBjIOICgCYCDK2PqLu27dPvu/LdV1ZlpUuq98ooI4eHrVWRO12uwrD8JSI6rqu5ufn9dhjj614vN/vp1OoYRimEbVWq6nRaMjzPH384x/P/P5gk+QNGfE2uA4AALYhIiqAcRBRAQATQcbWRtQXvOAFarVaKw6Usm1bvu+fNqBGUSTbtuU4zpoRNQgCffnLX14RS48cOXLKUn5JaTxNvq5er6ter8s0TXU6Hfm+rz/8wz/M/P5gk+QNIioAAOsgogIYBxEVADARZGxtRH3Vq16V7n+aBFTHcU67lD+KovQAqPWW8wdBoM997nMrYukXv/hFua674rHvfOc76Z6syeFWzWZTpmmm+6/6vq8Pf/jDmd8fbJK8QUQFAGAdRFQA4yCiAgAmgoytjai33npruizfsiw5jrPmfqhxHKeSgOr7fjrFmkTU5FCpXq+nIAh07NixFcH0//2//6enn356xWOPPPKIwjBUGIYKgkCmaco0Tdm2rRMnTqT7pb773e/O/P5gk+QNIioAAOsgogIYBxEVADARZGxtRP2N3/iNNKJ2Oh25rrvigKfRQ6QSYRjK9315nqdms6kwDNeMqGEYajAYnLJ0f/Vbsn9qGIbpFGqz2ZTjOFpeXk6j7ete97rM7w82Sd4gogIAsA4iKoBxEFEBABNBxtZG1I997GOKoki+76d7oSYHPI1Oofb7/XSpfhJRbdtWs9lMI+vocv7Rz9vo7Zvf/KaiKEpDaavVUr1eV7Valeu6GgwGsm1bQRDosssuy/z+YJPEBhEVAIB1EFEBjIOICgCYCDK2LqI+73nP09zcXBpRk2X8o0v54zhOA+lgMFAcx2kcbbfbajabaThdHVGT/U03ejt27FgabH3fV6VSSSNqsleraZryPE8/9EM/lPn9wSYhogIAsC4iKoBxEFEBABNBxtZF1F27dqler6vb7aZ7nCaHRiUHOo0G1OFwmO6JmkTUTqeThtPRgJpEVMuy9P3vf3/NgPq3f/u3K7YOsG1blUpFrVZLjUZD7XZbjUZDjUZDtm3rBS94Qeb3B5uEiAoAwLqIqADGQUQFAEwEGVsXUa+55ho5jpOG0WRv09GDpEYnTPv9fhpXgyBQu91OD5VaL6K22239+Z//+ZoR9Qtf+IJc103DrWmaqlQqarfbMk1TjUZDxWJRjUZDrVZLl1xySeb3B5uEiAoAwLqIqADGQUQFAEwEGYbyW/SzXvWqVykIgjR+JpOnSUBN9kJNDowaDathGKrT6SiKohXhdPRzoihSp9PR448/fkpA/c53vpNOmCYRt16vq9lsyrZtWZalWq2mYrEoy7JULBZ14YUXZn5/sEmIqAAArIuICmAcRFQAwESQsXUR9Z/+03+qKIo0HA41HA5PjlTYKwAAIABJREFUiaHJdGmyF+rqQBoEQfp+ItlLdXRP1M997nOnRNTPf/7zarVachxH/X5fYRiqUqnItm25rqtOp6NqtapisSjXdbV3797M7w02EREVAIB1EVEBjIOICgCYCDK2LqK+5jWvURzHWlxc1MLCwopIOhpRR5fxj06rrrWEP9kKoN/vq9vtyvM8HTly5JSI6nmeLMuS67oaDocKgkDlcllBEMjzvHR/1GKxKN/39d73vjfze4NNREQFAGBdRFQA4yCiAgAmgoyti6g33HCD+v3+mhF1NJSOTpgmkXR0r9S1lvMn4dX3fcVxvCKgPvbYY2o2m3JdV77vazAYKAxDNZvN9Gts21a5XFapVFIURbrlllsyvzfYRERUAADWRUQFMA4iKgBgIsjYuoh64403ajgcanFxUcPh8JRJ01FrRdThcJjul5pMpY5OqCZB1Pd9fetb35Ik/d3f/Z2OHTsmz/MUhqGiKNJgMEiX/i8sLCgIAnU6HZVKJZXLZYVhqFe+8pWZ3xtsIiIqAADrIqICGAcRFQAwEWRsXUS99dZbNRwO06X3YRimkXR1OF2t1+tpYWEhjaWrtwFIplc9z1On09HRo0f16KOPqt/vy/O8FZ8zGAzU7XYVBIEWFhbkeZ7a7baKxaKq1ap839cP//APZ35vsIliQ0Z+G1wHAADbEBEVwDiIqACAiSBj6yLqe97znnQpved58n1/w3A6qt/vp4dRjQbXJKJ2u11FUSTP82SapmzbVhRF6T6oo9E1ibjdblfD4VCu68o0TRUKBdXrddm2nfl9wSYjogIAsC4iKoBxEFEBABNBxtZF1DvvvFODwUC+76f7k55pRB0MBhoMBmlAjaJoRUSN41hRFMl1XbVaLbXbbXU6HbVaLTmOs2JidXSCdTgcynEctVotFQoFtVotNRqNzO8LNhkRFQCAdRFRAYyDiAoAmAgyti6ifuITn1gRUYMgOKtJ1GTiNAzDdCuA0TAahqFc11Wz2VSz2VSr1VKr1ZJt2+n3SL7P6F6qtm2r2WyqUCjIsiwVi8XM7ws2GREVAIB1EVEBjIOICgCYCDK2LqLeddddGgwGCoJAruumIXQjyeRpsg9qGIbp4VGj06XJJGqyv+moTqezYpp19V6qnU5HzWZTxWJRtm3rM5/5TOb3BZvrAhlEVAAA1kFEBTAOIioAYCLI2LqIevfdd6eTqI7jKAzDFfubJkv014qoiTAMFQSBgiBYM6L6vq9OpyPbttNfLctKI2oyiZpMoQZBkEbUSqUiz/P0oQ99KPP7
gs1FRAUAYH1EVADjIKICACaCjK2LqPfcc08aUW3bVhAE6RL9JKKuDqmr90CNoihdzp8cEjX6PYIgkOM4chwnDalrRdRkKtVxHFmWpUajoWq1qiAI9Fu/9VuZ3xdsLiIqAADrI6ICGAcRFQAwEWRsXUT9zGc+c0pEHQ2ma0XU0QnUjSJqEluTfVFd15XjOGlEjeM4XcqfHCgVRVG65L9Wq6larSoMQ/5DYgciogIAsD7+7oPTuu427TX36JasrwPbEhEVADARZGz9JKrnebJtW77vr4imSSBdPYmaLNVfK6KOHiyVfDyJqMnPsW07Da6JwWCgTqejRqOhdrutcrmsarWqKIr0S7/0S5nfF2wuIioAAOsjouK0iKjYABEVADARZGz9nqijEXV0X9S1JlFHA+noVOp6ETUMQzmOI9d15fu+PM+T53np5yZL+vv9vlqtlur1utrttiqVimq1moIg0PXXX5/5fcHmIqICALA+IipOi4iKDRBRAQATQcbWRdQ777xzzUnU1ZF01Oo9T5N9T0cj6ujnhGEo27blum4aW5O9V0c/t9frqdPpqNVqybIsVSoVNRoNeZ6niy66KPP7gs1FRAUAYH1EVJwWERUbIKICACZCbGxdRP3IRz6ifr+/IqImBz6tjqdRFK3Yx3StiJrE0ORzkknWZBI1mWQd3Ts1+V69Xk++78uyLFmWpWq1KtM01Wq1Mr8n2HxEVAAA1kdExWkRUbEBIioAYCLExtZF1Pe///0rImqyzH5hYSFd0j+6ZH+jiJpMsCbTpaNbAiSHSiUHSA0GgxWHSiV7rXa7Xdm2LdM0ValU1Ol0ND8/n/k9wea7QIaMXPbXAQDAdkREnWw//uM/fvrPI6JiA0RUAMBEiI2ti6i33nqr+v2+fN9Pl9z3ej0tLi6uGVFXB9Ikko5G1LWW+3uel0bUwWCQTrWu3l91MBjIcRy1Wi2Vy2W5rqs/+qM/yvyeYPMRUQEAWB8RdfJceeWVuvbaa3Xttdfqne98p6699lq94hWv0OWXX7721xBRsQEiKgBgIsTG1kXUK664QoPBYEVEjeNYS0tLp0TUKIrWDKRhGKbbAIxG1NGvH42ovV5PruuumFwd3W/VcRy12+10P9R/9+/+Xeb3BJuPiAoAwPqIqJPjwgsv1EUXXaTPfvazqlarajQaajabqtVqKpVK+shHPqKLLrpIz3ve81Z+LREVGyCiAgAmQmxsXUR96Utfql6vt+LwpyiK1oyoyaTo6DL9JKJ6nrciiq6Oo77vy3Gc9H3LsuQ4Tvo9ksnUKIpk27Zs25ZlWQqCQC972csyvyfYfERUAADWR0SdHL/0S7+kT3ziE5qfn9fs7KxmZmY0PT2tmZkZlctllUol3XPPPXrve9+78muJqNgAERUAMBFiY+si6otf/OL08KckooZhuOZy/tGImnzN6J6nowdLjUbUKIrSSdQgCBQEgZrNphqNhnzfVxiGaYBN9mYNw1Cu68rzPP3oj/5o5vcEm4+ICgDA+oiok+Hnf/7ndffdd6tYLOrw4cM6dOiQDhw4oAMHDmhqakqzs7Oan59XvV5XsVjUO97xjv//64mo2AARFQAwEWJj6yLqpZdemh4m5XmeXNdVEAQaDodpRB2NqaMRNQzDNJI6jiPf99eMqMmkanK4lO/76cFRtm2n+6kmh0q5rquFhYV0evUlL3lJ5vcEmyxHRAUAYCNE1J3vwgsv1F133aVSqaTp6WkdOHBA+/fvTyPqoUOHdPDgQR06dEiFQkGWZWl2dla33nqrdu/enfn1Y3sjogIAJkJsbF1EveSSS2SaZho8XddNY2gSTkdj6uqImvxq27Y8zzslokZRtGIp/5EjR/TQQw9pMBioXq+r1WqlWwEEQaB2uy3f93XixAkFQaBOp6PLLrss83uCTZYjogIAsBEi6s73Ez/xE2o0GpqZmUmj6X333ZeG04MHD6bvT01NqVAopIevvuUtb9ErX/lK7dq1K/Pnge2JiAoAmAixsXUR9YILLlClUtFgMFC/30+X0MdxfMokanJo1OiBUmEYKgiCdI/TOI7TQ6XiOFYQBHIcR51OR0tLS3rsscf0v//3/9bnPvc5tVot1ev1NMB6nqd2u604jnXixAn5vq9Wq6WLL74483uCTZYjogIAsBEi6s52ySWXqFqtan5+XgcPHtS+ffs0NTWl/fv3a//+/ZqentbU1JT27t2rgwcP6uDBgzp8+LDm5uZUKBTUaDRUqVT01re+Vc9//vMzfz7YfoioAICJEBtbF1ENw9C+ffs0HA7V6/XkOE56uNToNGoSUUf3OR2NqO12W5ZlpV+XhFff99XpdBTHsZ588kmNvj3xxBMyTVP1el2macpxHFmWpeFwqBMnTsjzPNVqtczvB86BHBEVAICNEFF3tt/+7d9WoVDQoUOHtG/fPu3bty/9/cGDB9VoNHTgwIF0EnX//v3pROrU1JTm5uZULBZVKBT0gQ98IPPng+2HiAoAmAixsbUR9c4779TCwoLiOE4jahAEKyJqFEXpdGlyqNRoRG21WjJNU77vpwE1OXBqaWlJf/d3f6e13mzb1vz8vMrlsizLUrvd1sLCgpaXl+X7vubn5zO/HzgHckRUAAA2QkTd2aanp3X48GHt379fe/fuTSPp/v37NTs7q3q9rv37958SUQ8ePKjp6WlNT09rbm5O5XJZrVZLN998c+bPCdsLERUAMBHiH9iqn/ee97xHg8FAURTJtu10ef3okv7kEKnVU6ijEbXRaKQHRXW73XQp/yOPPLJmQJWkxx9/PP0/6e12W6ZpamFhQYPBQEEQ6NChQ5nfD5wDOSIqAAAbIaLuXC9+8YtVq9XSSdNk+vTAgQOamppSqVRSpVLR/v37de+99+rAgQNpQE0+J1naPzMzoziO9bGPfSzz54XthYgKAJgIsbG1EfVf/+t/nUbPTqeT7m86uh9qcojU6inURKvVUq1Wk2ma6XYAyYFRp3ubnZ3V/Py8TNNUs9lUv99Pv++ePXsyvx84B3JEVAAANkJE3bluvvlmFQoF7d+/X/v27UvD6KFDh1QsFtVoNFStVnX48GF99rOf1f79+9ODppKgmizpP3z4sHzfl2VZ9DGsQEQFAEyE2NjaiLp79275vp/+BazdbqvT6aQTpVEUKQiCNKCuNY3aarVUrVbVbrdl27Ycx5Ft2zpx4sSKYPpXf/VX+trXvrbisVKppPn5eTUaDTWbTTmOk0bct73tbZnfD5wDOSIqAAAbIaLuXPPz8zp8+PApe53Ozs6mwwztdluVSmXFUv8koiaHTs3Ozmp6elqNRkP9fl8f+chHdNlll+myyy7L/Dkie0RUAMBEiI2tjag//MM/rHa7nR4QlUgmSqMoku/76TTq6L6oSUxNImqn01Gn05FpmjJNU48//ngaS59++ml99atf1WOPPaann346fdw0TRUKBVWrVTWbTdVqNbXbbTmOo9e+9rWZ3w+cAzkiKgAAGyGi7lytVksHDx7Uvn37tHfvXh08eFBzc3Oq1+vyPE++78vzPLXbbU1PT+vee+9dMYWaTKV
OT0+nIbVarWpubi6dbs36OSJ7RFQAwESIja2NqBdddJEqlUoaUZMAmkyjhmEo13XTfVH7/X46jZpsAzAaUS3LUr1el23bKyZOn3zyST3yyCN65JFH9MQTT6SP+76vUqmkWq2WbgvgOI727dunl7/85ZnfD5wDuR9E1KyvAwCAbYqIujNdccUVqtfrK/ZDnZ2dXXG2QLJCzPM8VSqVdPo0mVg9dOhQusQ/OaBqZmYmPazVtm295CUvyfy5IltEVADARIiNrY2ohvH3J4RGUZQu5zdNU5ZlyfO89ICoJKIOBoMV06ijETX52iTKJm9PPfWUvvjFL+r+++/Xww8/rCeffDL9WBAEqlQqajabarVaqtfr7Ie60+WIqAAAbISIijvuuEMnT57UiRMnztjJkyd1xx13ZH7tyB4RFQAwEWJj6yPqZz/7WXW7XVmWJcuyZJpmuqTf9/30/4yvF1GbzWa6HL/VaqlSqejkyZNpKH3iiSf0hS98QSdOnNDDDz+sv/iLv0g/FoZheiiVaZpqtVqybVu/8Au/kPm9wDmSI6ICALARIipOnjyp733ve6c9pHX07Xvf+55OnjyZ+bUje0RUAMBEiI2tj6gf/vCHFcexbNtOje7J1Ol05HmeoihKI+rqg6VqtZrq9bparZZM09Rf/uVfpn+Ze/TRR/W5z31OR48e1cMPP6xnn302/cue67qq1+srAmq9XtfLXvayzO8FzpEcERUAgI3cfvvtuukdN2m+9PdLtDFZisXiKQe0nunbiRMnVCwWM38OyMZ8aV43veMmIioAYDLExtZH1F/+5V9WHMfyPC+dPu31egqCQK7rqtPppPui9vt99Xq9FQdLmaapWq2W7mvqum76F7knn3xSDz/8sB588EEdPXpUX/7yl9OPPfHEE7IsS81mU6Zpqtlsyvd9fepTn8r8PuAcyhFRAQDYyO2336750rx2f2S3jLyBSfNBY6yIanxwGzwHZGL3R3ZrvjRPRAUATIbY2PqI+pM/+ZPpdGmyD+pwOFQcx+meqMlkahzH6vV66na76QFTpmmq0WioVqup2WzK87z0L3Jf/epX9dBDD+nkyZM6duyYvvrVr6YfW1paSidQLctSq9VSGIb6lV/5lczvA86hPBEVAICN3H777SqXy5kHGWTkBxH1b/7mb84aERXlcpmICgCYDLGx9RF19+7d6XJ9x3Hkuq4WFhY0HA7V7XbTgOq6rqIoUr/fT/dFTZbzN5tN1ev1FRH1r//6r/W//tf/0gMPPKATJ07o/vvv17e+9S1J0je/+c30a03TlOM4Mk1TURTpmmuuyfw+4BzKE1EBANgIEXXC/SCifu973ztrRFQQUQEAEyM2tj6ivuAFL1C73Va321Wn05Ft21pYWNDS0lIaUpPAGgSBBoOB+v3+ioOlkuX4yZJ8SXrmmWf02GOP6YEHHtDJkyf1hS98QZL0/e9/X8ePH0+/pt1uy/M8WZYl27a1a9euzO8DzqE8ERUAgI0QUSfcDyLqd7/73bNGRMWaEfXiiy/WVVddlfm/3AAA2EyxsfUR1TAMlUol9Xo9OY4j27bV6/V09OhRHT16VMPhUP1+X7Zty3VdDQYDDQaDdPl/o9GQaZrp0vwkov7FX/yFvvjFL+r+++/X/fffr8cff1yS9LWvfU1xHMuyLHU6HVmWJdd15bquqtWqLr744szvA86hPBEVAICNsCfqhPtBRH3mmWfOGhF1sq3YE/Xqq69W4qqrrtJll12W+b/cAADYTLGRTUS97777NBgM5Pu+bNtWGIY6fvy47r//fi0vL2thYUGu68q27XRf1GS/1Hq9nu5rapqmgiDQ3/7t3+qpp57Sl770JX3+85/XI488oqeeekrf/e539eCDDyoMw3Sv1eTgKs/zdMcdd2R+D3CO5YmoAABs5Pbbb9dN77hJ86X5zE/7xtYqlUoqFAo6ceKE/vIv//KsnThxQoVCQaVSKfPngq03X5rXTe+4ae3l/AAA7DSxkU1E/ehHP6qFhYV02b7v+zpy5IhOnjypo0ePanl5WWEYqtPpKAgCdbtd+b4vy7JUq9VkmmY6VRoEgZ5++mk9/fTT+spXvqI//dM/1Ve+8hU9++yz+vrXv65er5fusep5nmzbTuPt61//+szvAc6xPBEVAICN3H777ZlfA7J14sQJPfXUU2ftxIkTmV87skdEBQBMhNjIJqL+p//0n9Jl+p7nyfM8LS4u6uTJkzpx4oSOHTuWLsFP9ka1bVutVku1Wk3tdjvdCiAMQ33ta1/Td7/7XT355JN64okn9I1vfEPPPvusHnnkkTTAuq6bfk0QBDJNM/PXH1sgT0QFAGAjRFScPHlSTz75pL7xjW+csSeffFInT57M/NqRPSIqAGAi5I1sIurrX/969ft9LS8vKwgC+b6vhYUFHT9+XMePH9eRI0c0GAxkWZba7bZc100DahJRk31NgyDQI488omeeeUZPPfWUvv71r+v//t//q7/6q7/S4uKiwjBUEARpRE2+12/8xm9k/vpjC+SJqAAAbISIit///d9P/858psIw1O///u9nfu3IHhEVADAR8kY2EfXqq69WEARaXl5WFEXqdrsaDoc6evSojh07lu6LalmWGo1G+mutVkvfT7YB8H1fy8vL+ta3vqVnnnlGf/7nf67vf//7+uY3v6k4jhVFURpRfd9XEASamZnRFVdckfnrjy2QN2TE2+A6AADYpoioAMZBRAUATIS8kU1EfdGLXiTLsrS8vKxer6fBYKDhcKilpSUtLS1pOByq3++r3W6rWq2mU6j1el3NZlO2bafL8j3PUxzH+vrXv65nnnlG3/zmN/X000/rscceU6/XUxzHaUQNw1CWZSmfz2f+2mOL5A0iKgAAGyCiAhgHERUAMBHyRjYR1TAMVSqVNKL2+30Nh0MNh0PFcaw4jtXtdmWaporFomq1mqrVqhqNhkzTTCNqMona7Xb15S9/Wd/+9rf17W9/W9/4xjd07Ngx9Xo9dbtdeZ4n13UVRZE+/vGP61/9q3+V+WuPLZI3iKgAAGyAiApgHERUAMBEyBvZRdSpqSktLy8rjmP1+30tLCyo1+vJ87x0wrTRaKhYLKper6cTqZZlpRE1+dxut6sHHnhAX/va1/Tkk0/qS1/6khYXF9XtdhUEgRzHked5iqJIv/7rv575644tlDeIqAAAbICICmAcRFQAwETIG9lF1E9/+tNpRO31ehoOh+p2u+kBUO12W5VKRaVSSc1mU9VqVe12Ow2ooxE1iiItLi7qwQcf1IMPPqgjR46o1+spDMP0+wVBoFqtpte97nWZv+7YQrFBRAUAYANEVADjIKICACZC3sguon7yk5/U8vKy+v2+4jjWYDBQGIZpQK3X6yoUCiqVSmo0GqpWq+p0OnIcJw2jSURNThONokj9fl/dbldhGMrzvPTz4jjWPffco127dmX+umMLEVEBANgQERXAOIioAICJkDeyi6if+MQntLy8rMFgoG63q36/ryAIZNu2ms2marWaisWiSqVSuieqbdtyXVe+78t1XXmel+p0OunhUclBUq
MHUPV6Pf3Wb/1W5q85thgRFQCADRFRcWau0217TZnmiD23bIPrQtaIqACAiZA3souoH/zgB7W4uKiFhYV0gjSJqMkeqKVSSaVSSdVqVbVaTZ1OJw2pyWFRtm2r0+mo2WzKNE05jiPHcdTpdGRZlhzHURiGKhQKLOWfRERUAAA2RETFmbhlz+po+oOoSkideERUAMBEyBvZRdR3vvOdGg6HWlxcVBiG6vV6CoJAnU5H1WpVlUpF5XJZlUpFtVpNzWZT7XY7PVgqmUY1TVOtVkv1el3NZlO2bavdbqvVaqnT6aQHSt1111163vOel/lrji1GRAUAYENEVJzeLdpj7tVt1616/LrbtNfco1syvz5kiYgKAJgIeSO7iHrTTTep1+tpaWkp3c80mUQtl8sqFosql8uq1Wqq1+syTTOdNk1CqmVZajabajQaqtVqajQasixLjUZD9Xo9ja2zs7N6+9vfnvnrjQwQUQEA2BARFafH1CnWR0QFAEyEvGFIGf3sV7ziFfI8L42ovu8rDEPZtq1SqaT5+fl0CrVaraaxtNlsqtVqqdVqpXunVqtVVatVNRqN9FCqZrOpIAgUBIHe//7365prrsn89UYGiKgAAGyIiIozc4v2sB8q1kBEBQBMhLyRXUR94QtfKNM0tbS0JN/35TiOfN+XZVkqFotpRK1WqysmUpMp0+T9er2ucrmscrmser2eLu03TVO9Xk9hGOrf/tt/m/lrjYwQUQEA2BARFWdvJKgSUyceERUAMBHyRnYR1TAM3X333RoMBvJ9X6ZpyrZttVotFQqFNKJWKpUVh0slk6flcjmdTC0UCiqVSuneqY1GIz1Q6j3veY+uu+66zF9rZISICgDAhoioeM6uu01719orFROFiAoAmAh5I9uI+u53v1vdble+76ter6vdbqvRaKSTqKVSKZ0yTYJqtVpNl/s3m01ZlpXuoVqtVtVqtdRutxVFkWzb1o//+I/rkksuyfy1RkZiQ0Z+G1wHAADbFBEVp3XLHplrHiD193ul7rllG1wjMkNEBQBMhLyRbUR9wxveINd11e121W630/1Mk0iaTJgm4bRUKq2YTnVdV1EUyTTNNKK2223Ztq1er6e9e/dm/hojY0RUAAA2RETF6f3gYKm9t+m6kcevu23vKY9h8hBRAQATIW9kG1GvvvpqNZtNDYdDeZ6ndrutWq2mcrm8IqImUTUJpfV6XZVKRXEca3l5WZ7npXuiuq6rIAjkOI5+9Vd/NfPXGBkjogIAsCEiKs7ULXtGDpYyT42qmEySZFx99dVKXHXVVbrssssyvzAAADZT3sg2ol544YUql8taWlpSGIayLEu1Wk3FYjENqElQnZmZUaFQULVaVbPZVK1W03A41MmTJzUcDtVoNGSaprrdrnq9nhqNhq6++urMX2NkjIgKAMCGiKgAxnHKJOrFF1+sq666KvMLAwBgM+WNbCPq7t27Va1Wtby8rDiO1el0VK/XVSgUVCwWValUVC6XNTc3p5mZmfSwqSSiLi8v6+GHH9bJkydXLOM3TVPvfe97tWvXrsxfY2SMiAoAwIaIqADGseZyfqZZAAA7Td7INqLeeOONqtfrOnLkiAaDgWzbVqPRWHGIVKVSUaFQ0Nzc3IqIWq/XtbS0pIceekgnTpyQbdvyPE9xHOu+++7Ttddeq4svvjjz1xjZukAGERUAgA0QUQGMg4gKAJgIeSPbiHrDDTeky/kHg4Ecx5FpmmkkTfY+HT1oqlarqdlsqtFoKI5jHTt2TEeOHJHneYqiSJ7n6Xd+53cyf22xPRBRAQDYGBEVwDiIqACAiZA3so2oL3vZyzQ9Pa2FhYX0cCnLstTpdNRqtdRsNlWtVtOIWiqV1Gg01Gw21Ww25bquhsOhFhYW0sOk/sf/+B/K5XKZv7bYHoioAABsjIgKYBxpRH3lK1+ZPkhEBQDsNHkj24hqGIbuuecexXGshYUFRVEkx3Fk27Ysy5JpmqrX62lErVQqKyKr4zjpQVJRFKnZbOq1r30ty/iRIqICALAxIiqAcUiSccONN+iGG29IHySiAgB2mryRfUR93/veJ9u2tbCwoH6/L9/35bpuGlKTadRyuax6va4gCBRFUXqQlOd56RTqH/3RH+nlL3955q8rtg8iKgAAGyOiAhgHERUAMBHyRvYR9S1veYtKpZJ6vZ76/b6iKFIQBPI8T7Ztq91up3ukNptNDQYDLSwsyLZtua4rx3HUarV08OBB3XTTTXrhC1+Y+euK7YOICgDAxoioAMZBRAUATIS8kX1E/Wf/7J/p137t19RoNNKl+XEcp9OljuPIsiy1221ZlqXBYKDFxUUFQaAgCNTpdFQoFHTHHXdk/npi+yGiAgCwMSIqgHEQUQEAEyFvZB9RX/rSl+rlL3+5Dh48KNd1Fcex+v2+4jhWGIYKgkCu68p1XXmep8FgoKWlJfV6PXmep09+8pN63/vep3//7/995q8nth8iKgAAGyOiAhgHERUAMBFyRvYRNfGud71LBw4cUKfTUa/X03A4XBFTwzBUFEVv5y6kAAAgAElEQVQaDodaWlpSt9tVqVTSzTffrFe/+tXshYo1XSBDRi776wAAYLsiogIYhyQZhmHola98ZfogERUAsNPkjO0TUa+55hp96EMfUrlcVq/X09LSUnrYVLfbTSdUFxcX0+X89957b+bXje2NiAoAwMaIqADGkUbUUURUAMBOkzO2T0Q1DENvfetbdfDgQfm+n8bSfr+fWlhY0PLyshYXF+V5nv7wD/8w82vG9kZEBQBgY0RUAOMgogIAJkLO2F4R9dprr9Vdd90ly7K0sLCgpaUlDYfD1NLSUhpRXdfV+973vsyvGdsbERUAgI0RUQGMg4gKAJgIOWN7RVTDMPQ7v/M7KpfLiuNYi4uLWlhY0MLCghYXF7W8vKzl5WUtLCyo3W7rLW95S+bXi+2NiAoAwMaIqADGQUQFAEyEnLH9Iuqb3/zmdEl/Mo26uLiYTqEmh0oVCoUVe5cDp8gRUQEAOB0iKoBxEFEBABMhZ2y/iPqa17xGd9xxh+r1urrdrobDYRpTkylUz/N03333ZX6t2OZyRFQAAE6HiApgHERUAMBEyBnbL6L+o3/0j3TzzTfrwIED6nQ6iuN4xf6oQRCoVCrp13/91zO/VmxzOSIqAACnQ0QFMA4iKgBgIuSM7RdRn//85+sf/IN/oI9+9KO699571Wg0ZNu2XNeVZVkql8v69Kc/rWuvvTbza8U2lyOiAgBwOkRUAOMgogIAJkLO2H4RNfHGN75Rb3rTm3TPPfdoZmZGpVJJ8/Pz+sxnPqP3v//9uuSSSzK/RmxzOSIqAACnQ0QFMA4iKgBgIuSM7RtRX/KSl+jiiy/W+973Pu3Zs0e/+7u/q3e/+91685vfrH/5L/+lnve852V+jdjmckRUAABOh4gKYBxEVADARMgZfx9Rc9vgWtZz0003KZfL6ad+6qd07bXX6kd/9Ed18cUXZ35dOA/kiKgAAJwOERXAOIioAICJkDO2f0S97LLLdMEFF2R+HTgP5YioAACcDhEVwDiIq
###Code
###Output
_____no_output_____
###Markdown
**Task 2**: Load the data using pydicom as a 3D volume and then reslice it! [35 Points]
###Code
# TODO: Please upload ct.zip using the file panel on the left.
# Then use the following snippet to extract the data.
import os
import zipfile
with zipfile.ZipFile('ct.zip', 'r') as zip_ref:
zip_ref.extractall('.')
# 1) Now loop through all the DICOM files and store them in a 3D numpy array.
# Hint: You can either store them in a list first or read the dimensions of a
# single image slice to properly create the 3D numpy array.
# Hint 2: os.listdir(DIR) gives a list of filenames in a directory.
# Hint 2b: This list is not sorted - make sure you sort it.
# Hint 3: The dcmread function loads a single DICOM file.
# Hint 4: You can then use .pixel_array to access the image data.
sorted_ct_Scans = sorted(os.listdir('ct'))
from pydicom import dcmread
import numpy
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow  # numpy, plt and imshow are used below
# TODO: YOUR CODE FOR LOADING THE VOLUME AS A 3D NUMPY ARRAY
# Test: dcmread('ct/'+ctScans[0])
slices_array = []
for dataset in sorted_ct_Scans:
slices_array += [dcmread('ct/'+dataset).pixel_array]
np_slices_array = numpy.array(slices_array) #slices converted to numpy
# 2) Now create and show axial, sagittal, and coronal slices from the 3D volume.
# Hint: Please use imshow(XX, cmap='gray') to show the image.
# TODO: YOUR CODE FOR AXIAL
imshow(np_slices_array[100, :, :], cmap='gray')
# TODO: YOUR CODE FOR SAGITTAL
imshow(np_slices_array[:, :, 120], cmap='gray')
# TODO: YOUR CODE FOR CORONAL
imshow(np_slices_array[:, 125, :], cmap='gray')
###Output
_____no_output_____
###Markdown
**Task 3**: Use the Window/Level-technique to visualize the data! [45 Points]
###Code
# We will now enhance the visualization from above by performing
# Window/Level adjustment.
# Here is one way of doing that:
# vmin = level - window/2
# vmax = level + window/2
# plt.imshow(hu_pixels + rescale, cmap='gray', vmin=vmin, vmax=vmax)
# plt.show()
# 1) Please load the Window/Level values from the DICOM file,
# print these values, and then visualize one slice with window/level adjustment.
# Hint: The DICOM header has the following tags.
# (0028, 1050) Window Center
# (0028, 1051) Window Width
# Hint 2: You can use slice[key].value to access DICOM tag values.
# Hint 3: (0028, 1052) Rescale Intercept might be important.
dcmread('ct/'+sorted_ct_Scans[0])
level = dcmread('ct/'+sorted_ct_Scans[0])[0x0028, 0x1050].value #window_center
window = dcmread('ct/'+sorted_ct_Scans[0])[0x0028, 0x1051].value #window_width
rescale = dcmread('ct/'+sorted_ct_Scans[0])[0x0028, 0x1052].value
print(level, window, rescale)
vmin = level - window/2
vmax = level + window/2
plt.imshow(np_slices_array[:, 125, :] + rescale, cmap="gray", vmin=vmin, vmax=vmax)
plt.show()
# 2) Play around with different Window/Level values that enhance
# the visualization.
vmin = level - window/5
vmax = 10*level + window/5
plt.imshow(np_slices_array[:, 125, :] + rescale, cmap="gray", vmin=vmin, vmax=vmax)
plt.show()
# Which values make sense and why?
###Output
_____no_output_____
###Markdown
Changing the level and window allows us to see different tissues (here mostly the bones), since level and window control the contrast of the grayscale mapping. **Bonus**: Create segmentations (label maps) for the volume using thresholding HU! [33 Points]
###Code
# Similar to Window/Level adjustment for visualization, we can threshold
# the volume to highlight the following components using the Hounsfield Units:
# 1) Fat
# 2) Soft Tissue
# 3) Bones
#
# Please create 3 segmentation masks for these structures.
# Then, please visualize each 3 slices per structure to showcase the segmentation.
# Hint: As a reminder, the following code allows thresholding of a numpy array.
# new_mask = imagevolume.copy()
# new_mask[new_mask < XXX] = 0
# Hint2: You might need to cast new_mask to int16 not uint16.
# TODO: YOUR CODE TO SEGMENT FAT
# TODO: YOUR CODE TO SEGMENT SOFT TISSUE
# TODO: YOUR CODE TO SEGMENT BONES
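# A minimal sketch of the thresholding approach (hedged: the HU ranges below are
# typical textbook values, not values given in the assignment, and RescaleSlope
# is assumed to be 1, as in the window/level cell above).
hu_volume = np_slices_array.astype(numpy.int16) + rescale  # pixel values -> Hounsfield Units
fat_mask = ((hu_volume >= -100) & (hu_volume <= -50)).astype(numpy.int16)  # fat: roughly -100..-50 HU
soft_mask = ((hu_volume >= 20) & (hu_volume <= 80)).astype(numpy.int16)    # soft tissue: roughly 20..80 HU
bone_mask = (hu_volume >= 300).astype(numpy.int16)                         # bone: above roughly 300 HU
for mask, name in [(fat_mask, 'fat'), (soft_mask, 'soft tissue'), (bone_mask, 'bones')]:
    plt.figure()
    plt.imshow(mask[100, :, :], cmap='gray')
    plt.title(name)
    plt.show()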
# Are the segmentations good?
# TODO: YOUR ANSWER
#
# Thank you and Great job!!
#
# _.---._
# .' `.
# :) (:
# \ (@) (@) /
# \ A /
# ) (
# \"""""/
# `._.'
# .=.
# .---._.-.=.-._.---.
# / ':-(_.-: :-._)-:` \
# / /' (__.-: :-.__) `\ \
# / / (___.-` '-.___) \ \
# / / (___.-'^`-.___) \ \
# / / (___.-'=`-.___) \ \
# / / (____.'=`.____) \ \
# / / (___.'=`.___) \ \
# (_.; `---'.=.`---' ;._)
# ;|| __ _.=._ __ ||;
# ;|| ( `.-.=.-.' ) ||;
# ;|| \ `.=.' / ||;
# ;|| \ .=. / ||;
# ;|| .-`.`-._.-'.'-. ||;
# .:::\ ( ,): O O :(, ) /:::.
# |||| ` / /'`--'--'`\ \ ' ||||
# '''' / / \ \ ''''
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# / / \ \
# /.' `.\
# (_)' `(_)
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# \\. .//
# jgs \\. .//
# ///) (\\\
# ,///' `\\\,
# ///' `\\\
# ""' '""
###Output
_____no_output_____ |
Tensorflowbasics.ipynb | ###Markdown
- artificial intelligence
- machine learning
- deep learning

What to learn:
1. tensorflow basics and fundamentals
2. preprocessing data
3. building and using pretrained models
4. fitting a model to the data
- the approach is experimentation

Introduction to tensors
###Code
# import tensorflow
import tensorflow as tf
print (tf.__version__)
# create tensors with tf.constant
scalar = tf.constant(7)
scalar
# check number of dimensions in a tensor
scalar.ndim
# create a vector
vector= tf.constant ([10, 10])
vector
# check the number of dimension of a vector
vector.ndim
# create a matrix
matrix = tf.constant([[2,10],
[7,10]])
matrix
# check the dimensions of a matrix
matrix.ndim
# create another matrix
another_m = tf.constant([[10., 7.],
[3., 2.],
[8., 9.]], dtype = tf.float16)
another_m
# check the number of dimensions
another_m.ndim
# lets create a tensor
tensor= tf.constant([[[1, 2, 3,],
[4,5,6]],
[[7,8,9],
[10,11,12]],
[[13,14,15],
[16, 17,18]]])
tensor
tensor.ndim
###Output
_____no_output_____
###Markdown
What we've created so far:
- scalar: a single number
- vector: a number with direction
- matrix: a 2-dimensional array of numbers
- tensor: an n-dimensional array of numbers

tf.Variable
###Code
# create a tensor with tf.variable
changable_tensor = tf.Variable([10,7])
unchangable_tensor = tf.constant([7, 10])
changable_tensor, unchangable_tensor
# lets change elements in changable_tensor
changable_tensor[0] = 7
changable_tensor
# the correct way to do this is with .assign
changable_tensor[0].assign(7)
changable_tensor
###Output
_____no_output_____
###Markdown
create random tensors
###Code
# create random tensors
random_1 = tf.random.Generator.from_seed(42)
random_1 = random_1.normal(shape=(3,2))
random_2 = tf.random.Generator.from_seed(42)
random_2 = random_2.normal(shape=(3, 2))  # use the seeded generator so random_2 is built the same way as random_1
# are they equal
random_1, random_2, random_1 == random_2
###Output
_____no_output_____
###Markdown
shuffle the order of elements in a tensor
###Code
# valuable when you want to shuffle your data
not_shuffled = tf.constant([[10,7],
[3,2],
[9, 3]])
not_shuffled
# shuffle our not shuffled
tf.random.set_seed(42)
tf.random.shuffle(not_shuffled, seed = 42)
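# A small hedged check (not in the original notebook): with both the global seed
# and the operation-level seed set, the shuffle order is reproducible.
tf.random.set_seed(42)
shuffled_a = tf.random.shuffle(not_shuffled, seed=42)
tf.random.set_seed(42)
shuffled_b = tf.random.shuffle(not_shuffled, seed=42)
print(tf.reduce_all(shuffled_a == shuffled_b).numpy())  # expected: True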
###Output
_____no_output_____ |
PS7 (1).ipynb | ###Markdown
Problem Set 7
###Code
import pandas as pd
pd.options.mode.chained_assignment = None
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import statsmodels.formula.api as smf
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
1. Heart Attacks 1.
###Code
heart = pd.read_csv('../Data/heart.csv')
heart.head()
heart = heart.drop(['slp','thall','oldpeak'], axis=1)
heart = heart.dropna()  # assign the result back so rows with missing values are actually dropped
print(heart.output.describe())
print(heart.dtypes)
print(heart.shape)
###Output
count 303.000000
mean 0.544554
std 0.498835
min 0.000000
25% 0.000000
50% 1.000000
75% 1.000000
max 1.000000
Name: output, dtype: float64
age int64
sex int64
cp int64
trtbps int64
chol int64
fbs int64
restecg int64
thalachh int64
exng int64
caa int64
output int64
dtype: object
(303, 11)
###Markdown
2.
###Code
heart.cp = heart.cp.astype('category')
heart.restecg = heart.restecg.astype('category')
heart.caa = heart.caa.astype('category')
m = smf.logit('output ~ age + sex + cp + trtbps + chol + fbs + restecg + thalachh + exng + caa', data=heart).fit()
print(m.summary())
print(m.get_margeff().summary())
### age, chol, fbs, restecg arent significant
###Output
Optimization terminated successfully.
Current function value: 0.355396
Iterations 7
Logit Regression Results
==============================================================================
Dep. Variable: output No. Observations: 303
Model: Logit Df Residuals: 286
Method: MLE Df Model: 16
Date: Mon, 07 Mar 2022 Pseudo R-squ.: 0.4843
Time: 04:54:26 Log-Likelihood: -107.69
converged: True LL-Null: -208.82
Covariance Type: nonrobust LLR p-value: 2.759e-34
================================================================================
coef std err z P>|z| [0.025 0.975]
--------------------------------------------------------------------------------
Intercept 0.4035 2.469 0.163 0.870 -4.435 5.242
cp[T.1] 1.5343 0.543 2.827 0.005 0.470 2.598
cp[T.2] 1.6907 0.441 3.838 0.000 0.827 2.554
cp[T.3] 1.8970 0.667 2.843 0.004 0.589 3.205
restecg[T.1] 0.4206 0.361 1.165 0.244 -0.287 1.128
restecg[T.2] -0.9518 2.118 -0.449 0.653 -5.103 3.199
caa[T.1] -1.8571 0.458 -4.052 0.000 -2.756 -0.959
caa[T.2] -3.0485 0.647 -4.712 0.000 -4.317 -1.780
caa[T.3] -2.3741 0.783 -3.032 0.002 -3.908 -0.840
caa[T.4] 0.4676 1.586 0.295 0.768 -2.642 3.577
age 0.0237 0.024 0.992 0.321 -0.023 0.070
sex -2.1367 0.457 -4.671 0.000 -3.033 -1.240
trtbps -0.0285 0.011 -2.656 0.008 -0.050 -0.007
chol -0.0061 0.004 -1.524 0.128 -0.014 0.002
fbs 0.4437 0.501 0.886 0.375 -0.537 1.425
thalachh 0.0357 0.011 3.391 0.001 0.015 0.056
exng -1.1615 0.418 -2.781 0.005 -1.980 -0.343
================================================================================
Logit Marginal Effects
=====================================
Dep. Variable: output
Method: dydx
At: overall
================================================================================
dy/dx std err z P>|z| [0.025 0.975]
--------------------------------------------------------------------------------
cp[T.1] 0.1716 0.058 2.975 0.003 0.059 0.285
cp[T.2] 0.1891 0.045 4.234 0.000 0.102 0.277
cp[T.3] 0.2122 0.071 2.986 0.003 0.073 0.351
restecg[T.1] 0.0470 0.040 1.173 0.241 -0.032 0.126
restecg[T.2] -0.1065 0.237 -0.450 0.653 -0.570 0.358
caa[T.1] -0.2077 0.046 -4.532 0.000 -0.298 -0.118
caa[T.2] -0.3410 0.062 -5.515 0.000 -0.462 -0.220
caa[T.3] -0.2655 0.082 -3.241 0.001 -0.426 -0.105
caa[T.4] 0.0523 0.177 0.295 0.768 -0.295 0.400
age 0.0026 0.003 0.998 0.318 -0.003 0.008
sex -0.2390 0.045 -5.331 0.000 -0.327 -0.151
trtbps -0.0032 0.001 -2.776 0.006 -0.005 -0.001
chol -0.0007 0.000 -1.539 0.124 -0.002 0.000
fbs 0.0496 0.056 0.890 0.374 -0.060 0.159
thalachh 0.0040 0.001 3.628 0.000 0.002 0.006
exng -0.1299 0.045 -2.912 0.004 -0.217 -0.042
================================================================================
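###Markdown
A quick aside (not part of the original assignment): exponentiating the logit coefficients turns them into odds ratios, which can make the table above easier to interpret.
###Code
# Odds ratios for the fitted logit model `m` from the cell above.
odds_ratios = np.exp(m.params)
print(odds_ratios.sort_values())
###Output
_____no_output_____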
###Markdown
- Sex: being male decreases the predicted chances of a heart attack; this coefficient is significant.
- Cp: chest pain level 3 has the highest chances of indicating a heart attack; cp is significant.
- Age: the higher the age, the higher the chances of getting a heart attack; this coefficient is not significant.
- Variables such as caa (number of major vessels) from 1-3, sex, trtbps (resting blood pressure), and exng (exercise-induced angina) are all significant and reduce the predicted heart attack probability. Some of these make sense, some don't, like resting blood pressure, angina, and being male decreasing the chances.

3.
###Code
from sklearn.linear_model import LogisticRegression
X = heart[['age','sex', 'cp', 'trtbps', 'chol', 'fbs', 'restecg', 'thalachh', 'exng', 'caa']]
y = heart.output
X1 = pd.get_dummies(X, drop_first=True, columns=['cp','restecg','caa'])
m1 = LogisticRegression(penalty='none', solver='newton-cg').fit(X1,y)
print(m1.coef_)
print(m1.intercept_)
###Output
[[ 0.02366509 -2.13671455 -0.02850619 -0.00606629 0.44373485 0.03574638
-1.16147274 1.53426412 1.69073314 1.89700821 0.4206284 -0.9518241
-1.85714506 -3.04848106 -2.37405207 0.46761275]]
[0.40345569]
###Markdown
4.
###Code
train_y_prob = m1.predict_proba(X1)
train_y_prob1 = train_y_prob[:,1]
train_y_prob1[:10]
train_y_problab = m1.predict(X1)
train_y_problab[:10]
threshold = 0.5
1.0*(train_y_prob1 > threshold) == train_y_problab
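# A hedged illustration (not part of the assignment): raising the threshold
# labels fewer cases as positive, trading recall for precision.
for t in [0.3, 0.5, 0.7]:
    print(t, (train_y_prob1 > t).sum(), "predicted heart attacks")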
###Output
_____no_output_____
###Markdown
5.
###Code
from sklearn.metrics import accuracy_score
accuracy_score(train_y_problab, y)
###Output
_____no_output_____
###Markdown
I think I would be comfortable using this model; however, it's always important to consider that there are human lives at stake here, so higher accuracy scores are always more desirable. 6.
###Code
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y, train_y_problab)
cm
from sklearn.metrics import accuracy_score
a = accuracy_score(y, train_y_problab)
from sklearn.metrics import precision_score
p = precision_score(y, train_y_problab)
from sklearn.metrics import recall_score
r = recall_score(y, train_y_problab)
print('Accuracy: ',a,'precision: ',p,'recall: ',r)
###Output
Accuracy: 0.858085808580858 precision: 0.8546511627906976 recall: 0.8909090909090909
###Markdown
2. Predict Airbnb Price 1.
###Code
airb = pd.read_csv("../Data/airbnb-beijing-listings.csv.bz2", usecols = ['price','bedrooms','room_type','accommodates','bathrooms'], thousands = ',')
airb['price'] = airb['price'].str.replace(',', '')
airb['price'] = airb['price'].str.replace('$', '')
airb['price'] = pd.to_numeric(airb['price'], errors='coerce')
airb.drop(airb.index[airb['price'] == 0], inplace = True) # gets rid of the $0 air b and b's as these are not reasonable
airb = airb.dropna()
airb["bedrooms2"] = pd.cut(airb.bedrooms,
bins = [0, 1, 2, 3, 4, np.inf],
labels = ["0", "1", "2", "3", "4 or more"],
right=False) # categorizes the variabel: bedrooms
airb['logarithm'] = np.log(airb['price'])
airb.replace([np.inf, -np.inf], np.nan, inplace=True) # gets rid of the infinite values created by the log function
airb = airb.dropna() # assign back to actually drop the NaNs created from the infinite values
airb["bathrooms2"] = pd.cut(airb.bathrooms,
bins = [0, 1, 2, 3, np.inf],
labels = ["0", "1", "2", "3 or more"],
right=False)
airb["accommodates2"] = pd.cut(airb.accommodates,
bins = [1, 2, 3, 4, np.inf],
labels = ["1", "2", "3", "4 or more"],
right=False)
m3 = smf.ols("logarithm ~ bedrooms2 + room_type + accommodates2 + bathrooms2", data = airb).fit()
m3.summary()
###Output
/tmp/ipykernel_87/1323061788.py:3: FutureWarning: The default value of regex will change from True to False in a future version. In addition, single character regular expressions will *not* be treated as literal strings when regex=True.
airb['price'] = airb['price'].str.replace('$', '')
###Markdown
2.
###Code
bnb_pred = m3.predict(airb)
bnb_pred
###Output
_____no_output_____
###Markdown
3.
###Code
bruh = airb['logarithm'].values
from sklearn.metrics import mean_squared_error
np.sqrt(mean_squared_error(bnb_pred, bruh))
###Output
_____no_output_____
###Markdown
4.
###Code
newX = {"bedrooms2":['2'], 'room_type':['Shared room'], 'accommodates2':['4 or more'], 'bathrooms2':['2']}
two_room = m3.predict(newX)
print('Predicted Price: ',two_room)
compute = airb[(airb["bedrooms2"] == '2') & (airb['accommodates2'] == '4 or more')]
two_room = m3.predict(compute)
print('Predicted Price: ', np.mean(two_room)) #????
###Output
Predicted Price: 6.375890864331857
###Markdown
5.
###Code
compute = airb[(airb["bedrooms2"] == '2') & (airb['accommodates2'] == '4 or more')]
print('calculated price: ',compute.logarithm.mean())
###Output
calculated price: 6.384548388368513
|
notebooks/datasets_ames_housing.ipynb | ###Markdown
The Ames housing dataset. In this notebook, we will quickly present the "Ames housing" dataset. We will see that this dataset is similar to the "California housing" dataset. However, it is more complex to handle: it contains missing data and both numerical and categorical features. This dataset is located in the `datasets` directory. It is stored in a comma-separated value (CSV) file. As previously mentioned, we are aware that the dataset contains missing values. The character `"?"` is used as a missing value marker. We will open the dataset and specify the missing value marker such that they will be parsed by pandas when opening the file.
###Code
import pandas as pd
ames_housing = pd.read_csv("../datasets/house_prices.csv", na_values='?')
###Output
_____no_output_____
###Markdown
We can have a first look at the available columns in this dataset.
###Code
ames_housing.head()
###Output
_____no_output_____
###Markdown
We see that the last column named `"SalePrice"` is indeed the target that we would like to predict. So we will split our dataset into two variables containing the data and the target.
###Code
data = ames_housing.drop(columns=["Id", "SalePrice"])
target = ames_housing["SalePrice"]
###Output
_____no_output_____
###Markdown
Let's have a quick look at the target before focusing on the data.
###Code
target.head()
###Output
_____no_output_____
###Markdown
We see that the target contains continuous values. It corresponds to the price of a house in $. We can have a look at the target distribution.
###Code
import matplotlib.pyplot as plt
target.plot.hist(bins=20, edgecolor="black")
plt.xlabel("House price in $")
_ = plt.title("Distribution of the house price \nin Ames")
###Output
_____no_output_____
###Markdown
We see that the distribution has a long tail. It means that most of the houses are normally distributed but a couple of houses have a higher than normal value. It could be critical to take this peculiarity into account when designing a predictive model; the short sketch below shows one common option, log-transforming the target. Now, we can have a look at the available data that we could use to predict house prices.
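###Code
# A hedged sketch (not in the original notebook): since the prices are
# right-skewed, one common option is to look at the log-transformed target.
import numpy as np
plt.figure()
np.log(target).plot.hist(bins=20, edgecolor="black")
plt.xlabel("log(house price)")
_ = plt.title("Distribution of the log house price\nin Ames")
###Output
_____no_output_____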
###Code
data.info()
###Output
_____no_output_____
###Markdown
Looking at the dataframe general information, we can see that 79 features are available and that the dataset contains 1460 samples. However, some features contain missing values. Also, the type of data is heterogeneous: both numerical and categorical data are available.
###Code
numerical_data = data.select_dtypes("number")
numerical_data.info()
###Output
_____no_output_____
###Markdown
We see that the data are mainly represented with integer numbers. Let's have a look at the histogram for all these features.
###Code
numerical_data.hist(bins=20, figsize=(12, 22), edgecolor="black", density=True,
layout=(9, 4))
plt.subplots_adjust(hspace=0.8, wspace=0.8)
###Output
_____no_output_____
###Markdown
We see that some features have high peaks at 0. This could be linked to the fact that this value was assigned when the criterion did not apply, for instance the area of the swimming pool when no swimming pool is available. We also have some features encoding dates (for instance years). This information is useful and should also be considered when designing a predictive model. Now, let's have a look at the data encoded with strings.
###Code
string_data = data.select_dtypes(object)
string_data.info()
###Output
_____no_output_____
###Markdown
These features are categorical. We can make some bar plots to see the category counts for each feature.
###Code
from math import ceil
from itertools import zip_longest
n_string_features = string_data.shape[1]
nrows, ncols = ceil(n_string_features / 4), 4
fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(14, 80))
for feature_name, ax in zip_longest(string_data, axs.ravel()):
if feature_name is None:
# do not show the axis
ax.axis("off")
continue
string_data[feature_name].value_counts().plot.barh(ax=ax)
ax.set_title(feature_name)
plt.subplots_adjust(hspace=0.2, wspace=0.8)
###Output
_____no_output_____
###Markdown
The Ames housing datasetIn this notebook, we will quickly present the "Ames housing" dataset. We willsee that this dataset is similar to the "California housing" dataset.However, it is more complex to handle: it contains missing data and bothnumerical and categorical features.This dataset is located in the `datasets` directory. It is stored in a commaseparated value (CSV) file. As previously mentioned, we are aware that thedataset contains missing values. The character `"?"` is used as a missingvalue marker.We will open the dataset and specify the missing value marker such that theywill be parsed by pandas when opening the file.
###Code
import pandas as pd
ames_housing = pd.read_csv("../datasets/house_prices.csv", na_values='?')
###Output
_____no_output_____
###Markdown
We can have a first look at the available columns in this dataset.
###Code
ames_housing.head()
###Output
_____no_output_____
###Markdown
We see that the last column named `"SalePrice"` is indeed the target that wewould like to predict. So we will split our dataset into two variablescontaining the data and the target.
###Code
data = ames_housing.drop(columns=["Id", "SalePrice"])
target = ames_housing["SalePrice"]
###Output
_____no_output_____
###Markdown
Let's have a quick look at the target before focusing on the data.
###Code
target.head()
###Output
_____no_output_____
###Markdown
We see that the target contains continuous values. It corresponds to the price of a house in $. We can have a look at the target distribution.
###Code
import matplotlib.pyplot as plt
target.plot.hist(bins=20, edgecolor="black")
plt.xlabel("House price in $")
_ = plt.title("Distribution of the house price \nin Ames")
###Output
_____no_output_____
###Markdown
We see that the distribution has a long tail. It means that most of the houses are normally distributed but a couple of houses have a higher than normal value. It could be critical to take this peculiarity into account when designing a predictive model. Now, we can have a look at the available data that we could use to predict house prices.
###Code
data.info()
###Output
_____no_output_____
###Markdown
Looking at the dataframe general information, we can see that 79 features are available and that the dataset contains 1460 samples. However, some features contain missing values. Also, the type of data is heterogeneous: both numerical and categorical data are available.
###Code
numerical_data = data.select_dtypes("number")
numerical_data.info()
###Output
_____no_output_____
###Markdown
We see that the data are mainly represented with integer numbers. Let's have a look at the histogram for all these features.
###Code
numerical_data.hist(bins=20, figsize=(12, 22), edgecolor="black", density=True,
layout=(9, 4))
plt.subplots_adjust(hspace=0.8, wspace=0.8)
###Output
_____no_output_____
###Markdown
We see that some features have high peaks at 0. This could be linked to the fact that this value was assigned when the criterion did not apply, for instance the area of the swimming pool when no swimming pool is available. We also have some features encoding dates (for instance years). This information is useful and should also be considered when designing a predictive model. Now, let's have a look at the data encoded with strings.
###Code
string_data = data.select_dtypes(object)
string_data.info()
###Output
_____no_output_____
###Markdown
These features are categorical. We can make some bar plots to see the category counts for each feature.
###Code
from math import ceil
from itertools import zip_longest
n_string_features = string_data.shape[1]
nrows, ncols = ceil(n_string_features / 4), 4
fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(14, 80))
for feature_name, ax in zip_longest(string_data, axs.ravel()):
if feature_name is None:
# do not show the axis
ax.axis("off")
continue
string_data[feature_name].value_counts().plot.barh(ax=ax)
ax.set_title(feature_name)
plt.subplots_adjust(hspace=0.2, wspace=0.8)
###Output
_____no_output_____
###Markdown
The Ames housing datasetIn this notebook, we will quickly present the "Ames housing" dataset. We willsee that this dataset is similar to the "California housing" dataset.However, it is more complex to handle: it contains missing data and bothnumerical and categorical features.This dataset is located in the `datasets` directory. It is stored in a commaseparated value (CSV) file. As previously mentioned, we are aware that thedataset contains missing values. The character `"?"` is used as a missingvalue marker.We will open the dataset and specify the missing value marker such that theywill be parsed by pandas when opening the file.
###Code
import pandas as pd
ames_housing = pd.read_csv("../datasets/house_prices.csv", na_values='?')
ames_housing = ames_housing.drop(columns="Id")
###Output
_____no_output_____
###Markdown
We can have a first look at the available columns in this dataset.
###Code
ames_housing.head()
###Output
_____no_output_____
###Markdown
We see that the last column named `"SalePrice"` is indeed the target that wewould like to predict. So we will split our dataset into two variablescontaining the data and the target.
###Code
target_name = "SalePrice"
data, target = ames_housing.drop(columns=target_name), ames_housing[target_name]
###Output
_____no_output_____
###Markdown
Let's have a quick look at the target before focusing on the data.
###Code
target.head()
###Output
_____no_output_____
###Markdown
We see that the target contains continuous values. It corresponds to the price of a house in $. We can have a look at the target distribution.
###Code
import matplotlib.pyplot as plt
target.plot.hist(bins=20, edgecolor="black")
plt.xlabel("House price in $")
_ = plt.title("Distribution of the house price \nin Ames")
###Output
_____no_output_____
###Markdown
We see that the distribution has a long tail. It means that most of the houses are normally distributed but a couple of houses have a higher than normal value. It could be critical to take this peculiarity into account when designing a predictive model. Now, we can have a look at the available data that we could use to predict house prices.
###Code
data.info()
###Output
_____no_output_____
###Markdown
Looking at the dataframe general information, we can see that 79 features are available and that the dataset contains 1460 samples. However, some features contain missing values. Also, the type of data is heterogeneous: both numerical and categorical data are available.
###Code
numerical_data = data.select_dtypes("number")
numerical_data.info()
###Output
_____no_output_____
###Markdown
We see that the data are mainly represented with integer number. Let's havea look at the histogram for all these features.
###Code
numerical_data.hist(bins=20, figsize=(12, 22), edgecolor="black",
layout=(9, 4))
plt.subplots_adjust(hspace=0.8, wspace=0.8)
###Output
_____no_output_____
###Markdown
We see that some features have high peaks at 0. This could be linked to the fact that this value was assigned when the criterion did not apply, for instance the area of the swimming pool when no swimming pool is available. We also have some features encoding dates (for instance years). This information is useful and should also be considered when designing a predictive model. Now, let's have a look at the data encoded with strings.
###Code
string_data = data.select_dtypes(object)
string_data.info()
###Output
_____no_output_____
###Markdown
These features are categorical. We can make some bar plots to see the category counts for each feature.
###Code
from math import ceil
from itertools import zip_longest
n_string_features = string_data.shape[1]
nrows, ncols = ceil(n_string_features / 4), 4
fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(14, 80))
for feature_name, ax in zip_longest(string_data, axs.ravel()):
if feature_name is None:
# do not show the axis
ax.axis("off")
continue
string_data[feature_name].value_counts().plot.barh(ax=ax)
ax.set_title(feature_name)
plt.subplots_adjust(hspace=0.2, wspace=0.8)
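# A small numeric complement to the bar plots (a hedged addition, not in the
# original notebook): how many categories each feature has, and how many
# features contain at least one rare category (fewer than 10 samples here,
# an arbitrary cutoff).
n_categories = string_data.nunique().sort_values(ascending=False)
print(n_categories)
n_features_with_rare_categories = sum(
    (string_data[feature_name].value_counts() < 10).any()
    for feature_name in string_data
)
print(f"{n_features_with_rare_categories} features have at least one rare category")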
###Output
_____no_output_____
###Markdown
Plotting this information allows us to answer two questions:
* Are there few or many categories for a given feature?
* Are there rare categories for some features?
Knowing about these peculiarities would help in designing the predictive pipeline. Note: in order to keep the content of the course simple and didactic, we created a version of this database without missing values.
###Code
ames_housing_no_missing = pd.read_csv("../datasets/ames_housing_no_missing.csv")
ames_housing_no_missing.head()
###Output
_____no_output_____
###Markdown
It contains the same information as the original dataset after using a[`sklearn.impute.SimpleImputer`](https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html)to replace missing values using the mean along each numerical column(including the target), and the most frequent value along each categorical column.
###Code
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
numerical_features = [
"LotFrontage",
"LotArea",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"BedroomAbvGr",
"KitchenAbvGr",
"TotRmsAbvGrd",
"Fireplaces",
"GarageCars",
"GarageArea",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
target_name,
]
categorical_features = data.columns.difference(numerical_features)
most_frequent_imputer = SimpleImputer(strategy="most_frequent")
mean_imputer = SimpleImputer(strategy="mean")
preprocessor = make_column_transformer(
(most_frequent_imputer, categorical_features),
(mean_imputer, numerical_features),
)
ames_housing_preprocessed = pd.DataFrame(
preprocessor.fit_transform(ames_housing),
columns=categorical_features.tolist() + numerical_features,
)
ames_housing_preprocessed = ames_housing_preprocessed[ames_housing.columns]
ames_housing_preprocessed = ames_housing_preprocessed.astype(ames_housing.dtypes)
(ames_housing_no_missing == ames_housing_preprocessed).all()
###Output
_____no_output_____
###Markdown
The Ames housing datasetIn this notebook, we will quickly present the "Ames housing" dataset. We willsee that this dataset is similar to the "California housing" dataset.However, it is more complex to handle: it contains missing data and bothnumerical and categorical features.This dataset is located in the `datasets` directory. It is stored in a commaseparated value (CSV) file. As previously mentioned, we are aware that thedataset contains missing values. The character `"?"` is used as a missingvalue marker.We will open the dataset and specify the missing value marker such that theywill be parsed by pandas when opening the file.
###Code
import pandas as pd
ames_housing = pd.read_csv("../datasets/house_prices.csv", na_values='?')
###Output
_____no_output_____
###Markdown
We can have a first look at the available columns in this dataset.
###Code
ames_housing.head()
###Output
_____no_output_____
###Markdown
We see that the last column named `"SalePrice"` is indeed the target that wewould like to predict. So we will split our dataset into two variablescontaining the data and the target.
###Code
data = ames_housing.drop(columns=["Id", "SalePrice"])
target = ames_housing["SalePrice"]
###Output
_____no_output_____
###Markdown
Let's have a quick look at the target before focusing on the data.
###Code
target.head()
###Output
_____no_output_____
###Markdown
We see that the target contains continuous values. It corresponds to the price of a house in $. We can have a look at the target distribution.
###Code
import matplotlib.pyplot as plt
target.plot.hist(bins=20, edgecolor="black")
plt.xlabel("House price in $")
_ = plt.title("Distribution of the house price \nin Ames")
###Output
_____no_output_____
###Markdown
We see that the distribution has a long tail. It means that most of the houses are normally distributed but a couple of houses have a higher than normal value. It could be critical to take this peculiarity into account when designing a predictive model. Now, we can have a look at the available data that we could use to predict house prices.
###Code
data.info()
###Output
_____no_output_____
###Markdown
Looking at the dataframe general information, we can see that 79 features are available and that the dataset contains 1460 samples. However, some features contain missing values. Also, the type of data is heterogeneous: both numerical and categorical data are available.
###Code
numerical_data = data.select_dtypes("number")
numerical_data.info()
###Output
_____no_output_____
###Markdown
We see that the data are mainly represented with integer number. Let's havea look at the histogram for all these features.
###Code
numerical_data.hist(bins=20, figsize=(12, 22), edgecolor="black", density=True,
layout=(9, 4))
plt.subplots_adjust(hspace=0.8, wspace=0.8)
###Output
_____no_output_____
###Markdown
We see that some features have high peaks at 0. This could be linked to the fact that this value was assigned when the criterion did not apply, for instance the area of the swimming pool when no swimming pool is available. We also have some features encoding dates (for instance years). This information is useful and should also be considered when designing a predictive model. Now, let's have a look at the data encoded with strings.
###Code
string_data = data.select_dtypes(object)
string_data.info()
###Output
_____no_output_____
###Markdown
These features are categorical. We can make some bar plots to see the category counts for each feature.
###Code
from math import ceil
from itertools import zip_longest
n_string_features = string_data.shape[1]
nrows, ncols = ceil(n_string_features / 4), 4
fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(14, 80))
for feature_name, ax in zip_longest(string_data, axs.ravel()):
if feature_name is None:
# do not show the axis
ax.axis("off")
continue
string_data[feature_name].value_counts().plot.barh(ax=ax)
ax.set_title(feature_name)
plt.subplots_adjust(hspace=0.2, wspace=0.8)
###Output
_____no_output_____
###Markdown
The Ames housing datasetIn this notebook, we will quickly present the "Ames housing" dataset. We willsee that this dataset is similar to the "California housing" dataset.However, it is more complex to handle: it contains missing data and bothnumerical and categorical features.This dataset is located in the `datasets` directory. It is stored in a commaseparated value (CSV) file. As previously mentioned, we are aware that thedataset contains missing values. The character `"?"` is used as a missingvalue marker.We will open the dataset and specify the missing value marker such that theywill be parsed by pandas when opening the file.
###Code
import pandas as pd
ames_housing = pd.read_csv("../datasets/house_prices.csv", na_values='?')
ames_housing = ames_housing.drop(columns="Id")
###Output
_____no_output_____
###Markdown
We can have a first look at the available columns in this dataset.
###Code
ames_housing.head()
###Output
_____no_output_____
###Markdown
We see that the last column named `"SalePrice"` is indeed the target that wewould like to predict. So we will split our dataset into two variablescontaining the data and the target.
###Code
target_name = "SalePrice"
data, target = ames_housing.drop(columns=target_name), ames_housing[target_name]
###Output
_____no_output_____
###Markdown
Let's have a quick look at the target before focusing on the data.
###Code
target.head()
###Output
_____no_output_____
###Markdown
We see that the target contains continuous values. It corresponds to the price of a house in $. We can have a look at the target distribution.
###Code
import matplotlib.pyplot as plt
target.plot.hist(bins=20, edgecolor="black")
plt.xlabel("House price in $")
_ = plt.title("Distribution of the house price \nin Ames")
###Output
_____no_output_____
###Markdown
We see that the distribution has a long tail. It means that most of the houses are normally distributed but a couple of houses have a higher than normal value. It could be critical to take this peculiarity into account when designing a predictive model. Now, we can have a look at the available data that we could use to predict house prices.
###Code
data.info()
###Output
_____no_output_____
###Markdown
Looking at the dataframe general information, we can see that 79 features are available and that the dataset contains 1460 samples. However, some features contain missing values. Also, the type of data is heterogeneous: both numerical and categorical data are available.
###Code
numerical_data = data.select_dtypes("number")
numerical_data.info()
###Output
_____no_output_____
###Markdown
We see that the data are mainly represented with integer number. Let's havea look at the histogram for all these features.
###Code
numerical_data.hist(bins=20, figsize=(12, 22), edgecolor="black", density=True,
layout=(9, 4))
plt.subplots_adjust(hspace=0.8, wspace=0.8)
###Output
_____no_output_____
###Markdown
We see that some features have high peaks at 0. This could be linked to the fact that this value was assigned when the criterion did not apply, for instance the area of the swimming pool when no swimming pool is available. We also have some features encoding dates (for instance years). This information is useful and should also be considered when designing a predictive model. Now, let's have a look at the data encoded with strings.
###Code
string_data = data.select_dtypes(object)
string_data.info()
###Output
_____no_output_____
###Markdown
These features are categorical. We can make some bar plots to see the category counts for each feature.
###Code
from math import ceil
from itertools import zip_longest
n_string_features = string_data.shape[1]
nrows, ncols = ceil(n_string_features / 4), 4
fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(14, 80))
for feature_name, ax in zip_longest(string_data, axs.ravel()):
if feature_name is None:
# do not show the axis
ax.axis("off")
continue
string_data[feature_name].value_counts().plot.barh(ax=ax)
ax.set_title(feature_name)
plt.subplots_adjust(hspace=0.2, wspace=0.8)
###Output
_____no_output_____
###Markdown
Plotting this information allows us to answer two questions:
* Are there few or many categories for a given feature?
* Are there rare categories for some features?
Knowing about these peculiarities would help in designing the predictive pipeline. Note: in order to keep the content of the course simple and didactic, we created a version of this database without missing values.
###Code
ames_housing_no_missing = pd.read_csv("../datasets/ames_housing_no_missing.csv")
ames_housing_no_missing.head()
###Output
_____no_output_____
###Markdown
It contains the same information as the original dataset after using a[`sklearn.impute.SimpleImputer`](https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html)to replace missing values using the mean along each numerical column(including the target), and the most frequent value along each categorical column.
###Code
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
numerical_features = [
"LotFrontage",
"LotArea",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"BedroomAbvGr",
"KitchenAbvGr",
"TotRmsAbvGrd",
"Fireplaces",
"GarageCars",
"GarageArea",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
target_name,
]
categorical_features = data.columns.difference(numerical_features)
most_frequent_imputer = SimpleImputer(strategy="most_frequent")
mean_imputer = SimpleImputer(strategy="mean")
preprocessor = make_column_transformer(
(most_frequent_imputer, categorical_features),
(mean_imputer, numerical_features),
)
ames_housing_preprocessed = pd.DataFrame(
preprocessor.fit_transform(ames_housing),
columns=categorical_features.tolist() + numerical_features,
)
ames_housing_preprocessed = ames_housing_preprocessed[ames_housing.columns]
ames_housing_preprocessed = ames_housing_preprocessed.astype(ames_housing.dtypes)
(ames_housing_no_missing == ames_housing_preprocessed).all()
###Output
_____no_output_____
###Markdown
The Ames housing dataset
In this notebook, we will quickly present the "Ames housing" dataset. We will see that this dataset is similar to the "California housing" dataset. However, it is more complex to handle: it contains missing data and both numerical and categorical features. This dataset is located in the `datasets` directory. It is stored in a comma-separated value (CSV) file. As previously mentioned, we are aware that the dataset contains missing values. The character `"?"` is used as a missing value marker. We will open the dataset and specify the missing value marker such that they will be parsed by pandas when opening the file.
###Code
import pandas as pd
ames_housing = pd.read_csv("../datasets/house_prices.csv", na_values='?')
ames_housing = ames_housing.drop(columns="Id")
###Output
_____no_output_____
###Markdown
We can have a first look at the available columns in this dataset.
###Code
ames_housing.head()
###Output
_____no_output_____
###Markdown
We see that the last column named `"SalePrice"` is indeed the target that we would like to predict. So we will split our dataset into two variables containing the data and the target.
###Code
target_name = "SalePrice"
data, target = ames_housing.drop(columns=target_name), ames_housing[target_name]
###Output
_____no_output_____
###Markdown
Let's have a quick look at the target before focusing on the data.
###Code
target.head()
###Output
_____no_output_____
###Markdown
We see that the target contains continuous values. It corresponds to the price of a house in $. We can have a look at the target distribution.
###Code
import matplotlib.pyplot as plt
target.plot.hist(bins=20, edgecolor="black")
plt.xlabel("House price in $")
_ = plt.title("Distribution of the house price \nin Ames")
###Output
_____no_output_____
###Markdown
We see that the distribution has a long tail. It means that most of the houses are normally distributed but a couple of houses have a higher than normal value. It could be critical to take this peculiarity into account when designing a predictive model. Now, we can have a look at the available data that we could use to predict house prices.
###Code
data.info()
###Output
_____no_output_____
###Markdown
Looking at the dataframe general information, we can see that 79 features are available and that the dataset contains 1460 samples. However, some features contain missing values. Also, the type of data is heterogeneous: both numerical and categorical data are available. First, we will have a look at the data represented with numbers.
###Code
numerical_data = data.select_dtypes("number")
numerical_data.info()
###Output
_____no_output_____
###Markdown
We see that the data are mainly represented with integer numbers. Let's have a look at the histograms for all these features.
###Code
numerical_data.hist(bins=20, figsize=(12, 22), edgecolor="black",
layout=(9, 4))
plt.subplots_adjust(hspace=0.8, wspace=0.8)
###Output
_____no_output_____
###Markdown
We see that some features have high peaks at 0. It could be linked to the fact that this value was assigned when the criterion did not apply, for instance the area of the swimming pool when no swimming pool is available. We also have some features encoding dates (for instance a year). This information is useful and should also be considered when designing a predictive model. Now, let's have a look at the data encoded with strings.
###Code
string_data = data.select_dtypes(object)
string_data.info()
###Output
_____no_output_____
###Markdown
These features are categorical. We can make some bar plots to see the category counts for each feature.
###Code
from math import ceil
from itertools import zip_longest
n_string_features = string_data.shape[1]
nrows, ncols = ceil(n_string_features / 4), 4
fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(14, 80))
for feature_name, ax in zip_longest(string_data, axs.ravel()):
if feature_name is None:
# do not show the axis
ax.axis("off")
continue
string_data[feature_name].value_counts().plot.barh(ax=ax)
ax.set_title(feature_name)
plt.subplots_adjust(hspace=0.2, wspace=0.8)
###Output
_____no_output_____
###Markdown
Plotting this information allows us to answer two questions:
* Are there few or many categories for a given feature?
* Are there rare categories for some features? (A quick check of both points is sketched after this note.)
Knowing about these peculiarities would help in designing the predictive pipeline.
Note: in order to keep the content of the course simple and didactic, we created a version of this database without missing values.
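The quick check mentioned above, using the `string_data` frame defined earlier (a sketch, not part of the original notebook):
```
# Number of distinct categories per feature, and the frequency of the rarest
# category -- very small frequencies indicate rare categories.
n_categories = string_data.nunique().sort_values(ascending=False)
rarest_frequency = string_data.apply(
    lambda col: col.value_counts(normalize=True).min()).sort_values()
n_categories.head(10), rarest_frequency.head(10)
```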
###Code
ames_housing_no_missing = pd.read_csv("../datasets/ames_housing_no_missing.csv")
ames_housing_no_missing.head()
###Output
_____no_output_____
###Markdown
It contains the same information as the original dataset after using a [`sklearn.impute.SimpleImputer`](https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html) to replace missing values using the mean along each numerical column (including the target), and the most frequent value along each categorical column.
###Code
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
numerical_features = [
"LotFrontage",
"LotArea",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"BedroomAbvGr",
"KitchenAbvGr",
"TotRmsAbvGrd",
"Fireplaces",
"GarageCars",
"GarageArea",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
target_name,
]
categorical_features = data.columns.difference(numerical_features)
most_frequent_imputer = SimpleImputer(strategy="most_frequent")
mean_imputer = SimpleImputer(strategy="mean")
preprocessor = make_column_transformer(
(most_frequent_imputer, categorical_features),
(mean_imputer, numerical_features),
)
ames_housing_preprocessed = pd.DataFrame(
preprocessor.fit_transform(ames_housing),
columns=categorical_features.tolist() + numerical_features,
)
ames_housing_preprocessed = ames_housing_preprocessed[ames_housing.columns]
ames_housing_preprocessed = ames_housing_preprocessed.astype(ames_housing.dtypes)
(ames_housing_no_missing == ames_housing_preprocessed).all()
###Output
_____no_output_____ |
Sentiment_RNN_Completed.ipynb | ###Markdown
Sentiment Analysis with an RNN
In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedforward network is more accurate since we can include information about the *sequence* of words. Here we'll use a dataset of movie reviews, accompanied by labels.
The architecture for this network is shown below.
Here, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on its own.
From the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.
We don't care about the sigmoid outputs except for the very last one; we can ignore the rest. We'll calculate the cost from the output of the last step and the training label.
###Code
import numpy as np
import tensorflow as tf
with open('../sentiment-network/reviews.txt', 'r') as f:
reviews = f.read()
with open('../sentiment-network/labels.txt', 'r') as f:
labels = f.read()
reviews[:2000]
###Output
_____no_output_____
###Markdown
Data preprocessing
The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.
You can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines `\n`. To deal with those, I'm going to split the text into each review using `\n` as the delimiter. Then I can combine all the reviews back together into one big string.
First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.
###Code
from string import punctuation
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
all_text[:2000]
words[:100]
###Output
_____no_output_____
###Markdown
Encoding the words
The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.
> **Exercise:** Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers **start at 1, not 0**.
> Also, convert the reviews to integers and store the reviews in a new list called `reviews_ints`.
###Code
from collections import Counter
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
reviews_ints = []
for each in reviews:
reviews_ints.append([vocab_to_int[word] for word in each.split()])
###Output
_____no_output_____
###Markdown
Encoding the labels
Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1.
> **Exercise:** Convert labels from `positive` and `negative` to 1 and 0, respectively.
###Code
labels = labels.split('\n')
labels = np.array([1 if each == 'positive' else 0 for each in labels])
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
###Output
Zero-length reviews: 1
Maximum review length: 2514
###Markdown
Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 words.
> **Exercise:** First, remove the review with zero length from the `reviews_ints` list.
###Code
non_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) != 0]
len(non_zero_idx)
reviews_ints[-1]
###Output
_____no_output_____
###Markdown
Turns out it's the final review that has zero length. But that might not always be the case, so let's make it more general.
###Code
reviews_ints = [reviews_ints[ii] for ii in non_zero_idx]
labels = np.array([labels[ii] for ii in non_zero_idx])
###Output
_____no_output_____
###Markdown
> **Exercise:** Now, create an array `features` that contains the data we'll pass to the network. The data should come from `reviews_ints`, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is `['best', 'movie', 'ever']`, `[117, 18, 128]` as integers, the row will look like `[0, 0, 0, ..., 0, 117, 18, 128]`. For reviews longer than 200, use only the first 200 words as the feature vector.
This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.
###Code
seq_len = 200
features = np.zeros((len(reviews_ints), seq_len), dtype=int)
for i, row in enumerate(reviews_ints):
features[i, -len(row):] = np.array(row)[:seq_len]
features[:10,:100]
###Output
_____no_output_____
###Markdown
Training, Validation, Test With our data in nice shape, we'll split it into training, validation, and test sets.> **Exercise:** Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, `train_x` and `train_y` for example. Define a split fraction, `split_frac` as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data.
###Code
split_frac = 0.8
split_idx = int(len(features)*0.8)
train_x, val_x = features[:split_idx], features[split_idx:]
train_y, val_y = labels[:split_idx], labels[split_idx:]
test_idx = int(len(val_x)*0.5)
val_x, test_x = val_x[:test_idx], val_x[test_idx:]
val_y, test_y = val_y[:test_idx], val_y[test_idx:]
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}".format(val_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
###Output
Feature Shapes:
Train set: (20000, 200)
Validation set: (2500, 200)
Test set: (2500, 200)
###Markdown
With train, validation, and test fractions of 0.8, 0.1, 0.1, the final shapes should look like:
```
Feature Shapes:
Train set: (20000, 200)
Validation set: (2500, 200)
Test set: (2500, 200)
```
Build the graph
Here, we'll build the graph. First up, defining the hyperparameters.
* `lstm_size`: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.
* `lstm_layers`: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting.
* `batch_size`: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.
* `learning_rate`: Learning rate
###Code
lstm_size = 256
lstm_layers = 1
batch_size = 500
learning_rate = 0.001
###Output
_____no_output_____
###Markdown
For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be `batch_size` vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability. > **Exercise:** Create the `inputs_`, `labels_`, and drop out `keep_prob` placeholders using `tf.placeholder`. `labels_` needs to be two-dimensional to work with some functions later. Since `keep_prob` is a scalar (a 0-dimensional tensor), you shouldn't provide a size to `tf.placeholder`.
###Code
n_words = len(vocab_to_int) + 1 # Adding 1 because we use 0's for padding, dictionary started at 1
# Create the graph object
graph = tf.Graph()
# Add nodes to the graph
with graph.as_default():
inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')
labels_ = tf.placeholder(tf.int32, [None, None], name='labels')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
###Output
_____no_output_____
###Markdown
Embedding
Now we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.
> **Exercise:** Create the embedding lookup matrix as a `tf.Variable`. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with [`tf.nn.embedding_lookup`](https://www.tensorflow.org/api_docs/python/tf/nn/embedding_lookup). This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. So, if the embedding layer has 200 units, the function will return a tensor with size [batch_size, seq_len, 200].
###Code
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300
with graph.as_default():
embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))
embed = tf.nn.embedding_lookup(embedding, inputs_)
###Output
_____no_output_____
###Markdown
LSTM cell
Next, we'll create our LSTM cells to use in the recurrent network ([TensorFlow documentation](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn)). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.
To create a basic LSTM cell for the graph, you'll want to use `tf.contrib.rnn.BasicLSTMCell`. Looking at the function documentation:
```tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=tanh)```
you can see it takes a parameter called `num_units`, the number of units in the cell, called `lstm_size` in this code. So then, you can write something like
```lstm = tf.contrib.rnn.BasicLSTMCell(num_units)```
to create an LSTM cell with `num_units`. Next, you can add dropout to the cell with `tf.contrib.rnn.DropoutWrapper`. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like
```drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)```
Most of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with `tf.contrib.rnn.MultiRNNCell`:
```cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)```
Here, `[drop] * lstm_layers` creates a list of cells (`drop`) that is `lstm_layers` long. The `MultiRNNCell` wrapper builds this into multiple layers of RNN cells, one for each cell in the list.
So the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an architectural viewpoint, just a more complicated graph in the cell.
> **Exercise:** Below, use `tf.contrib.rnn.BasicLSTMCell` to create an LSTM cell. Then, add dropout to it with `tf.contrib.rnn.DropoutWrapper`. Finally, create multiple LSTM layers with `tf.contrib.rnn.MultiRNNCell`.
Here is [a tutorial on building RNNs](https://www.tensorflow.org/tutorials/recurrent) that will help you out.
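One caveat, not part of the original exercise: with `lstm_layers = 1` the list-multiplication above is fine, but in some TensorFlow 1.x versions reusing the same wrapped cell object for every layer causes a variable-reuse error when stacking several layers. A common alternative is to build one fresh cell per layer, for example:
```
def build_cell(lstm_size, keep_prob):
    # One fresh LSTM cell wrapped with dropout per layer
    lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)

cell = tf.contrib.rnn.MultiRNNCell(
    [build_cell(lstm_size, keep_prob) for _ in range(lstm_layers)])
```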
###Code
with graph.as_default():
# Your basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
# Getting an initial state of all zeros
initial_state = cell.zero_state(batch_size, tf.float32)
###Output
_____no_output_____
###Markdown
RNN forward pass
Now we need to actually run the data through the RNN nodes. You can use [`tf.nn.dynamic_rnn`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) to do this. You'd pass in the RNN cell you created (our multiple layered LSTM `cell` for instance), and the inputs to the network.
```outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)```
Above I created an initial state, `initial_state`, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. `tf.nn.dynamic_rnn` takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.
> **Exercise:** Use `tf.nn.dynamic_rnn` to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, `embed`.
###Code
with graph.as_default():
outputs, final_state = tf.nn.dynamic_rnn(cell, embed,
initial_state=initial_state)
###Output
_____no_output_____
###Markdown
Output
We only care about the final output; we'll be using that as our sentiment prediction. So we need to grab the last output with `outputs[:, -1]`, then calculate the cost from that and `labels_`.
###Code
with graph.as_default():
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
cost = tf.losses.mean_squared_error(labels_, predictions)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
###Output
_____no_output_____
###Markdown
Validation accuracy
Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass.
###Code
with graph.as_default():
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
###Output
_____no_output_____
###Markdown
Batching
This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the `x` and `y` arrays and returns slices out of those arrays with size `[batch_size]`.
###Code
def get_batches(x, y, batch_size=100):
n_batches = len(x)//batch_size
x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
for ii in range(0, len(x), batch_size):
yield x[ii:ii+batch_size], y[ii:ii+batch_size]
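
# Quick sanity check (illustrative, not part of the original notebook):
# the first batch should be batch_size rows of seq_len word ids.
x_batch, y_batch = next(get_batches(train_x, train_y, batch_size))
assert x_batch.shape == (batch_size, seq_len) and y_batch.shape == (batch_size,)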
###Output
_____no_output_____
###Markdown
Training
Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the `checkpoints` directory exists.
###Code
epochs = 10
with graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
iteration = 1
for e in range(epochs):
state = sess.run(initial_state)
for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 0.5,
initial_state: state}
loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
if iteration%5==0:
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Train loss: {:.3f}".format(loss))
if iteration%25==0:
val_acc = []
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
for x, y in get_batches(val_x, val_y, batch_size):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: val_state}
batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
val_acc.append(batch_acc)
print("Val acc: {:.3f}".format(np.mean(val_acc)))
iteration +=1
saver.save(sess, "checkpoints/sentiment.ckpt")
###Output
_____no_output_____
###Markdown
Testing
###Code
test_acc = []
with tf.Session(graph=graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
test_state = sess.run(cell.zero_state(batch_size, tf.float32))
for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: test_state}
batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
test_acc.append(batch_acc)
print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
###Output
Test accuracy: 0.830
|
PermutationBoosting-m3/build2-233a.ipynb | ###Markdown
*Unit 2, Sprint 3, Module 1*---
###Code
import pandas as pd
import numpy as np
# !pip install category_encoders==2.*
data = pd.read_csv('https://github.com/skhabiri/FORESTCOVER-METRICS/blob/master/data/train.csv?raw=true')
print(data.shape)
data.head()
data.describe()
data.nunique().sort_values(ascending=False)
# pd.Series({c: data[c].unique() for c in data})[-40:]
# [data[col].unique() for col in data]
###Output
_____no_output_____
###Markdown
Our target label is "Cover_Type". We will drop low-cardinality features whose imbalance exceeds imb% (the fraction of rows taken by the most frequent value). We also drop the "Id" column.
###Code
def wrangle_pre(X, imb=0.95):
'''
Returns the sorted list of feature names
with imbalance exceeding imb value
'''
X=X.copy()
# drop the binary features with imb% imbalance
# mask = X.nunique().sort_values(ascending=False) < 5
# lowcard_col = X.nunique().sort_values(ascending=False)[mask].index
mask2 = pd.Series({col: X[col].value_counts().
max()/X[col].value_counts().
sum() for col in X.nunique().index}).sort_values(ascending=False)
mask2 = mask2[mask2 >= imb]
Id_skew_cols = ["Id"] + list(mask2.index)
return Id_skew_cols
from sklearn.model_selection import train_test_split
# Split train into train & val
train, val = train_test_split(data, train_size=0.80, test_size=0.20, stratify=data["Cover_Type"],
random_state=42)
print(f'train: {train.shape}, val: {val.shape}')
# Separate class label and data
y_train = train["Cover_Type"]
X_train = train.drop("Cover_Type", axis=1)
y_val = val["Cover_Type"]
X_val = val.drop("Cover_Type", axis=1)
Id_skew_cols = wrangle_pre(X_train, imb=0.01)
def wrangle(X, drop_count=1, cols=Id_skew_cols):
'''
drops drop_count number of features from col starting from index=0 (Id)
'''
print("drop_count parameter: ",drop_count)
X = X.copy()
X = X.drop(labels=cols[:drop_count], axis=1)
print(f'X shape before return: {X.shape}')
return X
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import validation_curve
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score, mean_absolute_error
import random
###Output
_____no_output_____
###Markdown
Baseline model
###Code
y_train.value_counts(normalize=True)
# Instantiate
log_model = LogisticRegression()
# Fit with training data
log_model.fit(X_train, y_train)
print('training accuracy:', log_model.score(X_train, y_train))
print('validation accuracy:', log_model.score(X_val, y_val))
y_pred = log_model.predict(X_val)
print(classification_report(y_val, y_pred, target_names=None))
###Output
training accuracy: 0.3837632275132275
validation accuracy: 0.37566137566137564
precision recall f1-score support
1 0.34 0.21 0.26 432
2 0.35 0.21 0.26 432
3 0.36 0.37 0.37 432
4 0.53 0.62 0.58 432
5 0.31 0.42 0.36 432
6 0.28 0.27 0.27 432
7 0.41 0.53 0.47 432
accuracy 0.38 3024
macro avg 0.37 0.38 0.36 3024
weighted avg 0.37 0.38 0.36 3024
###Markdown
RandomForestClassifier pipeline and feature_importances_
###Code
# Note: this cell assumes `clf` is an already-fitted RandomForestClassifier
# (e.g., pipeline.named_steps['randomforestclassifier'] after fitting the pipeline below).
max_depth = list()
for tree in clf.estimators_:
    max_depth.append(tree.tree_.max_depth)
print("avg max depth %0.1f" % (sum(max_depth) / len(max_depth)))
print(f' X_train shape before pipeline: {X_train.shape}')
# Make pipeline!
pipeline = make_pipeline(
FunctionTransformer(wrangle, validate=False),
# ce.OrdinalEncoder(),
# SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=50, criterion="entropy", max_depth=20,
min_samples_split=2, min_samples_leaf=8, min_weight_fraction_leaf=0.0,
max_features=20, max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=-1,
random_state=42, verbose=0, warm_start=False, class_weight=None,
ccp_alpha=0.0, max_samples=None)
)
drop_count = int(0.1*len(Id_skew_cols))
pipeline.set_params(functiontransformer__kw_args={'drop_count': drop_count})
#.fit: WITHOUT CHANGING X APPLIES THE TRANSFORM AND CHECK TO SEE IF y FITS TRANSFORMED OF X
print("\n fitting ...")
pipeline.fit(X_train, y_train)
print("\n getting X_train transformed column labels")
feat_name = pipeline.named_steps['functiontransformer'].transform(X_train).columns
print("X_train: ", len(X_train.columns), "X_fit: ", len(feat_name))
assert len(X_train.columns) == len(feat_name) + drop_count
print("\n predicting ...")
y_pred = pipeline.predict(X_val)
print("\n Accuracy ...")
print('Training Accuracy', accuracy_score(y_train, pipeline.predict(X_train)))
print('Validation Accuracy', accuracy_score(y_val, y_pred))
rf = pipeline.named_steps['randomforestclassifier']
print(pipeline.named_steps['randomforestclassifier'].n_features_)
importances = pd.Series(rf.feature_importances_, feat_name).sort_values(ascending=True)
plt.figure(figsize=(15,10))
importances.plot.barh()
###Output
_____no_output_____
###Markdown
In the presence of all the features, the feature_importances_ of the Id column ranks relatively high. Hence, feature_importances_ by itself cannot be a deciding factor (a permutation-importance cross-check is sketched below). We drop 10% of the skewed columns and get about 82% validation accuracy.
Cross validation curve for skewed features
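The permutation-importance cross-check mentioned above, shown here before the validation curve (a minimal sketch, not in the original notebook, assuming scikit-learn >= 0.22 and the `pipeline` fitted in the cell above):
```
from sklearn.inspection import permutation_importance

# Permute each raw input column on the validation split and measure the drop in score;
# columns whose shuffling barely hurts accuracy are unimportant regardless of their
# impurity-based ranking. This can be slow because the whole pipeline is re-scored.
perm = permutation_importance(pipeline, X_val, y_val,
                              n_repeats=5, random_state=42, n_jobs=-1)
perm_importances = pd.Series(perm.importances_mean, index=X_val.columns)
perm_importances.sort_values(ascending=False).head(15)
```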
###Code
par_name = "drop_count"
param_range = [{par_name: i} for i in range(len(Id_skew_cols))]
param_rangex = [i for i in range(len(Id_skew_cols))]
# par_name = "max_features"
# param_range = np.arange(0.1,1.1,0.1)
# param_rangex = param_range
# par_name = "max_depth"
# param_range = range(1,25,1)
# param_rangex = param_range
# par_name = "min_samples_split"
# param_range = np.linspace(10, 0.01*len(X_train), 10, endpoint=True).astype(int)
# param_rangex = param_range
# par_name = "min_samples_leaf"
# param_range = np.linspace(2, 0.001*len(X_train), 10, endpoint=True).astype(int)
# param_rangex = param_range
# par_name = "criterion"
# param_range = ["gini", "entropy"]
# param_rangex = param_range
train_scores, val_scores = validation_curve(
pipeline, X_train, y_train,
param_name='functiontransformer__kw_args',
# param_name='randomforestclassifier__'+ par_name,
param_range=param_range,
scoring='accuracy',
cv=5,
n_jobs=-1
)
# for different values of param_range
print("val scores", val_scores)
print("val scores mean", np.mean(val_scores, axis=1))
# Averaging CV scores
plt.figure(dpi=150)
plt.plot(param_rangex, np.mean(train_scores, axis=1), color='blue', label='training accuracy')
plt.plot(param_rangex, np.mean(val_scores, axis=1), color='red', label='validation accuracy')
plt.title('Validation Curve')
plt.xlabel(f'model complexity: Pipeline {par_name}')
plt.ylabel('model score: Accuracy')
plt.legend()
param_range
###Output
val scores [[0.83305785 0.83836296 0.84497726 0.82389417 0.83298884]
[0.82231405 0.82720132 0.82141381 0.81810666 0.8222406 ]
[0.8231405 0.82100041 0.82430757 0.82306738 0.82348078]
[0.81983471 0.82926829 0.82430757 0.82017363 0.82058702]
[0.81859504 0.82430757 0.82802811 0.81934684 0.8222406 ]
[0.82561983 0.8284415 0.82720132 0.8218272 0.81893344]
[0.82066116 0.83216205 0.82554775 0.82554775 0.82472096]
[0.82603306 0.82430757 0.82678793 0.8218272 0.82058702]
[0.82396694 0.82720132 0.82637453 0.82513435 0.82265399]
[0.82107438 0.83009508 0.82389417 0.82141381 0.82265399]
[0.82272727 0.8222406 0.82100041 0.81893344 0.81397272]
[0.81859504 0.83257544 0.82513435 0.82141381 0.82678793]
[0.82190083 0.83298884 0.83050847 0.82058702 0.82141381]
[0.8214876 0.83009508 0.82306738 0.82389417 0.82554775]
[0.81818182 0.82637453 0.8218272 0.81893344 0.82554775]
[0.82107438 0.82802811 0.82513435 0.82017363 0.81976023]
[0.81652893 0.82968169 0.82472096 0.82058702 0.8152129 ]
[0.81859504 0.82761472 0.8222406 0.81976023 0.81852005]
[0.81900826 0.82678793 0.82926829 0.82058702 0.82306738]
[0.82024793 0.83381563 0.82430757 0.81976023 0.82017363]
[0.82603306 0.82637453 0.82761472 0.82058702 0.81397272]
[0.81983471 0.82926829 0.82348078 0.82017363 0.82265399]
[0.81900826 0.82802811 0.81893344 0.81893344 0.81934684]
[0.82024793 0.83340223 0.82761472 0.81893344 0.81852005]
[0.82024793 0.83050847 0.82430757 0.81852005 0.82348078]
[0.81942149 0.83050847 0.82017363 0.82430757 0.82017363]
[0.81652893 0.83422902 0.8218272 0.81810666 0.82265399]
[0.82107438 0.83257544 0.82348078 0.81893344 0.82141381]
[0.81983471 0.82637453 0.82389417 0.8152129 0.81397272]
[0.81942149 0.82802811 0.81976023 0.81852005 0.81893344]
[0.8161157 0.82472096 0.81934684 0.81893344 0.81686647]
[0.81239669 0.82637453 0.81686647 0.8147995 0.82017363]
[0.81694215 0.82720132 0.81603969 0.81190575 0.81810666]
[0.81570248 0.82348078 0.81066556 0.81190575 0.81355932]
[0.81694215 0.82513435 0.81231914 0.81149235 0.81190575]
[0.80909091 0.81686647 0.81231914 0.80405126 0.8081852 ]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]
[ nan nan nan nan nan]]
val scores mean [0.83465622 0.82225529 0.82299933 0.82283424 0.82250363 0.82440466
0.82572793 0.82390855 0.82506623 0.82382629 0.81977489 0.82490132
0.82547979 0.8248184 0.82217295 0.82283414 0.8213463 0.82134613
0.82374378 0.823661 0.82291641 0.82308228 0.82085002 0.82374368
0.82341296 0.82291696 0.82266916 0.82349557 0.81985781 0.82093266
0.81919668 0.81812217 0.81803911 0.81506278 0.81555875 0.8101026
nan nan nan nan nan nan
nan nan nan nan nan nan
nan nan nan]
###Markdown
Beyond dropping about 30 features, validation accuracy starts going down.
RandomizedSearchCV
###Code
# pipe = make_pipeline(FunctionTransformer(log_columns, ), PCA(), SVC())
# param_grid = dict(
# functiontransformer__kw_args=[
# {'col_idx': None},
# {'col_idx': [1]}
# ],
# pca__n_components=[2, 5, 10],
# svc__C=[0.1, 10, 100],
# )
# grid_search = GridSearchCV(pipe, param_grid=param_grid)
# digits = load_digits()
# res = grid_search.fit(digits.data, digits.target)
print('Model Hyperparameters:')
print(pipeline.named_steps['randomforestclassifier'])
x_n_iter = 50
param_distributions = {
# 'simpleimputer__strategy': ['mean', 'median', 'most_frequent'],
'functiontransformer__kw_args': [{par_name: i} for i in range(int(0.5*len(Id_skew_cols)))],
'randomforestclassifier__min_samples_leaf': [random.randint(1, 1000) for i in range(20)],
'randomforestclassifier__min_samples_split': [random.randint(2, 1000) for i in range(20)],
'randomforestclassifier__max_features': [random.randint(2, 54) for i in range(20)],
'randomforestclassifier__criterion': ["gini", "entropy"]
}
rscv = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=x_n_iter,
cv=4,
scoring='accuracy',
verbose=10,
return_train_score=True,
n_jobs=-1
)
rscv.fit(X_train, y_train)
bestpipe = rscv.best_estimator_
print('Cross-validation Accuracy', rscv.best_score_)
print('Best hyperparameters', rscv.best_params_)
rscv.best_estimator_
rscv.best_params_
best_feat = bestpipe.named_steps['functiontransformer'].transform(X_train).columns
best_feat, best_feat.shape
#.predict: WITHOUT ACTUALLY TRANSFORMING X, APPLIES THE TRANSFORMS TO X AND PREDICT a fitted y
# wrangle_col = pipeline.named_steps['functiontransformer'].transform(X_train).columns
print(f' X_val shape before predict : {X_val.shape}')
print("predicting ...")
y_pred = bestpipe.predict(X_val)
print(f' X_val shape after predict : {X_val.shape}')
print('Validation Accuracy', accuracy_score(y_val, y_pred))
###Output
X_val shape before predict : (3024, 55)
predicting ...
drop_count parameter: 17
X shape before return: (3024, 38)
X_val shape after predict : (3024, 55)
Validation Accuracy 0.708994708994709
|
Keras_Deep_Space_Signal_Classifier/Research_Notebook.ipynb | ###Markdown
Classify Radio Signals from Space with Keras Task 1: Import Libraries
###Code
from livelossplot.tf_keras import PlotLossesCallback
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import numpy as np
np.random.seed(42)
import warnings;warnings.simplefilter('ignore')
%matplotlib inline
print('Tensorflow version:', tf.__version__)
###Output
_____no_output_____
###Markdown
Task 2: Load and Preprocess SETI Data
###Code
train_images = pd.read_csv('dataset/train/images.csv', header=None)
train_labels = pd.read_csv('dataset/train/labels.csv', header=None)
val_images = pd.read_csv('dataset/validation/images.csv', header=None)
val_labels = pd.read_csv('dataset/validation/labels.csv', header=None)
train_images.head()
train_labels.head()
print("Training set shape:", train_images.shape, train_labels.shape)
print("Validation set shape:", val_images.shape, val_labels.shape)
x_train = train_images.values.reshape(3200, 64, 128, 1)
x_val = val_images.values.reshape(800, 64, 128, 1)
y_train = train_labels.values
y_val = val_labels.values
###Output
_____no_output_____
###Markdown
Task 3: Plot 2D Spectrograms
###Code
plt.figure(0, figsize=(12,12))
for i in range(1,4):
plt.subplot(1,3,i)
img = np.squeeze(x_train[np.random.randint(0, x_train.shape[0])])
plt.xticks([])
plt.yticks([])
plt.imshow(img)
plt.imshow(np.squeeze(x_train[3]), cmap="gray");
###Output
_____no_output_____
###Markdown
Task 4: Create Training and Validation Data Generators
###Code
from tensorflow.keras.preprocessing.image import ImageDataGenerator
datagen_train = ImageDataGenerator(horizontal_flip=True)
datagen_train.fit(x_train)
datagen_val = ImageDataGenerator(horizontal_flip=True)
datagen_val.fit(x_val)
###Output
_____no_output_____
###Markdown
Task 5: Creating the CNN Model
###Code
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
# Initialising the CNN
model = Sequential()
# 1st Convolution
model.add(Conv2D(32,(5,5), padding='same', input_shape=(64, 128,1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 2nd Convolution layer
model.add(Conv2D(64,(5,5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Flattening
model.add(Flatten())
# Fully connected layer
model.add(Dense(1024))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(4, activation='softmax'))
###Output
_____no_output_____
###Markdown
Task 6: Learning Rate Scheduling and Compile the Model
###Code
initial_learning_rate = 0.005
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=5,
decay_rate=0.96,
staircase=True)
optimizer = Adam(learning_rate=lr_schedule)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
###Output
_____no_output_____
###Markdown
Task 7: Training the Model
###Code
checkpoint = ModelCheckpoint("model_weights.h5", monitor='val_loss',
save_weights_only=True, mode='min', verbose=0)
callbacks = [PlotLossesCallback(), checkpoint]#, reduce_lr]
batch_size = 32
history = model.fit(
datagen_train.flow(x_train, y_train, batch_size=batch_size, shuffle=True),
steps_per_epoch=len(x_train)//batch_size,
validation_data = datagen_val.flow(x_val, y_val, batch_size=batch_size, shuffle=True),
validation_steps = len(x_val)//batch_size,
epochs=12,
callbacks=callbacks
)
###Output
_____no_output_____
###Markdown
Task 8: Model Evaluation
###Code
model.evaluate(x_val, y_val)
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import seaborn as sns
y_true = np.argmax(y_val, 1)
y_pred = np.argmax(model.predict(x_val), 1)
print(metrics.classification_report(y_true, y_pred))
print("Classification accuracy: %0.6f" % metrics.accuracy_score(y_true, y_pred))
labels = ["squiggle", "narrowband", "noise", "narrowbanddrd"]
ax= plt.subplot()
sns.heatmap(metrics.confusion_matrix(y_true, y_pred, normalize='true'), annot=True, ax = ax, cmap=plt.cm.Blues); #annot=True to annotate cells
# labels, title and ticks
ax.set_title('Confusion Matrix');
ax.xaxis.set_ticklabels(labels); ax.yaxis.set_ticklabels(labels);
###Output
_____no_output_____ |
GridSearchKNN_Case_Study/GridSearchKNN_Case_Study.ipynb | ###Markdown
Grid Search Hyperparameter Optimization
This case study is all about using grid searches to identify the optimal parameters for a machine learning algorithm. To complete this case study, you'll use the Pima Indian diabetes dataset from Kaggle and KNN. Follow along with the preprocessing steps of this case study.
Load the necessary packages
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Load the diabetes data
###Code
diabetes_data = pd.read_csv('diabetes.csv')
diabetes_data.head()
###Output
_____no_output_____
###Markdown
** Start by reviewing the data info.**
###Code
diabetes_data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 768 entries, 0 to 767
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Pregnancies 768 non-null int64
1 Glucose 768 non-null int64
2 BloodPressure 768 non-null int64
3 SkinThickness 768 non-null int64
4 Insulin 768 non-null int64
5 BMI 768 non-null float64
6 DiabetesPedigreeFunction 768 non-null float64
7 Age 768 non-null int64
8 Outcome 768 non-null int64
dtypes: float64(2), int64(7)
memory usage: 54.1 KB
###Markdown
** Apply the describe function to the data.**
###Code
diabetes_data.describe()
###Output
_____no_output_____
###Markdown
** Currently, the missing values in the dataset are represented as zeros. Replace the zero values in the following columns ['Glucose','BloodPressure','SkinThickness','Insulin','BMI'] with nan .**
###Code
diabetes_data = diabetes_data.astype(float)
# Avoid chained indexing with inplace=True (it operates on a copy); assign the replacement back instead.
cols_with_zeros = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']
diabetes_data[cols_with_zeros] = diabetes_data[cols_with_zeros].replace(0, np.nan)
###Output
_____no_output_____
###Markdown
** Plot histograms of each column. **
###Code
diabetes_data.hist()
plt.gcf().set_size_inches(10,10)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Replace the zeros with mean and median values.
###Code
diabetes_data['Glucose'].fillna(diabetes_data['Glucose'].mean(), inplace = True)
diabetes_data['BloodPressure'].fillna(diabetes_data['BloodPressure'].mean(), inplace = True)
diabetes_data['SkinThickness'].fillna(diabetes_data['SkinThickness'].median(), inplace = True)
diabetes_data['Insulin'].fillna(diabetes_data['Insulin'].median(), inplace = True)
diabetes_data['BMI'].fillna(diabetes_data['BMI'].median(), inplace = True)
diabetes_data.head()
###Output
_____no_output_____
###Markdown
** Plot histograms of each column after replacing nan. **
###Code
diabetes_data.hist()
plt.gcf().set_size_inches(10,10)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Plot the correlation matrix heatmap
###Code
plt.figure(figsize=(12,10))
print('Correlation between various features')
p=sns.heatmap(diabetes_data.corr(), annot=True,cmap ='Blues')
###Output
Correlation between various features
###Markdown
** Using Sklearn, standardize the magnitude of the features by scaling the values. **
###Code
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
X = diabetes_data.drop(['Outcome'], axis=1)
scaler = MinMaxScaler()
scaler.fit(X)
scaled_df = scaler.transform(X)
###Output
_____no_output_____
###Markdown
** Define the `y` variable as the `Outcome` column.**
###Code
y=diabetes_data.pop('Outcome')
###Output
_____no_output_____
###Markdown
** Create a 70/30 train and test split. **
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
###Output
_____no_output_____
###Markdown
Using a range of neighbor values from 1 to 9, apply the K-nearest neighbors classifier to classify the data.
###Code
from sklearn.neighbors import KNeighborsClassifier
test_scores = []
train_scores = []
max_k = 10
for i in range(1,max_k):
knn = KNeighborsClassifier(i)
knn.fit(X_train,y_train)
train_scores.append(knn.score(X_train,y_train))
test_scores.append(knn.score(X_test,y_test))
###Output
_____no_output_____
###Markdown
** Print the train and test scores for each iteration.**
###Code
scores = pd.DataFrame({'Train_score':train_scores, 'Test_score':test_scores})
print(scores)
###Output
Train_score Test_score
0 1.000000 0.688312
1 0.841713 0.727273
2 0.843575 0.675325
3 0.811918 0.722944
4 0.802607 0.688312
5 0.795158 0.701299
6 0.800745 0.692641
7 0.789572 0.714286
8 0.789572 0.701299
###Markdown
** Identify the number of neighbors, among the values tried above, that resulted in the max score on the training dataset. **
###Code
best_train_knn = scores.Train_score.idxmax()+1
print(best_train_knn)
###Output
1
###Markdown
** Identify the number of neighbors, among the values tried above, that resulted in the max score on the testing dataset. **
###Code
best_test_knn = scores.Test_score.idxmax()+1
print(best_test_knn)
###Output
2
###Markdown
Plot the train and test model performance by number of neighbors.
###Code
plt.figure(figsize=(12,5))
p = sns.lineplot(range(1,max_k),train_scores,marker='*',label='Train Score')
p = sns.lineplot(range(1,max_k),test_scores,marker='o',label='Test Score')
###Output
_____no_output_____
###Markdown
** Fit and score the best number of neighbors based on the plot. **
###Code
# let's choose k=9 because it's odd and it looks like a good balance between variance and bias
k=9
knn = KNeighborsClassifier(9)
knn.fit(X_train,y_train)
train_scores.append(knn.score(X_train,y_train))
test_scores.append(knn.score(X_test,y_test))
from sklearn.metrics import confusion_matrix
y_pred = knn.predict(X_test)
pl = confusion_matrix(y_test,y_pred)
print(pl)
###Output
[[115 36]
[ 33 47]]
###Markdown
** Plot the confusion matrix for the model fit above. **
###Code
from sklearn.metrics import plot_confusion_matrix, classification_report
# Plot non-normalized confusion matrix
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(knn, X_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
###Output
Confusion matrix, without normalization
[[115 36]
[ 33 47]]
Normalized confusion matrix
[[0.7615894 0.2384106]
[0.4125 0.5875 ]]
###Markdown
** Print the classification report **
###Code
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
0.0 0.78 0.76 0.77 151
1.0 0.57 0.59 0.58 80
accuracy 0.70 231
macro avg 0.67 0.67 0.67 231
weighted avg 0.70 0.70 0.70 231
###Markdown
In the case of the K nearest neighbors algorithm, the K parameter is one of the most important parameters affecting the model performance. The model performance isn't horrible, but what if we didn't consider a wide enough range of values in our neighbors for the KNN? An alternative to fitting a loop of models is to use a grid search to identify the proper number. It is common practice to use a grid search method for all adjustable parameters in any type of machine learning algorithm. First, you define the grid — aka the range of values — to test in the parameter being optimized, and then compare the model outcome performance based on the different values in the grid. Run the code in the next cell to see how to implement the grid search method for identifying the best parameter value for the n_neighbors parameter. Notice the param_grid is the range value to test and we apply cross validation with five folds to score each possible value of n_neighbors.
###Code
from sklearn.model_selection import GridSearchCV
param_grid = {'n_neighbors':np.arange(1,50)}
knn = KNeighborsClassifier()
knn_cv= GridSearchCV(knn,param_grid,cv=5)
knn_cv.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Print the best score and best parameter for n_neighbors.
###Code
print("Best Score:" + str(knn_cv.best_score_))
print("Best Parameters: " + str(knn_cv.best_params_))
###Output
Best Score:0.737417791623399
Best Parameters: {'n_neighbors': 24}
###Markdown
Here you can see that the ideal number of n_neighbors for this model is 24 based on the grid search performed above. ** Now, following the KNN example, apply this grid search method to find the optimal number of estimators in a Random Forest model.**
###Code
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
param_grid = {'n_estimators':np.arange(50,200,50),
'criterion':['gini', 'entropy'],
'max_depth':np.arange(1,11,2),
}
rfc = RandomForestClassifier()
rfc_cv= GridSearchCV(rfc,param_grid,cv=5)
rfc_cv.fit(X_train,y_train)
print("Best Score:" + str(rfc_cv.best_score_))
print("Best Parameters: " + str(rfc_cv.best_params_))
###Output
Best Score:0.7783489096573208
Best Parameters: {'criterion': 'entropy', 'max_depth': 7, 'n_estimators': 100}
|
courses/machine_learning/deepdive2/launching_into_ml/labs/automl-tabular-classification.ipynb | ###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction

Overview
This tutorial demonstrates how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction.

Learning Objective
In this notebook, you learn how to:
* Create a Vertex AI model training job.
* Train an AutoML Tabular model.
* Deploy the `Model` resource to a serving `Endpoint` resource.
* Make a prediction by sending data.
* Undeploy the `Model` resource.

Introduction
This notebook demonstrates, using the Vertex AI Python client library, how to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.

Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb).

**Make sure to enable the Vertex AI API and Compute Engine API.**

Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library. Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernel. After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID. **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Timestamp. If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session and append it onto the name of the resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket. **The following steps are required, regardless of your notebook environment.** This notebook demonstrates how to use the Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored. Set the name of your Cloud Storage bucket below; it must be unique across all of your Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification.csv"
! gsutil cp gs://cloud-samples-data/ai-platform-unified/datasets/tabular/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-samples-data/ai-platform-unified/datasets/tabular/petfinder-tabular-classification.csv [Content-Type=text/csv]...
/ [1 files][872.8 KiB/872.8 KiB]
Operation completed over 1 objects/872.8 KiB.
###Markdown
Import Vertex SDK for Python. Import the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
Tutorial. Now you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSV. This section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a Model. Once you have defined the training job, you create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
# Constructs an AutoML Tabular Training Job
job = # TODO 1 -- Your code goes here(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# Create and train the model object
# This will take around an hour to run
model = # TODO 2a -- Your code goes here(
dataset=ds,
target_column="Adopted",
# Define training, validation and test fraction for training
# TODO 2b -- Your code goes here
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
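###Markdown
The exercise cell above is intentionally left with TODOs. The next cell is only a hedged reference sketch of one possible completion of TODO 1 and TODO 2a/2b, based on the `google-cloud-aiplatform` SDK (`AutoMLTabularTrainingJob` and its `run` method); the 80/10/10 split fractions and the 1000 milli-node-hour budget are illustrative assumptions, not values required by the lab.
###Code
# Reference sketch (assumption) -- one way TODO 1 and TODO 2a/2b could look.
# Build the same column transformations as the exercise cell above.
categorical_columns = ["Type", "Breed1", "Color1", "Color2", "MaturitySize",
                       "FurLength", "Vaccinated", "Sterilized", "Health"]
numeric_columns = ["Age", "Fee", "PhotoAmt"]
column_transformations = (
    [{"categorical": {"column_name": c}} for c in categorical_columns]
    + [{"numeric": {"column_name": c}} for c in numeric_columns]
)
# TODO 1: construct the AutoML tabular training job.
job = aiplatform.AutoMLTabularTrainingJob(
    display_name="train-petfinder-automl-1",
    optimization_prediction_type="classification",
    column_transformations=column_transformations,
)
# TODO 2a/2b: run the job on the managed dataset and get back a Model resource.
model = job.run(
    dataset=ds,
    target_column="Adopted",
    training_fraction_split=0.8,    # illustrative split
    validation_fraction_split=0.1,  # illustrative split
    test_fraction_split=0.1,        # illustrative split
    budget_milli_node_hours=1000,   # illustrative budget (1 node hour)
    model_display_name="adopted-prediction-model",
    disable_early_stopping=False,
)
###Output
_____no_output_____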
###Markdown
Deploy your model. Before you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things: 1. Creates an `Endpoint` resource to which the `Model` resource will be deployed. 2. Deploys the `Model` resource to the `Endpoint` resource. Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# Deploy the model resource to the serving endpoint resource
endpoint = # TODO 3 -- Your code goes here(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
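###Markdown
As a hedged reference for TODO 3 (a sketch, not the canonical solution): `Model.deploy` creates an `Endpoint` resource and deploys the model to it, returning the endpoint.
###Code
# Reference sketch (assumption) -- one way TODO 3 could look.
# Deploy the trained model to a new endpoint backed by a single n1-standard-4 machine.
endpoint = model.deploy(
    machine_type="n1-standard-4",
)
###Output
_____no_output_____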
###Markdown
Predict on the endpoint. * This sample instance is taken from an observation in which `Adopted` = **Yes**. * Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.
###Code
# Make a prediction using the sample values
prediction = # TODO 4 -- Your code goes here(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
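###Markdown
As a hedged reference for TODO 4 (a sketch, not the canonical solution): `Endpoint.predict` takes a list of instances and returns a `Prediction` object whose `predictions` field holds the per-class scores.
###Code
# Reference sketch (assumption) -- one way TODO 4 could look.
# Send the sample instance (all values as strings, matching the CSV source) to the endpoint.
instance = {
    "Type": "Cat", "Age": "3", "Breed1": "Tabby", "Gender": "Male",
    "Color1": "Black", "Color2": "White", "MaturitySize": "Small",
    "FurLength": "Short", "Vaccinated": "No", "Sterilized": "No",
    "Health": "Healthy", "Fee": "100", "PhotoAmt": "2",
}
prediction = endpoint.predict([instance])
print(prediction)
###Output
_____no_output_____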
###Markdown
Undeploy the model. To undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter: - `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# Undeploy the model resource
# TODO 5 -- Your code goes here
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
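###Markdown
As a hedged reference for TODO 5 (a sketch, not the canonical solution): pass the `deployed_model_id` recorded on the prediction object to the endpoint's `undeploy` method.
###Code
# Reference sketch (assumption) -- one way TODO 5 could look.
# Undeploy only the model that served the prediction above.
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
_____no_output_____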
###Markdown
Cleaning up. To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Training Job - Model - Endpoint - Cloud Storage Bucket. **Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction OverviewIn this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning ObjectiveIn this notebook, you learn how to:* Create a Vertex AI model training job.* Train an AutoML tabular model.* Deploy the `model` resource to a serving `endpoint` resource.* Make a prediction by sending data.* Undeploy the `model` resource. IntroductionIn this notebook, you will use Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library.Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored.Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services areavailable](https://cloud.google.com/vertex-ai/docs/general/locations). You maynot use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification.csv"
! gsutil cp gs://cloud-samples-data/ai-platform-unified/datasets/tabular/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-samples-data/ai-platform-unified/datasets/tabular/petfinder-tabular-classification.csv [Content-Type=text/csv]...
/ [1 files][872.8 KiB/872.8 KiB]
Operation completed over 1 objects/872.8 KiB.
###Markdown
Import Vertex SDK for PythonImport the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
TutorialNow you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSVThis section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a ModelOnce we have defined your training script, we will create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
# Constructs a AutoML Tabular Training Job
job = # TODO 1 -- Your code goes here(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# Create and train the model object
# This will take around two hour and half to run
model = # TODO 2a -- Your code goes here(
dataset=ds,
target_column="Adopted",
# Define training, validation and test fraction for training
# TODO 2b -- Your code goes here
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
###Markdown
Deploy your modelBefore you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things:1. Creates an `Endpoint` resource to which the `Model` resource will be deployed.2. Deploys the `Model` resource to the `Endpoint` resource.Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# Deploy the model resource to the serving endpoint resource
endpoint = # TODO 3 -- Your code goes here(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Predict on the endpoint * This sample instance is taken from an observation in which `Adopted` = **Yes*** Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.
###Code
# Make a prediction using the sample values
prediction = # TODO 4 -- Your code goes here(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Undeploy the modelTo undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter:- `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# Undeploy the model resource
# TODO 5 -- Your code goes here
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Training Job- Model- Endpoint- Cloud Storage Bucket**Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction OverviewIn this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning ObjectiveIn this notebook, you learn how to:* Create a Vertex AI model training job.* Train an AutoML tabular model.* Deploy the `model` resource to a serving `endpoint` resource.* Make a prediction by sending data.* Undeploy the `model` resource. IntroductionIn this notebook, you will use Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library.Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored.Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services areavailable](https://cloud.google.com/vertex-ai/docs/general/locations). You maynot use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification.csv"
! gsutil cp gs://cloud-samples-data/ai-platform-unified/datasets/tabular/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-samples-data/ai-platform-unified/datasets/tabular/petfinder-tabular-classification.csv [Content-Type=text/csv]...
/ [1 files][872.8 KiB/872.8 KiB]
Operation completed over 1 objects/872.8 KiB.
###Markdown
Import Vertex SDK for PythonImport the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
TutorialNow you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSVThis section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a ModelOnce we have defined your training script, we will create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
# Constructs a AutoML Tabular Training Job
job = # TODO 1 -- Your code goes here(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# Create and train the model object
# This will take around an hour to run
model = # TODO 2a -- Your code goes here(
dataset=ds,
target_column="Adopted",
# Define training, validation and test fraction for training
# TODO 2b -- Your code goes here
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
###Markdown
Deploy your modelBefore you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things:1. Creates an `Endpoint` resource to which the `Model` resource will be deployed.2. Deploys the `Model` resource to the `Endpoint` resource.Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# Deploy the model resource to the serving endpoint resource
endpoint = # TODO 3 -- Your code goes here(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Predict on the endpoint * This sample instance is taken from an observation in which `Adopted` = **Yes*** Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.
###Code
# Make a prediction using the sample values
prediction = # TODO 4 -- Your code goes here(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
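###Markdown
A hedged sketch for TODO 4, mirroring the solution code later in this file: call `predict` on the deployed endpoint, passing a list containing the sample instance.
###Code
# Sketch only -- the instance dict repeats the sample values from the cell above
instance = {
    "Type": "Cat", "Age": "3", "Breed1": "Tabby", "Gender": "Male",
    "Color1": "Black", "Color2": "White", "MaturitySize": "Small",
    "FurLength": "Short", "Vaccinated": "No", "Sterilized": "No",
    "Health": "Healthy", "Fee": "100", "PhotoAmt": "2",
}
prediction = endpoint.predict([instance])
print(prediction)
###Output
_____no_output_____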
###Markdown
Undeploy the model. To undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter: - `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# Undeploy the model resource
# TODO 5 -- Your code goes here
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
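###Markdown
A hedged sketch for TODO 5, mirroring the solution code later in this file: undeploy the model using the `deployed_model_id` recorded on the prediction response.
###Code
# Sketch only -- removes the deployed model from the serving Endpoint resource
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
_____no_output_____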
###Markdown
Cleaning up. To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: the Training Job, the Model, the Endpoint, and the Cloud Storage Bucket. **Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction. Overview: In this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning Objective: In this notebook, you learn how to: * Create a Vertex AI model training job. * Train an AutoML tabular model. * Deploy the `model` resource to a serving `endpoint` resource. * Make a prediction by sending data. * Undeploy the `model` resource. Introduction: In this notebook, you will use the Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console. Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library. Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernel. After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID. **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Timestamp. If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session and append it onto the name of resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket. **The following steps are required, regardless of your notebook environment.** This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored. Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
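###Markdown
For example, the Vertex AI resources created in this lab live in `us-central1`, so a concrete assignment could look like the sketch below. This is an illustration only; any region where Vertex AI is available works.
###Code
# Example value only -- pick any region where Vertex AI services are available
REGION = "us-central1"
###Output
_____no_output_____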
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification_toy.csv"
! gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/petfinder-tabular-classification_toy.csv [Content-Type=text/csv]...
[1 files][378.2 KiB/378.2 KiB]
Operation completed over 1 objects/378.2 KiB.
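###Markdown
Optionally, you can sanity-check the copied file before creating the dataset. The cell below is a small added example (not part of the original lab) that prints the header and first few rows of the CSV straight from Cloud Storage.
###Code
# Quick check of the copied CSV: print its first five lines
! gsutil cat $gcs_source | head -5
###Output
_____no_output_____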
###Markdown
Import Vertex SDK for Python. Import the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
Tutorial. Now you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSV. This section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a Model. Once you have defined your training job, you will create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
# Constructs an AutoML Tabular Training Job
job = # TODO 1 -- Your code goes here(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# Create and train the model object
# This will take around two and a half hours to run
model = # TODO 2a -- Your code goes here(
dataset=ds,
target_column="Adopted",
# Define training, validation and test fraction for training
# TODO 2b -- Your code goes here
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
###Markdown
Deploy your model. Before you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things: 1. Creates an `Endpoint` resource to which the `Model` resource will be deployed. 2. Deploys the `Model` resource to the `Endpoint` resource. Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# Deploy the model resource to the serving endpoint resource
endpoint = # TODO 3 -- Your code goes here(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Predict on the endpoint. * This sample instance is taken from an observation in which `Adopted` = **Yes**. * Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.
###Code
# Make a prediction using the sample values
prediction = # TODO 4 -- Your code goes here(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Undeploy the model. To undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter: - `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# Undeploy the model resource
# TODO 5 -- Your code goes here
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
###Markdown
Cleaning up. To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: the Training Job, the Model, the Endpoint, and the Cloud Storage Bucket. **Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction. Overview: This tutorial demonstrates how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning Objective: In this notebook, you learn how to: * Create a Vertex AI model training job. * Train an AutoML Tabular model. * Deploy the `Model` resource to a serving `Endpoint` resource. * Make a prediction by sending data. * Undeploy the `Model` resource. Introduction: This notebook demonstrates, using the Vertex AI Python client library, how to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console. Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library. Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernel. After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID. **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Timestamp. If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session and append it onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket. **The following steps are required, regardless of your notebook environment.** This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored. Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification.csv"
! gsutil cp gs://cloud-samples-data/ai-platform-unified/datasets/tabular/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-samples-data/ai-platform-unified/datasets/tabular/petfinder-tabular-classification.csv [Content-Type=text/csv]...
/ [1 files][872.8 KiB/872.8 KiB]
Operation completed over 1 objects/872.8 KiB.
###Markdown
Import Vertex SDK for Python. Import the Vertex SDK into your Python environment and initialize it.
###Code
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
Tutorial. Now you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSV. This section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a Model. Once you have defined your training job, you will create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
job = aiplatform.AutoMLTabularTrainingJob(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# This will take around an hour to run
model = job.run(
dataset=ds,
target_column="Adopted",
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
###Markdown
Deploy your model. Before you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things: 1. Creates an `Endpoint` resource to which the `Model` resource will be deployed. 2. Deploys the `Model` resource to the `Endpoint` resource. Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
endpoint = model.deploy(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Predict on the endpoint. * This sample instance is taken from an observation in which `Adopted` = **Yes**. * Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.
###Code
prediction = endpoint.predict(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
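###Markdown
The returned object bundles the model output together with the ID of the deployed model that served the request. The short sketch below assumes the SDK's `Prediction` object exposes `predictions` and `deployed_model_id` fields; the latter is also used by `undeploy` in the next step.
###Code
# Each entry in prediction.predictions corresponds to one instance in the request
for result in prediction.predictions:
    print(result)
# The deployed model that served this request
print(prediction.deployed_model_id)
###Output
_____no_output_____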
###Markdown
Undeploy the model. To undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter: - `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
###Markdown
Cleaning up. To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: the Training Job, the Model, the Endpoint, and the Cloud Storage Bucket. **Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction. Overview: In this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning Objective: In this notebook, you learn how to: * Create a Vertex AI model training job. * Train an AutoML tabular model. * Deploy the `model` resource to a serving `endpoint` resource. * Make a prediction by sending data. * Undeploy the `model` resource. Introduction: In this notebook, you will use the Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console. Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library. Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernel. After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID. **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Timestamp. If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket. **The following steps are required, regardless of your notebook environment.** This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored. Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification_toy.csv"
! gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/petfinder-tabular-classification_toy.csv [Content-Type=text/csv]...
[1 files][378.2 KiB/378.2 KiB]
Operation completed over 1 objects/378.2 KiB.
###Markdown
Import Vertex SDK for Python. Import the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
Tutorial. Now you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSV. This section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a Model. Once the training job is defined, you will create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object. **NOTE: It takes nearly 2 hours 15 minutes to complete the training. Please wait until the training is complete. If the training takes longer than the lab time, please only review the next sections.**
###Code
# Constructs an AutoML Tabular Training Job
job = # TODO 1 -- Your code goes here(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# Create and train the model object
# This will take around two and a half hours to run
model = # TODO 2a -- Your code goes here(
dataset=ds,
target_column="Adopted",
# Define training, validation and test fraction for training
# TODO 2b -- Your code goes here
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
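###Markdown
For reference, a hedged sketch of one possible completion of TODO 1 and TODO 2 above is shown below. It assumes the `aiplatform.AutoMLTabularTrainingJob` constructor and its `run` method from the Vertex AI SDK; the 80/10/10 split fractions are illustrative assumptions, not values prescribed by this lab — compare with the solution notebook before relying on it.
###Code
# Hedged sketch (assumed completion of the TODOs above; not the official solution)
categorical_columns = ["Type", "Breed1", "Color1", "Color2", "MaturitySize",
                       "FurLength", "Vaccinated", "Sterilized", "Health"]
numeric_columns = ["Age", "Fee", "PhotoAmt"]
transformations = ([{"categorical": {"column_name": c}} for c in categorical_columns]
                   + [{"numeric": {"column_name": c}} for c in numeric_columns])
# TODO 1: construct the training job object
job = aiplatform.AutoMLTabularTrainingJob(
    display_name="train-petfinder-automl-1",
    optimization_prediction_type="classification",
    column_transformations=transformations,
)
# TODO 2: run the training pipeline; returns a Model object when it finishes
model = job.run(
    dataset=ds,
    target_column="Adopted",
    training_fraction_split=0.8,    # assumed split
    validation_fraction_split=0.1,  # assumed split
    test_fraction_split=0.1,        # assumed split
    model_display_name="adopted-prediction-model",
    disable_early_stopping=False,
)
###Output
_____no_output_____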
###Markdown
Deploy your model. Before you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things: 1. Creates an `Endpoint` resource to which the `Model` resource will be deployed. 2. Deploys the `Model` resource to the `Endpoint` resource. Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# Deploy the model resource to the serving endpoint resource
endpoint = # TODO 3 -- Your code goes here(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
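###Markdown
For reference, a hedged sketch of TODO 3: in the Vertex AI SDK the deployment call is the `Model` object's `deploy` method, which creates an `Endpoint` and deploys the model to it. This is an assumed completion, not the official solution.
###Code
# Hedged sketch (assumed completion of TODO 3): deploy the trained model
endpoint = model.deploy(
    machine_type="n1-standard-4",
)
###Output
_____no_output_____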
###Markdown
Predict on the endpoint. * This sample instance is taken from an observation in which `Adopted` = **Yes**. * Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` tell Vertex AI how to transform the inputs to their defined types.
###Code
# Make a prediction using the sample values
prediction = # TODO 4 -- Your code goes here(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
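###Markdown
For reference, a hedged sketch of TODO 4: online predictions go through the endpoint's `predict` method, which takes a list of instances. This is an assumed completion, not the official solution.
###Code
# Hedged sketch (assumed completion of TODO 4): send one instance to the endpoint
instance = {
    "Type": "Cat", "Age": "3", "Breed1": "Tabby", "Gender": "Male",
    "Color1": "Black", "Color2": "White", "MaturitySize": "Small",
    "FurLength": "Short", "Vaccinated": "No", "Sterilized": "No",
    "Health": "Healthy", "Fee": "100", "PhotoAmt": "2",
}
prediction = endpoint.predict([instance])
print(prediction)
###Output
_____no_output_____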
###Markdown
Undeploy the model. To undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter: - `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# Undeploy the model resource
# TODO 5 -- Your code goes here
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
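###Markdown
For reference, a hedged sketch of TODO 5, using the `deployed_model_id` exposed by the prediction object as described above. This is an assumed completion, not the official solution.
###Code
# Hedged sketch (assumed completion of TODO 5): undeploy the model from the endpoint
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
_____no_output_____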
###Markdown
Cleaning up. To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Training Job - Model - Endpoint - Cloud Storage Bucket. **Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction. Overview: In this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning Objective: In this notebook, you learn how to: * Create a Vertex AI model training job. * Train an AutoML tabular model. * Deploy the `model` resource to a serving `endpoint` resource. * Make a prediction by sending data. * Undeploy the `model` resource. Introduction: In this notebook, you will use Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console. Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library. Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernel. After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID. **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Timestamp. If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket. **The following steps are required, regardless of your notebook environment.** This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored. Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification_toy.csv"
! gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/petfinder-tabular-classification_toy.csv [Content-Type=text/csv]...
[1 files][378.2 KiB/378.2 KiB]
Operation completed over 1 objects/378.2 KiB.
###Markdown
Import Vertex SDK for Python. Import the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
Tutorial. Now you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSV. This section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a Model. Once the training job is defined, you will create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
# Constructs an AutoML Tabular Training Job
job = # TODO 1 -- Your code goes here(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# Create and train the model object
# This will take around two and a half hours to run
model = # TODO 2a -- Your code goes here(
dataset=ds,
target_column="Adopted",
# Define training, validation and test fraction for training
# TODO 2b -- Your code goes here
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
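###Markdown
The deprecation warning above suggests `column_specs` instead of `column_transformations`. A hedged sketch of what the equivalent job construction might look like with `column_specs` (assumed SDK usage, not part of the original lab) follows.
###Code
# Hedged sketch (assumed alternative): column_specs maps column name -> transformation type
column_specs = {
    "Type": "categorical", "Age": "numeric", "Breed1": "categorical",
    "Color1": "categorical", "Color2": "categorical", "MaturitySize": "categorical",
    "FurLength": "categorical", "Vaccinated": "categorical", "Sterilized": "categorical",
    "Health": "categorical", "Fee": "numeric", "PhotoAmt": "numeric",
}
job = aiplatform.AutoMLTabularTrainingJob(
    display_name="train-petfinder-automl-1",
    optimization_prediction_type="classification",
    column_specs=column_specs,
)
###Output
_____no_output_____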
###Markdown
Deploy your model. Before you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things: 1. Creates an `Endpoint` resource to which the `Model` resource will be deployed. 2. Deploys the `Model` resource to the `Endpoint` resource. Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# Deploy the model resource to the serving endpoint resource
endpoint = # TODO 3 -- Your code goes here(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
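###Markdown
Besides the machine type, `deploy` also accepts scaling arguments. The values below are illustrative assumptions, not settings used by this lab.
###Code
# Hedged sketch: deploy with explicit (assumed) replica settings
endpoint = model.deploy(
    machine_type="n1-standard-4",
    min_replica_count=1,  # assumed value
    max_replica_count=1,  # assumed value
    sync=True,            # block until the deployment finishes
)
###Output
_____no_output_____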
###Markdown
Predict on the endpoint. * This sample instance is taken from an observation in which `Adopted` = **Yes**. * Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` tell Vertex AI how to transform the inputs to their defined types.
###Code
# Make a prediction using the sample values
prediction = # TODO 4 -- Your code goes here(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
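###Markdown
The returned prediction object carries the model output. A hedged sketch of reading it follows; the `classes`/`scores` keys are assumed from the AutoML tabular classification response format.
###Code
# Hedged sketch: inspect the prediction response (assumed structure)
print(prediction.deployed_model_id)    # id of the deployed model that served the request
for result in prediction.predictions:  # one entry per instance that was sent
    classes = result["classes"]        # assumed key
    scores = result["scores"]          # assumed key
    best_class, best_score = max(zip(classes, scores), key=lambda pair: pair[1])
    print("Predicted:", best_class, "with score", best_score)
###Output
_____no_output_____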
###Markdown
Undeploy the model. To undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter: - `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# Undeploy the model resource
# TODO 5 -- Your code goes here
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
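###Markdown
If the prediction object is no longer available, the deployed model ids can also be read from the endpoint itself, and `undeploy_all` removes every model deployed to it. This is a hedged sketch of assumed SDK usage, not part of the original lab.
###Code
# Hedged sketch: undeploy without a prediction object in hand
for deployed_model in endpoint.list_models():  # deployed models on this endpoint
    print("Deployed model id:", deployed_model.id)
endpoint.undeploy_all()  # undeploy everything from the endpoint
###Output
_____no_output_____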
###Markdown
Cleaning up. To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Training Job - Model - Endpoint - Cloud Storage Bucket. **Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
|
.ipynb_checkpoints/LIFX Single Set of Tiles Testing-checkpoint.ipynb | ###Markdown
PurposeThis notesbooks purpose is to experiment with writting to a single string of LIFX tiles using the AIOLIFX library.https://github.com/frawau/aiolifx
###Code
from lifxlan import *
#!/usr/bin/env python
# coding=utf-8
import sys
from copy import deepcopy
from time import sleep
from lifxlan import GREEN, LifxLAN, RED
def main():
num_lights = 3
if len(sys.argv) != 2:
print("\nDiscovery will go much faster if you provide the number of lights on your LAN:")
print(" python {} <number of lights on LAN>\n".format(sys.argv[0]))
else:
num_lights = int(sys.argv[1])
# instantiate LifxLAN client, num_lights may be None (unknown).
# In fact, you don't need to provide LifxLAN with the number of bulbs at all.
# lifx = LifxLAN() works just as well. Knowing the number of bulbs in advance
# simply makes initial bulb discovery faster.
print("Discovering lights...")
lifx = LifxLAN(num_lights,False)
# get devices
multizone_lights = lifx.get_multizone_lights()
if len(multizone_lights) > 0:
strip = multizone_lights[0]
print("Selected {}".format(strip.get_label()))
all_zones = strip.get_color_zones()
original_zones = deepcopy(all_zones)
zone_count = len(all_zones)
delay = 0.06
snake_color = RED
background_color = GREEN
snake_size = zone_count/2 # length of snake in zones
tail = 0
head = snake_size - 1
try:
while True:
# Case 1: Snake hasn't wrapped around yet
if head > tail:
if tail > 0:
strip.set_zone_color(0, tail-1, background_color, 0, True, 0)
strip.set_zone_color(tail, head, snake_color, 0, True, 0)
if head < zone_count - 1:
strip.set_zone_color(head+1, zone_count-1, background_color, 0, True, 1)
# Case 2: Snake has started to wrap around
else:
if head > 0:
strip.set_zone_color(0, head-1, snake_color, 0, True, 0)
strip.set_zone_color(head, tail, background_color, 0, True, 0)
if tail < zone_count - 1:
strip.set_zone_color(tail+1, zone_count-1, snake_color, 0, True, 1)
# update indices for the snake's head and tail
tail = (tail+1) % zone_count
head = (head+1) % zone_count
sleep(delay)
except KeyboardInterrupt:
strip.set_zone_colors(original_zones, 500, True)
if __name__=="__main__":
main()
#!/usr/bin/env python
# coding=utf-8
import sys
from lifxlan import LifxLAN
def main():
num_lights = None
if len(sys.argv) != 2:
print("\nDiscovery will go much faster if you provide the number of lights on your LAN:")
print(" python {} <number of lights on LAN>\n".format(sys.argv[0]))
else:
num_lights = int(sys.argv[1])
# instantiate LifxLAN client, num_lights may be None (unknown).
# In fact, you don't need to provide LifxLAN with the number of bulbs at all.
# lifx = LifxLAN() works just as well. Knowing the number of bulbs in advance
# simply makes initial bulb discovery faster.
print("Discovering lights...")
lifx = LifxLAN(num_lights)
# get devices
devices = lifx.get_lights()
print("\nFound {} light(s):\n".format(len(devices)))
for d in devices:
try:
print(d)
except:
pass
if __name__=="__main__":
main()
from lifxlan import LifxLAN
lifx = LifxLAN(23)
devices = lifx.get_lights()
devices
tilechain_lights = lifx.get_tilechain_lights()
tilechain_lights
for d in tilechain_lights:
try:
print(d)
except:
pass
def get_random_color():
return randint(0, 65535), randint(0, 65535), randint(0, 65535), randint(2500, 9000)
len(tilechain_lights)
print (tilechain_lights[5])
num_frames = 2
invader_matrix = \
[[[1, 1, 1, 0, 0, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 1, 0, 1],
[0, 1, 1, 1, 1, 1, 1, 0],
[1, 0, 1, 1, 1, 1, 0, 1]],
[[1, 1, 1, 0, 0, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 0, 1, 1],
[1, 0, 1, 0, 0, 1, 0, 1],
[0, 1, 0, 1, 1, 0, 1, 0]]]
duration_ms = 5
DIM_BLUE = PURPLE
DIM_BLUE[2] = DIM_BLUE[2]/3
palette = {0: GREEN,
1: BLUE#DIM_BLUE
}
t = tilechain_lights[5] # grab the first tilechain
print("Selected TileChain light: {}".format(t.get_label()))
original_colors = t.get_tilechain_colors()
num_tiles = t.get_tile_count()
while True:
for frame in range(num_frames):
sprite = []
for x in range(8):
for y in range(8):
sprite.append(palette[invader_matrix[frame][x][y]])
for index in range(num_tiles):
t.set_tile_colors(index, sprite, duration_ms, rapid=True)
sleep(1)
from lifxlan import *
from random import randint
from time import sleep
###Output
_____no_output_____
###Markdown
Multi Zone Chase
###Code
from lifxlan import *
from random import randint, choice
from time import sleep
from copy import deepcopy
lan = LifxLAN()
tilechain_lights = lan.get_tilechain_lights()
len(tilechain_lights)
for i in tilechain_lights:
print (i)
tile_chain = tilechain_lights[16]
def set_background(cols, rows):
hue = 0
background_colors = []
for row in range(rows):
color_row = []
for col in range(cols):
color_row.append((hue, 65535, 2000, 4900))
hue += int(65535.0 / (cols * rows))
background_colors.append(color_row)
return background_colors
def get_random_saturated_color():
return randint(0, 65535), 65535, randint(0, 65535), 3000
print("Selected TileChain light: {}".format(tile_chain.get_label()))
(cols, rows) = tile_chain.get_canvas_dimensions()
original_colors = tile_chain.get_tilechain_colors()
background_colors = set_background(cols, rows)
tile_chain.project_matrix(background_colors, 2000)
dots = []
max_dots = 50
duration_ms = 150
dot_rate = 0.1
matrix = deepcopy(background_colors)
while True:
dot = [choice(range(rows)), choice(range(cols))]
dots.append(dot)
if len(dots) > max_dots:
old_dot = dots.pop(0)
matrix[int(old_dot[0])][int(old_dot[1])] = background_colors[int(old_dot[0])][int(old_dot[1])]
matrix[int(dot[0])][int(dot[1])] = get_random_saturated_color()
#Catch exceptions when the computer sleeps so we can resume when we wake
try:
tile_chain.project_matrix(matrix, duration_ms, rapid=True)
except:
pass
sleep(dot_rate)
x = tilechain_lights[5]
x = TileChain("d0:73:d5:3c:56:6e", "10.101.30.80")
type(x)
x.get_tile_info()
x.get_canvas_dimensions()
x.get_tile_map()
help(x)
x.get_xy_vals()
help(x.get_xy_vals)
from random import randint, betavariate
from time import sleep
def get_fire_color():
return (int(800 + (5000 * betavariate(0.2, 0.9))), randint(60000, 65535), int(65535 * betavariate(0.05, 1)), randint(2500, 3500))
(cols, rows) = x.get_canvas_dimensions()
cols
rows
original_colors = t.get_tilechain_colors()
for row in original_colors[0]:
print(row)
#original_colors[0][1] = (0,0,0,3500)
#original_colors[0]
original_colors[0][0] = (0,0,0,3500)
original_colors[0]
hue = 0
coal_colors = []
for row in range(rows):
color_row = []
for col in range(cols):
color_row.append(get_fire_color())
hue += int(65535.0/(cols*rows))
coal_colors.append(color_row)
coal_colors
x.project_matrix(coal_colors)
duration_ms = 100
while(True):
proportion_change = 0.2
sample_size = int((rows * cols) * proportion_change)
if sample_size % 2 == 1:
sample_size = int(sample_size - 1)
col_samples = [randint(0, cols-1) for i in range(sample_size)]
row_samples = [randint(0, rows-1) for i in range(sample_size)]
for i in range(0, sample_size):
coal_colors[row_samples[i]][col_samples[i]] = get_fire_color()
x.project_matrix(coal_colors, duration_ms, rapid=True)
sleep(max(duration_ms/2000.0, 0.05))
x.get_tile_count()
x.get_tile_info()
tiles = x.get_tile_info()
num_tiles = x.get_tile_count()
x_vals = []
y_vals = []
y = tiles[0]
print (y.width)
print (y.height)
print (y.user_x)
print (y.user_y)
z = tiles[0]
print (z.width)
print (z.height)
print (z.user_x)
print (z.user_y)
w.height
x.set_tilechain_colors(original_colors)
len(original_colors[0])
new_colors = []
new_colors = []
for i in range(64):
my_color = (0,0,0,3500)
new_colors.append(my_color)
for i in range(5):
original_colors[i]= new_colors
x.set_tilechain_colors(original_colors)
len(new_colors)
new_colors
original_colors[0]= new_colors
#Full Vertical Lines
for i in range(8):
new_colors[i] = (red, full_color, full, warm)
new_colors[(i+8)]= (orange,full_color, minim, warm)
new_colors[(i+16)]= (yellow, full_color, threequarter, warm)
new_colors[(i+24)]= (green, full_color, full, warm)
new_colors[(i+32)]= (lightblue, full_color, full, warm)
new_colors[(i+40)]= (darkblue,full_color, full, cool)
new_colors[(i+48)]= (purple, full_color, full, cool)
new_colors[(i+56)]= (violet, full_color, half, warm)
original_colors[0]= new_colors
original_colors[1]= new_colors
original_colors[2]= new_colors
original_colors[3]= new_colors
original_colors[4]= new_colors
x.set_tilechain_colors(original_colors)
#colors
red = 0
orange = 5000
yellow =10000
green = 20000
lightblue = 30000
blue = 45000
indigo = 50000
violet = 65000
#color_saturation
no_color = 0
mid_color = 32500
full_color = 65000
#brightness
off = 0
minim = 1
quarter = 65000/4
half = 65000/2
threequarter = 48750
full = 65000
#warmth
warm = 0
balanced = 65000/2
cool = 65000
#equal divided colors
red = (0)
orange = (65000/7)
yellow = (65000/7)*2
green = (65000/7)*3
lightblue = (65000/7)*4
darkblue = (65000/7)*5
indigo = (65000/7)*6
violet = (65000/7)*7
#colors
col1 = 0
col2 = 8000
col3 = 16000
col4 = 24000
col5 = 32000
col6 = 40000
col7 = 48000
col8 = 56000
#Bottom Half vertical stripes
reset_lights(x)
for i in range(4):
new_colors[i+4] = (red, full_color, full, warm)
new_colors[(i+12)]= (orange, full_color, full, warm)
new_colors[(i+20)]= (yellow,full_color, full, warm)
new_colors[(i+28)]= (green, full_color, full, warm)
new_colors[(i+36)]= (lightblue,full_color, full, warm)
new_colors[(i+44)]= (darkblue,full_color, full, warm)
new_colors[(i+52)]= (indigo, full_color, full, warm)
new_colors[(i+60)]= (violet, full_color, full, warm)
original_colors[0]= new_colors
x.set_tilechain_colors(original_colors)
#Single Horizontol Blue Strip
i = 2
new_colors[i] = (blue, full_color, full, warm)
new_colors[(i+8)]= (blue, full_color, full, warm)
new_colors[(i+16)]= (blue, full_color, full, warm)
new_colors[(i+24)]= (blue, full_color, full, warm)
new_colors[(i+32)]= (blue, full_color, full, warm)
new_colors[(i+40)]= (blue, full_color, full, warm)
new_colors[(i+48)]= (blue, full_color, full, warm)
new_colors[(i+56)]= (blue, full_color, full, warm)
original_colors[0]= new_colors
original_colors[1]= new_colors
original_colors[2]= new_colors
original_colors[3]= new_colors
original_colors[4]= new_colors
x.set_tilechain_colors(original_colors)
#reset_lights(x)
# The orientation of the tiles needs to be taken into account. When setting it all up, make sure that you have them in the right orientation.
#I wonder if the orientation in the app makes a difference? I'm guessing probably not.
def reset_lights(tile):
original_colors = []
new_colors = []
for i in range(64):
my_color = (0,0,0,3500)
new_colors.append(my_color)
for i in range(5):
original_colors.append(new_colors)
tile.set_tilechain_colors(original_colors)
return original_colors
original_colors = reset_lights(x)
tilechain_lights = lifx.get_tilechain_lights()
for i in tilechain_lights:
print (i.get_label())
x = tilechain_lights[-3]
x.get_label()
(65000/8)
(65000/8)*2
(65000/8)*3
(65000/8)*4
(65000/8)*5
(65000/8)*6
(65000/8)*7
(65000/8)*8
for i in range(5):
print (i)
###Output
0
1
2
3
4
|
K-meansWithPython-master/Kmeans.ipynb | ###Markdown
###Code
from copy import deepcopy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
plt.rcParams['figure.figsize'] = (16,9)
plt.style.use('ggplot')
#importing data set
data=pd.read_csv('https://raw.githubusercontent.com/arivle/K-meansWithPython/master/xclara/xclara.csv')
print("input data and shape")
print(data.shape)
data.head()
#Getting the values and plotting it
f1 = data['V1'].values
f2 = data['V2'].values
X = np.array(list(zip(f1, f2)))
plt.scatter(f1,f2,c='black', s=7)
#Euclidean Distance Calculator
def dist(a, b, ax=1):
return np.linalg.norm(a-b, axis=ax)
#number of clusters
k=27
#X coordinates of random centroids
C_x= np.random.randint(0,np.max(X)-20, size=k)
#Y coordinates of random centroids
C_y = np.random.randint(0, np.max(X)-20, size=k)
C= np.array(list(zip(C_x, C_y)), dtype=np.float32)
print("initial centroids")
print(C)
#plotting along with the Centroids
plt.scatter(f1, f2, c='#050505', s=7)
plt.scatter(C_x, C_y, marker='*', s=200, c='g')
#to store the value of centroids when it updates
C_old = np.zeros(C.shape)
#Cluster Lables(0,1,2)
clusters = np.zeros(len(X))
#Error func. - Distance between new centroids and old centroids
error = dist(C, C_old, None)
#Loop will run till the error between new centroids and old centroids
while error !=0:
#assigning each value to its closest cluster
for i in range(len(X)):
distances = dist(X[i], C)
cluster = np.argmin(distances)
clusters[i] = cluster
#storing the old centroid values
C_old= deepcopy(C)
#finding the new centroids by taking the average value
for i in range(k):
points = [X[j] for j in range(len(X)) if clusters[j] == i]
C[i] = np.mean(points, axis=0)
error = dist(C,C_old, None)
colors = ['r','g', 'b', 'y', 'c', 'm']
fig, ax = plt.subplots()
for i in range(k):
    points = np.array([X[j] for j in range(len(X)) if clusters[j] == i])
    if len(points) > 0:
        ax.scatter(points[:, 0], points[:, 1], s=7, c=colors[i % len(colors)])
ax.scatter(C[:,0], C[:,1], marker='*', s=200, c='#050505')
from copy import deepcopy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
plt.rcParams['figure.figsize'] = (16,9)
plt.style.use('ggplot')
#importing data set
data=pd.read_csv('https://raw.githubusercontent.com/arivle/K-meansWithPython/master/xclara/xclara.csv')
print("input data and shape")
print(data.shape)
data.head()
#Getting the values and plotting it
f1 = data['V1'].values
f2 = data['V2'].values
X = np.array(list(zip(f1, f2)))
plt.scatter(f1,f2,c='black', s=7)
k=27
kmeans = KMeans(n_clusters=k).fit(X)
centroids = kmeans.cluster_centers_
print(centroids)
colors = ['r','g', 'b', 'y', 'c', 'm']
fig, ax = plt.subplots()
ax.scatter(X[:, 0], X[:,1], c= kmeans.labels_.astype('float64'), s=200, alpha=0.5)
ax.scatter(centroids[:, 0], centroids[:, 1],marker='*', c='#050505', s=200)
###Output
input data and shape
(3000, 2)
[[ 6.29899561 10.37145191]
[ 64.00225103 -11.1756371 ]
[ 44.70991309 65.90302552]
[ 40.67904794 46.12664143]
[ 65.31337396 -24.82118052]
[ 14.25438151 -0.75312941]
[ 79.734465 5.97227092]
[ 87.52683697 -9.87118712]
[ 56.02555217 49.02451072]
[ 24.32462925 67.40344204]
[ 22.46289677 24.81766154]
[ -0.16444241 -1.4551634 ]
[ 80.42920685 -24.22132055]
[ 53.95441294 -3.45654555]
[ 5.90748668 24.33237435]
[ 55.05046248 66.04031938]
[ 16.1985725 12.85218728]
[ 73.07015759 -4.5490788 ]
[ 35.30412048 62.30482348]
[ 52.58281712 -18.51006661]
[ -5.59509391 12.58599256]
[ 64.377702 3.85605061]
[ 27.09393753 5.00220245]
[ 26.89615043 51.96045991]
[ 40.46201688 77.47017413]
[ 43.95955164 55.57863399]
[ 73.99207336 -14.9406303 ]]
|
00_dynamic_graph.ipynb | ###Markdown
Dynamic Graph. **Author:** [PaddlePaddle](https://github.com/PaddlePaddle) **Date:** 2021.01 **Abstract:** Starting with version 2.0 of the PaddlePaddle open-source framework, dynamic-graph (eager) mode is enabled for users by default. In this mode, each operation is executed immediately and returns its result right away (instead of defining the whole network structure first and then running it). Dynamic-graph mode makes it easier to organize code and to debug programs; this tutorial introduces how to use PaddlePaddle's dynamic graph. 1. Environment setup. This tutorial is written for Paddle 2.0; if your environment is not this version, please first follow the official [installation guide](https://www.paddlepaddle.org.cn/install/quick) for Paddle 2.0.
###Code
import paddle
import paddle.nn.functional as F
import numpy as np
print(paddle.__version__)
###Output
2.0.1
###Markdown
2. Basic usage. In dynamic-graph mode, you can directly run an API provided by PaddlePaddle, and it immediately returns the result to Python. There is no longer any need to first build a computation graph and then feed it data to run.
###Code
a = paddle.randn([4, 2])
b = paddle.arange(1, 3, dtype='float32')
print(a)
print(b)
c = a + b
print(c)
d = paddle.matmul(a, b)
print(d)
###Output
Tensor(shape=[4, 2], dtype=float32, place=CPUPlace, stop_gradient=True,
[[-0.98506504, 0.89734167],
[ 0.01853172, 1.28535342],
[ 2.63832688, 0.27384657],
[ 0.27094686, 1.21891129]])
Tensor(shape=[2], dtype=float32, place=CPUPlace, stop_gradient=True,
[1., 2.])
Tensor(shape=[4, 2], dtype=float32, place=CPUPlace, stop_gradient=True,
[[0.01493496, 2.89734173],
[1.01853168, 3.28535342],
[3.63832688, 2.27384663],
[1.27094686, 3.21891117]])
Tensor(shape=[4], dtype=float32, place=CPUPlace, stop_gradient=True,
[0.80961829, 2.58923864, 3.18601990, 2.70876932])
###Markdown
3. Using Python control flow. In dynamic-graph mode, you can use Python conditionals and loops as the control statements that drive the computation of your neural network (ops such as `cond` and `loop` are no longer needed).
###Code
a = paddle.to_tensor(np.array([1, 2, 3]))
b = paddle.to_tensor(np.array([4, 5, 6]))
for i in range(10):
r = paddle.rand([1,])
if r > 0.5:
c = paddle.pow(a, i) + b
print("{} +> {}".format(i, c.numpy()))
else:
c = paddle.pow(a, i) - b
print("{} -> {}".format(i, c.numpy()))
###Output
0 +> [5 6 7]
1 +> [5 7 9]
2 -> [-3 -1 3]
3 +> [ 5 13 33]
4 -> [-3 11 75]
5 -> [ -3 27 237]
6 -> [ -3 59 723]
7 -> [ -3 123 2181]
8 -> [ -3 251 6555]
9 +> [ 5 517 19689]
###Markdown
4. Building more flexible networks: control flow. - Dynamic graphs can be used to build more flexible networks, for example selecting different branch sub-networks based on control flow, or conveniently building networks with shared weights. Next we look at a concrete example in which the second linear transformation runs with only a 0.5 probability. - In the sequence-to-sequence-with-attention machine translation example, you will see the more practical flexibility gained from building RNN-style networks with a dynamic graph.
###Code
class MyModel(paddle.nn.Layer):
def __init__(self, input_size, hidden_size):
super(MyModel, self).__init__()
self.linear1 = paddle.nn.Linear(input_size, hidden_size)
self.linear2 = paddle.nn.Linear(hidden_size, hidden_size)
self.linear3 = paddle.nn.Linear(hidden_size, 1)
def forward(self, inputs):
x = self.linear1(inputs)
x = F.relu(x)
if paddle.rand([1,]) > 0.5:
x = self.linear2(x)
x = F.relu(x)
x = self.linear3(x)
return x
total_data, batch_size, input_size, hidden_size = 1000, 64, 128, 256
x_data = np.random.randn(total_data, input_size).astype(np.float32)
y_data = np.random.randn(total_data, 1).astype(np.float32)
model = MyModel(input_size, hidden_size)
loss_fn = paddle.nn.MSELoss(reduction='mean')
optimizer = paddle.optimizer.SGD(learning_rate=0.01,
parameters=model.parameters())
for t in range(200 * (total_data // batch_size)):
idx = np.random.choice(total_data, batch_size, replace=False)
x = paddle.to_tensor(x_data[idx,:])
y = paddle.to_tensor(y_data[idx,:])
y_pred = model(x)
loss = loss_fn(y_pred, y)
if t % 200 == 0:
print(t, loss.numpy())
loss.backward()
optimizer.step()
optimizer.clear_grad()
###Output
0 [1.1373708]
200 [0.65635085]
400 [0.6270926]
600 [0.35788968]
800 [0.08681857]
1000 [0.04665717]
1200 [0.01439959]
1400 [0.00937668]
1600 [0.00736369]
1800 [0.01451359]
2000 [0.01145541]
2200 [0.00535691]
2400 [0.00316424]
2600 [0.00078524]
2800 [0.00091959]
###Markdown
5. Building more flexible networks: shared weights. - Dynamic graphs also make it easier to create networks with shared weights; the example below shows a simple AutoEncoder that shares weights. - You can also refer to the image-search example for a more practical use of shared parameter weights.
###Code
inputs = paddle.rand((256, 64))
linear = paddle.nn.Linear(64, 8, bias_attr=False)
loss_fn = paddle.nn.MSELoss()
optimizer = paddle.optimizer.Adam(0.01, parameters=linear.parameters())
for i in range(10):
hidden = linear(inputs)
# weight from input to hidden is shared with the linear mapping from hidden to output
outputs = paddle.matmul(hidden, linear.weight, transpose_y=True)
loss = loss_fn(outputs, inputs)
loss.backward()
print("step: {}, loss: {}".format(i, loss.numpy()))
optimizer.step()
optimizer.clear_grad()
###Output
step: 0, loss: [0.3065048]
step: 1, loss: [0.27628338]
step: 2, loss: [0.24458247]
step: 3, loss: [0.21028072]
step: 4, loss: [0.17704524]
step: 5, loss: [0.14863843]
step: 6, loss: [0.12725674]
step: 7, loss: [0.11261991]
step: 8, loss: [0.10347761]
step: 9, loss: [0.09852622]
|
watermark/multi-class-text-classification-with-lstm.ipynb | ###Markdown
all credits to https://towardsdatascience.com/multi-class-text-classification-with-lstm-1590bee1bd17. I made minor changes: data from kaggle The Data
###Code
# get data file
! pip install -q kaggle
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
# Then move kaggle.json into the folder where the API expects to find it.
!mkdir -p ~/.kaggle/ && mv kaggle.json ~/.kaggle/ && chmod 600 ~/.kaggle/kaggle.json
! pwd
!kaggle datasets download cfpb/us-consumer-finance-complaints
!unzip us-consumer-finance-complaints.zip
!ls
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/content'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv('/content/consumer_complaints.csv')
df.info()
df.head()
df['product'].value_counts()
#Plotly notebook mode with google colaboratory
def configure_plotly_browser_state():
import IPython
display(IPython.core.display.HTML('''
<script src="/static/components/requirejs/require.js"></script>
<script>
requirejs.config({
paths: {
base: '/static/base',
plotly: 'https://cdn.plot.ly/plotly-latest.min.js?noext',
},
});
</script>
'''))
from plotly.offline import init_notebook_mode, iplot
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
configure_plotly_browser_state()
df['product'].value_counts().sort_values(ascending = False).iplot(kind='bar', yTitle = "Number of complaints",
title = 'Number complaints in each product')
def print_plot(index):
example = df[df.index == index][["consumer_complaint_narrative",'product']].values[0]
if len(example) > 0:
print(example[0])
print('Product:', example[1])
print_plot(0)
###Output
nan
Product: Mortgage
###Markdown
Text Pre-processing
###Code
import re
from nltk.corpus import stopwords
import nltk
df = df.reset_index( drop = True)
REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]')
BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')
nltk.download('stopwords')
STOPWORDS = set(stopwords.words('english'))
def clean_text(text):
"""
text as string
return: modified initial string
"""
text = text.lower()
text = REPLACE_BY_SPACE_RE.sub(' ', text)
text = BAD_SYMBOLS_RE.sub(' ', text)
    text = text.replace("x", '') # remove 'x' characters - the complaints use strings of 'x' to mask redacted details
text = " ".join(word for word in text.split() if word not in STOPWORDS)
return text
df['consumer_complaint_narrative'] = df['consumer_complaint_narrative'].astype(str)
df['consumer_complaint_narrative'] = df['consumer_complaint_narrative'].apply(clean_text)
df['consumer_complaint_narrative'] = df['consumer_complaint_narrative'].str.replace(r'\d+', '', regex=True)  # strip remaining digits
###Output
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
LSTM Modeling
###Code
# LSTM Modeling
from tensorflow.keras.preprocessing.text import Tokenizer
# The maximum number of words to be used. (most frequent)
MAX_NB_WORDS = 50000
# Max number of words in each complaint.
MAX_SEQUENCE_LENGTH = 250
# This is fixed.
EMBEDDING_DIM = 100
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(df['consumer_complaint_narrative'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
from tensorflow.keras.preprocessing.sequence import pad_sequences
X = tokenizer.texts_to_sequences(df['consumer_complaint_narrative'].values)
X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data tensor:', X.shape)
#Converting categorical labels to numbers.
Y = pd.get_dummies(df['product']).values
print('Shape of label tensor:', Y.shape)
#Train test split.
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.10, random_state = 42)
print(X_train.shape,Y_train.shape)
print(X_test.shape,Y_test.shape)
from tensorflow.keras import Sequential
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import *
from tensorflow.keras.utils import to_categorical
#Y_train = to_categorical(Y_train, 11)
model = Sequential()
model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.2))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(11, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
epochs = 5
batch_size = 64
history = model.fit(X_train, Y_train, epochs=epochs,
batch_size=batch_size,validation_split=0.1,
callbacks=[EarlyStopping(monitor='val_loss',
patience=3, min_delta=0.0001)])
###Output
Epoch 1/5
|
notebooks/tcrSeq/TCR-seq.ipynb | ###Markdown
TCR-seq protocol By Roman Sasik ([email protected]) This Notebook describes the sequence of commands used in TCR-seq analysis. The multiplexing barcodes are assumed to follow the design described in this paper: _"Linking T-cell receptor sequence to functional phenotype at the single-cell level",_ A Han, J Glanville and MD Davis, Nature Biotechnology, 2014, 32 (7), p.684-92. In addition to the original Perl scripts below, you need to install the fast TCR repertoire processing Java program `mitcr.jar`, which can be downloaded at http://mitcr.milaboratory.com/. The relevant paper is _MiTCR: software for T-cell receptor sequencing data analysis_ by DA Bolotin _et al._, Nature Methods 10, 813-814 (2013). Perl and Java are assumed to be installed. Demultiplexing TCR reads Processing starts with demultiplexing the reads from a single pair of large fastq files:
###Code
!perl demultiplex_fastq_TCRplates.pl Sample_S1_L001_R1_001.fastq Sample_S1_L001_R2_001.fastq
!ls *[A,B].fastq
###Output
01A01A.fastq 01B08B.fastq 01D04A.fastq 01E11B.fastq 01G07A.fastq
01A01B.fastq 01B09A.fastq 01D04B.fastq 01E12A.fastq 01G07B.fastq
01A02A.fastq 01B09B.fastq 01D05A.fastq 01E12B.fastq 01G08A.fastq
01A02B.fastq 01B10A.fastq 01D05B.fastq 01F01A.fastq 01G08B.fastq
01A03A.fastq 01B10B.fastq 01D06A.fastq 01F01B.fastq 01G09A.fastq
01A03B.fastq 01B11A.fastq 01D06B.fastq 01F02A.fastq 01G09B.fastq
01A04A.fastq 01B11B.fastq 01D07A.fastq 01F02B.fastq 01G10A.fastq
01A04B.fastq 01B12A.fastq 01D07B.fastq 01F03A.fastq 01G10B.fastq
01A05A.fastq 01B12B.fastq 01D08A.fastq 01F03B.fastq 01G11A.fastq
01A05B.fastq 01C01A.fastq 01D08B.fastq 01F04A.fastq 01G11B.fastq
01A06A.fastq 01C01B.fastq 01D09A.fastq 01F04B.fastq 01G12A.fastq
01A06B.fastq 01C02A.fastq 01D09B.fastq 01F05A.fastq 01G12B.fastq
01A07A.fastq 01C02B.fastq 01D10A.fastq 01F05B.fastq 01H01A.fastq
01A07B.fastq 01C03A.fastq 01D10B.fastq 01F06A.fastq 01H01B.fastq
01A08A.fastq 01C03B.fastq 01D11A.fastq 01F06B.fastq 01H02A.fastq
01A08B.fastq 01C04A.fastq 01D11B.fastq 01F07A.fastq 01H02B.fastq
01A09A.fastq 01C04B.fastq 01D12A.fastq 01F07B.fastq 01H03A.fastq
01A09B.fastq 01C05A.fastq 01D12B.fastq 01F08A.fastq 01H03B.fastq
01A10A.fastq 01C05B.fastq 01E01A.fastq 01F08B.fastq 01H04A.fastq
01A10B.fastq 01C06A.fastq 01E01B.fastq 01F09A.fastq 01H04B.fastq
01A11A.fastq 01C06B.fastq 01E02A.fastq 01F09B.fastq 01H05A.fastq
01A11B.fastq 01C07A.fastq 01E02B.fastq 01F10A.fastq 01H05B.fastq
01A12A.fastq 01C07B.fastq 01E03A.fastq 01F10B.fastq 01H06A.fastq
01A12B.fastq 01C08A.fastq 01E03B.fastq 01F11A.fastq 01H06B.fastq
01B01A.fastq 01C08B.fastq 01E04A.fastq 01F11B.fastq 01H07A.fastq
01B01B.fastq 01C09A.fastq 01E04B.fastq 01F12A.fastq 01H07B.fastq
01B02A.fastq 01C09B.fastq 01E05A.fastq 01F12B.fastq 01H08A.fastq
01B02B.fastq 01C10A.fastq 01E05B.fastq 01G01A.fastq 01H08B.fastq
01B03A.fastq 01C10B.fastq 01E06A.fastq 01G01B.fastq 01H09A.fastq
01B03B.fastq 01C11A.fastq 01E06B.fastq 01G02A.fastq 01H09B.fastq
01B04A.fastq 01C11B.fastq 01E07A.fastq 01G02B.fastq 01H10A.fastq
01B04B.fastq 01C12A.fastq 01E07B.fastq 01G03A.fastq 01H10B.fastq
01B05A.fastq 01C12B.fastq 01E08A.fastq 01G03B.fastq 01H11A.fastq
01B05B.fastq 01D01A.fastq 01E08B.fastq 01G04A.fastq 01H11B.fastq
01B06A.fastq 01D01B.fastq 01E09A.fastq 01G04B.fastq 01H12A.fastq
01B06B.fastq 01D02A.fastq 01E09B.fastq 01G05A.fastq 01H12B.fastq
01B07A.fastq 01D02B.fastq 01E10A.fastq 01G05B.fastq
01B07B.fastq 01D03A.fastq 01E10B.fastq 01G06A.fastq
01B08A.fastq 01D03B.fastq 01E11A.fastq 01G06B.fastq
###Markdown
This script demultiplexes reads multiplexed in a single pair of large fastq files and saves them into separate fastq files whose names indicate Plate, Well, and TCR isoform (A or B), for instance 01H12B.fastq. Up to _one mismatch_ is allowed in any of the Plate, Well Row, Well Column, and TCR Isoform barcodes. It will create 2x96 files (one per TCR isoform) per Plate (a lot of files!). This script will ignore all reads from plates whose code is commented out (see below in source code). This is useful when there is a mixture of TCR genotyping reads and phenotyping reads. There is a separate demultiplex script for the phenotyping reads (see below). This is `demultiplex_fastq_TCRplates.pl`:
###Code
#!/usr/bin/perl
$fileR1 = $ARGV[0];
$fileR2 = $ARGV[1];
open(F1,$fileR1);
open(F2,$fileR2);
%plate = (
"GCAGA" => "01", #uncomment this line if plate code 01 is among the sequences to be demultiplexed
# "TCGAA" => "02",
# "AACAA" => "03",
# "GGTGC" => "04",
# "TTGGT" => "05",
# "CATTC" => "06",
# "ATTGG" => "07",
# "CGGTT" => "08",
# "ATCCT" => "09",
# "ATGTC" => "10",
# "TCACG" => "11",
# "AGACC" => "12",
# "CCCCA" => "13",
# "GCGCT" => "14",
# "TCCTT" => "15",
# "TATAT" => "16",
# "CGTAA" => "17",
# "AAGGT" => "18",
# "AGCTC" => "19",
# "CTTGC" => "20",
# "GTATC" => "21",
# "TATGA" => "22",
# "CACAC" => "23",
# "ACACT" => "24",
# "ACTAC" => "25",
# "GTTAC" => "26",
);
%row = ( #if you want output for all rows, leave them all uncommented
"TAAGC" => "A",
"TGCAC" => "B",
"CTCAG" => "C",
"GGAAT" => "D",
"CGAGG" => "E",
"AGGAG" => "F",
"TGTTG" => "G",
"CAACT" => "H",
);
%col = ( #if you want output for all columns, leave them all uncommented
"GTTCA" => "01",
"CAGGA" => "02",
"TTATA" => "03",
"CCTGT" => "04",
"ACCGC" => "05",
"ACTTA" => "06",
"GCTAG" => "07",
"GACGT" => "08",
"GGCTA" => "09",
"GAATG" => "10",
"CCAAC" => "11",
"GAGAC" => "12",
);
%TCR = (
"GTCAC" => "A", # TCRA
"GAGAT" => "B",
);
foreach $plateID (keys(%plate)) {
foreach $rowID (keys(%row)) {
foreach $colID (keys(%col)) {
foreach $TCRID (keys(%TCR)) {
$fh = $plate{$plateID}.$row{$rowID}.$col{$colID}.$TCR{$TCRID};
open $fh, '>', $fh.".fastq"; #open file for writing at the end
}
}
}
}
while($A1 = <F1>) { #read 4 lines from R1 and 4 lines from R2
$A2 = <F1>;
$A3 = <F1>;
$A4 = <F1>;
$B1 = <F2>;
$B2 = <F2>;
$B3 = <F2>;
$B4 = <F2>;
$ID = substr($A2, 2, 5); #plate ID barcode
# now find what the true bar code should have been if imperfect match
$score = 0;
$trueID = "";
foreach $key (keys(%plate)) {
my $count = ($ID^$key) =~ tr/\0//;
if ($count > $score) {
$score = $count;
$trueID = $key
}
}
if ($score >= 4) {#accept $trueID as the true plate ID
$plateID = $trueID;
} else {#leave $plateID blank - sequence won't be output
$plateID = ""
}
$ID = substr($A2, 9, 5); #row ID barcode (assumed to sit at the same position as in the phenotyping script below)
# now find what the true bar code should have been if imperfect match
$score = 0;
$trueID = "";
foreach $key (keys(%row)) {
my $count = ($ID^$key) =~ tr/\0//;
if ($count > $score) {
$score = $count;
$trueID = $key
}
}
if ($score >= 4) {#accept $trueID as the true row ID
$rowID = $trueID;
} else {#leave $rowID blank - sequence won't be output
$rowID = ""
}
$ID = substr($B2, 2, 5); #column ID
# now find what the true bar code should have been if imperfect match
$score = 0;
$trueID = "";
foreach $key (keys(%col)) {
my $count = ($ID^$key) =~ tr/\0//;
if ($count > $score) {
$score = $count;
$trueID = $key
}
}
if ($score >= 4) {#accept $true_plateID as the true plate ID
$colID = $trueID;
} else {#leave $plateID blank - sequence won't be output
$colID = ""
}
$ID = substr($B2, 7, 5); #TCR ID
# now find what the true bar code should have been if imperfect match
$score = 0;
$trueID = "";
foreach $key (keys(%TCR)) {
my $count = ($ID^$key) =~ tr/\0//;
if ($count > $score) {
$score = $count;
$trueID = $key
}
}
if ($score >= 4) {
$TCRID = $trueID;
} else {
$TCRID = ""
}
if (exists $plate{$plateID} and exists $row{$rowID} and exists $col{$colID} and exists $TCR{$TCRID}) {
$fh = $plate{$plateID}.$row{$rowID}.$col{$colID}.$TCR{$TCRID};
print $fh $A1.$A2.$A3.$A4.$B1.$B2.$B3.$B4;
};
}
close(F1);
close(F2);
###Output
_____no_output_____
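###Markdown
To make the barcode matching rule in the script above concrete, here is a small Python illustration (a hypothetical helper, not used anywhere in the actual pipeline): a read barcode is accepted for a known barcode only if the two agree at 4 or more of the 5 positions, i.e. at most one mismatch, exactly like the scoring loops in the Perl code.
###Code
def match_barcode(observed, known_barcodes):
    # count matching positions against every known barcode and keep the best hit
    best, best_matches = None, 0
    for bc in known_barcodes:
        matches = sum(a == b for a, b in zip(observed, bc))
        if matches > best_matches:
            best, best_matches = bc, matches
    # for 5-bp barcodes, >= 4 matching positions means at most one mismatch
    return best if best_matches >= 4 else None

print(match_barcode("GCAGC", {"GCAGA", "TCGAA", "AACAA"}))  # one mismatch -> 'GCAGA'
###Output
_____no_output_____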
###Markdown
Analyzing demultiplexed fastq files for TCRA/B species After demultiplexing, each individual fastq file will be processed by `mitcr`. The output is a separate result file for each well, e.g., `01A06A_result.txt`. The example below will produce reports for plate 01, row A and columns 06 through 09 (see source code below).
###Code
!perl analyze_wells.pl
!ls *_result.txt
###Output
01A06B
Initialisation: progress unknown
01A06A
Initialisation: progress unknown
01A08B
Initialisation: progress unknown
01A08A
Initialisation: progress unknown
01A07B
Initialisation: progress unknown
01A07A
Initialisation: progress unknown
01A09B
Initialisation: progress unknown
01A09A
Initialisation: progress unknown
01A06A_result.txt 01A07A_result.txt 01A08A_result.txt 01A09A_result.txt
01A06B_result.txt 01A07B_result.txt 01A08B_result.txt 01A09B_result.txt
###Markdown
The output is a tab-delimited file whose main components are the following (taking file 01A06A_result.txt as an example): The first column is the number of times this sequence is seen; the second column is the fraction (not a percentage) of the total count of sequences in the well. This is especially useful when there are two species of TCRA expressed in a single cell (as in this case). It does not happen with TCRB. The V-, J- and D-alleles of the TCR are listed. The last two lines (a tiny fraction of the number of reads) are a result of sequencing/PCR errors. The program _mitcr_ has an error-checking algorithm that reduces these calls. For details see _MiTCR: software for T-cell receptor sequencing data analysis_ by DA Bolotin _et al._, Nature Methods 10, 813-814 (2013). This is the source of `analyze_wells.pl`:
###Code
#!/usr/bin/perl
%plate = (
"GCAGA" => "01",
# "TCGAA" => "02",
# "AACAA" => "03",
# "GGTGC" => "04",
# "TTGGT" => "05",
# "CATTC" => "06",
# "ATTGG" => "07",
# "CGGTT" => "08",
# "ATCCT" => "09",
# "ATGTC" => "10",
# "TCACG" => "11",
# "AGACC" => "12",
# "CCCCA" => "13",
# "GCGCT" => "14",
# "TCCTT" => "15",
# "TATAT" => "16",
# "CGTAA" => "17",
# "AAGGT" => "18",
# "AGCTC" => "19",
# "CTTGC" => "20",
# "GTATC" => "21",
# "TATGA" => "22",
# "CACAC" => "23",
# "ACACT" => "24",
# "ACTAC" => "25",
# "GTTAC" => "26",
);
%row = ( #uncomment line if you want output for row A, etc.
"TAAGC" => "A",
# "TGCAC" => "B",
# "CTCAG" => "C",
# "GGAAT" => "D",
# "CGAGG" => "E",
# "AGGAG" => "F",
# "TGTTG" => "G",
# "CAACT" => "H",
);
%col = ( #uncomment line if you want output for column 01, etc.
# "GTTCA" => "01",
# "CAGGA" => "02",
# "TTATA" => "03",
# "CCTGT" => "04",
# "ACCGC" => "05",
"ACTTA" => "06",
"GCTAG" => "07",
"GACGT" => "08",
"GGCTA" => "09",
# "GAATG" => "10",
# "CCAAC" => "11",
# "GAGAC" => "12",
);
%TCR = (
"GTCAC" => "A", # TCRA
"GAGAT" => "B",
);
foreach $plateID (sort (keys(%plate))) {
foreach $rowID (sort (keys(%row))) {
foreach $colID (sort (keys(%col))) {
foreach $TCRID (sort (keys(%TCR))) {
$fh = $plate{$plateID}.$row{$rowID}.$col{$colID}.$TCR{$TCRID};
print "$fh\n";
system("java -Xmx10g -jar ./mitcr.jar -pset flex -gene TR$TCR{$TCRID} $fh.fastq $fh\_result.txt")
}
}
}
}
###Output
_____no_output_____
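###Markdown
For a quick look at one of these result files from Python, a minimal sketch (an optional addition, not part of the original protocol; it assumes the tab-delimited layout with a header row and the read count and fraction in the first two columns, as described above):
###Code
import pandas as pd

# Load a single mitcr result table; column names are taken from the file's own header row.
result = pd.read_csv("01A06A_result.txt", sep="\t")

# Column 1 holds the read count and column 2 the per-well fraction,
# so the fractions of all clones in a well should sum to roughly 1.
print(result.iloc[:, 0].sum(), result.iloc[:, 1].sum())
###Output
_____no_output_____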
###Markdown
Demultiplexing phenotyping reads The following command demultiplexes _phenotyping_ reads multiplexed in a single pair of large fastq files and saves them into separate fastq files whose names indicate Plate, Well, and "R1" or "R2" for the left or right read, for instance 03H12R1.fastq. Up to one mismatch is allowed in any of the Plate, Well Row, or Well Column barcodes. It will create 2x96 files per Plate. This script will ignore all reads from plates whose code is commented out (see below in source code). This is useful when there is a mixture of TCR genotyping reads and phenotyping reads.
###Code
!perl demultiplex_fastq_phenoplates.pl Sample_S1_L001_R1_001.fastq Sample_S1_L001_R2_001.fastq
!ls 03*.fastq
###Output
03A01R1.fastq 03B08R2.fastq 03D04R1.fastq 03E11R2.fastq 03G07R1.fastq
03A01R2.fastq 03B09R1.fastq 03D04R2.fastq 03E12R1.fastq 03G07R2.fastq
03A02R1.fastq 03B09R2.fastq 03D05R1.fastq 03E12R2.fastq 03G08R1.fastq
03A02R2.fastq 03B10R1.fastq 03D05R2.fastq 03F01R1.fastq 03G08R2.fastq
03A03R1.fastq 03B10R2.fastq 03D06R1.fastq 03F01R2.fastq 03G09R1.fastq
03A03R2.fastq 03B11R1.fastq 03D06R2.fastq 03F02R1.fastq 03G09R2.fastq
03A04R1.fastq 03B11R2.fastq 03D07R1.fastq 03F02R2.fastq 03G10R1.fastq
03A04R2.fastq 03B12R1.fastq 03D07R2.fastq 03F03R1.fastq 03G10R2.fastq
03A05R1.fastq 03B12R2.fastq 03D08R1.fastq 03F03R2.fastq 03G11R1.fastq
03A05R2.fastq 03C01R1.fastq 03D08R2.fastq 03F04R1.fastq 03G11R2.fastq
03A06R1.fastq 03C01R2.fastq 03D09R1.fastq 03F04R2.fastq 03G12R1.fastq
03A06R2.fastq 03C02R1.fastq 03D09R2.fastq 03F05R1.fastq 03G12R2.fastq
03A07R1.fastq 03C02R2.fastq 03D10R1.fastq 03F05R2.fastq 03H01R1.fastq
03A07R2.fastq 03C03R1.fastq 03D10R2.fastq 03F06R1.fastq 03H01R2.fastq
03A08R1.fastq 03C03R2.fastq 03D11R1.fastq 03F06R2.fastq 03H02R1.fastq
03A08R2.fastq 03C04R1.fastq 03D11R2.fastq 03F07R1.fastq 03H02R2.fastq
03A09R1.fastq 03C04R2.fastq 03D12R1.fastq 03F07R2.fastq 03H03R1.fastq
03A09R2.fastq 03C05R1.fastq 03D12R2.fastq 03F08R1.fastq 03H03R2.fastq
03A10R1.fastq 03C05R2.fastq 03E01R1.fastq 03F08R2.fastq 03H04R1.fastq
03A10R2.fastq 03C06R1.fastq 03E01R2.fastq 03F09R1.fastq 03H04R2.fastq
03A11R1.fastq 03C06R2.fastq 03E02R1.fastq 03F09R2.fastq 03H05R1.fastq
03A11R2.fastq 03C07R1.fastq 03E02R2.fastq 03F10R1.fastq 03H05R2.fastq
03A12R1.fastq 03C07R2.fastq 03E03R1.fastq 03F10R2.fastq 03H06R1.fastq
03A12R2.fastq 03C08R1.fastq 03E03R2.fastq 03F11R1.fastq 03H06R2.fastq
03B01R1.fastq 03C08R2.fastq 03E04R1.fastq 03F11R2.fastq 03H07R1.fastq
03B01R2.fastq 03C09R1.fastq 03E04R2.fastq 03F12R1.fastq 03H07R2.fastq
03B02R1.fastq 03C09R2.fastq 03E05R1.fastq 03F12R2.fastq 03H08R1.fastq
03B02R2.fastq 03C10R1.fastq 03E05R2.fastq 03G01R1.fastq 03H08R2.fastq
03B03R1.fastq 03C10R2.fastq 03E06R1.fastq 03G01R2.fastq 03H09R1.fastq
03B03R2.fastq 03C11R1.fastq 03E06R2.fastq 03G02R1.fastq 03H09R2.fastq
03B04R1.fastq 03C11R2.fastq 03E07R1.fastq 03G02R2.fastq 03H10R1.fastq
03B04R2.fastq 03C12R1.fastq 03E07R2.fastq 03G03R1.fastq 03H10R2.fastq
03B05R1.fastq 03C12R2.fastq 03E08R1.fastq 03G03R2.fastq 03H11R1.fastq
03B05R2.fastq 03D01R1.fastq 03E08R2.fastq 03G04R1.fastq 03H11R2.fastq
03B06R1.fastq 03D01R2.fastq 03E09R1.fastq 03G04R2.fastq 03H12R1.fastq
03B06R2.fastq 03D02R1.fastq 03E09R2.fastq 03G05R1.fastq 03H12R2.fastq
03B07R1.fastq 03D02R2.fastq 03E10R1.fastq 03G05R2.fastq
03B07R2.fastq 03D03R1.fastq 03E10R2.fastq 03G06R1.fastq
03B08R1.fastq 03D03R2.fastq 03E11R1.fastq 03G06R2.fastq
###Markdown
The source code of demultiplex_fastq_phenoplates.pl is here (in this example, Plate 03 contains phenotyping reads):
###Code
#!/usr/bin/perl
$fileR1 = $ARGV[0];
$fileR2 = $ARGV[1];
open(F1,$fileR1);
open(F2,$fileR2);
%plate = (
# "GCAGA" => "01",
# "TCGAA" => "02",
"AACAA" => "03",
# "GGTGC" => "04",
# "TTGGT" => "05",
# "CATTC" => "06",
);
%row = (
"TAAGC" => "A",
"TGCAC" => "B",
"CTCAG" => "C",
"GGAAT" => "D",
"CGAGG" => "E",
"AGGAG" => "F",
"TGTTG" => "G",
"CAACT" => "H",
);
%col = (
"GTTCA" => "01",
"CAGGA" => "02",
"TTATA" => "03",
"CCTGT" => "04",
"ACCGC" => "05",
"ACTTA" => "06",
"GCTAG" => "07",
"GACGT" => "08",
"GGCTA" => "09",
"GAATG" => "10",
"CCAAC" => "11",
"GAGAC" => "12",
);
foreach $plateID (keys(%plate)) {
foreach $rowID (keys(%row)) {
foreach $colID (keys(%col)) {
$fh = $plate{$plateID}.$row{$rowID}.$col{$colID};
$fh1 = $plate{$plateID}.$row{$rowID}.$col{$colID}."1";
$fh2 = $plate{$plateID}.$row{$rowID}.$col{$colID}."2";
open $fh1, '>', $fh."R1.fastq";
open $fh2, '>', $fh."R2.fastq";
}
}
}
while($A1 = <F1>) { #read 4 lines from R1 and 4 lines from R2
$A2 = <F1>;
$A3 = <F1>;
$A4 = <F1>;
$B1 = <F2>;
$B2 = <F2>;
$B3 = <F2>;
$B4 = <F2>;
# now find out if the bar codes make sense
$ID = substr($A2, 2, 5); #plate ID
# now find what the true bar code should have been if imperfect match
$score = 0;
$trueID = "";
foreach $key (keys(%plate)) {
my $count = ($ID^$key) =~ tr/\0//;
if ($count > $score) {
$score = $count;
$trueID = $key
}
}
if ($score >= 4) {#accept $true_plateID as the true plate ID
$plateID = $trueID;
} else {#leave $plateID blank - sequence won't be output
$plateID = ""
}
$ID = substr($A2, 9, 5); #row ID
# now find what the true bar code should have been if imperfect match
$score = 0;
$trueID = "";
foreach $key (keys(%row)) {
my $count = ($ID^$key) =~ tr/\0//;
if ($count > $score) {
$score = $count;
$trueID = $key
}
}
if ($score >= 4) {
$rowID = $trueID;
} else {
$rowID = ""
}
$ID = substr($B2, 2, 5); #column ID
# now find what the true bar code should have been if imperfect match
$score = 0;
$trueID = "";
foreach $key (keys(%col)) {
my $count = ($ID^$key) =~ tr/\0//;
if ($count > $score) {
$score = $count;
$trueID = $key
}
}
if ($score >= 4) {
$colID = $trueID;
} else {
$colID = ""
}
if (exists $plate{$plateID} and exists $row{$rowID} and exists $col{$colID} ) {
$fh1 = $plate{$plateID}.$row{$rowID}.$col{$colID}."1";
$fh2 = $plate{$plateID}.$row{$rowID}.$col{$colID}."2";
print $fh1 $A1.$A2.$A3.$A4;
print $fh2 $B1.$B2.$B3.$B4;
};
}
close(F1);
close(F2);
###Output
_____no_output_____
###Markdown
Analyze demultiplexed phenotyping fastq files for expression levels of 17 cytokines and transcription factors The following command will produce expression counts for all 17 cytokines and TF's, separately for each well:
###Code
!perl count_cytokines.pl
!ls *.count
###Output
03A01R1.count 03B09R1.count 03D05R1.count 03F01R1.count 03G09R1.count
03A02R1.count 03B10R1.count 03D06R1.count 03F02R1.count 03G10R1.count
03A03R1.count 03B11R1.count 03D07R1.count 03F03R1.count 03G11R1.count
03A04R1.count 03B12R1.count 03D08R1.count 03F04R1.count 03G12R1.count
03A05R1.count 03C01R1.count 03D09R1.count 03F05R1.count 03H01R1.count
03A06R1.count 03C02R1.count 03D10R1.count 03F06R1.count 03H02R1.count
03A07R1.count 03C03R1.count 03D11R1.count 03F07R1.count 03H03R1.count
03A08R1.count 03C04R1.count 03D12R1.count 03F08R1.count 03H04R1.count
03A09R1.count 03C05R1.count 03E01R1.count 03F09R1.count 03H05R1.count
03A10R1.count 03C06R1.count 03E02R1.count 03F10R1.count 03H06R1.count
03A11R1.count 03C07R1.count 03E03R1.count 03F11R1.count 03H07R1.count
03A12R1.count 03C08R1.count 03E04R1.count 03F12R1.count 03H08R1.count
03B01R1.count 03C09R1.count 03E05R1.count 03G01R1.count 03H09R1.count
03B02R1.count 03C10R1.count 03E06R1.count 03G02R1.count 03H10R1.count
03B03R1.count 03C11R1.count 03E07R1.count 03G03R1.count 03H11R1.count
03B04R1.count 03C12R1.count 03E08R1.count 03G04R1.count 03H12R1.count
03B05R1.count 03D01R1.count 03E09R1.count 03G05R1.count
03B06R1.count 03D02R1.count 03E10R1.count 03G06R1.count
03B07R1.count 03D03R1.count 03E11R1.count 03G07R1.count
03B08R1.count 03D04R1.count 03E12R1.count 03G08R1.count
###Markdown
The output is a set of tab-delimited files such as 03F03R1.count. Only the R1 read is used for counting; the R2 read is redundant (and lower quality anyway). Each file is a short table with the well name in the header and one gene/count pair per line. The source code of count_cytokines.pl is here (Plate 03 has pheno reads):
###Code
#!/usr/bin/perl
%plate = (
# "GCAGA" => "01",
# "TCGAA" => "02",
"AACAA" => "03",
# "GGTGC" => "04",
# "TTGGT" => "05",
# "CATTC" => "06",
);
%row = (
"TAAGC" => "A",
"TGCAC" => "B",
"CTCAG" => "C",
"GGAAT" => "D",
"CGAGG" => "E",
"AGGAG" => "F",
"TGTTG" => "G",
"CAACT" => "H",
);
%col = (
"GTTCA" => "01",
"CAGGA" => "02",
"TTATA" => "03",
"CCTGT" => "04",
"ACCGC" => "05",
"ACTTA" => "06",
"GCTAG" => "07",
"GACGT" => "08",
"GGCTA" => "09",
"GAATG" => "10",
"CCAAC" => "11",
"GAGAC" => "12",
);
%cyt = (
"GCCGGAGGAGGTGGATGTGC" => "GATA3",
"CCCAACACAGGAGCGCACTG" => "TBET",
"GGCAGCCAAGGCCCTGTCGT" => "FOXP3",
"AGAGGAAGTCCATGTGGGAG" => "RORC",
"GCGAGCTGGTGCGCACCGAC" => "RUNX1",
"GGACCACGCAGGCGAGCTCG" => "RUNX3",
"CCTACACGGCCCCACCTGCC" => "BCL6",
"CCACAGAACTGAAACATCTT" => "IL2",
"CCCAAGCTGAGAACCAAGAC" => "IL10",
"AGACCTCTTTTATGATGGCC" => "IL12A",
"GGTATGGAGCATCAACCTGA" => "IL13",
"CAACCTGAACATCCATAACC" => "IL17A",
"GGGTTCTCTTGGCTGTTACT" => "IFNG",
"GGAGGCGCTCCCCAAGAAGA" => "TNFA",
"CCGAGAAGCGGTACCTGAAC" => "TGFB",
"GCCAACTTTGCAGCCCAGAA" => "PRF1",
"CCACAATATCAAAGAACAGG" => "GZMB",
);
foreach $plateID (sort (keys(%plate))) {
foreach $rowID (sort (keys(%row))) {
foreach $colID (sort (keys(%col))) {
$fh = $plate{$plateID}.$row{$rowID}.$col{$colID};
open(F1,$fh."R1.fastq");
open $fh, '>', $fh."R1.count";
print $fh "\t$fh\n"; #print header
# zero out counters
foreach $key (keys(%cyt)) {$count{$cyt{$key}} = 0};
while($A1 = <F1>) { #read 4 lines from R1 and 4 lines from R2
$A2 = <F1>;
$A3 = <F1>;
$A4 = <F1>;
# now find out if the bar codes make sense
$seq = substr($A2, 36, 20);
if (exists $cyt{$seq}) {$count{$cyt{$seq}}++}; #add to count
};
foreach $key (keys(%cyt)) {
print $fh $cyt{$key}."\t".$count{$cyt{$key}}."\n"
};
close(F1);
close($fh);
}
}
}
###Output
_____no_output_____
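###Markdown
The per-well count files can be combined into a single genes-by-wells table for downstream analysis. A minimal sketch (an optional addition, not part of the original protocol; it only assumes the layout produced by count_cytokines.pl above - a header line carrying the well name followed by one gene/count pair per line):
###Code
import glob
import pandas as pd

# Each *.count file is tab-delimited: a header "\t<well>", then one "<gene>\t<count>" row per gene.
tables = [pd.read_csv(f, sep="\t", index_col=0) for f in sorted(glob.glob("*R1.count"))]

# Concatenate into one genes x wells matrix (columns are named after the wells).
counts = pd.concat(tables, axis=1)
counts.head()
###Output
_____no_output_____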
###Markdown
Cleanup after the exercise:
###Code
!rm 0*
###Output
_____no_output_____ |
titanic/in-depth-visualisations-simple-methods.ipynb | ###Markdown
*Women and kids first! (c) Titanic* ![Titanic](https://www.usnews.com/dims4/USNEWS/4f3cd50/2147483647/thumbnail/970x647/quality/85/?url=http%3A%2F%2Fmedia.beam.usnews.com%2F0e%2Fe187dd2f8f1fe5be9058fa8eef419e%2F7018FE_DA_080929titanic.jpg) Visualization of the Titanic dataset. This notebook presents a thorough exploratory analysis of the dataset in order to demonstrate different visualization techniques as well as provide an understanding of the dependencies and interesting facts. Four ML techniques are used for prediction: RandomForest, LogisticRegression, KNeighbours and an Ensemble. Logistic Regression performed the best with a score of 0.799. UPDATE1: XGBoost was added. UPDATE2: The calculation of the values to be imputed should ONLY be done on the train set and not on the test set or both. ******************* I will be happy to hear any remarks or suggestions, and feel free to upvote if you like it :) ****Have fun with the data!*******************
###Code
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import collections
import re
import copy
from pandas.plotting import scatter_matrix
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('bmh')
%matplotlib inline
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier, plot_importance
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
pd.set_option('display.max_columns', 500)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.info()
###Output
_____no_output_____
###Markdown
1. Exploratory analysis Basic Information about the table
###Code
train.head(2)
train.describe()
###Output
_____no_output_____
###Markdown
The average Age is 29 years and the average ticket price is 32. As there are 681 unique tickets and there is no way to extract less detailed information, we exclude this variable. There are 891 unique names, but we could take a look at the title of each person to understand whether the survival rate of people from high society was higher.
###Code
train.describe(include=['O'])
## extract cabin letter
def extract_cabin(x):
    # NaN is never equal to itself, so missing cabins map to 'other';
    # otherwise keep the deck letter (first character of the cabin string)
    return 'other' if x != x else x[0]
train['Cabin_l'] = train['Cabin'].apply(extract_cabin)
###Output
_____no_output_____
###Markdown
1.1 Superficial overview of each variable Just a quick look at the variables we are dealing with.
###Code
plain_features = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'Cabin_l']
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(20, 10))
start = 0
for j in range(2):
for i in range(3):
if start == len(plain_features):
break
sns.barplot(x=plain_features[start],
y='Survived', data=train, ax=ax[j, i])
start += 1
###Output
_____no_output_____
###Markdown
A quote from the movie: 'Children and women first'. * Sex: Survival chances of women are higher.* Pclass: Having a first-class ticket is beneficial for survival.* SibSp and Parch: middle-sized families had a higher survival rate than people who travelled alone or large families. The reasoning might be that people travelling alone would want to sacrifice themselves to help others. Regarding the large families, I would argue that it is hard to manage the whole family, and therefore people would search for their family members instead of getting on the boat.* Embarked C has a higher survival rate. It would be interesting to see if, for instance, the majority of Pclass 1 boarded at embarked C. 1.2 Survival by Sex and Age
###Code
sv_lab = 'survived'
nsv_lab = 'not survived'
fig, ax = plt.subplots(figsize=(5, 3))
ax = sns.distplot(train[train['Survived'] == 1].Age.dropna(),
bins=20, label=sv_lab, ax=ax)
ax = sns.distplot(train[train['Survived'] == 0].Age.dropna(),
bins=20, label=nsv_lab, ax=ax)
ax.legend()
ax.set_ylabel('KDE');
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
females = train[train['Sex'] == 'female']
males = train[train['Sex'] == 'male']
ax = sns.distplot(females[females['Survived'] == 1].Age.dropna(
), bins=30, label=sv_lab, ax=axes[0], kde=False)
ax = sns.distplot(females[females['Survived'] == 0].Age.dropna(
), bins=30, label=nsv_lab, ax=axes[0], kde=False)
ax.legend()
ax.set_title('Female')
ax = sns.distplot(males[males['Survived'] == 1].Age.dropna(),
bins=30, label=sv_lab, ax=axes[1], kde=False)
ax = sns.distplot(males[males['Survived'] == 0].Age.dropna(),
bins=30, label=nsv_lab, ax=axes[1], kde=False)
ax.legend()
ax.set_title('Male');
###Output
_____no_output_____
###Markdown
* The survival rate of boys is higher than that of adult men; however, the same does not hold for girls, and survival between the ages of 13 and 30 is lower. Take this into consideration while engineering the variable: we could specify a categorical variable as young vs. adult.* For women the survival chances are higher between the ages of 14 and 40. For men of the same age the survival chances are flipped. 1.3 Survival by Class, Embarked and Fare. 1.3.1 Survival by Class and Embarked
###Code
sns.catplot('Pclass', 'Survived', hue='Sex', col = 'Embarked', data=train, kind='point');
sns.catplot('Pclass', 'Survived', col = 'Embarked', data=train, kind='point');
###Output
_____no_output_____
###Markdown
* As noticed before, the class 1 passengers had a higher survival rate.* All women who died were from the 3rd class. * Embarking at Q in 3rd class gave you slightly better survival chances than embarking at S in the same class.* In fact, there is a very high variation in survival rate at embarked Q among the 1st and 2nd class. The third class had the same survival rate as the 3rd class embarked at C. We will exclude the variable embarked Q. From the crosstab we see that there were only 5 passengers embarked at Q in the 1st and 2nd class. That explains the large variation in survival rate and the perfect separation of men and women in Q.
###Code
tab = pd.crosstab(train['Embarked'], train['Pclass'])
print(tab)
tab_prop = tab.div(tab.sum(1).astype(float), axis=0)
tab_prop.plot(kind="bar", stacked=True)
###Output
_____no_output_____
###Markdown
1.3.2 Fare and class distribution
###Code
ax = sns.boxplot(x="Pclass", y="Fare", hue="Survived", data=train)
ax.set_yscale('log')
###Output
_____no_output_____
###Markdown
* It appears that within the first class, the higher the fare, the higher the survival chances. 1.3.3 Class and age distribution
###Code
sns.violinplot(x='Pclass', y='Age', hue='Survived', data=train, split=True);
###Output
_____no_output_____
###Markdown
* It is interesting to note that Age decreases with increasing Pclass, meaning most of the older passengers are from the 1st class. We will construct a new feature Age*Class to capture this finding. * Younger people from the 1st class had higher survival chances than older people from the same class.* A majority of children from the 3rd class and most children from the 2nd class survived. 1.4 Survival rate regarding the family members
###Code
# To get the full family size of a person, added siblings and parch.
train['family_size'] = train['SibSp'] + train['Parch'] + 1
test['family_size'] = test['SibSp'] + test['Parch'] + 1
axes = sns.catplot('family_size',
'Survived',
hue='Sex',
data=train,
aspect=4,
kind='point')
###Output
_____no_output_____
###Markdown
Assumption: the fewer people there were in your family, the faster you could get to the boat; the more people there are, the more management is required. However, if you had no family members you might have wanted to help others and therefore sacrifice yourself.* Females traveling with up to 2 more family members had a higher chance to survive. However, a high variation in survival rate appears once family size exceeds 4, as mothers/daughters would search longer for the members and therefore the chances of survival decrease.* Men travelling alone might want to sacrifice themselves and help other people survive. 1.5 Survival rate by the title* Barplots show that royalty normally had 1st or 2nd class tickets. However, people with the title Master mostly had 3rd class tickets. In fact, the title 'Master' was given to unmarried boys. You can see that the age of people with this title is less than 13.* Women and royalty had a higher survival rate. (There are only two titled women in the train set and both survived; I would put them into the Mrs class.)* The civil officials and reverends had a lower one due to the fact that they had/wanted to help people.
###Code
train['Title'] = train['Name'].str.extract(' ([A-Za-z]+)\.', expand=False)
print(collections.Counter(train['Title']).most_common())
test['Title'] = test['Name'].str.extract(' ([A-Za-z]+)\.', expand=False)
print()
print(collections.Counter(test['Title']).most_common())
tab = pd.crosstab(train['Title'],train['Pclass'])
print(tab)
tab_prop = tab.div(tab.sum(1).astype(float), axis=0)
tab_prop.plot(kind="bar", stacked=True)
###Output
_____no_output_____
###Markdown
Investigate who the masters were: the maximum age among them is about 12.
###Code
max(train[train['Title'] == 'Master'].Age)
sns.catplot('Title', 'Survived', data=train, aspect=3, kind='point');
###Output
_____no_output_____
###Markdown
We will group the royalty and other rare titles (including Master) into a 'titled' class, and because there were not so many royal women, we will assign them to Mrs.
###Code
#train['Title'].replace(['Master','Major', 'Capt', 'Col', 'Countess','Dona','Lady', 'Don', 'Sir', 'Jonkheer', 'Dr'], 'titled', inplace = True)
train['Title'].replace(['Master', 'Major', 'Capt', 'Col',
'Don', 'Sir', 'Jonkheer', 'Dr'], 'titled', inplace=True)
#train['Title'].replace(['Countess','Dona','Lady'], 'titled_women', inplace = True)
#train['Title'].replace(['Master','Major', 'Capt', 'Col','Don', 'Sir', 'Jonkheer', 'Dr'], 'titled_man', inplace = True)
train['Title'].replace(['Countess', 'Dona', 'Lady'], 'Mrs', inplace=True)
#train['Title'].replace(['Master'], 'Mr', inplace = 'True')
train['Title'].replace(['Mme'], 'Mrs', inplace=True)
train['Title'].replace(['Mlle', 'Ms'], 'Miss', inplace=True)
sns.catplot('Title', 'Survived', data=train, aspect=3, kind='point');
###Output
_____no_output_____
###Markdown
1.6 Survival rate by cabin Cabin is expected to be less distinguishing, especially considering that most of the values are missing.
###Code
def extract_cabin(x):
    # NaN != NaN, so missing cabins map to 'other'; otherwise keep the deck letter
    return 'other' if x != x else x[0]
train['Cabin_l'] = train['Cabin'].apply(extract_cabin)
print(train.groupby('Cabin_l').size())
sns.catplot('Cabin_l', 'Survived',
order=['other', 'A', 'B', 'C', 'D', 'E', 'F', 'T'],
aspect=3,
data=train,
kind='point')
###Output
_____no_output_____
###Markdown
1.7 Correlation of the variables* Pclass is slightly correlated with Fare since, logically, a 3rd class ticket would cost less than a 1st class one.* Pclass is also slightly correlated with Survived.* SibSp and Parch are weakly correlated with each other, as basically they both reflect how big the family is.
###Code
plt.figure(figsize=(8, 8))
corrmap = sns.heatmap(train.drop('PassengerId',axis=1).corr(), square=True, annot=True)
###Output
_____no_output_____
###Markdown
2. FEATURE SELECTION AND ENGINEERING 2.1 Impute values NB: The calculation of values to impute should only be done on the train set. For example, if you want to impute the mean age into the missing values of the test set, the mean age should only be calculated on the train set to avoid data leakage. First, we check how many NAs there are in general. If there is only a small amount, we can just exclude those individuals. Considering that there are 891 training samples, 708 do not have missing values and 183 samples have NA values, so it is better to impute. There are different techniques with which one can impute the values.
###Code
train.shape[0] - train.dropna().shape[0]
###Output
_____no_output_____
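###Markdown
As a minimal illustration of the leakage-free pattern mentioned in the note above (a hypothetical example using the mean - the actual Age imputation used further below is different): the statistic is fitted on the train set only and then reused for both sets.
###Code
# Hypothetical illustration only - the notebook imputes Age differently below.
age_mean_train = train['Age'].mean()                     # statistic computed on the train set only
age_filled_train = train['Age'].fillna(age_mean_train)   # applied to the train set
age_filled_test = test['Age'].fillna(age_mean_train)     # applied to the test set with the SAME train statistic
###Output
_____no_output_____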
###Markdown
Check which columns to impute in which set. The following shows the number of NA values in each column.
###Code
train.isnull().sum()
test.isnull().sum()
###Output
_____no_output_____
###Markdown
Embarked: fill Embarked with the majority class
###Code
max_emb = train['Embarked'].value_counts().idxmax()  # most frequent embarkation port
train['Embarked'].fillna(max_emb, inplace=True)
###Output
_____no_output_____
###Markdown
Fare: because there is only one missing value in Fare, we will fill it with the median fare of the corresponding Pclass
###Code
indz = test[test['Fare'].isna()].index.tolist()
print(indz)
pclass = test['Pclass'][indz].values[0]
fare_train = train[train['Pclass']==pclass].Fare
fare_med = fare_train.median()
print(fare_med)
test.loc[indz,'Fare'] = fare_med
###Output
_____no_output_____
###Markdown
There are several imputation techniques; we will use random numbers drawn from the range mean ± std
###Code
ages = train['Age'].dropna()
std_ages = ages.std()
mean_ages = ages.mean()
train_nas = np.isnan(train["Age"])
test_nas = np.isnan(test["Age"])
np.random.seed(122)
impute_age_train = np.random.randint(mean_ages - std_ages, mean_ages + std_ages, size = train_nas.sum())
impute_age_test = np.random.randint(mean_ages - std_ages, mean_ages + std_ages, size = test_nas.sum())
train["Age"][train_nas] = impute_age_train
test["Age"][test_nas] = impute_age_test
ages_imputed = np.concatenate((test["Age"],train["Age"]), axis = 0)
train['Age*Class'] = train['Age']*train['Pclass']
test['Age*Class'] = test['Age']*test['Pclass']
###Output
_____no_output_____
###Markdown
Check if we disrupted the distribution somehow.
###Code
sns.kdeplot(ages_imputed, label = 'After imputation');
sns.kdeplot(ages, label = 'Before imputation');
###Output
_____no_output_____
###Markdown
2.2 ENGINEER VALUES Integrate the title feature into the test set
###Code
test['Title'] = test['Name'].str.extract(' ([A-Za-z]+)\.', expand=False)
test['Title'].replace(['Master', 'Major', 'Capt', 'Col',
'Don', 'Sir', 'Jonkheer', 'Dr'], 'titled', inplace=True)
test['Title'].replace(['Countess', 'Dona', 'Lady'], 'Mrs', inplace=True)
#test['Title'].replace(['Master'], 'Mr', inplace = True)
test['Title'].replace(['Mme'], 'Mrs', inplace=True)
test['Title'].replace(['Mlle', 'Ms'], 'Miss', inplace=True)
###Output
_____no_output_____
###Markdown
Separate young and adult people
###Code
train['age_cat'] = None
train.loc[(train['Age'] <= 13), 'age_cat'] = 'young'
train.loc[(train['Age'] > 13), 'age_cat'] = 'adult'
test['age_cat'] = None
test.loc[(test['Age'] <= 13), 'age_cat'] = 'young'
test.loc[(test['Age'] > 13), 'age_cat'] = 'adult'
###Output
_____no_output_____
###Markdown
Drop overly broad variables. As we have seen from describe, there are too many unique values for Ticket and too many missing values for Cabin.
###Code
train_label = train['Survived']
test_pasId = test['PassengerId']
drop_cols = ['Name', 'Ticket', 'Cabin', 'SibSp', 'Parch', 'PassengerId']
train.drop(drop_cols + ['Cabin_l'], 1, inplace=True)
test.drop(drop_cols, 1, inplace=True)
###Output
_____no_output_____
###Markdown
Convert Pclass into a categorical variable
###Code
train['Pclass'] = train['Pclass'].apply(str)
test['Pclass'] = test['Pclass'].apply(str)
###Output
_____no_output_____
###Markdown
Create dummy variables for categorical data.
###Code
train.drop(['Survived'], 1, inplace=True)
train_objs_num = len(train)
dataset = pd.concat(objs=[train, test], axis=0)
dataset = pd.get_dummies(dataset)
train = copy.copy(dataset[:train_objs_num])
test = copy.copy(dataset[train_objs_num:])
droppings = ['Embarked_Q', 'Age']
#droppings += ['Sex_male', 'Sex_female']
test.drop(droppings, 1, inplace=True)
train.drop(droppings, 1, inplace=True)
train.head(5)
###Output
_____no_output_____
###Markdown
CLASSIFICATION
###Code
def prediction(model, train, label, test, test_pasId):
model.fit(train, label)
pred = model.predict(test)
accuracy = cross_val_score(model, train, label, cv=5)
sub = pd.DataFrame({
"PassengerId": test_pasId,
"Survived": pred
})
return [accuracy, sub]
###Output
_____no_output_____
###Markdown
1. Random Forest There are many categorical features, so I have chosen random forest to do the classification.
###Code
rf = RandomForestClassifier(
n_estimators=80, min_samples_leaf=2, min_samples_split=2, random_state=110)
acc_random_forest, sub = prediction(rf, train, train_label, test, test_pasId)
importances = pd.DataFrame(
{'feature': train.columns, 'importance': np.round(rf.feature_importances_, 3)})
importances = importances.sort_values(
'importance', ascending=False).set_index('feature')
print(importances)
importances.plot.bar()
print(acc_random_forest)
sub.to_csv("titanic_submission_randomforest.csv", index=False)
###Output
_____no_output_____
###Markdown
2. Logistic Regression
###Code
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(train['Fare'].values.reshape(-1, 1))
train['Fare'] = scaler.transform(train['Fare'].values.reshape(-1, 1))
test['Fare'] = scaler.transform(test['Fare'].values.reshape(-1, 1))
scaler = StandardScaler().fit(train['Age*Class'].values.reshape(-1, 1))
train['Age*Class'] = scaler.transform(train['Age*Class'].values.reshape(-1, 1))
test['Age*Class'] = scaler.transform(test['Age*Class'].values.reshape(-1, 1))
lr = LogisticRegression(random_state=110)
lr_acc, sub = prediction(lr, train, train_label, test, test_pasId)
sub.to_csv("titanic_submission_logregres.csv", index=False)
# train.columns.tolist()
print(list(zip(lr.coef_[0], train.columns.tolist())))
###Output
_____no_output_____
###Markdown
3. KNeighbours
###Code
kn = KNeighborsClassifier()
kn_acc, sub = prediction(kn, train, train_label, test, test_pasId)
print(kn_acc)
sub.to_csv("titanic_submission_kn.csv", index=False)
###Output
_____no_output_____
###Markdown
4. Ensemble
###Code
from sklearn.ensemble import VotingClassifier
eclf1 = VotingClassifier(estimators=[
('lr', lr), ('rf', rf)], voting='soft')
eclf1 = eclf1.fit(train, train_label)
test_predictions = eclf1.predict(test)
test_predictions = test_predictions.astype(int)
submission = pd.DataFrame({
"PassengerId": test_pasId,
"Survived": test_predictions
})
submission.to_csv("titanic_submission_ensemble.csv", index=False)
###Output
_____no_output_____
###Markdown
5. XGBoost
###Code
xgb = XGBClassifier(n_estimators=200)
acc_xgb, sub = prediction(xgb, train, train_label, test, test_pasId)
print(acc_xgb)
plot_importance(xgb)
sub.to_csv("titanic_submission_xgboost.csv", index=False)
###Output
_____no_output_____ |
Titanic_Survival_v2.ipynb | ###Markdown
Predict survival on the Titanic
###Code
#import libraries for data visualisation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# Read train data using pandas
train = pd.read_csv('train.csv',index_col = 'PassengerId')
# Read test data using pandas
test = pd.read_csv('test.csv',index_col = 'PassengerId')
full_data = pd.concat([train.drop('Survived',axis=1),test],axis = 0,sort = False)
# check the first 5 rows
full_data.head()
# checking for nulls in the Embarked column
train[train['Embarked'].isnull()]
###Output
_____no_output_____
###Markdown
Data Visualization Use heatmap to check for missing values.
###Code
sns.heatmap(train.corr(), annot=True,cmap = 'viridis')
def missing_values(data, cmap = 'viridis'):
"""
Given the data, this function will return a graph for missing values
Parameters
----------
data : Pandas dataframe.
cmap : matplotlib colormap name or object, or list of colors, optional
The mapping from data values to color space. If not provided, the
default is 'viridis'.
"""
return sns.heatmap(data.isnull(),yticklabels=False,cbar=False,cmap='viridis')
missing_values(full_data)
sns.set_style('whitegrid')
sns.countplot(x='Survived',data=train,palette='RdBu_r')
###Output
_____no_output_____
###Markdown
Check the ratio for male and female who survived
###Code
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')
###Output
_____no_output_____
###Markdown
In terms of class
###Code
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Pclass',data=train,palette='rainbow')
sns.distplot(train['Age'].dropna(),kde=False,color='darkred',bins=30)
sns.countplot(x='SibSp',data=train)
train['Fare'].hist(color='green',bins=40,figsize=(8,4))
###Output
_____no_output_____
###Markdown
How many unique tickets are there?
###Code
full_data['Ticket'].nunique()
###Output
_____no_output_____
###Markdown
Create a new feature with the titles
###Code
full_data['title'] = full_data['Name'].apply(lambda myString: myString[myString.find(",")+2:myString.find(".")])
full_data['title'].value_counts()
###Output
_____no_output_____
###Markdown
Data Cleaning
###Code
plt.figure(figsize=(12, 7))
sns.boxplot(x='Pclass',y='Age',data=train,palette='winter')
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 37
elif Pclass == 2:
return 29
else:
return 24
else:
return Age
full_data['Age'] = full_data[['Age','Pclass']].apply(impute_age,axis=1)
###Output
_____no_output_____
###Markdown
Let's check the heatmap again
###Code
missing_values(full_data)
###Output
_____no_output_____
###Markdown
Let's convert categorical features to dummy variables using pandas! **There are so many missing values in "cabin" column, that it's better to drop it**
###Code
full_data.drop('Cabin',axis=1,inplace=True)
#checking for the missing values again
missing_values(full_data)
full_data.loc[full_data['Fare'].isna(), 'Fare'] = full_data['Fare'].mean()  # fill only the missing Fare values, not the whole row
full_data[full_data['Embarked'].isna() == True]
full_data["Embarked"] = full_data["Embarked"].fillna('C')
sex = pd.get_dummies(full_data['Sex'],drop_first=True)
embark = pd.get_dummies(full_data['Embarked'],drop_first=True)
title = pd.get_dummies(full_data['title'],drop_first=True)
#drop the categorical features
full_data.drop(['Sex','Embarked','Name','Ticket','title'],axis=1,inplace=True)
# replace them with the nummeric features
full_data = pd.concat([full_data,sex,embark,title],axis=1)
full_data.head()
#split train and test again
def split_data(data,nrow):
"""
split data along the row
Parameters
--------------
data : pandas dataframe
nrow : split
Returns
--------------
Tuple of top and bottom part of the data
"""
top = data.iloc[:nrow]
bottom = data.iloc[nrow:]
return (top,bottom)
train_new,test_new = split_data(full_data,nrow = 891)
train_new.shape,test.shape, full_data.shape
correlation = pd.concat([train["Survived"],train_new],axis = 1).corr()
print(correlation['Survived'])
plt.figure(figsize = (20,20),dpi = 60)
sns.heatmap(pd.concat([train["Survived"],train_new],axis = 1).corr(), annot=True,cmap = 'PuRd',cbar=False)
###Output
_____no_output_____
###Markdown
Building models**We will be testing Logistic regression and Random forest for now. More models can be used depending on the accuracy**
###Code
#import libraries
from sklearn.model_selection import train_test_split #for train test split
#We can check precision,recall,f1-score using classification report!
from sklearn.metrics import classification_report,accuracy_score
#import models
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
###Output
_____no_output_____
###Markdown
Train Test split
###Code
X_train, X_test, y_train, y_test = train_test_split(train_new,
train['Survived'], test_size=0.30,
random_state=142)
X_train.head()
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
predict_log = logmodel.predict(X_test)
print(classification_report(y_test,predict_log))
print (accuracy_score(y_test,predict_log))
y_train = np.asarray(y_train).ravel()
regmodel = RandomForestClassifier(n_estimators=100,max_features='auto', min_samples_split=0.05)
regmodel.fit(X_train,y_train)
predict_reg = regmodel.predict(X_test)
print(classification_report(y_test,predict_reg))
print (accuracy_score(y_test,predict_reg))
predict_final = regmodel.predict(test_new)
submission = pd.DataFrame(
{
'PassengerId': test_new.index,
'Survived': predict_final
}
)
submission.to_csv("submission_final.csv", index=False)
###Output
_____no_output_____ |
import data in python part 2/1.1 Importing flat files from the web.ipynb | ###Markdown
Import the function urlretrieve from the subpackage urllib.request
###Code
# Import package
from urllib.request import urlretrieve
import pandas as pd
###Output
_____no_output_____
###Markdown
Assign the URL of the file to the variable url.
###Code
# Assign url of file: url
url=('https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv')
###Output
_____no_output_____
###Markdown
Use the function urlretrieve() to save the file locally as 'winequality-red.csv'.
###Code
# Save file locally
urlretrieve(url,'winequality-red.csv')
# Read file into a DataFrame and print its head
df = pd.read_csv('winequality-red.csv', sep=';')
print(df.head())
###Output
fixed acidity volatile acidity citric acid residual sugar chlorides \
0 7.4 0.70 0.00 1.9 0.076
1 7.8 0.88 0.00 2.6 0.098
2 7.8 0.76 0.04 2.3 0.092
3 11.2 0.28 0.56 1.9 0.075
4 7.4 0.70 0.00 1.9 0.076
free sulfur dioxide total sulfur dioxide density pH sulphates \
0 11.0 34.0 0.9978 3.51 0.56
1 25.0 67.0 0.9968 3.20 0.68
2 15.0 54.0 0.9970 3.26 0.65
3 17.0 60.0 0.9980 3.16 0.58
4 11.0 34.0 0.9978 3.51 0.56
alcohol quality
0 9.4 5
1 9.8 5
2 9.8 5
3 9.8 6
4 9.4 5
|
examples/advanced-tour.ipynb | ###Markdown
Advanced tour of the Bayesian Optimization package
###Code
from bayes_opt import BayesianOptimization
###Output
_____no_output_____
###Markdown
1. Suggest-Evaluate-Register ParadigmInternally the `maximize` method is simply a wrapper around the methods `suggest`, `probe`, and `register`. If you need more control over your optimization loops the Suggest-Evaluate-Register paradigm should give you that extra flexibility.For an example of running the `BayesianOptimization` in a distributed fashion (where the function being optimized is evaluated concurrently in different cores/machines/servers), checkout the `async_optimization.py` script in the examples folder.
###Code
# Let's start by defining our function, bounds, and instantiating an optimization object.
def black_box_function(x, y):
return -x ** 2 - (y - 1) ** 2 + 1
###Output
_____no_output_____
###Markdown
Notice that the evaluation of the blackbox function will NOT be carried out by the optimizer object. We are simulating a situation where this function could be executed on a different machine, maybe written in another language, or it could even be the result of a chemistry experiment. Whatever the case may be, you can take charge of it, and as long as you don't invoke the `probe` or `maximize` methods directly, the optimizer object will ignore the blackbox function.
###Code
optimizer = BayesianOptimization(
f=None,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
###Output
_____no_output_____
###Markdown
One extra ingredient we will need is an `UtilityFunction` instance. In case it is not clear why, take a look at the literature to understand better how this method works.
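In the standard formulation from the literature, the upper-confidence-bound utility used here is written as $a_{\mathrm{UCB}}(x) = \mu(x) + \kappa\,\sigma(x)$, where $\mu(x)$ and $\sigma(x)$ are the posterior mean and standard deviation of the GP and $\kappa$ (the `kappa` argument below) controls the exploration/exploitation trade-off; the `xi` argument plays the analogous exploration role for the expected-improvement utility.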
###Code
from bayes_opt import UtilityFunction
utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
###Output
_____no_output_____
###Markdown
The `suggest` method of our optimizer can be called at any time. What you get back is a suggestion for the next parameter combination the optimizer wants to probe.Notice that while the optimizer hasn't observed any points, the suggestions will be random. However, they will stop being random and improve in quality the more points are observed.
###Code
next_point_to_probe = optimizer.suggest(utility)
print("Next point to probe is:", next_point_to_probe)
###Output
Next point to probe is: {'y': 1.3219469606529488, 'x': -0.331911981189704}
###Markdown
You are now free to evaluate your function at the suggested point however/whenever you like.
###Code
target = black_box_function(**next_point_to_probe)
print("Found the target value to be:", target)
###Output
Found the target value to be: 0.7861845912690542
###Markdown
Last thing left to do is to tell the optimizer what target value was observed.
###Code
optimizer.register(
params=next_point_to_probe,
target=target,
)
###Output
_____no_output_____
###Markdown
1.1 The maximize loop And that's it. By repeating the steps above you recreate the internals of the `maximize` method. This should give you all the flexibility you need to log progress, halt execution, perform concurrent evaluations, etc.
###Code
for _ in range(5):
next_point = optimizer.suggest(utility)
target = black_box_function(**next_point)
optimizer.register(params=next_point, target=target)
print(target, next_point)
print(optimizer.max)
###Output
-19.0 {'y': -3.0, 'x': 2.0}
-12.194801029414048 {'y': -2.412527795983739, 'x': -1.2447710918286998}
0.6381713808008993 {'y': 1.4965397889559267, 'x': -0.3395244574146384}
0.5052897389362041 {'y': 1.2837707069731576, 'x': -0.6435716330974743}
0.9493808230928116 {'y': 1.2241444765020055, 'x': -0.019453291773639306}
{'target': 0.9493808230928116, 'params': {'y': 1.2241444765020055, 'x': -0.019453291773639306}}
###Markdown
2. Dealing with discrete parameters **There is no principled way of dealing with discrete parameters using this package.** Ok, now that we got that out of the way, how do you do it? You're bound to be in a situation where some of your function's parameters may only take on discrete values. Unfortunately, the nature of bayesian optimization with gaussian processes doesn't allow for an easy/intuitive way of dealing with discrete parameters - but that doesn't mean it is impossible. The example below showcases a simple, yet reasonably adequate, way of dealing with discrete parameters.
###Code
def func_with_discrete_params(x, y, d):
# Simulate necessity of having d being discrete.
assert type(d) == int
return ((x + y + d) // (1 + d)) / (1 + (x + y) ** 2)
def function_to_be_optimized(x, y, w):
d = int(w)
return func_with_discrete_params(x, y, d)
optimizer = BayesianOptimization(
f=function_to_be_optimized,
pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (0, 5)},
verbose=2,
random_state=1,
)
optimizer.maximize(alpha=1e-3)
###Output
| iter | target | w | x | y |
-------------------------------------------------------------
| [0m 1 [0m | [0m-0.06199 [0m | [0m 2.085 [0m | [0m 4.406 [0m | [0m-9.998 [0m |
| [95m 2 [0m | [95m-0.0344 [0m | [95m 1.512 [0m | [95m-7.065 [0m | [95m-8.153 [0m |
| [0m 3 [0m | [0m-0.2177 [0m | [0m 0.9313 [0m | [0m-3.089 [0m | [0m-2.065 [0m |
| [95m 4 [0m | [95m 0.1865 [0m | [95m 2.694 [0m | [95m-1.616 [0m | [95m 3.704 [0m |
| [0m 5 [0m | [0m-0.2187 [0m | [0m 1.022 [0m | [0m 7.562 [0m | [0m-9.452 [0m |
| [0m 6 [0m | [0m 0.009975[0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 7 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
| [0m 8 [0m | [0m 0.09003 [0m | [0m 0.0 [0m | [0m 0.4916 [0m | [0m 10.0 [0m |
| [0m 9 [0m | [0m-0.007481[0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 10 [0m | [0m 0.01989 [0m | [0m 5.0 [0m | [0m-0.02203 [0m | [0m 10.0 [0m |
| [0m 11 [0m | [0m 0.0189 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 0.238 [0m |
| [0m 12 [0m | [0m-0.2149 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 5.282 [0m |
| [0m 13 [0m | [0m 0.05995 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 5.786 [0m |
| [0m 14 [0m | [0m-0.01299 [0m | [0m 5.0 [0m | [0m-2.367 [0m | [0m-10.0 [0m |
| [0m 15 [0m | [0m 0.03637 [0m | [0m 5.0 [0m | [0m 3.773 [0m | [0m 3.575 [0m |
| [0m 16 [0m | [0m-0.01214 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 0.9779 [0m |
| [0m 17 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m-10.0 [0m |
| [0m 18 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-4.58 [0m | [0m 5.518 [0m |
| [0m 19 [0m | [0m-0.04988 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 20 [0m | [0m 0.1246 [0m | [0m 0.0 [0m | [0m 2.311 [0m | [0m 5.116 [0m |
| [0m 21 [0m | [0m 0.04988 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 22 [0m | [0m 0.04567 [0m | [0m 2.029 [0m | [0m 0.1434 [0m | [0m 6.398 [0m |
| [0m 23 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 4.685 [0m | [0m-4.937 [0m |
| [0m 24 [0m | [0m 0.06466 [0m | [0m 0.0 [0m | [0m 5.198 [0m | [0m 10.0 [0m |
| [95m 25 [0m | [95m 0.3751 [0m | [95m 5.0 [0m | [95m-0.6795 [0m | [95m 1.97 [0m |
| [0m 26 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-2.001 [0m | [0m-0.5515 [0m |
| [0m 27 [0m | [0m 0.1072 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m-1.419 [0m |
| [0m 28 [0m | [0m-0.08895 [0m | [0m 0.0 [0m | [0m-2.048 [0m | [0m-10.0 [0m |
| [0m 29 [0m | [0m 0.1907 [0m | [0m 0.0 [0m | [0m 3.994 [0m | [0m-0.1557 [0m |
| [0m 30 [0m | [0m-0.0 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
=============================================================
###Markdown
3. Tuning the underlying Gaussian Process The Bayesian optimization algorithm works by performing a Gaussian process regression of the observed combinations of parameters and their associated target values. The predicted parameter$\rightarrow$target hyper-surface (and its uncertainty) is then used to guide the next best point to probe. 3.1 Passing parameters to the GP Depending on the problem it could be beneficial to change the default parameters of the underlying GP. You can simply pass GP parameters to the maximize method directly as you can see below:
###Code
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
optimizer.maximize(
init_points=1,
n_iter=5,
# What follows are GP regressor parameters
alpha=1e-3,
n_restarts_optimizer=5
)
###Output
| iter | target | x | y |
-------------------------------------------------
| [0m 1 [0m | [0m 0.7862 [0m | [0m-0.3319 [0m | [0m 1.322 [0m |
| [0m 2 [0m | [0m-18.96 [0m | [0m 1.993 [0m | [0m-2.998 [0m |
| [0m 3 [0m | [0m 0.7858 [0m | [0m-0.3333 [0m | [0m 1.321 [0m |
| [0m 4 [0m | [0m 0.5787 [0m | [0m-0.429 [0m | [0m 1.487 [0m |
| [0m 5 [0m | [0m 0.7798 [0m | [0m 0.02543 [0m | [0m 1.469 [0m |
| [95m 6 [0m | [95m 0.9779 [0m | [95m 0.1301 [0m | [95m 0.9282 [0m |
=================================================
###Markdown
Another alternative, especially useful if you're calling `maximize` multiple times or optimizing outside the `maximize` loop, is to call the `set_gp_params` method.
###Code
optimizer.set_gp_params(normalize_y=True)
###Output
_____no_output_____
###Markdown
3.2 Tuning the `alpha` parameterWhen dealing with functions with discrete parameters, or a particularly erratic target space, it might be beneficial to increase the value of the `alpha` parameter. This parameter controls how much noise the GP can handle, so increase it whenever you think that extra flexibility is needed. 3.3 Changing kernelsBy default this package uses the Matern 2.5 kernel. Depending on your use case you may find that tuning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems. Observers ContinuedObservers are objects that subscribe and listen to particular events fired by the `BayesianOptimization` object. When an event gets fired a callback function is called with the event and the `BayesianOptimization` instance passed as parameters. The callback can be specified at the time of subscription. If none is given it will look for an `update` method from the observer.
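Before the observer example below, here is a minimal sketch for 3.2/3.3 (assuming scikit-learn's `Matern` kernel and that `set_gp_params` simply forwards keyword arguments to the underlying `GaussianProcessRegressor`): tuning the noise level or swapping the kernel could look like this:
###Code
from sklearn.gaussian_process.kernels import Matern
# Raise alpha so the GP tolerates a noisier / more erratic target (illustrative value).
optimizer.set_gp_params(alpha=1e-2)
# Swap the default Matern(nu=2.5) kernel for a rougher one (illustrative choice).
optimizer.set_gp_params(kernel=Matern(nu=1.5))
###Output
_____no_output_____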
###Code
from bayes_opt.event import DEFAULT_EVENTS, Events
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
class BasicObserver:
def update(self, event, instance):
"""Does whatever you want with the event and `BayesianOptimization` instance."""
print("Event `{}` was observed".format(event))
my_observer = BasicObserver()
optimizer.subscribe(
event=Events.OPTMIZATION_STEP,
subscriber=my_observer,
callback=None, # Will use the `update` method as callback
)
###Output
_____no_output_____
###Markdown
Alternatively you have the option to pass a completely different callback.
###Code
def my_callback(event, instance):
print("Go nuts here!")
optimizer.subscribe(
event=Events.OPTMIZATION_START,
subscriber="Any hashable object",
callback=my_callback,
)
optimizer.maximize(init_points=1, n_iter=2)
###Output
Go nuts here!
Event `optmization:step` was observed
Event `optmization:step` was observed
Event `optmization:step` was observed
###Markdown
For a list of all default events you can checkout `DEFAULT_EVENTS`
###Code
DEFAULT_EVENTS
###Output
_____no_output_____
###Markdown
Advanced tour of the Bayesian Optimization package
###Code
from bayes_opt import BayesianOptimization
###Output
_____no_output_____
###Markdown
1. Suggest-Evaluate-Register ParadigmInternally the `maximize` method is simply a wrapper around the methods `suggest`, `probe`, and `register`. If you need more control over your optimization loops the Suggest-Evaluate-Register paradigm should give you that extra flexibility.For an example of running the `BayesianOptimization` in a distributed fashion (where the function being optimized is evaluated concurrently in different cores/machines/servers), checkout the `async_optimization.py` script in the examples folder.
###Code
# Let's start by defining our function, bounds, and instantiating an optimization object.
def black_box_function(x, y):
return -x ** 2 - (y - 1) ** 2 + 1
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
###Output
_____no_output_____
###Markdown
One extra ingredient we will need is an `UtilityFunction` instance. In case it is not clear why, take a look at the literature to understand better how this method works.
###Code
from bayes_opt import UtilityFunction
utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
###Output
_____no_output_____
###Markdown
The `suggest` method of our optimizer can be called at any time. What you get back is a suggestion for the next parameter combination the optimizer wants to probe.Notice that while the optimizer hasn't observed any points, the suggestions will be random. However, they will stop being random and improve in quality the more points are observed.
###Code
next_point_to_probe = optimizer.suggest(utility)
print("Next point to probe is:", next_point_to_probe)
###Output
Next point to probe is: {'x': -0.331911981189704, 'y': 1.3219469606529488}
###Markdown
You are now free to evaluate your function at the suggested point however/whenever you like.
###Code
target = black_box_function(**next_point_to_probe)
print("Found the target value to be:", target)
###Output
Found the target value to be: 0.7861845912690542
###Markdown
Last thing left to do is to tell the optimizer what target value was observed.
###Code
optimizer.register(
params=next_point_to_probe,
target=target,
)
###Output
_____no_output_____
###Markdown
1.1 The maximize loopAnd that's it. By repeating the steps above you recreate the internals of the `maximize` method. This should give you all the flexibility you need to log progress, halt execution, perform concurrent evaluations, etc.
###Code
for _ in range(5):
next_point = optimizer.suggest(utility)
target = black_box_function(**next_point)
optimizer.register(params=next_point, target=target)
print(target, next_point)
print(optimizer.max)
###Output
-19.0 {'x': 2.0, 'y': -3.0}
-12.194801029414048 {'x': -1.2447710918286998, 'y': -2.412527795983739}
0.6381713808008993 {'x': -0.3395244574146384, 'y': 1.4965397889559267}
0.5052897389362041 {'x': -0.6435716330974743, 'y': 1.2837707069731576}
0.9493808230928116 {'x': -0.019453291773639306, 'y': 1.2241444765020055}
{'target': 0.9493808230928116, 'params': {'x': -0.019453291773639306, 'y': 1.2241444765020055}}
###Markdown
2. Dealing with discrete parameters**There is no principled way of dealing with discrete parameters using this package.**Ok, now that we got that out of the way, how do you do it? You're bound to be in a situation where some of your function's parameters may only take on discrete values. Unfortunately, the nature of bayesian optimization with gaussian processes doesn't allow for an easy/intuitive way of dealing with discrete parameters - but that doesn't mean it is impossible. The example below showcases a simple, yet reasonably adequate, way of dealing with discrete parameters.
###Code
def func_with_discrete_params(x, y, d):
# Simulate necessity of having d being discrete.
assert type(d) == int
return ((x + y + d) // (1 + d)) / (1 + (x + y) ** 2)
def function_to_be_optimized(x, y, w):
d = int(w)
return func_with_discrete_params(x, y, d)
optimizer = BayesianOptimization(
f=function_to_be_optimized,
pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (0, 5)},
verbose=2,
random_state=1,
)
optimizer.maximize(alpha=1e-3)
###Output
| iter | target | w | x | y |
-------------------------------------------------------------
| [0m 1 [0m | [0m-0.06199 [0m | [0m 2.085 [0m | [0m 4.406 [0m | [0m-9.998 [0m |
| [95m 2 [0m | [95m-0.0344 [0m | [95m 1.512 [0m | [95m-7.065 [0m | [95m-8.153 [0m |
| [0m 3 [0m | [0m-0.2177 [0m | [0m 0.9313 [0m | [0m-3.089 [0m | [0m-2.065 [0m |
| [95m 4 [0m | [95m 0.1865 [0m | [95m 2.694 [0m | [95m-1.616 [0m | [95m 3.704 [0m |
| [0m 5 [0m | [0m-0.2187 [0m | [0m 1.022 [0m | [0m 7.562 [0m | [0m-9.452 [0m |
| [0m 6 [0m | [0m 0.009975[0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 7 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
| [0m 8 [0m | [0m 0.09003 [0m | [0m 0.0 [0m | [0m 0.4916 [0m | [0m 10.0 [0m |
| [0m 9 [0m | [0m-0.007481[0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 10 [0m | [0m 0.01989 [0m | [0m 5.0 [0m | [0m-0.02203 [0m | [0m 10.0 [0m |
| [0m 11 [0m | [0m 0.0189 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 0.238 [0m |
| [0m 12 [0m | [0m-0.2149 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 5.282 [0m |
| [0m 13 [0m | [0m 0.05995 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 5.786 [0m |
| [0m 14 [0m | [0m-0.01299 [0m | [0m 5.0 [0m | [0m-2.367 [0m | [0m-10.0 [0m |
| [0m 15 [0m | [0m 0.03637 [0m | [0m 5.0 [0m | [0m 3.773 [0m | [0m 3.575 [0m |
| [0m 16 [0m | [0m-0.01214 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 0.9779 [0m |
| [0m 17 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m-10.0 [0m |
| [0m 18 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-4.58 [0m | [0m 5.518 [0m |
| [0m 19 [0m | [0m-0.04988 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 20 [0m | [0m 0.1246 [0m | [0m 0.0 [0m | [0m 2.311 [0m | [0m 5.116 [0m |
| [0m 21 [0m | [0m 0.04988 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 22 [0m | [0m 0.04567 [0m | [0m 2.029 [0m | [0m 0.1434 [0m | [0m 6.398 [0m |
| [0m 23 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 4.685 [0m | [0m-4.937 [0m |
| [0m 24 [0m | [0m 0.06466 [0m | [0m 0.0 [0m | [0m 5.198 [0m | [0m 10.0 [0m |
| [95m 25 [0m | [95m 0.3751 [0m | [95m 5.0 [0m | [95m-0.6795 [0m | [95m 1.97 [0m |
| [0m 26 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-2.001 [0m | [0m-0.5515 [0m |
| [0m 27 [0m | [0m 0.1072 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m-1.419 [0m |
| [0m 28 [0m | [0m-0.08895 [0m | [0m 0.0 [0m | [0m-2.048 [0m | [0m-10.0 [0m |
| [0m 29 [0m | [0m 0.1907 [0m | [0m 0.0 [0m | [0m 3.994 [0m | [0m-0.1557 [0m |
| [0m 30 [0m | [0m-0.0 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
=============================================================
###Markdown
3. Tuning the underlying Gaussian ProcessThe bayesian optimization algorithm works by performing a gaussian process regression of the observed combination of parameters and their associated target values. The predicted parameter$\rightarrow$target hyper-surface (and its uncertainty) is then used to guide the next best point to probe. 3.1 Passing parameter to the GPDepending on the problem it could be beneficial to change the default parameters of the underlying GP. You can simply pass GP parameters to the maximize method directly as you can see below:
###Code
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
optimizer.maximize(
init_points=1,
n_iter=5,
# What follows are GP regressor parameters
alpha=1e-3,
n_restarts_optimizer=5
)
###Output
| iter | target | x | y |
-------------------------------------------------
| [0m 1 [0m | [0m 0.7862 [0m | [0m-0.3319 [0m | [0m 1.322 [0m |
| [0m 2 [0m | [0m-18.96 [0m | [0m 1.993 [0m | [0m-2.998 [0m |
| [0m 3 [0m | [0m 0.7858 [0m | [0m-0.3333 [0m | [0m 1.321 [0m |
| [0m 4 [0m | [0m 0.5787 [0m | [0m-0.429 [0m | [0m 1.487 [0m |
| [0m 5 [0m | [0m 0.7798 [0m | [0m 0.02543 [0m | [0m 1.469 [0m |
| [95m 6 [0m | [95m 0.9779 [0m | [95m 0.1301 [0m | [95m 0.9282 [0m |
=================================================
###Markdown
Another alternative, especially useful if you're calling `maximize` multiple times or optimizing outside the `maximize` loop, is to call the `set_gp_params` method.
###Code
optimizer.set_gp_params(normalize_y=True)
###Output
_____no_output_____
###Markdown
3.2 Tuning the `alpha` parameterWhen dealing with functions with discrete parameters, or a particularly erratic target space, it might be beneficial to increase the value of the `alpha` parameter. This parameter controls how much noise the GP can handle, so increase it whenever you think that extra flexibility is needed. 3.3 Changing kernelsBy default this package uses the Matern 2.5 kernel. Depending on your use case you may find that tuning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems. Observers ContinuedObservers are objects that subscribe and listen to particular events fired by the `BayesianOptimization` object. When an event gets fired a callback function is called with the event and the `BayesianOptimization` instance passed as parameters. The callback can be specified at the time of subscription. If none is given it will look for an `update` method from the observer.
###Code
from bayes_opt.event import DEFAULT_EVENTS, Events
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
class BasicObserver:
def update(self, event, instance):
"""Does whatever you want with the event and `BayesianOptimization` instance."""
print("Event `{}` was observed".format(event))
my_observer = BasicObserver()
optimizer.subscribe(
event=Events.OPTMIZATION_STEP,
subscriber=my_observer,
callback=None, # Will use the `update` method as callback
)
###Output
_____no_output_____
###Markdown
Alternatively you have the option to pass a completely different callback.
###Code
def my_callback(event, instance):
print("Go nuts here!")
optimizer.subscribe(
event=Events.OPTMIZATION_START,
subscriber="Any hashable object",
callback=my_callback,
)
optimizer.maximize(init_points=1, n_iter=2)
###Output
Go nuts here!
Event `optmization:step` was observed
Event `optmization:step` was observed
Event `optmization:step` was observed
###Markdown
For a list of all default events you can checkout `DEFAULT_EVENTS`
###Code
DEFAULT_EVENTS
###Output
_____no_output_____
###Markdown
Advanced tour of the Bayesian Optimization package
###Code
from bayes_opt import BayesianOptimization
###Output
_____no_output_____
###Markdown
1. Suggest-Evaluate-Register ParadigmInternally the `maximize` method is simply a wrapper around the methods `suggest`, `probe`, and `register`. If you need more control over your optimization loops the Suggest-Evaluate-Register paradigm should give you that extra flexibility.For an example of running the `BayesianOptimization` in a distributed fashion (where the function being optimized is evaluated concurrently in different cores/machines/servers), checkout the `async_optimization.py` script in the examples folder.
###Code
# Let's start by defining our function, bounds, and instantiating an optimization object.
import numpy as np
from scipy.stats import norm
def f(x):
"""Function with unknown internals we wish to maximize.
This is just serving as an example, for all intents and
purposes think of the internals of this function, i.e.: the process
which generates its output values, as unknown.
"""
r = x * np.sin(x) + norm.pdf(x,loc=5,scale=0.35)*10
return r
###Output
_____no_output_____
###Markdown
Notice that the evaluation of the blackbox function will NOT be carried out by the optimizer object. We are simulating a situation where this function could be being executed in a different machine, maybe it is written in another language, or it could even be the result of a chemistry experiment. Whatever the case may be, you can take charge of it and as long as you don't invoke the `probe` or `maximize` methods directly, the optimizer object will ignore the blackbox function.
###Code
pbounds = {'x': (-10,10)} #bounds of input
expectedYbounds = (-3,8) # expected range of the output (can also take a single number to specify the range; (-3,8) is equivalent to 11)
optimizer = BayesianOptimization(
f=None,
pbounds=pbounds,
yrange=expectedYbounds,
verbose=2,
random_state=1,
)
###Output
_____no_output_____
###Markdown
One extra ingredient we will need is an `UtilityFunction` instance. In case it is not clear why, take a look at the literature to understand better how this method works.
###Code
from bayes_opt import UtilityFunction
utility = UtilityFunction(kind="ei")
###Output
_____no_output_____
###Markdown
The `suggest` method of our optimizer can be called at any time. What you get back is a suggestion for the next parameter combination the optimizer wants to probe.Notice that while the optimizer hasn't observed any points, the suggestions will be random. However, they will stop being random and improve in quality the more points are observed.
###Code
next_point_to_probe = optimizer.suggest(utility)
print("Next point to probe is:", next_point_to_probe)
###Output
Next point to probe is: {'y': 1.3219469606529488, 'x': -0.331911981189704}
###Markdown
You are now free to evaluate your function at the suggested point however/whenever you like.
###Code
target = f(**next_point_to_probe)
print("Found the target value to be:", target)
###Output
Found the target value to be: 0.7861845912690542
###Markdown
Last thing left to do is to tell the optimizer what target value was observed.
###Code
optimizer.register(
params=next_point_to_probe,
target=target,
)
###Output
_____no_output_____
###Markdown
1.1 The maximize loopAnd that's it. By repeating the steps above you recreate the internals of the `maximize` method. This should give you all the flexibility you need to log progress, halt execution, perform concurrent evaluations, etc.
###Code
for _ in range(5):
next_point = optimizer.suggest(utility)
target = f(**next_point)
optimizer.register(params=next_point, target=target)
print(target, next_point)
print(optimizer.max)
###Output
-19.0 {'y': -3.0, 'x': 2.0}
-12.194801029414048 {'y': -2.412527795983739, 'x': -1.2447710918286998}
0.6381713808008993 {'y': 1.4965397889559267, 'x': -0.3395244574146384}
0.5052897389362041 {'y': 1.2837707069731576, 'x': -0.6435716330974743}
0.9493808230928116 {'y': 1.2241444765020055, 'x': -0.019453291773639306}
{'target': 0.9493808230928116, 'params': {'y': 1.2241444765020055, 'x': -0.019453291773639306}}
###Markdown
2.1: Dealing with discrete parametersIn the section below you can see an example of a function that would require a discrete parameter (only accepting integers). This package has a simple way to deal with these.
###Code
def func_with_discrete_params(x, y, d):
# Simulate necessity of having d being discrete.
assert type(d) == int
return ((x + y + d) // (1 + d)) / (1 + (x + y) ** 2)
def function_to_be_optimized(x, y, w):
d = int(w)
return func_with_discrete_params(x, y, d)
###Output
_____no_output_____
###Markdown
The way you tell the optimizer that 'd' is an integer is by giving only one element of the boundary. By doing so you are saying it can take any value from 1 to n (n=5 in this case).
###Code
optimizer = BayesianOptimization(
f=function_to_be_optimized,
pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (5)},
verbose=2,
yrange=(-3,8),
random_state=1,
)
optimizer.maximize(alpha=1e-3)
###Output
| iter | target | w | x | y |
-------------------------------------------------------------
| [0m 1 [0m | [0m-0.06199 [0m | [0m 2.085 [0m | [0m 4.406 [0m | [0m-9.998 [0m |
| [95m 2 [0m | [95m-0.0344 [0m | [95m 1.512 [0m | [95m-7.065 [0m | [95m-8.153 [0m |
| [0m 3 [0m | [0m-0.2177 [0m | [0m 0.9313 [0m | [0m-3.089 [0m | [0m-2.065 [0m |
| [95m 4 [0m | [95m 0.1865 [0m | [95m 2.694 [0m | [95m-1.616 [0m | [95m 3.704 [0m |
| [0m 5 [0m | [0m-0.2187 [0m | [0m 1.022 [0m | [0m 7.562 [0m | [0m-9.452 [0m |
| [0m 6 [0m | [0m 0.009975[0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 7 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
| [0m 8 [0m | [0m 0.09003 [0m | [0m 0.0 [0m | [0m 0.4916 [0m | [0m 10.0 [0m |
| [0m 9 [0m | [0m-0.007481[0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 10 [0m | [0m 0.01989 [0m | [0m 5.0 [0m | [0m-0.02203 [0m | [0m 10.0 [0m |
| [0m 11 [0m | [0m 0.0189 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 0.238 [0m |
| [0m 12 [0m | [0m-0.2149 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 5.282 [0m |
| [0m 13 [0m | [0m 0.05995 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 5.786 [0m |
| [0m 14 [0m | [0m-0.01299 [0m | [0m 5.0 [0m | [0m-2.367 [0m | [0m-10.0 [0m |
| [0m 15 [0m | [0m 0.03637 [0m | [0m 5.0 [0m | [0m 3.773 [0m | [0m 3.575 [0m |
| [0m 16 [0m | [0m-0.01214 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 0.9779 [0m |
| [0m 17 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m-10.0 [0m |
| [0m 18 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-4.58 [0m | [0m 5.518 [0m |
| [0m 19 [0m | [0m-0.04988 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 20 [0m | [0m 0.1246 [0m | [0m 0.0 [0m | [0m 2.311 [0m | [0m 5.116 [0m |
| [0m 21 [0m | [0m 0.04988 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 22 [0m | [0m 0.04567 [0m | [0m 2.029 [0m | [0m 0.1434 [0m | [0m 6.398 [0m |
| [0m 23 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 4.685 [0m | [0m-4.937 [0m |
| [0m 24 [0m | [0m 0.06466 [0m | [0m 0.0 [0m | [0m 5.198 [0m | [0m 10.0 [0m |
| [95m 25 [0m | [95m 0.3751 [0m | [95m 5.0 [0m | [95m-0.6795 [0m | [95m 1.97 [0m |
| [0m 26 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-2.001 [0m | [0m-0.5515 [0m |
| [0m 27 [0m | [0m 0.1072 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m-1.419 [0m |
| [0m 28 [0m | [0m-0.08895 [0m | [0m 0.0 [0m | [0m-2.048 [0m | [0m-10.0 [0m |
| [0m 29 [0m | [0m 0.1907 [0m | [0m 0.0 [0m | [0m 3.994 [0m | [0m-0.1557 [0m |
| [0m 30 [0m | [0m-0.0 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
=============================================================
###Markdown
2.2 Dealing with categorical dataIn the example below 'w' is a categorical variable. This means that it has no numerical meaning and the possible classes do not have any sense of order.
###Code
optimizer = BayesianOptimization(
f=function_to_be_optimized,
pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (5,'d')},
verbose=2,
yrange=(-3,8),
    random_state=1,
)
###Output
_____no_output_____
###Markdown
3. Tuning the underlying Gaussian ProcessThe bayesian optimization algorithm works by performing a gaussian process regression of the observed combination of parameters and their associated target values. The predicted parameter$\rightarrow$target hyper-surface (and its uncertainty) is then used to guide the next best point to probe. 3.1 Passing parameter to the GPDepending on the problem it could be beneficial to change the default parameters of the underlying GP. You can simply pass GP parameters to the maximize method directly as you can see below:
###Code
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
optimizer.maximize(
init_points=1,
n_iter=5,
# What follows are GP regressor parameters
alpha=1e-3,
n_restarts_optimizer=5
)
###Output
| iter | target | x | y |
-------------------------------------------------
| [0m 1 [0m | [0m 0.7862 [0m | [0m-0.3319 [0m | [0m 1.322 [0m |
| [0m 2 [0m | [0m-18.96 [0m | [0m 1.993 [0m | [0m-2.998 [0m |
| [0m 3 [0m | [0m 0.7858 [0m | [0m-0.3333 [0m | [0m 1.321 [0m |
| [0m 4 [0m | [0m 0.5787 [0m | [0m-0.429 [0m | [0m 1.487 [0m |
| [0m 5 [0m | [0m 0.7798 [0m | [0m 0.02543 [0m | [0m 1.469 [0m |
| [95m 6 [0m | [95m 0.9779 [0m | [95m 0.1301 [0m | [95m 0.9282 [0m |
=================================================
###Markdown
Another alternative, especially useful if you're calling `maximize` multiple times or optimizing outside the `maximize` loop, is to call the `set_gp_params` method.
###Code
optimizer.set_gp_params(normalize_y=True)
###Output
_____no_output_____
###Markdown
3.2 Tuning the `alpha` parameterWhen dealing with functions with discrete parameters, or a particularly erratic target space, it might be beneficial to increase the value of the `alpha` parameter. This parameter controls how much noise the GP can handle, so increase it whenever you think that extra flexibility is needed. 3.3 Changing kernelsBy default this package uses the Matern 2.5 kernel. Depending on your use case you may find that tuning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems. Observers ContinuedObservers are objects that subscribe and listen to particular events fired by the `BayesianOptimization` object. When an event gets fired a callback function is called with the event and the `BayesianOptimization` instance passed as parameters. The callback can be specified at the time of subscription. If none is given it will look for an `update` method from the observer.
###Code
from bayes_opt.event import DEFAULT_EVENTS, Events
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
class BasicObserver:
def update(self, event, instance):
"""Does whatever you want with the event and `BayesianOptimization` instance."""
print("Event `{}` was observed".format(event))
my_observer = BasicObserver()
optimizer.subscribe(
event=Events.OPTMIZATION_STEP,
subscriber=my_observer,
callback=None, # Will use the `update` method as callback
)
###Output
_____no_output_____
###Markdown
Alternatively you have the option to pass a completely different callback.
###Code
def my_callback(event, instance):
print("Go nuts here!")
optimizer.subscribe(
event=Events.OPTMIZATION_START,
subscriber="Any hashable object",
callback=my_callback,
)
optimizer.maximize(init_points=1, n_iter=2)
###Output
Go nuts here!
Event `optmization:step` was observed
Event `optmization:step` was observed
Event `optmization:step` was observed
###Markdown
For a list of all default events you can checkout `DEFAULT_EVENTS`
###Code
DEFAULT_EVENTS
###Output
_____no_output_____
###Markdown
Advanced tour of the Bayesian Optimization package
###Code
import os
import sys
module_path = os.path.abspath(os.path.join('../'))
if module_path not in sys.path:
print(module_path)
sys.path.append(module_path)
from bayes_opt import BayesianOptimization
###Output
/Users/uknowit/DSML/BayesianOptimization
###Markdown
1. Suggest-Evaluate-Register ParadigmInternally the `maximize` method is simply a wrapper around the methods `suggest`, `probe`, and `register`. If you need more control over your optimization loops the Suggest-Evaluate-Register paradigm should give you that extra flexibility.For an example of running the `BayesianOptimization` in a distributed fashion (where the function being optimized is evaluated concurrently in different cores/machines/servers), checkout the `async_optimization.py` script in the examples folder.
###Code
# Let's start by defining our function, bounds, and instantiating an optimization object.
def black_box_function(x, y):
return -x ** 2 - (y - 1) ** 2 + 1
###Output
_____no_output_____
###Markdown
Notice that the evaluation of the blackbox function will NOT be carried out by the optimizer object. We are simulating a situation where this function could be being executed in a different machine, maybe it is written in another language, or it could even be the result of a chemistry experiment. Whatever the case may be, you can take charge of it and as long as you don't invoke the `probe` or `maximize` methods directly, the optimizer object will ignore the blackbox function.
###Code
optimizer = BayesianOptimization(
f=None,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
###Output
_____no_output_____
###Markdown
One extra ingredient we will need is an `UtilityFunction` instance. In case it is not clear why, take a look at the literature to understand better how this method works.
###Code
from bayes_opt import UtilityFunction
utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
###Output
_____no_output_____
###Markdown
The `suggest` method of our optimizer can be called at any time. What you get back is a suggestion for the next parameter combination the optimizer wants to probe.Notice that while the optimizer hasn't observed any points, the suggestions will be random. However, they will stop being random and improve in quality the more points are observed.
###Code
next_point_to_probe = optimizer.suggest(utility)
print("Next point to probe is:", next_point_to_probe)
###Output
Next point to probe is: {'x': -0.331911981189704, 'y': 1.3219469606529488}
###Markdown
You are now free to evaluate your function at the suggested point however/whenever you like.
###Code
target = black_box_function(**next_point_to_probe)
print("Found the target value to be:", target)
###Output
Found the target value to be: 0.7861845912690542
###Markdown
Last thing left to do is to tell the optimizer what target value was observed.
###Code
optimizer.register(
params=next_point_to_probe,
target=target,
)
###Output
_____no_output_____
###Markdown
1.1 The maximize loopAnd that's it. By repeating the steps above you recreate the internals of the `maximize` method. This should give you all the flexibility you need to log progress, halt execution, perform concurrent evaluations, etc.
###Code
import numpy as np
xs = np.random.uniform(0, 6, size = (5,2))
for _ in range(5):
next_point2 = optimizer.suggest(utility)
next_point = xs[_]
target = black_box_function(*next_point)
optimizer.register(params=next_point, target=target)
print(target, next_point)
# print(optimizer._gp.predict(np.array(list(next_point.values())).reshape(1,-1)))
print(optimizer._gp.predict((next_point.reshape(1,-1))))
print(optimizer.max)
###Output
-30.30918766760489 [5.53819505 1.7984881 ]
[-23.07072596]
-18.413723331269715 [4.38564231 0.57589527]
[-21.67642756]
-3.784117126553028 [0.93162704 2.97893613]
[-3.38397356]
-1.0022061687665516 [0.25064864 2.39261676]
[-2.30602992]
-21.856357065349346 [2.28173986 5.20119272]
[-22.0625289]
{'target': -1.0022061687665516, 'params': {'x': 0.25064863868183984, 'y': 2.3926167558569342}}
###Markdown
2. Dealing with discrete parameters**There is no principled way of dealing with discrete parameters using this package.**Ok, now that we got that out of the way, how do you do it? You're bound to be in a situation where some of your function's parameters may only take on discrete values. Unfortunately, the nature of bayesian optimization with gaussian processes doesn't allow for an easy/intuitive way of dealing with discrete parameters - but that doesn't mean it is impossible. The example below showcases a simple, yet reasonably adequate, way of dealing with discrete parameters.
###Code
def func_with_discrete_params(x, y, d):
# Simulate necessity of having d being discrete.
assert type(d) == int
return ((x + y + d) // (1 + d)) / (1 + (x + y) ** 2)
def function_to_be_optimized(x, y, w):
d = int(w)
return func_with_discrete_params(x, y, d)
optimizer = BayesianOptimization(
f=function_to_be_optimized,
pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (0, 5)},
verbose=2,
random_state=1,
)
optimizer.maximize(alpha=1e-3)
###Output
| iter | target | w | x | y |
-------------------------------------------------------------
| [0m 1 [0m | [0m-0.06199 [0m | [0m 2.085 [0m | [0m 4.406 [0m | [0m-9.998 [0m |
| [95m 2 [0m | [95m-0.0344 [0m | [95m 1.512 [0m | [95m-7.065 [0m | [95m-8.153 [0m |
| [0m 3 [0m | [0m-0.2177 [0m | [0m 0.9313 [0m | [0m-3.089 [0m | [0m-2.065 [0m |
| [95m 4 [0m | [95m 0.1865 [0m | [95m 2.694 [0m | [95m-1.616 [0m | [95m 3.704 [0m |
| [0m 5 [0m | [0m-0.2187 [0m | [0m 1.022 [0m | [0m 7.562 [0m | [0m-9.452 [0m |
| [0m 6 [0m | [0m 0.009975[0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 7 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
| [0m 8 [0m | [0m 0.09003 [0m | [0m 0.0 [0m | [0m 0.4916 [0m | [0m 10.0 [0m |
| [0m 9 [0m | [0m-0.007481[0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 10 [0m | [0m 0.01989 [0m | [0m 5.0 [0m | [0m-0.02203 [0m | [0m 10.0 [0m |
| [0m 11 [0m | [0m 0.0189 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 0.238 [0m |
| [0m 12 [0m | [0m-0.2149 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 5.282 [0m |
| [0m 13 [0m | [0m 0.05995 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 5.786 [0m |
| [0m 14 [0m | [0m-0.01299 [0m | [0m 5.0 [0m | [0m-2.367 [0m | [0m-10.0 [0m |
| [0m 15 [0m | [0m 0.03637 [0m | [0m 5.0 [0m | [0m 3.773 [0m | [0m 3.575 [0m |
| [0m 16 [0m | [0m-0.01214 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 0.9779 [0m |
| [0m 17 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m-10.0 [0m |
| [0m 18 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-4.58 [0m | [0m 5.518 [0m |
| [0m 19 [0m | [0m-0.04988 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 20 [0m | [0m 0.1246 [0m | [0m 0.0 [0m | [0m 2.311 [0m | [0m 5.116 [0m |
| [0m 21 [0m | [0m 0.04988 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 22 [0m | [0m 0.04567 [0m | [0m 2.029 [0m | [0m 0.1434 [0m | [0m 6.398 [0m |
| [0m 23 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 4.685 [0m | [0m-4.937 [0m |
| [0m 24 [0m | [0m 0.06466 [0m | [0m 0.0 [0m | [0m 5.198 [0m | [0m 10.0 [0m |
| [95m 25 [0m | [95m 0.3751 [0m | [95m 5.0 [0m | [95m-0.6795 [0m | [95m 1.97 [0m |
| [0m 26 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-2.001 [0m | [0m-0.5515 [0m |
| [0m 27 [0m | [0m 0.1072 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m-1.419 [0m |
| [0m 28 [0m | [0m-0.08895 [0m | [0m 0.0 [0m | [0m-2.048 [0m | [0m-10.0 [0m |
| [0m 29 [0m | [0m 0.1907 [0m | [0m 0.0 [0m | [0m 3.994 [0m | [0m-0.1557 [0m |
| [0m 30 [0m | [0m-0.0 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
=============================================================
###Markdown
3. Tuning the underlying Gaussian ProcessThe bayesian optimization algorithm works by performing a gaussian process regression of the observed combination of parameters and their associated target values. The predicted parameter$\rightarrow$target hyper-surface (and its uncertainty) is then used to guide the next best point to probe. 3.1 Passing parameter to the GPDepending on the problem it could be beneficial to change the default parameters of the underlying GP. You can simply pass GP parameters to the maximize method directly as you can see below:
###Code
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
optimizer.maximize(
init_points=1,
n_iter=5,
# What follows are GP regressor parameters
alpha=1e-3,
n_restarts_optimizer=5
)
###Output
| iter | target | x | y |
-------------------------------------------------
| [0m 1 [0m | [0m 0.7862 [0m | [0m-0.3319 [0m | [0m 1.322 [0m |
| [0m 2 [0m | [0m-18.96 [0m | [0m 1.993 [0m | [0m-2.998 [0m |
| [0m 3 [0m | [0m 0.7858 [0m | [0m-0.3333 [0m | [0m 1.321 [0m |
| [0m 4 [0m | [0m 0.5787 [0m | [0m-0.429 [0m | [0m 1.487 [0m |
| [0m 5 [0m | [0m 0.7798 [0m | [0m 0.02543 [0m | [0m 1.469 [0m |
| [95m 6 [0m | [95m 0.9779 [0m | [95m 0.1301 [0m | [95m 0.9282 [0m |
=================================================
###Markdown
Another alternative, especially useful if you're calling `maximize` multiple times or optimizing outside the `maximize` loop, is to call the `set_gp_params` method.
###Code
optimizer.set_gp_params(normalize_y=True)
###Output
_____no_output_____
###Markdown
3.2 Tuning the `alpha` parameterWhen dealing with functions with discrete parameters, or a particularly erratic target space, it might be beneficial to increase the value of the `alpha` parameter. This parameter controls how much noise the GP can handle, so increase it whenever you think that extra flexibility is needed. 3.3 Changing kernelsBy default this package uses the Matern 2.5 kernel. Depending on your use case you may find that tuning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems. Observers ContinuedObservers are objects that subscribe and listen to particular events fired by the `BayesianOptimization` object. When an event gets fired a callback function is called with the event and the `BayesianOptimization` instance passed as parameters. The callback can be specified at the time of subscription. If none is given it will look for an `update` method from the observer.
###Code
from bayes_opt.event import DEFAULT_EVENTS, Events
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
class BasicObserver:
def update(self, event, instance):
"""Does whatever you want with the event and `BayesianOptimization` instance."""
print("Event `{}` was observed".format(event))
my_observer = BasicObserver()
optimizer.subscribe(
event=Events.OPTIMIZATION_STEP,
subscriber=my_observer,
callback=None, # Will use the `update` method as callback
)
###Output
_____no_output_____
###Markdown
Alternatively you have the option to pass a completely different callback.
###Code
def my_callback(event, instance):
print("Go nuts here!")
optimizer.subscribe(
event=Events.OPTIMIZATION_START,
subscriber="Any hashable object",
callback=my_callback,
)
optimizer.maximize(init_points=1, n_iter=2)
###Output
Go nuts here!
Event `optimization:step` was observed
Event `optimization:step` was observed
Event `optimization:step` was observed
###Markdown
For a list of all default events you can checkout `DEFAULT_EVENTS`
###Code
DEFAULT_EVENTS
###Output
_____no_output_____
###Markdown
Advanced tour of the Bayesian Optimization package
###Code
from bayes_opt import BayesianOptimization
###Output
_____no_output_____
###Markdown
1. Suggest-Evaluate-Register ParadigmInternally the `maximize` method is simply a wrapper around the methods `suggest`, `probe`, and `register`. If you need more control over your optimization loops the Suggest-Evaluate-Register paradigm should give you that extra flexibility.For an example of running the `BayesianOptimization` in a distributed fashion (where the function being optimized is evaluated concurrently in different cores/machines/servers), checkout the `async_optimization.py` script in the examples folder.
###Code
# Let's start by defining our function, bounds, and instantiating an optimization object.
def black_box_function(x, y):
return -x ** 2 - (y - 1) ** 2 + 1
###Output
_____no_output_____
###Markdown
Notice that the evaluation of the blackbox function will NOT be carried out by the optimizer object. We are simulating a situation where this function could be being executed in a different machine, maybe it is written in another language, or it could even be the result of a chemistry experiment. Whatever the case may be, you can take charge of it and as long as you don't invoke the `probe` or `maximize` methods directly, the optimizer object will ignore the blackbox function.
###Code
optimizer = BayesianOptimization(
f=None,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
###Output
_____no_output_____
###Markdown
One extra ingredient we will need is an `UtilityFunction` instance. In case it is not clear why, take a look at the literature to understand better how this method works.
###Code
from bayes_opt import UtilityFunction
utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
###Output
_____no_output_____
###Markdown
The `suggest` method of our optimizer can be called at any time. What you get back is a suggestion for the next parameter combination the optimizer wants to probe.Notice that while the optimizer hasn't observed any points, the suggestions will be random. However, they will stop being random and improve in quality the more points are observed.
###Code
next_point_to_probe = optimizer.suggest(utility)
print("Next point to probe is:", next_point_to_probe)
###Output
Next point to probe is: {'y': 1.3219469606529488, 'x': -0.331911981189704}
###Markdown
You are now free to evaluate your function at the suggested point however/whenever you like.
###Code
target = black_box_function(**next_point_to_probe)
print("Found the target value to be:", target)
###Output
Found the target value to be: 0.7861845912690542
###Markdown
Last thing left to do is to tell the optimizer what target value was observed.
###Code
optimizer.register(
params=next_point_to_probe,
target=target,
)
###Output
_____no_output_____
###Markdown
1.1 The maximize loopAnd that's it. By repeating the steps above you recreate the internals of the `maximize` method. This should give you all the flexibility you need to log progress, halt execution, perform concurrent evaluations, etc.
###Code
for _ in range(5):
next_point = optimizer.suggest(utility)
target = black_box_function(**next_point)
optimizer.register(params=next_point, target=target)
print(target, next_point)
print(optimizer.max)
###Output
-19.0 {'y': -3.0, 'x': 2.0}
-12.194801029414048 {'y': -2.412527795983739, 'x': -1.2447710918286998}
0.6381713808008993 {'y': 1.4965397889559267, 'x': -0.3395244574146384}
0.5052897389362041 {'y': 1.2837707069731576, 'x': -0.6435716330974743}
0.9493808230928116 {'y': 1.2241444765020055, 'x': -0.019453291773639306}
{'target': 0.9493808230928116, 'params': {'y': 1.2241444765020055, 'x': -0.019453291773639306}}
###Markdown
2. Dealing with discrete parameters**There is no principled way of dealing with discrete parameters using this package.**Ok, now that we got that out of the way, how do you do it? You're bound to be in a situation where some of your function's parameters may only take on discrete values. Unfortunately, the nature of bayesian optimization with gaussian processes doesn't allow for an easy/intuitive way of dealing with discrete parameters - but that doesn't mean it is impossible. The example below showcases a simple, yet reasonably adequate, way of dealing with discrete parameters.
###Code
def func_with_discrete_params(x, y, d):
# Simulate necessity of having d being discrete.
assert type(d) == int
return ((x + y + d) // (1 + d)) / (1 + (x + y) ** 2)
def function_to_be_optimized(x, y, w):
d = int(w)
return func_with_discrete_params(x, y, d)
optimizer = BayesianOptimization(
f=function_to_be_optimized,
pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (0, 5)},
verbose=2,
random_state=1,
)
optimizer.maximize(alpha=1e-3)
###Output
| iter | target | w | x | y |
-------------------------------------------------------------
| [0m 1 [0m | [0m-0.06199 [0m | [0m 2.085 [0m | [0m 4.406 [0m | [0m-9.998 [0m |
| [95m 2 [0m | [95m-0.0344 [0m | [95m 1.512 [0m | [95m-7.065 [0m | [95m-8.153 [0m |
| [0m 3 [0m | [0m-0.2177 [0m | [0m 0.9313 [0m | [0m-3.089 [0m | [0m-2.065 [0m |
| [95m 4 [0m | [95m 0.1865 [0m | [95m 2.694 [0m | [95m-1.616 [0m | [95m 3.704 [0m |
| [0m 5 [0m | [0m-0.2187 [0m | [0m 1.022 [0m | [0m 7.562 [0m | [0m-9.452 [0m |
| [0m 6 [0m | [0m 0.009975[0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 7 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
| [0m 8 [0m | [0m 0.09003 [0m | [0m 0.0 [0m | [0m 0.4916 [0m | [0m 10.0 [0m |
| [0m 9 [0m | [0m-0.007481[0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 10 [0m | [0m 0.01989 [0m | [0m 5.0 [0m | [0m-0.02203 [0m | [0m 10.0 [0m |
| [0m 11 [0m | [0m 0.0189 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 0.238 [0m |
| [0m 12 [0m | [0m-0.2149 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 5.282 [0m |
| [0m 13 [0m | [0m 0.05995 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 5.786 [0m |
| [0m 14 [0m | [0m-0.01299 [0m | [0m 5.0 [0m | [0m-2.367 [0m | [0m-10.0 [0m |
| [0m 15 [0m | [0m 0.03637 [0m | [0m 5.0 [0m | [0m 3.773 [0m | [0m 3.575 [0m |
| [0m 16 [0m | [0m-0.01214 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 0.9779 [0m |
| [0m 17 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m-10.0 [0m |
| [0m 18 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-4.58 [0m | [0m 5.518 [0m |
| [0m 19 [0m | [0m-0.04988 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 20 [0m | [0m 0.1246 [0m | [0m 0.0 [0m | [0m 2.311 [0m | [0m 5.116 [0m |
| [0m 21 [0m | [0m 0.04988 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 22 [0m | [0m 0.04567 [0m | [0m 2.029 [0m | [0m 0.1434 [0m | [0m 6.398 [0m |
| [0m 23 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 4.685 [0m | [0m-4.937 [0m |
| [0m 24 [0m | [0m 0.06466 [0m | [0m 0.0 [0m | [0m 5.198 [0m | [0m 10.0 [0m |
| [95m 25 [0m | [95m 0.3751 [0m | [95m 5.0 [0m | [95m-0.6795 [0m | [95m 1.97 [0m |
| [0m 26 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-2.001 [0m | [0m-0.5515 [0m |
| [0m 27 [0m | [0m 0.1072 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m-1.419 [0m |
| [0m 28 [0m | [0m-0.08895 [0m | [0m 0.0 [0m | [0m-2.048 [0m | [0m-10.0 [0m |
| [0m 29 [0m | [0m 0.1907 [0m | [0m 0.0 [0m | [0m 3.994 [0m | [0m-0.1557 [0m |
| [0m 30 [0m | [0m-0.0 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
=============================================================
###Markdown
3. Tuning the underlying Gaussian ProcessThe bayesian optimization algorithm works by performing a gaussian process regression of the observed combination of parameters and their associated target values. The predicted parameter$\rightarrow$target hyper-surface (and its uncertainty) is then used to guide the next best point to probe. 3.1 Passing parameter to the GPDepending on the problem it could be beneficial to change the default parameters of the underlying GP. You can simply pass GP parameters to the maximize method directly as you can see below:
###Code
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
optimizer.maximize(
init_points=1,
n_iter=5,
# What follows are GP regressor parameters
alpha=1e-3,
n_restarts_optimizer=5
)
###Output
| iter | target | x | y |
-------------------------------------------------
| [0m 1 [0m | [0m 0.7862 [0m | [0m-0.3319 [0m | [0m 1.322 [0m |
| [0m 2 [0m | [0m-18.96 [0m | [0m 1.993 [0m | [0m-2.998 [0m |
| [0m 3 [0m | [0m 0.7858 [0m | [0m-0.3333 [0m | [0m 1.321 [0m |
| [0m 4 [0m | [0m 0.5787 [0m | [0m-0.429 [0m | [0m 1.487 [0m |
| [0m 5 [0m | [0m 0.7798 [0m | [0m 0.02543 [0m | [0m 1.469 [0m |
| [95m 6 [0m | [95m 0.9779 [0m | [95m 0.1301 [0m | [95m 0.9282 [0m |
=================================================
###Markdown
Another alternative, especially useful if you're calling `maximize` multiple times or optimizing outside the `maximize` loop, is to call the `set_gp_params` method.
###Code
optimizer.set_gp_params(normalize_y=True)
###Output
_____no_output_____
###Markdown
3.2 Tuning the `alpha` parameterWhen dealing with functions with discrete parameters, or a particularly erratic target space, it might be beneficial to increase the value of the `alpha` parameter. This parameter controls how much noise the GP can handle, so increase it whenever you think that extra flexibility is needed. 3.3 Changing kernelsBy default this package uses the Matern 2.5 kernel. Depending on your use case you may find that tuning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems. Observers ContinuedObservers are objects that subscribe and listen to particular events fired by the `BayesianOptimization` object. When an event gets fired a callback function is called with the event and the `BayesianOptimization` instance passed as parameters. The callback can be specified at the time of subscription. If none is given it will look for an `update` method from the observer.
###Code
from bayes_opt.event import DEFAULT_EVENTS, Events
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
class BasicObserver:
def update(self, event, instance):
"""Does whatever you want with the event and `BayesianOptimization` instance."""
print("Event `{}` was observed".format(event))
my_observer = BasicObserver()
optimizer.subscribe(
event=Events.OPTIMIZATION_STEP,
subscriber=my_observer,
callback=None, # Will use the `update` method as callback
)
###Output
_____no_output_____
###Markdown
Alternatively you have the option to pass a completely different callback.
###Code
def my_callback(event, instance):
print("Go nuts here!")
optimizer.subscribe(
event=Events.OPTIMIZATION_START,
subscriber="Any hashable object",
callback=my_callback,
)
optimizer.maximize(init_points=1, n_iter=2)
###Output
Go nuts here!
Event `optimization:step` was observed
Event `optimization:step` was observed
Event `optimization:step` was observed
###Markdown
For a list of all default events you can checkout `DEFAULT_EVENTS`
###Code
DEFAULT_EVENTS
###Output
_____no_output_____
###Markdown
Advanced tour of the Bayesian Optimization package
###Code
from bayes_opt import BayesianOptimization
###Output
_____no_output_____
###Markdown
1. Suggest-Evaluate-Register ParadigmInternally the `maximize` method is simply a wrapper around the methods `suggest`, `probe`, and `register`. If you need more control over your optimization loops the Suggest-Evaluate-Register paradigm should give you that extra flexibility.For an example of running the `BayesianOptimization` in a distributed fashion (where the function being optimized is evaluated concurrently in different cores/machines/servers), checkout the `async_optimization.py` script in the examples folder.
###Code
# Let's start by defining our function, bounds, and instantiating an optimization object.
def black_box_function(x, y):
return -x ** 2 - (y - 1) ** 2 + 1
###Output
_____no_output_____
###Markdown
Notice that the evaluation of the blackbox function will NOT be carried out by the optimizer object. We are simulating a situation where this function could be being executed in a different machine, maybe it is written in another language, or it could even be the result of a chemistry experiment. Whatever the case may be, you can take charge of it and as long as you don't invoke the `probe` or `maximize` methods directly, the optimizer object will ignore the blackbox function.
###Code
optimizer = BayesianOptimization(
f=None,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
###Output
_____no_output_____
###Markdown
One extra ingredient we will need is an `UtilityFunction` instance. In case it is not clear why, take a look at the literature to understand better how this method works.
###Code
from bayes_opt import UtilityFunction
utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
###Output
_____no_output_____
###Markdown
The `suggest` method of our optimizer can be called at any time. What you get back is a suggestion for the next parameter combination the optimizer wants to probe.Notice that while the optimizer hasn't observed any points, the suggestions will be random. However, they will stop being random and improve in quality the more points are observed.
###Code
next_point_to_probe = optimizer.suggest(utility)
print("Next point to probe is:", next_point_to_probe)
###Output
Next point to probe is: {'y': 1.3219469606529488, 'x': -0.331911981189704}
###Markdown
You are now free to evaluate your function at the suggested point however/whenever you like.
###Code
target = black_box_function(**next_point_to_probe)
print("Found the target value to be:", target)
###Output
Found the target value to be: 0.7861845912690542
###Markdown
Last thing left to do is to tell the optimizer what target value was observed.
###Code
optimizer.register(
params=next_point_to_probe,
target=target,
)
###Output
_____no_output_____
###Markdown
1.1 The maximize loopAnd that's it. By repeating the steps above you recreate the internals of the `maximize` method. This should give you all the flexibility you need to log progress, halt execution, perform concurrent evaluations, etc.
###Code
for _ in range(5):
next_point = optimizer.suggest(utility)
target = black_box_function(**next_point)
optimizer.register(params=next_point, target=target)
print(target, next_point)
print(optimizer.max)
###Output
-19.0 {'y': -3.0, 'x': 2.0}
-12.194801029414048 {'y': -2.412527795983739, 'x': -1.2447710918286998}
0.6381713808008993 {'y': 1.4965397889559267, 'x': -0.3395244574146384}
0.5052897389362041 {'y': 1.2837707069731576, 'x': -0.6435716330974743}
0.9493808230928116 {'y': 1.2241444765020055, 'x': -0.019453291773639306}
{'target': 0.9493808230928116, 'params': {'y': 1.2241444765020055, 'x': -0.019453291773639306}}
###Markdown
2. Dealing with discrete parameters**There is no principled way of dealing with discrete parameters using this package.**Ok, now that we got that out of the way, how do you do it? You're bound to be in a situation where some of your function's parameters may only take on discrete values. Unfortunately, the nature of bayesian optimization with gaussian processes doesn't allow for an easy/intuitive way of dealing with discrete parameters - but that doesn't mean it is impossible. The example below showcases a simple, yet reasonably adequate, way of dealing with discrete parameters.
###Code
def func_with_discrete_params(x, y, d):
# Simulate necessity of having d being discrete.
assert type(d) == int
return ((x + y + d) // (1 + d)) / (1 + (x + y) ** 2)
def function_to_be_optimized(x, y, w):
d = int(w)
return func_with_discrete_params(x, y, d)
optimizer = BayesianOptimization(
f=function_to_be_optimized,
pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (0, 5)},
verbose=2,
random_state=1,
)
optimizer.maximize(alpha=1e-3)
###Output
| iter | target | w | x | y |
-------------------------------------------------------------
| [0m 1 [0m | [0m-0.06199 [0m | [0m 2.085 [0m | [0m 4.406 [0m | [0m-9.998 [0m |
| [95m 2 [0m | [95m-0.0344 [0m | [95m 1.512 [0m | [95m-7.065 [0m | [95m-8.153 [0m |
| [0m 3 [0m | [0m-0.2177 [0m | [0m 0.9313 [0m | [0m-3.089 [0m | [0m-2.065 [0m |
| [95m 4 [0m | [95m 0.1865 [0m | [95m 2.694 [0m | [95m-1.616 [0m | [95m 3.704 [0m |
| [0m 5 [0m | [0m-0.2187 [0m | [0m 1.022 [0m | [0m 7.562 [0m | [0m-9.452 [0m |
| [0m 6 [0m | [0m 0.009975[0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 7 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
| [0m 8 [0m | [0m 0.09003 [0m | [0m 0.0 [0m | [0m 0.4916 [0m | [0m 10.0 [0m |
| [0m 9 [0m | [0m-0.007481[0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 10 [0m | [0m 0.01989 [0m | [0m 5.0 [0m | [0m-0.02203 [0m | [0m 10.0 [0m |
| [0m 11 [0m | [0m 0.0189 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m 0.238 [0m |
| [0m 12 [0m | [0m-0.2149 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 5.282 [0m |
| [0m 13 [0m | [0m 0.05995 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 5.786 [0m |
| [0m 14 [0m | [0m-0.01299 [0m | [0m 5.0 [0m | [0m-2.367 [0m | [0m-10.0 [0m |
| [0m 15 [0m | [0m 0.03637 [0m | [0m 5.0 [0m | [0m 3.773 [0m | [0m 3.575 [0m |
| [0m 16 [0m | [0m-0.01214 [0m | [0m 5.0 [0m | [0m-10.0 [0m | [0m 0.9779 [0m |
| [0m 17 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 10.0 [0m | [0m-10.0 [0m |
| [0m 18 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-4.58 [0m | [0m 5.518 [0m |
| [0m 19 [0m | [0m-0.04988 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m-10.0 [0m |
| [0m 20 [0m | [0m 0.1246 [0m | [0m 0.0 [0m | [0m 2.311 [0m | [0m 5.116 [0m |
| [0m 21 [0m | [0m 0.04988 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m 10.0 [0m |
| [0m 22 [0m | [0m 0.04567 [0m | [0m 2.029 [0m | [0m 0.1434 [0m | [0m 6.398 [0m |
| [0m 23 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m 4.685 [0m | [0m-4.937 [0m |
| [0m 24 [0m | [0m 0.06466 [0m | [0m 0.0 [0m | [0m 5.198 [0m | [0m 10.0 [0m |
| [95m 25 [0m | [95m 0.3751 [0m | [95m 5.0 [0m | [95m-0.6795 [0m | [95m 1.97 [0m |
| [0m 26 [0m | [0m 0.0 [0m | [0m 5.0 [0m | [0m-2.001 [0m | [0m-0.5515 [0m |
| [0m 27 [0m | [0m 0.1072 [0m | [0m 0.0 [0m | [0m 10.0 [0m | [0m-1.419 [0m |
| [0m 28 [0m | [0m-0.08895 [0m | [0m 0.0 [0m | [0m-2.048 [0m | [0m-10.0 [0m |
| [0m 29 [0m | [0m 0.1907 [0m | [0m 0.0 [0m | [0m 3.994 [0m | [0m-0.1557 [0m |
| [0m 30 [0m | [0m-0.0 [0m | [0m 0.0 [0m | [0m-10.0 [0m | [0m 10.0 [0m |
=============================================================
###Markdown
3. Tuning the underlying Gaussian ProcessThe bayesian optimization algorithm works by performing a gaussian process regression of the observed combination of parameters and their associated target values. The predicted parameter$\rightarrow$target hyper-surface (and its uncertainty) is then used to guide the next best point to probe. 3.1 Passing parameter to the GPDepending on the problem it could be beneficial to change the default parameters of the underlying GP. You can simply pass GP parameters to the maximize method directly as you can see below:
###Code
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
optimizer.maximize(
init_points=1,
n_iter=5,
# What follows are GP regressor parameters
alpha=1e-3,
n_restarts_optimizer=5
)
###Output
| iter | target | x | y |
-------------------------------------------------
| [0m 1 [0m | [0m 0.7862 [0m | [0m-0.3319 [0m | [0m 1.322 [0m |
| [0m 2 [0m | [0m-18.96 [0m | [0m 1.993 [0m | [0m-2.998 [0m |
| [0m 3 [0m | [0m 0.7858 [0m | [0m-0.3333 [0m | [0m 1.321 [0m |
| [0m 4 [0m | [0m 0.5787 [0m | [0m-0.429 [0m | [0m 1.487 [0m |
| [0m 5 [0m | [0m 0.7798 [0m | [0m 0.02543 [0m | [0m 1.469 [0m |
| [95m 6 [0m | [95m 0.9779 [0m | [95m 0.1301 [0m | [95m 0.9282 [0m |
=================================================
###Markdown
Another alternative, especially useful if you're calling `maximize` multiple times or optimizing outside the `maximize` loop, is to call the `set_gp_params` method.
###Code
optimizer.set_gp_params(normalize_y=True)
###Output
_____no_output_____
###Markdown
3.2 Tuning the `alpha` parameterWhen dealing with functions with discrete parameters, or a particularly erratic target space, it might be beneficial to increase the value of the `alpha` parameter. This parameter controls how much noise the GP can handle, so increase it whenever you think that extra flexibility is needed. 3.3 Changing kernelsBy default this package uses the Matern 2.5 kernel. Depending on your use case you may find that tuning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems. Observers ContinuedObservers are objects that subscribe and listen to particular events fired by the `BayesianOptimization` object. When an event gets fired a callback function is called with the event and the `BayesianOptimization` instance passed as parameters. The callback can be specified at the time of subscription. If none is given it will look for an `update` method from the observer.
###Code
from bayes_opt.event import DEFAULT_EVENTS, Events
optimizer = BayesianOptimization(
f=black_box_function,
pbounds={'x': (-2, 2), 'y': (-3, 3)},
verbose=2,
random_state=1,
)
class BasicObserver:
def update(self, event, instance):
"""Does whatever you want with the event and `BayesianOptimization` instance."""
print("Event `{}` was observed".format(event))
my_observer = BasicObserver()
optimizer.subscribe(
event=Events.OPTIMIZATION_STEP,
subscriber=my_observer,
callback=None, # Will use the `update` method as callback
)
###Output
_____no_output_____
###Markdown
Alternatively you have the option to pass a completely different callback.
###Code
def my_callback(event, instance):
print("Go nuts here!")
optimizer.subscribe(
event=Events.OPTIMIZATION_START,
subscriber="Any hashable object",
callback=my_callback,
)
optimizer.maximize(init_points=1, n_iter=2)
###Output
Go nuts here!
Event `optimization:step` was observed
Event `optimization:step` was observed
Event `optimization:step` was observed
###Markdown
For a list of all default events you can check out `DEFAULT_EVENTS`
###Code
DEFAULT_EVENTS
###Output
_____no_output_____ |
other_stuff/Minkowski_Lorentz.ipynb | ###Markdown
Lorentz Transformations
###Code
from __future__ import (division, print_function, absolute_import)
import matplotlib.pyplot as plt
import numpy as np
import math
###Output
_____no_output_____
###Markdown
We are looking for a linear transformation between the coordinates of events in the unprimed system and their counterparts in the primed coordinate system. And we assume that the required four parameters of this transformation are themselves functions of the relative velocity $v$.We call that transformation a Lorentz-Transformation and denote it with $L_v(t, x)$.$$L_v(t, x) =\left( \begin{array}{c c}a_1(v) & a_2(v) \\b_1(v) & b_2(v)\end{array}\right) \cdot \left( \begin{array}{c c}t \\x\end{array}\right) =\left( \begin{array}{c c}t^\prime \\x^\prime\end{array}\right)$$If we find four different situations with known $x, t, x^\prime, t^\prime$, then we can hope to end up with a system of four equations for four unknowns, and hopefully that system is actually solvable. It turns out it is. Here are the four known situations:- We know what the origin of the moving system looks like in both coordinate systems- We know what the origin of the resting system looks like in both coordinate systems, since it must appear to move with $v^\prime=-v$ in the primed system.- We know that the speed of light is the same in both systems.- And finally we demand that the net result of consecutively applying a transformation $L_v(t,x)$ and its counterpart $L_{-v}(t,x)$ reproduces the original coordinates. Transforming into any system and back should have no net result. 1) Knowing that the origin of the primed system has $x^\prime=0$ in its own coordinate system and $x=vt$ in the resting observer's system we get$$\left( \begin{array}{c c}a_1 & a_2 \\b_1 & b_2\end{array}\right) \cdot \left( \begin{array}{c c}t \\v t\end{array}\right) =\left( \begin{array}{c c}t^\prime \\0\end{array}\right)$$$$\Rightarrow b_1 + b_2 v = 0 \text{ (from the x-component)}$$ 2) Knowing that the origin of the resting system $x=0$ is viewed as moving with $v^\prime = -v$ from the moving system we conclude:$$\left( \begin{array}{c c}a_1 & a_2 \\b_1 & b_2\end{array}\right) \cdot \left( \begin{array}{c c}t \\0\end{array}\right) =\left( \begin{array}{c c}a_1 t \\b_1 t\end{array}\right)$$$$ \Rightarrow v^\prime \equiv \frac{b_1}{a_1}=-v$$ We can already eliminate one parameter from these two results: $a_1 = b_2$ and for historical reasons we give them the new name $\gamma$:$$\gamma \equiv a_1 = b_2$$$$ b_1 = -\gamma v$$ 3) Knowing that the speed of light $c$ will be the same independent of the motion of the observer, we get$$\left( \begin{array}{c c}\gamma & a_2 \\-\gamma v & \gamma\end{array}\right) \cdot \left( \begin{array}{c c}t \\ct\end{array}\right) =\left( \begin{array}{c c}t^\prime \\ct^\prime\end{array}\right)$$Which leaves us with the two independent equations$$ \gamma t + a_2ct = t^\prime $$$$ -\gamma vt + \gamma ct = ct^\prime $$Replacing $t^\prime$ in the second equation by the left side of the first equation we get:$$-\gamma vt + \gamma ct = c(\gamma t + a_2ct)$$$$\Rightarrow a_2 = \frac{-\gamma v}{c^2}$$ We're almost done. We have it all apart from a *scaling* factor $\gamma$$$\gamma \cdot \left( \begin{array}{c c} 1 & -\frac{v}{c^2} \\-v & 1\end{array}\right) \cdot\left( \begin{array}{c} t \\x \end{array}\right) =\left( \begin{array}{c} t^\prime \\x^\prime\end{array}\right) $$ 4) Any consecutive pair of transformations $L_v$ and $L_{-v}$ must leave the coordinates unchanged. 
That's straight-forward to express in mathematical terms:$$\gamma \cdot \left( \begin{array}{c c} 1 & -\frac{v}{c^2} \\-v & 1\end{array}\right) \cdot\gamma \cdot \left( \begin{array}{c c} 1 & +\frac{v}{c^2} \\+v & 1\end{array}\right) =\left( \begin{array}{c c} 1 & 0 \\0 & 1\end{array}\right) $$which yields:$$\gamma = \frac{1}{\sqrt{1-\frac{v^2}{c^2}}}$$and thus concludes our derivation of the Lorentz transformation. The Lorentz transformation$$L_v(t, x) = \frac{1}{\sqrt{1-\frac{v^2}{c^2}}} \left( \begin{array}{c c} 1 & -\frac{v}{c^2} \\-v & 1\end{array}\right) \cdot\left( \begin{array}{c} t \\x \end{array}\right) $$Now it's time to face the often-times unintuitive consequences by visualizing the effect of such a transformation. Visualizing Lorentz transformationsIn the following demonstration we measure distances in light seconds, which computationally amounts to setting $c=1$.
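Before visualizing, a quick symbolic sanity check of this result (a sketch that assumes `sympy` is available; it is not used elsewhere in this notebook):
###Code
import sympy as sp

v, c = sp.symbols('v c', positive=True)
gamma = 1 / sp.sqrt(1 - v**2 / c**2)
L_plus = gamma * sp.Matrix([[1, -v/c**2], [-v, 1]])
L_minus = gamma * sp.Matrix([[1, v/c**2], [v, 1]])

# Boosting with velocity v and then with -v should leave the coordinates unchanged
sp.simplify(L_plus * L_minus)  # expect the 2x2 identity matrix
###Output
_____no_output_____
###Markdown
With that confirmed, the `LorentzTrafo` class below implements the transformation with c=1.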
###Code
class LorentzTrafo:
def __init__(self, v):
self.v = v
self.gamma = 1/np.sqrt(1-v*v)
self.matrix = self.gamma * np.array([[1, -v], [-v, 1]])
def __call__(self, obj):
"""
Takes a single spacetime coordinates (t1, x1),
or an array of spacetime coordinates [(t1, x1), (t2, x2), ...]
or pairs of coordinates representing lines [[(t1, x1), (t2, x2)], [...]]
and returns their respective Lorentz-boosted image
"""
if len(np.shape(obj)) == 3 and np.shape(obj)[1:3] == (2,2):
return self.transform_lines(obj)
elif len(np.shape(obj)) == 1 and np.shape(obj)[0] == 2:
return np.matmul(self.matrix, np.transpose(obj)).T
elif len(np.shape(obj)) == 2 and np.shape(obj)[1] == 2:
return np.matmul(self.matrix, np.transpose(obj)).T
else:
raise ValueError("Can't transform this object")
def transform_lines(self, lines):
"""transform a set of pair of points (in one go)"""
points = np.reshape(lines, [-1, 2])
transformed = self.__call__(points)
return np.reshape(transformed, [-1, 2, 2])
# Verifying:
v=.5
lt = LorentzTrafo(v)
res = lt([[4,2], [6,3]])
res[0], res[1], lt([[[4,2], [6,3]]])
class MinkowskiGrid:
"""A class to hold a grid of space time events and the characteristic world lines"""
def __init__(self, first, last, left, right, step, v=0.5):
N_wl = 10
self.line_color = '#F0C0C0'
self.left = left
self.right = right
self.first = first
self.last = last
self.step = step
self.v = v
self.events=[]
tr = range(first, last+1)
self.xlines = [[[t, left], [t, right]] for t in range(first, last+1)]
xr = range(left, right+1)
self.tlines = [[[first, x], [last, x]] for x in range(left, right+1)]
self.lines = np.append(self.xlines, self.tlines)
self.points = np.array([(t, x) for x in range(left, right+1)
for t in range(first, last+1)])
self.other = list(zip(np.linspace(first, last, N_wl),
np.linspace(first*v, last*v, N_wl)))
self.mytime = [(t, 0) for t in np.linspace(0, last, N_wl)]
r = list(zip(np.linspace(0, last, N_wl),
np.linspace(0, last, N_wl)))
l = list(zip(np.linspace(0, last, N_wl),
np.linspace(0, -last, N_wl)))
self.light_cone = np.append(r, l, axis=0)
def record_events(self, events):
self.events = events
def boost(self, events):
return LorentzTrafo(self.v)(events)
class MinkowskiPlot():
def __init__(self, grid):
self.grid = grid
self.psize=60
@staticmethod
def to_mpl_lines(minkowski_lines, color):
return np.array([[
[p[0][1], p[1][1]],
[p[0][0], p[1][0]],
color]
for p in minkowski_lines]).reshape(-1)
@staticmethod
def to_mpl(tx):
return np.transpose(zip(np.transpose(tx)[1],np.transpose(tx)[0]))
def points_xy(self):
return np.array(list(zip(self.grid.points.T[1], self.grid.points.T[0]))).T
def mytime_xy(self, primed=False):
mytime = LorentzTrafo(grid.v)(grid.mytime) if primed else grid.mytime
return self.to_mpl(mytime)
def light_xy(self, primed=False):
light_cone = grid.boost(grid.light_cone) if primed else grid.light_cone
return self.to_mpl(light_cone)
def other_xy(self, primed):
other = grid.boost(grid.other) if primed else grid.other
return self.to_mpl(other)
def events_xy(self, primed):
events = grid.boost(grid.events) if primed else grid.events
return self.to_mpl(events)
def tlines_xy(self, color):
return self.to_mpl_lines(self.grid.tlines, color)
def xlines_xy(self, color):
return self.to_mpl_lines(self.grid.xlines, color)
def any_xy(self):
return np.array([p for p in self.points_xy().T if not self.grid.is_light(p)
and not self.grid.is_other(p)
and not self.grid.is_mytime(p)])
def plot_grid(self, axis, color='#FFC0C0'):
#axis.plot([self.grid.left, self.grid.right], [0, 0], 'k');
axis.plot(*self.tlines_xy(color));
axis.plot(*self.xlines_xy(color));
def plot_mytime(self, axis, color='r', primed=False):
axis.scatter(*self.mytime_xy(primed), color=color, marker='o', s=self.psize);
axis.plot([0,0], [self.grid.first, self.grid.last], color);
def plot_lightcone(self, axis, color='y', primed=False):
axis.plot([self.grid.left, 0, self.grid.right],
[self.grid.last, 0, self.grid.last], 'y');
axis.scatter(*self.light_xy(primed), color='y', marker='o', s=self.psize);
def plot_other(self, axis, color='b', primed=False):
axis.plot([0,self.grid.v*self.grid.right], [0, self.grid.right], color);
axis.scatter(*self.other_xy(primed), color=color, marker='o', s=self.psize);
def plot_events(self, axis, color='k', primed=False):
axis.scatter(*self.events_xy(primed), color=color, marker='o', s=2.0*self.psize);
def display(self, axis, primed=False):
if primed:
tlines_d=self.to_mpl_lines(grid.boost(grid.tlines), '#FFC0C0')
xlines_d=self.to_mpl_lines(grid.boost(grid.xlines), '#FFC0C0')
self.plot_grid(axis, color='#A0A0FF')
axis.plot(*xlines_d, color='#FFD0D0');
axis.plot(*tlines_d, color='#FFD0D0');
else:
self.plot_grid(axis, '#FFC0C0')
self.plot_mytime(axis, 'r', primed)
self.plot_lightcone(axis, 'y', primed)
self.plot_other(axis, 'b', primed)
if (grid.events):
self.plot_events(axis, 'k', primed)
v=.5
grid = MinkowskiGrid(-1, 8, -8, 8, 1, v)
events=[[4, -2], [4, -3], [4, -4]]
grid.record_events(events)
plotter = MinkowskiPlot(grid)
_, plots = plt.subplots(2, figsize=(10,13))
for i in range(2):
plots[i].set_xlim([-8, 8])
plots[i].set_ylim([-1, 8])
plotter.display(plots[0], primed=False)
plotter.display(plots[1], primed=True)
###Output
_____no_output_____
###Markdown
We observe the following characteristics:- Events that appear simultaneous in one reference frame (black dots) will not be observed as simultaneous from within a reference frame moving relative to the former.- The light cones get stretched or squished, but they are observed at the same angle in spacetime, i.e. the speed of light is constant, independent of the movement of the observer.- The distance between the outer events (2 light seconds in the upper plot) appears slightly longer after the boost than from the perspective of the moving observer
###Code
lt=LorentzTrafo(v)
lt(events)
lt(events)[2][1] - lt(events)[0][1]
v=.2
lt=LorentzTrafo(v)
i_see_him_at=[2, 1]
i_see_me_at=[2,0]
he_sees_me_at=lt(i_see_me_at)
he_got_my_speed_at=he_sees_me_at[1]/he_sees_me_at[0]
he_got_my_speed_at # hopefully -v
v=.5 #any
lt=LorentzTrafo(v)
i_see_light_at=[2, 2]
he_sees_light_at=lt(i_see_light_at)
he_got_light_speed_at=he_sees_light_at[1]/he_sees_light_at[0]
he_got_light_speed_at # hopefully 1
lv = LorentzTrafo(.5).matrix
lmv = LorentzTrafo(-.5).matrix
np.matmul(lmv, lv)
###Output
_____no_output_____ |
analysis.ipynb | ###Markdown
Table of Contents1 Imports2 Load Data (Gather)3 Data understanding - EDA3.1 Overview3.2 Missing values3.2.1 Per Feature3.3 Per Row4 Data Preparation (Preprocessing)4.0.1 Create data flag column5 Questions (Modelling and Evaluation)5.1 Which proportion of developers works with data?5.2 Any differences in working habits?5.2.1 Difference in remote working habits5.2.2 Difference in working hours5.3 Difference in job satisfaction6 References - Sources Imports
###Code
# import libraries here; add more as necessary
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict
from IPython.core.display import HTML
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from helper_functions import *
# Set base plotting style
plt.style.use('seaborn-ticks')
# # Set base plotting size
plt.rcParams['figure.figsize'] = 14, 9
# Magic word for producing visualizations in notebook
%matplotlib inline
# Increase figure resolution for high dpi screens
%config InlineBackend.figure_format = 'retina'
# Autoreload modules
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Load Data (Gather)
###Code
df = pd.read_csv('data/survey-results-public.csv')
schema = pd.read_csv('data/survey-results-schema.csv')## Load Data (Gather)
###Output
_____no_output_____
###Markdown
Data understanding - EDA Overview
###Code
df.head()
df.shape
schema.head()
###Output
_____no_output_____
###Markdown
Missing values
###Code
# How many nans?
print('Total percentage of Nans: ',
round(df.isnull().sum().sum() / np.product(df.shape) * 100, 2),
'%')
###Output
Total percentage of Nans: 45.32 %
###Markdown
Per Feature
###Code
feat_nan_perc = df.isnull().mean()
hist_box_plot(feat_nan_perc, x_label='Proportion of missing values', y_label='No. of features', bin_incr=0.05);
###Output
_____no_output_____
###Markdown
The biggest percentage of columns has 30 - 60 % missing values with two peaks at around 40% and just below 60%. There is also a spike of features with a percentage of missing values close to 100 %. Features with a percentage of missing values above 80% can be considered outliers that do not contain enough useful information, and they will be dropped for this analysis.
###Code
df.isnull().mean().sort_values()[-20::]
df.isnull().mean().sort_values()[-20::].plot(kind='barh', color='b');
high_nan_features = df.columns[df.isnull().mean() > 0.8]
high_nan_features
len(high_nan_features)
###Output
_____no_output_____
###Markdown
There are 13 features with NaNs above 80% - 6 of them refer to the `Excoder` category. `ExpectedSalary` has 95% missing values. Per Row
###Code
row_nan_perc = df.isnull().mean(axis=1)
hist_box_plot(row_nan_perc, x_label='Proportion of missing values', y_label='No. of rows', bin_incr=0.01);
###Output
_____no_output_____
###Markdown
Looks like a bimodal distribution with one distribution centered around the median of 30% missing values and the smaller one around 90% of missing values Data Preparation (Preprocessing) We will drop the features with a percentage of NaNs over 80% and keep the rows treating each feature we exam below individually.
###Code
# Drop the high NaN features
high_nan_features = df.columns[df.isnull().mean() > 0.8]
df = df.drop(columns=high_nan_features)
df.shape
###Output
_____no_output_____
###Markdown
Create data flag column
###Code
df.Professional.unique()
get_description('DeveloperType', schema=schema)
###Output
_____no_output_____
###Markdown
`DeveloperType` is the column of interest here. We will filter all the entries that contain the word Data or Machine to get the data professionals of all types. It could be interesting to see the following:- What kind of jobs do data professionals do, and at which percentages?- Which are the most common job descriptions that they declare along with their data profession? - Is there a special meaning to which job is mentioned first?
###Code
devtypes = []
for dev in df.DeveloperType.str.split(';').dropna():
for devtype in dev:
devtype = devtype.strip()
if devtype not in devtypes:
devtypes.append(devtype)
devtypes
###Output
_____no_output_____
###Markdown
Let's first see which are the possible values here by splitting the strings
###Code
data_devtypes = set([dev.strip() for dev in devtypes if 'Data scientist' in dev
or 'Machine' in dev
or 'statistics' in dev])
data_devtypes
###Output
_____no_output_____
###Markdown
Replace all the Nones with np.nan for consistency
###Code
dev_types = df.DeveloperType.str.split(';', expand = True).apply(lambda x: x.str.strip()).replace({None: np.nan})
###Output
_____no_output_____
###Markdown
Let's create a flag for `data scientists/machine learning specialists/Developer with a statistics or mathematics background`
###Code
# Create a flag column for data professionals
df['is_data'] = 0
for col in dev_types.columns:
df.loc[dev_types[col].isin(data_devtypes), 'is_data'] = 1
###Output
_____no_output_____
###Markdown
Questions (Modelling and Evaluation) Which proportion of developers works with data?
###Code
df.is_data.value_counts()
###Output
_____no_output_____
###Markdown
Looks like we have found 6353 individuals that listed a data job in any position.
###Code
# Sanity Test
df[['DeveloperType', 'is_data']].sample(10)
# Rename for better plotting
df['is_data'] = df.is_data.map({0:'Other Developer', 1: 'Data Science Developer'})
Groupby_OneCol_comp_plot(df, 'is_data', plt_style = 'seaborn-ticks', color_palette = ['darkcyan','darkgrey'], title='')
# Create a developer and a data science dataset
df_dev = df[df.is_data == 'Other Developer']
df_ds = df[df.is_data != 'Other Developer']
df.columns
###Output
_____no_output_____
###Markdown
Any differences in working habits? Difference in remote working habits
###Code
get_description('HomeRemote', schema=schema)
print_perc_nans(df, 'HomeRemote')
###Output
Percentage of NaNs in HomeRemote: 14.37 %
###Markdown
We will not use the rows with missing data as they are not included in the `groupby` operations.
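A quick toy illustration (not the survey data) of this pandas behaviour: rows whose group key is NaN are silently excluded from `groupby` aggregations.
###Code
# Toy frame: the row with a missing HomeRemote value drops out of the grouped counts
toy = pd.DataFrame({'HomeRemote': ['Never', np.nan, 'Never'], 'respondent': [1, 2, 3]})
toy.groupby('HomeRemote')['respondent'].count()
###Output
_____no_output_____
###Markdown
Now the actual distribution of `HomeRemote` answers: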
###Code
df.HomeRemote.value_counts()
group(df, 'is_data', 'HomeRemote')
group_plot(df, 'is_data', 'HomeRemote', prop=True, orient='h')
plt.xlabel('Percentage %')
plt.ylabel('');
###Output
_____no_output_____
###Markdown
Data scientists seem to have slightly better working habits with 5% more working remotely a few days per month and 5 % less that respond never. Let's see if that has something to do with job satisfaction. Difference in working hours
###Code
get_description('ProgramHobby', schema=schema)
print_perc_nans(df, 'ProgramHobby')
df.ProgramHobby.value_counts()
group(df, 'is_data', 'ProgramHobby')
# Total number of DS devs that program outside of work
group(df, 'is_data', 'ProgramHobby').iloc[:, -1][::2][1:].sum()
# Total number of other devs that program outside of work
group(df, 'is_data', 'ProgramHobby').iloc[:, -1][1:][::2][1:].sum()
group_plot(df, 'is_data', 'ProgramHobby', prop=True, orient='h')
plt.xlabel('Percentage %')
plt.ylabel('');
###Output
_____no_output_____
###Markdown
Difference in job satisfaction
###Code
get_description('JobSatisfaction', schema=schema)
print_perc_nans(df, 'ProgramHobby')
df.JobSatisfaction.value_counts()
group(df, 'is_data', 'JobSatisfaction')
group_plot(df, 'is_data', 'JobSatisfaction', orient='v')
plt.xlabel('Job Satisfaction Rating ')
plt.ylabel('Percentage % ')
plt.legend(title='', loc='upper left');
av_job_sat = df.groupby(['is_data'])['JobSatisfaction'].mean()
av_job_sat
(av_job_sat.diff()[-1] / av_job_sat[1]) * 100
###Output
_____no_output_____
###Markdown
Average job satisfaction is 7.11 for DS developers, compared to 6.93 for other developers, which is a small difference of 2.67%. Let's check these results for significance.
###Code
from scipy.stats import chi2_contingency
# Sort by rating so both rows of the contingency table align on the same categories
cont_table = np.array([df_dev.JobSatisfaction.value_counts().sort_index(),
                       df_ds.JobSatisfaction.value_counts().sort_index()]
                     )
chi2, p, dof, ex = chi2_contingency(cont_table)
print(f' chi2: {chi2}\n p: {p}\n dof: {dof}\n ex: {ex}')
###Output
chi2: 47.80191282167782
p: 6.745505774525362e-07
dof: 10
ex: [[7632.52422231 6770.96577175 4735.17282544 4089.4288191 3450.48211809
3185.38721022 1584.62180503 1389.1992768 754.50089162 396.79269863
316.92436101]
[1350.47577769 1198.03422825 837.82717456 723.5711809 610.51788191
563.61278978 280.37819497 245.8007232 133.49910838 70.20730137
56.07563899]]
###Markdown
Connecting to the datasetThe dataset was downloaded here https://www.kaggle.com/nolanbconaway/pitchfork-data. The link provides more information on the dataset and its tables
###Code
# Imports for the SQL connection, queries and plots below
import sqlite3
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

con = sqlite3.connect('database.sqlite')
query = """
SELECT r.reviewid,
title,
artist,
year,
score,
best_new_music,
pub_date,
pub_year,
genre,
label
FROM reviews r
LEFT JOIN genres g
ON r.reviewid = g.reviewid
LEFT JOIN labels l
ON r.reviewid = l.reviewid
LEFT JOIN years y
ON r.reviewid = y.reviewid
"""
df = pd.read_sql_query(query, con)
###Output
_____no_output_____
###Markdown
Exploration of the data setThe analysis we want to run will be around a potential bias Pitchfork has towards particular genres.The questions we want to ask ourselves are: _"What genre does Pitchfork review the most?"__"What genre does Pitchfork rate the highest?"_We'll only need a specific subset of columns for this.
###Code
#Selecting relevant columns and visualizing first rows
df = df[['reviewid','score','best_new_music','pub_year','genre']]
df.head()
#Checking for nulls
df.info()
#Seems like the genre column has some missing values, let's drop them
df.dropna(subset=['genre'], inplace=True)
df.info()
#since we'll want to run analysis of reviews throughout the years, let's check whether all years have enough data
df.pub_year.value_counts()
#seems like we can drop 2017 which only has 18 reviews
df = df[df.pub_year < 2017]
###Output
_____no_output_____
###Markdown
Data Modeling & ResultsNow that the data is ready, we can transform it in a way that makes it easy for us to answer the business questions we have. The dataset is at the individual review level, we want to aggregate it across the review years and the genres, aggregating the relevant metrics.
###Code
#group by the relevant columns and aggregate the metrics we're interested in
df_genre_year = (
df.groupby(['pub_year','genre'])
#we take the mean of the score (since it's at review level we don't have to weight it)
#the count of the reviews to get the total number of reviews per year and genre
#and the mean of the best_new_music column which was a binary column, hence returning its frequency
.agg({'score':'mean', 'reviewid':'count', 'best_new_music':'mean'})
.reset_index()
.sort_values(by=['pub_year','genre'])
)
#let's rename the reviewid column to something more relatable
df_genre_year.rename(columns={'reviewid':'n_of_reviews'}, inplace=True)
#visualize first rows
df_genre_year.head()
###Output
_____no_output_____
###Markdown
Let's visualize the first question we had _"Is there a genre Pitchfork reviews the most?"_
###Code
#We have to pivot the table in order to get a 100% stacked chart of the different genres per year
perc_values = df.pivot_table(
    values=['reviewid'],
    index='pub_year',
    columns='genre',
    aggfunc='count'  # count reviews per year and genre
)
perc_values = perc_values.div(perc_values.sum(1), axis=0)
#plotting stacked column to get a sense of the relative development of the genres reviewed
perc_values.plot(kind='bar', stacked=True, title='Percentage of Genres Reviewed', figsize=(15,10));
###Output
_____no_output_____
###Markdown
Seems like Rock and Electronic are the two genres Pitchfork reviews the most, with Rap becoming a popular one in most recent years! Let's now see if there are any differences across the scores.
###Code
#Let's group by genre and average out the results across all the years
data = df.groupby('genre')['score'].mean().reset_index().sort_values(by='score', ascending=False)
#We use seaborn to plot the data
sns.barplot(data=data, x='genre', y='score', linewidth=1.5,).set_title('Average Score 1999-2016')
#zooming in so the differences are clearer
plt.ylim(6.5,7.5)
#making the plot bigger
sns.set(rc={'figure.figsize':(13,8)});
###Output
_____no_output_____
###Markdown
Seems like Global, Experimental, Jazz and Folk/Country are the most favoured by the website, scoring consistently higher than the other genres. Another thing we could look at is the frequency with which an album is labeled as Best New Music
###Code
#The best new music column is a binary for each album row. By taking the mean we directly compute the frequency!
data = df.groupby('genre')['best_new_music'].mean().reset_index().sort_values(by='best_new_music', ascending=False)
#visualizing the results
sns.barplot(data=data, x='genre', y='best_new_music', linewidth=1.5).set_title('Percentage Albums categorized as Best New Music')
#setting figure size
sns.set(rc={'figure.figsize':(13,8)});
###Output
_____no_output_____
###Markdown
Import Libraries
###Code
import matplotlib.pyplot as plt
import pandas as pd
from io import StringIO
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
import numpy as np
from final_data import final_df
###Output
'Loading Final DataFrame'
###Markdown
Import Data
###Code
df = final_df.copy()
df
df.columns
###Output
_____no_output_____
###Markdown
Define x and y
###Code
variables_df = df.copy()
independents = ['Lead (TSP) LC', 'Carbon monoxide', 'Sulfur dioxide', 'Nitrogen dioxide (NO2)', 'Ozone', 'PM10 - LC', 'PM2.5 - Local Conditions', 'Percent of adults with less than a high school diploma, 2015-19', 'Percent of adults with a high school diploma only, 2015-19', "Percent of adults completing some college or associate's degree, 2015-19", "Percent of adults with a bachelor's degree or higher, 2015-19", 'PCTPOVALL_2019', 'PCTPOV017_2019', 'PCTPOV517_2019', 'Total Pop', 'Pop Pct 0-4', 'Pop Pct 5-9', 'Pop Pct 10-14', 'Pop Pct 15-19', 'Pop Pct 20-24', 'Pop Pct 25-29', 'Pop Pct 30-34', 'Pop Pct 35-39', 'Pop Pct 40-44', 'Pop Pct 45-49', 'Pop Pct 50-54', 'Pop Pct 55-59', 'Pop Pct 60-64', 'Pop Pct 65-69', 'Pop Pct 70-74', 'Pop Pct 75-79', 'Pop Pct 80-84', 'Pop Pct 85+', 'avgtempC', 'maxtempC', 'mintempC', 'sunHour', 'uvIndex', 'windspeedKmph', 'humidity', 'pressure', 'precipMM', 'cloudcover', 'distance', 'Series_Complete_Pop_Pct', 'Series_Complete_12PlusPop_Pct', 'Series_Complete_18PlusPop_Pct', 'Series_Complete_65PlusPop_Pct', 'Administered_Dose1_Pop_Pct', 'Administered_Dose1_Recip_12PlusPop_Pct', 'Administered_Dose1_Recip_18PlusPop_Pct']
# variables_df.drop(['Lead (TSP) LC'], axis=1, inplace=True)
remove = ['Ozone']
independents = [i for i in independents if i not in remove]
dependent = 'percentage_new'
# variables_df = variables_df[independents + [dependent]].copy()
variables_df.dropna(inplace=True)
x = sm.add_constant(variables_df[independents])
# x = variables_df[independents]
y = variables_df[dependent]
np.seterr(divide='ignore', invalid='ignore')
pollutants = ['lead', 'carbon monoxide', 'sulfur dioxide', 'nitrogen dioxide', 'ozone', 'PM10', 'PM2.5']
independent = pollutants[0]
dependent = 'percentage_new'
variables_df = df[[independent, dependent]]
variables_df.dropna(inplace=True)
x = sm.add_constant(variables_df[independent])
y = variables_df[dependent]
x = list(x)
y = list(y)
x = sm.add_constant(variables_df[independent].tolist())
y
###Output
_____no_output_____
###Markdown
Split into Training and Test Sets
###Code
list_to_remove = ['date', 'fips']
x_columns = [x for x in df.columns if x not in list_to_remove]
x_columns
# Split the x and y defined above into training and test sets (assumed 80/20 split)
x_train, x_test, y_train, y_test = train_test_split(x, np.asarray(y), test_size=0.2, random_state=42)
x_train.shape, y_train.shape
###Output
_____no_output_____
###Markdown
Train the Model
###Code
# model = sm.OLS(y_train, x_train)
x = [1, 2, 3]
y = [2, 4, 6]
model = sm.OLS(y, x)
result = model.fit()
###Output
_____no_output_____
###Markdown
Summary
###Code
result.summary()
###Output
C:\Users\natha\AppData\Local\Programs\Python\Python39\lib\site-packages\statsmodels\stats\stattools.py:74: ValueWarning: omni_normtest is not valid with less than 8 observations; 3 samples were given.
warn("omni_normtest is not valid with less than 8 observations; %i "
###Markdown
TODO:* error bars for the "by month" plots* top_x authors over time - all months not just top months* top_x_all authors over time* make by-month and per-month graphs more smooth
###Code
FILE_NAME = 'all'
# FILE_NAME = 'test'
OUTPUT_DATA_FILE = os.path.join('data', f'{FILE_NAME}_data.parquet')
OUTPUT_METADATA_FILE = os.path.join('data', f'{FILE_NAME}_meta.parquet')
AGGREGATION_MIN = 5
os.makedirs('pngs', exist_ok=True)
def plot_dataframe(dfs_to_plot, xvalues=None, title=None, xlabel=None, ylabel=None, ylim_bottom=0, yscale=None, lables=None, override_font=False, show_plot=True, output_file_name=None):
with plt.xkcd(scale=0.5):
# Make it into a list if it isn't
dfs_to_plot = [dfs_to_plot] if type(dfs_to_plot) is not list else dfs_to_plot
# The styalized font XKCD uses doesn't have very much unicode coverage, override font if you need to use unicode text
if override_font:
matplotlib.rc('font', family='Arial')
# Set fig size
plt.figure(figsize=(1920/80, 1080/80))
for df_to_plot in dfs_to_plot:
# Plot with labels if provided, else without
if xvalues is not None:
plt.plot(xvalues, df_to_plot)
else:
plt.plot(df_to_plot)
if ylim_bottom is not None:
plt.ylim(bottom=ylim_bottom)
# Style plot and add text
plt.grid(True, lw=0.5, zorder=0)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# Semilog-y plots
# TODO: is this working?
if yscale is not None:
plt.yscale(yscale)
# Add lables if provided
if lables is not None:
plt.legend(lables)
# Save and plot!
if output_file_name is not None:
plt.savefig(output_file_name)
if show_plot:
plt.show()
def plot_windowed_msg_per_min(msg_per_min, window_length=10, show_plot=True, output_file_name='test.png'):
# Pad with zeros before the start of the server for filtering
pad_length = int(math.ceil(window_length/2.0))
pre_pad = msg_per_min[:pad_length]
pre_pad = pre_pad.tshift(-pad_length)
pre_pad[:] = 0
post_pad = msg_per_min[-pad_length:]
post_pad = post_pad.tshift(pad_length)
post_pad[:] = msg_per_min[-pad_length:].mean()
# Filter with centered blackman-harris window function and slice off the pad data
filtered_msg_per_min = pd.concat([pre_pad, msg_per_min, post_pad])
filtered_msg_per_min = filtered_msg_per_min.rolling(window_length, center=True, win_type='blackmanharris').mean()[pad_length:-pad_length]
# Plot filtered data
plot_dataframe(
filtered_msg_per_min,
title=f'Averaged Smoothed Message Rate Over History ({window_length*AGGREGATION_MIN/60/24} day window)',
xlabel='Datetime (ref:UTC)',
ylabel='msg/min (avg)',
show_plot=show_plot,
output_file_name=output_file_name
)
def plot_windowed_msg_per_min2(msg_per_min, window_days=7, show_plot=True, output_file_name='test.png', lables=None):
# Pad with zeros before the start of the server, and mean of half the window after the last data point, for filtering
window_lengths = np.atleast_1d(window_days)*np.timedelta64(1, 'D').astype('timedelta64[m]')/np.timedelta64(AGGREGATION_MIN, 'm')
pad_lengths = list(np.round(np.ceil(np.array(window_lengths)/2.0)).astype('int'))
filtered_msg_per_min_list = []
for i in range(len(window_lengths)):
window_length = int(window_lengths[i])
pad_length = pad_lengths[i]
pre_pad = msg_per_min[:pad_length]
pre_pad = pre_pad.tshift(-pad_length)
pre_pad[:] = 0
post_pad = msg_per_min[-pad_length:]
post_pad = post_pad.tshift(pad_length)
post_pad[:] = msg_per_min[-pad_length:].mean()
# Filter with centered blackman-harris window function and slice off the pad data
filtered_msg_per_min = pd.concat([pre_pad, msg_per_min, post_pad])
filtered_msg_per_min = filtered_msg_per_min.rolling(window_length, center=True, win_type='blackmanharris').mean()[pad_length:-pad_length]
filtered_msg_per_min_list.append(filtered_msg_per_min)
# Plot filtered data
plot_dataframe(
filtered_msg_per_min_list,
# title=f'Averaged Smoothed Message Rate Over History ({[str(i*AGGREGATION_MIN/60/24) + " " for i in window_lengths]} day window)',
title=f'Averaged Smoothed Message Rate Over History ({window_lengths*AGGREGATION_MIN/60/24} day window)',
xlabel='Datetime (ref:UTC)',
ylabel='msg/min (avg)',
show_plot=show_plot,
output_file_name=output_file_name,
lables=lables,
)
data = pd.read_parquet(f'{OUTPUT_DATA_FILE}')
metadata = pd.read_parquet(f'{OUTPUT_METADATA_FILE}')
data
metadata
print(f'Number of messages per channel out of {len(data)} total messages:')
msgs_by_user = data['channel_name'].value_counts()
msgs_by_user
msg_per_min = pd.Series(1, index=data['creation_datetime']).resample(f'{AGGREGATION_MIN}min').count()/AGGREGATION_MIN
# msg_per_min
# window_length_7 = int(round(datetime.timedelta(days=7).total_seconds()/60/AGGREGATION_MIN))
# plot_windowed_msg_per_min(msg_per_min, window_length_7, output_file_name='pngs/msg_rate_7day_window.png')
plot_windowed_msg_per_min2(msg_per_min, window_days=7, output_file_name='pngs/msg_rate_7day_window.png')
# window_length_30 = int(round(datetime.timedelta(days=30).total_seconds()/60/AGGREGATION_MIN))
# plot_windowed_msg_per_min(msg_per_min, window_length_30, output_file_name='pngs/msg_rate_30day_window.png')
plot_windowed_msg_per_min2(msg_per_min, window_days=30, output_file_name='pngs/msg_rate_30day_window.png')
# window_length_365 = int(round(datetime.timedelta(days=365).total_seconds()/60/AGGREGATION_MIN))
# plot_windowed_msg_per_min(msg_per_min, window_length_365, output_file_name='pngs/msg_rate_365day_window.png')
plot_windowed_msg_per_min2(msg_per_min, window_days=365, output_file_name='pngs/msg_rate_365day_window.png')
plot_windowed_msg_per_min2(msg_per_min, [7, 30, 365], lables=['7 Day Window', '30 Day Window', '365 Day Window'], output_file_name='pngs/msg_rate_7_30_365day_window.png')
# Message rate over hours of the day
plot_dataframe(
msg_per_min.groupby(msg_per_min.index.hour).mean(),
title='Average message rate over hour of the day',
xlabel='Hour of the day (ref:UTC)',
ylabel='msg/min (avg)',
# yscale='log'
output_file_name='pngs/hour_of_day.png',
)
# Message rate over day of the week
plot_dataframe(
msg_per_min.groupby(msg_per_min.index.dayofweek).mean(),
xvalues=list(calendar.day_name),
title='Average message rate over day of the week',
xlabel='Day of the week (ref:UTC)',
ylabel='msg/min (avg)',
# yscale='log'
output_file_name='pngs/day_of_week.png',
)
# Message rate over day of the week
plot_dataframe(
msg_per_min.groupby(msg_per_min.index.weekofyear).mean(),
title='Message rate over week of the year',
xlabel='Week of the Year (ref:UTC)',
ylabel='msg/min (avg)',
# yscale='log'
output_file_name='pngs/week_of_year.png',
)
# comulitive sum of messages across users
plot_dataframe(
data['author'].value_counts().to_numpy()/len(data),
title='Fraction of total messages by user',
xlabel='Users',
ylabel='Fraction of total messages',
# yscale='log'
output_file_name='pngs/msg_by_user_fraction.png',
)
# comulitive sum of messages across users, reverse-sorted
plot_dataframe(
data['author'].value_counts()[::-1].cumsum().to_numpy()/len(data),
title='Cumsum of fraction of total messages by user',
xlabel='Users',
ylabel='Fraction of total messages',
# yscale='log'
output_file_name='pngs/msg_by_user_cumsum.png',
)
author_counts_by_month = [(n, g['author'].value_counts()) for n, g in data.groupby(pd.Grouper(key='creation_datetime', freq='M'))]
mean_msgs_per_author_counts_by_month = pd.Series([i[1].mean() for i in author_counts_by_month], [i[0] for i in author_counts_by_month])
active_users_by_month = pd.Series([i[1].count() for i in author_counts_by_month], [i[0] for i in author_counts_by_month])
print('% of total messages for the top 10 most prolific authors:')
msgs_by_user = data['author'].value_counts()
print(msgs_by_user[:10]/len(data)*100)
# print('')
# # Replace "author" with the author string of your choice, the format is "name#1234"
# print(f'msgs by "author": {msgs_by_user["author"]/len(data)*100}% #{msgs_by_user.index.get_loc("author")+1} on the server')
# Average msgs per active user per month
plot_dataframe(
active_users_by_month,
title='Active users per month',
xlabel='Datetime (ref:UTC)',
ylabel='Active Users',
# yscale='log'
output_file_name='pngs/active_users_per_month.png',
)
# Average msgs per active user by month
plot_dataframe(
active_users_by_month.groupby(active_users_by_month.index.month).mean(),
title='Active users',
xlabel='Month (ref:UTC)',
ylabel='Active users/month',
# yscale='log'
output_file_name='pngs/active_users_by_month.png',
)
# Average msgs per active user per month
plot_dataframe(
mean_msgs_per_author_counts_by_month,
title='Average messages per active user per month',
xlabel='Datetime (ref:UTC)',
ylabel='Average messages/user',
# yscale='log'
output_file_name='pngs/msg_per_user_per_month.png',
)
# Average msgs per active user by month
plot_dataframe(
mean_msgs_per_author_counts_by_month.groupby(mean_msgs_per_author_counts_by_month.index.month).mean(),
title='Average msgs per active user by month',
xlabel='Month (ref:UTC)',
ylabel='Average messages/user',
# yscale='log'
output_file_name='pngs/msg_per_user_by_month.png',
)
TOP_N_PER_MONTH = 1
top_author_counts_by_month = [(i, j[0:TOP_N_PER_MONTH]) for i, j in author_counts_by_month]
all_top_authors = set()
for i, j in top_author_counts_by_month:
[all_top_authors.add(i) for i in j.index.to_list()]
print(f'All users that have been in the top {TOP_N_PER_MONTH} authors in any given month in the history of the server:')
pprint(sorted(list(all_top_authors)))
# Init the dataframe
top_authors_across_months_count = pd.DataFrame(index=[i for i, j in top_author_counts_by_month])
for i in all_top_authors:
top_authors_across_months_count[i] = 0.0
# over all months and the top authors of all time, calculate the number of messages send, zero if they had no messages that month
for i, j in author_counts_by_month:
for k in all_top_authors:
top_authors_across_months_count.at[i, k] = j.get(k, 0)
# Average msgs per active user by month
plot_dataframe(
top_authors_across_months_count/(30*24),
title=f'Average msgs per hour for each of the top {TOP_N_PER_MONTH} users in any month',
xlabel='Datetime (ref:UTC)',
ylabel='messages/hour',
# yscale='log'
output_file_name='pngs/rate_top_bymonth_users.png',
lables=all_top_authors,
override_font=True,
)
# Init the dataframe
top_authors_across_months_perc = pd.DataFrame(index=[i for i, j in top_author_counts_by_month])
for i in all_top_authors:
top_authors_across_months_perc[i] = 0.0
# over all months and the top authors of all time, calculate the percentage of messages sent by a particular user, zero if they had no messages that month
for i, j in author_counts_by_month:
for k in all_top_authors:
top_authors_across_months_perc.at[i, k] = 100*j.get(k, 0)/j.sum()
# Average msgs per active user by month
plot_dataframe(
top_authors_across_months_perc,
title=f'Percentage of total msgs per month for each of the top {TOP_N_PER_MONTH} users in any month',
xlabel='Datetime (ref:UTC)',
ylabel='Percent of total messages/month',
# yscale='log'
output_file_name='pngs/perc_top_bymonth_users.png',
lables=all_top_authors,
override_font=True,
)
TOP_N_EVER = 5
top_n_users = msgs_by_user[:TOP_N_EVER].index.to_list()
# Init the dataframe
top_authors_count = pd.DataFrame(index=[i for i, j in top_author_counts_by_month])
for i in top_n_users:
top_authors_count[i] = 0
# over all months and the top authors of all time, calculate the number of messages send, zero if they had no messages that month
for i, j in author_counts_by_month:
for k in top_n_users:
top_authors_count.at[i, k] = j.get(k, 0)
# Average msgs per active user by month
plot_dataframe(
top_authors_count/(30*24),
title=f'Average msgs per hour for each of the top {TOP_N_EVER} authors ever',
xlabel='Datetime (ref:UTC)',
ylabel='messages/hour',
# yscale='log'
output_file_name='pngs/perc_top_users.png',
lables=msgs_by_user[:TOP_N_EVER].index.to_list(),
override_font=True,
)
# Init the dataframe
top_authors_perc = pd.DataFrame(index=[i for i, j in top_author_counts_by_month])
for i in top_n_users:
top_authors_perc[i] = 0.0
# over all months and the top authors of all time, calculate the percentage of messages sent by a particular user, zero if they had no messages that month
for i, j in author_counts_by_month:
for k in top_n_users:
top_authors_perc.at[i, k] = 100.0*j.get(k, 0)/float(j.sum())
# Average msgs per active user by month
plot_dataframe(
top_authors_perc,
title=f'Percent of total msgs per month for each of the top {TOP_N_EVER} users of all time',
xlabel='Datetime (ref:UTC)',
ylabel='Percentage of messages/month',
# yscale='log'
output_file_name='pngs/rate_top_users.png',
lables=msgs_by_user[:TOP_N_EVER].index.to_list(),
override_font=True,
)
###Output
_____no_output_____
###Markdown
FASTGenomics Scanpy + R Analysis You might want to describe your analysis briefly here, if you are planning to share it.
###Code
# Place all your Python imports here.
import logging
import fgread
import scanpy as sc
import scipy.sparse as spsp
# do not delete these imports as they are required for R support
import rpy2.rinterface_lib.callbacks
from rpy2.robjects import pandas2ri
import anndata2ri
%load_ext rpy2.ipython
%%R
# Place all your R library imports here
suppressPackageStartupMessages({
library(scran)
})
# Place all your parameter values here.
sc.settings.verbosity = 1 # scanpy verbosity: errors (0), warnings (1), info (2), hints (3)
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) # Ignore R warning messages
# Automatically convert rpy2 outputs to pandas dataframes
pandas2ri.activate()
anndata2ri.activate()
###Output
_____no_output_____
###Markdown
Raw DataFirst, the raw dataset(s) will be read into an AnnData object(s). You can describe your data here using markdown or delete this text.
###Code
# Print metadata of all attached datasets
ds_info = fgread.ds_info()
ds_info
# Load the attached dataset
data = fgread.load_data() # If multiple datasets are attached, you have to select one by its id or tile
data
###Output
_____no_output_____
###Markdown
PreprocessingYou can describe your preprocessing here or delete this text.If this is your first analysis, you might want to have a look at our tutorials onGetting Started with FASTGenomics Lab,the data loading (How to Load Data in FASTGenomics (Python)),Scanpy with R support (Advanced Scanpy with R Support (rpy2)), or theBest Practices Preprocessing Notebook.
###Code
# This is an example of how to prepare AnnData matrices for processing with R
if spsp.issparse(data.X):
data = data.X.T.todense() # if X in anndata is sparse
%%R -i data -o clusters
# You can use R code in cells with specified inputs and outputs
clusters <- quickCluster(data)
# The outputs are then available in Python
clusters
###Output
_____no_output_____
###Markdown
ContextThis is one of the datasets provided by the National Cardiovascular Disease Surveillance System.The system is designed to integrate multiple indicators from many data sources to provide a comprehensive picture of the public health burden of CVDs and associated risk factors in the United States. ContentThe data are organized by location (national, regional, state, and selected sites) and indicator, and they include CVDs (e.g., heart failure) and risk factors (e.g., hypertension). The data can be plotted as trends and stratified by age group, sex, and race/ethnicity.2011 to present. BRFSS is a continuous, state-based surveillance system that collects information about modifiable risk factors for chronic diseases and other leading causes of death.
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from textwrap import wrap
%matplotlib inline
from subprocess import check_output
print(check_output(["ls", "data/"]).decode("utf8"))
from IPython.display import display
df = pd.read_csv('data/dataset.csv')
pd.options.display.max_columns = None
df.head()
# looking at the unique values for the columns
df['LocationID'].unique()
# looking at the column names
df.columns
# LocationAbbr: Abbreviation of the State
# LocationDesc: Name of the State
# (drop) Datasource: Just BRFSS - can drop this since all the same
# PriorityArea1: Contains 'None' and 'Million Hearts'
# PriorityArea2: Contains 'None' and 'ABCS'
# PriorityArea3: Contains 'None' and 'Healthy People 2020'
# PriorityArea4: Contains only 'None'
# Category: Contains 'Cardiovascular Diseases' and 'Risk Factors'
# Topic: Topics of diagnosis of the person?
# Indicator: 'Prevalence of' blah blah blah with similar to topics? - have to check
# Data_Value_Type: Contains 'Age-Standardized' and 'Crude'
# Data_Value_Unit: Contains only 'Percent (%)'
# Data_Value: Numeric value (not sure what it stands for)
# Data_Value_Alt: Slight different to 'Data_Value', negative values here but 'nan' for 'Data_Value'
# Data_Value_Footnote_Symbol: Contains 'nan', '~', and '-'
# Data_Value_Footnote: Contains 'nan', 'Statistically unstable...', and 'Data not available'
# Confidence_Limit_Low: Numeric, similar to 'Data_Value'
# Confidence_Limit_High: Numeric
# Break_Out_Category: Contains 'Overall', 'Gender', 'Age', and 'Race'
# Break_out: Contains 'Overall', 'Male', 'Female', '18-24', '25-44', '45-64', '65+', '35+', '75+', 'Non-Hispanic White', 'Non-Hispanic Black', 'Non-Hispanic Asian', 'Hispanic', 'Other', and '20-24'
# CategoryID: Contains 'C1' and 'C2', not sure what it means
# TopicID: Contains T values, not sure what it means
# IndicatorID: Contains BR numbers, not sure what it means
# Data_Value_TypeID: Contains 'AgeStdz' and 'Crude' - same as 'Data_Value_Type'
# BreakoutCategoryID: Contains BOC values, not sure what it means
# BreakOutID: Abbreviation of Breakout
# LocationID: Corresponds to location
# Geolocation: Coordinates
df.describe()
from matplotlib import style
style.use('dark_background')
plt.figure(figsize=(12,6))
sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')
###Output
_____no_output_____
###Markdown
Flow distance embedding- window: 5, i.e. within one sentence the maximum distance between the two words of a pair is 5.- Word order is taken into account; for example, in "We may encounter many defeats, but we must not be defeated.", the pair encounter-defeats is incremented by 1, while the pair defeats-encounter is not.
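A minimal, hypothetical sketch of the counting scheme described above (this is not the code that produced `dist_avg.npy` and `count.npy`, only an illustration of the window-5, order-preserving pair counting):
###Code
from collections import defaultdict

# Hypothetical helper: accumulate distances and counts of ordered word pairs within a window of 5
def accumulate_pair_distances(tokens, window=5):
    dist_sum = defaultdict(float)   # total distance per ordered pair (w1 appears before w2)
    pair_count = defaultdict(int)   # number of co-occurrences per ordered pair
    for i, w1 in enumerate(tokens):
        for j in range(i + 1, min(i + window + 1, len(tokens))):
            dist_sum[(w1, tokens[j])] += j - i
            pair_count[(w1, tokens[j])] += 1
    return dist_sum, pair_count

sentence = "we may encounter many defeats but we must not be defeated".split()
dist_sum, pair_count = accumulate_pair_distances(sentence)
dist_sum[('encounter', 'defeats')], pair_count[('encounter', 'defeats')]  # counted; the reversed pair is not
###Output
_____no_output_____
###Markdown
The precomputed average-distance and count matrices are loaded and inspected below.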
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
avgdist_file = './data/dist_avg.npy'
count_file = './data/count.npy'
avgdist = np.load(avgdist_file)
count = np.load(count_file)
avgdist_flat = avgdist.flatten()
avgdist_flat_nonz = np.sort(avgdist_flat[avgdist_flat.nonzero()])
plt.plot(avgdist_flat_nonz)
plt.title('sorted data sequence')
plt.ylabel('value')
plt.show()
bins = np.linspace(np.ceil(np.min(avgdist_flat_nonz)), np.floor(np.max(avgdist_flat_nonz)), 30)
plt.hist(avgdist_flat_nonz, bins=bins, alpha=0.5)
plt.title('Histogram')
plt.xlabel('Value (30 evenly spaced bins)')
plt.ylabel('Count')
plt.show()
avgdist_var = np.var(avgdist_flat_nonz)
avgdist_mean = np.mean(avgdist_flat_nonz)
print 'Mean: %f' % avgdist_mean
print 'Variance: %f' % avgdist_var
avgdist_min = np.min(avgdist_flat_nonz)
min_idx = np.where(avgdist == 1.0)
print 'Minimum: %f' % avgdist_min
print 'Number of word pairs with average distance %f: %d' % (avgdist_min, len(min_idx[0]))
###Output
Minimum: 1.000000
Number of word pairs with average distance 1.000000: 12814
###Markdown
Result analysisWord pairs with a small avg_dist may not occur very often but always appear together. Sorting by large counts is more meaningful in practice.
###Code
from train import Vocabulary
vocab = Vocabulary()
words = []
for i, val in enumerate(zip(min_idx[0], min_idx[1])):
key_word = '%s-%s' % (vocab.get_word(val[0]), vocab.get_word(val[1]))
count_word = count[val[0], val[1]]
words.append({'key':key_word, 'cnt':count_word})
words.sort(key=lambda x:x['cnt'], reverse=True)
for i in words[:100]:
print i
count_flat = count.flatten()
val = np.partition(count_flat, -100)[-100:]
words_cnt = []
for i in val[::-1]:
idx_0, idx_1 = np.where(i==count)
key_word = '%s-%s' % (vocab.get_word(idx_0[0]), vocab.get_word(idx_1[0]))
avgdist_word = avgdist[idx_0[0], idx_1[0]]
words_cnt.append({'key':key_word, 'cnt':i, 'avgdist': avgdist_word})
words_cnt.sort(key=lambda x:x['cnt'], reverse=True)
for i in words_cnt:
print i
###Output
{'cnt': 1102693.0, 'key': 'united-states', 'avgdist': 1.0093471165591874}
{'cnt': 760376.0, 'key': 'new-york', 'avgdist': 1.0403366229339168}
{'cnt': 468649.0, 'key': 'high-school', 'avgdist': 1.0688297638531181}
{'cnt': 392738.0, 'key': 'world-war', 'avgdist': 1.0189693892620526}
{'cnt': 276908.0, 'key': 'may-refer', 'avgdist': 1.1273744348303407}
{'cnt': 265955.0, 'key': 'also-known', 'avgdist': 1.0779455170987573}
{'cnt': 211612.0, 'key': 'new-zealand', 'avgdist': 1.0130852692663932}
{'cnt': 201213.0, 'key': 'war-ii', 'avgdist': 1.0226227927618992}
{'cnt': 200990.0, 'key': 'los-angeles', 'avgdist': 1.0055077367033185}
{'cnt': 196407.0, 'key': 'world-ii', 'avgdist': 2.0065985428217936}
{'cnt': 195522.0, 'key': 'new-city', 'avgdist': 2.0166477429649858}
{'cnt': 192153.0, 'key': 'first-time', 'avgdist': 1.0718802204493294}
{'cnt': 190443.0, 'key': 'took-place', 'avgdist': 1.0701942313448118}
{'cnt': 187915.0, 'key': 'york-city', 'avgdist': 1.0241864672857408}
{'cnt': 177112.0, 'key': 'two-years', 'avgdist': 1.1502100365870185}
{'cnt': 165171.0, 'key': 'united-kingdom', 'avgdist': 1.0456496600492822}
{'cnt': 156134.0, 'key': 'made-debut', 'avgdist': 2.7813800965837037}
{'cnt': 147599.0, 'key': 'years-later', 'avgdist': 1.0655695499292002}
{'cnt': 144342.0, 'key': 'air-force', 'avgdist': 1.0444846267891534}
{'cnt': 135557.0, 'key': 'national-team', 'avgdist': 1.6866779288417419}
{'cnt': 130534.0, 'key': 'football-league', 'avgdist': 1.0818024422755756}
{'cnt': 128255.0, 'key': 'prime-minister', 'avgdist': 1.0330045612256833}
{'cnt': 122196.0, 'key': 'summer-olympics', 'avgdist': 1.0257373400111296}
{'cnt': 122174.0, 'key': 'world-cup', 'avgdist': 1.0515412444546304}
{'cnt': 120763.0, 'key': 'new-jersey', 'avgdist': 1.1022829840265644}
{'cnt': 119705.0, 'key': 'years-age', 'avgdist': 2.0365732425546135}
{'cnt': 119549.0, 'key': 'de-la', 'avgdist': 1.1842591740625183}
{'cnt': 118006.0, 'key': 'median-income', 'avgdist': 1.0587004050641493}
{'cnt': 117101.0, 'key': 'san-francisco', 'avgdist': 1.0201193841213996}
{'cnt': 114191.0, 'key': 'three-years', 'avgdist': 1.1805045931815992}
{'cnt': 112655.0, 'key': 'south-africa', 'avgdist': 1.0598553104611423}
{'cnt': 110945.0, 'key': 'civil-war', 'avgdist': 1.0329081977556447}
{'cnt': 110460.0, 'key': 'north-america', 'avgdist': 1.1023085279739273}
{'cnt': 110385.0, 'key': 'rural-district', 'avgdist': 1.6351315849073698}
{'cnt': 109746.0, 'key': 'village-district', 'avgdist': 3.8112915277094381}
{'cnt': 109451.0, 'key': 'hong-kong', 'avgdist': 1.015312788371052}
{'cnt': 109029.0, 'key': 'railway-station', 'avgdist': 1.1718533601151988}
{'cnt': 108805.0, 'key': 'head-coach', 'avgdist': 1.1326685354533339}
{'cnt': 108643.0, 'key': 'best-known', 'avgdist': 1.0095818414439954}
{'cnt': 106882.0, 'key': 'football-team', 'avgdist': 1.1508579555023297}
{'cnt': 103638.0, 'key': 'following-year', 'avgdist': 1.0483992358015399}
{'cnt': 102599.0, 'key': 'world-championships', 'avgdist': 1.6250060916773068}
{'cnt': 100959.0, 'key': 'state-university', 'avgdist': 1.1624223694767182}
{'cnt': 100894.0, 'key': 'national-historic', 'avgdist': 2.6839653497730289}
{'cnt': 100845.0, 'key': 'census-population', 'avgdist': 2.4456839704497}
{'cnt': 100527.0, 'key': 'studio-album', 'avgdist': 1.0718016055388104}
{'cnt': 99717.0, 'key': 'became-first', 'avgdist': 2.1817343080919001}
{'cnt': 99680.0, 'key': 'supreme-court', 'avgdist': 1.0484249598715891}
{'cnt': 99118.0, 'key': 'years-older', 'avgdist': 3.8679250993765009}
{'cnt': 99025.0, 'key': 'average-size', 'avgdist': 1.9921938904317091}
{'cnt': 98915.0, 'key': 'school-school', 'avgdist': 3.4493858363241165}
{'cnt': 98811.0, 'key': 'can-also', 'avgdist': 1.0773598081185294}
{'cnt': 98530.0, 'key': 'also-used', 'avgdist': 1.4339186034710241}
{'cnt': 97546.0, 'key': 'school-district', 'avgdist': 1.1931191437885715}
{'cnt': 97451.0, 'key': 'district-county', 'avgdist': 3.0161619685790808}
{'cnt': 97159.0, 'key': 'one-two', 'avgdist': 2.6380880824215978}
{'cnt': 96703.0, 'key': 'film-directed', 'avgdist': 1.5585038726823366}
{'cnt': 96575.0, 'key': 'may-also', 'avgdist': 1.1641729225990163}
{'cnt': 95517.0, 'key': 'age-older', 'avgdist': 2.0184888553869991}
{'cnt': 94680.0, 'key': 'north-carolina', 'avgdist': 1.0608259400084494}
{'cnt': 94484.0, 'key': 'years-old', 'avgdist': 1.0466322340290419}
{'cnt': 93606.0, 'key': 'national-register', 'avgdist': 1.0252334252077859}
{'cnt': 91420.0, 'key': 'african-american', 'avgdist': 1.9708160140013127}
{'cnt': 91377.0, 'key': 'film-festival', 'avgdist': 1.0901211464591747}
{'cnt': 90195.0, 'key': 'one-first', 'avgdist': 3.062486834081712}
{'cnt': 89197.0, 'key': 'album-released', 'avgdist': 2.5591443658419006}
{'cnt': 87747.0, 'key': 'district-district', 'avgdist': 3.2255461725187184}
{'cnt': 85007.0, 'key': 'two-later', 'avgdist': 2.0740527250697003}
{'cnt': 84918.0, 'key': 'national-league', 'avgdist': 1.7398666949292259}
{'cnt': 84478.0, 'key': 'historic-places', 'avgdist': 1.0030303747721301}
{'cnt': 84372.0, 'key': 'register-historic', 'avgdist': 2.0008652159484188}
{'cnt': 84282.0, 'key': 'television-series', 'avgdist': 1.1532474312427328}
{'cnt': 84107.0, 'key': 'first-season', 'avgdist': 1.7450866158583709}
{'cnt': 83373.0, 'key': 'new-south', 'avgdist': 1.2435200844398067}
{'cnt': 83246.0, 'key': 'register-places', 'avgdist': 2.9999639622324197}
{'cnt': 83244.0, 'key': 'general-election', 'avgdist': 1.0228364807073183}
{'cnt': 82775.0, 'key': 'national-places', 'avgdist': 3.9974267592872246}
{'cnt': 81946.0, 'key': 'south-wales', 'avgdist': 1.0285797964513217}
{'cnt': 81269.0, 'key': 'early-century', 'avgdist': 2.1853105120033467}
{'cnt': 80301.0, 'key': 'music-video', 'avgdist': 1.0784797200532994}
{'cnt': 79868.0, 'key': 'world-championship', 'avgdist': 1.6569840236390043}
{'cnt': 79737.0, 'key': 'first-two', 'avgdist': 1.5420193887404843}
{'cnt': 79282.0, 'key': 'first-round', 'avgdist': 1.1063797583310209}
{'cnt': 78219.0, 'key': 'four-years', 'avgdist': 1.1289456525908028}
{'cnt': 77397.0, 'key': 'can-used', 'avgdist': 2.2278512087031799}
{'cnt': 77107.0, 'key': 'soviet-union', 'avgdist': 1.0200241223235245}
{'cnt': 77043.0, 'key': 'washington-dc', 'avgdist': 1.0245966538166997}
{'cnt': 75994.0, 'key': 'human-rights', 'avgdist': 1.0461483801352738}
{'cnt': 75987.0, 'key': 'debut-album', 'avgdist': 1.2821403661152566}
{'cnt': 75756.0, 'key': 'two-one', 'avgdist': 2.9820740271397645}
{'cnt': 74052.0, 'key': 'every-females', 'avgdist': 2.0007832334035545}
{'cnt': 73820.0, 'key': 'can-found', 'avgdist': 2.1565564887564346}
{'cnt': 73732.0, 'key': 'many-years', 'avgdist': 1.1092470026582759}
{'cnt': 73710.0, 'key': 'roman-catholic', 'avgdist': 1.0117351784018451}
{'cnt': 73566.0, 'key': 'second-war', 'avgdist': 2.0189897507000518}
{'cnt': 73307.0, 'key': 'hall-fame', 'avgdist': 2.0010230946567176}
{'cnt': 72635.0, 'key': 'award-best', 'avgdist': 2.1954567357334618}
{'cnt': 72531.0, 'key': 'took-part', 'avgdist': 1.1381478264466227}
{'cnt': 71868.0, 'key': 'several-including', 'avgdist': 2.7977959592586408}
{'cnt': 71749.0, 'key': 'five-years', 'avgdist': 1.1184128001783997}
###Markdown
Microsoft Movie Studios Analysis**Authors:** Armun Shakeri*** OverviewThis project analyzes current movie trends, budgets, gross income, and ratings in order to help Microsoft Studios best decide which movies to produce in its new upcoming studio. Analysis will show that if Microsoft Studios produces movies that are in high demand, positive gross profit will follow. Business ProblemMicrosoft is seeking to enter the movie industry and does not know what movies to create. We need to analyze what types of movies are currently trending, the most popular movie genres, the highest grossing movies of all time, the highest budgeted movies, and movie title basics. For Microsoft's new movie studio to be profitable we need to pick a movie genre that is currently in demand and identify which movies had the highest gross incomes; doing this ensures that the movie will have a positive reception and be profitable. Data Understanding The following files, imported from various film rating institutions, will help identify what type of movie Microsoft Studios should create next. These files include information on income, genres, ratings, and movie budgets. We intend to use variables mostly related to domestic gross income since we want Microsoft's first film to be profitable within the United States.
###Code
# Import standard packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sqlite3
%matplotlib inline
# Here we run code to explore the data
income = pd.read_csv('zippedData/bom.movie_gross.csv.gz', compression='gzip', error_bad_lines=False)
basics = pd.read_csv('zippedData/imdb.title.basics.csv', error_bad_lines=False)
ratings = pd.read_csv('zippedData/imdb.title.ratings.csv.gz', compression='gzip', error_bad_lines=False)
budgets = pd.read_csv('zippedData/tn.movie_budgets.csv.gz', compression='gzip', error_bad_lines=False)
info = pd.read_csv('zippedData/rt.movie_info.tsv.gz', compression='gzip', sep='\t', error_bad_lines=False)
# the target variables here are title and domestic_gross
income.info()
#the target variables are primary title and genre
basics.head()
ratings.head()
budgets.info()
#the target variables are movie, production_budget, and domestic_gross
info.head()
###Output
_____no_output_____
###Markdown
Data Preparation We are going to drop studio since microsoft will be using their own, year, and foreign_gross(income) since it is irrelevant in analyzing gross profit for a new movie within the United States.
###Code
income.drop(['studio', 'year', 'foreign_gross'], axis=1, inplace=True)
income.sort_values('domestic_gross', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
The new film will focus on the domestic US market, so for the budgets data the release_date and worldwide_gross variables will be dropped. The domestic_gross column in budgets will also be dropped, since we are going to combine budgets with the income data, which already contains domestic gross.
###Code
info.drop(['id','synopsis','genre','director','writer','theater_date','dvd_date','currency','runtime', 'studio'],axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
For the info data frame we will drop 'synopsis', 'genre', 'director', 'writer', 'theater_date', 'dvd_date', 'currency', 'runtime', and 'studio', and we will drop all NA values. These columns are irrelevant for the analysis.
###Code
info=info.dropna()
info.sort_values(by='rating', ascending=True).head(20)
info.info()
info.box_office=info.box_office.str.replace(",","")
info.box_office=info.box_office.astype(int)
info.info()
budgets.drop(['id','release_date', 'domestic_gross', 'worldwide_gross'], axis=1, inplace=True)
budgets.head()
###Output
_____no_output_____
###Markdown
In budgets we will drop id, release_date, domestic_gross, and worldwide_gross since they are irrelevant for analysis.
###Code
#rename 'movie' columns to title to merge income and budgets
budgets = budgets.rename(columns={'movie':'title'})
budgets.head()
#merge income and budgets by movie titles
movie_income_df = pd.merge(income,
budgets,
on=['title'],
how='left')
movie_income_df.dropna().head()
# production budget will be converted to integer later
# ("$" and commas removed using the str.replace method)
# sort new dataframe by highest domestic gross income
movie_income_df.sort_values(by='domestic_gross', ascending=False).dropna().head(20)
###Output
_____no_output_____
###Markdown
We will need to combine basics and ratings using the common variable 'tconst'. Doing so, we will be able to analyze the ratings of different movies in specific genres. This will allow us to decide which genre Microsoft Studios should focus on when creating the new movie.
###Code
basics.drop(['start_year', 'runtime_minutes', 'original_title'], axis=1, inplace=True)
ratings.drop(['numvotes'], axis=1, inplace=True)
#renamed 'primary_title' to 'title'
basics = basics.rename(columns={'primary_title':'title'})
basics.head()
ratings.head()
###Output
_____no_output_____
###Markdown
In order to accurately understand the ratings of each title we will need to combine basics and ratings by tconst.
###Code
#merge movie ratings and basics, and drop all NaN values in average rating
movie_basics_df = pd.merge(basics,
ratings,
on=['tconst'],
how='left')
movie_basics_df.sort_values(by='averagerating', ascending=False).dropna().head(20)
###Output
_____no_output_____
###Markdown
Finally we will combine movie_income_df and movie_basics_df. This gives us a final dataframe with all the data we will need included within a central data set.
###Code
movie_combined_df = pd.merge(movie_income_df,
movie_basics_df,
on=['title'],
how='left')
movie_combined_df.drop(['tconst'], axis=1, inplace=True)
movie_combined_df = movie_combined_df.sort_values(by='domestic_gross', ascending=False).dropna().head(30)
movie_combined_df
###Output
_____no_output_____
###Markdown
In order to make modeling this data easier, we will remove all "$" and "," from production_budget. We will also change the variable type of production_budget from string to integer.
###Code
movie_combined_df.production_budget=movie_combined_df.production_budget.str.replace("$","")
movie_combined_df.production_budget=movie_combined_df.production_budget.str.replace(",","")
movie_combined_df.production_budget=movie_combined_df.production_budget.astype(int)
movie_combined_df.head(30)
###Output
_____no_output_____
###Markdown
The above data set is the finished data set we will use in modeling. It has been arranged from highest grossing film to lowest and also shows each film's production budget, genre and average rating.

Data Modeling
###Code
movie_combined_df.describe()
###Output
_____no_output_____
###Markdown
Calculating summary statistics (mean, median, mode, etc.) will help create a general idea of where the movie industry currently stands in today's market. This is a good baseline to start the analysis.
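Since describe() only summarizes the numeric columns, here is a small hedged sketch (reusing movie_combined_df from above, not part of the original analysis) that also reports the most common genre and the medians of the key numeric fields:
###Code
# Sketch: categorical mode and medians as a complement to describe().
# Uses movie_combined_df exactly as built in the cells above.
print(movie_combined_df['genres'].mode())
print(movie_combined_df[['production_budget', 'domestic_gross', 'averagerating']].median())
###Output
_____no_output_____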
###Code
productionbudgetloop = []
for production_budget in movie_combined_df['production_budget']:
if production_budget <= 150000000:
productionbudgetloop.append(1)
elif production_budget <= 175000000:
productionbudgetloop.append(2)
elif production_budget <= 200000000:
productionbudgetloop.append(3)
else:
productionbudgetloop.append(4)
movie_combined_df['production1'] = productionbudgetloop
movie_combined_df.head()
#Figure 1
#in this figure we are comparing production budgets to ratings. Production budgets have been binned
#into four groups based on the quartiles above (25th, 50th, 75th percentile and max).
g = movie_combined_df.groupby('production1').mean()
sns.barplot(x=g.index, y= "averagerating", data=g, color= 'blue')
plt.title("production budgets compared to ratings")
###Output
_____no_output_____
###Markdown
As shown by Figure 1, the higher the film's production budget, the higher the rating the film tends to receive.
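To put a rough number on this relationship, a minimal sketch (not in the original analysis) computes the correlation between budget and rating for the titles kept in movie_combined_df:
###Code
# Sketch: linear correlation between production budget and average rating
# among the top-grossing titles retained above.
movie_combined_df[['production_budget', 'averagerating']].corr()
###Output
_____no_output_____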
###Code
movie_combined_df.describe()
#Figure 2
#movie_combined_df.groupby('title').sum().plot(kind='bar')
sns.barplot(y="title", x="production_budget", data=movie_combined_df[:20], color= 'blue')
plt.title("Film(s) Production Budget")
###Output
_____no_output_____
###Markdown
Figure 2 shows the production budgets of the top 20 films. Among these highest-grossing titles, movies geared more towards the family demographic seem to have the highest domestic gross profits.
###Code
#Figure 3
new_movie=movie_combined_df.groupby("genres").agg('mean')
new_movie
###Output
_____no_output_____
###Markdown
As shown in Figure 3 (and plotted in Figure 4 below), the Action, Adventure, Animation genre has the highest domestic gross income among all the genres presented and also has the highest rating.
###Code
new_movie.reset_index(inplace=True)
new_movie.info()
#Figure 4
sns.barplot(y="genres", x="domestic_gross", data=new_movie, color= 'blue')
plt.title("Genres and Their Respective Domestic Gross Profit")
###Output
_____no_output_____
###Markdown
Figure 4 shows the domestic gross profit among all the genres presented. We also see in this figure that action, adventure, and animation has the highest domestic gross profit by a fairly wide margin.
###Code
#Figure 5
sns.countplot(y="genres", data=movie_combined_df[:100], color= 'blue')
plt.xlabel("Count of Films")
plt.ylabel("Genres")
plt.title("Count of Film Genres")
#this count plot will also help us determine which genres dominate; right off the bat
#we see that the majority of films have some sort of action aspect.
#50% of top 20 films fall under the Action, Adventure, Sci-Fi category
###Output
_____no_output_____
###Markdown
Figure 5 is a count plot that shows the genres of the top-grossing films. Currently in today's movie market there is a saturation of action, adventure and sci-fi movies. Microsoft should try to differentiate itself by creating a movie that falls within a different genre.
###Code
#Figure 6
#We are trying to create 2 histograms: one for movies rated above 7.0 and one for movies rated below 7.0
#(each histogram counts the titles in that rating group).
budget_profit_fig, budget_profit_axis = plt.subplots(nrows=1, ncols=2, figsize=(30,6))
budget_profit_axis[0].set_title('Movies with ratings above 7.0')
budget_profit_axis[0].set_ylabel('number of movies')
budget_profit_axis[0].set_xlabel('title')
budget_profit_axis[1].set_title('Movies with ratings below 7.0')
budget_profit_axis[1].set_ylabel('number of movies')
budget_profit_axis[1].set_xlabel('title')
budget_profits_high = movie_combined_df['title'][movie_combined_df['averagerating'] > 7.0]
budget_profits_low = movie_combined_df['title'][movie_combined_df['averagerating'] < 7.0]
budget_profit_axis[0].hist(budget_profits_high, bins=10)
budget_profit_axis[1].hist(budget_profits_low, bins=30)
budget_profit_axis[0].tick_params(labelrotation=90)
budget_profit_axis[1].tick_params(labelrotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
Figure 6 allows us to see whether having a higher budget is reflected in the movie's rating.
###Code
#Figure 7
sns.barplot(y="rating", x="box_office", data=info, color= 'blue')
plt.title("Ratings and Their Total Box Office Returns")
###Output
_____no_output_____
###Markdown
Summer Data Scientist Data Assessment

Crime and Education Lab New York

*Jesica Maria Ramirez Toscano*

Part 1: Variable Creation
###Code
import pandas as pd
import numpy as np
arrests = pd.read_csv('arrests.csv')
demo = pd.read_csv('demo.csv')
demo['bdate'] = pd.to_datetime(demo['bdate'], utc=False)
arrests['arrest_date'] = pd.to_datetime(arrests['arrest_date'], utc=False)
###Output
_____no_output_____
###Markdown
1. We filter the arrests to the ones that occurred post-implementation.
2. Since we need information about past arrests and potential felony re-arrests, we merge the post-implementation arrests with all arrests by person_id, so each arrest will be linked to a post-arrest of the same individual.

> Note: **arrests_post** refers to the data of arrests post-implementation. **tr** refers to the merge of arrests_post with all the arrests, so each arrest in this data set is linked to a post-arrest of the same individual.
###Code
arrests_post = arrests[arrests['arrest_date'] >= '2010-01-01'].copy()
tr = pd.merge(arrests,
arrests_post.rename(columns={'arrest_date':'date_post',
'arrest_id':'aid_post',
'law_code':'code_post'}),
on='person_id')
###Output
_____no_output_____
###Markdown
3. We create different tables to obtain the number of prior misdemeanor arrests and felony arrests in the last 2 years and 6 months.
###Code
twoyear = tr[(tr['arrest_date'] >= tr['date_post']-pd.DateOffset(years=2)) & (tr['arrest_id'] != tr['aid_post'])]
sixmonth = tr[(tr['arrest_date'] >= tr['date_post'] - pd.DateOffset(months=6)) & (tr['arrest_id'] != tr['aid_post'])]
twoyear = twoyear.groupby(['aid_post', 'law_code']).size().unstack().reset_index().fillna(0)
twoyear.rename(columns = {'aid_post':'arrest_id', 'felony': 'fel_2y', 'misdemeanor': 'mis_2y'}, inplace=True)
sixmonth = sixmonth.groupby(['aid_post', 'law_code']).size().unstack().reset_index().fillna(0)
sixmonth.rename(columns = {'aid_post':'arrest_id', 'felony': 'fel_6m', 'misdemeanor': 'mis_6m'}, inplace=True)
###Output
_____no_output_____
###Markdown
>So for the table **twoyear**, we have the post_arrests variable with the number of prior felony and misdemeanor arrests in the last two years.
###Code
twoyear
###Output
_____no_output_____
###Markdown
4. To create the felony re-arrest binary variable, we need information about the potential future felony arrest of that individual. So first, we create a table called **year_ahead** using the **tr** dataset.
###Code
year_ahead = tr[(tr['arrest_date'] >= tr['date_post']) & (tr['arrest_id'] != tr['aid_post'])]
year_ahead = year_ahead[year_ahead['arrest_date'] <= year_ahead['date_post'] + pd.DateOffset(years=1)]
year_ahead = year_ahead.groupby(['aid_post', 'law_code']).size().unstack().reset_index().fillna(0)
year_ahead.rename(columns = {'aid_post':'arrest_id', 'felony': 'felony_arrests' }, inplace=True)
year_ahead[['arrest_id', 'felony_arrests']]
###Output
_____no_output_____
###Markdown
>With this table, we can create a binary variable of re_arrest (1 if the individual has one or more felony arrests during one year following the arrest, 0 if the individual has no felony re-arrest)
###Code
year_ahead['re_arrest'] = np.where(year_ahead['felony_arrests'] > 0,1,0)
###Output
_____no_output_____
###Markdown
5. With twoyear, sixmonth, year_ahead tables, we can now fill the data in arrests_post about the number of prior felony arrests and misdemeanor arrests in the last 2 years and 6 months, and the binary variable re_arrest (felony re-arrest).
###Code
arrests_post = arrests_post.merge(twoyear, on='arrest_id', how='left').fillna(0)
arrests_post = arrests_post.merge(sixmonth, on='arrest_id', how='left').fillna(0)
arrests_post = arrests_post.merge(year_ahead[['arrest_id', 're_arrest']], on='arrest_id', how='left').fillna(0)
arrests_post
###Output
_____no_output_____
###Markdown
6. Finally, we include data about the home precinct, age, and gender of the individual in each arrest.

> For the age variable, we take the difference between the arrest date and the birthdate (the result is in days, which we convert to years). For the gender variable, we noticed it has four unique values: M, F, male, female, so we recoded male and female as M and F.
###Code
final = pd.merge(arrests_post, demo, on='person_id')
final['age'] = ((final['arrest_date'] - final['bdate']) / np.timedelta64(1, 'Y')).round().astype(int)
final.drop(['bdate', 'arrest_id',], axis=1, inplace=True)
final.gender.unique()
final.loc[final['gender'] == 'male', 'gender'] = 'M'
final.loc[final['gender'] == 'female', 'gender'] = 'F'
print(final.gender.unique())
final
###Output
['M' 'F']
###Markdown
Part 2: Statistical Analysis >> Program Evaluation
###Code
import matplotlib.pyplot as plt
import statsmodels.api as sm
import seaborn as sns
from statsmodels.discrete.discrete_model import Probit
###Output
_____no_output_____
###Markdown
1. First, we import data about the treatment and control precincts2. Then, we are only interested in measuring the effect of the program for the first time an individual receives treatment, we filter the data to the first arrest of each individual in the post-implementation period.
###Code
treat = pd.read_csv('treatment_assignment.csv')
treat.rename(columns={'precinct' : 'home_precinct'}, inplace=True)
first = final.groupby('person_id').agg({'arrest_date':min}).reset_index()
first = first.merge(final, on=['person_id', 'arrest_date'])
###Output
_____no_output_____
###Markdown
> If we look at the data in the treatment_assignment data set, there are 30 precincts (control and treatment precincts), whereas in the data set of first arrests post-implementation period, there are 77 different precincts.
###Code
print('Treatment-control precincts: {}'.format(len(treat.home_precinct.unique())))
print('Arrests post-implementation precincts: {}'.format(len(first.home_precinct.unique())))
###Output
Treatment-control precincts: 30
Arrests post-implementation precincts: 77
###Markdown
>In this sense, we have two options: a) assume that the precincts not included in the treatment_assignment data set are also CONTROL, or b) assume that the treatment_assignment is complete and those precincts were chosen for study because they are similar to each other. I'm going to follow option b) and drop the observations that don't fall in the control and treatment precincts.
###Code
data_eval = first.merge(treat, on=['home_precinct'], how='right')
data_eval.drop(['person_id', 'arrest_date'], axis=1, inplace=True)
data_eval
###Output
_____no_output_____
###Markdown
3. Before evaluating the success of the program, we change the values of the following variables:
 - gender to 1 for Men and 0 for Female
 - treatment_status to 1 for treatment and 0 for control
 - law_code to 1 for felony and 0 for misdemeanor
###Code
data_eval['gender'] = np.where(data_eval['gender']== 'M', 1, 0)
data_eval['treatment_status'] = np.where(data_eval['treatment_status']== 'control', 0, 1)
data_eval['law_code'] = np.where(data_eval['law_code']== 'felony', 1, 0)
###Output
_____no_output_____
###Markdown
4. To analyze the effectiveness of this program, we regress the re_arrest variable on the rest of the covariates.

> Since the dependent variable is binary, we must estimate heteroscedasticity robust standard errors.
###Code
IND_VARS = ['treatment_status', 'age', 'gender', 'law_code', 'fel_2y', 'mis_2y', 'fel_6m', 'mis_6m']
all_ = sm.add_constant(data_eval[IND_VARS])
model1 = sm.OLS(data_eval['re_arrest'], all_).fit(cov_type='HC1')
model1.summary()
###Output
_____no_output_____
###Markdown
*In this linear probability model, we observe that treatment_status is not statistically significant, which may imply that there is no evidence that the program reduced, or even affected, the probability of felony re-arrest. In fact, the only variable that significantly explains the variation in the re-arrest probability is the recent history (prior 6 months) of felony arrests. We plot this variable (prior felony arrests in the last 6 months) against the binary variable re_arrest and the estimated probability values of this model. In the graph below, we observe that some estimated values are above one and below zero (which makes no sense for a probability).*
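To quantify that last point, a minimal sketch (assuming model1 from the cell above) simply counts the fitted values that fall outside the [0, 1] interval:
###Code
# Sketch: count OLS fitted "probabilities" outside the valid [0, 1] range.
fitted = model1.fittedvalues
print('fitted values below 0:', (fitted < 0).sum())
print('fitted values above 1:', (fitted > 1).sum())
###Output
_____no_output_____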
###Code
plt.figure(figsize=(10,6))
sns.scatterplot(data_eval['fel_6m'],data_eval['re_arrest'], label='Real values')
sns.scatterplot(data_eval['fel_6m'],model1.fittedvalues, label='Estimated values')
plt.xlabel("Prior felony arrests (in the last 6 months)")
plt.ylabel("Felony re-arrest")
plt.show()
###Output
_____no_output_____
###Markdown
*Looking at the graph above, we might agree that the appropriate model relating the independent variables to re-arrest may not be linear. In this sense, we can use a probit model to estimate the effects of the independent variables on the re-arrest probability.*
###Code
probitm = Probit(data_eval['re_arrest'], all_).fit()
probitm.summary()
###Output
Optimization terminated successfully.
Current function value: 0.148459
Iterations 8
###Markdown
*In this probit model, again, treatment_status has no impact on the re-arrest variable; there is no evidence that the program had an impact on the felony re-arrest probability. Most of the variation in the re-arrest probability is explained by past felony arrests in the last 6 months, which is also significant in this model specification. Now, in the graph below, we look at prior felony arrests in the last 6 months together with the binary variable re_arrest and the estimated probability values of the OLS and probit models. The estimated probability from the probit model is bounded between 0 and 1.*
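Because raw probit coefficients are not directly interpretable as probability changes, a hedged sketch (using the probitm object fitted above and statsmodels' get_margeff) can report average marginal effects:
###Code
# Sketch: average marginal effects of the probit model, i.e. the change in
# re-arrest probability associated with a unit change in each covariate.
probitm.get_margeff().summary()
###Output
_____no_output_____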
###Code
plt.figure(figsize=(10,6))
sns.scatterplot(data_eval['fel_6m'],data_eval['re_arrest'], label='Real values')
sns.scatterplot(data_eval['fel_6m'],model1.fittedvalues, label='Estimated values with OLS')
sns.scatterplot(data_eval['fel_6m'],probitm.predict(all_), label='Estimated values with Probit')
plt.xlabel("Prior felony arrests (in the last 6 months)")
plt.ylabel("Felony re-arrest")
plt.show()
###Output
_____no_output_____
###Markdown
Load module
###Code
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
import copy
plt.rc('text', usetex=True)
###Output
_____no_output_____
###Markdown
big picture

Attributes that we want to include:
+ acc:
  - TIME;
  - WEEKDAY; (category)
  - RDSURF; (category)
  - LIGHT; (category)
  - WEATHER; (category)
+ curv:
  - deg_curv;
+ grad:
  - pct_grad;
+ road:
  - AADT;
  - trkpcts;
  - mvmt;
  - RURURB; (category)
  - MED_TYPE, MEDWID;
  - LSHL_TYP, LSHL_TY2, LSHLDWID, LSHL_WD2; (2 represents decreasing direction)
  - RSHL_TYP, RSHL_TY2, RSHLDWID, RSHL_WD2;
  - SURF_TYP, SURF_TY2; (the composition of the driving surface in two directions)
  - lanewid, rdwy_wid; (on average, need filter > 0)
  - FUNC_CLS; (? this might be a summary variable that includes info of all previous variables) (category)
+ occ: None;
+ peds: None;
+ veh:
  - DRV_SEX; (need to summarize)
  - DRV_AGE; (need to summarize)
  - vehtype; (>4 is big ones?)
  - surf_typ; (Roadway surface type at the crash location? redundant info? No, they do not agree)
  - drv_actn; (difficult)
  - intox; (need to summarize)

functions and pre-test

read function - everybody needs
###Code
def detect_files(directory, keyword):
"""
detect files in specified directory with specified keyword
input
-----
directory : string
dir to search
keyword : string
keyword to look for
output
-----
sorted list of file names
test
-----
(1) if output has larger than length;
"""
file_list = []
for file in os.listdir(directory):
if not (keyword is None):
if keyword in file:
file_list.append(file)
else:
file_list.append(file)
return sorted(file_list)
def read_files(directory, keyword):
"""
read files with specified keyword
input
-----
directory : string
directory to read files from
keyword : string
keyword to search for
output
-----
output_dic : dic
dictionary of datasets
test
-----
(1) output_dic should have length 5, for 2013 - 2017;
(2) keyword should not be empty;
"""
output_dic = {}
file_list = detect_files(directory, keyword)
for yr in range(2013, 2018):
output_dic[yr] = pd.read_csv(os.path.join(directory, file_list[yr-2013]))
return output_dic
###Output
_____no_output_____
###Markdown
test on veh aggregation

This function will be used in s3_merge.py.
###Code
def veh_agg(df, crash_year):
"""
aggregate vehicle info
input
-----
df : pandas dataframe
df to be summarized
output
-----
df: pandas dataframe
aggregated df
"""
def sex(series):
for ele in series.tolist():
if ele > 1:
return True
return False
def young(series):
for ele in series.tolist():
if ele < 25:
return True
return False
def old(series):
for ele in series.tolist():
if ele > 65:
return True
return False
def drink(series):
for ele in series.tolist():
if ele == 1.0 or ele == 5.0:
return True
return False
def truck(series):
for ele in series.tolist():
if ele > 4:
return True
return False
def old_car(series):
for ele in series.tolist():
model_year = 1900
if ele < 10:
model_year += (100 + ele)
elif ele < 20:
model_year += (100 + ele)
else:
model_year += (ele)
if crash_year - model_year >= 15:
return True
return False
df = df.groupby(['CASENO']).agg({'DRV_SEX': [sex],
'DRV_AGE': [young, old],
'vehtype': [truck],
'vehyr': [old_car],
# 'surf_typ': ,
# 'drv_actn': ,
'intox': [drink]
})
df.columns = df.columns.get_level_values(1)
df = df.reset_index()
return df
veh_agg(veh[2017], 2017)
###Output
_____no_output_____
###Markdown
analysis read, extract, combine
###Code
crash = read_files("./merged", '20')
columns = [
'REPORT', 'ACCTYPE',
'TIME', 'WEEKDAY', 'RDSURF', 'LIGHT', 'weather',
'deg_curv',
'pct_grad',
'AADT', 'trkpcts', 'mvmt', 'RURURB', 'MED_TYPE', 'MEDWID',
'LSHL_TYP', 'LSHL_TY2', 'LSHLDWID', 'LSHL_WD2', 'RSHL_TYP', 'RSHL_TY2', 'RSHLDWID', 'RSHL_WD2',
'SURF_TYP', 'SURF_TY2', 'lanewid', 'rdwy_wid', 'FUNC_CLS',
'sex', 'young', 'old', 'drink', 'truck', 'old_car'
]
for year in crash:
df = crash[year]
crash[year] = df[columns]
crash[2017]
pd.unique(crash[2017]['weather'])
###Output
_____no_output_____
###Markdown
focus on type 33

***Strikes Appurtenance***

Need to drop NA values.
+ Notice, **AADT**, **trkpcts**, **mvmt** have NA values. This is strongly undesirable;
+ We drop rows with NA in those columns and thus retain 4,658 of 4,694 rows. That's not much loss;
###Code
df = crash[2017]
df = df[df.ACCTYPE == 33]
# df = df.dropna()
df
df.isna().any()
df.dropna(subset=['AADT', 'trkpcts', 'mvmt'])
###Output
_____no_output_____
###Markdown
append and obtain the final large dataset
###Code
df = crash[2013]
df = df[df.ACCTYPE == 33]
for year in range(2014, 2018):
tmp = crash[year]
tmp.weather = tmp.weather.replace({'.': '10'})
df = df.append(tmp[tmp.ACCTYPE == 33])
print("Before dropping, has {} rows.".format(df.shape[0]))
df = df.dropna(subset=['AADT', 'trkpcts', 'mvmt'])
print("After dropping, has {} rows.".format(df.shape[0]))
###Output
_____no_output_____
###Markdown
write out
###Code
df.to_csv('./merged/final.csv', index=False)
###Output
_____no_output_____
###Markdown
Clearly we can see now that the missing values are in the type columns: median, left shoulder, right shoulder. Interestingly,
+ *MEDWID* is non-zero but *MED_TYPE* is missing;
+ and are missing for many cases;
+ also have a lot of missings;

Maybe we remove the type attributes?

We just keep the width info and don't care about the types of materials.

further delete type attributes
###Code
df = pd.read_csv('./merged/final.csv')
df.isna().any()
df.isna().sum()
df = df.drop(columns=['MED_TYPE', 'LSHL_TYP', 'LSHL_TY2', 'RSHL_TYP', 'RSHL_TY2', 'SURF_TY2'])
df = df.dropna()
df.to_csv('./merged/final_no_na.csv', index=False)
###Output
_____no_output_____
###Markdown
convert numerical to categorical and then create dummy variables

The variables to be converted are:
+ WEEKDAY;
+ RDSURF;
+ LIGHT;
+ weather;
+ RURURB;
+ SURF_TYP;
+ FUNC_CLS;
###Code
df = pd.read_csv('./merged/final_no_na.csv')
###Output
_____no_output_____
###Markdown
look at their unique values

**Weather** needs some special attention.
###Code
pd.unique(df.WEEKDAY)
pd.unique(df.RDSURF)
pd.unique(df.LIGHT)
pd.unique(df.weather)
pd.unique(df.RURURB)
pd.unique(df.SURF_TYP)
pd.unique(df.FUNC_CLS)
###Output
_____no_output_____
###Markdown
convert some to integers
###Code
df = df.astype(
{'WEEKDAY':'int64', 'RDSURF':'int64', 'LIGHT':'int64', 'weather':'int64', 'FUNC_CLS':'int64'})
df['peak-hour'] = df['TIME'].apply(lambda x: 1 if (700 <= x <= 1000) or (1700 <= x <= 2000) else 0)
df['WEEKDAY'] = df['WEEKDAY'].apply(lambda x: 1 if x < 6 else 0)
df = df[df['LIGHT'].isin([1,2,3,4,5,6])]
df['LIGHT'] = df['LIGHT'].replace({5:4, 6:4})
df = df[df['AADT'] > 0]
df = df.drop(columns=['TIME'])
df = df.astype(
{'WEEKDAY':'category', 'RDSURF':'category', 'LIGHT':'category',
'weather':'category', 'RURURB':'category', 'SURF_TYP':'category',
'FUNC_CLS':'category', 'sex':'category', 'young':'category',
'old':'category', 'drink':'category', 'truck':'category',
'old_car':'category', 'peak-hour':'category'
})
df.to_csv('./merged/final_type_correct.csv', index=False)
###Output
_____no_output_____
###Markdown
SMOTE
###Code
from imblearn.over_sampling import SMOTENC
from sklearn.utils import resample
from collections import Counter
# Separate majority and minority classes
crash_1 = df[df.REPORT==1]
crash_23 = df[df.REPORT!=1]
# Downsample majority class
crash_1_downsampled = resample(crash_1,
replace=False, # sample without replacement
n_samples=4843, # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
crash_d = pd.concat([crash_1_downsampled, crash_23])
crash_d.columns
reg_data = crash_d[['WEEKDAY','RDSURF','LIGHT','weather','RURURB','SURF_TYP',
'FUNC_CLS','sex','young','old','drink','truck','old_car', 'peak-hour',
'MEDWID','LSHLDWID','LSHL_WD2','RSHLDWID','RSHL_WD2','lanewid','rdwy_wid',
'deg_curv', 'pct_grad', 'AADT', 'trkpcts', 'mvmt']]
y = crash_d['REPORT']
sm = SMOTENC(random_state=42, categorical_features=[0,1,2,3,4,5,6,7,8,9,10,11,12,13])
reg_data_res, y_res = sm.fit_resample(reg_data, y)
C = Counter(y_res)
print(C)
print(reg_data_res.shape)
print(y_res.shape)
reg_data_x = pd.DataFrame(data=reg_data_res)
reg_data_y = pd.DataFrame(data=y_res)
print(reg_data_x.shape)
print(reg_data_y)
reg_data = pd.concat([reg_data_x, reg_data_y], axis=1, sort=False)
reg_data.to_csv('./merged/final_smote.csv', index = False)
###Output
_____no_output_____
###Markdown
create dummy and regression
###Code
df = pd.read_csv('./merged/final_type_correct.csv')
df
df_log = pd.get_dummies(df, columns=['WEEKDAY', 'RDSURF', 'LIGHT', 'weather', 'RURURB', 'SURF_TYP', 'FUNC_CLS'])
###Output
_____no_output_____
###Markdown
learning

I think the sklearn logistic regression can handle this by specifying **class_weight** as **balanced** (a short sketch of the weights this computes follows the train/test split cell below).
+ [sklearn logistic regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)
+ [a blog](https://towardsdatascience.com/machine-learning-multiclass-classification-with-imbalanced-data-set-29f6a177c1a)
+ [blog code](https://github.com/javaidnabi31/Multi-class-with-imbalanced-dataset-classification/blob/master/20-news-group-classification.ipynb)
+ [get dummies](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html)
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
y = df_log['REPORT']
X = df_log.drop(columns=['REPORT', 'ACCTYPE'])
msk = np.random.rand(len(df)) < 0.8
X_train = X[msk]
X_test = X[~msk]
y_train = y[msk]
y_test = y[~msk]
###Output
_____no_output_____
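###Markdown
As a hedged aside (not part of the original notebook): class_weight='balanced' reweights each class by n_samples / (n_classes * class_count). The sketch below reproduces those weights with sklearn's compute_class_weight for the REPORT labels y defined above.
###Code
# Sketch: per-class weights that class_weight='balanced' would assign to y.
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

classes = np.unique(y)
weights = compute_class_weight(class_weight='balanced', classes=classes, y=y)
print(dict(zip(classes, np.round(weights, 3))))
###Output
_____no_output_____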
###Markdown
multinomial logistic
###Code
clf = LogisticRegression(multi_class='multinomial',
class_weight='balanced',solver='newton-cg',
penalty='none'
).fit(X_train, y_train)
clf.score(X_train, y_train)
prediction = clf.predict(X_test)
###Output
_____no_output_____
###Markdown
confusion matrix
###Code
from sklearn.metrics import confusion_matrix
import itertools
cnf_matrix = confusion_matrix(y_test, prediction)
fig = plt.figure(figsize=(6,6))
# fig.set_size_inches(14, 12, forward=True)
# fig.align_labels()
# fig.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
cm = cnf_matrix
normalize = True
classes = ['PDO','INJ','FAT']
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.xlim(-0.5, 2.5)
plt.ylim(-0.5, 2.5)
plt.xticks([0,1,2], classes, fontsize=15)
plt.yticks([0,1,2], classes, fontsize=15)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
fontsize=20
)
# plt.tight_layout()
plt.ylabel('True label', fontsize=18)
plt.xlabel('Predicted label', fontsize=18)
plt.title('Confusion matrix of multinomial logistic modelling', fontsize=22)
plt.show()
###Output
_____no_output_____
###Markdown
multinomial logistic cv
###Code
clf_cv = LogisticRegressionCV(cv=5,multi_class='multinomial', random_state=0,
class_weight='balanced',solver='newton-cg',
max_iter=200
).fit(X, y)
clf_cv.score(X, y)
###Output
_____no_output_____
###Markdown
roc curve

[When to use ROC: balance](https://machinelearningmastery.com/roc-curves-and-precision-recall-curves-for-classification-in-python/)
###Code
pd.unique(y_train)
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
y_train_bin = label_binarize(y_train, classes=[1, 2, 3])
y_test_bin = label_binarize(y_test, classes=[1, 2, 3])
###Output
_____no_output_____
###Markdown
[sklearn logistic](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)
###Code
n_classes = 3
# Learn to predict each class against the other
classifier = OneVsRestClassifier(
LogisticRegression(
class_weight='balanced',solver='liblinear',penalty='l2'))
y_score = classifier.fit(X_train, y_train_bin).decision_function(X_test)
from sklearn.metrics import roc_curve, auc
from scipy import interp
from itertools import cycle
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 4
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(14,10))
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('False Positive Rate', fontsize=18)
plt.ylabel('True Positive Rate', fontsize=18)
plt.title('Some extension of Receiver operating characteristic to multi-class', fontsize=22)
plt.legend(loc="lower right",fontsize=22)
plt.show()
###Output
_____no_output_____
###Markdown
precision-recall

[Precision-recall](https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html#sphx-glr-auto-examples-model-selection-plot-precision-recall-py)
###Code
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
###Output
_____no_output_____
###Markdown
‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’
###Code
clf = LogisticRegression(multi_class='multinomial',solver='newton-cg',
penalty='none', max_iter=200
).fit(X_train, y_train)
y_score_log = clf.decision_function(X_test)
y_score_log
# For each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(3):
precision[i], recall[i], _ = precision_recall_curve(y_test_bin[:, i],
y_score_log[:, i])
average_precision[i] = average_precision_score(y_test_bin[:, i], y_score_log[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test_bin.ravel(),
y_score_log.ravel())
average_precision["micro"] = average_precision_score(y_test_bin, y_score_log,
average="micro")
print('Average precision score, micro-averaged over all classes: {0:0.2f}'
.format(average_precision["micro"]))
plt.figure(figsize=(14,10))
plt.step(recall['micro'], precision['micro'], color='b', alpha=0.2,
where='post')
plt.fill_between(recall["micro"], precision["micro"], alpha=0.2, color='b')#,
#**step_kwargs)
plt.xlabel(r'Recall', fontsize=18)
plt.ylabel(r'Precision', fontsize=18)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title(r'Average precision score, micro-averaged over all classes: AP={0:0.2f}'.format(average_precision["micro"]),
fontsize=22)
plt.show()
from itertools import cycle
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(12,10))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.1)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall', fontsize=18)
plt.ylabel('Precision', fontsize=18)
plt.title('Extension of Precision-Recall curve to multi-class',fontsize=22)
plt.legend(lines, labels, loc=(0.1, -0.4), prop=dict(size=22))
plt.show()
clf = LogisticRegression(multi_class='multinomial',solver='newton-cg',
penalty='none', max_iter=200,class_weight='balanced'
).fit(X_train, y_train)
y_score_log = clf.decision_function(X_test)
# For each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(3):
precision[i], recall[i], _ = precision_recall_curve(y_test_bin[:, i],
y_score_log[:, i])
average_precision[i] = average_precision_score(y_test_bin[:, i], y_score_log[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test_bin.ravel(),
y_score_log.ravel())
average_precision["micro"] = average_precision_score(y_test_bin, y_score_log,
average="micro")
print('Average precision score, micro-averaged over all classes: {0:0.2f}'
.format(average_precision["micro"]))
plt.figure(figsize=(14,10))
plt.step(recall['micro'], precision['micro'], color='b', alpha=0.2,
where='post')
plt.fill_between(recall["micro"], precision["micro"], alpha=0.2, color='b')#,
#**step_kwargs)
plt.xlabel(r'Recall', fontsize=18)
plt.ylabel(r'Precision', fontsize=18)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title(r'Average precision score, micro-averaged over all classes: AP={0:0.2f}'.format(average_precision["micro"]),
fontsize=22)
plt.show()
from itertools import cycle
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(12,10))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.1)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall', fontsize=18)
plt.ylabel('Precision', fontsize=18)
plt.title('Extension of Precision-Recall curve to multi-class',fontsize=22)
plt.legend(lines, labels, loc=(0.1, -0.4), prop=dict(size=22))
plt.show()
###Output
_____no_output_____
###Markdown
Analysis

This code analyzes tables with the following removed:
* **Multi** terrain type
* Any row that has null entries, which was a consequence of scraping thousands of webpages with sometimes different table structures; this killed about 27% of the scraped data
* Chip time and gun time have been combined to form a minimum time in minutes (see the sketch after this list)
* Deleted **ALL** races without the corresponding GPX file, therefore this is the cross-referenced output
* Some GPX information has been added to the tables, e.g. elevation and sigma
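A minimal sketch of how the chip/gun-time combination could have been done upstream; the column names chip_time and gun_time are assumptions for illustration only, since that preprocessing is not part of this notebook:
###Code
# Sketch only: deriving min_time (minutes) from two hypothetical timing columns.
import pandas as pd

example = pd.DataFrame({'chip_time': [41.2, 38.5], 'gun_time': [42.0, 38.3]})
example['min_time'] = example[['chip_time', 'gun_time']].min(axis=1)
print(example)
###Output
_____no_output_____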
###Code
race_type = 'Mar'
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from sklearn import metrics
from sklearn import linear_model
from sklearn import preprocessing
from sklearn import utils
import warnings
warnings.filterwarnings('ignore')
# Important maps
conversion_map = {'1M':1.0, '3K':1.86, '2M':2.0, '5K':3.1, '4M':4.0,
'5M':5.0, '6M':6.0, '10K':6.2, 'QM':6.55, '7M':7.0,
'10M':10.0, 'HM':13.1, 'Mar':26.2}
race_order= ['1M', '3K', '2M', '5K', '4M','5M', '6M', '10K',
'QM', '7M', '10M', 'HM', 'Mar']
age_order = ['U15','U17','U20','U23','SEN','V35','V40','V45',
'V50','V55','V60','V65','V70','V75','V80','V85']
#age_map = {'U11':0,'U13':0,'U15':0,'U17':0,'U20':0,'U23':0,
# 'SEN':1,'V35':2,'V40':2,'V45':3,'V50':3,'V55':4,
# 'V60':4,'V65':5,'V70':5,'V75':5,'V80':5,'V85':5}
age_map = {'U11':0,'U13':1,'U15':2,'U17':3,'U20':4,'U23':5,
'SEN':6,'V35':7,'V40':8,'V45':9,'V50':10,'V55':11,
'V60':12,'V65':13,'V70':14,'V75':15,'V80':16,'V85':17}
dist_map = {'1M':1, '3K':2, '2M':3, '5K':4, '4M':5,
'5M':6, '6M':7, '10K':8, 'QM':9, '7M':10,
'10M':11, 'HM':12, 'Mar':13}
speed = 3.1*1.6 #mph
if race_type == 'Mar':
speed = 3.1*1.4
dist = conversion_map[race_type]
walk_time = (dist / speed)* 60
print('60%% faster than walking is expected to take %1.1f minutes' % walk_time)
print('TIME CUT = %1.3f' % walk_time)
TIME_CUT = walk_time
datadir = '/home/freddy/insight/data/'
filename = datadir + 'data_overlaps_with_gpx_cleaned.csv'
df = pd.read_csv(filename)
print('rows, cols = {0}, {1}'.format(df.shape[0], df.shape[1]))
df=df.drop(columns=['Unnamed: 0'], axis=1)
df=df[df.age_group != 'V115']
df_old = df
df_old.groupby(['race_title'],as_index=False).size()
#df_old.groupby(['age_group'],as_index=False).size()
df=df[df.race_title==race_type]
subdf=df[df.race_title==race_type].groupby(['meeting_id','sex','age_group','race_title'],as_index=False)['min_time'].median()
if race_type == '10K':
f, ax = plt.subplots(1,1, figsize=(12,4))
A=df.groupby(['event_title','min_time'], as_index=False).count()
Ar = A[A.event_title=='RunThrough Olympic Park 10K']
Al = A[A.event_title=='RunThrough Chase The Moon Olympic Park 10K']
plt.hist(list(Al.min_time.values), 50,
alpha=0.5, label='Day', facecolor='g')
plt.hist(list(Ar.min_time.values), 50,
alpha=0.5, label='Night',facecolor='b')
plt.legend(loc='upper right',frameon=False, prop={'size':20})
plt.grid(True)
plt.xlabel('Median Finish Time (min)')
plt.show()
location_df = df.groupby(['meeting_id','race_location','event_title'],as_index=False).count()
events = set(list(location_df['event_title'].values))
event_map = {}
for i in events:
subdf = location_df.loc[location_df['event_title']==i]
subdf_map = {}
for index, row in subdf.iterrows():
ID = row.meeting_id
loc = row.race_location
subdf_map[ID] = loc
event_map[i] = subdf_map
temp = dict(zip(location_df.meeting_id,location_df.event_title))
id_avgtime = df.groupby(['meeting_id'], as_index=False)['min_time'].median()
times = dict(zip(id_avgtime.meeting_id, id_avgtime.min_time))
time_bar = 0.0
for i in list(times.values()):
time_bar += i
time_bar /= float(len(list(times.values())))
id_avgtime_sex = df.groupby(['meeting_id','sex'], as_index=False)['min_time'].mean()
ids,sex,time=[],[],[]
for index,row in id_avgtime_sex.iterrows():
ids.append(row.meeting_id)
sex.append(row.sex)
time.append(row.min_time)
times_sex = {}
for idx in range(0,len(sex),2):
tempsex = {}
tempsex[sex[idx]] = time[idx]
tempsex[sex[idx+1]] = time[idx+1]
times_sex[ids[idx]] = tempsex
fast=(df.sort_values('min_time').groupby(['meeting_id'],as_index=False).first())['min_time'].values
n, bins, patches = plt.hist(fast, 20,
facecolor='g', alpha=0.75)
plt.grid(True)
plt.xlabel('Fastest Run Times for %s' % race_type)
plt.show()
n, bins, patches = plt.hist(times.values(), 20,
facecolor='g', alpha=0.75)
plt.grid(True)
plt.xlabel('Median Time For %s Races (min)' % race_type)
plt.show()
def get_dt(row):
time = row.min_time
med_time = times[row.meeting_id]
return float(time-med_time)
df['dt'] = df.apply(get_dt,axis=1)
print(df.shape)
df = df.drop(df[df.min_time > TIME_CUT].index)
print(df.shape)
Y = []
for index,row in df.iterrows():
sex = row.sex
age = row.age_group
time = row.min_time
ID = row.meeting_id
avg_time = times[ID]
#avg_time = times_sex[ID][sex]
Y.append(0 if (time<=avg_time) else 1)
Y_dt = list(df['dt'])
from scipy.stats import norm
n, bins, patches = plt.hist(Y_dt, 100, density=True,
facecolor='g', alpha=0.75)
#plt.axis([10, 35, 0.0, 0.175])
plt.grid(True)
plt.xlabel('t - <t> (min)')
(mu,sig) = norm.fit(Y_dt)
y = norm.pdf(bins, mu, sig)  # mlab.normpdf was removed in newer matplotlib; scipy's norm.pdf is equivalent
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.show()
print('Mu,sigma = %1.3f, %1.3f' % (mu,sig))
X = df
norm = conversion_map[race_type]
X['sum_up'] = X['sum_up']/norm
X['sigma'] = X['sigma']/norm
print('Normalization = %1.4f miles' % norm )
gpx = X.groupby(['meeting_id'],as_index=False).mean()
gpx = gpx.drop(['position', 'race_dist', 'min_time'], axis=1)
X = X.drop(['position','meeting_id', 'race_title', 'race_dist',
'event_title','race_location', 'dt'], axis=1)
for xrow in [X]:
xrow['sex'] = xrow['sex'].map( {'W': 1, 'M': 2} ).astype(int)
xrow['age_group'] = xrow['age_group'].map( age_map )
###Output
_____no_output_____
###Markdown
Split into training and testing
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = \
train_test_split(X, Y_dt, test_size=0.2, random_state=42)
from sklearn.preprocessing import StandardScaler
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.fit_transform(X_test)
###Output
_____no_output_____
###Markdown
Logistic Regression

This was attempt number 1 and is too simple, as the output has been forced to be binary.
###Code
clf = linear_model.LogisticRegression()
clf.fit(X, Y)
beta = pd.concat([pd.DataFrame(X.columns),pd.DataFrame(np.transpose(clf.coef_))], axis = 1)
print('Logistic Regression Results:')
print(beta)
X.head()
reg = linear_model.LinearRegression().fit(X, Y_dt)
lr = reg.coef_
beta_dict = {'age_group': lr[0], 'sex':lr[1],
'min_time':lr[2], 'sum_up':lr[3], 'sigma':lr[4],
'diff':lr[5]}
beta = pd.DataFrame(list(beta_dict.items()))
print('Linear Regression Results:')
print(beta)
reg_v2 = linear_model.LinearRegression().fit(X_train, y_train)
y_pred = reg_v2.predict(X_test)
score = reg_v2.score(X_test,y_test)
print('Score = {0}'.format(score))
print(np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
from sklearn.metrics import mean_squared_error
ridge = linear_model.Ridge(alpha=0.5, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
ridge.fit(X_train, y_train)
# calculate errors
new_train_error = mean_squared_error(y_train, ridge.predict(X_train))
new_test_error = mean_squared_error(y_test, ridge.predict(X_test))
print(new_train_error, new_test_error)
ridge.coef_
d = []
for index, row in X.iterrows():
rowidx=0
sum = 0.0
for i in row:
sum += i*beta.values[rowidx][1]
rowidx+=1
d.append(sum)
from scipy.stats import norm
n, bins, patches = plt.hist(d, 100, density=True,
facecolor='black', alpha=0.75)
#plt.axis([20, 120, 0.1, 200])
plt.grid(True)
plt.xlabel('Score Distribution for Marathon Courses')
(mu,sig) = norm.fit(d)
y = norm.pdf(bins, mu, sig)  # mlab.normpdf was removed in newer matplotlib; scipy's norm.pdf is equivalent
#l = plt.plot(bins, y, 'r--', linewidth=4)
#plt.yscale('log')
print('Mu,sigma = %1.3f, %1.3f' % (mu,sig))
plt.show()
def integrate(lo, hi, n, bins):
integral = 0.0
for idx in range(lo,hi):
integral += n[idx] * (bins[idx+1]-bins[idx])
return integral
xvals, yvals, dx = [],[],[]
for idx in range(0,len(n)):
integral = integrate(0, idx, n, bins)
binpos = 0.5*(bins[idx+1] + bins[idx])
dx.append(bins[idx+1] - bins[idx])
success = False
if integral > 0.995:
success = True
integral = 1.0
xvals.append(binpos)
yvals.append(integral)
if success:
break
plt.scatter(xvals,yvals,color='black')
plt.xlabel('Score Distribution for Marathons')
plt.ylabel('Difficulty Index')
plt.show()
dump_output = 'inputs/{0}/'.format(race_type)
#dump_output = 'inputs/testing/10K/'
# Print out age and sex map
f1 = open('{0}age_map_{1}.csv'.format(dump_output,race_type), 'w')
for key, val in age_map.items():
f1.write('%s,%d\n'%(key,val))
f1.close()
# save the betas
beta.to_csv('{0}beta_{1}.csv'.format(dump_output,race_type),sep=',',
index=False, header=False)
# Write out the integral plot
f = open('{0}d_dist_{1}.csv'.format(dump_output,race_type), 'w')
f.write('bin,xval,yval,dx\n')
for idx in range(0,len(xvals)):
f.write('%d,%1.5f,%1.5f,%1.5f\n' % (idx,xvals[idx],yvals[idx], dx[idx]))
f.close()
id_avgtime.to_csv('{0}avg_times_{1}.csv'.format(dump_output,race_type),sep=',',
index=False)
# Write out the GPX information used
gpx.to_csv('{0}gpx_info_{1}.csv'.format(dump_output,race_type),sep=',', index=False)
# Print out the event list
f2 = open('{0}event_title_list_{1}.csv'.format(dump_output,race_type), 'w')
f2.write('event\n')
for key, val in event_map.items():
f2.write('%s\n'%key)
f2.close()
f3 = open('{0}event_title_list_v2_{1}.csv'.format(dump_output,race_type), 'w')
f3.write('ID,event\n')
for key, val in temp.items():
f3.write('%d,%s\n'%(key,val))
f3.close()
# LETS MERGE ANYTHING USEFUL FOR LATER CALCULATIONS
frame2sql_temp = id_avgtime
frame2sql = pd.merge(frame2sql_temp,gpx,on='meeting_id')
evt_temp = pd.DataFrame.from_dict(temp,orient='index')
output = frame2sql.merge(evt_temp,left_on='meeting_id',right_index=True)
output['race_type'] = race_type
output.to_csv('{0}OUTPUT_{1}.csv'.format(dump_output,race_type),sep=',', index=False)
###Output
_____no_output_____
###Markdown
Initial / Personalized Accuracy
###Code
model = 'mobile' # cnn, mobile
dataset = 'cifar100' # cifar10, cifar100
num_classes = 100 # 10, 100
momentum = 0.90
wd = 0.0
personalization_epoch = 5 # fine-tuning epochs for personalization
server_data_ratio = 0.00
for shard_per_user in [100, 50, 10]:
for frac in [1.0, 0.1]:
for local_ep in [1, 4, 10]:
for local_upt_part, aggr_part in [('full', 'full'), ('body', 'body')]:
args = easydict.EasyDict({'epochs': local_ep,
'num_users': 100,
'shard_per_user': shard_per_user,
'server_data_ratio': server_data_ratio,
'frac': frac,
'local_ep': local_ep,
'local_bs': 50,
'bs': 128,
'lr': 1e-3,
'momentum': momentum,
'wd': wd,
'split': 'user',
'grad_norm': False,
'local_ep_pretrain': 0,
'lr_decay': 1.0,
'model': model,
'kernul_num': 9,
'kernul_sizes': '3,4,5',
'norm': 'batch_norm',
'num_filters': 32,
'max_pool': 'True',
'num_layers_keep': 1,
'dataset': dataset,
'iid': False,
'num_classes': num_classes,
'num_channels': 3,
'gpu': 1,
'stopping_rounds': 10,
'verbose': False,
'print_freq': 100,
'seed': 1,
'test_freq': 1,
'load_fed': '',
'results_save': 'run1',
'start_saving': 0,
'local_upt_part': local_upt_part,
'aggr_part': aggr_part,
'unbalanced': False
})
# parse args
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
base_dir = './save/{}/{}_iid{}_num{}_C{}_le{}_m{}_wd{}/shard{}_sdr{}/{}/'.format(
args.dataset, args.model, args.iid, args.num_users, args.frac, args.local_ep, args.momentum, args.wd, args.shard_per_user, args.server_data_ratio, args.results_save)
algo_dir = 'local_upt_{}_aggr_{}'.format(args.local_upt_part, args.aggr_part)
dataset_train, dataset_test, dict_users_train, dict_users_test = get_data(args)
dict_save_path = os.path.join(base_dir, algo_dir, 'dict_users.pkl')
with open(dict_save_path, 'rb') as handle:
dict_users_train, dict_users_test = pickle.load(handle)
# build model
net_glob = get_model(args)
net_glob.train()
net_local_list = []
for user_ix in range(args.num_users):
net_local_list.append(copy.deepcopy(net_glob))
criterion = nn.CrossEntropyLoss()
before_acc_results = []
after_acc_results = []
for user, net_local in enumerate(net_local_list):
model_save_path = os.path.join(base_dir, algo_dir, 'best_model.pt')
net_local.load_state_dict(torch.load(model_save_path), strict=True)
acc_test, loss_test = test_img_local(net_local, dataset_test, args, user_idx=user, idxs=dict_users_test[user])
before_acc_results.append(acc_test)
net_local.train()
ldr_train = DataLoader(DatasetSplit(dataset_train, dict_users_train[user]), batch_size=args.local_bs, shuffle=True)
body_params = [p for name, p in net_local.named_parameters() if 'linear' not in name]
head_params = [p for name, p in net_local.named_parameters() if 'linear' in name]
optimizer = torch.optim.SGD([{'params': body_params, 'lr': args.lr},
{'params': head_params, 'lr': args.lr}],
momentum=args.momentum)
for iter in range(personalization_epoch):
for batch_idx, (images, labels) in enumerate(ldr_train):
images, labels = images.to(args.device), labels.to(args.device)
net_local.zero_grad()
logits = net_local(images)
loss = criterion(logits, labels)
loss.backward()
optimizer.step()
acc_test, loss_test = test_img_local(net_local, dataset_test, args, user_idx=user, idxs=dict_users_test[user])
after_acc_results.append(acc_test)
print ("-----------------------------------------------------")
print ("local update part: {}, aggregation part: {}".format(local_upt_part, aggr_part))
print ("shard: {}, frac: {}, local_ep: {}".format(shard_per_user, frac, local_ep))
print ("Before min/max/mean/std of accuracy")
print (np.min(before_acc_results), np.max(before_acc_results), np.mean(before_acc_results), round(np.std(before_acc_results), 2))
print ("After min/max/mean/std of accuracy")
print (np.min(after_acc_results), np.max(after_acc_results), np.mean(after_acc_results), round(np.std(after_acc_results), 2))
print ("-----------------------------------------------------")
###Output
_____no_output_____
###Markdown
without classifier accuracy
###Code
model = 'mobile' # cnn, mobile
dataset = 'cifar100' # cifar10, cifar100
num_classes = 100 # 10, 100
momentum = 0.90
wd = 0.0
personalization_epoch = 5 # fine-tuning epochs for personalization
server_data_ratio = 0.00
for shard_per_user in [100, 50, 10]:
for frac in [1.0, 0.1]:
for local_ep in [1, 4, 10]:
for local_upt_part, aggr_part in [('full', 'full'), ('body', 'body')]:
args = easydict.EasyDict({'epochs': local_ep,
'num_users': 100,
'shard_per_user': shard_per_user,
'server_data_ratio': server_data_ratio,
'frac': frac,
'local_ep': local_ep,
'local_bs': 50,
'bs': 128,
'lr': 1e-3,
'momentum': momentum,
'wd': wd,
'split': 'user',
'grad_norm': False,
'local_ep_pretrain': 0,
'lr_decay': 1.0,
'model': model,
'kernul_num': 9,
'kernul_sizes': '3,4,5',
'norm': 'batch_norm',
'num_filters': 32,
'max_pool': 'True',
'num_layers_keep': 1,
'dataset': dataset,
'iid': False,
'num_classes': num_classes,
'num_channels': 3,
'gpu': 1,
'stopping_rounds': 10,
'verbose': False,
'print_freq': 100,
'seed': 1,
'test_freq': 1,
'load_fed': '',
'results_save': 'run1',
'start_saving': 0,
'local_upt_part': local_upt_part,
'aggr_part': aggr_part,
'unbalanced': False
})
# parse args
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
base_dir = './save/{}/{}_iid{}_num{}_C{}_le{}_m{}_wd{}/shard{}_sdr{}/{}/'.format(
args.dataset, args.model, args.iid, args.num_users, args.frac, args.local_ep, args.momentum, args.wd, args.shard_per_user, args.server_data_ratio, args.results_save)
algo_dir = 'local_upt_{}_aggr_{}'.format(args.local_upt_part, args.aggr_part)
dataset_train, dataset_test, dict_users_train, dict_users_test = get_data(args)
dict_save_path = os.path.join(base_dir, algo_dir, 'dict_users.pkl')
with open(dict_save_path, 'rb') as handle:
dict_users_train, dict_users_test = pickle.load(handle)
# build model
net_glob = get_model(args)
net_glob.eval()
# build template
net_local_list = []
for user_ix in range(args.num_users):
net_local_list.append(copy.deepcopy(net_glob))
before_acc_results = []
for user, net_local in enumerate(net_local_list):
model_save_path = os.path.join(base_dir, algo_dir, 'best_model.pt')
net_local.load_state_dict(torch.load(model_save_path), strict=True)
acc_test = distance_test_img_local(net_local, dataset_train, dataset_test, args, user_idx=user, train_idxs=dict_users_train[user], test_idxs=dict_users_test[user])
before_acc_results.append(acc_test)
net_local.cpu()
print ("-----------------------------------------------------")
print ("local update part: {}, aggregation part: {}".format(local_upt_part, aggr_part))
print ("shard: {}, frac: {}, local_ep: {}".format(shard_per_user, frac, local_ep))
print ("Before min/max/mean/std of accuracy")
print (np.min(before_acc_results), np.max(before_acc_results), np.mean(before_acc_results), round(np.std(before_acc_results), 2))
print ("-----------------------------------------------------")
###Output
_____no_output_____
###Markdown
Get the number of points received by experiment, execution and episode
###Code
cols = ['experiment', 'execution', 'episode', 'point']
result = pd.DataFrame(columns=cols)
for f in glob.glob("./execution/*/*/total_point.npy"):
path = f.split('/')
exp = path[2].split('_')[1]
exec = path[3].split('_')[1]
points = np.load(f)
episodes = range(1, 1000+1)
experiment = pd.DataFrame({
'experiment': [int(exp) for _ in episodes],
'execution': [int(exec) for _ in episodes],
'episode': [i for i in episodes],
'point': points,
})
result = pd.concat([result, experiment])
result = result.sort_values(by=cols).reset_index(drop=True)
result.tail()
###Output
_____no_output_____
###Markdown
Point average by experiment
###Code
result[['experiment','point']].groupby(['experiment']).mean()
###Output
_____no_output_____
###Markdown
Point average by experiment and execution
###Code
result[['experiment','execution','point']].groupby(['experiment', 'execution']).mean()
###Output
_____no_output_____
###Markdown
Point average by experiment for the last hundred episodes
###Code
result.loc[result['episode'] > 900][['experiment','execution','point']]
result.loc[result['episode'] > 900]\
[['experiment','execution','point']].\
groupby(['experiment', 'execution']).mean()
###Output
_____no_output_____
###Markdown
Get confidence intervals every 50 episodes
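The cell below uses the usual normal-approximation interval around the mean of the per-execution averages, $\bar{x} \pm 1.96\, s/\sqrt{n}$, where $s$ is the standard deviation of those averages and $n$ is the count used in the code (experiment_total).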
###Code
result_ci = pd.DataFrame(columns=['experiment','mean','ci95_hi','ci95_lo','percentile'])
split = 50
experiment_total = 5
for k in range (0, int(1000/split)):
stats = result.loc[(result['episode'] >= split*k) & (result['episode'] < split*k+split)]\
[['experiment', 'execution','point']].\
groupby(['experiment', 'execution']).agg(['mean', 'count']).sort_values(['experiment'])
ci95_hi = []
ci95_lo = []
means = []
for exp in range(1,experiment_total):
m = np.average(stats.loc[exp]['point']['mean'])
c = experiment_total
s = np.std(stats.loc[exp]['point']['mean'])
ci95_hi.append(m + 1.96*s/math.sqrt(c))
ci95_lo.append(m - 1.96*s/math.sqrt(c))
means.append(m)
obs = pd.DataFrame({
'experiment': range(1,experiment_total),
'mean': means,
'ci95_hi': ci95_hi,
'ci95_lo': ci95_lo,
'percentile': (k+1)*split
})
result_ci = pd.concat([result_ci, obs])
result_ci = result_ci.sort_values(['experiment','percentile'])
result_ci.sort_values(['percentile'],ascending=False)
###Output
_____no_output_____
###Markdown
Plot the confidence intervals every 50 episodes
###Code
# Plot the mean points per experiment, with 95% confidence bands for experiments 1 and 4
exp1 = result_ci.loc[result_ci['experiment']==1]
exp2 = result_ci.loc[result_ci['experiment']==2]
exp3 = result_ci.loc[result_ci['experiment']==3]
exp4 = result_ci.loc[result_ci['experiment']==4]
plt.plot(exp1['percentile'], exp1['mean'], c='red', marker='o', label='No punish, No Norm')
plt.fill_between([i for i in exp1['percentile']], [i for i in exp1['ci95_lo']], [i for i in exp1['ci95_hi']], color='red', alpha=.3)
plt.plot(exp2['percentile'], exp2['mean'], c='yellow', marker='v', label='Yes punish, No Norm')
# plt.fill_between([i for i in exp2['percentile']], [i for i in exp2['ci95_lo']], [i for i in exp2['ci95_hi']], color='yellow', alpha=.1)
plt.plot(exp3['percentile'], exp3['mean'], c='green', marker='+', label='No punish, Yes Norm')
# plt.fill_between([i for i in exp3['percentile']], [i for i in exp3['ci95_lo']], [i for i in exp3['ci95_hi']], color='green', alpha=.1)
plt.plot(exp4['percentile'], exp4['mean'], c='blue', marker='s', label='Yes punish, Yes Norm')
plt.fill_between([i for i in exp4['percentile']], [i for i in exp4['ci95_lo']], [i for i in exp4['ci95_hi']], color='blue', alpha=.3)
plt.legend(loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Predicting the price of Bitcoin, intro to LSTM
###Code
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Data Exploration
###Code
data = pd.read_csv("data/bitcoin.csv")
data = data.sort_values('Date')
data.head()
price = data[['Close']]
plt.figure(figsize = (15,9))
plt.plot(price)
plt.xticks(range(0, data.shape[0],50), data['Date'].loc[::50],rotation=45)
plt.title("Bitcoin Price",fontsize=18, fontweight='bold')
plt.xlabel('Date',fontsize=18)
plt.ylabel('Close Price (USD)',fontsize=18)
plt.show()
price.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2001 entries, 2000 to 0
Data columns (total 1 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Close 2001 non-null float64
dtypes: float64(1)
memory usage: 31.3 KB
###Markdown
Data Preparation Normalization
###Code
from sklearn.preprocessing import MinMaxScaler
min_max_scaler = MinMaxScaler()
norm_data = min_max_scaler.fit_transform(price.values)
print(f'Real: {price.values[0]}, Normalized: {norm_data[0]}')
print(f'Real: {price.values[500]}, Normalized: {norm_data[500]}')
print(f'Real: {price.values[1200]}, Normalized: {norm_data[1200]}')
###Output
Real: [370.], Normalized: [0.01280082]
Real: [426.1], Normalized: [0.01567332]
Real: [8259.99], Normalized: [0.41679416]
###Markdown
Data split
###Code
def univariate_data(dataset, start_index, end_index, history_size, target_size):
data = []
labels = []
start_index = start_index + history_size
if end_index is None:
end_index = len(dataset) - target_size
for i in range(start_index, end_index):
indices = range(i-history_size, i)
# Reshape data from (history_size,) to (history_size, 1)
data.append(np.reshape(dataset[indices], (history_size, 1)))
labels.append(dataset[i+target_size])
return np.array(data), np.array(labels)
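# For example, with history_size=5 and target_size=0 (the values used below), sample i
# holds the five previous normalized closes, shaped (5, 1), and its label is the
# normalized close at index i, i.e. the step immediately after the window.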
past_history = 5
future_target = 0
TRAIN_SPLIT = int(len(norm_data) * 0.8)
x_train, y_train = univariate_data(norm_data,
0,
TRAIN_SPLIT,
past_history,
future_target)
x_test, y_test = univariate_data(norm_data,
TRAIN_SPLIT,
None,
past_history,
future_target)
###Output
_____no_output_____
###Markdown
Build the model
###Code
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Dense, LSTM, LeakyReLU, Dropout
num_units = 64
learning_rate = 0.0001
activation_function = 'sigmoid'
adam = Adam(lr=learning_rate)
loss_function = 'mse'
batch_size = 5
num_epochs = 50
# Initialize the RNN
model = Sequential()
model.add(LSTM(units = num_units, activation=activation_function, input_shape=(None, 1)))
model.add(LeakyReLU(alpha=0.5))
model.add(Dropout(0.1))
model.add(Dense(units = 1))
# Compiling the RNN
model.compile(optimizer=adam, loss=loss_function)
model.summary()
###Output
Model: "sequential_13"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_6 (LSTM) (None, 64) 16896
_________________________________________________________________
leaky_re_lu_4 (LeakyReLU) (None, 64) 0
_________________________________________________________________
dropout_4 (Dropout) (None, 64) 0
_________________________________________________________________
dense_6 (Dense) (None, 1) 65
=================================================================
Total params: 16,961
Trainable params: 16,961
Non-trainable params: 0
_________________________________________________________________
###Markdown
Train the model
###Code
# Using the training set to train the model
history = model.fit(
x_train,
y_train,
validation_split=0.1,
batch_size=batch_size,
epochs=num_epochs,
shuffle=False
)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title("Training and Validation Loss")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Prediction. For each of the items we used for validation, let's now predict them so we can compare how well we did.
###Code
original = pd.DataFrame(min_max_scaler.inverse_transform(y_test))
predictions = pd.DataFrame(min_max_scaler.inverse_transform(model.predict(x_test)))
ax = sns.lineplot(x=original.index, y=original[0], label="Test Data", color='royalblue')
ax = sns.lineplot(x=predictions.index, y=predictions[0], label="Prediction", color='tomato')
ax.set_title('Bitcoin price', size = 14, fontweight='bold')
ax.set_xlabel("Days", size = 14)
ax.set_ylabel("Cost (USD)", size = 14)
ax.set_xticklabels('', size=10)
###Output
_____no_output_____
###Markdown
Federal gun cases in Illinois Northern DistrictA tiny rig to generate graphics for Mick Dumke's [Why (Almost) No One Is Charged With Gun Trafficking in Illinois](https://www.propublica.org/article/gun-trafficking-charges-illinois).Notes:* Three cases filed prior to 2007 but later reopened are omitted from the analysis.* I used the [only Pacer bulk-data schema guide](https://www.pacer.gov/documents/bulk_data.pdf) I could find to understand the fields. I also talked with Mike Lissner of the Free Law project, who pointed me to documentation at https://free.law/pdf/PACER-API-Documentation.pdf and https://www.fjc.gov/research/idb to help clarify the meaning of key fields.* Because the field definitions are somewhat ambiguous and the existing documentation is not clear about how to use the fields for an analysis of this type, I tried three defendant counting methods to validate the work. Each method creates a different compound identifier out of partially unique fields that represent a distinct defendant and his or her charges. Every value of the results of these different counting methods precisely match.* This counts defendants, not discrete cases. For example, if John Doe was charged with 18:924C.F and 18:922G.F in the same year, he would be reflected in both categories. (This is the fundamental difference between the `cs_caseid` column, which, when grouped, aggregates charges for a given defendant, and `cs_casenumber`, which represents a given party and charges brought against them). Setup
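To make the three counting keys concrete before the code, here is a sketch built around one hypothetical row (every field value below is made up for illustration; `slugify` is the same helper already used by the counting cells further down):

```python
# Hypothetical row, for illustration only
row = {'cs_caseid': 12345, 'cs_case_number': '1:07-cr-00001',
       'party': 'John Doe', 'charges': '18:922G.F'}

key_method_1 = slugify(str(row['cs_caseid']) + row['party']) + '-' + slugify(row['charges'])  # caseid + party + charges
key_method_2 = slugify(str(row['cs_caseid'])) + '-' + slugify(row['charges'])                 # caseid + charges
key_method_3 = slugify(str(row['cs_case_number']))                                            # case number only
```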
###Code
df = pd.read_csv('processed/federal-gun-cases.csv', parse_dates=[
'cs_date_filed',
'cs_date_term',
'cs_date_reopen',
'lead_date_term',
'loc_date_end',
'loc_date_start',
'party_start_date',
'party_end_date'])
###Output
_____no_output_____
###Markdown
We also need to synthesize some columns.
###Code
df['year_filed'] = df.cs_date_filed.dt.year.astype(int)
df['month_filed'] = df.cs_date_filed.dt.month.astype(int)
df['year_month_filed'] = df['cs_date_filed'].dt.strftime('%Y-%m')
df['defendant_case_id'] = df.apply(lambda row: slugify(str(row['cs_caseid']) + row['party']) + '-' + slugify(row['charges']), axis=1)
###Output
_____no_output_____
###Markdown
18:922G.F (felon in possession) as percent (single bar chart)
###Code
charges = '18:922G.F'
df['defendant_case_id'] = df.apply(lambda row: slugify(str(row['cs_caseid']) + row['party']) + '-' + slugify(row['charges']), axis=1)
all_grouped = df[df['year_filed'] > 2006].groupby(['year_filed'])['defendant_case_id'].agg(['count'])
filtered = df[(df['charges'] == '18:922G.F') & (df['year_filed'] > 2006)]
grouped = filtered.groupby(['year_filed'])['defendant_case_id'].agg(['count'])
grouped['pct'] = grouped['count'] / all_grouped['count'] * 100
ax = grouped.plot(y='pct', kind="bar", figsize=[9,4], legend=False, width=0.7, color=['#B95949'], fontsize=11)
ax.set_ylim(0, 100)
ax.yaxis.set_visible(False)
ax.xaxis.grid(False)
xvals = ax.get_xticks()
xlabels = [str(x) for x in range(2007, 2018)]
# xlabels[-1] = '{0}*'.format(xlabels[-1])
ax.set_xticklabels(xlabels, rotation=0, fontproperties=font_regular, fontsize=11)
ax.xaxis.label.set_visible(False)
for spine in plt.gca().spines.values():
spine.set_visible(False)
rects = ax.patches
# Now make some labels
labels = []
for index, row in grouped.iterrows():
labels.append('{0}'.format(int(row['pct'])))
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 3, '{0}%'.format(label), ha='center', va='bottom', color='black', fontsize=12, fontproperties=font_regular)
ax.tick_params(labelsize=14)
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off' # ticks along the top edge are off
)
plt.savefig('image/%s-pct.svg' % slugify(charges), transparent=True)
###Output
_____no_output_____
###Markdown
Defendants per statute per year for most common charges (Grouped bar)
###Code
statutes = ['18:922G.F', '18:924C.F']
colors = ['#B95949', '#525254']
legend = ["Possession of a firearm by a felon", "Use of a gun in drug trafficking"]
filtered = df[(df['charges'].isin(statutes)) & (df['year_filed'] > 2006)]
pivoted = filtered.pivot_table(index=['year_filed'], columns=['charges'], values='defendant_case_id', aggfunc=lambda x: len(x.unique()))
ax = pivoted.plot(kind="bar", figsize=[10,3.2], width=0.6, color=colors, fontsize=14, rot=0)
ax.set_ylim(0, 120)
ax.yaxis.set_visible(False)
ax.xaxis.grid(False)
ax.xaxis.label.set_visible(False)
for spine in plt.gca().spines.values():
spine.set_visible(False)
rects = ax.patches
# Now make some labels
labels = [str(value) for index, value in pivoted.unstack().iteritems()]
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2.9, height + 3, label, ha='center', va='bottom', color='#000000', fontsize=9.5, fontproperties=font_regular)
rect.set_width(0.2)
ax.tick_params(labelsize=13)
ax.legend(legend, prop=font_regular).get_frame().set_linewidth(0.0)
ax.patches[-12].set_facecolor('#B97A6F')
ax.patches[-1].set_facecolor('#999999')
for label in ax.xaxis.get_majorticklabels():
label.customShiftValue = 0.075
label.set_x = types.MethodType( lambda self, x: matplotlib.text.Text.set_x(self, x-self.customShiftValue ),
label, matplotlib.text.Text )
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off' # ticks along the top edge are off
)
plt.savefig('image/grouped-bar.svg', transparent=True)
pivoted
###Output
_____no_output_____
###Markdown
Defendants per statute per month for most common charges (grouped bar)
###Code
ym_pivoted = filtered.pivot_table(index=['year_month_filed'], columns=['charges'], values='defendant_case_id', aggfunc=lambda x: len(x.unique()))
ym_pivoted.plot(kind="bar", figsize=[160, 20], color=colors, fontsize=11, rot=0)
ym_pivoted
###Output
_____no_output_____
###Markdown
3 ways of counting defendantsI did these counts three different ways to validate my methodology. The following section compares the three methods.
###Code
# validate later
methods = {'method1': [], 'method2': [], 'method3': []}
###Output
_____no_output_____
###Markdown
Counting method 1 (caseid, party, charges)
###Code
df['defendant_case_id'] = df.apply(lambda row: slugify(str(row['cs_caseid']) + row['party']) + '-' + slugify(row['charges']), axis=1)
all_grouped = df[df['year_filed'] > 2006].groupby(['year_filed'])['defendant_case_id'].agg(['count'])
for charge in ['18:924C.F', '18:922G.F']:
filtered = df[(df['charges'] == charge) & (df['year_filed'] > 2006)]
grouped = filtered.groupby(['year_filed'])['defendant_case_id'].agg(['count'])
grouped['pct'] = grouped['count'] / all_grouped['count'] * 100
methods['method1'].append(grouped)
ax = grouped.plot(y='count', kind="bar", figsize=[7,4], legend=False, width=0.8, color=['#B95949'], fontsize=11)
ax.set_ylim(0, 120)
yvals = ax.get_yticks()
ax.xaxis.grid(False)
xvals = ax.get_xticks()
xlabels = [str(x) for x in range(2007, 2018)]
xlabels[-1] = '{0}*'.format(xlabels[-1])
ax.set_xticklabels(xlabels, rotation=0, fontproperties=font_regular, fontsize=11)
ax.xaxis.label.set_visible(False)
for spine in plt.gca().spines.values():
spine.set_visible(False)
rects = ax.patches
# Now make some labels
labels = []
for index, row in grouped.iterrows():
labels.append('{0}'.format(int(row['count']), row['pct']))
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height - 8, label, ha='center', va='bottom', color='white', fontsize=11, fontproperties=font_medium)
ax.tick_params(labelsize=11)
ax.patches[-1].set_facecolor('#B97A6F')
print(charge)
print(grouped)
print('')
###Output
18:924C.F
count pct
year_filed
2007 35 34.653465
2008 59 39.072848
2009 39 38.235294
2010 48 30.379747
2011 28 22.580645
2012 49 29.878049
2013 48 30.188679
2014 32 27.826087
2015 27 29.347826
2016 28 18.181818
2017 27 16.875000
18:922G.F
count pct
year_filed
2007 37 36.633663
2008 54 35.761589
2009 35 34.313725
2010 80 50.632911
2011 59 47.580645
2012 92 56.097561
2013 79 49.685535
2014 63 54.782609
2015 58 63.043478
2016 106 68.831169
2017 117 73.125000
###Markdown
Counting method 2 (case id + party)
###Code
df['defendant_case_id'] = df.apply(lambda row: slugify(str(row['cs_caseid'])) + '-' + slugify(row['charges']), axis=1)
all_grouped = df[df['year_filed'] > 2006].groupby(['year_filed'])['defendant_case_id'].agg(['count'])
for charge in ['18:924C.F', '18:922G.F']:
filtered = df[(df['charges'] == charge) & (df['year_filed'] > 2006)]
grouped = filtered.groupby(['year_filed'])['defendant_case_id'].agg(['count'])
grouped['pct'] = grouped['count'] / all_grouped['count'] * 100
methods['method2'].append(grouped)
ax = grouped.plot(y='count', kind="bar", figsize=[7,4], legend=False, width=0.8, color=['#B95949'], fontsize=11)
ax.set_ylim(0, 120)
yvals = ax.get_yticks()
ax.xaxis.grid(False)
xvals = ax.get_xticks()
ax.set_xticklabels([str(x) for x in range(2007, 2018)], rotation=0, fontproperties=font_regular, fontsize=11)
ax.xaxis.label.set_visible(False)
for spine in plt.gca().spines.values():
spine.set_visible(False)
rects = ax.patches
# Now make some labels
labels = []
for index, row in grouped.iterrows():
labels.append('{0}'.format(int(row['count']), row['pct']))
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height - 8, label, ha='center', va='bottom', color='white', fontsize=9, fontproperties=font_medium)
ax.tick_params(labelsize=11)
print(charge)
print(grouped)
print('')
###Output
18:924C.F
count pct
year_filed
2007 35 34.653465
2008 59 39.072848
2009 39 38.235294
2010 48 30.379747
2011 28 22.580645
2012 49 29.878049
2013 48 30.188679
2014 32 27.826087
2015 27 29.347826
2016 28 18.181818
2017 27 16.875000
18:922G.F
count pct
year_filed
2007 37 36.633663
2008 54 35.761589
2009 35 34.313725
2010 80 50.632911
2011 59 47.580645
2012 92 56.097561
2013 79 49.685535
2014 63 54.782609
2015 58 63.043478
2016 106 68.831169
2017 117 73.125000
###Markdown
Counting method 3 (just case number, which _should_ refer to unique defendants)
###Code
df['defendant_case_id'] = df.apply(lambda row: slugify(str(row['cs_case_number'])), axis=1)
all_grouped = df[df['year_filed'] > 2006].groupby(['year_filed'])['defendant_case_id'].agg(['count'])
for charge in ['18:924C.F', '18:922G.F']:
filtered = df[(df['charges'] == charge) & (df['year_filed'] > 2006)]
grouped = filtered.groupby(['year_filed'])['defendant_case_id'].agg(['count'])
grouped['pct'] = grouped['count'] / all_grouped['count'] * 100
methods['method3'].append(grouped)
ax = grouped.plot(y='count', kind="bar", figsize=[7,4], legend=False, width=0.8, color=['#B95949'], fontsize=11)
ax.set_ylim(0, 120)
yvals = ax.get_yticks()
ax.xaxis.grid(False)
xvals = ax.get_xticks()
ax.set_xticklabels([str(x) for x in range(2007, 2018)], rotation=0, fontproperties=font_regular, fontsize=11)
ax.xaxis.label.set_visible(False)
for spine in plt.gca().spines.values():
spine.set_visible(False)
rects = ax.patches
# Now make some labels
labels = []
for index, row in grouped.iterrows():
labels.append('{0}'.format(int(row['count']), row['pct']))
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height - 8, label, ha='center', va='bottom', color='white', fontsize=9, fontproperties=font_medium)
ax.tick_params(labelsize=11)
print(charge)
print(grouped)
print('')
###Output
18:924C.F
count pct
year_filed
2007 35 34.653465
2008 59 39.072848
2009 39 38.235294
2010 48 30.379747
2011 28 22.580645
2012 49 29.878049
2013 48 30.188679
2014 32 27.826087
2015 27 29.347826
2016 28 18.181818
2017 27 16.875000
18:922G.F
count pct
year_filed
2007 37 36.633663
2008 54 35.761589
2009 35 34.313725
2010 80 50.632911
2011 59 47.580645
2012 92 56.097561
2013 79 49.685535
2014 63 54.782609
2015 58 63.043478
2016 106 68.831169
2017 117 73.125000
###Markdown
Validate counting methods
###Code
methods['method1'][0] == methods['method2'][0]
methods['method2'][0] == methods['method3'][0]
methods['method1'][1] == methods['method2'][1]
methods['method2'][1] == methods['method3'][1]
###Output
_____no_output_____
###Markdown
All charges pct/count
###Code
all_grouped = df[df['year_filed'] > 2006].groupby(['year_filed'])['defendant_case_id'].agg(['count'])
for charge in df['charges'].unique():
filtered = df[(df['charges'] == charge) & (df['year_filed'] > 2006)]
grouped = filtered.groupby(['year_filed'])['defendant_case_id'].agg(['count'])
grouped['pct'] = grouped['count'] / all_grouped['count'] * 100
print(charge)
print(grouped)
print('')
###Output
18:922A.F
count pct
year_filed
2007 4 3.960396
2008 9 5.960265
2009 4 3.921569
2010 13 8.227848
2011 10 8.064516
2012 13 7.926829
2013 7 4.402516
2014 12 10.434783
2015 7 7.608696
2016 14 9.090909
2017 13 8.125000
18:922C.F
count pct
year_filed
2007 4 3.960396
2008 5 3.311258
2009 2 1.960784
2010 1 0.632911
2011 3 2.419355
2013 1 0.628931
18:922E.F
count pct
year_filed
2007 5 4.950495
2008 8 5.298013
2009 6 5.882353
2010 7 4.430380
2011 10 8.064516
2012 7 4.268293
2013 7 4.402516
2014 1 0.869565
18:922G.F
count pct
year_filed
2007 37 36.633663
2008 54 35.761589
2009 35 34.313725
2010 80 50.632911
2011 59 47.580645
2012 92 56.097561
2013 79 49.685535
2014 63 54.782609
2015 58 63.043478
2016 106 68.831169
2017 117 73.125000
18:924A.F
count pct
year_filed
2007 16 15.841584
2008 16 10.596026
2009 16 15.686275
2010 9 5.696203
2011 14 11.290323
2012 3 1.829268
2013 17 10.691824
2014 7 6.086957
2016 6 3.896104
2017 3 1.875000
18:924C.F
count pct
year_filed
2007 35 34.653465
2008 59 39.072848
2009 39 38.235294
2010 48 30.379747
2011 28 22.580645
2012 49 29.878049
2013 48 30.188679
2014 32 27.826087
2015 27 29.347826
2016 28 18.181818
2017 27 16.875000
###Markdown
Analysis
###Code
import numpy as np
import matplotlib.pyplot as plt
import torch
import h5py
from resnet import ResidualBlock, ResNet
from sklearn.metrics import mean_squared_error as MSE
from scipy.stats import norm
from tensorflow.compat.v1.train import summary_iterator
from collections import defaultdict
###Output
cuda:0
dataset loaded
train test split finished
###Markdown
Load data and create model
###Code
with h5py.File('data/uci_ml_hackathon_fire_dataset_2012-05-09_2013-01-01_30k_train_v2.hdf5', 'r') as f:
train_data = {}
for k in list(f):
train_data[k] = f[k][:]
with h5py.File('data/uci_ml_hackathon_fire_dataset_2013-01-01_2014-01-01_5k_test_v2.hdf5', 'r') as f:
test_data = {}
for k in list(f):
test_data[k] = f[k][:]
model0 = ResNet(ResidualBlock, [2, 2, 2])
model12 = ResNet(ResidualBlock, [2, 2, 2])
###Output
_____no_output_____
###Markdown
Training loss evaluation
###Code
resnet0_values = defaultdict(list)
for e in summary_iterator('log/resnet_0/events.out.tfevents.1590213759.LI-Desktop.12904.0'):
for v in e.summary.value:
resnet0_values[v.tag].append(v.simple_value)
resnet12_values = defaultdict(list)
for e in summary_iterator('log/resnet_12/events.out.tfevents.1590219309.LI-Desktop.28296.0'):
for v in e.summary.value:
resnet12_values[v.tag].append(v.simple_value)
###Output
_____no_output_____
###Markdown
***Note***: The IoU computed during training uses a threshold of 0; that is, any pixel predicted with a value greater than 0 is counted as a positive fire pixel. +12 evaluation
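Written out, the metric for a given threshold $t$ is

$$\mathrm{IoU}_t = \frac{\lvert\{\hat{y} > t\} \cap \{y > 0\}\rvert}{\lvert\{\hat{y} > t\} \cup \{y > 0\}\rvert}$$

with $t = 0$ during training; the `IoU` helper defined in the evaluation section below computes exactly this, plus a small smoothing constant to avoid division by zero.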
###Code
fig = plt.figure(figsize=(10,10))
plt.plot(range(50), np.array(resnet0_values['Train/Loss']), label='training loss')
plt.plot(range(50), np.array(resnet0_values['Valid/Loss']), label='validation loss')
plt.title('Loss through epoch')
plt.xlabel('epoch')
plt.ylabel('Loss')
plt.legend()
# plt.savefig('fig/12loss')
plt.show()
plt.figure(figsize=(10,10))
plt.plot(range(50), np.array(resnet0_values['Train/Mean IoU']))
plt.title('Training Mean IoU through epoch')
plt.xlabel('epoch')
plt.ylabel('Mean IoU')
# plt.savefig('fig/12trainiou')
plt.show()
plt.figure(figsize=(10,10))
plt.plot(range(50), np.array(resnet0_values['Valid/Mean IoU']))
plt.title('Validation Mean IoU through epoch')
plt.xlabel('epoch')
plt.ylabel('Mean IoU')
# plt.savefig('fig/12validiou')
plt.show()
###Output
_____no_output_____
###Markdown
+24 evaluation
###Code
fig = plt.figure(figsize=(10,10))
plt.plot(range(50), np.array(resnet12_values['Train/Loss']), label='training loss')
plt.plot(range(50), np.array(resnet12_values['Valid/Loss']), label='validation loss')
plt.title('Loss through epoch')
plt.xlabel('epoch')
plt.ylabel('Loss')
plt.legend()
# plt.savefig('fig/24loss')
plt.show()
plt.figure(figsize=(10,10))
plt.plot(range(50), np.array(resnet12_values['Train/Mean IoU']))
plt.title('Training Mean IoU through epoch')
plt.xlabel('epoch')
plt.ylabel('Mean IoU')
# plt.savefig('fig/24trainiou')
plt.show()
plt.figure(figsize=(10,10))
plt.plot(range(50), np.array(resnet12_values['Valid/Mean IoU']))
plt.title('Validation Mean IoU through epoch')
plt.xlabel('epoch')
plt.ylabel('Mean IoU')
# plt.savefig('fig/24validiou')
plt.show()
###Output
_____no_output_____
###Markdown
Load desired model
###Code
model0.load_state_dict(torch.load('model/resnet_0/best_valid_loss')['model_state_dict'])
model12.load_state_dict(torch.load('model/resnet_12/best_valid_loss')['model_state_dict'])
model0.eval()
model12.eval()
print()
###Output
###Markdown
Evaluation on test datasets MSE
###Code
ypred0 = model0(torch.Tensor(test_data['observed'])).detach()
ypred12 = model12(torch.Tensor(test_data['observed'])).detach()
ytrue0 = test_data['target'][:,0,...].reshape((-1,900))
ytrue12 = test_data['target'][:,1,...].reshape((-1,900))
print('MSE for +12 is:', MSE(ytrue0, ypred0))
print('MSE for +24 is:', MSE(ytrue12, ypred12))
###Output
MSE for +24 is: 0.042786136
###Markdown
IoU for different thresholds (visualization used for the presentation)
###Code
def IoU(predict, target, smooth=1e-6, thres=0):
intersection = ((predict > thres) & (target > 0)).sum(1)
union = ((predict > thres) | (target > 0)).sum(1)
iou = (intersection + smooth) / (union + smooth)
return iou.numpy()
thres = np.linspace(0,1,101)
mean_iou0 = np.array([np.mean(IoU(ypred0, ytrue0, thres=t)) for t in thres])
mean_iou12 = np.array([np.mean(IoU(ypred12, ytrue12, thres=t)) for t in thres])
std_iou0 = np.array([np.std(IoU(ypred0, ytrue0, thres=t)) for t in thres])
std_iou12 = np.array([np.std(IoU(ypred12, ytrue12, thres=t)) for t in thres])
x = np.linspace(norm.ppf(0.01,loc=mean_iou0[0], scale=std_iou0[0]),
norm.ppf(0.99,loc=mean_iou0[0], scale=std_iou0[0]),100)
norm.pdf(x, loc=mean_iou0[0], scale=std_iou0[0])
np.where(thres==0.5)
plt.figure(figsize=(10,10))
x = np.linspace(norm.ppf(0.01,loc=mean_iou0[0], scale=std_iou0[0]),
norm.ppf(0.99,loc=mean_iou0[0], scale=std_iou0[0]),100)
plt.plot(x, norm.pdf(x, loc=mean_iou0[0], scale=std_iou0[0]), label='threshold 0')
x = np.linspace(norm.ppf(0.01,loc=mean_iou0[20], scale=std_iou0[20]),
norm.ppf(0.99,loc=mean_iou0[20], scale=std_iou0[20]),100)
plt.plot(x, norm.pdf(x, loc=mean_iou0[20], scale=std_iou0[20]), label='threshold 0.2')
x = np.linspace(norm.ppf(0.01,loc=mean_iou0[60], scale=std_iou0[60]),
norm.ppf(0.99,loc=mean_iou0[60], scale=std_iou0[60]),100)
plt.plot(x, norm.pdf(x, loc=mean_iou0[60], scale=std_iou0[60]), label='threshold 0.6')
plt.legend()
plt.xlabel('iou range')
plt.ylabel('pdf')
plt.title('gaussian distribution of IoU for different threshold for +12 prediction')
# plt.savefig('fig/12gaussainiou')
plt.show()
plt.figure(figsize=(10,10))
x = np.linspace(norm.ppf(0.01,loc=mean_iou12[0], scale=std_iou12[0]),
norm.ppf(0.99,loc=mean_iou12[0], scale=std_iou12[0]),100)
plt.plot(x, norm.pdf(x, loc=mean_iou12[0], scale=std_iou12[0]), label='threshold 0')
x = np.linspace(norm.ppf(0.01,loc=mean_iou12[20], scale=std_iou12[20]),
norm.ppf(0.99,loc=mean_iou12[20], scale=std_iou12[20]),100)
plt.plot(x, norm.pdf(x, loc=mean_iou12[20], scale=std_iou12[20]), label='threshold 0.2')
x = np.linspace(norm.ppf(0.01,loc=mean_iou12[60], scale=std_iou12[60]),
norm.ppf(0.99,loc=mean_iou12[60], scale=std_iou12[60]),100)
plt.plot(x, norm.pdf(x, loc=mean_iou12[60], scale=std_iou12[60]), label='threshold 0.6')
plt.legend()
plt.xlabel('iou range')
plt.ylabel('pdf')
plt.title('gaussian distribution of IoU for different threshold for +24 prediction')
# plt.savefig('fig/24gaussainiou')
plt.show()
plt.figure(figsize=(10,10))
plt.plot(thres, mean_iou0, label='+12 prediction')
plt.plot(thres, mean_iou12, label='+24 prediction')
plt.legend()
plt.xlabel('iou threshold range')
plt.ylabel('Mean IoU')
plt.title('Mean IoU over different threshold for +12/+24 prediction')
# plt.savefig('fig/testmeaniou')
plt.show()
fig, ax = plt.subplots(2,2, figsize=(10,10))
ind = np.random.choice(range(test_data['target'].shape[0]))
ax[0,0].imshow(ypred0[ind].reshape((30,30)), cmap='gray')
ax[0,0].set_title('predicted +12 hour')
ax[0,0].axis('off')
ax[0,1].imshow(ypred12[ind].reshape((30,30)), cmap='gray')
ax[0,1].set_title('predicted +24 hour')
ax[0,1].axis('off')
ax[1,0].imshow(ytrue0[ind].reshape((30,30)), cmap='gray')
ax[1,0].set_title('true +12 hour')
ax[1,0].axis('off')
ax[1,1].imshow(ytrue12[ind].reshape((30,30)), cmap='gray')
ax[1,1].set_title('true +24 hour')
ax[1,1].axis('off')
plt.show()
large_fire_inds = np.where(
(np.sum(test_data['observed'][:,0],axis=(1,2)) > 50) &
(np.sum(test_data['observed'][:,1],axis=(1,2)) > 50) &
(np.sum(test_data['observed'][:,2],axis=(1,2)) > 50) &
(np.sum(test_data['observed'][:,3],axis=(1,2)) > 50) &
(np.sum(test_data['observed'][:,4],axis=(1,2)) > 50) &
(np.sum(test_data['target'][:,0],axis=(1,2)) > 50)
)[0]
fig, ax = plt.subplots(2,2, figsize=(10,10))
ind = np.random.choice(large_fire_inds)
ax[0,0].imshow(ypred0[ind].reshape((30,30)), cmap='gray')
ax[0,0].set_title('predicted +12 hour')
ax[0,0].axis('off')
ax[0,1].imshow(ypred12[ind].reshape((30,30)), cmap='gray')
ax[0,1].set_title('predicted +24 hour')
ax[0,1].axis('off')
ax[1,0].imshow(ytrue0[ind].reshape((30,30)), cmap='gray')
ax[1,0].set_title('true +12 hour')
ax[1,0].axis('off')
ax[1,1].imshow(ytrue12[ind].reshape((30,30)), cmap='gray')
ax[1,1].set_title('true +24 hour')
ax[1,1].axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Import the dataset [TOC]
###Code
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import bokeh
import pylab
data1 = pd.read_csv('./data/covid19.csv')
data2 = data1.set_index('Observation Date')
data3 = data2.loc['15-03-2020']
###Output
_____no_output_____
###Markdown
Province-level analysis (Hubei analyzed separately) - Total confirmed, total deaths, and total recovered by province - Average daily new confirmed, new deaths, and new recovered by province - Recovery rate and death rate by province - Trends over time by province - Trends of the top-3 most-infected provinces and of provinces with high recovery rates - Total confirmed, total deaths, and total recovered by province
###Code
confirmed = data3['Confirmed']
deaths = data3['Deaths']
recover = data3['Recovered']
region = data3['Province/State']
data_conf = pd.concat([region, confirmed], axis = 1)
data_death = pd.concat([region, deaths], axis = 1)
data_rec = pd.concat([region, recover], axis = 1)
data_conf2 = data_conf.reset_index().drop('Observation Date', axis = 1)
data_death2 = data_death.reset_index().drop('Observation Date', axis = 1)
data_rec2 = data_rec.reset_index().drop('Observation Date', axis = 1)
hb_conf = data_conf2[data_conf2['Province/State'] == 'Hubei']
hb_death = data_death2[data_death2['Province/State'] == 'Hubei']
hb_rec = data_rec2[data_rec2['Province/State'] == 'Hubei']
data_death3 = data_death2.drop(12, axis = 0)
data_rec3 = data_rec2.drop(12, axis = 0)
data_conf3 = data_conf2.drop(12, axis = 0)
all_data = pd.concat([data_conf3, data_death3, data_rec3], axis = 1, ).drop('Province/State', axis = 1)
all_data['Province'] = data_death3['Province/State']
###Output
_____no_output_____
###Markdown
Matplotlib analysis:
###Code
fig1 = plt.figure(figsize = (20,15))
ax1 = fig1.add_subplot(311)
ax1.bar(data_conf3['Province/State'], data_conf3['Confirmed'])
pylab.xticks(rotation = 60)
ax2 = fig1.add_subplot(312)
ax2.bar(data_death3['Province/State'], data_death3['Deaths'])
pylab.xticks(rotation = 60)
ax3 = fig1.add_subplot(313)
ax3.bar(data_rec3['Province/State'], data_rec3['Recovered'])
pylab.xticks(rotation = 60)
fig1.show()
###Output
C:\Users\Administrator\anaconda3\lib\site-packages\ipykernel_launcher.py:14: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
###Markdown
Bokeh visualization:
###Code
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
from bokeh.models import Legend, LegendItem, ColumnDataSource, HoverTool
output_notebook()
p = figure(title = 'All Confirmed in China (Except Hubei)', plot_width = 800, plot_height = 500, x_range = all_data['Province'].tolist())
hover_tool = HoverTool(
tooltips = [
("Province", "@Province"),
("Confirmed", "@Confirmed"),
("Deaths", "@Deaths"),
("Recovered", "@Recovered")
],
#mode = 'vline'
)
p.add_tools(hover_tool)
source = ColumnDataSource(data = all_data)
bar1 = p.vbar(x = 'Province', top = 'Confirmed', source = source,width = 0.9, color = 'navy', alpha = 0.6, legend_label = 'Confirmed')
bar2 = p.vbar(x = 'Province', top = 'Deaths', source = source, width = 0.9, color = 'firebrick', alpha = 0.8, legend_label = 'Deaths')
bar3 = p.vbar(x = 'Province', top = 'Recovered', source = source, width = 0.9, color = '#a6cee3', alpha = 0.6, legend_label = 'Recovered')
p.legend.orientation = "horizontal"
p.x_range.range_padding = 0.1
p.legend.location = "top_right"
p.legend.click_policy = 'hide'
p.xaxis.major_label_orientation = 1.2
show(p)
###Output
_____no_output_____
###Markdown
- Average daily new confirmed cases, new deaths, and new recoveries by province
###Code
def added(x):
y = []
for i in range(1,len(x)):
y.append(x[i] - x[i-1])
y = np.array(y)
aver = np.mean(y)
return aver
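# Example: added(np.array([1, 3, 6])) -> mean of the day-over-day increases [2, 3] = 2.5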
data1.head()
data_prov = data1.set_index('Province/State')
Province = data1['Province/State'].unique()
conf_aver, death_aver, recov_aver = [], [], []
for prov in Province:
data = data_prov.loc[prov]
conf_aver.append(added(data['Confirmed'].values))
death_aver.append(added(data['Deaths'].values))
recov_aver.append(added(data['Recovered'].values))
Province = pd.DataFrame(Province)
hb_data = []
def return_df(x, name):
dir_df = pd.DataFrame(x)
dir_aver = pd.concat([Province, dir_df], axis = 1)
dir_aver.columns = ['Province', name]
hb_data.append(dir_aver[dir_aver['Province'] == 'Hubei'])
dir_aver.drop(12, axis = 0, inplace = True)
return dir_aver
conf_aver = return_df(conf_aver, 'confirmed')
death_aver = return_df(death_aver, 'deaths')
recov_aver = return_df(recov_aver, 'recovered')
all_data2 = pd.merge(conf_aver, death_aver, on = 'Province')
all_data2 = pd.merge(all_data2, recov_aver, on = 'Province')
#data1.groupby('Province/State').apply(added)
all_data2
###Output
_____no_output_____
###Markdown
Bokeh visualization:
###Code
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
from bokeh.models import Legend, LegendItem, ColumnDataSource, HoverTool
#output_notebook()
p2 = figure(title = 'Average Increase in Confirmed/Deaths/Recovered in China (Except Hubei)', plot_width = 800, plot_height = 500, x_range = all_data2['Province'].tolist())
source = ColumnDataSource(data = all_data2)
hover_tool = HoverTool(
tooltips = [
("Province", "@Province"),
("Confirmed", "@confirmed"),
("Deaths", "@deaths"),
("Recovered", "@recovered")
],
#mode = 'vline'
)
p2.add_tools(hover_tool)
bar1 = p2.vbar(x = 'Province', top = 'confirmed', source = source,width = 0.9, color = 'navy', alpha = 0.6, legend_label = 'confirmed')
bar2 = p2.vbar(x = 'Province', top = 'deaths', source = source, width = 0.9, color = 'firebrick', alpha = 0.8, legend_label = 'deaths')
bar3 = p2.vbar(x = 'Province', top = 'recovered', source = source, width = 0.9, color = '#a6cee3', alpha = 0.6, legend_label = 'recovered')
p2.legend.orientation = "horizontal"
p2.x_range.range_padding = 0.1
p2.legend.location = "top_right"
p2.legend.click_policy = 'hide'
p2.xaxis.major_label_orientation = 1.2
show(p2)
###Output
_____no_output_____
###Markdown
- Recovery rate, death rate, and recovered-to-deaths ratio by province. Recovery rate ($Rec_1$): $Rec_1 = \frac{\text{total recovered}}{\text{total confirmed}}$ Death rate ($Dea_1$): $Dea_1 = \frac{\text{total deaths}}{\text{total confirmed}}$
###Code
#data_death2,data_rec2, data_conf2
recrate = pd.concat([data_rec2['Province/State'],data_rec2['Recovered']/data_conf2['Confirmed']], axis = 1)
recrate.columns = ['Province', 'Recover']
dearate = pd.concat([data_death2['Province/State'],data_death2['Deaths']/data_conf2['Confirmed']], axis = 1)
dearate.columns = ['Province', 'Deaths']
Rates = pd.merge(recrate, dearate, on = 'Province')
Rates
###Output
_____no_output_____
###Markdown
Bokeh visualization:
###Code
from bokeh.layouts import column
#output_notebook()
p3_1 = figure(title = 'Recover Rate in China', x_range = Rates['Province'].tolist(), plot_width = 800, plot_height = 200)
source = ColumnDataSource(data = Rates)
hover_tool = HoverTool(
tooltips = [
("Province", "@Province"),
("Death Rate", "@Deaths"),
("Recover Rate", "@Recover")
],
#mode = 'vline'
)
p3_1.add_tools(hover_tool)
bar1 = p3_1.vbar(x = 'Province', top = 'Recover', source = source,width = 0.9, color = 'navy', alpha = 0.6)
p3_1.x_range.range_padding = 0.1
p3_1.y_range.start = 0.5
p3_1.xaxis.major_label_orientation = 1.2
p3_2 = figure(title = 'Death Rate in China',x_range = Rates['Province'].tolist(), plot_width = 800, plot_height = 200)
bar2 = p3_2.vbar(x = 'Province', top = 'Deaths', source = source, width = 0.9, color = 'firebrick', alpha = 0.6)
p3_2.add_tools(hover_tool)
p3_2.x_range.range_padding = 0.1
p3_2.xaxis.major_label_orientation = 1.2
show(column(p3_1, p3_2))
###Output
_____no_output_____
###Markdown
- Trends over time by province
###Code
# This part can be copied directly
###Output
_____no_output_____
###Markdown
- Trends of the top-3 most-infected provinces and of provinces with high recovery rates
###Code
#Top3 Infection:
print('Top3 Infection:\n', data_conf3.sort_values('Confirmed', ascending = False)[:3])
print('Top3 Recovered:\n',data_rec3.sort_values('Recovered', ascending = False)[:3])
print('Top3 Death Rate:\n', dearate.sort_values('Deaths', ascending = False)[:3])
print('Top5 Recover Rate:\n', recrate.sort_values('Recover', ascending = False)[:5])
###Output
Top3 Infection:
Province/State Confirmed
5 Guangdong 1360
11 Henan 1273
30 Zhejiang 1231
Top3 Recovered:
Province/State Recovered
5 Guangdong 1304
11 Henan 1250
30 Zhejiang 1211
Top3 Death Rate:
Province Deaths
12 Hubei 0.045506
28 Xinjiang 0.039474
8 Hainan 0.035714
Top5 Recover Rate:
Province Recover
15 Jiangsu 1.00000
27 Tibet 1.00000
24 Shanxi 1.00000
20 Qinghai 1.00000
16 Jiangxi 0.99893
###Markdown
- For Guangdong, Henan, Zhejiang, and Jiangsu, plot the trends of daily new confirmed, deaths, and recovered cases. Data extraction:
###Code
data4 = data1.set_index('Province/State')
gd = data4.loc['Guangdong'].drop(['Latitude', 'Longitude', 'Country/Region'], axis = 1)
hn = data4.loc['Henan'].drop(['Latitude', 'Longitude', 'Country/Region'], axis = 1)
zj = data4.loc['Zhejiang'].drop(['Latitude', 'Longitude', 'Country/Region'], axis = 1)
js = data4.loc['Jiangsu'].drop(['Latitude', 'Longitude', 'Country/Region'], axis = 1)
def get_column(x):
conf = x.Confirmed.diff(1)
deat = x.Deaths.diff(1)
rec = x.Recovered.diff(1)
df = pd.concat([conf, deat, rec, x['Observation Date']], axis = 1).dropna()
df = df.reset_index()
df.columns = ['Province','Confirmed', 'Deaths', 'Recovered', 'Date']
df['Date'] = pd.to_datetime(df['Date'], format = '%d-%m-%Y')
return df
gd_added = get_column(gd)
hn_added = get_column(hn)
zj_added = get_column(zj)
js_added = get_column(js)
all_added = [gd_added, hn_added, zj_added, js_added]
js_added.head()
###Output
_____no_output_____
###Markdown
Bokeh visualization: the figure has three parts: 1. Daily new confirmed cases (the four confirmed lines share the same color). 2. Daily new deaths (the four death lines share the same color). 3. Daily new recoveries (the four recovery lines share the same color). 4. The lines can be filtered by city.
###Code
from bokeh.layouts import column, layout
from bokeh.models import ColumnDataSource as CDS
from bokeh.models import Toggle
hover_tool = HoverTool(
tooltips = [
("Province", '@Province'),
("Confirmed", "@Confirmed"),
("Deaths", "@Deaths"),
("Recovered", "@Recovered"),
("Date", "@Date{%F}")
],
formatters = {
"@Date":"datetime",
},
#mode = 'vline'
)
plot = figure(x_axis_type = "datetime",title = 'Daily New Confirmed/Deaths/Recovered for Selected Provinces',plot_width = 800, plot_height = 500)
plot.add_tools(hover_tool)
toggles = []
for data in all_added:
source = CDS(data = data)
p1 = plot.line(y = 'Confirmed', x = 'Date', source = source, line_color = 'Firebrick',line_alpha = 0.6, line_width = 2, legend_label = 'Confirmed')
p2 = plot.line(y = 'Deaths', x = 'Date', source = source, line_color = 'Navy',line_alpha = 0.6, line_width = 2,legend_label = 'Deaths')
p3 = plot.line(y = 'Recovered', x = 'Date', source = source, line_color = '#a6cee3',line_alpha = 0.6, line_width = 2,legend_label = 'Recovered')
toggle2 = Toggle(label = data['Province'][0], button_type = 'default', active = True, width_policy = 'max', background = 'grey')
toggle2.js_link('active', p1, 'visible')
toggle2.js_link('active', p2, 'visible')
toggle2.js_link('active', p3, 'visible')
toggles.append(toggle2)
plot.legend.location = "top_right"
plot.legend.click_policy = 'hide'
layouts = layout([plot, toggles])
show(layouts)
###Output
_____no_output_____
###Markdown
National overview analysis - National cumulative confirmed trend; total confirmed, total deaths, and total recovered - National death rate and recovery rate - National daily net-increase trend - Modeling the national data - National cumulative confirmed trend, total confirmed, deaths, and recovered, plus the daily net-increase trend
###Code
f = open('./data/timeseries.json', 'r')
content = f.read()
a = json.loads(content)
chdata = pd.DataFrame(a['China'])
chdata['date'] = pd.to_datetime(chdata['date'], format = '%Y-%m-%d')
chdata_add = pd.concat([chdata['date'], chdata['confirmed'].diff(1), chdata['deaths'].diff(1), chdata['recovered'].diff(1)], axis = 1).dropna()
p4_1 = figure(x_axis_type = 'datetime', plot_width = 800, plot_height = 250)
source1 = ColumnDataSource(data = chdata)
source2 = ColumnDataSource(data = chdata_add)
hover_tool1 = HoverTool(
tooltips = [
("Confirmed", "@confirmed{0}"),
("Deaths", "@deaths{0}"),
("Recovered", "@recovered{0}"),
("Date", "@date{%F}")
],
formatters = {
"@date":"datetime",
},
#mode = 'vline'
)
p4_1.add_tools(hover_tool1)
area1 = p4_1.varea(y2 = 'confirmed',y1 = 0, x = 'date', source = source1, color = 'firebrick', alpha = 0.5, legend_label = 'Confirmed')
line1 = p4_1.line(y = 'confirmed',x = 'date', source = source1, color = 'firebrick', alpha = 0.8, line_width = 2, legend_label = 'Confirmed')
area2 = p4_1.varea(y2 = 'deaths', y1 = 0, x = 'date', source = source1, color = 'navy', alpha = 0.5, legend_label = 'Deaths')
line2 = p4_1.line(y = 'deaths',x = 'date', source = source1, color = 'navy', alpha = 0.8, line_width = 2, legend_label = 'Deaths')
area3 = p4_1.varea(y2 = 'recovered', y1 = 0, x = 'date', source = source1, color = '#a6cee3', alpha = 0.5, legend_label = 'Recovered')
line3 = p4_1.line(y = 'recovered', x = 'date', source = source1, color = '#a6cee3', alpha = 0.8, line_width = 2, legend_label = 'Recovered')
p4_1.legend.location = "top_right"
p4_1.legend.click_policy = 'hide'
#show(p4_1)
p4_2 = figure(x_axis_type = 'datetime', plot_width = 800, plot_height = 250)
p4_2.add_tools(hover_tool1)
l1 = p4_2.line(y = 'confirmed', x = 'date', source = source2, color = 'firebrick', line_width = 3, legend_label = 'Confirmed')
l2 = p4_2.line(y = 'deaths', x = 'date', source = source2, color = 'navy', line_width = 3, legend_label = 'Deaths')
l3 = p4_2.line(y = 'recovered',x = 'date', source = source2, color = '#a6cee3', line_width = 3, legend_label = 'Recovered')
p4_2.legend.location = "top_right"
p4_2.legend.click_policy = 'hide'
layouts = layout([p4_1, p4_2])
show(layouts)
###Output
_____no_output_____
###Markdown
- National death rate and recovery rate. Death rate ($Dea_2$): $Dea_2 = \frac{\text{national total deaths}}{\text{national total confirmed}}$ Recovery rate ($Rec_2$): $Rec_2 = \frac{\text{national total recovered}}{\text{national total confirmed}}$
###Code
total_conf = chdata['confirmed'].iloc[-1]
total_death = chdata['deaths'].iloc[-1]
total_rec = chdata['recovered'].iloc[-1]
print('Death Rate:\t', total_death/total_conf)
print('Recover Rate:\t', total_rec/total_conf)
###Output
Death Rate: 0.054738456094828095
Recover Rate: 0.9392227398714396
###Markdown
- Modeling the national time-series data. Bokeh modeling:
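The interactive cell below overlays the observed cumulative confirmed series (blue) with a logistic-style curve family (red) driven by two sliders; the JavaScript callback evaluates

$$y(x) = 84000\,\frac{e^{a x} - 1}{c + e^{a x}}$$

where $a$ is the first slider (0 to 1, growth rate), $c$ is the other slider (-10 to 10, shifting the inflection), and 84000 is the scale constant hard-coded in the callback.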
###Code
from bokeh.layouts import column
from bokeh.models import CustomJS, ColumnDataSource, Slider
x = np.linspace(0, 161, 2000)
y = 1/(np.exp(1)**(-x)+1)
source1 = ColumnDataSource(data = dict(x = range(0,161), y = chdata['confirmed']))
source2 = ColumnDataSource(data = dict(x = x, y = y))
plot = figure(plot_width = 400, plot_height = 400)
plot.line('x', 'y', source = source1, line_width = 3, line_alpha = 0.6)
plot.line('x', 'y', source = source2, line_width = 3, line_alpha = 0.6, line_color = 'firebrick')
slider1 = Slider(start = 0, end = 1, value = 0, step = 0.01)
#slider2 = Slider(start = 0, end = 10, value = 1,step = 0.1)
slider3 = Slider(start = -10, end = 10, value = 2 , step = 0.1)
callback = CustomJS(args = dict(source = source2, slider1 = slider1, slider3 = slider3), code = """
var data = source.data;
var f1 = slider1.value;
var f3 = slider3.value;
var x = data['x'];
var y = data['y'];
for (var i = 0; i < x.length; i++){
y[i] = 84000*(Math.pow(Math.exp(1), f1*x[i])-1)/(f3+Math.pow(Math.exp(1), f1*x[i]))
}
source.change.emit();
""")
slider1.js_on_change('value', callback)
#slider2.js_on_change('value', callback)
slider3.js_on_change('value', callback)
show(column(slider1, slider3, plot))
###Output
_____no_output_____
###Markdown
Assessing the representativity of the UK Parliament following the 2017 General ElectionOr: how Theresa May came into power with less than a third of the UK casting a vote for her party.TL-DR: FPTP + abstention Background and definitionsThe UK (like many western democracies) is a representative democracy, meaning it elects people to represent them and make decisions.The basic idea is as follow: the UK is geographically divided into 650 **constituencies** of roughly the same number of voters, each of which holds a mini-election for one **seat** in the House of Commons.These mini-elections are done using what is called a **First-Past the Post** (FPTP) system, meaning all voters cast one vote for one of the candidate, and the candidate with the more votes wins the seat. This system has the advantage of being easy to understand and put in place, but also has several drawbacks. For example, a candidate does not need 50% support to win, just having the highest score (votes might be split e.g. 45-40-15).The population legally allowed to cast a vote is called the **electorate**. The number of **valid votes** (votes which are counted to determine seats) is however smaller than the number of people in the electorate, as voting is not mandatory and some votes might be spoiled (i.e. made unvalid for a variety of reasons). The ratio of valid votes against the electorate is called the **turnout**.After the election, the **government** is elected based on a majority of seats in the House of Commons. The government can then propose all sort of ideas to improve the country, in the form of laws that are voted in the House of Commons by their majority.To simplify the whole process, candidates are organized into **parties**, which publish a political **manifesto** prior to the election. When you are deciding which candidate to vote for, you can refer to the manifesto to know what are the laws they are planning to pass if they take part in a government.There are many additional subtleties to consider (electoral pacts, majority or coalitions governments, the official opposition, party leaders, the Monarch's role, etc.) but we will ignore them here. 2017 ResultSee https://www.bbc.com/news/election/2017/resultsTurnout was 68.7%.Following the election, a government was formed by the *Conservative* party, with support from the *Democratic Unionist Party*. Problem definitionWe will look at how well the parliament, and the government, actually represent the electorate, using two metrics:1) how many eligible voters have voted for an actual member of the House of Commons2) how many eligible voters have actually voted for the government's manifesto.Some hypotheses:- We assume voters vote for a party, not a candidate- We assume voters vote for their preferred choice- We assume non-voters were not satisfied with any of the choicesAbout the 2nd hypothesis, we know that it is not exactly the case: as a consequence of FPTP, some people will vote "tactically" for a less-liked but better-placed candidate. This means our analysis will be optimistic relating to the representativity of Parliament. DataThe data is from the Electoral Commission website and can be found here : https://www.electoralcommission.org.uk/who-we-are-and-what-we-do/elections-and-referendums/past-elections-and-referendums/uk-general-elections/results-and-turnout-2017-uk-general-election Analysis Before we do anything, let's import some libraries.
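To make the FPTP and turnout definitions above concrete, here is a minimal sketch with hypothetical numbers (not taken from the data analysed below):

```python
# One hypothetical constituency: the candidate with the most votes wins the seat,
# even without an absolute majority (a 45-40-15 style split).
votes = {'Party A': 4500, 'Party B': 4000, 'Party C': 1500}
winner = max(votes, key=votes.get)            # 'Party A'
electorate = 15000                            # eligible voters in this seat
turnout = sum(votes.values()) / electorate    # valid votes / electorate ~= 0.667
print(winner, f'turnout {turnout:.1%}')       # Party A turnout 66.7%
```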
###Code
import csv
###Output
_____no_output_____
###Markdown
Let's load some administrative data about each constituency
###Code
constituencies_info = {}
with open("2017-UKPGE-Electoral-Data - Administrative data.csv", encoding='utf8') as f:
# Note: there's a typo in the CSV file
# Party Identifer => Party Identifier
reader = csv.DictReader(f)
for row in reader:
ons = row["ONS Code"]
name = row["Constituency"]
electorate_nb = row["Electorate "] # extra space because why not
valid_vote_nb = row["Total number of valid votes counted"]
assert ons not in constituencies_info
constituencies_info[ons] = {
'name': name,
'electorate_nb': int(electorate_nb.replace(',', '')),
'valid_vote_nb': int(valid_vote_nb.replace(',', ''))
}
print("Loaded administrative data for", len(constituencies_info.keys()), "constituencies")
###Output
Loaded administrative data for 650 constituencies
###Markdown
Now let's load the election results.
###Code
results_by_constituencies = {}
with open("2017-UKPGE-Electoral-Data - Results.csv", encoding='utf8') as f:
reader = csv.DictReader(f)
for row in reader:
ons = row["ONS Code"]
party = row["Party Identifer"] # typo in the data because why not
result = int(row["Valid votes"])
if ons not in results_by_constituencies:
results_by_constituencies[ons] = {}
# if party.startswith("Independent"):
# party += row["Surname"]
results_by_constituencies[ons][party] = result
print("Loaded results for ", len(results_by_constituencies.keys()), "constituencies")
###Output
Loaded results for 650 constituencies
###Markdown
We can check that we reproduce the election results:
###Code
seats = {}
for ons in results_by_constituencies.keys():
winner = max(results_by_constituencies[ons], key=results_by_constituencies[ons].get)
if winner not in seats:
seats[winner] = 0
seats[winner] += 1
print("== Party seats ==")
for party in seats.keys():
print("\t", party, seats[party])
###Output
== Party seats ==
Conservative 317
Labour 262
Liberal Democrats 12
Green Party 1
Speaker 1
DUP 10
Sinn Féin 7
Independent 1
SNP 35
Plaid Cymru 4
###Markdown
These are indeed the results reported by the BBC: https://www.bbc.com/news/election/2017/results (note that the BBC counts the Speaker as a Conservative). Let's also calculate the total number of voters:
###Code
electorate_total = 0
for ons in results_by_constituencies.keys():
electorate = constituencies_info[ons]["electorate_nb"]
electorate_total += electorate
print("There were", electorate_total, "voters in the election")
###Output
There were 46835433 voters in the election
###Markdown
Let's look at our first point: how many people voted for the candidate who ended up winning their seat? We call them "happy voters".
###Code
happy_total = 0
for ons in results_by_constituencies.keys():
winner = max(results_by_constituencies[ons], key=results_by_constituencies[ons].get)
nb_of_happy_voters = results_by_constituencies[ons][winner]
happy_total += nb_of_happy_voters
print("There were", happy_total, "happy voters in the election")
print("These represent", 100 * round(happy_total / electorate_total, 2), "% of the electorate.")
###Output
There were 17990241 happy voters in the election
These represent 38.0 % of the electorate.
###Markdown
And now let's look at the second point: how many people voted for parties that eventually became part of the government? We call them "majority voters".
###Code
majority_total = 0
for ons in results_by_constituencies.keys():
nb_of_majority_voters = 0
if "Conservative" in results_by_constituencies[ons]:
nb_of_majority_voters += results_by_constituencies[ons]["Conservative"]
if "DUP" in results_by_constituencies[ons]:
nb_of_majority_voters += results_by_constituencies[ons]["DUP"]
majority_total += nb_of_majority_voters
print("There were", majority_total, "majority voters in the election")
print("These represent", 100 * round(majority_total / electorate_total, 2), "% of the electorate.")
###Output
There were 13929000 majority voters in the election
These represent 30.0 % of the electorate.
###Markdown
Tesla Energy Time Series Data Challenge > 💁 Author: Mei Mei > 📧 Email: [email protected] > 📌 GitHub Link: https://github.com/vickymei/tesla_energy_project Summary This project focuses on solving the problem of identifying malfunctioning Energy Production Sites by 1. Built a ETL pipeline to collect signal data through 2 Energy Realtime Data API Endpoints2. Collected 29 hours historical data 3. Provided a Plotly Dash dashboard for visualizing data from 42 sites4. Proposed solutions for more in-depth Anomaly Detection This Notebook will explain this project in the following order: Part 1. ETL Pipeline Building Part 2. Data Processing & Plotly VisualizationPart 3. Analysis and Insights on Malfunctioning SitesPart 4. More Anomaly Detection Thoughts Part 1. ETL Pipeline Building This section aims at building data pipeline and collecting signal data using pipelinel. 1. There will be one json file generated every minute. After running the script for roughly 29 hours, there are 29 hours * 60 = 1740 files in data/ directory2. Json files are named by timestamp of generation time3. Try Except is utilized for possible api call failure ![jsons.JPG](attachment:jsons.JPG) ```pythondef get_sites_from_api(): """Return list of all existing sites.""" load_dotenv() json_data = requests.get( "https://te-data-test.herokuapp.com/api/sites?token="+os.environ.get("api-token")).json() sites = json_data["sites"] return sitesdef get_signals_from_api(site): """Return site signal data.""" load_dotenv() json_data = requests.get( "https://te-data-test.herokuapp.com/api/signals?token="+os.environ.get("api-token")+f"&site={site}").json() return json_data``` ```pythondef main(): """Write signal data to local file system in json format every minute. """ sites = get_sites_from_api() site_data = [] timestamp_now = datetime.now() timestamp_file_name = f"signal-{timestamp_now.year}-{timestamp_now.month}-{timestamp_now.day}-{timestamp_now.hour}-{timestamp_now.minute}-{timestamp_now.second}.json" for site in sites: try: site_data.append(get_signals_from_api(site)) except: error_data = { 'signals': {'SITE_SM_batteryInstPower': None, 'SITE_SM_siteInstPower': None, 'SITE_SM_solarInstPower': None}, 'site': site, 'timestamp': None} site_data.append(error_data) with open(f"data/{timestamp_file_name}", 'w') as f: json.dump(site_data, f) return 'success'``` Note: By setting sleeping time and calling api at a relatively low frequency (per minute), I didn't notice data loss due to API issues during data collection. Part 2. Data Processing & Plotly Visualization Data Processing
###Code
import json
import pandas as pd
import os
import missingno as msno
import seaborn as sns
import warnings
import numpy as np
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
%matplotlib inline
pd.set_option('display.max_rows', 20)
# Read all json files and print statements about number of records
path_to_json = 'data/'
json_files = [f'data/{pos_json}' for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]
print(f"There are {len(json_files)} json files in total.")
js = []
for fn in json_files:
with open(fn, 'r') as f:
js = js + json.load(f)
print(f"There are {len(js)} signal from 42 sites in total.")
# Transform dataframe for later analysis
df = pd.DataFrame(js)
df.head(5)
# Transform dataframe for later analysis
df_main = pd.concat([df.drop(['signals'], axis=1), df['signals'].apply(pd.Series)], axis=1)
df_main = df_main[['site', 'timestamp', 'SITE_SM_solarInstPower']]
df_main['timestamp'] = pd.to_datetime(df_main['timestamp'])
df_main.head(5)
# Put all sites' name into a list
sites =['134a3fa6', '8d9fed87', '5688ed10', '2b33a48d', '07333ad0', '38c8ae33',
'adc42b19', 'e9ba8cec', 'e12c2148', '4b78aae6', 'e724ca65', '135433c1',
'90606897', '02ebf5c7', 'c8eb2d3d', '2b98cbdd', '39146e59', '55af2f9b',
'28731623', '3193e230', 'e6bcf7cf', '7da0acb7', 'c18b6195', '20abb173',
'f34b386a', 'f7f9ac09', '5fc96249', '82c74b9e', 'b255f7ad', '61bff705',
'619fd2b9', '260f359a', '4faff963', '499a251d', 'dfc6fdf5', '64e1616f',
'93c8a2c1', 'eec02ec5', '90791ae9', '49b6c0dd', 'd0926969', '7435e9d3']
# Ignore the minor 1 or 2 sec difference for all sites signal from one api call, using timestamp of site 134a3fa6
df_timestamp = df_main[df_main['site']=="134a3fa6"].reset_index(drop=True)[['timestamp']]
# We are investigating solar production signals, so variable 'SITE_SM_solarInstPower' is the target variable
for site in sites:
df_site = df_main[df_main['site']==site].reset_index(drop=True)
df_timestamp.loc[:, site] = df_site['SITE_SM_solarInstPower'].tolist()
# timestamp dataframe is saved in csv format for plotly dashboard building
df_timestamp.to_csv('timestamp.csv')
df_timestamp.head(5)
###Output
_____no_output_____
###Markdown
Plotly Visualization The screenshot below is a simple plotly dashboard to visualize all sites signals. You could select different site code to see data coming from any of the 42 sites. In order to run the app on your local machine, you need to run `python viz.py` in your terminal and visit http://127.0.0.1:8050/ in your web browser. ![2.JPG](attachment:2.JPG) >The Code snippet below creates the simple dashboard above. ```pythonapp = dash.Dash(__name__)sites = ['134a3fa6', '8d9fed87', '5688ed10', '2b33a48d', '07333ad0', '38c8ae33', 'adc42b19', 'e9ba8cec', 'e12c2148', '4b78aae6', 'e724ca65', '135433c1', '90606897', '02ebf5c7', 'c8eb2d3d', '2b98cbdd', '39146e59', '55af2f9b', '28731623', '3193e230', 'e6bcf7cf', '7da0acb7', 'c18b6195', '20abb173', 'f34b386a', 'f7f9ac09', '5fc96249', '82c74b9e', 'b255f7ad', '61bff705', '619fd2b9', '260f359a', '4faff963', '499a251d', 'dfc6fdf5', '64e1616f', '93c8a2c1', 'eec02ec5', '90791ae9', '49b6c0dd', 'd0926969', '7435e9d3']app.layout = dash.html.Div([ dash.html.H4('Tesla Energy - Solar Power Production Daily Monitor'), dash.dcc.Graph(id="time-series-chart"), dash.html.P("Select Site Code: "), dash.dcc.Dropdown( id="ticker", options=sites, value="134a3fa6", clearable=False, ),])@app.callback( dash.Output("time-series-chart", "figure"), dash.Input("ticker", "value"))def display_time_series(ticker): df1 = pd.read_csv('timestamp.csv') fig = px.bar(df1, x='timestamp', y=ticker) return fig``` Part 3. Analysis and Insights on Malfunctioning Sites Sites return No Signals (Missing Values)
###Code
df_missing = df_timestamp.drop('timestamp', axis=1)
msno.matrix(df_missing, color=(1, 0.38, 0.27))
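# Added sketch (not in the original notebook): complement the matrix plot with a
# per-site count of missing readings, sorted so the worst sites come first.
missing_per_site = df_missing.isna().sum().sort_values(ascending=False)
print(missing_per_site.head(10))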
###Output
_____no_output_____
###Markdown
Sites return negative signals (Solar Production should always be positive)
###Code
df_negative = df_missing.fillna(0)
df_negative[df_negative < 0] = np.nan
msno.matrix(df_negative, color=(0.27, 0.52, 1.0))
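# Added sketch (not in the original notebook): count negative readings per site,
# since solar production should never drop below zero.
negative_per_site = (df_missing < 0).sum().sort_values(ascending=False)
print(negative_per_site.head(10))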
###Output
_____no_output_____
###Markdown
GitHub Community Health
###Code
from scipy.io import arff
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from sklearn import tree
import graphviz
# Models
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.cluster import AgglomerativeClustering, KMeans
# Apply label encoder to categorical columns
# Return indices of those columns
def categorical_to_numeric(df):
    cols, indices = [], []
    for i, col in enumerate(df):
        if isinstance(df[col][0], bytes):
            cols.append(col)
            indices.append(i)
    df[cols] = df[cols].apply(LabelEncoder().fit_transform)
    return indices
# Load arff file into pandas dataframe
data = arff.loadarff("github.arff")
df = pd.DataFrame(data[0])
# Delete features
# del df["forks_count"]
del df["seconds_since_updated"]
del df["seconds_since_pushed"]
# Convert categorical data to numeric
cat_cols = categorical_to_numeric(df)
# Impute missing values
df.replace("?", np.NaN, inplace=True)
imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
idf = pd.DataFrame(imputer.fit_transform(df))
idf.columns = df.columns
idf.index = df.index
# Split off the column we want to predict
predict_attribute = "contributor_count"
X_df = idf.drop(predict_attribute, axis=1)
y_df = idf[predict_attribute]
# Normalize inputs, convert to numpy
X = MinMaxScaler().fit_transform(X_df)
y = y_df.to_numpy()
y_log = np.log(y)
print("Contributor count statistics")
print(pd.DataFrame(y).describe())
print("\nContributor count (logged) statistics")
print(pd.DataFrame(y_log).describe())
###Output
Contributor count statistics
0
count 10000.000000
mean 27.007700
std 85.154676
min 1.000000
25% 1.000000
50% 1.000000
75% 13.000000
max 988.000000
Contributor count (logged) statistics
0
count 10000.000000
mean 1.360771
std 1.734924
min 0.000000
25% 0.000000
50% 0.000000
75% 2.564949
max 6.895683
###Markdown
Split into training and testing datasets
###Code
# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
###Output
(8500, 36) (1500, 36) (8500,) (1500,)
###Markdown
Taking log10 of data makes regressions a much easier task
###Code
y_log_train, y_log_test = np.log10(y_train), np.log10(y_test)
###Output
_____no_output_____
###Markdown
Split off a sample of the data set to speed up feature analysis
###Code
# Randomly sample from the data set
selection = np.random.randint(1, len(X), 500)
X_sample = X[selection]
y_sample = y_log[selection]
# Split sample into train and test sets
X_sample_train, X_sample_test, y_sample_train, y_sample_test = train_test_split(X_sample, y_sample, test_size=0.3)
print(X_sample_train.shape, X_sample_test.shape, y_sample_train.shape, y_sample_test.shape)
###Output
(350, 36) (150, 36) (350,) (150,)
###Markdown
Feature Analysis Compute the mean and the baseline error for contributor count and the log of it
###Code
mean = np.mean(y)
print(f"Baseline error: {mean_absolute_error(np.full_like(y, mean), y)}")
mean_log = np.mean(y_log)
print(f"Log baseline error: {mean_absolute_error(np.full_like(y_log, mean_log), y_log)}")
# Plot with n best features
def view_pred(n, n_pred, n_feats):
pred = n_pred[n]
print(*[col for col in df.columns[:-1][n_feats[n]]], sep=" ")
plt.plot(pred, label="Prediction")
plt.plot(y_sample_test, label="True")
plt.title(f"{n} best features")
plt.legend()
plt.show()
# Sequential feature selection
# Define custom distance metric to handle numeric and categorical data
def my_dist(x, y, cat_cols=[]):
total = 0
for i, feature in enumerate(x):
if i not in cat_cols:
total += np.abs(feature - y[i])
elif feature != y[i]:
total += 0.5
return total
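# Quick sanity check of the custom metric (added illustration): a 0.5 gap in the
# numeric feature plus one categorical mismatch at index 1 gives 0.5 + 0.5 = 1.0.
print(my_dist([0.25, 0], [0.75, 1], cat_cols=[1]))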
# regr = KNeighborsRegressor(metric=my_dist, metric_params={"cat_cols": cat_cols})
# regr = MLPRegressor(max_iter=10000)
regr = DecisionTreeRegressor(max_depth=10)
domain = np.arange(1, 35, 2)
n_err = {}
n_pred = {}
n_feats = {}
for i, n in enumerate(domain):
sfs = SequentialFeatureSelector(regr, n_features_to_select=int(n), direction="forward")
sfs.fit(X_sample_train, y_sample_train)
feats = sfs.get_support()
n_feats[n] = feats
X_reduce = X_sample_test.T[feats].T
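    # Note: the regressor below is refit and scored on the same reduced sample split,
    # so the resulting error curve is an in-sample estimate for each feature subset.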
regr.fit(X_reduce, y_sample_test)
pred = regr.predict(X_reduce)
n_pred[n] = pred
err = mean_absolute_error(y_sample_test, pred)
n_err[n] = err
# Plot n vs err
plt.rcParams["figure.dpi"] = 120
plt.plot(domain, n_err.values())
plt.xlabel("# of features")
plt.ylabel("Mean Absolute Error")
plt.savefig("feature_selection.png", dpi=300)
plt.show()
###Output
_____no_output_____
###Markdown
View how well the best prediction did
###Code
view_pred(13, n_pred, n_feats)
###Output
has_issues has_projects has_downloads has_pages topics_count has_contributing has_support_file has_funding_file has_codeowners has_changelog has_codespaces has_discussions labels_count
###Markdown
Reduce our dataset to the features that performed best
###Code
feats = n_feats[27]
X_train = X_train.T[feats].T
X_test = X_test.T[feats].T
print(X_train.shape, X_test.shape)
###Output
(8500, 27) (1500, 27)
###Markdown
Regression PCA Most of the variation in the data can be compressed to ~6 dimensions/features.
###Code
pca = PCA(n_components=35)
pca.fit(X)
plt.plot(pca.explained_variance_ratio_)
plt.savefig("pca.png", dpi=300)
plt.plot()
pca = PCA(n_components=5)
X_pca_train = pca.fit_transform(X_train)
X_pca_test = pca.fit_transform(X_test)
###Output
_____no_output_____
###Markdown
K-Nearest Neighbors
###Code
knn = KNeighborsRegressor(metric=my_dist, metric_params={"cat_cols": cat_cols})
knn.fit(X_pca_train, y_log_train)
pred = knn.predict(X_pca_test)
err = mean_absolute_error(y_log_test, pred)
print(f"Error: {err}")
plt.figure(figsize=(12,6))
selection = np.random.randint(1, len(pred), 80)
plt.plot(pred[selection], label="Prediction")
plt.plot(y_log_test[selection], label="True")
plt.legend()
plt.savefig("knn.png", dpi=300)
plt.show()
###Output
Error: 0.6138631269503059
###Markdown
Multilayer Perceptron Decision Tree The decision tree starts to overfit after 7 layers. It does much better than KNN.
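Before the decision-tree code below, here is a minimal sketch for the Multilayer Perceptron heading above, which has no accompanying code cell in this section. It reuses the `MLPRegressor` import and the reduced `X_train`/`X_test` split from earlier cells; the hidden-layer sizes and iteration budget are illustrative assumptions rather than values from the original analysis.

```python
# Untuned MLP baseline on the log-transformed contributor counts
mlp = MLPRegressor(hidden_layer_sizes=(64, 32), max_iter=2000)
mlp.fit(X_train, y_log_train)
mlp_pred = mlp.predict(X_test)
print(f"MLP error: {mean_absolute_error(y_log_test, mlp_pred)}")
```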
###Code
# max_features <- The number of features to consider when looking for the best split
dom = np.arange(2, 40)
errs = []
for i in dom:
decisionTree = DecisionTreeRegressor(max_depth=i, max_features=1)
decisionTree.fit(X_train, y_log_train)
pred = decisionTree.predict(X_test)
err = mean_absolute_error(y_log_test, pred)
errs.append(err)
plt.plot(dom, errs)
plt.xlabel("Depth")
plt.ylabel("Mean Absolute Err")
plt.savefig("depth_error.png", dpi=300)
plt.show()
decisionTree = DecisionTreeRegressor(max_depth=10)
decisionTree.fit(X_train, y_log_train)
pred = decisionTree.predict(X_test)
err = mean_absolute_error(y_log_test, pred)
print(f"Error: {err}")
plt.figure(figsize=(12,6))
plt.plot(pred[selection], label="Prediction")
plt.plot(y_log_test[selection], label="True")
plt.legend()
plt.savefig("decision_tree.png", dpi=300)
plt.show()
tree_data = tree.export_graphviz(decisionTree,
feature_names=X_df.T[feats].T.columns,
out_file=None,
filled=True,
max_depth=3)
graphviz.Source(tree_data, format="png").render("tree.png")
###Output
_____no_output_____
###Markdown
Clustering Kmeans, HAC The Silhouette score is quite good, especially for k=2. The cluster sizes are also somewhat reasonable. My guess is that it basically partitioned the ~2.5% of the repos with > 100 contributors.
###Code
kmeans_sil, single_sil, complete_sil = [], [], []
data = np.column_stack((X, y))
dom = np.arange(2, 6)
for k in dom:
# K-means
kmeans = KMeans(n_clusters=k)
kmeans_clusters = kmeans.fit_predict(data)
print(f"{k} k-means cluster sizes: {np.unique(kmeans_clusters, return_counts=True)[1]}")
kmeans_sil.append(silhouette_score(data, kmeans_clusters))
# HAC single
hac_single = AgglomerativeClustering(n_clusters=k, linkage="single")
hac_single_clusters = hac_single.fit_predict(data)
print(f"{k} HAC single cluster sizes: {np.unique(hac_single_clusters, return_counts=True)[1]}")
single_sil.append(silhouette_score(data, hac_single_clusters))
# HAC complete
hac_complete = AgglomerativeClustering(n_clusters=k, linkage="complete")
hac_complete_clusters = hac_complete.fit_predict(data)
print(f"{k} HAC complete cluster sizes: {np.unique(hac_complete_clusters, return_counts=True)[1]}")
complete_sil.append(silhouette_score(data, hac_complete_clusters))
plt.figure(figsize=(12, 6))
plt.plot(dom, kmeans_sil, label="k-means silhouette")
plt.plot(dom, single_sil, label="HAC single link silhouette")
plt.plot(dom, complete_sil, label="HAC complete link silhouette")
plt.legend()
plt.savefig("clusters_silhouette.png", dpi=300)
plt.show()
###Output
2 k-means cluster sizes: [9746 254]
2 HAC single cluster sizes: [ 15 9985]
2 HAC complete cluster sizes: [9925 75]
3 k-means cluster sizes: [9349 131 520]
3 HAC single cluster sizes: [ 14 9985 1]
3 HAC complete cluster sizes: [ 75 312 9613]
4 k-means cluster sizes: [9067 178 95 660]
4 HAC single cluster sizes: [9985 13 1 1]
4 HAC complete cluster sizes: [ 312 60 9613 15]
5 k-means cluster sizes: [8822 119 60 744 255]
5 HAC single cluster sizes: [ 13 9957 1 1 28]
5 HAC complete cluster sizes: [ 60 256 9613 15 56]
###Markdown
Analysis regarding the k-means clusters for k=2
###Code
kmeans = KMeans(n_clusters=2)
clusters = kmeans.fit_predict(data)
unique, counts = np.unique(clusters, return_counts=True)
for label in unique:
cluster_points = data[clusters == label]
avg_pd = pd.DataFrame(cluster_points)
avg_pd.columns = df.columns
print(f"\n\n\nCluster size: {counts[label]}\n")
print(avg_pd.describe())
print(f"Silhouette: {silhouette_score(data, clusters)}")
sorted_y = np.sort(y)[::-1]
plt.figure(figsize=(12, 4))
plt.subplot(121)
plt.plot(sorted_y)
plt.subplot(122)
plt.semilogy(sorted_y)
plt.savefig("contributor_distribution.png", dpi=300)
plt.show()
###Output
_____no_output_____
###Markdown
Keep first response for each question
###Code
answers_dict = {q: questions_df[questions_df['question_id']==q]['correct_answer'].to_numpy()[0]
for q in questions_df['question_id']}
df_resp = df[df['action_type']=='respond']
df_first_resp = df_resp.drop_duplicates(['user_id', 'item_id'], keep='first')
df_first_resp['correct'] = 0
df_first_resp.head()
df_resp
df_first_resp.head(20)
# Mark a first response as correct when the user's answer matches that question's correct answer
df_first_resp['correct'] = (df_first_resp['user_answer'] == df_first_resp['item_id'].map(answers_dict)).astype(int)
df_first_resp.head()
###Output
_____no_output_____
###Markdown
Data processing
###Code
def process(data):
df = data.copy()
# create population-level model
df = df.drop('id', axis=1)
# extract possible predictive features from timestamp and make last start relative
df['timestamp'] = df['timestamp'].astype('datetime64[ns]')
df['lastStart'] = df['lastStart'].astype('datetime64[ns]')
df['dayofweek'] = df['timestamp'].dt.dayofweek.astype('category')
df['logLastStartH'] = np.log(((df['timestamp'] - df['lastStart']).dt.total_seconds() + 1) / 3600)
df = df.drop('timestamp', axis=1)
df = df.drop('lastStart', axis=1)
# drop categorical columns that seem to have too many distinct values to be useful
df = df.drop('sourceGameId', axis=1)
df = df.drop('deviceType', axis=1)
# drop categorical columns where distribution of labels doesn't match well between training and test data
df = df.drop('campaignId', axis=1)
df = df.drop('softwareVersion', axis=1)
# logarithmic transforms
df['logStartCount'] = np.log(df['startCount'] + 1)
df = df.drop('startCount', axis=1)
df['logViewCount'] = np.log(df['viewCount'] + 1)
df = df.drop('viewCount', axis=1)
df['logClickCount'] = np.log(df['clickCount'] + 1)
df = df.drop('clickCount', axis=1)
df['logInstallCount'] = np.log(df['installCount'] + 1)
df = df.drop('installCount', axis=1)
df['logStartCount1d'] = np.log(df['startCount1d'] + 1)
df = df.drop('startCount1d', axis=1)
df['logStartCount7d'] = np.log(df['startCount7d'] + 1)
df = df.drop('startCount7d', axis=1)
# set types
df['platform'] = data['platform'].astype('category')
df['country'] = data['country'].astype('category')
df['connectionType'] = data['connectionType'].astype('category')
# drop features with very little apparent predictive power
df = df.drop('dayofweek', axis=1)
df = df.drop('platform', axis=1)
df = df.drop('connectionType', axis=1)
# drop nan
df = df.dropna()
return df
tr = process(training_data)
te = process(test_data)
tr.head(20)
tr.info()
print("Check stats for continuous variables")
tr.describe()
print("Check number of categories for categorical variables")
for key in tr.select_dtypes(['category']).columns:
print(key, len(tr[key].unique()))
print("Compare marginal distributions of training and test data")
for key in te.keys():
print(key)
if key in ['campaignId', 'softwareVersion', 'country']:
tr_set = set(tr[key].unique())
te_set = set(te[key].unique())
print("- Training {} keys of which {} also in test set".format(
len(tr_set), len(tr_set.intersection(te_set))))
print("- Test {} keys of which {} not in training set".format(
len(te_set), len(te_set.difference(tr_set))))
else:
tr[key].hist()
plt.show()
te[key].hist()
plt.show()
###Output
_____no_output_____
###Markdown
Model definition and feature selection
###Code
def joint_encode(data1, data2):
# encode categorical variables as int
# make sure training and test data use same encoding schema
joint_data = pd.concat([data1, data2], axis=0)
joint_data['country'] = joint_data['country'].astype('category')
df1 = data1.copy()
df2 = data2.copy()
cat_columns = joint_data.select_dtypes(['category']).columns
for cat_column in cat_columns:
df1[cat_column] = joint_data[cat_column][:len(data1)].cat.codes
df2[cat_column] = joint_data[cat_column][len(data1):].cat.codes
return df1, df2
tre, tee = joint_encode(tr, te)
from sklearn.naive_bayes import CategoricalNB, GaussianNB
from sklearn.metrics import log_loss
class Predictor:
"""Naive Bayes predictor as probability of install was requested"""
def __init__(self, data, only_features=[], not_features=[], train_frac=0.8):
# set weights as dataset is biased
c0, c1 = data.install.value_counts().tolist()
self.w0 = c1 / (c1 + c0)
self.w1 = c0 / (c1 + c0)
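        # These weights rebalance the evaluation loss in pred_eval: assuming installs are
        # the rarer class (per the bias note above), positives get the larger weight w1 and
        # negatives the smaller w0, so both classes contribute equally overall.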
# split
self.train = data.sample(frac=train_frac, random_state=200)
self.test = data.drop(self.train.index)
# features to use
self.cat_features = tr.select_dtypes(['category']).columns
self.con_features = tr.select_dtypes(['float64']).columns
if only_features:
self.cat_features = [c for c in self.cat_features if c in only_features]
self.con_features = [c for c in self.con_features if c in only_features]
if not_features:
self.cat_features = [c for c in self.cat_features if c not in not_features]
self.con_features = [c for c in self.con_features if c not in not_features]
# fit models
self._fit_categorical_model()
self._fit_continuous_model()
def _fit_categorical_model(self):
if not any(self.cat_features):
self.catm = None
return
self.catm = CategoricalNB()
self.catm.fit(self.train[self.cat_features], self.train['install'])
def _fit_continuous_model(self):
if not any(self.con_features):
self.conm = None
return
self.conm = GaussianNB()
self.conm.fit(self.train[self.con_features], self.train['install'])
def pred(self, data):
if self.catm and self.conm:
cat_pred = self.catm.predict_proba(data[self.cat_features])[:,1]
con_pred = self.conm.predict_proba(data[self.con_features])[:,1]
return (cat_pred + con_pred) / 2 # roughly equally accurate
if self.catm:
return self.catm.predict_proba(data[self.cat_features])[:,1]
if self.conm:
return self.conm.predict_proba(data[self.con_features])[:,1]
assert False
def pred_eval(self, data):
pred = self.pred(data)
weights = data['install'] * self.w1 - (data['install'] - 1) * self.w0
print("- Loss {:.3f}".format(log_loss(data['install'], pred, sample_weight=weights)))
all_features = tr.select_dtypes(['category', 'float64']).columns
print("All features")
p = Predictor(tre)
p.pred_eval(tre)
print("Individual feature predictivity")
for feature in all_features:
print(feature)
p = Predictor(tre, only_features=[feature])
p.pred_eval(tre)
print("Effect of removing individual feature")
for feature in all_features:
print(feature)
p = Predictor(tre, not_features=[feature])
p.pred_eval(tre)
###Output
_____no_output_____
###Markdown
Generating predictions
###Code
p = Predictor(tre)
preds = p.pred(tee)
print(preds[:10])
out_df = pd.DataFrame({'prob_install': preds}, index=te.index.values.tolist())
out_df.head(20)
out_df.to_csv('test_preds.csv')
###Output
_____no_output_____
###Markdown
Predicting Schizophrenia Diagnosis This notebook contains an analysis of the COBRE dataset available on Nilearn. The dataset contains resting state fMRI data from 146 participants. Approximately half of the subjects are patients diagnosed with schizophrenia and the remainder are healthy controls. The analyses in this notebook attempt to predict schizophrenia diagnosis using resting state fMRI data.
###Code
#import data
from nilearn import datasets
data = datasets.fetch_cobre(n_subjects=None)
###Output
_____no_output_____
###Markdown
Phenotypic info for the subjects is included with the data but requires some cleaning first.
###Code
#import phenotypic data
import pandas
pheno = pandas.DataFrame(data.phenotypic)
###Output
_____no_output_____
###Markdown
We'll extract subject ID from the NIfTI file names using index slicing and then merge the fMRI file paths to the phenotypic data.
###Code
#extract participant id from file paths
file_names = []
for path in data.func:
file_names.append(path[40:45])
#create dataframe of file paths and ids
files = pandas.DataFrame(data.func, columns = ['path'])
files['id'] = file_names
files['id'] = files.id.astype(int)
#merge phenotypic data with file paths
import pandas
pheno = pandas.merge(pheno, files, on = 'id')
#fix string decoding
pheno['gender'] = pheno['gender'].map(lambda x: x.decode('utf-8'))
pheno['handedness'] = pheno['handedness'].map(lambda x: x.decode('utf-8'))
pheno['subject_type'] = pheno['subject_type'].map(lambda x: x.decode('utf-8'))
pheno['diagnosis'] = pheno['diagnosis'].map(lambda x: x.decode('utf-8'))
###Output
_____no_output_____
###Markdown
Let's take a look at what we have now, and also save the cleaned phenotypic data to a CSV.
###Code
#pheno.to_csv('pheno.csv', index=False)
pheno
###Output
_____no_output_____
###Markdown
Now that we have the file paths matched with the phenotypic data, we can easily make subsets for patients and controls.
###Code
#create lists of filepaths for patients and controls
patients = []
controls = []
for i in pheno.index:
if pheno.loc[i, 'subject_type']=='Patient':
patients.append(pheno.loc[i, 'path'])
else:
controls.append(pheno.loc[i, 'path'])
###Output
_____no_output_____
###Markdown
The code below generates an interactive app using plotly express that will plot a histogram of subject age.
###Code
import plotly.express as px
from jupyter_dash import JupyterDash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Load Data
df = pheno
# Build App
app = JupyterDash(__name__)
app.layout = html.Div([
html.H1("Age"),
dcc.Graph(id='graph'),
html.Label([
"Participant type",
dcc.Dropdown(
id='subject_type', clearable=False,
value='Patient', options=[
{'label': c, 'value': c}
for c in df.subject_type.unique() #get all unique values from column
])
]),
])
# Define callback to update graph
@app.callback(
Output('graph', 'figure'),
[Input("subject_type", "value")]
)
def update_figure(subject_type):
return px.histogram(
df[df["subject_type"]==subject_type], x="current_age", color="gender"
)
# Run app and display result inline in the notebook
app.run_server(mode='inline')
###Output
_____no_output_____
###Markdown
Connectivity This analysis uses the BASC atlas to define ROIs. We'll focus on 64 ROIs for this analysis.
###Code
#import atlas
parcellations = datasets.fetch_atlas_basc_multiscale_2015(version='sym')
atlas_filename = parcellations.scale064
# visualize atlas
from nilearn import plotting
plotting.plot_roi(atlas_filename, draw_cross = False)
###Output
_____no_output_____
###Markdown
Let's generate correlation matrices for each subject and then merge them to the phenotypic data.
###Code
from nilearn.input_data import NiftiLabelsMasker
from nilearn.connectome import ConnectivityMeasure
# create mask
mask = NiftiLabelsMasker(labels_img=atlas_filename,
standardize=True,
memory='nilearn_cache',
verbose=1)
# initialize correlation measure
correlation_measure = ConnectivityMeasure(kind='correlation', vectorize=True,
discard_diagonal=True)
import pandas as pd
#initialize empty dataframe
all_features = pd.DataFrame(columns=['features', 'file'])
for i,sub in enumerate(data.func):
# extract the timeseries from the ROIs in the atlas
time_series = mask.fit_transform(sub, confounds=data.confounds[i])
# create a region x region correlation matrix
correlation_matrix = correlation_measure.fit_transform([time_series])[0]
# add features and file name to dataframe
all_features = all_features.append({'features': correlation_matrix, 'file': data.func[i]}, ignore_index=True)
# uncomment below to keep track of status
#print('finished %s of %s'%(i+1,len(data.func)))
# create pandas dataframe of features and phenotypic data
full = pandas.merge(pheno, all_features, left_on = 'path', right_on = 'file')
###Output
_____no_output_____
###Markdown
Now we have a Pandas dataframe with all of our demographic data and a column that contains the correlation matrix for each subject as an array.
###Code
full
###Output
_____no_output_____
###Markdown
Visualizing Connectivity
###Code
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, savefig
patient_features = list(full.loc[full['subject_type']=='Patient']['features'])
control_features = list(full.loc[full['subject_type']=='Control']['features'])
figure(figsize=(16,6))
plt.subplot(1, 2, 1)
plt.imshow(patient_features, aspect='auto')
plt.colorbar()
plt.title('Patients')
plt.xlabel('features')
plt.ylabel('subjects')
plt.subplot(1, 2, 2)
plt.imshow(control_features, aspect='auto')
plt.colorbar()
plt.title('Controls')
plt.xlabel('features')
plt.ylabel('subjects')
savefig('features.png', transparent=True)
###Output
_____no_output_____
###Markdown
Classification This section contains the main analysis of this notebook: predicting schizophrenia diagnosis. The features used are the correlation matrices generated previously, and diagnosis labels are contained in the `subject_type` column from our phenotypic data. We first split the data into training and validation sets, with a ratio of 80/20.
###Code
from sklearn.model_selection import train_test_split
# Split the sample to training/validation with a 80/20 ratio
x_train, x_val, y_train, y_val = train_test_split(
list(full['features']), # x
full['subject_type'], # y
test_size = 0.2, # 80%/20% split
shuffle = True, # shuffle dataset
stratify = full['subject_type'],
random_state = 242
)
###Output
_____no_output_____
###Markdown
Our starting classifier will be a linear support vector machine, specified as `SVC()` in scikit-learn. This is often the [first recommendation](https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html) for classification problems with small sample sizes. We'll be using 10-fold cross validation to get a rough benchmark of performance for each classifier. We'll use F1 as our performance metric. After each run we'll look at the performance of the classifier across the folds as well as the average performance.
###Code
# build SVC classifier
from sklearn.svm import SVC
svc = SVC(kernel='linear')
# F1 score by averaging each fold
from sklearn.model_selection import cross_val_score
import numpy as np
svc_score = cross_val_score(svc, x_train, y_train, cv=10, scoring = 'f1_macro')
print(np.mean(svc_score))
print(svc_score)
###Output
0.7981132756132755
[0.82857143 0.74825175 0.74825175 0.74825175 1. 0.74825175
0.90598291 0.81666667 0.80357143 0.63333333]
###Markdown
Linear SVC seems to perform very strongly, with an average F1 score of ~0.80. We'll try gradient boosting next. The gradient boost model will use a greater number of estimators and a larger max depth than the defaults in order to try and improve performance.
###Code
# build gradient boost classifier
from sklearn.ensemble import GradientBoostingClassifier
boost = GradientBoostingClassifier(n_estimators=500,
max_depth=4,
random_state=242
)
#train model
boost.fit(x_train, y_train)
# F1 score by averaging each fold
from sklearn.model_selection import cross_val_score
import numpy as np
boost_score = cross_val_score(boost, x_train, y_train, cv=10, scoring = 'f1_macro')
print(np.mean(boost_score))
print(boost_score)
###Output
0.5752838827838829
[0.48571429 0.5 0.74825175 0.625 0.24475524 0.82857143
0.60714286 0.71794872 0.54545455 0.45 ]
###Markdown
The gradient boost model seems to be highly variable and doesn't come close to matching the performance of the SVC. We'll try K Nearest Neighbors next.
###Code
# K Nearest Neighbours
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn_score = cross_val_score(knn, x_train, y_train, cv=10, scoring = 'f1_macro')
print(np.mean(knn_score))
print(knn_score)
###Output
0.6219476356976357
[0.73333333 0.625 0.58041958 0.48571429 0.48571429 0.4375
0.71794872 0.71794872 0.71794872 0.71794872]
###Markdown
K Nearest Neighbors performs poorly with default parameters. Given the large difference between KNN and the other classifiers, I won't try to tweak this algorithm. Lastly, we'll try a Random Forest classifier. We'll increase the number of estimators like we did with the gradient boost model.
###Code
# Random Forest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators = 500, random_state = 242)
rfc_score = cross_val_score(rfc, x_train, y_train, cv=10, scoring = 'f1_macro')
print(np.mean(rfc_score))
print(rfc_score)
###Output
0.7322227772227772
[0.83333333 0.73333333 0.65714286 0.58041958 0.74825175 0.91608392
0.68571429 0.81666667 0.63333333 0.71794872]
###Markdown
The Random Forest model seems to perform well, but not as well as the linear SVC. With some hyperparameter tweaking it might be possible to achieve the same performance, but considering that the random forest classifier is more complex and takes longer to train, we'll use SVC as the final model. Hyperparameter Tuning Now that we've committed to a model, let's see if we can get a little more out of it by tweaking the hyperparameters. Unfortunately, the only option for a linear SVC is the `C` parameter. We can create a range of values for C and then compare each using cross validation.
###Code
from sklearn.model_selection import validation_curve
C_range = 10. ** np.arange(-3, 8) # A range of different values for C
train_scores, valid_scores = validation_curve(svc, x_train, y_train,
param_name= "C",
param_range = C_range,
cv=10,
scoring='f1_macro')
# Creating a Pandas dataframe of the results
tScores = pandas.DataFrame(train_scores).stack().reset_index()
tScores.columns = ['C','Fold','Score']
tScores.loc[:,'Type'] = ['Train' for x in range(len(tScores))]
vScores = pandas.DataFrame(valid_scores).stack().reset_index()
vScores.columns = ['C','Fold','Score']
vScores.loc[:,'Type'] = ['Validate' for x in range(len(vScores))]
ValCurves = pandas.concat([tScores,vScores]).reset_index(drop=True)
# Plotting the performance of different values of C
import seaborn as sns
g = sns.catplot(x='C', y='Score', hue='Type', data=ValCurves, kind='point')
g.set_xticklabels(C_range, rotation=90)
###Output
_____no_output_____
###Markdown
The best performance seems to be at a C value of 0.1, but it's a negligible difference. But there's one more thing to try. What if we changed the SVC kernel to the default 'rbf', which would let us adjust both C and gamma? Let's use a grid search to see if optimizing an rbf kernel would perform better than a linear kernel.
###Code
# RBF SVC model
from sklearn.model_selection import GridSearchCV
svc_rbf = SVC(kernel='rbf')
C_range = 10. ** np.arange(-3, 8)
gamma_range = 10. ** np.arange(-8, 3)
param_grid = dict(gamma=gamma_range, C=C_range)
grid = GridSearchCV(svc_rbf, param_grid=param_grid, cv=10)
grid.fit(x_train, y_train)
print(grid.best_params_)
svc_rbf = SVC(kernel='rbf', C=100.0, gamma=0.001)
svc_rbf_score = cross_val_score(svc_rbf, x_train, y_train, cv=10, scoring = 'f1_macro')
print(np.mean(svc_rbf_score))
print(svc_rbf_score)
###Output
0.8061452436452436
[0.82857143 0.82857143 0.74825175 0.74825175 1. 0.74825175
0.90598291 0.81666667 0.80357143 0.63333333]
###Markdown
It seems like SVC with an RBF kernel and tuned hyperparameters performs slightly better than linear SVC, so we'll use this as the final model. Testing The Model We can now run the model on the left out data and see how it performs.
###Code
# Validation
from sklearn.metrics import f1_score, accuracy_score
svc_rbf.fit(x_train, y_train)
final_pred = svc_rbf.predict(x_val)
print('F1:', f1_score(y_val, final_pred, pos_label='Patient'))
print('Accuracy:', accuracy_score(y_val, final_pred))
###Output
F1: 0.6875
Accuracy: 0.6666666666666666
###Markdown
An F1 score of .69 isn't too bad for a binary classification problem. Let's see how the model is handling the labels by taking a look at the confusion matrix.
###Code
import matplotlib.pyplot as plt
from sklearn.metrics import plot_confusion_matrix
disp = plot_confusion_matrix(svc_rbf, x_val, y_val,
cmap=plt.cm.Blues,
normalize=None)
disp.ax_.set_title('SVC Schizophrenia Labels')
print(disp.confusion_matrix)
###Output
[[ 9 6]
[ 4 11]]
###Markdown
The model seems to handle each class equally well. Predicting Schizophrenia Subtype The phenotypic data also includes the schizophrenia subtype that each patient was diagnosed with. Maybe we can predict subtype as well. Let's take a look at how they are distributed.
###Code
full.diagnosis.value_counts()
###Output
_____no_output_____
###Markdown
The distribution of schizophrenia subtypes seems highly unbalanced. Most of the patients were diagnosed with the label "295.3", which refers to paranoid schizophrenia. There are very few observations for the other subtypes, so it's unlikely that any model could predict these with so little data. Maybe we can predict paranoid schizophrenia from the other subtypes.
###Code
# creating a new variable for subtype
diagnosis=[]
for i in full.index:
if full.loc[i, 'diagnosis']=='295.3':
diagnosis.append('Paranoid')
elif full.loc[i, 'diagnosis']=='None':
diagnosis.append('None')
else:
diagnosis.append('Other')
full['type'] = diagnosis
###Output
_____no_output_____
###Markdown
We'll split the data again, stratified by our new subtype variable.
###Code
from sklearn.model_selection import train_test_split
# Split the sample to training/validation with a 80/20 ratio
x_train2, x_val2, y_train2, y_val2 = train_test_split(
list(full['features']), # x
full['type'], # y
test_size = 0.2, # 80%/20% split
shuffle = True, # shuffle dataset
stratify = full['type'],
random_state = 242
)
###Output
_____no_output_____
###Markdown
Let's avoid running all of the models separately again. It would be much easier to compare a lot of models at once. The cell below defines several models and then loops over them to generate cross validated performance metrics. A more detailed example of this can be found [here]().
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
np.random.seed(242)
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear"),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1, max_iter=1000),
AdaBoostClassifier(),
GaussianNB()]
for name, clf in zip(names, classifiers):
score = cross_val_score(clf, x_train2, y_train2, cv=10, scoring='f1_macro')
print(name, np.mean(score))
###Output
Nearest Neighbors 0.39345543345543355
Linear SVM 0.4076200651200651
RBF SVM 0.21960784313725487
Gaussian Process 0.1431135531135531
Decision Tree 0.34980260480260483
Random Forest 0.36240516564045977
Neural Net 0.3840762723115664
AdaBoost 0.3908056540409482
Naive Bayes 0.41458892958892957
###Markdown
A Gaussian Naive Bayes model performs slightly better than linear SVC, so we'll use it in this case. But I think this is another example of how powerful SVM is as an approach.
###Code
# Validation
NB = GaussianNB()
NB.fit(x_train2, y_train2)
type_pred = NB.predict(x_val2)
f1_score(y_val2, type_pred, average='macro')
import matplotlib.pyplot as plt
from sklearn.metrics import plot_confusion_matrix
disp = plot_confusion_matrix(NB, x_val2, y_val2,
#display_labels=class_names,
cmap=plt.cm.Blues,
normalize=None)
disp.ax_.set_title('Naive Bayes: Schizophrenia Type')
print(disp.confusion_matrix)
###Output
[[10 3 2]
[ 4 3 0]
[ 4 2 2]]
###Markdown
Spiegel historiael Preprocessing
###Code
import glob
import os
import shutil
import re
import unidecode
from itertools import product
from collections import Counter
import random
import numpy as np
RND = 12345
random.seed(RND)
np.random.seed(RND)
from scipy.spatial.distance import cdist, pdist
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use("seaborn-deep")
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestCentroid
from sklearn.preprocessing import normalize
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, precision_recall_curve
from tqdm import tqdm
from inspect import signature
DIRTY = re.compile(r'\s*\-\+')
vocabulary = {}
parts = {'P2':[], 'P3': [], 'P5': []}
for part in sorted(os.listdir('data')):
for fn in sorted(glob.glob(f'data/{part}/*.tag')):
print(fn)
with open(fn) as f:
lines = f.readlines()
chapter_rhymes = []
for line in lines:
line = line.strip()
if not line or line.startswith('###'):
continue
try:
words, rhyme = line.split('|')
word = words.strip().split()[-1]
if word not in vocabulary:
vocabulary[word] = Counter()
vocabulary[word][rhyme] += 1
chapter_rhymes.append(rhyme)
except ValueError:
print(line)
parts[part].extend(chapter_rhymes)
lines = [line.strip() for line in open('data/fragment.txt')]
with open('data/fragment.lemma.txt', 'w') as f:
for line in lines:
words = line.split()
word = line.split()[-1]
if '[…]' in word or '(…)' in word or '(.)' in word:
continue
words = []
for word in line.split():
words.append(''.join([c for c in word if c.isalpha()]))
if words[-2] + '+' + words[-1] in vocabulary:
rhyme_word = words[-2] + '+' + words[-1]
else:
rhyme_word = words[-1]
try:
lemma = vocabulary[rhyme_word].most_common(1)[0][0]
except KeyError:
try:
rhyme_word = rhyme_word.replace('u', 'v')
lemma = vocabulary[rhyme_word].most_common(1)[0][0]
except KeyError:
print(rhyme_word)
lemma = 'XXX'
f.write(' | '.join((line, lemma.upper())) + '\n')
fragment = [line.strip().split('|')[-1].strip() for line in open('data/fragment.lemma_correct.txt')]
fragment
fragment = [l for l in fragment if l != 'XXX']
fragment
size = len(fragment)
print(size)
fragment = ' '.join(fragment)
fragment
for k, v in parts.items():
print(k, len(v))
data = []
for part, rhymes in parts.items():
si, ei = 0, size
while ei < len(rhymes):
data.append([part, ' '.join(rhymes[si:ei])])
si += size
ei += size
import pandas as pd
src = pd.DataFrame(data, columns=('part', 'rhymes'))
src.head()
p_word = {'use_idf': True,
'max_features': None,
'analyzer': 'word',
'min_df': 1,
'lowercase': False,
'norm': 'l1',
'ngram_range': (1, 1)}
vectorizer = TfidfVectorizer(**p_word)
scaler = StandardScaler()
X = vectorizer.fit_transform(src['rhymes']).toarray()
X = scaler.fit_transform(X)
fragment = vectorizer.transform([fragment]).toarray()
fragment = scaler.transform(fragment)[0]
print(X.shape, fragment.shape)
class Verifier():
def __init__(self, iters=100,
rnd_prop=.5, random_state=1066,
num_instances=30, metric='cosine',
rnd_state=1234):
assert (rnd_prop >= 0.0) and (rnd_prop <= 1.0)
np.random.seed(rnd_state)
self.iters = iters
self.rnd_prop = rnd_prop
        self.num_instances = num_instances
        self.metric = metric
def predict_proba(self, target, source_X, imposter_X):
"""
target = (single) anonymous text
source_X = candidate author
imposter_X = imposter documents
"""
total_features = imposter_X.shape[1]
total_imposters = imposter_X.shape[0]
total_source = source_X.shape[0]
target = np.array([target])
hits = np.zeros(self.iters)
for it in range(self.iters):
imposters_ = imposter_X[np.random.choice(total_imposters, self.num_instances, replace=False), :]
source_ = source_X[np.random.choice(total_source, self.num_instances, replace=False), :]
if self.rnd_prop < 1.0:
idxs = np.random.choice(total_features, int(total_features * self.rnd_prop), replace=False)
imposters_ = imposters_[:, idxs]
source_ = source_[:, idxs]
target_ = target[:, idxs]
            min_imp_dist = np.min(cdist(target_, imposters_, metric=self.metric))
            min_src_dist = np.min(cdist(target_, source_, metric=self.metric))
if min_src_dist < min_imp_dist:
hits[it] = 1
return np.mean(hits)
authors = set(src['part'])
authors
for author in authors:
src_X = X[src['part'] == author]
imposters_X = X[src['part'] != author]
verifier = Verifier(iters=1000, num_instances=5)
proba = verifier.predict_proba(target=fragment, source_X=src_X,
imposter_X=imposters_X)
print(f'::: {author} > {proba} :::')
train_parts, dev_parts, train_rhymes, dev_rhymes = train_test_split(src['part'], src['rhymes'],
test_size=.25,
stratify=src['part'],
random_state=42)
train_X = vectorizer.fit_transform(train_rhymes).toarray()
train_X = scaler.fit_transform(train_X)
dev_X = vectorizer.transform(dev_rhymes).toarray()
dev_X = scaler.transform(dev_X)
def experiment(candidate):
source = train_X[train_parts == candidate]
imposters = train_X[train_parts != candidate]
targets = dev_X
target_y = np.array([1 if a == candidate else 0 for a in dev_parts])
verifier = Verifier(iters=1000, num_instances=5)
probas = [verifier.predict_proba(target=t, source_X=source,
imposter_X=imposters) for t in tqdm(targets)]
precision, recall, thresholds = precision_recall_curve(target_y, probas)
f1s = [f1_score(target_y, (probas > th) * 1) for th in thresholds]
max_idx = np.array(f1s).argmax()
max_f1 = f1s[max_idx]
max_th = thresholds[max_idx]
print(max_f1, max_th)
plt.figure(figsize=(10, 10))
step_kwargs = ({'step': 'post'}
if 'step' in signature(plt.fill_between).parameters
else {})
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.plot(recall[max_idx], precision[max_idx], 'o')
plt.axhline(precision[max_idx])
plt.axvline(recall[max_idx])
plt.title(f'{candidate} | f1={round(max_f1, 4)} @ theta={round(max_th, 4)}')
plt.savefig(f'{candidate}.pdf')
for candidate in 'P2 P3 P5'.split():
experiment(candidate)
###Output
100%|██████████| 104/104 [00:42<00:00, 2.44it/s]
/Users/mikekestemont/anaconda3/envs/py36/lib/python3.6/site-packages/sklearn/metrics/classification.py:1135: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 due to no predicted samples.
'precision', 'predicted', average, warn_for)
0%| | 0/104 [00:00<?, ?it/s]
###Markdown
Dataset This project uses data scraped using the *New York Times* API. Due to the API's limitations, the dataset consists of headlines, dates, sections, subjects, and various other metadata but not the articles themselves. The majority of analysis, therefore, will be focused on the headline and abstract text.
###Code
# Load data from GitHub
df = pd.read_csv("https://raw.githubusercontent.com/vyoma-raman/nyt-disability/main/data.csv").fillna("")
# Create columns for year of publication and a concatenation of the headline and abstract
df["year"] = df["date"].apply(lambda d: int(d.split("/")[2]))
df["full_text"] = df.apply(lambda row: " ".join(row[1:3]), axis=1)
# Excluding data on articles published after 2020
df = df[df["year"] <= 2020]
df.tail()
# Split text data into 20-year publication bins
ranges = range(1860, 2001, 20)
data_bins = {}
for r in ranges:
data = df[(df["year"] > r) & (df["year"] <= r + 20)]
data_bins["-".join([str(r), str(r + 20)])] = data["full_text"].tolist()
# Get a list of all text data
all_data = df["full_text"].tolist()
###Output
_____no_output_____
###Markdown
Content Evolution This section examines how the *New York Times*' coverage of disability has topically changed over time. Let's start by looking at word embeddings, which represent the contexts in which words are used and provide insight into concepts that are related to those words. These embeddings are separately created for different 20-year bins of data. Comparing across bins can illustrate how different ideas are associated with each other in different time intervals. Word Embeddings
###Code
# Load English stopwords
stop_words = set(stopwords.words('english'))
# Clean the data for this task
def clean_wv(text):
text = "".join(ch for ch in text if ch.isalnum() or ch == " ").lower()
return [w for w in nltk.word_tokenize(text) if not w in stop_words]
# Create a dictionary of words associated with disability-related words
disability_similar = {}
# Train a new model and collect similar words for each time interval
for r, data in data_bins.items():
texts_wv = [clean_wv(t) for t in data]
model = Word2Vec(sentences=texts_wv, size=300, window=5, min_count=1)
dictionary = {}
# These words were selected based on how NYTimes categorizes disability-related articles
for word in ["disabilities", "blindness", "deafness", "amputee"]:
try:
similar_words = [w[0] for w in model.wv.most_similar(positive=[word], topn=10)]
except:
similar_words = []
dictionary[word] = similar_words
disability_similar[r] = dictionary
# Print the words most similar to the 4 terms of interest
for bin in disability_similar:
print(bin)
for word in disability_similar[bin]:
print(" ", word)
print(" ", disability_similar[bin][word])
###Output
1860-1880
disabilities
['per', 'case', 'royal', 'pierces', 'rigid', 'russells', 'policy', 'dull', '8', 'jeffriess']
blindness
['gleanings', 'family', 'eyes', 'lowell', 'becks', 'yesterday', 'life', 'elephant', 'republicans', 'road']
deafness
['8', 'examinations', 'tinkering', 'compensations', 'pierces', 'gleanings', 'mr', 'printing', 'foreign', 'anthonys']
amputee
[]
1880-1900
disabilities
[]
blindness
['tests', 'persons', 'quakers', 'remedy', 'coached', 'pilot', 'protest', 'reading', 'educational', 'commission']
deafness
['central', 'examinations', 'paper', 'royal', 'dr', 'philadelphia', 'results', 'national', 'invention', 'addresses']
amputee
[]
1900-1920
disabilities
[]
blindness
['entitled', 'father', 'exhibited', 'children', 'estranged', 'classics', 'scenes', 'watches', 'belasco', 'instrument']
deafness
['jewish', 'gathering', 'benefit', 'players', 'troops', 'doesnt', 'one', 'sleeping', 'endeavor', 'military']
amputee
[]
1920-1940
disabilities
[]
blindness
['bedell', 'tests', 'longrange', 'westinghouse', 'children', 'add', 'risks', 'nerve', 'exhibited', 'father']
deafness
['gathering', 'jewish', 'benefit', 'holder', 'hearingcrosby', 'hard', 'fifty', 'expects', 'warm', 'romance']
amputee
[]
1940-1960
disabilities
[]
blindness
[]
deafness
[]
amputee
['becomes', 'grant', 'wk', 'relieve', 'e', 'patients', 'avoid', '5', 'promote', 'hail']
1960-1980
disabilities
['revs', 'grunberg', 'ama', 'system', 'named', 'leads', 'bid', 'case', 'contains', 'awaits']
blindness
['covered', 'children', '356000', 'amongst', 'fraud', 'heavyduty', 'mothers', 'parents', 'total', 'proposal']
deafness
['breastfed', 'iq', '356000', 'behavior', 'hr', 'many', 'bebe', 'sue', 'dr', 'mfrs']
amputee
[]
1980-2000
disabilities
['blind', 'deaf', 'people', 'one', 'children', 'said', 'disabled', 'school', 'students', 'braille']
blindness
['blind', 'deaf', 'said', 'lead', 'one', 'children', 'people', 'two', 'help', 'years']
deafness
['one', 'blind', 'lead', 'mr', 'said', 'deaf', 'many', 'school', 'people', 'help']
amputee
['investigate', 'introduced', 'installed', 'orders', 'products', 'proved', 'becomes', 'reasonable', 'foreign', 'grant']
2000-2020
disabilities
['disabled', 'blind', 'deaf', 'one', 'people', 'says', 'help', 'children', 'disability', 'new']
blindness
['blind', 'children', 'disabled', 'deaf', 'one', 'help', 'says', 'people', 'life', 'disability']
deafness
['one', 'many', 'blind', 'disabled', 'disabilities', 'deaf', 'disability', 'dr', 'school', 'vision']
amputee
['deaf', 'disability', 'life', 'says', 'blind', 'disabled', 'care', 'people', 'many', 'first']
###Markdown
The similarities found by Word2Vec word embeddings highlight a number of patterns in the kinds of topics disability has been historically associated with in the New York Times. In the 1860-1880 bin, disability words are associated with "policy" and "republicans," suggesting that disability is seen as a phenomenon that can be addressed through legislation. In contrast, the 1880-1900 bin finds intervention-related words such as "dr" (Dr.), "tests," "remedy," and "invention" (potentially implying a link to Alexander Graham Bell, an inventor and teacher of Deaf and hard-of-hearing students who lived during that time). Interestingly, most disability words were not found in the 1940-1960 bin, with the exception of "amputee" -- this is potentially due to disability-related coverage being focused on war veterans. Starting in the 1960s, the term "disabilities" began to be found more. Beginning in the 1980s, it became more closely associated with children and schools, suggesting a shift in journalistic interest or national focus. Around the same time, the four terms investigated ("disabilities," "deafness," "blindness," and "amputee") began to show more similarity with each other. These factors indicate that journalists have started reporting on disability more cohesively. Topic Modeling
###Code
# Clean the data for this task
def clean_tm(text):
return "".join(ch for ch in text if ch.isalnum() or ch == " ").lower()
# Process data through cleaning and vectorization
texts_tm = [clean_tm(t) for t in all_data]
count_vectorizer = CountVectorizer(stop_words='english', max_features=1000, min_df=0.05, max_df=0.9)
vectorized = count_vectorizer.fit_transform(texts_tm)
# Perform LDA analysis
num_topics = 4
lda = LatentDirichletAllocation(n_components=num_topics)
lda_topics = lda.fit_transform(vectorized)
# Plot the occurrence of topics across the corpus
pd.DataFrame(lda_topics).plot(figsize=(20, 5))
plt.title("Topcs Represented in New York Times Coverage of Disability Over Time")
plt.xlabel("Article Number")
plt.ylabel("Transformed LDA Value")
plt.legend();
# To contextualize the x-axis, 2000 = March 6, 2011
df.iloc[2000, 0]
# Print the top 5 words associated with each topic
topic_words = pd.DataFrame(lda.components_, columns=count_vectorizer.get_feature_names())
for i in range(num_topics):
print("Topic " + str(i) + ": " + " ".join(topic_words.loc[i].sort_values(ascending=False).head(5).index.tolist()))
###Output
Topic 0: deaf hearing school students language
Topic 1: blind blindness help lead says
Topic 2: new york city says photo
Topic 3: people disabled disabilities said years
###Markdown
The four topics found using Latent Dirichlet Allocation appear to be as follows:0. Schools and deafness1. Blindness2. The city3. People with disabilities more generallyAs noted in the word similarity analysis, it is visually apparent that dialogue has shifted from blindness and deafness to become more encompassing of disability as a whole. The topic breakdown also suggests that there is a focus on young people with disabilities and people in the city. Subject Labeling Rather than examine the text itself, the graph below visualizes how the *New York Times* has self-labeled the subjects of its articles.The articles in this dataset were selected by filtering all articles by five subject keywords ("Disabilities," "Blindness," "Deafness," "Prostheses," and "Amputation") that comprise the "Disability" topic on their website. The volume of these keywords over time has been graphed here.
###Code
# Plot change in NYTimes article subject labels over time
plt.figure(figsize=(20, 5))
pd.to_datetime(df[df["subjects"].apply(lambda s: "Disabilities" in s)]["date"]).apply(lambda d: d.year).value_counts().sort_index().plot(label="Disabilities")
pd.to_datetime(df[df["subjects"].apply(lambda s: "Blindness" in s)]["date"]).apply(lambda d: d.year).value_counts().sort_index().plot(label="Blindness")
pd.to_datetime(df[df["subjects"].apply(lambda s: "Deafness" in s)]["date"]).apply(lambda d: d.year).value_counts().sort_index().plot(label="Deafness")
pd.to_datetime(df[df["subjects"].apply(lambda s: "Prostheses" in s)]["date"]).apply(lambda d: d.year).value_counts().sort_index().plot(label="Prostheses")
pd.to_datetime(df[df["subjects"].apply(lambda s: "AMPUTATION" in s)]["date"]).apply(lambda d: d.year).value_counts().sort_index().plot(label="Amputation")
plt.title("New York Times Coverage of Disability, Content Volume Over Time")
plt.xlabel("Year")
plt.ylabel("Number of Articles")
plt.legend();
###Output
_____no_output_____
###Markdown
A few notable patterns are apparent: Volume of coverage appears to increase significantly around 1980, though it is unclear whether this is due to archiving. In this peak, however, "Blindness" and "Deafness" appear to be the most popular tags (as they are historically) until around 2010, when general disability coverage shoots up and those plateau. Finally, "Prostheses" becomes a tag around 2010. Interestingly, there is a spike in coverage of articles related to blindness in the mid-1920s. A manual review of the articles from this time period confirms this finding but fails to pinpoint an event or other reason for this change. Lexical Evolution In the analysis of content in disability-related articles, a recurring theme has been the change in usage of different words. Let's take a look at word usage and variation more specifically. Type-Token Ratios
###Code
# Clean the data for this task
def clean_ttr(text):
text = "".join(ch for ch in text if ch.isalnum() or ch == " ").lower()
return [w for w in nltk.word_tokenize(text) if not w in stop_words]
df_ttr = df[["date", "full_text"]].copy()
df_ttr["cleaned"] = df_ttr["full_text"].apply(clean_ttr)
# Evaluate the type-token ratio of the articles over time
def type_token_ratio(ls):
return len(set(ls))/len(ls)
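# Toy sanity check (added illustration): three tokens of which two are unique gives a ratio of 2/3
print(type_token_ratio(["blind", "deaf", "blind"]))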
# Plot type-token ratios
df_ttr["cleaned"].apply(type_token_ratio).plot(figsize=(20, 8))
plt.title("Type-Token Ratio of New York Times Articles on Disability Over Time")
plt.xlabel("Article Number")
plt.ylabel("Type-Token Ratio");
###Output
_____no_output_____
###Markdown
Type-token ratios divide the number of unique tokens by the total number of tokens in a text. They are commonly used to measure linguistic diversity. The above graph of type-token ratios across articles shows an upward slant, indicating that there has been an increase in this diversity over time. Individual Word Frequencies
###Code
# Clean data for this task
def clean_wf(text):
text = "".join(ch for ch in text if ch.isalnum() or ch == " ").lower()
return [w for w in nltk.word_tokenize(text) if not w in stop_words]
# Get the set of all unique words in the corpus
all_words = set(clean_wf(" ".join(all_data)))
# Calculate the frequency of a given word in a given document
def get_freq(word, doc):
return doc.count(word) / len(doc)
# Create dataframe of frequencies of each word in each time interval
freq = pd.DataFrame(index=all_words)
for bin, texts_wf in data_bins.items():
joined_texts = clean_wf(" ".join(texts_wf))
freq[bin] = [get_freq(word, joined_texts) for word in all_words]
# Calculate the largest difference for one word between frequencies in different time intervals
freq["diff"] = freq.apply(lambda x: max(x) - min(x), axis=1)
freq.head()
# Plot frequencies of words with a change of over 0.02
freq[freq["diff"] > 0.02].iloc[:, :-1].transpose().plot(figsize=(20, 5))
plt.title("Word Frequency Over Time")
plt.xlabel("Year Bin")
plt.ylabel("Word Frequency")
plt.legend();
###Output
_____no_output_____
###Markdown
Art DATIS: Data Analysis
###Code
import glob
txts_path = '/ivi/ilps/projects/ArtDATIS/artdatis/tagging/OCRed/typed/'
# check all paths are unique
paths = []
for file_path in glob.glob(txts_path+'*_path.txt'):
with open(file_path) as file:
paths.append(file.read().strip())
print("Loaded %d paths"%len(paths))
# make sure there are no duplicate paths
assert len(paths) == len(set(paths))
# 1. load OCRed texts into a corpus of documents
text_corpus = []
# filter out and collect text files
for file_path in glob.glob(txts_path+'*_text.txt'):
with open(file_path, encoding="utf-8") as file:
text = file.read()
# filter duplicates
if text not in text_corpus:
text_corpus.append(text)
print("Loaded %d documents"%len(text_corpus))
# 2. pre-processing: remove stopwords, split into words
import urllib.request
from pprint import pprint
def load_word_list(lang='en'):
url = 'https://raw.githubusercontent.com/stopwords-iso/stopwords-%s/master/stopwords-%s.txt' % (lang, lang)
stopwords = urllib.request.urlopen(url).read().decode('UTF-8').split()
print("Loaded %s stopwords, e.g. %s" % (lang, ", ".join(stopwords[:2])))
return set(stopwords)
# load stopwords
en_stoplist = load_word_list('en')
de_stoplist = load_word_list('de')
nl_stoplist = load_word_list('nl')
fr_stoplist = load_word_list('fr')
stoplist = en_stoplist | de_stoplist | nl_stoplist | fr_stoplist
# Lowercase each document, split it by white space and filter out stopwords
texts = [[word for word in document.lower().split() if word not in stoplist]
for document in text_corpus]
# Count word frequencies
word_list = [word for text in texts for word in text if word.isalpha()]
from collections import Counter
Counter(word_list).most_common()
# Visualise counter
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="darkgrid")
%matplotlib inline
labels, counts = [], []
for label, count in Counter(word_list).most_common(10):
labels.append(label)
counts.append(count)
plt.figure(figsize=(10, 6))
ax = sns.barplot(x=labels, y=counts)
# Count n-gram frequencies based on https://stackoverflow.com/questions/12488722/counting-bigrams-pair-of-two-words-in-a-file-using-python
from itertools import tee, islice
def ngrams(lst, n):
tlst = lst
while True:
a, b = tee(tlst)
l = tuple(islice(a, n))
if len(l) == n:
yield l
next(b)
tlst = b
else:
break
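# Tiny illustration (added): bigrams of a four-token list -> [('a', 'b'), ('b', 'c'), ('c', 'd')]
print(list(ngrams(['a', 'b', 'c', 'd'], 2)))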
Counter(ngrams(word_list, 2)).most_common()
Counter(ngrams(word_list, 3)).most_common()
Counter(ngrams(word_list, 4)).most_common()
Counter(ngrams(word_list, 5)).most_common()
Counter(ngrams(word_list, 6)).most_common()
# check docs
keywords = ['verandering', 'brengt', 'orgaan', 'deelt', 'besluit', 'terstond']
results = [doc for doc in texts if set(keywords).issubset(set(doc))]
print("%d results"%len(results))
print(results[4])
print(results[1])
print(set(results[4]) - set(results[1]))
print("HELL YEAH")
###Output
_____no_output_____
###Markdown
Analysis of Covid-19 data In this notebook, the Covid-19 time series will be analyzed. The data is provided by Johns Hopkins University on a GitHub repository: https://github.com/CSSEGISandData/COVID-19 The data will be linked with country information, taken from Kaggle user koryto: https://www.kaggle.com/koryto/countryinfo Additional information about national restrictions in South Korea, Italy and Germany was taken from the following sources: https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_South_Korea https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_Italy https://www.bundesregierung.de/breg-de/themen/coronavirus/coronavirus-1725960 Business understanding As the coronavirus pandemic is spreading, it becomes clearer that we are facing the major threat of the current century so far. Every country tackles the pandemic with measures of highly varying intensity and reaction timing. In combination with different national conditions of economy and health care systems, the numbers of confirmed cases and deaths are diverging as well. With this analysis, the publicly available data should be used to compare the spread in multiple countries and to check for influences on the mortality ratio. The analysis should answer the following questions: 1. How long did it take in China and South Korea to reach the turning point of declining new infections or deaths? 2. Are effects of national restrictions visible in the time series? 3. Is there a correlation between national key figures (e.g. health care capacity) and the mortality ratio?
###Code
import os
import numpy as np
import pandas as pd
from collections import defaultdict
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.dates as mdates
import seaborn as sns
from datetime import datetime, timedelta
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from bokeh.plotting import ColumnDataSource, figure, output_file, show, save
from bokeh.palettes import Viridis
from bokeh.io import output_notebook
sns.set()
output_notebook()
###Output
_____no_output_____
###Markdown
Data Understanding
###Code
# Load all the data sets
# Covid-19 time series
source_covid_data = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/'
covid_conf = pd.read_csv(source_covid_data + 'time_series_covid19_confirmed_global.csv')
covid_deaths = pd.read_csv(source_covid_data + 'time_series_covid19_deaths_global.csv')
# Country data
country_info = pd.read_csv(os.path.join('.', 'data', 'covid19countryinfo.csv'))
# Restrictions
restrictions = pd.read_csv(os.path.join('.', 'data', 'restrictions.csv'),sep=';')
restrictions['date'] = pd.to_datetime(restrictions['date'], format='%d.%m.%Y')
# List all present countries
#for c in covid_conf.columns:
# print(c)
covid_conf.head()
covid_conf['Country/Region'].value_counts()
country_info.head()
country_info['country'].value_counts()
country_info.isnull().sum()
restrictions.head()
###Output
_____no_output_____
###Markdown
In the Covid-19 time series, each row contains a region or country, with the columns listing daily values. The data does not contain NaN values; if no case or death was confirmed, the value is zero. For some countries, the data is provided on regional level, as seen with the value_counts() query. The metadata for each country is also partly given on regional level, but does not match the time series data. Therefore the common level of detail is national. In the data preparation step, the regional data has to be combined to national level. The country_info dataset contains many NaN values. Cleaning of those values will be carried out after combining to national data.
Data Preparation
Clean time series data
###Code
# transform data (date as index, country as column)
def transform_covid_data(df):
"""
Description: This function transforms the Covid-19 dataset in the following steps:
        - removes unnecessary columns
- removes breakdown to regions, only keeps values on country level (returns sum over national regions)
- countries are moved to columns
- date is moved to index as datetime format
Arguments:
df: pandas DataFrame directly loaded from John Hopkins University repository (Covid-19 time series)
Returns:
transformed pandas DataFrame
"""
df.drop(columns=['Province/State', 'Lat', 'Long'], inplace=True)
df = df.groupby('Country/Region').sum()
df = df.transpose()
df.index = [datetime.strptime(d, '%m/%d/%y') for d in df.index]
return df
# transform time series data onto national level (only keep time series for each coutry)
covid_conf = transform_covid_data(covid_conf)
covid_deaths = transform_covid_data(covid_deaths)
###Output
_____no_output_____
###Markdown
Clean country information
###Code
# drop columns unused for correlation analysis:
# - all columns used with sex ratio (not relevant for question)
# - all restriction columns (are replaced by self researched restrictions)
# - columns about virus tests (present for too few countries)
country_info.drop(columns=['alpha3code','alpha2code','tests','testpop',
'quarantine','schools','publicplace','gatheringlimit',
'gathering','nonessential','sex0','sex14','sex25',
'sex54','sex64','sex65plus','sexratio'],
inplace=True)
# Convert numeral columns from string to float
def convert_string(x):
"""
Description: This function converts a string with ',' as thousand seperator into a float.
Arguments: x: string
Returns: float
"""
try:
return np.float(x.replace(',',''))
except:
return np.nan
for c in ['pop', 'gdp2019', 'healthexp']:
country_info[c] = country_info[c].apply(convert_string)
# reduce to national level (only US and China is split up into regions)
# only keep mainland China, data is filled for this row, not for regions
country_info = country_info.loc[~country_info.region.isin(['Hong Kong', 'Wuhan', 'Hubei'])]
# federal states of US don't contain any data, keep only US-row
country_info = country_info.loc[~((country_info.country=='US') & (~country_info.region.isnull()))]
# check for nan values of relevant countries (more than 50 deaths, mortality can be calculated)
relevant_countries = [c for c in covid_deaths.columns if covid_deaths.iloc[-1][c]>=50]
print(relevant_countries)
country_info.loc[country_info.country.isin(relevant_countries)].isnull().sum()
###Output
_____no_output_____
###Markdown
No NaN values are present for the countries where the mortality can be calculated. Therefore it is not necessary to drop or impute data.
Question 1: How long did it take in China and South Korea to reach the turning point of declining new infections or deaths?
###Code
def get_time_series(df, country, min_value):
"""
Description: This function returns the time series of a specific country as DataSeries.
The time series starts where min_value is reached. The index is the days since this value was reached.
Arguments:
df: pandas DataFrame containing Covid-19 time series on country level (output from function transform_covid_data)
country: string with country name
min_value: float, time series will be reduced to where country value is >= min_value
Returns:
pandas DataSeries with index as days, since min_value was reached
"""
s = df.loc[df[country]>=min_value, country]
s.index = np.array([datetime.timestamp(x) for x in s.index])/(3600*24)
s.index -= s.index[0]
return s
def plot_series(ax, s, xlabel, linelabel):
"""
Description: This function plots a time series and its gradient on a matplotlib Axis.
The series and gradient are plotted on two seperate Y-axes.
Arguments:
ax: matplotlib Axis, on which the series should be plotted
s: pandas DataSeries, to be plotted
xlabel: string, label on the x-axis
linelabel: string, label used in the legend
Returns:
None
"""
# display total values
color = cm.viridis(100)
ax.plot(s, color=color, label=linelabel+' (total)')
ax.tick_params(axis='y', labelcolor=color)
ax.set_xlabel(xlabel)
# display daily gradient on second y-axis
ax2 = ax.twinx()
color = cm.viridis(150)
ax2.plot(s.index, np.gradient(s, s.index), color=color, label=linelabel+' per day')
ax2.tick_params(axis='y', labelcolor=color)
# add legend
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='upper left')
ax2.grid(None)
# Country to be plotted
country = 'Korea, South'
fig, ax = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=(10,4.5))
# plot confirmed Covid-19 cases
s = get_time_series(covid_conf, country, 100)
plot_series(ax[0], s, xlabel='Days since 100 cases', linelabel='Cases')
# plot confirmed Covid-19 deaths
s = get_time_series(covid_deaths, country, 10)
plot_series(ax[1], s, xlabel='Days since 10 deaths', linelabel='Deaths')
# adjust figure
plt.suptitle(country)
fig.tight_layout()
plt.subplots_adjust(top=0.92)
###Output
_____no_output_____
###Markdown
Both China and South Korea showed success in reducing the number of new infections. From the time series, the duration until reaching the turning point of declining new infections is visible. I will here count the days since 100 confirmed infections and 10 confirmed deaths, respectively. As the diagrams above show, China took around 10 days to reach the peak of new infections and 25 days to reach the peak of new deaths. The peak of new cases at 20 days is a result of an updated counting procedure. In South Korea the number of new infections started to decline after 11 days, while deaths keep increasing (as of writing the article on 30.03.2020).
Question 2: Are effects of national restrictions visible in the time series?
South Korea managed to keep the number of new infections on a very low level. I will therefore compare the measures taken by South Korea with Italy and Germany, two major European countries.
###Code
def add_annotations(ax, df, s):
"""
Description: This function adds annotation to a plot of a time series.
On the diagram area, the index of the restrictions is added, pointing to the time series on that specific date.
Beside the diagram the description of the restriction is visualized as text.
Arguments:
ax: matplotlib Axis, on which the annotation should be added
df: pandas DataFrame, listing the national restrictions in two columns:
- date (datetime, date of restriction)
- text (string, Description of the restriction)
s: pandas DataSeries, containing
Returns:
None
"""
last_y = 0
df.reset_index(drop=True, inplace=True)
for i, row in df.iterrows():
y = s.iloc[s.index.get_loc(row.date, method='nearest')]
x_text = row.date - timedelta(days=10)
y_text = y + s.max()/10
y_text = max(y_text, last_y+s.max()/12)
last_y = y_text
ann = ax.annotate(str(i+1),
xy=(row.date, y), xycoords='data',
xytext=(x_text, y_text), textcoords='data',
size=15, va="center", ha="center",
bbox=dict(boxstyle="round4", fc="w"),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3,rad=-0.2",
fc="k", color='k'),
)
plt.text(1.02, 0.92-i*0.06, '{:d}: {}'.format(i+1,row.text), horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes,
fontsize=11)
plt.text(1.02, 1, 'Restrictions / Actions:', horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes,
fontsize=13, fontweight='bold')
# Country to be plotted
country = 'Italy' # Restrictions were identified for Italy, Germany, South Korea
fig, ax = plt.subplots(figsize=(9,4))
s = covid_conf[country]
plt.plot(s)
# format axes
ax.set_xlim((s.idxmin(),s.idxmax()+timedelta(days=5)))
myFmt = mdates.DateFormatter('%m-%d')
ax.xaxis.set_major_formatter(myFmt)
ax.set_ylabel('Confirmed cases (total)')
# format figure
plt.suptitle(country)
fig.tight_layout()
plt.subplots_adjust(right=0.6, top=0.93)
# Add restrictions as annotations
add_annotations(ax, restrictions.loc[restrictions.country_region==country], s)
###Output
_____no_output_____
###Markdown
Comparing the reactions, it is clearly visible that South Korea took early measures like closing schools and universities when only few cases were present. An early reaction of the public is also reported, as residents of Daegu avoided public places from February 18th on. The early and comparably soft measures resulted in quickly declining new infections. European countries like Germany and Italy showed later, increasingly stricter reactions. Even more than 10 days after their shut-downs, no decline of new cases is visible.
Question 3: Is there a correlation of national key figures (e.g. health care capacity) and mortality ratio?
So far, the disease caused by the coronavirus shows a highly diverging mortality ratio of deaths per confirmed cases. I will therefore investigate how key figures of the national health care systems are correlated to the mortality.
###Code
# ---------- Question 3: Correlation with death/cases ratio ------------
ratio = defaultdict(list)
country_info['death_ratio'] = np.nan
for c in covid_conf.columns:
df = pd.concat([pd.Series(covid_conf[c], name='Cases'), pd.Series(covid_deaths[c], name='Deaths')],axis=1)
# Keep only countries with relevant death count
df = df.loc[df.Deaths>50]
if len(df) == 0:
continue
death_ratio = pd.Series(df.Deaths / df.Cases, name=c)
country_info.loc[country_info.country==c,'death_ratio'] = death_ratio.iloc[-1]
ratio['date'].append(death_ratio.index)
ratio['death_ratio'].append(np.array(death_ratio))
ratio['country'].append(c)
# add line color
for i in range(len(ratio['country'])):
ratio['color'].append(Viridis[256][int(i/len(ratio['country'])*256)])
# clean dataframe
country_info.dropna(subset=['death_ratio', 'healthperpop'], inplace=True)
# drop very small countries
country_info = country_info.loc[country_info['pop']>1E6]
source = ColumnDataSource(ratio)
TOOLTIPS = [("country", "@country")]
p = figure(plot_width=600, plot_height=400, tooltips=TOOLTIPS,
title="Covid-19 death ratio over time", x_axis_type='datetime')
p.multi_line(xs='date', ys='death_ratio',
line_width=5, line_color='color', line_alpha=0.6,
hover_line_color='color', hover_line_alpha=1.0,
source=source)
p.xaxis.axis_label = "Date"
p.yaxis.axis_label = "Covid-19 deaths / confirmed cases"
show(p)
###Output
_____no_output_____
###Markdown
The mortality ratio is not constant over time, as the diagram above shows. In almost all countries, the ratio is increasing. This might be caused by overloaded health care systems or limited testing capacities. Only future analysis will show if the values are converging to one global constant.
###Code
correlation_columns = ['pop', 'density', 'medianage', 'urbanpop',
'hospibed', 'smokers', 'lung', 'femalelung', 'malelung', 'gdp2019',
'healthexp', 'healthperpop']
correlation = [country_info['death_ratio'].corr(country_info[c]) for c in correlation_columns]
fig, ax = plt.subplots(figsize=(7,4))
plt.bar(range(len(correlation)), correlation)
plt.xticks(range(len(correlation)), correlation_columns, rotation=90)
plt.ylabel('Correlation with mortality')
fig.tight_layout()
###Output
_____no_output_____
###Markdown
The correlation analysis shows slight correlation of the mortality with the number of hospital beds (hospibed), the percentage of smokers (smokers) and the health care expenses (healthexp and healthperpop). The following diagrams visualize the influence of the health care capacity in more detail.
###Code
source = ColumnDataSource(country_info)
TOOLTIPS = [("country", "@country"),
("Mortality", "@death_ratio")]
p = figure(plot_width=600, plot_height=400, tooltips=TOOLTIPS,
title="Influence of hospital capacity") #, x_axis_type="log"
p.circle('hospibed', 'death_ratio', size=10, source=source)
p.xaxis.axis_label = "Hospital beds per 1000 people"
p.yaxis.axis_label = "Covid-19 deaths / confirmed cases,\n as on {}".format(datetime.strftime(covid_conf.index[-1], "%Y-%m-%d"))
show(p)
source = ColumnDataSource(country_info)
p = figure(plot_width=600, plot_height=400, tooltips=TOOLTIPS,
title="Influence of health care expenses", x_axis_type="log") #
p.circle('healthperpop', 'death_ratio', size=10, source=source)
p.xaxis.axis_label = "Health care expenses per 1 Mio. people"
p.yaxis.axis_label = "Covid-19 deaths / confirmed cases, as on {}".format(datetime.strftime(covid_conf.index[-1], "%Y-%m-%d"))
show(p)
###Output
_____no_output_____
###Markdown
- By: Harkishan Singh Baniya
- Email: [email protected]
- Reference: Advances in Financial Machine Learning by Dr Marcos Lopez De Prado

This notebook is part of the article series **Alternative Bars on Alpaca**. In the first part of the article I explained how to generate *Alternative Bars*, i.e. `tick bar`, `volume bar` and `dollar bar`, using the Alpaca API. In this second part we will explore them and look at some of their statistical properties. The analysis will be performed on historical bars of AAPL (Apple) trades data from *Jan 1st 2018* to *Dec 31st 2019*. The sampling frequency/thresholds of the different bars are as follows.
- Tick Bars: 5,000 (ticks)
- Volume Bars: 700,000 (volume/qty)
- Dollar Bars: 150,000,000 (dollar)
- Time Bars: 5 (minute)
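As a quick reminder of how such bars are formed (the full implementation using the Alpaca API is in the first part of the article), below is a minimal sketch of threshold-based sampling for dollar bars. The `trades` frame with `price` and `qty` columns is an assumed input for illustration; this is not the code used to build the sample datasets.

```python
import pandas as pd

def sample_dollar_bars(trades: pd.DataFrame, threshold: float = 150_000_000) -> pd.DataFrame:
    """Emit one OHLCV bar each time the cumulative traded dollar value crosses `threshold`.
    Assumes `trades` is indexed by timestamp and has 'price' and 'qty' columns."""
    bars, bucket, dollars = [], [], 0.0
    for ts, row in trades.iterrows():
        bucket.append((ts, row['price'], row['qty']))
        dollars += row['price'] * row['qty']
        if dollars >= threshold:
            prices = [p for _, p, _ in bucket]
            bars.append({'timestamp': bucket[-1][0],
                         'open': prices[0], 'high': max(prices),
                         'low': min(prices), 'close': prices[-1],
                         'volume': sum(q for _, _, q in bucket)})
            bucket, dollars = [], 0.0
    return pd.DataFrame(bars).set_index('timestamp')
```

Tick and volume bars follow the same loop, accumulating the tick count or the traded quantity instead of the dollar value.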
###Code
#Imports
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import datetime as dt
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import style
from scipy import stats
from statsmodels.graphics.tsaplots import plot_acf
style.use('ggplot')
%matplotlib inline
#trim the after market data if any
def trim_df(df:pd.DataFrame):
try:
df = df.tz_localize('UTC').tz_convert('US/Eastern')
except TypeError as e:
df = df.tz_convert('US/Eastern')
idx = df.index
c1 = (idx.time < dt.time(9, 30))
c2 = (idx.time > dt.time(16, 0))
df=df[~(c1|c2)]
return df
#read data and store the bars in a dictionary
def read_data(symbol:str):
path = 'sample_datasets/analysis/'
bars = {}
bars['time_bar'] = trim_df(pd.read_csv(path+f'{symbol}_5minute_bars.csv', index_col=[0], parse_dates=True))
bars['tick_bar'] = trim_df(pd.read_csv(path+f'{symbol}_tick_bars.csv', index_col=[0], parse_dates=True))
bars['volume_bar'] = trim_df(pd.read_csv(path+f'{symbol}_volume_bars.csv', index_col=[0], parse_dates=True))
bars['dollar_bar'] = trim_df(pd.read_csv(path+f'{symbol}_dollar_bars.csv', index_col=[0], parse_dates=True))
return bars
AAPL = read_data(symbol='AAPL')
###Output
_____no_output_____
###Markdown
Bar Count
###Code
#Bar Count Analysis and Plots
def show_bar_count(bars:dict, time_group='1D'):
counts = {}
f,ax=plt.subplots(figsize=(16,9))
for bar in bars.keys():
if bar != 'time_bar':
df = bars[bar]
count = df.groupby(pd.Grouper(freq=time_group))['close'].count()
counts[bar] = count
count.plot(ax=ax, ls='-', label=bar, alpha=0.8)
print(f'The bar count for {bar} with time group {time_group} has a mean count of {count.mean()} and a standard deviation of {count.std()}')
ax.legend()
show_bar_count(AAPL)
###Output
The bar count for tick_bar with time group 1D has a mean count of 29.718792866941016 and a standard deviation of 25.044996663634983
The bar count for volume_bar with time group 1D has a mean count of 25.685871056241428 and a standard deviation of 21.890620465954125
The bar count for dollar_bar with time group 1D has a mean count of 23.403292181069958 and a standard deviation of 19.47185317502504
###Markdown
Bars are sampled with thresholds chosen arbitrarily to give a bar count between 25 and 30 bars per day. Overall, the bar counts are most stable for dollar bars, since they have the least deviation from the mean count, while tick bars have a high deviation.
Comparing with Time Bars Sampling
###Code
def plot_bars(bars:dict, date:str='2019-08-07'):
time_bar = bars['time_bar'].close.loc[date].tz_convert('UTC')
tick_bar = bars['tick_bar'].close.loc[date]
volume_bar = bars['volume_bar'].close.loc[date]
dollar_bar = bars['dollar_bar'].close.loc[date]
fig, ax = plt.subplots(figsize=(18,12))
no_lable = False
for timestamp in time_bar.index:
if not no_lable:
plt.axvline(x=timestamp, label='time bar', color='blue', linestyle='--', linewidth=0.7)
no_lable=True
else:
plt.axvline(x=timestamp, color='blue', linestyle='--', linewidth=0.7)
tick_bar.plot(ax=ax, label='tick bar', ls='', marker='D', color='yellow', alpha=0.5)
volume_bar.plot(ax=ax, label='volume bar', ls='', marker='o', color='purple', alpha=0.5)
dollar_bar.plot(ax=ax, label='dollar bar', ls='', marker='*', color='red', alpha=0.5)
ax.legend()
plt.title(f'Bar plots for {date}')
plot_bars(AAPL)
###Output
_____no_output_____
###Markdown
I have randomly chosen a date from the sample and plotted the alternative bars over the time bars as a reference. We can see some clustering at the start and end of market hours; this was expected, as more orders are executed during these periods and as a result more information is available. The time bars have not captured this due to their constant sampling frequency.
###Code
#Statistical Tests
def get_statistics(bars:dict):
res = []
for bar in bars.keys():
ret = bars[bar].close.pct_change()[1:]
jb = stats.jarque_bera(ret)[0]
kurt = stats.kurtosis(ret)
skew = stats.skew(ret)
mean = ret.mean()
std = ret.std()
res.append([mean, std, skew, kurt, jb])
return pd.DataFrame(res, index=bars.keys(),
columns=['mean', 'std', 'skew', 'kurtosis','jarque-bera stats'])
get_statistics(AAPL)
###Output
_____no_output_____
###Markdown
Here we see some important statistics for the different bars' returns. The dollar bars have the best statistics among all, in particular the lowest Jarque-Bera statistic and kurtosis. The time bars have the least attractive statistics among all.
###Code
##ACF Plots
def plot_bar_acf(bars:dict, lags:int=120):
fig, axes = plt.subplots(2, 2, figsize=(20,15))
loc = [(0,0), (0,1), (1,0), (1,1)]
for i, bar in enumerate(bars.keys()):
ret = bars[bar].close.pct_change()[1:]
plot_acf(ret, lags=lags, zero=False, ax=axes[loc[i][0],loc[i][1]], title=f'{bar} Auto Correlation with {lags} lag')
plot_bar_acf(AAPL)
##Serial Correlations/ Auto-Correlations
def get_auto_corr(bars:dict):
for bar in bars.keys():
ret = bars[bar].close.pct_change()[1:]
auto_corr = ret.autocorr(lag=1)
print(f'Auto-correlations for {bar} with lag=1 is {auto_corr} ')
get_auto_corr(AAPL)
###Output
Auto-correlations for time_bar with lag=1 is -0.01144566799717028
Auto-correlations for tick_bar with lag=1 is -0.028345363282703682
Auto-correlations for volume_bar with lag=1 is -0.027059486204423024
Auto-correlations for dollar_bar with lag=1 is -0.02654303523363807
###Markdown
There is no meaningful autocorrelation (at lag 1) in any of the given bars' returns.
###Code
#Distribution Plot
def plot_return_distributions(bars:dict):
f,ax=plt.subplots(figsize=(14,10))
for bar in bars.keys():
ret = bars[bar].close.pct_change()[1:]
#normalize the returns
norm_ret = (ret - ret.mean()) / ret.std()
sns.kdeplot(norm_ret, label=bar)
sns.kdeplot(np.random.normal(size=100000), label="Normal", color='black', linestyle="--")
plt.title('Bar Returns KDE Plots')
plt.xticks(range(-5, 6))
plt.legend(loc=8, ncol=5)
plt.xlim(-5, 5)
plt.show()
plot_return_distributions(AAPL)
###Output
_____no_output_____
###Markdown
How the number of epochs relates to the learning_rate
- model complexity
- learning rate
- epochs + complexity -> more epochs, smaller learning rate (assuming there are no local minima) -> fewer epochs, larger learning rate
**Exploring**
###Code
# model complexity is defined by the number of parameters
def model_complexity(x):
lista = eval(x)
lista.insert(0, 16)
return sum(i*j for i, j in zip(lista, lista[1:]))
df = df.assign(complexity = df.hidden_layers.apply(model_complexity))
df.hidden_layers = df.hidden_layers.apply(eval)
df = df.assign(min_neuronio_layer = df.hidden_layers.apply(min),
complexity_tier = pd.cut(df.complexity, 3).cat.codes.map({0:"baixo", 1:"médio", 2:"alto"}))
plot_df = df.rename(columns={"accuracy":"Acurácia",
"complexity_tier": "Rank de Complexidade",
"learning_rate": "Taxa de Aprendizagem",
"num_epochs":"Número de épocas",
"min_neuronio_layer":"Menor Camada"})
g = sns.FacetGrid(plot_df, row="Rank de Complexidade", hue="Número de épocas", height=4, aspect=2,palette="bright")
g.map(sns.stripplot,
"Taxa de Aprendizagem",
"Acurácia",
# alpha=0.4,
edgecolor="black",
linewidth=0.8,
jitter=0.15,
order=[0.001, 0.01, 0.1, 0.15])
g.map(plt.axhline, y=0.8, ls='--', c='gray')
g.map(plt.axhline, y=0.4, ls='--', c='gray')
g.map(plt.axhline, y=0.2, ls='--', c='gray')
g.map(plt.axhspan, ymin=.8, ymax=1, color='lightgray')
g.map(plt.axhspan, ymin=.4, ymax=.8, color='beige')
g.map(plt.axhspan, ymin=.2, ymax=.4, color='peachpuff')
g.add_legend()
sns.move_legend(g, "lower center", bbox_to_anchor=(.45, 1.01), ncol=4, title="Número de Épocas", frameon=False)
for lh in g._legend.legendHandles:
lh._sizes = [100]
plt.savefig("figures/analysis.eps", format="eps", bbox_inches='tight')
###Output
_____no_output_____
###Markdown
**Image Analysis**
###Code
metrics = [np.mean, np.std, min, max]
plot_df.groupby("Menor Camada").agg({"Acurácia":metrics})
regiao3 = plot_df.query("0.2 < Acurácia & Acurácia < 0.4")
camadas3 = regiao3["Menor Camada"].value_counts().sort_index()
camadas3.name = "Região 3"
regiao2 = plot_df.query("0.4 < Acurácia & Acurácia < 0.8")
camadas2 = regiao2["Menor Camada"].value_counts().sort_index()
camadas2.name = "Região 2"
regiao1 = plot_df.query("Acurácia > 0.8")
regiao1["Menor Camada"].value_counts().sort_index()
camadas1 = regiao1["Menor Camada"].value_counts().sort_index()
camadas1.name = "Região 1"
camadas = pd.concat([camadas1, camadas2, camadas3], axis=1)
camadas.round(3)
###Output
_____no_output_____
###Markdown
Agent-Based Traffic Model
Background
This model is a looped implementation of the cellular automaton (CA) described by Nagel and Schreckenberg (NaSch). The NaSch CA model splits agent (vehicle) actions into four stages:
1. Acceleration
2. Braking
3. Randomisation
4. Vehicle Movement
In this implementation the 4th action is separated from the other actions to simulate simultaneous activation of the agents. This isn't strictly necessary for non-multithreaded processes, but it ensures that vehicle positions wouldn't cause conflicts if it were multithreaded.
Implementation
The model is written in Python using the Mesa ABM framework which allows for easy visualisation. This is a demonstration of running a Mesa model in an IPython Notebook, which is an alternative to running it using javascript visualisation in a webpage. The actual model and agent code are implemented in model.py, in the same directory as this notebook. Below, we will import the model class, instantiate it, run it, and plot the average speed of the agents.
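For reference, the four stages above reduce to a very small update rule. The following is a minimal, generic single-lane NaSch step on a ring road, not the Mesa implementation in model.py; the names `positions`, `speeds`, `road_length`, `v_max` and `p_slow` are illustrative assumptions.

```python
import random

def nasch_step(positions, speeds, road_length, v_max=4, p_slow=0.3):
    """One synchronous NaSch update. `positions` and `speeds` are parallel lists
    of the vehicles in their cyclic order around the ring (cells are integers)."""
    n = len(positions)
    new_speeds = []
    for i in range(n):
        v = min(speeds[i] + 1, v_max)                                     # 1. acceleration
        gap = (positions[(i + 1) % n] - positions[i] - 1) % road_length   # free cells ahead
        v = min(v, gap)                                                   # 2. braking
        if v > 0 and random.random() < p_slow:                            # 3. randomisation
            v -= 1
        new_speeds.append(v)
    new_positions = [(x + v) % road_length for x, v in zip(positions, new_speeds)]  # 4. movement
    return new_positions, new_speeds
```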
###Code
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = [10, 6]
plt.rcParams['figure.dpi'] = 100
from model import NaSchTraffic
###Output
_____no_output_____
###Markdown
Now we instantiate a model instance: a 1x60 grid with 5 vehicles and a max vehicle speed of 4.
###Code
model = NaSchTraffic(1, 60, 5, 4, seed=1)
###Output
_____no_output_____
###Markdown
We want to run the model until it settles, but it's hard to tell when that is, so let's just run it for 100 steps:
###Code
while model.running and model.schedule.steps < 100:
model.step()
print(model.schedule.steps) # Show how many steps have actually run
###Output
100
###Markdown
The model has a DataCollector object, which checks and stores the average speed of the agents at every step.It also collects the individual speed and position of each agent at each step.It can also generate a pandas DataFrame of the data it has collected.
###Code
model_out = model.datacollector.get_model_vars_dataframe()
###Output
_____no_output_____
###Markdown
The dataframe for the model:
###Code
model_out.head()
###Output
_____no_output_____
###Markdown
Finally, we can plot the 'AverageSpeed' series:
###Code
plt.plot(model_out.AverageSpeed)
plt.xlabel('Step Number')
plt.ylabel('Average Speed')
plt.show()
###Output
_____no_output_____
###Markdown
For testing purposes, here is the dataframe for the agents giving each agent's x position and speed at each step.
###Code
agent_out = model.datacollector.get_agent_vars_dataframe()
agent_out.head()
###Output
_____no_output_____
###Markdown
Effect of speed limit and vehicle quantity on average traffic speed
Now, we can do a parameter sweep to see how the average speed changes against the number of vehicles and the max speed. First we make a new function to collect the average speed during the second half of the simulation.
###Code
from mesa.batchrunner import BatchRunner
import itertools
def get_averages(model):
"""
    Find the average speed of all the agents over the second half of the run (skipping the first 60 recorded steps).
"""
total_averages = 0
list_length = 0
    selected_averages = itertools.islice(model.averages, 60, None)  # skip the first 60 recorded steps
for average_speed in selected_averages:
total_averages += average_speed
list_length+=1
return total_averages / list_length
model_reporters={"AverageSpeed": get_averages}
###Output
_____no_output_____
###Markdown
Now, we set up the batch run, with a dictionary of fixed and changing parameters. Let's vary the maximum speed and the number of vehicles.
###Code
fixed_params = {"height": 1, "width": 60}
variable_parms = {"general_max_speed": range(1, 6), "vehicle_quantity": range(1, 20+1)}
###Output
_____no_output_____
###Markdown
Then we create a batch runner object to conduct the parameter sweep. The number of iterations is the number of runs it does of the whole parameter space.
###Code
param_sweep = BatchRunner(NaSchTraffic,
variable_parameters=variable_parms, fixed_parameters=fixed_params,
iterations=10,
max_steps=120,
model_reporters=model_reporters)
###Output
_____no_output_____
###Markdown
Then we run the parameter sweep (this can take a few minutes).
###Code
param_sweep.run_all()
###Output
1000it [00:21, 45.57it/s]
###Markdown
Now we create the dataframe for the data collected like we did for the single model run.
###Code
df = param_sweep.get_model_vars_dataframe()
df.head()
###Output
_____no_output_____
###Markdown
A scatter plot can be used to show how the parameters affect each other. We have varied more than one parameter, so we should try to visualise the interactions. One way of achieving this is with coloured data points:
###Code
plt.scatter(df.AverageSpeed, df.general_max_speed, c=df.vehicle_quantity, cmap=plt.cm.coolwarm)
plt.xlabel('Average Speed')
plt.ylabel('Max Speed')
bar = plt.colorbar()
bar.set_label('Number of Vehicles')
plt.grid(True)
###Output
_____no_output_____
###Markdown
If coloured data points aren't showing the trends clearly enough, another option is a 3D scatter plot:
###Code
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
# fig.tight_layout(pad=4)
ax = Axes3D(fig)
ax.scatter(df.vehicle_quantity, df.general_max_speed, df.AverageSpeed, c=df.vehicle_quantity, cmap=plt.cm.coolwarm)
ax.set_zlabel('Average Speed')
plt.xlabel('Number of Vehicles')
plt.ylabel('Max Speed')
plt.show()
###Output
_____no_output_____
###Markdown
Bikers on the Fremont bridge
Example adapted from the [Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html)
Set up: Download (and load) data
###Code
# Download data (you can download it by uncommenting and running the line of code below)
# !curl -o FremontBridge.csv https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler # scaling data
from sklearn.model_selection import train_test_split # splitting data
from sklearn.neighbors import KNeighborsRegressor # regressor
from sklearn.model_selection import GridSearchCV # for grid search
from sklearn.pipeline import make_pipeline # for making pipelines
%matplotlib inline
# Aggregate data to the daily level
counts = pd.read_csv('data/FremontBridge.csv', index_col='Date', parse_dates=True)
daily = counts.resample('d').sum()
daily['Total'] = daily.sum(axis=1)
daily = daily[['Total']] # remove other columns
###Output
_____no_output_____
###Markdown
Data Prep: Adding Features
###Code
# Load weather data (downloaded from: https://www.ncdc.noaa.gov/cdo-web/search?datasetid=GHCND)
weather = pd.read_csv('data/weather.csv', index_col='DATE', parse_dates=True)
# Create dry_day column
weather['dry_day'] = (weather['PRCP'] == 0).astype(int)
# Join selected weather columns
daily = daily.join(weather[['PRCP', 'dry_day', 'TMIN', 'TMAX']])
# Compute hours of daylight
def hours_of_daylight(date, axis=23.44, latitude=47.61):
"""Compute the hours of daylight for the given date"""
    days = (date - pd.Timestamp(2000, 12, 21)).days
m = (1. - np.tan(np.radians(latitude))
* np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))
return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.
daily['daylight_hrs'] = list(map(hours_of_daylight, daily.index))
daily[['daylight_hrs']].plot()
plt.ylim(8, 17)
###Output
_____no_output_____
###Markdown
Feature Generation: Categorical Variable(s)
###Code
# Get dummy variables from categorical columns (alternative: sklearn OneHotEncoding)
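# One possible sketch (assumes the `daily` frame from the cells above): one-hot encode
# the day of week with pd.get_dummies so weekday/weekend patterns become features.
daily['day_of_week'] = daily.index.dayofweek  # 0 = Monday ... 6 = Sunday
daily = daily.join(pd.get_dummies(daily['day_of_week'], prefix='dow'))
daily.head()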
###Output
_____no_output_____
###Markdown
Abbreviated EDA
###Code
# What is the relationship between bikers and temperature?
# What is the relationship between bikers and date?
# What is the relationship between bikers and (min) temperature?
# What is the distribution of bikers on dry/wet days?
# How does the number of bikers vary by temperature and wet/dry?
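# One possible sketch for the last question (assumes `daily` from above): scatter of
# daily totals against max temperature, split by dry/wet days.
fig, ax = plt.subplots()
for dry, grp in daily.groupby('dry_day'):
    ax.scatter(grp['TMAX'], grp['Total'], alpha=0.4, label='dry' if dry else 'wet')
ax.set_xlabel('TMAX')
ax.set_ylabel('Total riders')
ax.legend()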
###Output
_____no_output_____
###Markdown
Modeling: KNN Regressor
###Code
# Split data into training and testing data
# Create a scaler and your classifier
# Define a pipeline that uses your scaler and classifier
# Define a grid to search through
# Perform a grid search of your pipeline
# Compare prediction to (test) data
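# One possible sketch (assumes `daily` from above; the feature list is an illustrative choice).
features = ['PRCP', 'dry_day', 'TMIN', 'TMAX', 'daylight_hrs']
data = daily[features + ['Total']].dropna()
X_train, X_test, y_train, y_test = train_test_split(
    data[features], data['Total'], test_size=0.2, random_state=42)
knn_pipeline = make_pipeline(MinMaxScaler(), KNeighborsRegressor())
param_grid = {'kneighborsregressor__n_neighbors': range(1, 20)}
grid = GridSearchCV(knn_pipeline, param_grid, cv=5)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.score(X_test, y_test))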
###Output
_____no_output_____
###Markdown
Feature Generation: Polynomial Transformations
###Code
# Add a polynomial transformation to the pipeline
# Define a pipeline that includes the polynomial transformation
# Define a grid to search through (including the degree of polynomial)
# Perform a grid search of your pipeline
# Visualize time trends
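# One possible sketch extending the pipeline above with a polynomial expansion
# (assumes `X_train`/`y_train` from the previous cell; the degree range is illustrative).
from sklearn.preprocessing import PolynomialFeatures
poly_pipeline = make_pipeline(MinMaxScaler(), PolynomialFeatures(), KNeighborsRegressor())
param_grid = {'polynomialfeatures__degree': [1, 2, 3],
              'kneighborsregressor__n_neighbors': range(1, 20)}
grid = GridSearchCV(poly_pipeline, param_grid, cv=5)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.score(X_test, y_test))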
###Output
_____no_output_____
###Markdown
Error assessment: find systematic errors
###Code
# Why are we getting this wrong?
# Assess error by day of the week
# Assess error by temperature and dry_day
# Assess error by precipitation
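# One possible sketch (assumes `grid`, `X_test` and `y_test` from above): mean residual
# per day of week to spot systematic errors.
residuals = y_test - grid.predict(X_test)
print(residuals.groupby(residuals.index.dayofweek).mean())  # 0 = Monday ... 6 = Sunday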
###Output
_____no_output_____
###Markdown
Feature Selection: Select best features
As a form of dimensionality reduction, only select the top percentile of features that meet a certain variance threshold.
###Code
# Create a percentile selector, add it to the pipeline
# (alternatives: a K-best selector, PCA, or others)
# Define a grid to search through (including the degree of polynomial AND percentile of best features)
# Fit the model
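# One possible sketch (assumes the objects from the cells above): keep only the
# top-scoring percentile of the expanded features before the regressor.
from sklearn.preprocessing import PolynomialFeatures
from sklearn.feature_selection import SelectPercentile, f_regression
select_pipeline = make_pipeline(MinMaxScaler(), PolynomialFeatures(include_bias=False),
                                SelectPercentile(f_regression), KNeighborsRegressor())
param_grid = {'polynomialfeatures__degree': [1, 2, 3],
              'selectpercentile__percentile': [25, 50, 75, 100],
              'kneighborsregressor__n_neighbors': range(1, 20)}
grid = GridSearchCV(select_pipeline, param_grid, cv=5)
grid.fit(X_train, y_train)
print(grid.best_params_)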
###Output
_____no_output_____
###Markdown
Classification of hazard in coal mines based on seismic data Load dependencies
###Code
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import arff
import pandas as pd
import seaborn as sns;
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import roc_auc_score, f1_score
from sklearn import preprocessing
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load and clean data
###Code
## load data and clean
data = arff.loadarff('./data/seismic-bumps.arff')
df = pd.DataFrame(data[0])
df['seismic'] = df['seismic'].str.decode('utf-8')
df['seismoacoustic'] = df['seismoacoustic'].str.decode('utf-8')
df['shift'] = df['shift'].str.decode('utf-8')
df['ghazard'] = df['ghazard'].str.decode('utf-8')
df['class'] = df['class'].str.decode('utf-8')
df['class'] = pd.to_numeric(df['class'])
df.head()
###Output
_____no_output_____
###Markdown
EDA
###Code
## EDA
df1 = df[['genergy', 'gpuls', 'gdenergy', 'gdpuls',
'nbumps', 'nbumps2',
'energy', 'maxenergy']].copy()
g = sns.pairplot(df1)
###Output
_____no_output_____
###Markdown
The plots above show some collinearity between attributes (e.g. `genergy` and `gpuls`, `energy` and `maxenergy`). The following will use regularization to mitigate the problem.
Build models
###Code
df_x = df.loc[:,['shift', 'genergy', 'gpuls', 'gdenergy', 'gdpuls',
'nbumps', 'nbumps2', 'nbumps3', 'nbumps4', 'nbumps5',
'nbumps6', 'nbumps7', 'nbumps89',
'energy', 'maxenergy']]
# true response
df_y = df.loc[:,['class']]
# responses from seismic theories
df_y1 = df.loc[:, ['seismic']]
df_y2 = df.loc[:, ['seismoacoustic']]
df_y3 = df.loc[:, ['ghazard']]
le = preprocessing.LabelEncoder()
le.fit(['a', 'b', 'c', 'd'])
df_y1['seismic'] = le.transform(df_y1['seismic'])
df_y2['seismoacoustic'] = le.transform(df_y2['seismoacoustic'])
df_y3['ghazard'] = le.transform(df_y3['ghazard'])
le2 = preprocessing.LabelEncoder()
le2.fit(['W', 'N'])
df_x['shift'] = le2.transform(df_x['shift'])
Xtrain, Xtest, ytrain, ytest = train_test_split(df_x, df_y, test_size=0.2, random_state=42)
print("Xtrain shape: ", Xtrain.shape)
print("Xtest shape: ", Xtest.shape)
## find the best regularization coefficient
## use ROC as the score
C = [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 1e2]
scores = []
for c in C:
logit = LogisticRegression(penalty='l1', C=c, max_iter=500)
logit.fit(Xtrain, ytrain.values.ravel())
scores.append(roc_auc_score(ytrain['class'].values, logit.predict(Xtrain)))
C_best = C[scores.index(max(scores))]
print("Best C: ", C_best)
clf = LogisticRegression(penalty='l1', C=C_best, max_iter = 500)
clf.fit(Xtrain, ytrain.values.ravel())
roc_train = roc_auc_score(ytrain['class'].values, clf.predict(Xtrain))
# print("training score: %.4f" % clf.score(Xtrain, ytrain))
print("training score: %.4f" % roc_train)
# print("test score: ", clf.score(Xtest, ytest))
roc_test = roc_auc_score(ytest['class'].values, clf.predict(Xtest))
print("test score: %.4f" % roc_test)
print("n_iter: ", clf.n_iter_)
clf.coef_
ind = ytest.index.values
# get the responses from the seismic, seismoacoustic and ghazard methods
# that correspond to indices in ytest
yseismic = df_y1.loc[ind, ['seismic']]
yseismoacoustic = df_y2.loc[ind, ['seismoacoustic']]
yghazard = df_y3.loc[ind, ['ghazard']]
# responses as probabilies from the logit model
yprob = clf.predict_proba(Xtest)
ypred = yprob[:,1] > 0.2 # threshold
###Output
_____no_output_____
###Markdown
From the plot below, to use the probabilities from the prediction, we need to set a threshold to determine if the response should be hazardous or not. The hard labels from the prediction will be mostly 0's. _Note:_ setting the threshold requires further study. One way is to tune the threshold on training sets and test the performance on test sets.
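A minimal sketch of such tuning (an illustration of the idea, not the procedure used below) could scan candidate cut-offs on the training predictions and keep the one with the best F1 score; it reuses the `clf`, `Xtrain` and `ytrain` objects fitted above.

```python
# hypothetical threshold scan on the training set
train_probs = clf.predict_proba(Xtrain)[:, 1]
thresholds = np.linspace(0.05, 0.5, 10)
f1_scores = [f1_score(ytrain['class'].values, train_probs > t) for t in thresholds]
best_threshold = thresholds[int(np.argmax(f1_scores))]
print('best threshold on the training set: %.2f' % best_threshold)
```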
###Code
plt.plot([i for i in range(len(ytest))], ytest, 'x', yprob[:,1], '.')
plt.ylabel('Probability')
plt.title('Raw results from prediction')
plt.plot([i for i in range(len(ytest))], ytest, 'o', ypred, '.')
plt.ylabel('Probability')
plt.title('Probabilities after cut-off')
###Output
_____no_output_____
###Markdown
Results
###Code
dy = { 'logit': pd.Series(ypred) }
dfy = pd.DataFrame(dy)
frames = [dfy, yseismic.reset_index(drop=True),
yseismoacoustic.reset_index(drop=True),
yghazard.reset_index(drop=True)]
# build the responses data frame (each column is responses from one method)
df_result = pd.concat(frames, axis = 1)
df_result = df_result*1 # convert bool to int
df_result.head()
yvote = (df_result == 0).sum(axis=1) # number of zeros on each row
yvote = (yvote <= 2)*1
# final results based on the vote from each of the four methods
# 0 means no/low hazard, 1 means hazardous
# if tie, assume response is 1 (hazardous)
df_result['ensemble'] = yvote.values
df_result['true'] = ytest.values
df_result.head(20)
# score from the ensemble method with logit regression
roc_auc_score(ytest['class'].values, df_result['ensemble'].values)
## compare to the three methods already in the dataset
frames = [yseismic.reset_index(drop=True),
yseismoacoustic.reset_index(drop=True),
yghazard.reset_index(drop=True)]
df_result0 = pd.concat(frames, axis = 1)
df_result0 = df_result0*1
yvote0 = (df_result0 == 0).sum(axis=1)
yvote0 = (yvote0 <= 2)*1
df_result0['ensemble'] = yvote0.values
df_result0['true'] = ytest.values
df_result0.head(20)
# score from the ensemble of the three methods in the original dataset
roc_auc_score(ytest['class'].values, df_result0['ensemble'].values)
# score from the seismic method (no ensemble)
roc_auc_score(ytest['class'].values, yseismic['seismic'].values)
# score from the seismoacoustic method (no ensemble)
roc_auc_score(ytest['class'].values, yseismoacoustic['seismoacoustic'].values)
###Output
_____no_output_____
###Markdown
Single run
###Code
# setup parameters
time_step = 0.4
n_steps = 1000
model = src.model.Model(
length = 1000,
n_lanes = 2,
density = 30, # cars per 1km lane
fraction_autonomous = 0, # autonomous vehicles have p_slowdown = 0 and mean values, no error in speed estimation
max_speed_mu = 120,
min_spacing = 2,
min_distance_mu = 2,
min_distance_min = 1,
min_distance_max = 3,
car_acc = 3.333, # m/s^2
car_dec = 5, # m/s^2
p_slowdown = 3, # frequency (per hour) of slowing down randomly
bias_right_lane = 1,
time_step = time_step,
seed = None,
verbose = 3
)
# run simulation for `n_steps`
def run():
for i in range(n_steps):
model.step()
%time run()
# plot the density over time
df = model.data.get_model_vars_dataframe()
plt.plot(df.index * time_step, df.Flow, "o")
plt.xlabel("Time (s)")
plt.ylabel("Flow $k$")
###Output
_____no_output_____
###Markdown
Batch run Note that the `model_reporters` and `agent_reporters` of `BatchRunner` (unlike the `DataCollector`) won’t collect the data every step of the model, but only at the end of each run. Because of this the following function is used to extract the relevant data from the models datacollector.
###Code
def get_density(model, initialisation_steps=0):
"""Extract density from model datacollector.
Parameters
----------
model
initialisation_steps -- number of initial steps to exclude from the mean.
"""
# time-evolution of density
densities = model.data.get_model_vars_dataframe().Density
# return the mean
return densities[initialisation_steps:].mean()
def get_flow(model, initialisation_steps=0, flow_per=10):
"""Extract flow from model datacollector.
Parameters
----------
model
initialisation_steps -- number of initial steps to exclude from the mean.
flow_per -- return the flow per this number of time_steps.
"""
# time-evolution of flow
flows = model.data.get_model_vars_dataframe().Flow
# return the mean
return flows[initialisation_steps:].mean() * flow_per
# setup parameters
n_lanes = [2, 3, 4]
density = np.linspace(10, 35, 20).astype(int)
#fraction_autonomous = np.linspace(0.10, 1, 20)
n_steps = 500 # for analysis = 500
initialisation_steps = 100 # for analysis = 100
iterations = 2 # for analysis = 10
fixed_params = {
"length": 1000,
"fraction_autonomous": 0, # autonomous vehicles have p_slowdown = 0 and mean values, no error in speed estimation
"max_speed_mu": 120,
"min_spacing": 2,
"min_distance_mu": 2,
"min_distance_min": 1,
"min_distance_max": 3,
"car_acc": 3.333, # m/s^2
"car_dec": 5, # m/s^2
"p_slowdown": 3, # frequency (per hour) of slowing down randomly
"bias_right_lane": 1,
"time_step": 0.1,
"seed" : None,
"verbose": 3
}
variable_params = {
"n_lanes": n_lanes,
"density": density,
#"fraction_autonomous": fraction_autonomous
}
# create and run `BatchRunner`
batch_run = BatchRunner(src.model.Model,
fixed_parameters=fixed_params,
variable_parameters=variable_params,
iterations=iterations,
max_steps=n_steps,
model_reporters={
"flow": lambda x: get_flow(x, initialisation_steps)
},
agent_reporters={},
display_progress=True)
print("Total iterations: ", np.product([len(var) for var in batch_run.variable_parameters.values()]) * batch_run.iterations)
sys.stdout.flush()
batch_run.run_all()
# get the dataframe and select the relevant columns
df = batch_run.get_model_vars_dataframe()
df = df[["length", "n_lanes", "density", "fraction_autonomous", "flow"]]
df.head()
df
# plot flow rate versus vehicle density
fig, ax = plt.subplots(1, 1)
for n_lane in n_lanes:
data = df[df.n_lanes == n_lane]
ax.plot(data.density[::2], data.groupby(["n_lanes","density"])["flow"].mean(), label="{} lanes".format(n_lane))
ax.set_xlabel("Density $k$")
ax.set_ylabel("Flow $q$")
ax.legend()
###Output
_____no_output_____
###Markdown
Sensitivity Analysis OFAT
###Code
%matplotlib inline
from SALib.sample import saltelli
from src.model import Model
from src.car import Car
from mesa.batchrunner import BatchRunner
from SALib.analyze import sobol
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations
# Set the repetitions, the amount of steps, and the amount of distinct values per variable
max_steps = 20 # for analysis = 500
initialisation_steps = 0 # for analysis = 100
distinct_samples = 5 # for analysis = 20
replicates = 10
# We define our variables and bounds
problem = {
'num_vars': 5,
'names': ['p_slowdown', 'n_lanes', 'density', 'fraction_autonomous', "max_speed_mu"],
'bounds': [[1, 6], [2, 4], [10, 35], [0, 1], [100, 130]]
}
fixed_params = {
"length": 1000,
"n_lanes": 2,
"density": 15, # cars per 1km lane
"fraction_autonomous": 0, # autonomous vehicles have p_slowdown = 0 and mean values, no error in speed estimation
"max_speed_mu": 120,
"min_spacing": 2,
"min_distance_mu": 2,
"min_distance_min": 1,
"min_distance_max": 3,
"car_acc": 3.333, # m/s^2
"car_dec": 5, # m/s^2
"p_slowdown": 3, # frequency (per hour) of slowing down randomly
"bias_right_lane": 1,
"time_step": 0.4,
"seed" : None,
"verbose": 3
}
# Set the outputs
model_reporters = {"flow": lambda x: get_flow(x, initialisation_steps)}
data = {}
def make_var_param(params, var_name):
new_params = params
del new_params[var_name]
return new_params
for i, var in enumerate(problem['names']):
# Get the bounds for this variable and get <distinct_samples> samples within this space (uniform)
samples = np.linspace(*problem['bounds'][i], num=distinct_samples)
    # Keep in mind that n_lanes should be an integer. You will have to change
    # your code to accommodate this or sample in such a way that you only get integers.
if var == 'n_lanes':
samples = np.linspace(*problem['bounds'][i], num=5, dtype=int)
fixed_parameters_alt = make_var_param(fixed_params, var)
batch = BatchRunner(Model,
max_steps=max_steps,
iterations=replicates,
fixed_parameters=fixed_parameters_alt,
variable_parameters={var: samples},
model_reporters=model_reporters,
display_progress=True)
batch.run_all()
data[var] = batch.get_model_vars_dataframe()
def plot_param_var_conf(ax, df, var, param, i):
"""
Helper function for plot_all_vars. Plots the individual parameter vs
variables passed.
Args:
ax: the axis to plot to
df: dataframe that holds the data to be plotted
var: variables to be taken from the dataframe
param: which output variable to plot
"""
x = df.groupby(var).mean().reset_index()[var]
y = df.groupby(var).mean()[param]
replicates = df.groupby(var)[param].count()
err = (1.96 * df.groupby(var)[param].std()) / np.sqrt(replicates)
ax.plot(x, y, c='k')
ax.fill_between(x, y - err, y + err)
ax.set_xlabel(var)
ax.set_ylabel(param)
def plot_all_vars(df, param):
"""
Plots the parameters passed vs each of the output variables.
Args:
df: dataframe that holds all data
param: the parameter to be plotted
"""
f, axs = plt.subplots(len(problem['names']), figsize=(7, 20))
for i, var in enumerate(problem['names']):
plot_param_var_conf(axs[i], data[var], var, param, i)
## What should go here ... in place of 'flow'?
for param in model_reporters:
plot_all_vars(data, param)
plt.show()
###Output
_____no_output_____
###Markdown
Global Sensitivity Analysis
###Code
# Set the repetitions, the amount of steps, and the amount of distinct values per variable
max_steps = 20 # for analysis = 500
initialisation_steps = 0 # for analysis = 100
distinct_samples = 5 # for analysis = 20
replicates = 10
# We get all our samples here
param_values = saltelli.sample(problem, distinct_samples)
from IPython.display import clear_output
fixed_params = {
"length": 1000,
#"n_lanes": 2,
#"density": 15, # cars per 1km lane
#"fraction_autonomous": 0, # autonomous vehicles have p_slowdown = 0 and mean values, no error in speed estimation
#"max_speed_mu": 120,
"min_spacing": 2,
"min_distance_mu": 2,
"min_distance_min": 1,
"min_distance_max": 3,
"car_acc": 3.333, # m/s^2
"car_dec": 5, # m/s^2
#"p_slowdown": 3, # frequency (per hour) of slowing down randomly
"bias_right_lane": 1,
"time_step": 0.4,
"seed" : None,
"verbose": 3
}
batch = BatchRunner(Model,
max_steps=max_steps,
fixed_parameters=fixed_params,
variable_parameters={name:[] for name in problem['names']},
model_reporters=model_reporters)
count = 0
for i in range(replicates):
for vals in param_values:
# Change parameters that should be integers
vals = list(vals)
vals[1] = int(vals[1])
# Transform to dict with parameter names and their values
variable_parameters = {}
for name, val in zip(problem['names'], vals):
variable_parameters[name] = val
batch.run_iteration(variable_parameters, tuple(vals), count)
count += 1
clear_output()
print(f'{count / (len(param_values) * (replicates)) * 100:.2f}% done')
data = batch.get_model_vars_dataframe()
Si_flow = sobol.analyze(problem, data['flow'].values, print_to_console=False)
def plot_index(s, params, i, title=''):
"""
Creates a plot for Sobol sensitivity analysis that shows the contributions
of each parameter to the global sensitivity.
Args:
s (dict): dictionary {'S#': dict, 'S#_conf': dict} of dicts that hold
the values for a set of parameters
params (list): the parameters taken from s
i (str): string that indicates what order the sensitivity is.
title (str): title for the plot
"""
print(i)
if i == '2':
p = len(params)
params = list(combinations(params, 2))
indices = s['S' + i].reshape((p ** 2))
indices = indices[~np.isnan(indices)]
errors = s['S' + i + '_conf'].reshape((p ** 2))
errors = errors[~np.isnan(errors)]
else:
indices = s['S' + i]
errors = s['S' + i + '_conf']
plt.figure()
l = len(indices)
plt.title(title)
plt.ylim([-0.2, len(indices) - 1 + 0.2])
plt.yticks(range(l), params)
plt.errorbar(indices, range(l), xerr=errors, linestyle='None', marker='o')
plt.axvline(0, c='k')
# First order
plot_index({k: Si_flow[k] for k in list(Si_flow)[:2]}, problem['names'], '1', 'First order sensitivity')
plt.show()
# Second order
plot_index({k: Si_flow[k] for k in list(Si_flow)[4:6]}, problem['names'], '2', 'Second order sensitivity')
plt.show()
# Total order
plot_index({k: Si_flow[k] for k in list(Si_flow)[2:4]}, problem['names'], 'T', 'Total order sensitivity')
plt.show()
###Output
1
###Markdown
FMA: A Dataset For Music Analysis
Kirell Benzi, Michaël Defferrard, Pierre Vandergheynst, Xavier Bresson, EPFL LTS2.
Analysis
TODO:
* Figures and tables for the paper.
* Genre tree with number of tracks per genre.
###Code
%matplotlib inline
import utils
import librosa
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os.path
from sklearn.preprocessing import MultiLabelBinarizer
df = pd.read_json(os.path.join('..', 'fma_small', 'fma_small.json'))
#df = pd.read_json(os.path.join('..', 'fma_medium.json'))
#df = pd.read_json(os.path.join('..', 'fma_large.json'))
###Output
_____no_output_____
###Markdown
1 Genres
Analysis
* Genre hierarchy
* Top- and sub-genres
Todo
* First plateau should be flat, no cross-over top genres
Observations
* Genres appearing most are the ones selected in the dataset.
* Most songs only have one genre.
###Code
enc = MultiLabelBinarizer()
genres_indicator = enc.fit_transform(df['genres'])
genres_names = enc.classes_
cross_correlation = genres_indicator.T @ genres_indicator
genres_count = cross_correlation.diagonal()
sort = np.argsort(genres_count)[::-1]
genres_count = genres_count[sort]
plt.figure(figsize=(25, 10))
plt.plot(genres_count)
plt.xticks(range(len(genres_names)), genres_names[sort], rotation=90);
plt.xlim((0, len(genres_names)))
plt.figure(figsize=(17, 5))
plt.hist(genres_count, bins=100);
plt.figure(figsize=(17, 5))
tmp = genres_indicator.sum(axis=1)
plt.hist(tmp, bins=range(0, tmp.max()))
plt.yscale('log')
plt.xlim((1, tmp.max()+1))
plt.xticks(np.arange(tmp.max())+1.5, np.arange(tmp.max())+1);
np.fill_diagonal(cross_correlation, 0)
plt.figure(figsize=(28, 28))
plt.imshow(np.log(cross_correlation))
plt.yticks(range(len(genres_names)), genres_names);
plt.xticks(range(len(genres_names)), genres_names, rotation=90);
cross_correlation = np.tril(cross_correlation, k=-1)
sort = np.argsort(cross_correlation.flatten())
tmp = cross_correlation.flatten()[sort]
plt.figure(figsize=(17, 5))
plt.plot(tmp[tmp>0][::-1]);
N = 20
indices = np.unravel_index(sort[:-N:-1], cross_correlation.shape)
for i, j in zip(*indices):
print('{}: {} | {}'.format(cross_correlation[i, j], genres_names[i], genres_names[j]))
###Output
_____no_output_____
###Markdown
Data
###Code
with open('1g-word-1m-benchmark-r13output/training-monolingual.tokenized.shuffled/news.en-00001-of-00100') as sentences:
sentences = sentences.read().split('\n')
sentences.remove('')
len(sentences)
###Output
_____no_output_____
###Markdown
Parsing time evaluation of Spacy models Case 1: en-core-web-sm
###Code
import time
import pandas as pd
import spacy
from tqdm import tqdm

df = pd.DataFrame(columns=['num_sentences', 'time'])
# df_sm.append({'num_sentences': 400, 'time': 5}, ignore_index=True, inpl)
spacy.require_gpu()
nlp = spacy.load("en_core_web_sm")
tic = time.time()
for size in tqdm(range(60000)):
sentence = sentences[size]
nlp(sentence)
tac = time.time() - tic
data = {'num_sentences': size, 'time': tac}
df = df.append(data, ignore_index=True)
df.to_csv('log_2.csv', index=False)
from matplotlib import pyplot as plt
df_sm = pd.read_csv('log_2.csv')
df_md = pd.read_csv('log_md_2.csv')
df_lg = pd.read_csv('log_lg_2.csv')
plt.plot(df_sm.num_sentences.to_list(), df_sm.time.to_list(), color='green', label='en_core_web_sm')
plt.plot(df_md.num_sentences.to_list(), df_md.time.to_list(), color='blue', label='en_core_web_md')
plt.plot(df_lg.num_sentences.to_list(), df_lg.time.to_list(), color='red', label='en_core_web_lg')
plt.xlabel('Nombre de phrases')
plt.ylabel('Temps d\'analyse (s)')
plt.legend()
plt.show()
df_sm.time.to_list()[-1]/ df_sm.num_sentences.to_list()[-1]
df_md.time.to_list()[-1] / df_md.num_sentences.to_list()[-1]
df_lg.time.to_list()[-1]/ df_lg.num_sentences.to_list()[-1]
###Output
_____no_output_____
###Markdown
Drop Rate in Fishing and Foraging DFK Quests Analysis and Visualization
###Code
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import os
import pandas as pd
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
if not os.path.exists('imgs'):
os.makedirs('imgs')
IMG_SIZE = [1000,700]
def plot2d(df,cols3, t, y, decimals=2, img_size = [700,500], img_name = None):
df = pd.pivot_table(df, values=cols3[0], index=cols3[1], columns=[cols3[2]])
fig = ff.create_annotated_heatmap(
z=df.to_numpy().round(decimals=decimals),
x=df.columns.tolist(),
y=df.index.tolist(),
colorscale=['red', 'orange', 'yellow', 'green'],
hoverongaps=True
)
r = fig.update_layout(title_text=f'<i><b>{t}</b></i>',
yaxis = dict(title=y),
xaxis = dict(title='Profession Level')
)
r =fig['layout']['xaxis']['side'] = 'bottom'
r = fig.update_layout(width = img_size[0], height = img_size[1], margin=dict(t=50, l=100))
fig['data'][0]['showscale'] = True
fig.show()
if img_name:
fig.write_image(img_name)
def df_diff(df1,df2,col,min_cnt):
d1 = df1.groupby(['stats','level'])[col].agg(['mean','count']).reset_index()
d2 = df2.groupby(['stats','level'])[col].agg(['mean','count']).reset_index()
d = pd.merge(d1,d2,on=['stats','level'])
d[col] = d['mean_x']-d['mean_y']
return d[(d['count_x']>min_cnt) &(d['count_y']>min_cnt)]
usecols = ['level', 'stats', 'stamina', 'DFKGOLD', 'DFKTEARS', 'DFKSHVAS', 'DFKEGG']
fo = pd.read_csv('./data/foraging.csv', usecols = usecols)
fi = pd.read_csv('./data/fishing.csv', usecols = usecols)
###Output
_____no_output_____
###Markdown
DFKGOLD
###Code
for col in ['DFKGOLD']:
min_cnt = 100
df = fi[fi['stamina'] == 5].groupby(['stats','level'])[col].agg(['mean','count']).reset_index()
df = df[df['level']<=15]
plot2d(df[df['count']>min_cnt], ['mean','stats','level'], f'<b>FISHING</b>: Average {col} per Quest for Fishers','Stats (AGI+LCK)', decimals = 1, img_size = IMG_SIZE, img_name=f'./imgs/{col}_2d_fishing_fishers.png')
df = fi[fi['stamina'] == 7].groupby(['stats','level'])[col].agg(['mean','count']).reset_index()
df = df[df['level']<=15]
plot2d(df[df['count']>min_cnt], ['mean','stats','level'], f'<b>FISHING</b>: Average {col} per Quest for Non Fishers','Stats (AGI+LCK)', decimals = 1, img_size = IMG_SIZE, img_name=f'./imgs/{col}_2d_fishing_others.png')
df = df_diff(fi[fi['stamina'] == 5],fi[fi['stamina'] == 7],col,min_cnt)
df = df[df['level']<=15]
plot2d(df,['DFKGOLD','stats','level'],f'FISHING: Fishers vs Non-Fishers: Difference in Average {col} per Quest','stats (AGI+LCK)', decimals = 1, img_size = IMG_SIZE, img_name=f'./imgs/{col}_2d_fishers_vs_non_fishers.png')
print(f"Fishers earn an average + {df[col].mean():.2} gold per quest in recpect to Non-Fishers")
df = fo[fo['stamina'] == 5].groupby(['stats','level'])[col].agg(['mean','count']).reset_index()
df = df[df['level']<=15]
plot2d(df[df['count']>min_cnt], ['mean','stats','level'], f'<b>FORAGING</b>: Average {col} per Quest for Foragers','Stats (DEX+INT)', decimals = 1, img_size = IMG_SIZE, img_name=f'./imgs/{col}_2d_foraging_foragers.png')
df = fo[fo['stamina'] == 7].groupby(['stats','level'])[col].agg(['mean','count']).reset_index()
df = df[df['level']<=15]
plot2d(df[df['count']>min_cnt], ['mean','stats','level'], f'<b>FORAGING</b>: Average {col} per Quest for Non Foragers','Stats (DEX+INT)', decimals = 1, img_size = IMG_SIZE, img_name=f'./imgs/{col}_2d_foraging_others.png')
df = df_diff(fo[fo['stamina'] == 5],fo[fo['stamina'] == 7],col,min_cnt)
df = df[df['level']<=15]
plot2d(df,['DFKGOLD','stats','level'],f'FORAGING: Foragers vs Non-Foragers: Difference in Average {col} per Quest','stats (DEX+INT)', decimals = 1, img_size = IMG_SIZE, img_name=f'./imgs/{col}_2d_foragers_vs_non_foragers.png')
print(f"Foragers earn an average + {df[col].mean():.2} gold per quest in recpect to Non-Foragers")
min_cnt = 100
df = df_diff(fo[(fo['stamina'] == 5)],fi[fi['stamina'] == 5],col,min_cnt)
df = df[df['level']<=15]
plot2d(df,['DFKGOLD','stats','level'],f'FORAGING Foragers vs FISHING Fishers: Difference in Average {col} per Quest','stats (DEX+INT for Foragers, AGI+LCK for Fishers)', decimals = 1, img_size = IMG_SIZE, img_name=f'./imgs/{col}_2d_fishing_vs_foraging.png')
print(f"Foragers earn an average + {df[col].mean():.2} gold per quest in recpect to Fishers")
###Output
_____no_output_____
###Markdown
Gaia's Tears, Shiva Runes, and Eggs
###Code
def calc_means(d, main_prof, agg ):
d['main_prof'] = d['stamina'].apply(lambda x: main_prof if x ==5 else 'other')
d1 = d.groupby(['main_prof',agg])['DFKGOLD','DFKTEARS','DFKSHVAS','DFKEGG'].agg(['mean','std']).reset_index()
d1.columns = [col[0] if (col[1] == '' or col[1] == 'mean') else '_'.join((col[0], str(col[1]))) for col in d1.columns]
d2 = d.groupby(['main_prof',agg]).agg(count = pd.NamedAgg(column='DFKGOLD',aggfunc='count'))
return pd.merge(d1,d2,on=[agg,'main_prof'])
def plot(dfs,column,xaxis, title="T", img_size = [700,500], img_name = None):
fig = make_subplots(specs=[[{"secondary_y": False}]])
profession = ['foraging','fishing']
colors=['red','blue']
line_dash = ['dot',None]
i = 0
j = 0
for df in dfs:
for x, tmp in df.groupby('main_prof'):
r = fig.add_trace(
go.Scatter(x=tmp[xaxis], y=tmp[column], name=f"{profession[i]}_{x}", line=dict(color=colors[i], dash=line_dash[(j+1)%2])),
secondary_y=False,
)
j+=1
i+=1
r = fig.update_layout(title_text=f"<b>{title}</b>")
r = fig.update_xaxes(title_text=xaxis)
r = fig.update_yaxes(title_text='Drop per Quest')
fig.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1,
xanchor="left",
x=0
))
r = fig.update_layout(width = img_size[0], height = img_size[1])
fig.show()
if img_name:
fig.write_image(img_name)
fos = calc_means(fo, 'forager','stats')
fos = fos[fos['stats']<=25]
fis = calc_means(fi, 'fisher','stats')
fis = fis[fis['stats']<=25]
fol = calc_means(fo,'forager','level')
fol = fol[fol['level']<=8]
fil = calc_means(fi,'fisher','level')
fil = fil[fil['level']<=8]
for col,decimal in zip(['DFKTEARS','DFKSHVAS','DFKEGG'],[3,3,4]):
min_cnt = 1000
plot([fos,fis], col, 'stats', title=col, img_name = f"./imgs/{col}_1d_stats.png")
plot([fol,fil], col, 'level', title=col, img_name = f"./imgs/{col}_1d_level.png")
# df = fo[fo['stamina'] == 5].groupby(['stats','level'])[col].agg(['mean','count']).reset_index()
# plot2d(df[df['count']>min_cnt] ,['mean','stats','level'],f'Foragers Average {col} per Quest','Stats (DEX+INT)',decimal, img_size = IMG_SIZE, img_name = f"./imgs/{col}_2d_foragers.png")
# df = fi[fi['stamina'] == 5].groupby(['stats','level'])[col].agg(['mean','count']).reset_index()
# plot2d(df[df['count']>min_cnt] ,['mean','stats','level'],f'Fishers Average {col} per Quest','Stats (AGI+LCK)',decimal, img_size = IMG_SIZE, img_name = f"./imgs/{col}_2d_foragers.png")
###Output
_____no_output_____
###Markdown
Colorful graph
###Code
importlib.reload(lm)
Vasc = [[],[],[],[]]
NonResp = [[],[],[],[]]
for lesion_id in lesions:
P = lm.get_paths_dict(lesion_id, target_dir)
M = masks.get_mask(P['ct24Tx']['crop']['tumor'], img_path=P['ct24Tx']['crop']['img'], overlaid=True)
I,D = hf.nii_load(P['ct24Tx']['crop']['img'])
if not exists(P['ct24Tx']['mrbl']['enh']+".off"):
mrblM = np.zeros(M.shape)
else:
mrblM = masks.get_mask(P['ct24Tx']['mrbl']['enh'], D, I.shape)
if not exists(P['ct24Tx']['mr30']['enh']+".off"):
mr30M = np.zeros(M.shape)
else:
mr30M = masks.get_mask(P['ct24Tx']['mr30']['enh'], D, I.shape)
Masks = [(M!=0) & (M<liplvls[1]),
(M>liplvls[1]) & (M<liplvls[2]),
(M>liplvls[2]) & (M<liplvls[3]),
M>liplvls[3]]
for ix,M in enumerate(Masks):
if M.sum() > 0:
Vasc[ix].append((M*mrblM!=0).sum()/M.sum())
NonResp[ix].append((M*mrblM*mr30M!=0).sum()/(M*mrblM!=0).sum())
else:
Vasc[ix].append(np.nan)
NonResp[ix].append(np.nan)
#lm.reg_to_ct24(lesion_id, target_dir)
np.isnan(Vasc[3]).sum()
np.nanmean(Vasc,1)
###Output
_____no_output_____
###Markdown
Vascularization statistics
###Code
sum(master_df['selective=0']==1)
vasc_depo_df = pd.read_excel(C.data_xls_path, "Perfusion-Deposition Data")
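# (Added note) get_dvasc_df below post-processes the perfusion-deposition sheet: in "density" mode it
# converts the cumulative N/V/A column families (presumably necrotic, viable, and all tissue) at the
# three lower lipiodol levels into per-interval values by subtracting the next-higher level's column;
# in "V-N" mode it adds a single column holding the viable-minus-necrotic difference at liplvls[1].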
def get_dvasc_df(vasc_depo_df, mode="density"):
dvasc_df = copy.deepcopy(vasc_depo_df)
if mode == "density":
for l in ["N", "V", "A"]:
for L_ix in range(3):
dvasc_df[str(liplvls[L_ix])+l] = dvasc_df[str(liplvls[L_ix])+l] - dvasc_df[str(liplvls[L_ix+1])+l]
elif mode == "V-N":
dvasc_df["%ddVN"%liplvls[1]] = dvasc_df["%dV"%liplvls[1]] - dvasc_df["%dN"%liplvls[1]]
return dvasc_df
###Output
_____no_output_____
###Markdown
Upper graph
###Code
dvasc_df = get_dvasc_df(vasc_depo_df)
print(scipy.stats.wilcoxon(vasc_depo_df["%dV"%liplvls[1]], vasc_depo_df["%dN"%liplvls[1]]))
for i in range(4):
subset = (dvasc_df["%dV" % liplvls[i]] - dvasc_df["%dN" % liplvls[i]]).dropna()
print("%.1f%%+-%.1f%% (s.e.=%.1f%%)" % (np.mean(subset)*100,
np.std(subset)*100, np.std(subset)*100/(len(subset)**.5)))
#DV = dvasc_df.dropna()
#[scipy.stats.wilcoxon(DV["%dV" % liplvls[i]], DV["%dN" % liplvls[i]]) for i in range(4)]
for i in range(4):
subset = dvasc_df[["%dV" % liplvls[i], "%dN" % liplvls[i]]].dropna()
print(scipy.stats.wilcoxon(subset["%dV" % liplvls[i]], subset["%dN" % liplvls[i]]))
master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID")#"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\MASTER SS SOPHIE.xlsx")
#dvasc_df = dvasc_df.join(master_df, how='inner')
dvasc_df = vasc_depo_df.join(master_df, how='inner')
i=1
subset1 = dvasc_df.loc[dvasc_df["0=well delineated, 1=infiltrative"] == 0, "%dA" % liplvls[i]].dropna()
subset2 = dvasc_df.loc[dvasc_df["0=well delineated, 1=infiltrative"] == 1, "%dA" % liplvls[i]].dropna()
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
i=1
subset1 = dvasc_df.loc[dvasc_df["selective=0"] == 0, "%dA" % liplvls[i]].dropna()
subset2 = dvasc_df.loc[dvasc_df["selective=0"] == 1, "%dA" % liplvls[i]].dropna()
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
i=1
subset1 = dvasc_df.loc[dvasc_df["HCC(0), ICC(1), other(2)"] == 0, "%dA" % liplvls[i]].dropna()
subset3 = dvasc_df.loc[dvasc_df["HCC(0), ICC(1), other(2)"] == 1, "%dA" % liplvls[i]].dropna()
subset2 = dvasc_df.loc[dvasc_df["HCC(0), ICC(1), other(2)"] == 2, "%dA" % liplvls[i]].dropna()
print(scipy.stats.kruskal(subset1, subset2, subset3).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
print_subset_stats(subset3)
kwargs = {"data":dvasc_df, "size":3, "kind":"bar", "color":"#C3C3C3", "legend":False} #, "aspect":.8
g = sns.factorplot(x="0=well delineated, 1=infiltrative", y="%dA"%liplvls[1], aspect=1., **kwargs)
set_g_bar(g, join(C.fig_dir, "Vascularization figures", "Upper graph", "well-del vs infilt.png"))
g = sns.factorplot(x="selective=0", y="%dA"%liplvls[1], aspect=1., **kwargs)
set_g_bar(g, join(C.fig_dir, "Vascularization figures", "Upper graph", "selective vs lobar.png"))
g = sns.factorplot(x="HCC(0), ICC(1), other(2)", y="%dA"%liplvls[1], order=[0,2,1], aspect=1.5, **kwargs)
set_g_bar(g, join(C.fig_dir, "Vascularization figures", "Upper graph", "tumor entity.png"))
g = sns.factorplot(x="0A", y="%dA"%liplvls[1], aspect=.5, **kwargs)
set_g_bar(g, join(C.fig_dir, "Vascularization figures", "Upper graph", "all tumors.png"))
###Output
_____no_output_____
###Markdown
Middle Graph (Necro to Viable diff, no Lip breakdown)
###Code
Vdf = vasc_depo_df.dropna()
dvasc_df = get_dvasc_df(vasc_depo_df, "V-N")
master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID")#"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\MASTER SS SOPHIE.xlsx")
#master_df = master_df.join(pattern_df)
dvasc_df = dvasc_df.join(master_df)
i = 1
subset1 = dvasc_df.loc[dvasc_df["0=well delineated, 1=infiltrative"] == 0, "%ddVN" % liplvls[i]].dropna()
subset2 = dvasc_df.loc[dvasc_df["0=well delineated, 1=infiltrative"] == 1, "%ddVN" % liplvls[i]].dropna()
print("%.2f" % scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
i = 1
subset1 = dvasc_df.loc[dvasc_df["selective=0"] == 0, "%ddVN" % liplvls[i]].dropna()
subset2 = dvasc_df.loc[dvasc_df["selective=0"] == 1, "%ddVN" % liplvls[i]].dropna()
print("%.2f" % scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
i = 1
subset1 = dvasc_df.loc[dvasc_df["HCC(0), ICC(1), other(2)"] == 0, "%ddVN" % liplvls[i]].dropna()
subset3 = dvasc_df.loc[dvasc_df["HCC(0), ICC(1), other(2)"] == 1, "%ddVN" % liplvls[i]].dropna()
subset2 = dvasc_df.loc[dvasc_df["HCC(0), ICC(1), other(2)"] == 2, "%ddVN" % liplvls[i]].dropna()
print("%.2f" % scipy.stats.kruskal(subset1, subset2, subset3).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
print_subset_stats(subset3)
df = pd.DataFrame(columns=["Any Coverage", "Lesion_id", "Tissue Type",
"Tumor Growth", "Tumor Type", "TACE Type"])
master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID")#"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\MASTER SS SOPHIE.xlsx")
master_df = master_df.join(pattern_df)
modality = "mrbl"
importlib.reload(lvis)
ix = 0
for lesion_id, row in Vdf.iterrows():
const = lvis.get_df_entry(lesion_id, master_df, modality)
df.loc[ix] = [row["%dN"%liplvls[1]], lesion_id, "Necrosis"] + const
df.loc[ix+1] = [row["%dV"%liplvls[1]], lesion_id, "Viable"] + const
ix += 2
def set_g_bar(g, save_path):
g.set(yticks=[0.,.2,.4,.6,.8,1.], ylim=(0.,1.));
for gax in g.axes[0]:
gax.set_xlabel("")
gax.set_ylabel("")
#gax.tick_params('x',width=0)
gax.set_xticks([], minor=False)
gax.set_yticks([], minor=False)
plt.setp(gax.patches, linewidth=1, edgecolor='k')
g.set_titles(visible=False)
#g.axes[0][0].set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"]);
sns.despine(top=True, right=True, left=True)
g.fig.subplots_adjust(left=.2, top=.95)
#g.fig.tight_layout(w_pad=1)
#plt.setp(g.ax.lines,linewidth=1);
g.fig.savefig(save_path, width=5, dpi=150, pad_inches=0, transparent=True)
plt.close()
kwargs = {"x":"Tissue Type", "data":df, "size":3, "aspect":.8, "kind":"bar", "legend":False}#, "ci":None
g1 = sns.factorplot(y="Any Coverage", color="#D3D3D3", **kwargs)
set_g_bar(g1, join(C.fig_dir, "Vascularization figures", "Mid graph", "Mean.png"))
for category, order in [("Tumor Growth", None), ("Tumor Type", None),
("TACE Type", ["Selective", "Lobar"])]: #, ("Sparsity", ["Sparse", "Non"])
order = lan.get_actual_order(category, df, order)
g1 = sns.factorplot(y="Any Coverage", col=category, color="#D3D3D3", col_order=order, **kwargs)
set_g_bar(g1, join(C.fig_dir, "Vascularization figures", "Mid graph", "%s.png" % category))
###Output
_____no_output_____
###Markdown
Alternative Mid Graph (Lip density, no Necro/Viable separation)
###Code
Vdf = vasc_depo_df.dropna()
dvasc_df = get_dvasc_df(vasc_depo_df)
master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID")#"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\MASTER SS SOPHIE.xlsx")
#master_df = master_df.join(pattern_df)
dvasc_df = dvasc_df.join(master_df)
for i in range(4):
print(liplvls[i])
subset1 = dvasc_df.loc[dvasc_df["0=well delineated, 1=infiltrative"] == 0, "%dA" % liplvls[i]].dropna()
subset2 = dvasc_df.loc[dvasc_df["0=well delineated, 1=infiltrative"] == 1, "%dA" % liplvls[i]].dropna()
print(scipy.stats.mannwhitneyu(subset1, subset2))
print("%.1f%%+-%.1f%% (s.e.=%.1f%%)" % (np.mean(subset1)*100, np.std(subset1)*100, np.std(subset1)*100/(len(subset1)**.5)))
print("%.1f%%+-%.1f%% (s.e.=%.1f%%)" % (np.mean(subset2)*100, np.std(subset2)*100, np.std(subset2)*100/(len(subset2)**.5)))
for i in range(4):
print(liplvls[i])
subset1 = dvasc_df.loc[dvasc_df["selective=0"] == 0, "%dA" % liplvls[i]].dropna()
subset2 = dvasc_df.loc[dvasc_df["selective=0"] == 1, "%dA" % liplvls[i]].dropna()
print(scipy.stats.mannwhitneyu(subset1, subset2))
print("%.1f%%+-%.1f%% (s.e.=%.1f%%)" % (np.mean(subset1)*100, np.std(subset1)*100, np.std(subset1)*100/(len(subset1)**.5)))
print("%.1f%%+-%.1f%% (s.e.=%.1f%%)" % (np.mean(subset2)*100, np.std(subset2)*100, np.std(subset2)*100/(len(subset2)**.5)))
for i in range(4):
subset1 = dvasc_df.loc[dvasc_df["HCC(0), ICC(1), other(2)"] == 0, "%dA" % liplvls[i]].dropna()
subset3 = dvasc_df.loc[dvasc_df["HCC(0), ICC(1), other(2)"] == 1, "%dA" % liplvls[i]].dropna()
subset2 = dvasc_df.loc[dvasc_df["HCC(0), ICC(1), other(2)"] == 2, "%dA" % liplvls[i]].dropna()
print(scipy.stats.kruskal(subset1, subset2, subset3))
print("%.1f%%+-%.1f%% (s.e.=%.1f%%)" % (np.mean(subset1)*100, np.std(subset1)*100, np.std(subset1)*100/(len(subset1)**.5)))
print("%.1f%%+-%.1f%% (s.e.=%.1f%%)" % (np.mean(subset2)*100, np.std(subset2)*100, np.std(subset2)*100/(len(subset2)**.5)))
print("%.1f%%+-%.1f%% (s.e.=%.1f%%)" % (np.mean(subset3)*100, np.std(subset3)*100, np.std(subset3)*100/(len(subset3)**.5)))
def get_row(row):
char="A"
return [row["%d%s"%(liplvls[1],char)],
row["%d%s"%(liplvls[1],char)] - row["%d%s"%(liplvls[3],char)],
row["%d%s"%(liplvls[1],char)] - row["%d%s"%(liplvls[2],char)]]
df = pd.DataFrame(columns=["Any Coverage", "Low-Mid Coverage", "Low Coverage", "Lesion_id",
"Tumor Growth", "Tumor Type", "TACE Type"])
importlib.reload(lvis)
ix = 0
modality = "mrbl"
for lesion_id, row in Vdf.iterrows():
const = lvis.get_df_entry(lesion_id, master_df, modality)
df.loc[ix] = get_row(row) + [lesion_id] + const
ix += 1
def set_g_bar(g, save_path):
g.set(yticks=[0.,.2,.4,.6,.8,1.], ylim=(0.,1.));
for gax in g.axes[0]:
gax.set_xlabel("")
gax.set_ylabel("")
#gax.tick_params('x',width=0)
gax.set_xticks([], minor=False)
gax.set_yticks([], minor=False)
plt.setp(gax.patches, linewidth=1, edgecolor='k')
g.set_titles(visible=False)
#g.axes[0][0].set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"]);
sns.despine(top=True, right=True, left=True)
g.fig.subplots_adjust(left=.2, top=.95)
#g.fig.tight_layout(w_pad=1)
#plt.setp(g.ax.lines,linewidth=1);
g.fig.savefig(save_path, width=5, dpi=150, pad_inches=0, transparent=True)
plt.close()
kwargs = {"data":df, "size":3, "aspect":.8, "kind":"bar", "ci":None, "legend":False}
g1 = sns.factorplot(y="Any Coverage", color="#D3D3D3", **kwargs)
set_g_bar(g1, join(C.fig_dir, "Vascularization figures", "Mid graph", "Mean1.png"))
g2 = sns.factorplot(y="Low-Mid Coverage", color="#939393", **kwargs)
set_g_bar(g2, join(C.fig_dir, "Vascularization figures", "Mid graph", "Mean2.png"))
g3 = sns.factorplot(y="Low Coverage", color="#333333", **kwargs)
set_g_bar(g3, join(C.fig_dir, "Vascularization figures", "Mid graph", "Mean3.png"))
for category, order in [("Tumor Growth", None), ("Tumor Type", None),
("TACE Type", ["Selective", "Lobar"])]: #, ("Sparsity", ["Sparse", "Non"])
order = lm.get_actual_order(category, df, order)
g1 = sns.factorplot(y="Any Coverage", col=category, color="#D3D3D3", col_order=order, **kwargs)
set_g_bar(g1, join(C.fig_dir, "Vascularization figures", "Mid graph", "%s1.png" % category))
g2 = sns.factorplot(y="Low-Mid Coverage", col=category, color="#939393", col_order=order, **kwargs)
set_g_bar(g2, join(C.fig_dir, "Vascularization figures", "Mid graph", "%s2.png" % category))
g3 = sns.factorplot(y="Low Coverage", col=category, color="#333333", col_order=order, **kwargs)
set_g_bar(g3, join(C.fig_dir, "Vascularization figures", "Mid graph", "%s3.png" % category))
###Output
_____no_output_____
###Markdown
Response statistics
###Code
depo_resp_df = pd.read_excel(C.data_xls_path, "Deposition-Response Data")
Rdf = depo_resp_df.dropna()
scipy.stats.friedmanchisquare(*[Rdf[l] for l in liplvls])
for l in liplvls[1:]:
print(0,l,scipy.stats.wilcoxon(Rdf[0], Rdf[l]))
print(liplvls[1],liplvls[2],scipy.stats.wilcoxon(Rdf[liplvls[1]], Rdf[liplvls[2]]))
print(liplvls[3],liplvls[2],scipy.stats.wilcoxon(Rdf[liplvls[3]], Rdf[liplvls[2]]))
dresp_df = copy.deepcopy(Rdf)
for L in liplvls[3:0:-1]:
dresp_df[L] = dresp_df[L] - dresp_df[0]
dresp_df[0] = 0
for l in liplvls:
subset=dresp_df[l]
print("%.1f%%+-%.1f%% (s.e.=%.1f%%)" % (np.mean(subset)*100, np.std(subset)*100, np.std(subset)*100/(len(subset)**.5)))
###Output
0.0%+-0.0% (s.e.=0.0%)
3.1%+-12.0% (s.e.=2.0%)
10.1%+-18.5% (s.e.=3.0%)
15.2%+-23.2% (s.e.=3.8%)
###Markdown
Top graph
###Code
pattern_df = pd.read_excel(C.data_xls_path, "Patterns")
df = pd.DataFrame(columns=["Response", "Lesion_id", "Tumor Growth", "Tumor Type", "TACE Type", "Homogeneity", "Sparsity", "Rim Presence"])
master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID")#"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\MASTER SS SOPHIE.xlsx")
master_df = master_df.join(pattern_df)
modality = "ct24"
master_df["lipcoverage_vol"] = master_df["lipcoverage_vol"].astype(float)
master_df["high_lip"] = master_df["high_lip"].astype(float)
master_df["rim_lipiodol"] = master_df["rim_lipiodol"].astype(float)
master_df["low_peripheral"] = master_df["low_peripheral"].astype(float)
master_df["mid_peripheral"] = master_df["mid_peripheral"].astype(float)
importlib.reload(lvis)
ix = 0
for lesion_id, row in depo_resp_df.iterrows():
const = lvis.get_df_entry(lesion_id, master_df, modality)
df.loc[ix] = [row["Avg"], lesion_id] + const
ix += 1
#master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID")#"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\MASTER SS SOPHIE.xlsx")
#dvasc_df = dvasc_df.join(master_df, how='inner')
Rdf = depo_resp_df.join(master_df)
subset1 = Rdf.loc[Rdf["0=well delineated, 1=infiltrative"] == 0, "Avg"].dropna()
subset2 = Rdf.loc[Rdf["0=well delineated, 1=infiltrative"] == 1, "Avg"].dropna()
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID")#"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\MASTER SS SOPHIE.xlsx")
#dvasc_df = dvasc_df.join(master_df, how='inner')
Rdf = depo_resp_df.join(master_df)
subset1 = Rdf.loc[Rdf["selective=0"] == 0, "Avg"].dropna()
subset2 = Rdf.loc[Rdf["selective=0"] == 1, "Avg"].dropna()
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID")#"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\MASTER SS SOPHIE.xlsx")
#dvasc_df = dvasc_df.join(master_df, how='inner')
Rdf = depo_resp_df.join(master_df)
subset1 = Rdf.loc[Rdf["HCC(0), ICC(1), other(2)"] == 0, "Avg"].dropna()
subset3 = Rdf.loc[Rdf["HCC(0), ICC(1), other(2)"] == 1, "Avg"].dropna()
subset2 = Rdf.loc[Rdf["HCC(0), ICC(1), other(2)"] == 2, "Avg"].dropna()
print(scipy.stats.kruskal(subset1, subset2, subset3).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
print_subset_stats(subset3)
subdf = df.dropna(subset=["Sparsity"])
subset1 = subdf.loc[subdf["Sparsity"].str.contains("Sparse"), "Response"]
subset2 = subdf.loc[subdf["Sparsity"].str.contains("Non"), "Response"]
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
importlib.reload(lan)
focal_df = df.dropna(subset=["Tumor Growth"])
focal_df = focal_df[focal_df["Tumor Growth"].str.contains("Well")]
subdf = focal_df.dropna(subset=["Homogeneity"])
subset1 = subdf.loc[(subdf["Homogeneity"].str.contains("Homo")), "Response"]
subset2 = subdf.loc[(subdf["Homogeneity"].str.contains("Hetero")), "Response"]
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
subdf = focal_df.dropna(subset=["Rim Presence"])
subset1 = subdf.loc[(subdf["Rim Presence"].str.contains("Rim")) & (subdf["Sparsity"].str.contains("Sparse")), "Response"]
subset2 = subdf.loc[(subdf["Rim Presence"].str.contains("Non")) & (subdf["Sparsity"].str.contains("Sparse")), "Response"]
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
subdf = focal_df.dropna(subset=["Rim Presence"])
subset1 = subdf.loc[(subdf["Rim Presence"].str.contains("Rim")) & ~(subdf["Sparsity"].str.contains("Sparse")), "Response"]
subset2 = subdf.loc[(subdf["Rim Presence"].str.contains("Non")) & ~(subdf["Sparsity"].str.contains("Sparse")), "Response"]
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
subdf = focal_df.dropna(subset=["Sparsity"])
subset1 = subdf.loc[subdf["Sparsity"].str.contains("Sparse"), "Response"]
subset2 = subdf.loc[subdf["Sparsity"].str.contains("Non"), "Response"]
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
importlib.reload(lan)
infil_df = df.dropna(subset=["Tumor Growth"])
infil_df = infil_df[infil_df["Tumor Growth"].str.contains("Infilt")]
subdf = infil_df.dropna(subset=["Sparsity"])
subset1 = subdf.loc[subdf["Sparsity"].str.contains("Sparse"), "Response"]
subset2 = subdf.loc[subdf["Sparsity"].str.contains("Non"), "Response"]
print(scipy.stats.mannwhitneyu(subset1, subset2).pvalue)
print_subset_stats(subset1)
print_subset_stats(subset2)
pattern_df = pd.read_excel(C.data_xls_path, "Patterns")
df = pd.DataFrame(columns=["Response", "Lesion_id", "Tumor Growth", "Tumor Type", "TACE Type", "Homogeneity", "Sparsity", "Rim Presence"])
master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID")#"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\MASTER SS SOPHIE.xlsx")
master_df = master_df.join(pattern_df)
modality = "ct24"
master_df["lipcoverage_vol"] = master_df["lipcoverage_vol"].astype(float)
master_df["high_lip"] = master_df["high_lip"].astype(float)
master_df["rim_lipiodol"] = master_df["rim_lipiodol"].astype(float)
master_df["low_peripheral"] = master_df["low_peripheral"].astype(float)
master_df["mid_peripheral"] = master_df["mid_peripheral"].astype(float)
importlib.reload(lvis)
ix = 0
for lesion_id, row in depo_resp_df.iterrows():
const = lvis.get_df_entry(lesion_id, master_df, modality)
df.loc[ix] = [row["Avg"], lesion_id] + const
ix += 1
#kwargs = {"x":"Lipiodol Deposition", "y":"Response", "data":df, "size":3, "markers":["s", "o", "^"], "legend":False}
kwargs = {"y":"Response", "data":df, "size":3, "kind":"bar", "legend":False}
def set_g_bar(g, save_path):
g.set(yticks=[0.,.2,.4,.6,.8,1.], ylim=(0.,1.));
#g.set(yticks=[-1.,-.8,-.6,-.4,-.2,0.], ylim=(-1.,0.));
for gax in g.axes[0]:
gax.set_xlabel("")
gax.set_ylabel("")
#gax.tick_params('x',width=0)
gax.set_xticks([], minor=False)
gax.set_yticks([], minor=False)
plt.setp(gax.patches, linewidth=1, edgecolor='k')
g.set_titles(visible=False)
#g.axes[0][0].set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"]);
sns.despine(top=True, right=True, left=True, bottom=False)
g.fig.subplots_adjust(left=.2, top=.95)
#g.fig.tight_layout(w_pad=1)
#plt.setp(g.ax.lines,linewidth=1);
g.fig.savefig(save_path, width=5, dpi=150, pad_inches=0, transparent=True)
plt.close()
df["Response"] = -df["Response"]
importlib.reload(lan)
for category, order in [("Tumor Growth", None), ("Tumor Type", None),
("TACE Type", ["Selective", "Lobar"]), ("Sparsity", ["Sparse", "Non"])]:
g = sns.factorplot(x=category, order=lan.get_actual_order(category, df, order), **kwargs)
set_g_bar(g, join(C.fig_dir, "Deposition figures", "Top graph", "%s.png" % category));
#for category, order in [("Tumor Growth", None)]:
# g = sns.factorplot(x=category, order=lan.get_actual_order(category, df, order), **kwargs)
# set_g_bar(g, join(C.fig_dir, "Deposition figures", "Top graph", "%s with percentage.png" % category));
importlib.reload(lan)
focal_df = df.dropna(subset=["Tumor Growth"])
focal_df = focal_df[focal_df["Tumor Growth"].str.contains("Well")]
for ix, row in focal_df.iterrows():
focal_df.loc[ix, "Tumor Type"] = lvis.check_column(row["Lesion_id"], master_df, "HCC(0), ICC(1), other(2)",
{0: "HCCs", 1: "ICCs", 2: "Metastases"}, "WD")
focal_df.loc[ix, "Sparsity"] = lvis.check_sparse(row["Lesion_id"], master_df, modality, "WD")
kwargs["data"] = focal_df
for category, order in [("Sparsity", ["Sparse", "Non"]), ("Homogeneity", ["Homo", "Hetero"])]:
g = sns.factorplot(x=category, order=lan.get_actual_order(category, focal_df, order), **kwargs)
set_g_bar(g, join(C.fig_dir, "Deposition figures", "Top graph", "Focal_%s.png" % category));
focal_df = focal_df.dropna(subset=["Sparsity"])
kwargs["data"] = focal_df[focal_df["Sparsity"].str.startswith("Sparse")]
for category, order in [("Rim Presence", ["Rim", "Non"])]:
g = sns.factorplot(x=category, order=lan.get_actual_order(category, focal_df, order), **kwargs)
set_g_bar(g, join(C.fig_dir, "Deposition figures", "Top graph", "Focal_Sparse_%s.png" % category));
kwargs["data"] = focal_df[focal_df["Sparsity"].str.startswith("Non")]
for category, order in [("Rim Presence", ["Rim", "Non"])]:
g = sns.factorplot(x=category, order=lan.get_actual_order(category, focal_df, order), **kwargs)
set_g_bar(g, join(C.fig_dir, "Deposition figures", "Top graph", "Focal_Non-Sparse_%s.png" % category));
###Output
_____no_output_____
###Markdown
infil_df = df.dropna(subset=["Tumor Growth"])infil_df = infil_df[infil_df["Tumor Growth"].str.contains("Infiltrative")]for ix, row in infil_df.iterrows(): infil_df.loc[ix, "Tumor Type"] = lvis.check_column(row["Lesion_id"], master_df, "HCC(0), ICC(1), other(2)", {0: "HCCs", 1: "ICCs", 2: "Metastases"}, "Infiltrative") infil_df.loc[ix, "Sparsity"] = lvis.check_sparse(row["Lesion_id"], master_df, modality, "Infiltrative")kwargs["data"] = infil_dffor category, order in [("Sparsity", ["Sparse", "Non"])]: g = sns.factorplot(x=category, order=lan.get_actual_order(category, infil_df, order), **kwargs) set_g_bar(g, join(C.fig_dir, "Deposition figures", "Top graph", "Infil_%s.png" % category)); Prediction of Lipiodol deposition
###Code
pattern_df = pd.read_excel(C.data_xls_path, "Patterns")
lesion_id, lesions.index(lesion_id)
cols = ["T_art", "DICE_art", "T_sub", "DICE_sub"]
T_df = pd.DataFrame(columns=cols)
importlib.reload(lan)
for lesion_id in lesions[0:]:
print(lesion_id)
T_df.loc[lesion_id] = lan.get_best_T_lip(lesion_id, target_dir, liplvls[2])
T_df["DICE_art"].mean(), T_df["DICE_sub"].mean()
lesion_id = "BM-01"
P = lm.get_paths_dict(lesion_id, target_dir)
art.min()
art = hf.nii_load(P['ct24Tx']['mrbl']['art'])[0]
art[M != 0].min()
img = masks.crop_img_to_mask_vicinity(P['ct24Tx']['mrbl']['art'], P['ct24Tx']['crop']['tumor'])
img = masks.draw_mask(P['ct24Tx']['crop']['tumor'], P['ct24Tx']['mrbl']['art']);
img.min()
hf.draw_slices(img)
ct = hf.nii_load(P['ct24Tx']['crop']['img'])[0]
M = masks.get_mask(P['ct24Tx']['crop']['tumor'])[0]
ct[M != 0] = np.nan
ct_U = ct >= T_lip
ct_L = ct < T_lip
art = hf.nii_load(P['ct24Tx']['mrbl']['art'])[0].astype(int)
sub = hf.nii_load(P['ct24Tx']['mrbl']['sub'])[0].astype(int)
M.sum()/M.max()
ct[M != 0] = np.nan
(~np.isnan(ct)).sum()
(ct < 99999).sum()
T_df
lm.reg_to_ct24(lesion_id, target_dir)
lesion_id
###Output
_____no_output_____
###Markdown
Duplication error analysis
###Code
df_dup %>% filter(target=="Hate", pred=="Neither") %>% sample_n(10)
df_dup %>% filter(target=="Offensive", pred=="Hate") %>% sample_n(10)
df_dup %>% filter(target=="Neither", pred=="Hate") %>% sample_n(10)
df_dup %>% filter(target=="Neither", pred=="Offensive") %>% sample_n(10)
###Output
_____no_output_____
###Markdown
SLEP 014 Benchmark results
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['font.size'] = 16
plt.rcParams['figure.figsize'] = [14, 8]
plt.rcParams['lines.linewidth'] = 2.5
def plot_results(df, x, hue=None):
fig, (ax1, ax2) = plt.subplots(1, 2, constrained_layout=True)
sns.barplot(x=x, y='peak_memory', data=df, ax=ax1, hue=hue)
ax1.set_title("Peak memory")
sns.barplot(x=x, y='time', data=df, ax=ax2, hue=hue)
ax2.set_title("Time")
###Output
_____no_output_____
###Markdown
Custom sparse example```pyclass SillyVectorizer(TransformerMixin, BaseEstimator): def __init__(self, n_features_out=1_000, density=0.01): self.n_features_out = n_features_out self.density = density def fit(self, X, y=None): return self def transform(self, X): data_wrap = _DataTransformer(X, needs_feature_names_in=False) n_samples = len(X) X_output = sparse.rand(n_samples, self.n_features_out, density=self.density, random_state=0) output = data_wrap.transform(X_output, self.get_feature_names) return output def get_feature_names(self): return [f'col_{i}' for i in range(self.n_features_out)]class PassthroughTransformer(TransformerMixin, BaseEstimator): def fit(self, X, y=None): X = check_array(X, accept_sparse=True) do some fitting return self def transform(self, X): data_wrap = _DataTransformer(X) X = check_array(X, accept_sparse=True) typically does some math return data_wrap.transform(X) def main(density, array_out) set_config(array_out=array_out) n_samples = 100_000 X = [None] * n_samples pipe = make_pipeline(SillyVectorizer(density=density), PassthroughTransformer()) pipe.fit(X) output = pipe.transform(X)```
###Code
df = pd.read_json("results/bench_sparse_custom.json")
df['density'] = (df['density'] * 10).astype(int)
plot_results(df, x='density', hue='array_out')
###Output
_____no_output_____
###Markdown
Simple sparse pipeline with chained scalers`maxabs_scalers` is the number of scalers to chain together in pipeline```pydata = fetch_20newsgroups(subset='train')set_config(array_out=array_out)estimators = ([CountVectorizer()] + [MaxAbsScaler() for _ in range(maxabs_scalers)])pipe = make_pipeline(*estimators)output = pipe.fit_transform(data.data)```
###Code
df = pd.read_json("results/bench_sparse_maxabsscaler.json")
plot_results(df, x='maxabs_scalers', hue='array_out')
###Output
_____no_output_____
###Markdown
Sparse pipeline with text input`max_features` is passed to `CountVectorizer````pydata = fetch_20newsgroups(subset='train')set_config(array_out=array_out)pipe = make_pipeline(CountVectorizer(max_features=max_features), TfidfTransformer(), SGDClassifier(random_state=42))pipe.fit(data.data, data.target)````array_out='pydata/sparse'` uses pydata.sparse (with no feature names). Without the feature names, it uses less memory than xarray.
###Code
df = pd.read_json("results/bench_sparse_text_input.json")
plot_results(df, x='max_features', hue='array_out')
###Output
_____no_output_____
###Markdown
Simple dense pipeline```pyX, y = fetch_openml(data_id=1476, return_X_y=True, as_frame=True)set_config(array_out=array_out)pipe = make_pipeline(StandardScaler(), PCA(n_components=64), SelectKBest(k=30), Ridge())pipe.fit(X, y)output = pipe[:-1].transform(X)```
###Code
df = pd.read_json("results/bench_dense.json")
plot_results(df, x='array_out')
###Output
_____no_output_____
###Markdown
Dense pipeline with column transformer```pyX, y = fetch_openml(data_id=1590, return_X_y=True, as_frame=True)set_config(array_out=array_out)cat_prep = make_pipeline( SimpleImputer(fill_value='sk_missing', strategy='constant'), OneHotEncoder(handle_unknown='ignore', sparse=False))prep = make_column_transformer( (StandardScaler(), make_column_selector(dtype_include='number')), (cat_prep, make_column_selector(dtype_include='category')))pipe = make_pipeline(prep, SelectKBest(), DecisionTreeClassifier(random_state=42))pipe.fit(X, y)output = pipe[:-1].transform(X)```
###Code
df = pd.read_json("results/bench_column_transform.json")
plot_results(df, x='array_out')
###Output
_____no_output_____
###Markdown
Dense pipeline with many repeated transformations`minmax_scalers` is the number of MinMaxScalers in the pipeline```pyn_features = 200X, _ = make_regression(n_samples=300_000, n_features=n_features, random_state=42)df = pd.DataFrame(X, columns=[f"col_{i}" for i in range(n_features)])set_config(array_out=array_out)pipe = make_pipeline(*[MinMaxScaler() for _ in range(minmax_scalers)])output = pipe.fit_transform(df)```It is a little strange how the default uses more memory for `minmax_scalers>=3`
###Code
df = pd.read_json("results/bench_dense_minmaxscaler.json")
plot_results(df, x='minmax_scalers', hue='array_out')
###Output
_____no_output_____
###Markdown
Nest Dataset Imports
###Code
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest
###Output
_____no_output_____
###Markdown
Consider how you would analyze a real-time IoT dataset. We have a Nest device under normal conditions. How would you detect malicious or anomalous events from this data using machine learning models? Questions to consider for your analysis include:1. What type of model(s) would you choose and why?2. What kind of feature engineering would you want to do? Is there additional data you would want to capture?3. What would be needed to integrate your approach into the product's overall architecture? Load Data
###Code
df = pd.read_csv('data/DeviceTraffic.csv')
pd.set_option('display.max_colwidth', None)
print(df.shape)
df.head()
###Output
(5955, 7)
###Markdown
Thoughts about data Features to engineer: - change in time - source port (first port in TCP header) - destination port (second port in TCP header) - Sequence number (Seq) - Acknowledgement number (Ack) - bytes in flight = Length - header (66 bytes for the header in this case); resets when Ack == bytes in flight + previous Ack Features I'd like to see captured: - Unique user ID of some sort (IP is ok, but many people can have access to one IP) - UTC timestamps Feature Engineering
###Code
# Instantiate lists and variables needed for feature engineering
ack = []
seq = []
source_port = []
destination_port = []
client = '74.125.196.99'
client_bytes = []
client_seq = 0
server = '10.0.0.169'
server_bytes = []
server_seq = 0
df['Bytes'] = df['Length'] - 66
for i in range(df.shape[0]):
# Extract relevant information from 'Info' column
info_str = df.loc[i]['Info']
info_list = info_str.split(' ')
seq_ack = [string for string in info_list if ('Seq=' in string) or ('Ack=' in string)]
ports = [int(string) for string in info_list if string.isdigit()]
# Build port lists
if len(ports) > 1:
source_port.append(ports[0])
destination_port.append(ports[1])
else:
source_port.append(0)
destination_port.append(0)
# Build sequence and acknowledgement lists
if len(seq_ack) == 0:
seq.append(0)
ack.append(0)
if len(seq_ack) > 0:
for string in seq_ack:
break_down = string.split('=')
if break_down[0] == 'Seq':
seq.append(int(break_down[1]))
if break_down[0] == 'Ack':
ack.append(int(break_down[1]))
if len(seq) < len(ack):
seq.append(0)
if len(ack) < len(seq):
ack.append(0)
# Calculate bytes in flight for client and server side, build those lists
# Client side byte calculations
if df['Source'].iloc[i] == client:
prev_client_seq = client_seq
if seq[i] != 0:
client_seq = seq[i]
if len(client_bytes) == 0:
client_bytes.append(df['Bytes'].iloc[i])
else:
if seq[i] == 1:
client_bytes.append(df['Bytes'].iloc[i])
else:
cbif = df['Bytes'].iloc[i] + client_bytes[i-1]
if cbif + prev_client_seq == seq[i]:
client_bytes.append(df['Bytes'].iloc[i])
else:
client_bytes.append(cbif)
else:
client_bytes.append(0)
# Server side byte calculations
if df['Source'].iloc[i] == server:
prev_server_seq = server_seq
if seq[i] != 0:
server_seq = seq[i]
if len(server_bytes) == 0:
server_bytes.append(df['Bytes'].iloc[i])
else:
if seq[i] == 1:
server_bytes.append(df['Bytes'].iloc[i])
else:
sbif = df['Bytes'].iloc[i] + server_bytes[i-1]
if sbif + prev_server_seq == seq[i]:
server_bytes.append(df['Bytes'].iloc[i])
else:
server_bytes.append(sbif)
else:
server_bytes.append(0)
# Create dataframe columns from lists
df['Source_port'] = source_port
df['Destination_port'] = destination_port
df['Seq'] = seq
df['Ack'] = ack
df['Server_bytes'] = server_bytes
df['Client_bytes'] = client_bytes
df['Bytes_in_flight'] = df['Server_bytes'] + df['Client_bytes']
df['Time_change'] = df['Time'].diff()
df['Time_change'].iloc[0] = 0 #remove NaN value from first row
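# Note (added remark): chained indexing like df['Time_change'].iloc[0] = 0 is what triggers the
# pandas SettingWithCopyWarning shown in the output below; an equivalent warning-free assignment
# would be df.loc[df.index[0], 'Time_change'] = 0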
df.head()
###Output
c:\users\jon_9\.virtualenvs\perigee-example-udd_95il\lib\site-packages\pandas\core\indexing.py:670: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
iloc._setitem_with_indexer(indexer, value)
###Markdown
Data Analysis
###Code
plt.rcParams["figure.figsize"] = (20,10)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
fig.suptitle('Data Packets Over Time')
ax1.plot(df['Time'], df['Bytes_in_flight'], '-')
ax1.set_ylabel('All Bytes in Flight')
ax2.plot(df['Time'], df['Client_bytes'], '.')
ax2.set_ylabel('Client Bytes')
ax3.plot(df['Time'], df['Server_bytes'], '.')
ax3.set_ylim(top = 40000)
ax3.set_xlabel('Time')
ax3.set_ylabel('Server bytes')
plt.show()
###Output
_____no_output_____
###Markdown
As expected, the server accounts for the majority of the traffic observed in this set. A Nest device will frequently be pinging the user or Google servers to provide updates about its status and other meta-data. For the client-side, there is very little traffic, but there does appear to be an anomaly at the start of the service, indicated by a high data transfer rate. We can explore several techniques to see if this type of anomaly can be detected. Anomalies to consider may include: - Rate of data transfer - Size of data transfer - Frequency of queries - Types of queries - Time of queries Most of the anomalies may come from observing the client-side activity, but it would be wise to monitor the server-side activity to make sure it's not exposing sensitive data to unauthorized users. There is a wide array of models that would be useful for detecting these types of anomalies, some of which are: - Neural networks (PyTorch or TensorFlow architectures) - Decision trees - Clustering - Time-series analysis - Facebook Prophet Using unsupervised models may be best for the task until there is enough labeled data to begin using supervised models. Decision trees tend to be good baseline models, which we can use to evaluate the performance-cost tradeoff of the more expensive models (i.e., neural network models can be expensive in time, computation, and ultimately money). Local Outlier Factor (Clustering) Using the Local Outlier Factor model, we can take advantage of the k-nearest neighbors algorithm. This gives us a measure of how closely related one point is to its k number of neighbors. Applying a threshold of 1200 bytes, I was able to leverage the model and isolate the anomalous data transaction.
###Code
clf = LocalOutlierFactor(n_neighbors=20, contamination=0.01)
y_pred = clf.fit_predict(df[['Client_bytes']])
lof = pd.DataFrame(y_pred)
plt.rcParams["figure.figsize"] = (10,5)
plt.scatter(df[(lof[0] < 0)&(df['Bytes_in_flight'] > 1200)]['Time'], df[(lof[0] < 0)&(df['Bytes_in_flight'] > 1200)]['Bytes_in_flight'])
plt.xlabel('Time')
plt.ylabel('Bytes')
plt.title('Anomalous Bytes in Flight (Local Outlier Factor)')
plt.ylim(bottom = 0)
plt.xlim(left = 0, right = 10)
plt.show()
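# A minimal follow-up sketch (not in the original analysis): the fitted LocalOutlierFactor also
# exposes raw outlier scores via negative_outlier_factor_ (values much lower than -1 are more
# anomalous), which can guide the choice of the contamination parameter instead of relying only
# on the hard-coded 1200-byte cutoff used above.
lof_scores = clf.negative_outlier_factor_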
###Output
_____no_output_____
###Markdown
Isolation Forest (Decision Trees) Using the Isolation Forest decision tree ensemble method, we are able to obtain a cleaner set of anomaly predictions, requiring no post-processing. This model can be further refined by using a variant of Isolation Forest called Extended Isolation Forest.
###Code
isoforest = IsolationForest(random_state=42, n_jobs=-1, contamination = 0.003)
anomaly_pred = isoforest.fit_predict(df[['Client_bytes']])
iso_df = pd.DataFrame(anomaly_pred)
plt.rcParams["figure.figsize"] = (10,5)
plt.scatter(df[iso_df[0] < 0]['Time'], df[iso_df[0] < 0]['Bytes_in_flight'])
plt.xlabel('Time')
plt.ylabel('Bytes')
plt.title('Anomalous Bytes in Flight (Isolation Forest)')
plt.ylim(bottom = 0)
plt.xlim(left = 0, right = 10)
plt.show()
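# A minimal follow-up sketch (not in the original analysis): IsolationForest also provides
# continuous anomaly scores; lower decision_function values indicate more anomalous rows, which
# can help tune the contamination parameter before settling on a final threshold.
iso_scores = isoforest.decision_function(df[['Client_bytes']])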
###Output
_____no_output_____
###Markdown
Profitable App Profiles for the App Store and Google Play MarketsOur aim in this project is to find mobile app profiles that are profitable for the App Store and Google Play markets. We're working as data analysts for a company that builds Android and iOS mobile apps, and our job is to enable our team of developers to make data-driven decisions with respect to the kind of apps they build.At our company, we only build apps that are free to download and install, and our main source of revenue consists of in-app ads. This means that our revenue for any given app is mostly influenced by the number of users that use our app. Our goal for this project is to analyze data to help our developers understand what kinds of apps are likely to attract more users. Opening and Exploring the DataAs of September 2018, there were approximately 2 million iOS apps available on the App Store, and 2.1 million Android apps on Google Play.Collecting data for over four million apps requires a significant amount of time and money, so we'll try to analyze a sample of data instead. To avoid spending resources with collecting new data ourselves, we should first try to see whether we can find any relevant existing data at no cost. Luckily, these are two data sets that seem suitable for our purpose:* [A data set](https://www.kaggle.com/lava18/google-play-store-apps/home) containing data about approximately ten thousand Android apps from Google Play* [A data set](https://www.kaggle.com/ramamet4/app-store-apple-data-set-10k-apps/home) containing data about approximately seven thousand iOS apps from the App StoreLet's start by opening the two data sets and then continue with exploring the data.
###Code
from csv import reader
with open("data_sets/AppleStore.csv", encoding='utf8') as file_opened:
file_readed = list(reader(file_opened))
apple_data = file_readed[1:]
apple_headers = file_readed[0]
from csv import reader
with open("data_sets/googleplaystore.csv", encoding='utf8')as file_opened:
file_readed = list(reader(file_opened))
google_data = file_readed[1:]
google_headers = file_readed[0]
###Output
_____no_output_____
###Markdown
To make it easier to explore the two data sets, we'll first write a function named explore_data() that we can use repeatedly to explore rows in a more readable way. We'll also add an option for our function to show the number of rows and columns for any data set.
###Code
def explore_data(dataset, start, end, rows_and_columns=False):
dataset_slice = dataset[start:end]
for row in dataset_slice:
print("%s\n" % row)
if rows_and_columns:
print('Number of rows:', len(dataset))
print('Number of columns:', len(dataset[0]))
###Output
_____no_output_____
###Markdown
Now let's use explore_data() to print the column names of the Google Play data set, along with its first row and overall dimensions.
###Code
explore_data(google_headers, 0, len(google_headers), False)
explore_data(google_data, 0, 1, True)
###Output
App
Category
Rating
Reviews
Size
Installs
Type
Price
Content Rating
Genres
Last Updated
Current Ver
Android Ver
['Photo Editor & Candy Camera & Grid & ScrapBook', 'ART_AND_DESIGN', '4.1', '159', '19M', '10,000+', 'Free', '0', 'Everyone', 'Art & Design', 'January 7, 2018', '1.0.0', '4.0.3 and up']
Number of rows: 10841
Number of columns: 13
###Markdown
We see that the Google Play data set has 10841 apps and 13 columns. At a quick glance, the columns that might be useful for the purpose of our analysis are 'App', 'Category', 'Reviews', 'Installs', 'Type', 'Price', and 'Genres'.Now let's take a look at the App Store data set.
###Code
explore_data(apple_headers, 0, len(apple_headers), False)
explore_data(apple_data, 0, 1, True)
###Output
id
track_name
size_bytes
currency
price
rating_count_tot
rating_count_ver
user_rating
user_rating_ver
ver
cont_rating
prime_genre
sup_devices.num
ipadSc_urls.num
lang.num
vpp_lic
['1', '281656475', 'PAC-MAN Premium', '100788224', 'USD', '3.99', '21292', '26', '4', '4.5', '6.3.5', '4+', 'Games', '38', '5', '10', '1']
Number of rows: 7197
Number of columns: 17
###Markdown
We have 7197 iOS apps in this data set, and the columns that seem interesting are: 'track_name', 'currency', 'price', 'rating_count_tot', 'rating_count_ver', and 'prime_genre'. Not all column names are self-explanatory in this case, but details about each column can be found in the data set [documentation](https://www.kaggle.com/ramamet4/app-store-apple-data-set-10k-apps/home). Deleting Wrong DataThe Google Play data set has a dedicated [discussion section](https://www.kaggle.com/lava18/google-play-store-apps/discussion) , and we can see that [one of the discussions](https://www.kaggle.com/lava18/google-play-store-apps/discussion/66015) outlines an error for row 10472. Let's print this row and compare it against the header and another row that is correct.
###Code
print(google_data[10472]) # incorrect row
print('\n')
print(google_headers) # header
print('\n')
print(google_data[0]) # correct row
###Output
['Life Made WI-Fi Touchscreen Photo Frame', '1.9', '19', '3.0M', '1,000+', 'Free', '0', 'Everyone', '', 'February 11, 2018', '1.0.19', '4.0 and up']
['App', 'Category', 'Rating', 'Reviews', 'Size', 'Installs', 'Type', 'Price', 'Content Rating', 'Genres', 'Last Updated', 'Current Ver', 'Android Ver']
['Photo Editor & Candy Camera & Grid & ScrapBook', 'ART_AND_DESIGN', '4.1', '159', '19M', '10,000+', 'Free', '0', 'Everyone', 'Art & Design', 'January 7, 2018', '1.0.0', '4.0.3 and up']
###Markdown
The row 10472 corresponds to the app Life Made WI-Fi Touchscreen Photo Frame, and we can see that the rating is 19. This is clearly off because the maximum rating for a Google Play app is 5 (the row is missing its 'Category' value, which shifts the remaining columns to the left). As a consequence, we'll delete this row.
###Code
print(len(google_data))
del google_data[10472] # don't run this more than once
print(len(google_data))
###Output
10841
10840
###Markdown
Removing Duplicate Entries Part OneLet's analyze the data sets to detect duplicate entries.
###Code
def analyze_duplicates(data_set, data_set_name):
unique_apps = []
duplicate_apps = []
for row in data_set:
app = row[0]
if app in unique_apps:
duplicate_apps.append(app)
else:
unique_apps.append(app)
print("%s unique_apps: %s" % (data_set_name, len(unique_apps)))
print("%s duplicate_apps: %s" % (data_set_name, len(duplicate_apps)))
###Output
_____no_output_____
###Markdown
First let's check the iOS data set.
###Code
analyze_duplicates(apple_data, "OS")
###Output
OS unique_apps: 7197
OS duplicate_apps: 0
###Markdown
Luckily the iOS data set contains no duplicate entries. Let's check the Android data as well.
###Code
analyze_duplicates(google_data, "Android")
###Output
Android unique_apps: 9659
Android duplicate_apps: 1181
###Markdown
As we can see, the Android data set contains 1181 duplicated entries.Examples of duplicate apps: ['Quick PDF Scanner + OCR FREE', 'Box', 'Google My Business', 'ZOOM Cloud Meetings', 'join.me - Simple Meetings', 'Box', 'Zenefits', 'Google Ads', 'Google My Business', 'Slack', 'FreshBooks Classic', 'Insightly CRM', 'QuickBooks Accounting: Invoicing & Expenses', 'HipChat - Chat Built for Teams', 'Xero Accounting Software']We don't want to count certain apps more than once when we analyze data, so we need to remove the duplicate entries and keep only one entry per app. One thing we could do is remove the duplicate rows randomly, but we could probably find a better way.If you examine the duplicate rows for a given app, the main difference usually happens on the fourth position of each row, which corresponds to the number of reviews. The different numbers show that the data was collected at different times. We can use this to build a criterion for keeping rows. We won't remove rows randomly, but rather we'll keep the rows that have the highest number of reviews because the higher the number of reviews, the more reliable the ratings.To do that, we will:* Create a dictionary where each key is a unique app name, and the value is the highest number of reviews of that app* Use the dictionary to create a new data set, which will have only one entry per app (and we only select the apps with the highest number of reviews)
###Code
reviews_max = {}
for row in google_data:
name = row[0]
n_reviews = float(row[3])
if name in reviews_max.keys():
if reviews_max[name] < n_reviews:
reviews_max[name] = n_reviews
else:
reviews_max[name] = n_reviews
###Output
_____no_output_____
###Markdown
In a previous code cell, we found that there are 1,181 cases where an app occurs more than once, so the length of our dictionary (of unique apps) should be equal to the difference between the length of our data set and 1,181.
###Code
print('Expected length:', len(google_data) - 1181)
print('Actual length:', len(reviews_max))
###Output
Expected length: 9659
Actual length: 9659
###Markdown
Now, let's use the reviews_max dictionary to remove the duplicates. For the duplicate cases, we'll only keep the entries with the highest number of reviews. In the code cell below:* We start by initializing two empty lists, clean_google_data and already_added.* We loop through the Google Play data set, and for every iteration: * We isolate the name of the app and the number of reviews. * We add the current row (app) to the clean_google_data list, and the app name (name) to the already_added list if:The number of reviews of the current app matches the number of reviews of that app as described in the reviews_max dictionary; andThe name of the app is not already in the already_added list. We need to add this supplementary condition to account for those cases where the highest number of reviews of a duplicate app is the same for more than one entry (for example, the Box app has three entries, and the number of reviews is the same). If we just check for reviews_max[name] == n_reviews, we'll still end up with duplicate entries for some apps.
###Code
clean_google_data = []
already_added = []
for row in google_data:
name = row[0]
n_reviews = float(row[3])
if n_reviews == reviews_max[name] and name not in already_added:
clean_google_data.append(row)
already_added.append(name)
###Output
_____no_output_____
###Markdown
Now let's quickly explore the new data set, and confirm that the number of rows is 9,659.
###Code
explore_data(clean_google_data, 0, 3, True)
###Output
['Photo Editor & Candy Camera & Grid & ScrapBook', 'ART_AND_DESIGN', '4.1', '159', '19M', '10,000+', 'Free', '0', 'Everyone', 'Art & Design', 'January 7, 2018', '1.0.0', '4.0.3 and up']
['U Launcher Lite – FREE Live Cool Themes, Hide Apps', 'ART_AND_DESIGN', '4.7', '87510', '8.7M', '5,000,000+', 'Free', '0', 'Everyone', 'Art & Design', 'August 1, 2018', '1.2.4', '4.0.3 and up']
['Sketch - Draw & Paint', 'ART_AND_DESIGN', '4.5', '215644', '25M', '50,000,000+', 'Free', '0', 'Teen', 'Art & Design', 'June 8, 2018', 'Varies with device', '4.2 and up']
Number of rows: 9659
Number of columns: 13
###Markdown
We have 9659 rows, just as expected. Removing Non-English Apps Part OneIf you explore the data sets enough, you'll notice the names of some of the apps suggest they are not directed toward an English-speaking audience. Below, we see a couple of examples from both data sets:
###Code
print(apple_data[813][2])   # track_name is at index 2 here (index 1 holds the App Store id)
print(apple_data[6731][2])
print(clean_google_data[4412][0])
print(clean_google_data[7940][0])
###Output
436672029
1144164707
中国語 AQリスニング
لعبة تقدر تربح DZ
###Markdown
We're not interested in keeping these kinds of apps, so we'll remove them. One way to go about this is to remove each app whose name contains a symbol that is not commonly used in English text — English text usually includes letters from the English alphabet, numbers composed of digits from 0 to 9, punctuation marks (., !, ?, ;, etc.), and other symbols (+, *, /, etc.).All these characters that are specific to English texts are encoded using the ASCII standard. Each ASCII character has a corresponding number between 0 and 127 associated with it, and we can take advantage of that to build a function that checks an app name and tells us whether it contains non-ASCII characters.We built this function below, and we use the built-in ord() function to find out the corresponding encoding number of each character.
###Code
def is_english(string):
for character in string:
if ord(character) > 127:
return False
return True
print(is_english('Instagram'))
print(is_english('爱奇艺PPS -《欢乐颂2》电视剧热播'))
###Output
True
False
###Markdown
The function seems to work fine, but some English app names use emojis or other symbols (™, — (em dash), – (en dash), etc.) that fall outside of the ASCII range. Because of this, we'll remove useful apps if we use the function in its current form.
###Code
print(is_english('Docs To Go™ Free Office Suite'))
print(is_english('Instachat 😜'))
print(ord('™'))
print(ord('😜'))
###Output
False
False
8482
128540
###Markdown
Part TwoTo minimize the impact of data loss, we'll first strip any emoji from an app's name and only remove the app if the remaining name still contains non-ASCII characters:
###Code
from langdetect import detect
import emoji
def give_emoji_free_text(text):
all_chars = [str for str in text]
emoji_list = [c for c in all_chars if c in emoji.UNICODE_EMOJI]
for emoji_value in emoji_list:
all_chars.remove(emoji_value)
clean_text = ''.join(all_chars)
return clean_text.strip()
def is_english(string):
    clean_string = give_emoji_free_text(string)
try:
        clean_string.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return False
else:
return True
print(is_english('Docs To Go™ Free Office Suite'))
print(is_english('ナビタイム ドライブサポーター - NAVITIMEのカーナビアプリ : 0'))
print(is_english('Instachat 😜'))
print(is_english('自転車ナビ by NAVITIME(ナビタイム) - 自転車のナビができるアプリ : 0'))
###Output
True
False
True
False
###Markdown
The function is still not perfect, and very few non-English apps might get past our filter, but this seems good enough at this point in our analysis — we shouldn't spend too much time on optimization at this point.Below, we use the is_english() function to filter out the non-English apps for both data sets:
###Code
google_data_english = []
apple_data_english = []
for app in clean_google_data:
name = app[0]
if is_english(name):
google_data_english.append(app)
for app in apple_data:
name = app[2]
if is_english(name):
apple_data_english.append(app)
explore_data(google_data_english, 0, 3, True)
print('\n')
explore_data(apple_data_english, 0, 3, True)
###Output
['Photo Editor & Candy Camera & Grid & ScrapBook', 'ART_AND_DESIGN', '4.1', '159', '19M', '10,000+', 'Free', '0', 'Everyone', 'Art & Design', 'January 7, 2018', '1.0.0', '4.0.3 and up']
['Sketch - Draw & Paint', 'ART_AND_DESIGN', '4.5', '215644', '25M', '50,000,000+', 'Free', '0', 'Teen', 'Art & Design', 'June 8, 2018', 'Varies with device', '4.2 and up']
['Pixel Draw - Number Art Coloring Book', 'ART_AND_DESIGN', '4.3', '967', '2.8M', '100,000+', 'Free', '0', 'Everyone', 'Art & Design;Creativity', 'June 20, 2018', '1.1', '4.4 and up']
Number of rows: 9282
Number of columns: 13
['1', '281656475', 'PAC-MAN Premium', '100788224', 'USD', '3.99', '21292', '26', '4', '4.5', '6.3.5', '4+', 'Games', '38', '5', '10', '1']
['2', '281796108', 'Evernote - stay organized', '158578688', 'USD', '0', '161065', '26', '4', '3.5', '8.2.2', '4+', 'Productivity', '37', '5', '23', '1']
['3', '281940292', 'WeatherBug - Local Weather, Radar, Maps, Alerts', '100524032', 'USD', '0', '188583', '2822', '3.5', '4.5', '5.0.0', '4+', 'Weather', '37', '5', '3', '1']
Number of rows: 5874
Number of columns: 17
###Markdown
Isolating the Free AppsAs we mentioned in the introduction, we only build apps that are free to download and install, and our main source of revenue consists of in-app ads. Our data sets contain both free and non-free apps, and we'll need to isolate only the free apps for our analysis. Below, we isolate the free apps for both our data sets.
###Code
google_final = []
apple_final = []
for app in google_data_english:
price = app[7]
if price == '0':
google_final.append(app)
for app in apple_data_english:
price = app[5]
if price == '0':
apple_final.append(app)
print(len(google_final))
print(len(apple_final))
###Output
8554
3020
###Markdown
We're left with 8554 Android apps and 3020 iOS apps, which should be enough for our analysis. Most Common Apps by Genre Part OneAs we mentioned in the introduction, our aim is to determine the kinds of apps that are likely to attract more users because our revenue is highly influenced by the number of people using our apps.To minimize risks and overhead, our validation strategy for an app idea is comprised of three steps:Build a minimal Android version of the app, and add it to Google Play.If the app has a good response from users, we then develop it further.If the app is profitable after six months, we also build an iOS version of the app and add it to the App Store.Because our end goal is to add the app on both the App Store and Google Play, we need to find app profiles that are successful on both markets. For instance, a profile that might work well for both markets might be a productivity app that makes use of gamification.Let's begin the analysis by getting a sense of the most common genres for each market. For this, we'll build a frequency table for the prime_genre column of the App Store data set, and the Genres and Category columns of the Google Play data set. Part TwoWe'll build two functions we can use to analyze the frequency tables:* One function to generate frequency tables that show percentages* Another function that we can use to display the percentages in a descending order
###Code
def freq_table(dataset, index):
tmp_table = {}
for line in dataset:
value = line[index]
if value in tmp_table.keys():
tmp_table[value] += 1
else:
tmp_table[value] = 1
result = {}
dataset_len = len(dataset)
for key, tmp_value in tmp_table.items():
key_frequency_value = tmp_value/dataset_len * 100
result[key] = key_frequency_value
return result
def display_table(dataset, index):
table = freq_table(dataset, index)
table_display = []
for key in table:
key_val_as_tuple = (table[key], key)
table_display.append(key_val_as_tuple)
table_sorted = sorted(table_display, reverse = True)
for entry in table_sorted:
print(entry[1], ':', entry[0])
###Output
_____no_output_____
###Markdown
Part ThreeWe start by examining the frequency table for the prime_genre column of the App Store data set.
###Code
display_table(apple_final, -5)
###Output
Games : 59.60264900662252
Entertainment : 7.6158940397351
Photo & Video : 4.966887417218543
Education : 3.80794701986755
Social Networking : 3.1788079470198674
Shopping : 2.4503311258278146
Utilities : 2.218543046357616
Music : 2.0860927152317883
Sports : 2.019867549668874
Health & Fitness : 1.95364238410596
Productivity : 1.6556291390728477
Lifestyle : 1.490066225165563
News : 1.2913907284768211
Travel : 1.0927152317880795
Finance : 1.0927152317880795
Weather : 0.8609271523178808
Food & Drink : 0.8609271523178808
Reference : 0.5298013245033113
Business : 0.49668874172185434
Book : 0.26490066225165565
Medical : 0.1986754966887417
Navigation : 0.16556291390728478
Catalogs : 0.09933774834437085
###Markdown
We can see that among the free English apps, more than half (59.60%) are games. Entertainment apps make up about 7.6%, followed by photo & video apps, which are close to 5%. Education apps account for 3.81% of the data set, and social networking apps for 3.18%.The general impression is that the App Store (at least the part containing free English apps) is dominated by apps that are designed for fun (games, entertainment, photo and video, social networking, sports, music, etc.), while apps with practical purposes (education, shopping, utilities, productivity, lifestyle, etc.) are rarer. However, the fact that fun apps are the most numerous doesn't imply that they also have the greatest number of users — the demand might not be the same as the offer.Let's continue by examining the _Genres_ and _Category_ columns of the Google Play data set (two columns which seem to be related).
###Code
display_table(google_final, 1) # Category
###Output
FAMILY : 18.926817862988077
GAME : 9.72644376899696
TOOLS : 8.557400046761748
BUSINESS : 4.687865326163198
PRODUCTIVITY : 3.9396773439326633
LIFESTYLE : 3.904606032265607
FINANCE : 3.7175590367079727
MEDICAL : 3.624035538929156
PERSONALIZATION : 3.3785363572597613
SPORTS : 3.2382511105915364
COMMUNICATION : 3.20317979892448
HEALTH_AND_FITNESS : 3.1213467383680147
PHOTOGRAPHY : 2.9693710544774374
NEWS_AND_MAGAZINES : 2.7706336216974514
SOCIAL : 2.6654196866962825
TRAVEL_AND_LOCAL : 2.2913256955810146
SHOPPING : 2.221183072246902
BOOKS_AND_REFERENCE : 2.1627308861351415
DATING : 1.82370820668693
VIDEO_PLAYERS : 1.7535655833528174
MAPS_AND_NAVIGATION : 1.3327098433481412
FOOD_AND_DRINK : 1.227495908346972
EDUCATION : 1.1573532850128596
ENTERTAINMENT : 0.9469254150105214
LIBRARIES_AND_DEMO : 0.9352349777881692
AUTO_AND_VEHICLES : 0.9235445405658173
HOUSE_AND_HOME : 0.8183306055646482
WEATHER : 0.7832592938975917
EVENTS : 0.701426233341127
PARENTING : 0.6546644844517185
ART_AND_DESIGN : 0.6546644844517185
BEAUTY : 0.6195931727846622
COMICS : 0.5611409866729016
###Markdown
The landscape seems significantly different on Google Play: there are not that many apps designed for fun, and it seems that a good number of apps are designed for practical purposes (family, tools, business, lifestyle, productivity, etc.). However, if we investigate this further, we can see that the family category (which accounts for almost 19% of the apps) means mostly games for kids.Even so, practical apps seem to have a better representation on Google Play compared to App Store. This picture is also confirmed by the frequency table we see for the _Genres_ column:
###Code
display_table(google_final, -4) # Genres
###Output
Tools : 8.545709609539397
Entertainment : 6.055646481178396
Education : 5.342529810614917
Business : 4.687865326163198
Productivity : 3.9396773439326633
Lifestyle : 3.8929155950432546
Finance : 3.7175590367079727
Medical : 3.624035538929156
Personalization : 3.3785363572597613
Sports : 3.3083937339256484
Communication : 3.20317979892448
Action : 3.1564180500350716
Health & Fitness : 3.1213467383680147
Photography : 2.9693710544774374
News & Magazines : 2.7706336216974514
Social : 2.6654196866962825
Travel & Local : 2.2913256955810146
Shopping : 2.221183072246902
Books & Reference : 2.1627308861351415
Simulation : 2.104278700023381
Arcade : 1.8587795183539864
Dating : 1.82370820668693
Casual : 1.800327332242226
Video Players & Editors : 1.730184708908113
Maps & Navigation : 1.3327098433481412
Food & Drink : 1.227495908346972
Puzzle : 1.1456628477905073
Racing : 1.017068038344634
Strategy : 0.9469254150105214
Role Playing : 0.9469254150105214
Libraries & Demo : 0.9352349777881692
Auto & Vehicles : 0.9235445405658173
House & Home : 0.8183306055646482
Weather : 0.7832592938975917
Events : 0.701426233341127
Adventure : 0.6429740472293664
Beauty : 0.6195931727846622
Art & Design : 0.60790273556231
Comics : 0.5494505494505495
Parenting : 0.502688800561141
Card : 0.43254617722702826
Trivia : 0.4091653027823241
Educational;Education : 0.4091653027823241
Casino : 0.38578442833761983
Educational : 0.3740939911152677
Board : 0.3740939911152677
Education;Education : 0.33902267944821135
Word : 0.2571896188917466
Casual;Pretend Play : 0.2221183072246902
Music : 0.19873743277998598
Racing;Action & Adventure : 0.17535655833528174
Puzzle;Brain Games : 0.17535655833528174
Entertainment;Music & Video : 0.1402852466682254
Casual;Brain Games : 0.1402852466682254
Casual;Action & Adventure : 0.1402852466682254
Arcade;Action & Adventure : 0.10521393500116906
Action;Action & Adventure : 0.09352349777881692
Simulation;Action & Adventure : 0.08183306055646482
Parenting;Education : 0.08183306055646482
Educational;Pretend Play : 0.08183306055646482
Entertainment;Brain Games : 0.0701426233341127
Art & Design;Creativity : 0.0701426233341127
Parenting;Music & Video : 0.058452186111760576
Educational;Brain Games : 0.058452186111760576
Education;Pretend Play : 0.058452186111760576
Casual;Creativity : 0.058452186111760576
Board;Brain Games : 0.058452186111760576
Role Playing;Pretend Play : 0.04676174888940846
Education;Creativity : 0.04676174888940846
Role Playing;Action & Adventure : 0.03507131166705635
Puzzle;Action & Adventure : 0.03507131166705635
Educational;Creativity : 0.03507131166705635
Educational;Action & Adventure : 0.03507131166705635
Education;Music & Video : 0.03507131166705635
Education;Action & Adventure : 0.03507131166705635
Adventure;Action & Adventure : 0.03507131166705635
Video Players & Editors;Music & Video : 0.02338087444470423
Sports;Action & Adventure : 0.02338087444470423
Simulation;Pretend Play : 0.02338087444470423
Puzzle;Creativity : 0.02338087444470423
Music;Music & Video : 0.02338087444470423
Entertainment;Pretend Play : 0.02338087444470423
Entertainment;Creativity : 0.02338087444470423
Entertainment;Action & Adventure : 0.02338087444470423
Education;Brain Games : 0.02338087444470423
Casual;Education : 0.02338087444470423
Board;Action & Adventure : 0.02338087444470423
Video Players & Editors;Creativity : 0.011690437222352116
Trivia;Education : 0.011690437222352116
Tools;Education : 0.011690437222352116
Strategy;Education : 0.011690437222352116
Strategy;Creativity : 0.011690437222352116
Strategy;Action & Adventure : 0.011690437222352116
Simulation;Education : 0.011690437222352116
Role Playing;Brain Games : 0.011690437222352116
Racing;Pretend Play : 0.011690437222352116
Puzzle;Education : 0.011690437222352116
Parenting;Brain Games : 0.011690437222352116
Music & Audio;Music & Video : 0.011690437222352116
Lifestyle;Pretend Play : 0.011690437222352116
Health & Fitness;Education : 0.011690437222352116
Health & Fitness;Action & Adventure : 0.011690437222352116
Entertainment;Education : 0.011690437222352116
Comics;Creativity : 0.011690437222352116
Casual;Music & Video : 0.011690437222352116
Card;Action & Adventure : 0.011690437222352116
Books & Reference;Education : 0.011690437222352116
Art & Design;Pretend Play : 0.011690437222352116
Art & Design;Action & Adventure : 0.011690437222352116
Arcade;Pretend Play : 0.011690437222352116
Adventure;Education : 0.011690437222352116
###Markdown
The difference between the _Genres_ and the _Category_ columns is not crystal clear, but one thing we can notice is that the _Genres_ column is much more granular (it has more categories). We're only looking for the bigger picture at the moment, so we'll only work with the _Category_ column moving forward.Up to this point, we found that the App Store is dominated by apps designed for fun, while Google Play shows a more balanced landscape of both practical and for-fun apps. Now we'd like to get an idea about the kinds of apps that have the most users. Most Popular Apps by Genre on the App StoreOne way to find out which genres are the most popular (have the most users) is to calculate the average number of installs for each app genre. For the Google Play data set, we can find this information in the _Installs_ column, but for the App Store data set this information is missing. As a workaround, we'll take the total number of user ratings as a proxy, which we can find in the _rating_count_tot_ column.Below, we calculate the average number of user ratings per app genre on the App Store:
###Code
def get_avg_n_ratings_by_genre(dataset, genre_index, rating_index):
result = []
genres = freq_table(dataset, genre_index)
for genre in genres:
total = 0
len_genre = 0
for app in dataset:
genre_app = app[genre_index]
if genre_app == genre:
n_ratings = float(app[rating_index])
total += n_ratings
len_genre += 1
avg_n_ratings = total / len_genre
result.append((avg_n_ratings, genre))
result.sort()
for value in result:
print(value[1], ':', value[0])
get_avg_n_ratings_by_genre(apple_final, -5, 6)
###Output
Medical : 612.0
Catalogs : 5195.0
Education : 6099.417391304348
Business : 6839.6
Utilities : 11413.179104477613
Entertainment : 14481.995652173913
Book : 16671.0
Lifestyle : 17848.51111111111
Health & Fitness : 19230.86440677966
Games : 22820.230555555554
Productivity : 22842.22
News : 23382.17948717949
Sports : 25382.114754098362
Finance : 26729.090909090908
Shopping : 28517.72972972973
Photo & Video : 29249.766666666666
Food & Drink : 33333.92307692308
Travel : 34115.57575757576
Weather : 48275.57692307692
Music : 55396.01587301587
Social Networking : 75253.84375
Reference : 84258.25
Navigation : 102592.0
###Markdown
On average, navigation apps have the highest number of user reviews, but this figure is heavily influenced by Waze and Google Maps, which have close to half a million user reviews together:
###Code
for app in apple_final:
if app[-5] == 'Navigation':
print(app[2], ':', app[6]) # print name and number of ratings
###Output
Waze - GPS Navigation, Maps & Real-time Traffic : 345046
Geocaching® : 12811
ImmobilienScout24: Real Estate Search in Germany : 187
Railway Route Search : 5
Google Maps - Navigation & Transit : 154911
###Markdown
The same pattern applies to social networking apps, where the average number is heavily influenced by a few giants like Facebook, Pinterest, Skype, etc. The same applies to music apps, where a few big players like Pandora, Spotify, and Shazam heavily influence the average number.Our aim is to find popular genres, but navigation, social networking or music apps might seem more popular than they really are. The average number of ratings seems to be skewed by a very few apps which have hundreds of thousands of user ratings, while the other apps may struggle to get past the 10,000 threshold. We could get a better picture by removing these extremely popular apps for each genre and then reworking the averages; a rough sketch of that idea follows the listing below, but we'll leave a deeper treatment for later.Reference apps have 84,258 user ratings on average, but it's actually the Bible and Dictionary.com which skew up the average rating:
###Code
for app in apple_final:
if app[-5] == 'Reference':
print(app[2], ':', app[6])
###Output
Bible : 985920
Dictionary.com Dictionary & Thesaurus : 200047
Dictionary.com Dictionary & Thesaurus for iPad : 54175
Muslim Pro: Ramadan 2017 Prayer Times, Azan, Quran : 18418
Merriam-Webster Dictionary : 16849
Google Translate : 26786
Night Sky : 12122
WWDC : 762
Jishokun-Japanese English Dictionary & Translator : 0
VPN Express : 14
New Furniture Mods - Pocket Wiki & Game Tools for Minecraft PC Edition : 17588
LUCKY BLOCK MOD ™ for Minecraft PC Edition - The Best Pocket Wiki & Mods Installer Tools : 4693
Horror Maps for Minecraft PE - Download The Scariest Maps for Minecraft Pocket Edition (MCPE) Free : 718
City Maps for Minecraft PE - The Best Maps for Minecraft Pocket Edition (MCPE) : 8535
GUNS MODS for Minecraft PC Edition - Mods Tools : 1497
Real Bike Traffic Rider Virtual Reality Glasses : 8
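###Markdown
As a rough, optional sketch of the idea mentioned above (not part of the original analysis), we can recompute the App Store genre averages after dropping apps above a hypothetical popularity cut-off — here 100,000 user ratings — to reduce the influence of the few extremely popular apps:
###Code
# Hedged sketch: genre averages with extremely popular apps removed.
# RATING_CAP is an arbitrary, illustrative cut-off, not a value from the original analysis.
RATING_CAP = 100000
capped_averages = []
for genre in freq_table(apple_final, -5):
    ratings = [float(app[6]) for app in apple_final
               if app[-5] == genre and float(app[6]) <= RATING_CAP]
    if ratings:
        capped_averages.append((sum(ratings) / len(ratings), genre))
for avg, genre in sorted(capped_averages):
    print(genre, ':', avg)
###Output
_____no_output_____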
###Markdown
However, this niche seems to show some potential. One thing we could do is take another popular book and turn it into an app where we could add different features besides the raw version of the book. This might include daily quotes from the book, an audio version of the book, quizzes about the book, etc. On top of that, we could also embed a dictionary within the app, so users don't need to exit our app to look up words in an external app.This idea seems to fit well with the fact that the App Store is dominated by for-fun apps. This suggests the market might be a bit saturated with for-fun apps, which means a practical app might have more of a chance to stand out among the huge number of apps on the App Store.Other genres that seem popular include weather, book, food and drink, or finance. The book genre seems to overlap a bit with the app idea we described above, but the other genres don't seem too interesting to us:Weather apps — people generally don't spend too much time in-app, and the chances of making profit from in-app ads are low. Also, getting reliable live weather data may require us to connect our apps to non-free APIs.Food and drink — examples here include Starbucks, Dunkin' Donuts, McDonald's, etc. So making a popular food and drink app requires actual cooking and a delivery service, which is outside the scope of our company.Finance apps — these apps involve banking, paying bills, money transfer, etc. Building a finance app requires domain knowledge, and we don't want to hire a finance expert just to build an app.Now let's analyze the Google Play market a bit. Most Popular Apps by Genre on Google PlayFor the Google Play market, we actually have data about the number of installs, so we should be able to get a clearer picture about genre popularity. However, the install numbers don't seem precise enough — we can see that most values are open-ended (100+, 1,000+, 5,000+, etc.):
###Code
display_table(google_final, 5) # the Installs columns
###Output
1,000,000+ : 15.665185877951835
100,000+ : 11.526771101239186
10,000,000+ : 10.462941314005143
10,000+ : 10.39279869067103
1,000+ : 8.417114800093524
100+ : 7.002571896188918
5,000,000+ : 6.757072714519523
500,000+ : 5.541267243394903
50,000+ : 4.769698386719663
5,000+ : 4.500818330605565
10+ : 3.4837502922609307
500+ : 3.214870236146832
50,000,000+ : 2.2913256955810146
100,000,000+ : 2.1627308861351415
50+ : 1.9406125789104514
5+ : 0.806640168342296
1+ : 0.502688800561141
500,000,000+ : 0.2805704933364508
1,000,000,000+ : 0.2221183072246902
0+ : 0.04676174888940846
0 : 0.011690437222352116
###Markdown
One problem with this data is that it is not precise. For instance, we don't know whether an app with 100,000+ installs has 100,000 installs, 200,000, or 350,000. However, we don't need very precise data for our purposes — we only want to get an idea of which app genres attract the most users, and we don't need perfect precision with respect to the number of users.We're going to leave the numbers as they are, which means that we'll consider that an app with 100,000+ installs has 100,000 installs, an app with 1,000,000+ installs has 1,000,000 installs, and so on.To perform computations, however, we'll need to convert each install number to float — this means that we need to remove the commas and the plus characters, otherwise the conversion will fail and raise an error. We'll do this directly in the loop below, where we also compute the average number of installs for each genre (category).Let's refactor the "get_avg_n_ratings_by_genre" function:
###Code
def get_avg_n_ratings_by_genre(dataset, genre_index, user_activity_index):
result = []
genres = freq_table(dataset, genre_index)
for genre in genres:
total = 0
len_category = 0
for app in dataset:
category_app = app[genre_index]
if category_app == genre:
n_user_activity = app[user_activity_index]
n_user_activity = n_user_activity.replace(',', '')
n_user_activity = n_user_activity.replace('+', '')
total += float(n_user_activity)
len_category += 1
avg_n_ratings = total / len_category
result.append((avg_n_ratings, genre))
result.sort()
for value in result:
print(value[1], ':', value[0])
###Output
_____no_output_____
###Markdown
Let's check the result:
###Code
get_avg_n_ratings_by_genre(google_final, 1, 5)
###Output
MEDICAL : 121230.14193548387
EVENTS : 232885.83333333334
BEAUTY : 513151.88679245283
PARENTING : 535196.6071428572
AUTO_AND_VEHICLES : 645317.2278481013
LIBRARIES_AND_DEMO : 662421.375
DATING : 822459.9807692308
COMICS : 880440.625
HOUSE_AND_HOME : 1380033.7285714287
FINANCE : 1380165.5094339622
LIFESTYLE : 1381354.248502994
BUSINESS : 1710203.4663341646
EDUCATION : 1826767.6767676768
FOOD_AND_DRINK : 1911606.2
ART_AND_DESIGN : 1932519.642857143
FAMILY : 3696338.7331686225
SPORTS : 3860842.6606498193
HEALTH_AND_FITNESS : 4274711.20599251
MAPS_AND_NAVIGATION : 4304432.280701755
WEATHER : 5219216.7164179105
PERSONALIZATION : 5254790.269896193
SHOPPING : 7274624.131578947
BOOKS_AND_REFERENCE : 8999314.918918919
NEWS_AND_MAGAZINES : 9926131.265822785
TOOLS : 11010869.950819673
ENTERTAINMENT : 12177283.950617284
TRAVEL_AND_LOCAL : 14370321.867346939
GAME : 15799823.725961538
PRODUCTIVITY : 16852579.56676558
PHOTOGRAPHY : 18028223.68110236
SOCIAL : 24038639.263157893
VIDEO_PLAYERS : 24964878.133333333
COMMUNICATION : 35933961.6459854
###Markdown
On average, communication apps have the most installs: 35,933,961. This number is heavily skewed up by a few apps that have over one billion installs (WhatsApp, Skype, Google Chrome, Gmail, and Hangouts), and a few others with over 100 and 500 million installs:
###Code
for app in google_final:
if app[1] == 'COMMUNICATION' and (app[5] == '1,000,000,000+'
or app[5] == '500,000,000+'
or app[5] == '100,000,000+'):
print(app[0], ':', app[5])
###Output
WhatsApp Messenger : 1,000,000,000+
imo beta free calls and text : 100,000,000+
Android Messages : 100,000,000+
Google Duo - High Quality Video Calls : 500,000,000+
imo free video calls and chat : 500,000,000+
Skype - free IM & video calls : 1,000,000,000+
Who : 100,000,000+
GO SMS Pro - Messenger, Free Themes, Emoji : 100,000,000+
LINE: Free Calls & Messages : 500,000,000+
Google Chrome: Fast & Secure : 1,000,000,000+
Firefox Browser fast & private : 100,000,000+
UC Browser - Fast Download Private & Secure : 500,000,000+
Gmail : 1,000,000,000+
Hangouts : 1,000,000,000+
Messenger Lite: Free Calls & Messages : 100,000,000+
Kik : 100,000,000+
KakaoTalk: Free Calls & Text : 100,000,000+
Opera Mini - fast web browser : 100,000,000+
Opera Browser: Fast and Secure : 100,000,000+
Telegram : 100,000,000+
Truecaller: Caller ID, SMS spam blocking & Dialer : 100,000,000+
UC Browser Mini -Tiny Fast Private & Secure : 100,000,000+
Viber Messenger : 500,000,000+
WeChat : 100,000,000+
BBM - Free Calls & Messages : 100,000,000+
###Markdown
If we removed all the communication apps that have over 100 million installs, the average would be reduced roughly ten times:
###Code
under_100_m = []
for app in google_final:
n_installs = app[5]
n_installs = n_installs.replace(',', '')
n_installs = n_installs.replace('+', '')
if (app[1] == 'COMMUNICATION') and (float(n_installs) < 100000000):
under_100_m.append(float(n_installs))
sum(under_100_m) / len(under_100_m)
###Output
_____no_output_____
###Markdown
We see the same pattern for the video players category, which is the runner-up with 24,964,878 installs. The market is dominated by apps like Youtube, Google Play Movies & TV, or MX Player. The pattern is repeated for social apps (where we have giants like Facebook, Instagram, Google+, etc.), photography apps (Google Photos and other popular photo editors), or productivity apps (Microsoft Word, Dropbox, Google Calendar, Evernote, etc.).Again, the main concern is that these app genres might seem more popular than they really are. Moreover, these niches seem to be dominated by a few giants who are hard to compete against.The game genre seems pretty popular, but previously we found out this part of the market seems a bit saturated, so we'd like to come up with a different app recommendation if possible.The books and reference genre looks fairly popular as well, with an average number of installs of 8,999,314. It's interesting to explore this in more depth, since we found this genre has some potential to work well on the App Store, and our aim is to recommend an app genre that shows potential for being profitable on both the App Store and Google Play.Let's take a look at some of the apps from this genre and their number of installs:
###Code
for app in google_final:
if app[1] == 'BOOKS_AND_REFERENCE':
print(app[0], ':', app[5])
###Output
E-Book Read - Read Book for free : 50,000+
Download free book with green book : 100,000+
Wikipedia : 10,000,000+
Cool Reader : 10,000,000+
Free Panda Radio Music : 100,000+
Book store : 1,000,000+
FBReader: Favorite Book Reader : 10,000,000+
English Grammar Complete Handbook : 500,000+
Free Books - Spirit Fanfiction and Stories : 1,000,000+
Google Play Books : 1,000,000,000+
AlReader -any text book reader : 5,000,000+
Offline English Dictionary : 100,000+
Offline: English to Tagalog Dictionary : 500,000+
FamilySearch Tree : 1,000,000+
Cloud of Books : 1,000,000+
Recipes of Prophetic Medicine for free : 500,000+
Anonymous caller detection : 10,000+
Ebook Reader : 5,000,000+
Litnet - E-books : 100,000+
Read books online : 5,000,000+
English to Urdu Dictionary : 500,000+
eBoox: book reader fb2 epub zip : 1,000,000+
English Persian Dictionary : 500,000+
Flybook : 500,000+
All Maths Formulas : 1,000,000+
Ancestry : 5,000,000+
HTC Help : 10,000,000+
English translation from Bengali : 100,000+
Pdf Book Download - Read Pdf Book : 100,000+
Free Book Reader : 100,000+
eBoox new: Reader for fb2 epub zip books : 50,000+
Only 30 days in English, the guideline is guaranteed : 500,000+
Moon+ Reader : 10,000,000+
SH-02J Owner's Manual (Android 8.0) : 50,000+
English-Myanmar Dictionary : 1,000,000+
Golden Dictionary (EN-AR) : 1,000,000+
All Language Translator Free : 1,000,000+
Azpen eReader : 500,000+
URBANO V 02 instruction manual : 100,000+
Bible : 100,000,000+
C Programs and Reference : 50,000+
C Offline Tutorial : 1,000+
C Programs Handbook : 50,000+
Amazon Kindle : 100,000,000+
Aab e Hayat Full Novel : 100,000+
Aldiko Book Reader : 10,000,000+
Google I/O 2018 : 500,000+
R Language Reference Guide : 10,000+
Learn R Programming Full : 5,000+
R Programing Offline Tutorial : 1,000+
Guide for R Programming : 5+
Learn R Programming : 10+
R Quick Reference Big Data : 1,000+
V Made : 100,000+
Wattpad 📖 Free Books : 100,000,000+
Dictionary - WordWeb : 5,000,000+
Guide (for X-MEN) : 100,000+
AC Air condition Troubleshoot,Repair,Maintenance : 5,000+
AE Bulletins : 1,000+
Ae Allah na Dai (Rasa) : 10,000+
50000 Free eBooks & Free AudioBooks : 5,000,000+
Ag PhD Field Guide : 10,000+
Ag PhD Deficiencies : 10,000+
Ag PhD Planting Population Calculator : 1,000+
Ag PhD Soybean Diseases : 1,000+
Fertilizer Removal By Crop : 50,000+
A-J Media Vault : 50+
Al-Quran (Free) : 10,000,000+
Al Quran (Tafsir & by Word) : 500,000+
Al Quran Indonesia : 10,000,000+
Al'Quran Bahasa Indonesia : 10,000,000+
Al Quran Al karim : 1,000,000+
Al-Muhaffiz : 50,000+
Al Quran : EAlim - Translations & MP3 Offline : 5,000,000+
Al-Quran 30 Juz free copies : 500,000+
Koran Read &MP3 30 Juz Offline : 1,000,000+
Hafizi Quran 15 lines per page : 1,000,000+
Quran for Android : 10,000,000+
Surah Al-Waqiah : 100,000+
Hisnul Al Muslim - Hisn Invocations & Adhkaar : 100,000+
Satellite AR : 1,000,000+
Audiobooks from Audible : 100,000,000+
Kinot & Eichah for Tisha B'Av : 10,000+
AW Tozer Devotionals - Daily : 5,000+
Tozer Devotional -Series 1 : 1,000+
The Pursuit of God : 1,000+
AY Sing : 5,000+
Ay Hasnain k Nana Milad Naat : 10,000+
Ay Mohabbat Teri Khatir Novel : 10,000+
Arizona Statutes, ARS (AZ Law) : 1,000+
Oxford A-Z of English Usage : 1,000,000+
BD Fishpedia : 1,000+
BD All Sim Offer : 10,000+
Youboox - Livres, BD et magazines : 500,000+
B&H Kids AR : 10,000+
Dictionary.com: Find Definitions for English Words : 10,000,000+
English Dictionary - Offline : 10,000,000+
Bible KJV : 5,000,000+
Borneo Bible, BM Bible : 10,000+
MOD Black for BM : 100+
BM Box : 1,000+
Anime Mod for BM : 100+
NOOK: Read eBooks & Magazines : 10,000,000+
NOOK Audiobooks : 500,000+
NOOK App for NOOK Devices : 500,000+
Browsery by Barnes & Noble : 5,000+
bp e-store : 1,000+
Brilliant Quotes: Life, Love, Family & Motivation : 1,000,000+
BR Ambedkar Biography & Quotes : 10,000+
BU Alsace : 100+
Catholic La Bu Zo Kam : 500+
Khrifa Hla Bu (Solfa) : 10+
Kristian Hla Bu : 10,000+
SA HLA BU : 1,000+
Learn SAP BW : 500+
Learn SAP BW on HANA : 500+
CA Laws 2018 (California Laws and Codes) : 5,000+
Bootable Methods(USB-CD-DVD) : 10,000+
cloudLibrary : 100,000+
SDA Collegiate Quarterly : 500+
Sabbath School : 100,000+
Cypress College Library : 100+
Stats Royale for Clash Royale : 1,000,000+
GATE 21 years CS Papers(2011-2018 Solved) : 50+
Learn CT Scan Of Head : 5,000+
Easy Cv maker 2018 : 10,000+
How to Write CV : 100,000+
CW Nuclear : 1,000+
CY Spray nozzle : 10+
BibleRead En Cy Zh Yue : 5+
CZ-Help : 5+
Guide for DB Xenoverse : 10,000+
Guide for DB Xenoverse 2 : 10,000+
Guide for IMS DB : 10+
DC HSEMA : 5,000+
DC Public Library : 1,000+
Painting Lulu DC Super Friends : 1,000+
Dictionary : 10,000,000+
Fix Error Google Playstore : 1,000+
D. H. Lawrence Poems FREE : 1,000+
Bilingual Dictionary Audio App : 5,000+
DM Screen : 10,000+
wikiHow: how to do anything : 1,000,000+
Dr. Doug's Tips : 1,000+
Bible du Semeur-BDS (French) : 50,000+
La citadelle du musulman : 50,000+
DV 2019 Entry Guide : 10,000+
DV 2019 - EDV Photo & Form : 50,000+
DV 2018 Winners Guide : 1,000+
EB Annual Meetings : 1,000+
EC - AP & Telangana : 5,000+
TN Patta Citta & EC : 10,000+
AP Stamps and Registration : 10,000+
CompactiMa EC pH Calibration : 100+
EGW Writings 2 : 100,000+
EGW Writings : 1,000,000+
Bible with EGW Comments : 100,000+
My Little Pony AR Guide : 1,000,000+
SDA Sabbath School Quarterly : 500,000+
Duaa Ek Ibaadat : 5,000+
Spanish English Translator : 10,000,000+
Dictionary - Merriam-Webster : 10,000,000+
JW Library : 10,000,000+
Oxford Dictionary of English : Free : 10,000,000+
English Hindi Dictionary : 10,000,000+
English to Hindi Dictionary : 5,000,000+
EP Research Service : 1,000+
Hymnes et Louanges : 100,000+
EU Charter : 1,000+
EU Data Protection : 1,000+
EU IP Codes : 100+
EW PDF : 5+
BakaReader EX : 100,000+
EZ Quran : 50,000+
La Fe de Jesus : 1,000+
Le Fe de Jesus : 500+
Florida - Pocket Brainbook : 1,000+
Florida Statutes (FL Code) : 1,000+
English To Shona Dictionary : 10,000+
Greek Bible FP (Audio) : 1,000+
Golden Dictionary (FR-AR) : 500,000+
Fanfic-FR : 5,000+
Bulgarian French Dictionary Fr : 10,000+
Chemin (fr) : 1,000+
The SCP Foundation DB fr nn5n : 1,000+
###Markdown
The book and reference genre includes a variety of apps: software for processing and reading ebooks, various collections of libraries, dictionaries, tutorials on programming or languages, etc. It seems there's still a small number of extremely popular apps that skew the average:
###Code
for app in google_final:
if app[1] == 'BOOKS_AND_REFERENCE' and (app[5] == '1,000,000,000+'
or app[5] == '500,000,000+'
or app[5] == '100,000,000+'):
print(app[0], ':', app[5])
###Output
Google Play Books : 1,000,000,000+
Bible : 100,000,000+
Amazon Kindle : 100,000,000+
Wattpad 📖 Free Books : 100,000,000+
Audiobooks from Audible : 100,000,000+
###Markdown
However, it looks like there are only a few very popular apps, so this market still shows potential. Let's try to get some app ideas based on the kind of apps that are somewhere in the middle in terms of popularity (between 1,000,000 and 100,000,000 downloads):
###Code
for app in google_final:
if app[1] == 'BOOKS_AND_REFERENCE' and (app[5] == '1,000,000+'
or app[5] == '5,000,000+'
or app[5] == '10,000,000+'
or app[5] == '50,000,000+'):
print(app[0], ':', app[5])
###Output
Wikipedia : 10,000,000+
Cool Reader : 10,000,000+
Book store : 1,000,000+
FBReader: Favorite Book Reader : 10,000,000+
Free Books - Spirit Fanfiction and Stories : 1,000,000+
AlReader -any text book reader : 5,000,000+
FamilySearch Tree : 1,000,000+
Cloud of Books : 1,000,000+
Ebook Reader : 5,000,000+
Read books online : 5,000,000+
eBoox: book reader fb2 epub zip : 1,000,000+
All Maths Formulas : 1,000,000+
Ancestry : 5,000,000+
HTC Help : 10,000,000+
Moon+ Reader : 10,000,000+
English-Myanmar Dictionary : 1,000,000+
Golden Dictionary (EN-AR) : 1,000,000+
All Language Translator Free : 1,000,000+
Aldiko Book Reader : 10,000,000+
Dictionary - WordWeb : 5,000,000+
50000 Free eBooks & Free AudioBooks : 5,000,000+
Al-Quran (Free) : 10,000,000+
Al Quran Indonesia : 10,000,000+
Al'Quran Bahasa Indonesia : 10,000,000+
Al Quran Al karim : 1,000,000+
Al Quran : EAlim - Translations & MP3 Offline : 5,000,000+
Koran Read &MP3 30 Juz Offline : 1,000,000+
Hafizi Quran 15 lines per page : 1,000,000+
Quran for Android : 10,000,000+
Satellite AR : 1,000,000+
Oxford A-Z of English Usage : 1,000,000+
Dictionary.com: Find Definitions for English Words : 10,000,000+
English Dictionary - Offline : 10,000,000+
Bible KJV : 5,000,000+
NOOK: Read eBooks & Magazines : 10,000,000+
Brilliant Quotes: Life, Love, Family & Motivation : 1,000,000+
Stats Royale for Clash Royale : 1,000,000+
Dictionary : 10,000,000+
wikiHow: how to do anything : 1,000,000+
EGW Writings : 1,000,000+
My Little Pony AR Guide : 1,000,000+
Spanish English Translator : 10,000,000+
Dictionary - Merriam-Webster : 10,000,000+
JW Library : 10,000,000+
Oxford Dictionary of English : Free : 10,000,000+
English Hindi Dictionary : 10,000,000+
English to Hindi Dictionary : 5,000,000+
###Markdown
Clean data
###Code
import pandas as pd
import utils
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from tqdm import tqdm
# SP500
df_1 = pd.read_csv("sp500-info.csv", index_col="Date", parse_dates=True)
print(sum(df_1.isna().sum() != 0), "stock(s) don't have enough value.")
df_clean_1 = utils.clean_data(df_1, out_df_dir="sp500_clean.csv")
# SP400
df_2 = pd.read_csv("sp400-info.csv", index_col="Date", parse_dates=True)
print(sum(df_2.isna().sum() != 0), "stock(s) don't have enough value.")
df_clean_2 = utils.clean_data(df_2, out_df_dir="sp400_clean.csv")
df_clean = pd.concat([df_clean_1, df_clean_2], axis = 1)
df_clean
###Output
2 stock(s) don't have enough value.
8 stock(s) don't have enough value.
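###Markdown
The cleaning itself is delegated to utils.clean_data, which is not shown in this notebook. As a hedged sketch only (an assumption about what such a helper typically does, not the actual implementation), it might drop tickers with missing prices and persist the cleaned frame:
###Code
# Hypothetical sketch of a cleaning helper; utils.clean_data is the real implementation used above.
def clean_data_sketch(df, out_df_dir=None):
    cleaned = df.dropna(axis=1)      # drop stocks that have missing values
    if out_df_dir is not None:
        cleaned.to_csv(out_df_dir)   # persist the cleaned frame
    return cleaned
###Output
_____no_output_____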
###Markdown
Calculate correlation
###Code
start = 0
end = 527
df_cor = utils.calculate_cor(df_clean, start, end)
df_cor
###Output
_____no_output_____
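###Markdown
utils.calculate_cor is also external to this notebook. As a hedged sketch (an assumption, not the actual implementation), a common choice is the pairwise Pearson correlation of daily log returns over the chosen window [start, end]:
###Code
# Hypothetical sketch of the correlation computation; utils.calculate_cor is the real implementation used above.
log_returns = np.log(df_clean.iloc[start:end] / df_clean.iloc[start:end].shift(1)).dropna()
df_cor_sketch = log_returns.corr(method="pearson")
df_cor_sketch.iloc[:5, :5]
###Output
_____no_output_____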
###Markdown
Compute the quantiles, mean, and variance of the correlation values
###Code
sercurity_code = np.array(df_cor.columns.values.tolist())
n = len(sercurity_code) # number of stocks
# Correlation vector
correlation = []
for i in range(n-1):
for j in range(i+1,n):
correlation.append(df_cor.iloc[i].iloc[j])
npcorrelation = np.asarray(correlation)
plt.hist(npcorrelation)
plt.xlabel("Correlation")
plt.ylabel("Frequency")
plt.title("Correlation distribution")
plt.show()
###Output
_____no_output_____
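###Markdown
To complement the histogram, here is a short sketch (added for clarity, not part of the original notebook) printing the mean, variance, and the two quantiles used as thresholds in the next section:
###Code
# Summary statistics of the correlation distribution
print("Mean correlation:", npcorrelation.mean())
print("Variance:", npcorrelation.var())
print("98.9% quantile:", np.quantile(npcorrelation, 0.989))
print("1.1% quantile:", np.quantile(npcorrelation, 0.011))
###Output
_____no_output_____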
###Markdown
Create two financial networks. Network 1: most correlated stocks. Network 2: least correlated stocks.
###Code
# Network 1
# 98.9% most correlated stocks
# Threshold 1
QUANTILE_1 = 0.989
threshold_1 = np.quantile(npcorrelation, QUANTILE_1)
threshold_1
network_1 = nx.Graph()
for item in sercurity_code:
network_1.add_node(item)
for u in network_1.nodes:
for v in network_1.nodes:
if u != v and df_cor[u][v] > threshold_1:
network_1.add_edge(u, v)
# Write network to file for ploting
nx.write_gexf(network_1, "network_1.gexf")
# Count nodes involved in connected components (degree > 0)
t = 0
de = list(network_1.degree)
for item in de:
if item[1] > 0:
t=t+1
print(f"Nodes involve in the connected component of network 1: {t}")
# Network 2
# 1.1% least correlated stock
# Threshold 2
QUANTILE_2 = 0.011
threshold_2 = np.quantile(npcorrelation, QUANTILE_2)
threshold_2
network_2 = nx.Graph()
for item in sercurity_code:
network_2.add_node(item)
for u in network_2.nodes:
for v in network_2.nodes:
if u != v and df_cor[u][v] < threshold_2:
network_2.add_edge(u, v)
# Write network to file for ploting
nx.write_gexf(network_2, "network_2.gexf")
# Count nodes involved in connected components (degree > 0)
t = 0
de = list(network_2.degree)
for item in de:
if item[1] > 0:
t=t+1
print(f"Nodes involve in the connected component of network 2: {t}")
# Since networks 1 and 2 have the same number of links and nodes,
# their average degree is the same
# Number of nodes, edges, and average degree of the networks
n_node = network_1.number_of_nodes()
n_link = network_1.number_of_edges()
avg_degree_12 = 2*n_link/n_node
print(f"Networks nodes: {n_node}")
print(f"Network links: {n_link}")
print(f"Average degree: {avg_degree_12}")
print(f"Density of network 1 and 2: {nx.density(network_1)}")
# create BA with approximative density to network 1 and 2
m = 5
ba_model= nx.barabasi_albert_graph(n_node, m)
# Check the density of BA model
print(f"Density of BA model: {nx.density(ba_model)}")
nx.write_gexf(ba_model, "ba_model.gexf")
er_model = nx.fast_gnp_random_graph(n_node, 0.011)
print(f"Density of ER model: {nx.density(er_model)}")
nx.write_gexf(er_model, "er_model.gexf")
# The function returns a list of shortest-path lengths
def shorted_path_distribution(g):
length_dict = dict(nx.shortest_path_length(g))
density_of_length = []
for key1 in length_dict.keys():
dict_of_key = length_dict[key1]
for key2 in dict_of_key.keys():
if dict_of_key[key2] != 0:
density_of_length.append(dict_of_key[key2])
return density_of_length
def plot_shorted_path_dist(network, title, ax):
den = shorted_path_distribution(network)
ax.hist(den, bins = range(max(den)+1)[1:])
ax.set_title(title)
ax.set_xlabel("Length")
ax.set_ylabel("Frequency")
fig, axs = plt.subplots(2,2)
fig.set_size_inches(18.5, 10.5, forward=True)
plot_shorted_path_dist(network_1, "Histogram of shortest path lengths in network 1", axs[0,0])
plot_shorted_path_dist(network_2, "Histogram of shortest path lengths in network 2", axs[0,1])
plot_shorted_path_dist(er_model, "Histogram of shortest path lengths in ER model", axs[1,0])
plot_shorted_path_dist(ba_model, "Histogram of shortest path lengths in BA model", axs[1,1])
# Connected components
def largest_component(network):
largest_cc = len(max(nx.connected_components(network), key=len))
return largest_cc/n
print(f"Size of largest component compare to the network, in network 1: {largest_component(network_1)}")
print(f"Size of largest component compare to the network, in network 2: {largest_component(network_2)}")
print(f"Size of largest component compare to the network, in ER model: {largest_component(er_model)}")
print(f"Size of largest component compare to the network, in BA model: {largest_component(ba_model)}")
# Clustering coefficient
def clustering_dist_plt(network, title, axs):
clu = nx.clustering(network)
b=[]
for c in clu.values():
b.append(c)
axs.hist(b, bins =10)
axs.set_title(label = title)
axs.set_xlabel("Clustering")
axs.set_ylabel("Frequency")
fig, axs = plt.subplots(2,2)
fig.set_size_inches(18.5, 10.5, forward=True)
clustering_dist_plt(network_1, "Clustering distribution of network 1", axs[0,0])
clustering_dist_plt(network_2, "Clustering distribution of network 2", axs[0,1])
clustering_dist_plt(er_model, "Clustering distribution of ER model", axs[1,0])
clustering_dist_plt(ba_model, "Clustering distribution of BA model", axs[1,1])
# Average clustering
print(f"Average clustering of network 1: {nx.average_clustering(network_1)}")
print(f"Average clustering of network 2: {nx.average_clustering(network_2)}")
print(f"Average clustering of ER model: {nx.average_clustering(er_model)}")
print(f"Average clustering of BA model: {nx.average_clustering(ba_model)}")
r1 = nx.degree_pearson_correlation_coefficient(network_1)
r2 = nx.degree_pearson_correlation_coefficient(network_2)
r_er = nx.degree_pearson_correlation_coefficient(er_model)
r_ba = nx.degree_pearson_correlation_coefficient(ba_model)
print(f"Degree correlation of network 1: {r1}")
print(f"Degree correlation of network 2: {r2}")
print(f"Degree correlation of ER model: {r_er}")
print(f"Degree correlation of BA model: {r_ba}")
# So network 1 is a disassortative network,
# while network 2 is an assortative network.
# The BA and ER models are neutral to slightly assortative.
# Degree distribution
def degree_dist_plt(network, title, axs):
deg = nx.degree(network)
b=[]
for c in deg:
b.append(c[1])
axs.hist(b, bins =10)
axs.set_title(label = title)
axs.set_xlabel("Degree")
axs.set_ylabel("Frequency")
fig, axs = plt.subplots(2,2)
fig.set_size_inches(18.5, 10.5, forward=True)
degree_dist_plt(network_1, "Degree distribution of network 1", axs[0,0])
degree_dist_plt(network_2, "Degree distribution of network 2", axs[0,1])
degree_dist_plt(er_model, "Degree distribution of ER model", axs[1,0])
degree_dist_plt(ba_model, "Degree distribution of BA model", axs[1,1])
###Output
_____no_output_____
###Markdown
Network robustness
###Code
# In this part, we evaluate the robustness of the 4 networks
f =np.linspace(0, 1, 100)[:-1]
# Remove a random fraction f of nodes from graph G
import random
def remove_nodes_random(G, f):
N = G.number_of_nodes()
k = int(f*N)
nx.set_node_attributes(G, {node: np.random.rand() for node in G.nodes()}, 'p')
sorted_nodes_failure = sorted(G.nodes(), key=lambda x: -G.nodes[x]['p'])
remain_list_failure = sorted_nodes_failure[k:]
H = nx.subgraph(G, remain_list_failure)
return H
# Remove the fraction f of highest-degree nodes from graph G
def remove_nodes_attack(G, f):
N = G.number_of_nodes()
k = int(f*N)
nx.set_node_attributes(G, dict(G.degree()), 'd')
sorted_nodes_attack = sorted(G.nodes(), key=lambda x: -G.nodes[x]['d'])
remain_list_attack = sorted_nodes_attack[k:]
H = nx.subgraph(G, remain_list_attack)
return H
# Calculate the fraction of nodes belonging to the largest connected component
def fraction_gc(H, f):
components = sorted(nx.connected_components(H), key=len, reverse=True)
if len(components) > 1:
if len(components[0]) > len(components[1]):
P = float(len(components[0]))/H.number_of_nodes()
else:
P = 0
else:
P = 1
return P
# Plot the robustness figures for the four networks
fig, axs = plt.subplots(2,2)
fig.set_size_inches(18.5, 10.5, forward=True)
# Simulate targeted attacks and random failures in the 4 networks
def failure_simulation(network, title, axs):
# The fraction list for attack
Pa_gc = []
# The fraction list for failure
Pf_gc = []
for f0 in f:
        # Make the failure graph and fraction list
        Hf = remove_nodes_random(network, f0)  # use the network passed in, not network_1
        Pf_gc.append(fraction_gc(Hf, f0))
        # Make the attack graph and fraction list
        Ha = remove_nodes_attack(network, f0)  # use the network passed in, not network_1
        Pa_gc.append(fraction_gc(Ha, f0))
axs.plot(f, Pa_gc, label = "targeted attacks")
axs.plot(f, Pf_gc, label = "random failures")
axs.set_xlabel("f")
axs.set_ylabel("P")
#plt.axvline(fc, color='r', label = "Threshold")
axs.legend(loc ="upper right")
axs.set_title(title)
axs.axis("tight")
failure_simulation(network_1,"Network 1 robustness", axs[0,0])
failure_simulation(network_2,"Network 2 robustness", axs[0,1])
failure_simulation(er_model,"ER model robustness", axs[1,0])
failure_simulation(ba_model,"BA model robustness", axs[1,1])
# Compare the assortativity of the 4 networks
def assortive_plot(g_network, network_name, axs):
# Calculate <k>
degrees = [degree for _, degree in g_network.degree()]
k_max = max(degrees)
# Calculate <k_nn(k)>
k_nn = nx.average_degree_connectivity(g_network)
k, knn = zip(*[(x, y) for x, y in k_nn.items()])
    axs.scatter(k, knn, label='actual')
# Draw the line for random network approximation
k_nn_rand_exp = sum([degree**2 for degree in degrees]) / sum(degrees)
axs.axhline(k_nn_rand_exp, c='k', label='random')
# Calculate k and knn for random multiple link graph
g_random_multiple = nx.configuration_model(degrees)
k_nn_multiple = nx.average_degree_connectivity(g_random_multiple)
axs.scatter(k_nn_multiple.keys(), k_nn_multiple.values(), label='(R-M)')
# Assortativity coefficient of g_network
r = nx.degree_assortativity_coefficient(g_network)
# Plot
axs.loglog()
axs.legend()
axs.set_xlabel("<k>")
axs.set_ylabel("<knn(k)>")
axs.set_title(fr'{network_name}, degree assortativity $r={r:.2f}$')
fig, axs = plt.subplots(2,2)
fig.set_size_inches(18.5, 10.5, forward=True)
assortive_plot(network_1, "Network 1", axs[0,0])
assortive_plot(network_2, "Network 2", axs[0,1])
assortive_plot(er_model, "ER model", axs[1,0])
assortive_plot(ba_model, "BA model", axs[1,1])
###Output
_____no_output_____
###Markdown
Spreading simulation
###Code
def infect_node(G, n=1):
# Determined list of infectious nodes
infected_list = random.sample(G.nodes(), k= n)
infected_atr_dict = {}
infection_time = {}
recovered_dict = {}
    # make a boolean list of infected and non-infected nodes
for node in G.nodes():
recovered_dict[node] = False
if node in infected_list:
infected_atr_dict[node] = True
# Set infection time = 0 for initial list
infection_time[node] = 0
else:
infected_atr_dict[node] = False
infection_time[node] = -1
nx.set_node_attributes(G, infected_atr_dict, "Infected")
nx.set_node_attributes(G, infection_time, "Infection_time")
nx.set_node_attributes(G, recovered_dict, "Recovered")
def plot(G,axs, title=None):
pos = nx.spring_layout(G)
G.graph["pos"] = pos
# Make a list to color according to infectious status
node_colors = []
isInfected = nx.get_node_attributes(G, "Infected")
isRecovered = nx.get_node_attributes(G, "Recovered")
for node in G.nodes:
if isInfected[node]:
node_colors.append("red")
elif isRecovered[node]:
node_colors.append("blue")
else:
node_colors.append("green")
nx.draw(G, pos=G.graph["pos"] , node_size=30 , node_color=node_colors, ax = axs)
axs.set_title(title)
def spread(G, p, mu):
N = G.number_of_nodes()
    # Lambda function to count currently infected nodes
    wI = lambda G: sum(nx.get_node_attributes(G, 'Infected').values())
    # Lambda function to count currently recovered nodes
    wR = lambda G: sum(nx.get_node_attributes(G, 'Recovered').values())
    # Lambda function to count susceptible nodes (never infected, never recovered)
    wS = lambda G: N - wI(G) - wR(G)
    # Reset the graph:
    # if infection time != 0, set it to -1 and Infected = False;
    # set Recovered = False for every node
node_attr_time = nx.get_node_attributes(G, 'Infection_time')
node_attr_infected = nx.get_node_attributes(G, 'Infected')
node_attr_recovered = nx.get_node_attributes(G, 'Recovered')
for node in G.nodes():
node_attr_recovered[node] = False
if node_attr_time[node] != 0:
node_attr_time[node] = -1
node_attr_infected[node] = False
else:
node_attr_infected[node] = True
# Set attributes for reseted network
nx.set_node_attributes(G, node_attr_infected, "Infected")
nx.set_node_attributes(G, node_attr_recovered, "Recovered")
nx.set_node_attributes(G, node_attr_time, "Infection_time")
G.graph["t"] = 0
    # Initialize St, It, Rt
It = [wI(G)]
Rt = [wR(G)]
St = [wS(G)]
t = 0
isComplete = False
# Is G already saturated
if wI(G) == 0:
isComplete = True
while isComplete == False:
t = t+1
H = G
        # Note: H is another reference to the same graph as G,
        # so changes made through H take effect on G immediately
# At each step, some node recover:
if t != 1:
for node in G.nodes():
if G.nodes[node]["Infected"]:
s = np.random.rand()
isRecovered = (s < mu) # Decide if recover or not
if isRecovered:
G.nodes[node]["Infected"] = False
G.nodes[node]["Recovered"] = True
# Spreading
for e in G.edges():
u = e[0]
v = e[1]
            # If the two endpoints of an edge differ in infection status
            # and neither of them has recovered, the infection may spread
            if (G.nodes[u]["Infected"] != G.nodes[v]["Infected"]) and (G.nodes[u]["Recovered"] == False) and (G.nodes[v]["Recovered"] == False):
                isSpread = (np.random.rand() < p) # decide whether the infection spreads
if isSpread:
H.nodes[u]["Infected"] = True
H.nodes[v]["Infected"] = True
if G.nodes[u]['Infection_time'] == -1:
if H.nodes[u]['Infection_time'] == -1:
H.nodes[u]['Infection_time'] = t
elif G.nodes[v]['Infection_time'] == -1:
if H.nodes[v]['Infection_time'] == -1:
H.nodes[v]['Infection_time'] = t
G = H
St.append(wS(G))
It.append(wI(G))
Rt.append(wR(G))
if wI(G) ==0:
isComplete = True
G.graph["t"] = t
return St, It, Rt, t
# Calculate the element-wise mean of vectors with different lengths
def tolerant_mean(arrs):
lens = [len(i) for i in arrs]
arr = np.ma.empty((np.max(lens),len(arrs)))
arr.mask = True
for idx, l in enumerate(arrs):
arr[:len(l),idx] = l
return arr.mean(axis = -1)
# Run the spreading over 10 simulations
# Calculate the average of S(t), I(t), R(t)
# and the average time t to finish the spreading process
def plot_simulations(G,axs, p, mu, G_name):
St = []
It = []
Rt = []
t = 0
for b in tqdm(range(10)):
infect_node(G, 5)
[St_b, It_b, Rt_b, t_b] = np.array(spread(G, p=p, mu = mu))
St.append(St_b)
It.append(It_b)
Rt.append(Rt_b)
t = t+t_b
average_st = tolerant_mean(St)
average_it = tolerant_mean(It)
average_rt = tolerant_mean(Rt)
average_t = t/10
axs.plot(average_st/n_node, label='S', color = "blue")
axs.plot(average_it/n_node, label='I', color = "red")
axs.plot(average_rt/n_node, label='R', color = "green")
axs.axhline(1, ls=':', lw=1)
axs.set_xlabel(r'$t$', fontsize=16)
axs.set_ylabel('Percentage of node %', fontsize=16)
axs.set_title(f"Spreading effect on {G_name}")
axs.legend(loc="upper right")
print(f"Average spreading time of {G_name}: {average_t}")
fig, axs = plt.subplots(2,2)
fig.set_size_inches(18.5, 10.5, forward=True)
plot_simulations(network_1, axs[0,0], p =0.01, mu = 0.001, G_name = "Network 1" )
plot_simulations(network_2, axs[0,1], p =0.01, mu = 0.001, G_name = "Network 2" )
plot_simulations(er_model, axs[1,0], p =0.01, mu = 0.001, G_name = "ER model" )
plot_simulations(ba_model, axs[1,1], p =0.01, mu = 0.001, G_name = "BA model" )
def investigate_mu(G,axs1, axs2, p, G_name):
It = []
t = []
# Evaluate mu over percentage of p
mu = np.arange(p/10, 1.05*p, p/10)
for b in tqdm(range(10)):
infect_node(G, 5)
[St_b, It_b, Rt_b, t_b] = np.array(spread(G, p=p, mu = mu[b]))
It.append(max(It_b))
t.append(t_b)
# Size of the infection max It
axs1.plot(mu, It, label='I', color = "red")
axs1.set_xlabel(r'$mu$', fontsize=16)
axs1.set_ylabel('It_max', fontsize=16)
axs1.set_title(f"Max infectious node in {G_name}")
# Time to finish the spreading
axs2.plot(mu, t)
axs2.set_xlabel(r'$mu$', fontsize=16)
axs2.set_ylabel('Time for spreading', fontsize=16)
axs2.set_title(f"Max spreading time in {G_name}")
fig1, axs1 = plt.subplots(2,2)
fig1.set_size_inches(18.5, 10.5, forward=True)
fig2, axs2 = plt.subplots(2,2)
fig2.set_size_inches(18.5, 10.5, forward=True)
investigate_mu(network_1, axs1[0,0], axs2[0,0] , p =0.01, G_name = "Network 1" )
investigate_mu(network_2, axs1[0,1], axs2[0,1], p =0.01, G_name = "Network 2" )
investigate_mu(er_model, axs1[1,0], axs2[1,0], p =0.01, G_name= "ER model" )
investigate_mu(ba_model, axs1[1,1], axs2[1,1], p =0.01, G_name = "BA model" )
###Output
0%| | 0/10 [00:00<?, ?it/s]/var/folders/kr/bcfd33n546q4hgfqznq2t4t00000gn/T/ipykernel_82835/1888901345.py:4: DeprecationWarning: Sampling from a set deprecated
since Python 3.9 and will be removed in a subsequent version.
infected_list = random.sample(G.nodes(), k= n)
/var/folders/kr/bcfd33n546q4hgfqznq2t4t00000gn/T/ipykernel_82835/3446460704.py:8: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
[St_b, It_b, Rt_b, t_b] = np.array(spread(G, p=p, mu = mu[b]))
100%|█████████████████████████████████████████████████████████████████████████████| 10/10 [02:47<00:00, 16.79s/it]
0%| | 0/10 [00:00<?, ?it/s]/var/folders/kr/bcfd33n546q4hgfqznq2t4t00000gn/T/ipykernel_82835/1888901345.py:4: DeprecationWarning: Sampling from a set deprecated
since Python 3.9 and will be removed in a subsequent version.
infected_list = random.sample(G.nodes(), k= n)
/var/folders/kr/bcfd33n546q4hgfqznq2t4t00000gn/T/ipykernel_82835/3446460704.py:8: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
[St_b, It_b, Rt_b, t_b] = np.array(spread(G, p=p, mu = mu[b]))
100%|█████████████████████████████████████████████████████████████████████████████| 10/10 [02:59<00:00, 17.98s/it]
0%| | 0/10 [00:00<?, ?it/s]/var/folders/kr/bcfd33n546q4hgfqznq2t4t00000gn/T/ipykernel_82835/1888901345.py:4: DeprecationWarning: Sampling from a set deprecated
since Python 3.9 and will be removed in a subsequent version.
infected_list = random.sample(G.nodes(), k= n)
/var/folders/kr/bcfd33n546q4hgfqznq2t4t00000gn/T/ipykernel_82835/3446460704.py:8: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
[St_b, It_b, Rt_b, t_b] = np.array(spread(G, p=p, mu = mu[b]))
100%|█████████████████████████████████████████████████████████████████████████████| 10/10 [03:10<00:00, 19.07s/it]
0%| | 0/10 [00:00<?, ?it/s]/var/folders/kr/bcfd33n546q4hgfqznq2t4t00000gn/T/ipykernel_82835/1888901345.py:4: DeprecationWarning: Sampling from a set deprecated
since Python 3.9 and will be removed in a subsequent version.
infected_list = random.sample(G.nodes(), k= n)
/var/folders/kr/bcfd33n546q4hgfqznq2t4t00000gn/T/ipykernel_82835/3446460704.py:8: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
[St_b, It_b, Rt_b, t_b] = np.array(spread(G, p=p, mu = mu[b]))
100%|█████████████████████████████████████████████████████████████████████████████| 10/10 [02:48<00:00, 16.89s/it]
###Markdown
Reading data
###Code
import torch
from spacy.tokenizer import Tokenizer
from torchtext import data
from torchtext import datasets
SEED = 11
torch.manual_seed(SEED) ## Reproducibility
torch.backends.cudnn.deterministic = True
TEXT = data.Field(tokenize = 'spacy', include_lengths = True) ## Text field
LABEL = data.LabelField(dtype = torch.float) ## Label Field
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
import random
test_data, valid_data = test_data.split(random_state = random.seed(SEED))
print(len(train_data), len(valid_data), len(test_data))
## Let's create a vocabulary of up to 60,000 words
MAX_VOCAB_SIZE = 60000
TEXT.build_vocab(train_data,
max_size = MAX_VOCAB_SIZE,
vectors = "glove.6B.100d", ## Global Vectors for Word Representation with 6B tokens and 100d
unk_init = torch.Tensor.normal_) ## normal distribution for out-of-vocab words
## uncomment the script below and comment the script above to read the saved vocabulary vocab.txt
# import pickle
# with open('vocab.txt', 'rb') as file:
# vocab = pickle.load(file)
# TEXT.vocab = vocab
LABEL.build_vocab(train_data)
print(f"Number of words in TEXT vocab: {len(TEXT.vocab)}")
print(f"Number of words in LABEL vocab: {len(LABEL.vocab)}")
print(TEXT.vocab.freqs.most_common(10))
BATCH_SIZE = 64
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') ## Let's use GPU if available
## BucketIterator will help us minimize the amount of padding per batch
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
sort_within_batch = True,
device = device)
###Output
_____no_output_____
###Markdown
Creating the LSTM model
###Code
import torch.nn as nn
class Model(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim,
n_layers, bidirectional, dropout, pad_idx):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)
self.lstm = nn.LSTM(embedding_dim,
hidden_dim,
num_layers = n_layers,
bidirectional = bidirectional,
dropout = dropout)
self.fc = nn.Linear(hidden_dim * 2, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, text, text_lengths):
embedding = self.embedding(text) ## shape = (sent_length, batch_size)
embedded = self.dropout(embedding) ## shape = (sent_length, batch_size, emb_dim)
packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths) ## pack sequence
packed_output, (hidden, cell) = self.lstm(packed_embedded)
output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output) ## unpack sequence
## output shape = (sent_len, batch_size, hid_dim * num_directions)
## output over padding tokens are zero tensors
## hidden shape = (num_layers * num_directions, batch_size, hid_dim)
## cell shape = (num_layers * num_directions, batch_size, hid_dim)
## concat the final forward (hidden[-2,:,:]) and backward (hidden[-1,:,:]) hidden layers
## and apply dropout
hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)) ## shape = (batch_size, hid_dim * num_directions)
return self.fc(hidden)
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.4
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = Model(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, BIDIRECTIONAL, DROPOUT, PAD_IDX)
train_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"There are {train_params} trainable parameters")
###Output
There are 8310857 trainable parameters
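###Markdown
As a quick sanity check (a hedged sketch added for clarity, not part of the original notebook), the 8,310,857 trainable parameters can be broken down into the embedding table, the two bidirectional LSTM layers, and the final linear layer. INPUT_DIM works out to 60,002 here (MAX_VOCAB_SIZE plus the <unk> and <pad> tokens):
###Code
# Breakdown of the trainable parameter count
emb = INPUT_DIM * EMBEDDING_DIM                                                              # embedding table
lstm_l1 = 2 * 4 * (EMBEDDING_DIM * HIDDEN_DIM + HIDDEN_DIM * HIDDEN_DIM + 2 * HIDDEN_DIM)   # bidirectional layer 1
lstm_l2 = 2 * 4 * (2 * HIDDEN_DIM * HIDDEN_DIM + HIDDEN_DIM * HIDDEN_DIM + 2 * HIDDEN_DIM)  # bidirectional layer 2
fc = 2 * HIDDEN_DIM * OUTPUT_DIM + OUTPUT_DIM                                               # final linear layer
print(emb + lstm_l1 + lstm_l2 + fc)                                                         # 8310857
###Output
_____no_output_____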
###Markdown
Replace the initial embeddings with the pretrained GloVe embeddings
###Code
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
###Output
_____no_output_____
###Markdown
Replace the `<unk>` and `<pad>` embeddings with zeros (they were initialized with the normal distribution)
###Code
UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
print(model.embedding.weight.data)
def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_accuracy = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(1)
loss = criterion(predictions, batch.label)
accuracy = binary_accuracy(predictions, batch.label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_accuracy += accuracy.item()
return epoch_loss / len(iterator), epoch_accuracy / len(iterator)
def binary_accuracy(preds, y):
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float() #convert into float for division
accuracy = correct.sum() / len(correct)
return accuracy
def binary_classification_metrics(prediction, ground_truth):
'''
Computes metrics for binary classification
Arguments:
prediction, np array of bool (num_samples) - model predictions
ground_truth, np array of bool (num_samples) - true labels
Returns:
precision, recall, f1, accuracy - classification metrics
'''
prediction = torch.round(torch.sigmoid(prediction))
correct = (prediction == ground_truth).float() #convert into float for division
precision = 0
recall = 0
accuracy = 0
f1 = 0
tp = 0 ## true positive
tn = 0 ## true negative
fp = 0 ## false positive
fn = 0 ## false negative
for i in range(len(prediction)):
if prediction[i] == True and ground_truth[i] == True:
tp += 1
if prediction[i] == True and ground_truth[i] == False:
fp += 1
if prediction[i] == False and ground_truth[i] == True:
fn += 1
if prediction[i] == False and ground_truth[i] == False:
tn += 1
accuracy = (tp + tn)/(tp + tn + fp + fn)
precision = tp/(tp + fp) if (tp + fp) > 0 else 0
recall = tp/(tp + fn) if (tp + fn) > 0 else 0
f1 = 2 * (precision * recall)/(precision + recall) if (precision + recall) > 0 else 0
return precision, recall, f1, accuracy
def evaluate(model, iterator, criterion):
epoch_loss = 0
epoch_accuracy = 0
model.eval()
with torch.no_grad():
for batch in iterator:
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(1)
loss = criterion(predictions, batch.label)
accuracy = binary_accuracy(predictions, batch.label)
epoch_loss += loss.item()
epoch_accuracy += accuracy.item()
return epoch_loss / len(iterator), epoch_accuracy / len(iterator)
def metrics(model, iterator, criterion):
epoch_loss = 0
epoch_f1 = 0
tp = tn = fp = fn = 0
model.eval()
with torch.no_grad():
for batch in iterator:
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(1)
loss = criterion(predictions, batch.label)
precision, recall, f1, accuracy = binary_classification_metrics(predictions, batch.label)
epoch_loss += loss.item()
epoch_f1 += f1
return epoch_loss / len(iterator), epoch_f1 / len(iterator)
import time
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
import torch.optim as optim
optimizer = optim.Adam(model.parameters(), lr = 0.0017)
criterion = nn.BCEWithLogitsLoss()
model = model.to(device) ## use GPU
criterion = criterion.to(device)
N_EPOCHS = 6
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss, train_accuracy = train(model, train_iterator, optimizer, criterion)
valid_loss, valid_accuracy = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model, 'model.pt')
print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_accuracy*100:.2f}%')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_accuracy*100:.2f}%')
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
###Output
Test Loss: 0.359 | Test Acc: 88.23%
###Markdown
Saving model and vocabulary
###Code
## Use if you don't save your model during training
# torch.save(model, 'model.pt')
def save_vocab(vocab, path):
import pickle
output = open(path, 'wb')
pickle.dump(vocab, output)
output.close()
save_vocab(TEXT.vocab, 'vocab.txt')
###Output
/usr/local/lib/python3.6/dist-packages/torch/storage.py:34: FutureWarning: pickle support for Storage will be removed in 1.5. Use `torch.save` instead
warnings.warn("pickle support for Storage will be removed in 1.5. Use `torch.save` instead", FutureWarning)
###Markdown
Loading model and using for typical review
###Code
import pickle
with open('vocab.txt', 'rb') as file:
vocab = pickle.load(file)
import spacy
nlp = spacy.load('en')
def predict_sentiment(model, sentence):
model.eval()
tokenized = [tok.text for tok in nlp.tokenizer(sentence)]
indexed = [vocab.stoi[t] for t in tokenized]
length = [len(indexed)]
tensor = torch.LongTensor(indexed).to(device)
tensor = tensor.unsqueeze(1)
length_tensor = torch.LongTensor(length)
prediction = torch.sigmoid(model(tensor, length_tensor))
return prediction.item()
sentence = "Best movie ever"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
loaded_model = torch.load('model.pt', map_location = device)
predict_sentiment(loaded_model, sentence)
test_loss, test_f1 = metrics(loaded_model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test F1: {test_f1*100:.2f}%')
###Output
Test Loss: 0.274 | Test F1: 88.17%
###Markdown
Additional
###Code
from google.colab import files
files.download('model.pt')
###Output
_____no_output_____
###Markdown
Setup and EDA Generate Data Sets
###Code
independent_data = generate_data()
correlated_data = generate_data(0.9)
###Output
_____no_output_____
###Markdown
Plot Mean and Variance Over Time
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 3))
ax1.plot(independent_data.days, independent_data.mu, label="mean")
ax1.set_xlabel("Date")
ax1.set_ylabel("Log Event Count")
ax1.set_title("E(Z)")
ax2.plot(independent_data.days, independent_data.sigma, label="standard deviation")
ax2.set_xlabel("Date")
plt.ylabel("Log Event Count")
ax2.set_title("sd(Z)")
plt.tight_layout()
plt.savefig("./figures/mean_var_functions.png")
plt.show()
# The data set is underdispersed
print(np.exp(5))
print(np.sqrt(np.exp(5)))
print(np.exp(0.15))
###Output
148.4131591025766
12.182493960703473
1.161834242728283
###Markdown
Compare Data Sets
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 3))
ax1.plot(independent_data.days, independent_data.y)
ax1.set_ylim((0, 1.1*np.max(independent_data.y)))
ax1.set_xlabel("Date")
ax1.set_ylabel("Event Count")
ax1.set_title("Independent Event Counts")
ax2.plot(correlated_data.days, correlated_data.y)
ax2.set_ylim((0, 1.1*np.max(correlated_data.y)))
ax2.set_xlabel("Date")
ax2.set_ylabel("Event Count")
ax2.set_title("Correlated Event Counts")
plt.tight_layout()
fig.savefig("./figures/events_over_time.png")
plt.show()
###Output
_____no_output_____
###Markdown
Model-fitting Independent Data
###Code
gibbs_ind_ind = GibbsSampler()
gibbs_ind_cor = GibbsSampler(alpha_rho=1, beta_rho=1)
n_iter = 2000
gibbs_ind_ind.fit(independent_data, n_iter=n_iter)
pickle.dump(gibbs_ind_ind, open("gibbs_ind_ind.pkl", "wb"))
# gibbs_ind_ind = pickle.load(open("gibbs_ind_ind.pkl", "rb"))
gibbs_ind_cor.fit(independent_data, n_iter=n_iter)
pickle.dump(gibbs_ind_cor, open("gibbs_ind_cor.pkl", "wb"))
# gibbs_ind_cor = pickle.load(open("gibbs_ind_cor.pkl", "rb"))
###Output
_____no_output_____
###Markdown
Correlated Data
###Code
gibbs_cor_ind = GibbsSampler()
gibbs_cor_cor = GibbsSampler(alpha_rho=1, beta_rho=1)
gibbs_cor_ind.fit(correlated_data, n_iter=n_iter)
pickle.dump(gibbs_cor_ind, open("gibbs_cor_ind.pkl", "wb"))
# gibbs_cor_ind = pickle.load(open("gibbs_cor_ind.pkl", "rb"))
gibbs_cor_cor.fit(correlated_data, n_iter=n_iter)
pickle.dump(gibbs_cor_cor, open("gibbs_cor_cor.pkl", "wb"))
# gibbs_cor_cor = pickle.load(open("gibbs_cor_cor.pkl", "rb"))
###Output
_____no_output_____
###Markdown
Analysis
###Code
burnin = int(n_iter/4)
n_days = independent_data.days.size
# Function for plotting E(Z) and sd(Z) for two models
def plot_mean_var(mod1, mod2, filename, proportion=1.):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 3))
first_day_idx = int((1-proportion)*n_days)
ax1.plot(
independent_data.days[first_day_idx:],
independent_data.mu[first_day_idx:],
label="Truth"
)
ax1.plot(
independent_data.days[first_day_idx:],
mod1.mu.values[burnin:, first_day_idx:].mean(axis=0),
label="Independent Model",
# linestyle="dashed",
)
ax1.plot(
independent_data.days[first_day_idx:],
mod2.mu.values[burnin:, first_day_idx:].mean(axis=0),
label="Correlated Model",
# linestyle="dashdot",
)
ax1.set_xlabel("Date")
ax1.set_ylabel("Log Event Count")
ax1.set_title("E(Z)")
ax1.legend()
ax2.plot(
independent_data.days[first_day_idx:],
independent_data.sigma[first_day_idx:],
label="Truth"
)
ax2.plot(
independent_data.days[first_day_idx:],
mod1.sigma.values[burnin:, first_day_idx:].mean(axis=0),
label="Independent Model",
# linestyle="dashed",
)
ax2.plot(
independent_data.days[first_day_idx:],
mod2.sigma.values[burnin:, first_day_idx:].mean(axis=0),
label="Correlated Model",
# linestyle="dashdot",
)
ax2.set_xlabel("Date")
plt.ylabel("Log Event Count")
ax2.set_title("sd(Z)")
ax2.legend()
plt.tight_layout()
plt.savefig(filename)
plt.show()
plot_mean_var(gibbs_ind_ind, gibbs_ind_cor, "./figures/ind_data_mean_var.png", proportion=1)
plot_mean_var(gibbs_cor_ind, gibbs_cor_cor, "./figures/cor_data_mean_var.png")
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 3))
ax1.plot(independent_data.days, independent_data.y, label="Data")
ax1.plot(
independent_data.days,
np.exp(np.median(gibbs_ind_ind.mu.values[burnin:, :], axis=0)),
label="Independent Model Fit",
)
ax1.plot(
independent_data.days,
np.exp(np.median(gibbs_ind_cor.mu.values[burnin:, :], axis=0)),
label="Correlated Model Fit",
)
ax1.set_ylim((0, 1.1*np.max(independent_data.y)))
ax1.set_xlabel("Date")
ax1.set_ylabel("Event Count")
ax1.set_title("Independent Event Counts")
ax1.legend()
ax2.plot(correlated_data.days, correlated_data.y, label="Data")
ax2.plot(
independent_data.days,
np.exp(np.median(gibbs_cor_ind.mu.values[burnin:, :], axis=0)),
label="Independent Model Fit",
)
ax2.plot(
independent_data.days,
np.exp(np.median(gibbs_cor_cor.mu.values[burnin:, :], axis=0)),
label="Correlated Model Fit",
)
ax2.set_ylim((0, 1.1*np.max(correlated_data.y)))
ax2.set_xlabel("Date")
ax2.set_ylabel("Event Count")
ax2.set_title("Correlated Event Counts")
ax2.legend()
plt.tight_layout()
fig.savefig("./figures/model_fits.png")
plt.show()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 3))
ax1.plot(gibbs_ind_cor.rho.values[burnin:])
ax1.set_xlabel("Index")
ax1.set_ylabel("ρ")
ax1.set_title("Independent Data")
ax2.plot(gibbs_cor_cor.rho.values[burnin:])
ax2.set_xlabel("Index")
ax2.set_ylabel("ρ")
ax2.set_title("Correlated Data")
plt.tight_layout()
fig.savefig("./figures/rho_trace_plots.png")
plt.show()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 3))
i = 100
ax1.plot(gibbs_ind_ind.z.values[1:, i])
ax1.set_xlabel("Index")
ax1.set_ylabel(f"Z_{i+1}")
ax1.set_title("Trace Plot")
y_i = independent_data.y[i]
mu_i = gibbs_ind_ind.mu.values[burnin:, i].mean()
sigma_i = gibbs_ind_ind.sigma.values[burnin:, i].mean()
z_i_vals = np.linspace(mu_i - 2*sigma_i, mu_i + 2*sigma_i)
z_i_distribution = norm(loc=mu_i, scale=sigma_i)
density_scaling_i = z_i_distribution.cdf(np.log(y_i+1)) - z_i_distribution.cdf(np.log(y_i))
ax2.plot(z_i_vals, norm.pdf(z_i_vals, loc=mu_i, scale=sigma_i)/density_scaling_i)
ax2.set_xlabel(f"Z_{i+1}")
ax2.set_ylabel("Density")
ax2.hist(gibbs_ind_ind.z.values[burnin:, i], density=True, bins=4)
ax2.set_title("Estimated Density Plot with Sampled Values")
plt.tight_layout()
fig.savefig("./figures/z_plots.png")
plt.show()
def trace_coefs(model, filename):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 3))
p = 1
ax1.plot(model.beta.values[1:, p])
ax1.set_xlabel("Index")
ax1.set_ylabel(f"β_{p+1}")
ax1.set_title("Trace Plot")
ax2.plot(model.alpha.values[1:, p])
ax2.set_xlabel("Index")
ax2.set_ylabel(f"α_{p+1}")
ax2.set_title("Trace Plot")
plt.tight_layout()
fig.savefig(filename)
plt.show()
trace_coefs(gibbs_ind_ind, "./figures/ind_alpha_beta_plots.png")
trace_coefs(gibbs_cor_cor, "./figures/cor_alpha_beta_plots.png")
###Output
_____no_output_____
###Markdown
Import dependencies
###Code
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
###Output
_____no_output_____
###Markdown
Import plotting library
###Code
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Reflect Tables into SQLAlchemy ORM
###Code
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, and_, desc, extract
# create connection to database
engine = create_engine("sqlite:///data/hawaii.sqlite")
# reflect an existing database into a new model
Model = automap_base()
# reflect the tables
Model.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Model.classes.keys()
###Output
_____no_output_____
###Markdown
Save references to each table
###Code
# create python classes by extending the existing database models
# use a nicer representation of the class instances
class Measurement(Model):
__tablename__ = 'measurement'
def __repr__(self):
return "<{}(station='{}', date='{}', prcp='{}', tobs='{}')>".\
format(self.__class__.__name__, self.station, self.date, self.prcp, self.tobs)
class Station(Model):
__tablename__ = 'station'
def __repr__(self):
return "<{}(station='{}', name='{}', latitude='{}', longitude='{}', elevation='{}')>".\
format(self.__class__.__name__, self.station, self.name, self.latitude, self.longitude, self.elevation)
# reflect the tables
Model.prepare(engine, reflect=True)
# Create our session (link) from Python to the DB
session = Session(engine)
###Output
_____no_output_____
###Markdown
Exploratory Climate Analysis
###Code
# Design a query to retrieve the last 12 months of precipitation data and plot the results
# Calculate the date 1 year ago from the last data point in the database
last_date_reported = session.query(func.max(Measurement.date)).scalar()
last_year = datetime.strptime(last_date_reported, '%Y-%m-%d') - timedelta(365)
# Perform a query to retrieve the date and precipitation scores
results = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= last_year).\
all()
# Save the query results as a Pandas DataFrame and set the index to the date column
df = pd.DataFrame([ ( row.date, row.prcp ) for row in results ],
columns=['date', 'precipitation'] ).set_index('date')
# Fill in any missing precipitation values with 0
df.fillna(value=0, inplace=True)
# Check if there are any duplicate records
df.reset_index().duplicated().value_counts()
# Sort the dataframe by date
df.sort_values(by='date', ascending=True, inplace=True)
# Use Pandas Plotting with Matplotlib to plot the data
ax = df.plot(title='2016-2017 Precipitation Measurements\n', rot=45)
ax.set_xlabel('\nDate')
ax.set_ylabel('Inches\n')
plt.show()
# Use Pandas to calculate the summary statistics for the precipitation data
df[['precipitation']].describe()
# Design a query to show how many stations are available in this dataset?
# Station table joined to get station name in query
q = session.query(Measurement.station, Station.name, func.count(Measurement.station)).\
join(Station, Station.station == Measurement.station).\
group_by(Measurement.station, Station.name).\
order_by(desc(func.count(Measurement.station)))
# query database
stations = q.all()
print(f"There are {len(stations)} stations available in this dataset.")
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
counter = 0
print()
print(' ID Rows Name of Station')
print(' =========== ==== =======================================')
for station in stations:
counter += 1
print(f"{counter}) {station[0]} {str(station[2]).rjust(4)} {station[1]}")
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
station = session.query(Measurement.station, func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
filter(Measurement.station == q.first()[0]).\
first()
lo_temp = station[1]
hi_temp = station[2]
avg_temp = station[3]
print(f'{station[0]} is the most active station with the following readings:')
print(f'Lowest Temperature Recorded : { "{0:.2f}".format(lo_temp) }')
print(f'Highest Temperature Recorded : { "{0:.2f}".format(hi_temp) }')
print(f'Average Temperature : { "{0:.2f}".format(avg_temp) }')
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
q = session.query(Measurement.tobs).filter(Measurement.station == stations[0][0])
# Calculate the date 1 year ago from the last data point in the database
last_date_reported = session.query(func.max(Measurement.date)).\
filter(Measurement.station == stations[0][0]).\
scalar()
last_year = datetime.strptime(last_date_reported, '%Y-%m-%d') - timedelta(365)
# Perform a query to retrieve the temperature readings
results = q.filter(Measurement.date >= last_year).all()
# Plot the data using a dataframe
plt.hist([ row[0] for row in results ], bins=12, label='tobs')
plt.ylabel('Frequency')
plt.xlabel('Temperature')
plt.legend(loc='best')
plt.show()
###Output
_____no_output_____
###Markdown
Bonus Challenge Assignment
###Code
# Identify the average temperature in June at all stations across all available years in the dataset.
# Do the same for December temperature.
jun_avg_temps = [avg for avg, in session.query(func.avg(Measurement.tobs)).\
group_by(extract('year', Measurement.date)).\
filter(extract('month', Measurement.date) == 6)]
dec_avg_temps = [avg for avg, in session.query(func.avg(Measurement.tobs)).\
group_by(extract('year', Measurement.date)).\
filter(extract('month', Measurement.date) == 12)]
print(f'Number of years in June list {len(jun_avg_temps)}')
print(f'Number of years in December list {len(dec_avg_temps)}')
###Output
Number of years in June list 8
Number of years in December list 7
###Markdown
Statistical Analysis of June and December Temperatures: I preferred the paired t-test because it compares the same stations and locations at different times of the year, and it shows that there is definitely a statistically significant difference in temperatures between June and December in Hawaii.
###Code
from scipy import stats
print('Standard Deviation results:')
print(f'June Avg Temps STD : {np.std(jun_avg_temps)}')
print(f'Dec Avg Temps STD : {np.std(dec_avg_temps)}')
print()
# Welch's t-test because the sample sizes are not equal
statValue, pValue = stats.ttest_ind(jun_avg_temps, dec_avg_temps, equal_var=False)
print('Welch\'s t-test results:')
print(f'stat = {statValue}')
print(f'pValue = {pValue}')
print()
# Paired T-Test since they are for the same stations and locations at a different time
jun_copy = jun_avg_temps[:]
# paired tests require the same number of samples,
# so dropping the last year (2017) from June average temperatures
jun_copy.pop()
statValue, pValue = stats.ttest_rel(jun_copy, dec_avg_temps)
print('Paired t-test results:')
print(f'stat = {statValue}')
print(f'pValue = {pValue}')
print()
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVG, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date.between(start_date, end_date)).first()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
trip_start_date = '2018-08-01'
trip_start_calc = '2017-08-01'
temp_min, temp_avg, temp_max = calc_temps(trip_start_calc, trip_start_date)
temp_err = temp_max-temp_min
print(f'Min Temperature: {"{:.2f}".format(temp_min)}')
print(f'Max Temperature: {"{:.2f}".format(temp_max)}')
print(f'Avg Temperature: {"{:.2f}".format(temp_avg)}')
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
df = pd.DataFrame([temp_avg], columns=['avg'])
df['avg'].plot(kind='bar',
yerr=temp_err,
color='red',
alpha=0.3,
edgecolor='black',
grid=True,
figsize=(2,6),
position=0.5,
error_kw=dict(ecolor='black',elinewidth=1, capsize=5, capthick=2),
width=0.8,
title='Trip Avg Temp')
plt.ylabel('Temperature (F)')
plt.ylim((0,100))
plt.show()
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
results = session.query(Station.station,
Station.name,
Station.latitude,
Station.longitude,
Station.elevation,
func.sum(Measurement.prcp).label('precipitation')).\
join(Station, Station.station == Measurement.station).\
filter(Measurement.date.between(trip_start_calc, trip_start_date)).\
group_by(Measurement.station).\
order_by(desc(func.sum(Measurement.prcp))).\
all()
counter = 0
print()
print(' ID Total Rainfall Name of Station LAT LON ELEV ')
print(' =========== ============== ======================================= ====== ======= ======')
for station in results:
counter += 1
print(f"{counter}) {station[0]} {'{:.2f}'.format(station[5]).ljust(13)} {station[1].rjust(40)} {'{:.2f}'.format(station[2]).ljust(5)} {'{:.2f}'.format(station[3]).ljust(5)} {'{:.2f}'.format(station[4]).ljust(5)}")
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
"""Daily Normals.
Args:
date (str): A date string in the format '%m-%d'
Returns:
A list of tuples containing the daily normals, tmin, tavg, and tmax
"""
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()
daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
normals = []
# Set the start and end date of the trip
start_date = '2017-08-01'
end_date = '2018-08-01'
# Use the start and end date to create a range of dates
day1 = datetime.strptime(start_date, '%Y-%m-%d')
day2 = datetime.strptime(end_date, '%Y-%m-%d')
delta = day2-day1
trip_dates = [ (day1 + timedelta(days=i)).strftime('%Y-%m-%d') for i in range(delta.days) ]
# Strip off the year and save a list of %m-%d strings
days = [ (day1 + timedelta(days=i)).strftime('%m-%d') for i in range(delta.days) ]
# Loop through the list of %m-%d strings and calculate the normals for each date
for day in days:
normals.append(daily_normals(day)[0])
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
norm_df = pd.DataFrame(normals, columns=['min','avg','max'], index=trip_dates)
norm_df.sort_index(ascending=False, inplace=True)
print(norm_df.shape)
norm_df.head()
# Plot the daily normals as an area plot with `stacked=False`
norm_df.plot.area(stacked=False, rot=45)
plt.ylabel('Temperature (F)')
plt.xlabel('Date')
plt.legend(loc='best')
plt.show()
###Output
_____no_output_____
###Markdown
Summary: The analyses study the relationship between air quality and the number of hospitalizations for the following conditions: other respiratory tuberculoses, remainder of respiratory tuberculosis, other malignant neoplasms of the respiratory system and intrathoracic organs, other diseases of the upper respiratory tract, other diseases of the respiratory system, and other cardiovascular disorders originating in the perinatal period. **The working hypothesis is that, as the air quality of a given region worsens, the number of hospitalizations for the conditions listed above increases.** Sources: Cetesb: https://servicos.cetesb.sp.gov.br/qa/ and DataSUS: https://datasus.saude.gov.br/informacoes-de-saude-tabnet/ Imports: Section reserved for imports and global variables.
###Code
import os
import pandas as pd
import numpy as np
from urllib.parse import quote
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
sns.set_theme(style="darkgrid")
###Output
_____no_output_____
###Markdown
Cetesb (Companhia Ambiental do Estado de São Paulo): Cetesb is the São Paulo state environmental agency responsible for the control, licensing, inspection and monitoring of potentially polluting activities. These actions are aimed at promoting, protecting and restoring the quality of the air, water and soil. The data were collected with an external script; for more details see: https://github.com/BrunoASNascimento/estudo-da-correlacao-da-poluicao-atmosfericas-com-os-gastos-no-sus/blob/main/get_data_cetesb.py Cetesb data-reading configuration: Section for configuring how the Cetesb files are read, either locally or from a public repository on GitHub.
###Code
try:
print('Try local upload documents')
dir_cetesb = 'data\cetesb'
cetesb_files = os.listdir(dir_cetesb)
print('Get local documents')
except:
print('Get in github')
cetesb_files = [
'cetesb_1-Parque D.Pedro II.csv', 'cetesb_17-Osasco.csv', 'cetesb_27-Pinheiros.csv', 'cetesb_29-Grajaú-Parelheiros.csv', 'cetesb_33-Itaim Paulista.csv',
'cetesb_36-Marg.Tietê-Pte Remédios.csv', 'cetesb_40-Guarulhos-Pimentas.csv', 'cetesb_41-Campinas-Taquaral.csv', 'cetesb_48-Paulínia-Sta Terezinha.csv', 'cetesb_56-S.José Campos-Jd.Satelite.csv',
'cetesb_58-Taubaté.csv', 'cetesb_62-Guaratinguetá.csv', 'cetesb_64-Limeira.csv', 'cetesb_7-São Caetano do Sul.csv', 'cetesb_74-Jundiaí.csv', 'cetesb_77-Piracicaba.csv',
'cetesb_8-Congonhas.csv', 'cetesb_80-São José do Rio Preto.csv', 'cetesb_83-Santos-Ponta da Praia.csv', 'cetesb_84-Ribeirão Preto.csv'
]
###Output
Try local upload documents
Get local documents
###Markdown
Function to clean the data
###Code
def clean_data_cetesb(df):
df.rename(columns={
'MP10 µg/m³|Média horária': 'MP10_hourly_mean',
'MP10 µg/m³|Média 24 h': 'MP10_daily_mean',
'MP10 µg/m³|Índice / Qualidade': 'MP10_index',
'MP2.5 µg/m³|Média horária': 'MP25_hourly_mean',
'MP2.5 µg/m³|Média 24 h': 'MP25_daily_mean',
'MP2.5 µg/m³|Índice / Qualidade': 'MP25_index'
}, inplace=True)
df.loc[df['Hora']=='24:00','Hora'] = '0:00'
df['station_time'] = pd.to_datetime(df['Data']+' '+df['Hora'])
return df
###Output
_____no_output_____
###Markdown
Function to compute the OTIF (completeness) of the collected data: Since the Cetesb stations are hardware and can fail (communication problems, power outages, etc.), the data need to be checked for quality and for the percentage of loss, so that the analysis can rely on a region with a sufficiently large amount of data in the sample.
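Stated as a formula (added here only as a reading aid), the completeness score computed below is:\begin{equation}\text{OTIF} = \frac{\text{number of non-missing hourly readings}}{\text{number of hourly timestamps between the first and the last measurement}}\end{equation}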
###Code
def otif_cetesb(station):
try:
df = pd.read_csv(f"{dir_cetesb}\{station}")
except:
url = f"https://raw.githubusercontent.com/BrunoASNascimento/estudo-da-correlacao-da-poluicao-atmosfericas-com-os-gastos-no-sus/main/data/cetesb/{quote(station)}"
# print(url)
df = pd.read_csv(url)
df.drop_duplicates(inplace=True)
clean_data_cetesb(df)
theoretical_data_size = len(pd.date_range(start=df['station_time'].min(), end=df['station_time'].max(), freq='h'))
data = {
'station': df['station_name'][0],
'name_file': station,
'otif_MP10': len(df['MP10_index'].dropna())/theoretical_data_size,
'otif_MP25': len(df['MP25_index'].dropna())/theoretical_data_size,
}
return data
###Output
_____no_output_____
###Markdown
DataFrame with the MP10 and MP2.5 OTIFs
###Code
df_otif = pd.DataFrame([
otif_cetesb(info_cetesb)
for info_cetesb in cetesb_files
])
df_otif
###Output
_____no_output_____
###Markdown
Otif_MP10 >= 75%: Stations with more than 75% of the MP10 data available. Note that inhalable particles (MP10) can be loosely defined as those whose aerodynamic diameter is at most 10 µm. Depending on the size distribution in the 0-10 µm range, they can be retained in the upper respiratory system or penetrate deeper, reaching the pulmonary alveoli.
###Code
df_otif[df_otif['otif_MP10']>=0.75].reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Otif_MP2.5 >= 75%: Stations with more than 75% of the MP2.5 data available. Note that fine inhalable particles (MP2.5) can be loosely defined as those whose aerodynamic diameter is at most 2.5 µm. Because of their tiny size, they penetrate deeply into the respiratory system and can reach the pulmonary alveoli.
###Code
df_otif[df_otif['otif_MP25']>=0.75].reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Air quality index ![alt text](https://i.ibb.co/ypm47Gj/estrutura-do-indice-de-qualidade-do-ar.png) Based on this index, and as a way of normalizing the data, we treat values **less than or equal to 40 as good** and **above 40 as bad**. At this point the hourly data are normalized to monthly values, since DataSUS only offers that granularity (month and year). The normalization divides the number of readings with good and with bad air quality by the total number of readings in the period, which yields a monthly air quality index that takes all the data into account rather than the period average.
###Code
def air_quality_index(df, mp_type):
df['year'] = df['station_time'].dt.year
df['month'] = df['station_time'].dt.month
df.drop(columns=[
'Hora', 'Data',
'MP10_hourly_mean', 'MP10_daily_mean',
'MP25_hourly_mean', 'MP25_daily_mean',
'station_time'
], inplace=True)
filter_value = ['year', 'month', 'station_name']
result = pd.merge(
df[filter_value].drop_duplicates(),
df[df[mp_type] <= 40].groupby(
filter_value,
as_index=False
)[mp_type].count().rename(columns={
mp_type: 'air_quality_good'
}
),
how='left',
on=filter_value
).merge(
df[df[mp_type] > 40].groupby(
filter_value,
as_index=False
)[mp_type].count().rename(columns={
mp_type: 'air_quality_bad'
}
),
how='left',
on=filter_value).merge(
df.groupby(filter_value)[mp_type].count(
).reset_index(name='control'),
how='left',
on=filter_value
).fillna(0)
# Normalization
result = result[result['control'] > 0]
result['air_quality_good_normalizated'] = (
result['air_quality_good'] / result['control'])
result['air_quality_bad_normalizated'] = (
result['air_quality_bad'] / result['control'])
result['MP_TYPE'] = mp_type
return result
###Output
_____no_output_____
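###Markdown
For readers who find the chained merges above hard to follow, here is a minimal alternative sketch of the same monthly normalization (my own illustration, not part of the original analysis; it assumes the cleaned hourly DataFrame produced by `clean_data_cetesb`, with a numeric `MP25_index` column and a `station_name` column):
```python
import pandas as pd

def monthly_air_quality(df: pd.DataFrame, mp_col: str = "MP25_index") -> pd.DataFrame:
    # Illustrative only: same idea as air_quality_index, expressed as one groupby/agg.
    out = (
        df.assign(
            year=df["station_time"].dt.year,
            month=df["station_time"].dt.month,
            good=df[mp_col] <= 40,  # NaN readings compare as False, so they are ignored
            bad=df[mp_col] > 40,
        )
        .groupby(["station_name", "year", "month"], as_index=False)
        .agg(
            air_quality_good=("good", "sum"),
            air_quality_bad=("bad", "sum"),
            control=(mp_col, "count"),  # number of non-missing hourly readings
        )
    )
    out = out[out["control"] > 0].copy()
    out["air_quality_good_normalizated"] = out["air_quality_good"] / out["control"]
    out["air_quality_bad_normalizated"] = out["air_quality_bad"] / out["control"]
    return out
```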
###Markdown
Filtering the municipality: Based on the OTIF analyses and on the location of the station within the municipality, the municipality of Piracicaba was selected for an in-depth analysis of the air quality and hospitalization data.
###Code
filter_country = df_otif[df_otif['station']=='Piracicaba']
station, name_file = filter_country['station'].values[0], filter_country['name_file'].values[0]
station, name_file
###Output
_____no_output_____
###Markdown
Fetching and organizing the data: MP2.5 was selected because it is a finer particle and can cause problems in the respiratory system.
###Code
df = clean_data_cetesb(pd.read_csv(f"{dir_cetesb}\{name_file}"))
result = air_quality_index(df, 'MP25_index')
result['year_month'] = (result['year'].astype(str)+'/' +result['month'].astype(str).str.zfill(2))
result.sort_values(by='year_month',inplace=True,ascending=True)
result=result.reset_index(drop=True)
result.head(20)
fig, ax = plt.subplots()
fig.set_size_inches(20, 8)
ax.set(ylim=(0, 1))
# leg_good = mpatches.Patch(color='green', label='air_quality_good_normalizated')
leg_bad = mpatches.Patch(color='red', label='Qualidade do ar abaixo de 40')
# img = sns.lineplot(
# data=result,
# x='year_month',
# y='air_quality_good_normalizated',
# color='green',
# ax=ax
# )
sns.lineplot(
data=result,
x='year_month',
y='air_quality_bad_normalizated',
color='red',
ax=ax
)
plt.legend(handles=[leg_bad]) #leg_good
plt.xticks([result['year_month'][i] for i in range(0,result['year_month'].shape[0],5)],rotation=45)
plt.title(f'Gráfico da qualidade do ar abaixo de 40 no município de {station}',fontdict={'fontsize': 18})
plt.xlabel('Datas agrupadas por ano e mês')
plt.ylabel('Qualidade do ar normalizada')
sns.despine()
###Output
_____no_output_____
###Markdown
Top 5 peaks of bad air quality
###Code
result.sort_values(by='air_quality_bad_normalizated',ascending=False).head(5)
###Output
_____no_output_____
###Markdown
DataSUS: Function to load the previously downloaded DataSUS data
###Code
def read_data_sus(path):
df = pd.read_csv(
path,
sep=';',
skiprows=4,
skipfooter=12,
encoding='ISO-8859-1',
thousands=".",
decimal=","
)
df.drop_duplicates(inplace=True)
df.replace('-',np.nan,inplace=True)
df[df.columns[1:]]=df[df.columns[1:]].astype("float")
return df
###Output
_____no_output_____
###Markdown
Reading the DataSUS data: Section for configuring how to read the DataSUS files on the number of hospitalizations for the conditions: other respiratory tuberculoses, remainder of respiratory tuberculosis, other malignant neoplasms of the respiratory system and intrathoracic organs, other diseases of the upper respiratory tract, other diseases of the respiratory system, and other cardiovascular disorders originating in the perinatal period.
###Code
try:
df_hospitalizations = read_data_sus('data/sus/internacoes.csv')
except:
df_hospitalizations = read_data_sus('https://raw.githubusercontent.com/BrunoASNascimento/estudo-da-correlacao-da-poluicao-atmosfericas-com-os-gastos-no-sus/main/data/sus/internacoes.csv')
df_hospitalizations.head()
###Output
<ipython-input-453-cf3a02ed6a70>:2: ParserWarning: Falling back to the 'python' engine because the 'c' engine does not support skipfooter; you can avoid this warning by specifying engine='python'.
df = pd.read_csv(
###Markdown
Converting month names to month numbers: This transformation is necessary so that the data can be compared with and joined to the Cetesb data.
###Code
to_month_num = {
'Jan':'01',
'Fev':'02',
'Mar':'03',
'Abr':'04',
'Mai':'05',
'Jun':'06',
'Jul':'07',
'Ago':'08',
'Set':'09',
'Out':'10',
'Nov':'11',
'Dez':'12'
}
rename_date={}
for old_date in df_hospitalizations.columns[1:len(df_hospitalizations.columns)-1]:
rename_date.update({old_date : old_date[:5]+to_month_num[old_date[5:]]})
###Output
_____no_output_____
###Markdown
Cleaning the DataSUS data: The municipality field is split into two columns, the IBGE code and the municipality name, and the totals column is dropped.
###Code
df_hospitalizations.drop(columns=['Total'],inplace=True, errors='ignore')
df_hospitalizations.rename(columns=rename_date,inplace=True)
df_hospitalizations[['code_ibge','Município']]=df_hospitalizations['Município'].str.split(" ", 1, expand=True)
df_hospitalizations.head()
df_hospitalizations.tail()
###Output
_____no_output_____
###Markdown
Data transformation and normalization: The data were normalized with the min-max method: ![Min-max method](https://miro.medium.com/max/202/1*9N7QdpE_CfvkTyirk7_oWw.png)
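In case the linked image does not render, the min-max normalization applied in the cell below can be written as (added as a reading aid):\begin{equation}x' = \frac{x - \min(x)}{\max(x) - \min(x)}\end{equation}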
###Code
county = station
df_hospitalizations_analysis = pd.DataFrame()
df_hospitalizations_analysis = df_hospitalizations[df_hospitalizations.columns[:-1]][df_hospitalizations['Município']==county].T #Transpose
df_hospitalizations_analysis.rename(columns=df_hospitalizations_analysis.iloc[0],inplace=True)
df_hospitalizations_analysis.drop(df_hospitalizations_analysis.index[0], inplace = True)
df_hospitalizations_analysis= df_hospitalizations_analysis.reset_index().rename(columns={'index':'year_month'})
df_hospitalizations_analysis[county]=df_hospitalizations_analysis[county].astype("float")
df_hospitalizations_analysis[f'{county}_norm'] = ((df_hospitalizations_analysis[county]-df_hospitalizations_analysis[county].min())/(df_hospitalizations_analysis[county].max()-df_hospitalizations_analysis[county].min())) #Normalization
df_hospitalizations_analysis.head(10)
###Output
_____no_output_____
###Markdown
Plot of bad air quality (share of readings above 40) and the normalized number of hospitalizations: The plot shows the two quantities we want to analyze. Some similarities are visible at certain points in time, such as April 2014 and March 2016; however, the peak of bad air quality occurs in September 2017 while the peak of hospitalizations occurs in August 2015. This suggests that, if there is a correlation, it may be weak, or that the listed conditions are not strongly affected by MP10.
###Code
df_hospitalizations_analysis_plot = df_hospitalizations_analysis[(df_hospitalizations_analysis['year_month']>='2013')&(df_hospitalizations_analysis['year_month'].isin(result['year_month'].values[:-5]))].reset_index(drop=True)
fig, ax = plt.subplots()
fig.set_size_inches(20, 8)
ax.set(ylim=(0, 1))
leg_good = mpatches.Patch(color='green', label='Qualidade do ar abaixo de 40')
leg_bad = mpatches.Patch(color='red', label='Internações normalizada')
img = sns.lineplot(
data=result[(result['year_month']>='2013')&(result['year_month'].isin(result['year_month'].values[:-5]))],
x='year_month',
y='air_quality_bad_normalizated',
color='green',
ax=ax
)
sns.lineplot(
data=df_hospitalizations_analysis_plot,
x='year_month',
y=f'{county}_norm',
color='red',
ax=ax
)
plt.legend(handles=[leg_good,leg_bad])
plt.xticks([df_hospitalizations_analysis_plot['year_month'][i] for i in range(0,df_hospitalizations_analysis_plot['year_month'].shape[0],5)],rotation=45)
plt.title(f'Gráfico da qualidade do ar abaixo de 40 e o número de internações normalizado no município de {station}',fontdict={'fontsize': 18})
plt.xlabel('Datas agrupadas por ano e mês')
plt.ylabel('Qualidade do ar normalizada e numero de internações normalizado')
sns.despine()
###Output
_____no_output_____
###Markdown
Joining the Cetesb and DataSUS data: This join is made so that the months with above-average hospitalizations can be studied against the bad air quality (above 40) measurements.
###Code
df_air_with_hospitalizations =result.merge(df_hospitalizations_analysis[(df_hospitalizations_analysis['year_month']>='2013')&(df_hospitalizations_analysis['year_month'].isin(result['year_month'].values[:-5]))],on='year_month').sort_values(by='year_month',ascending=True)
df_air_with_hospitalizations.head(10)
###Output
_____no_output_____
###Markdown
Arithmetic mean of the normalized hospitalizations: This value gives the average over the period from January 2013 to June 2020.
###Code
index_hospitalizations_mean = df_air_with_hospitalizations[f'{county}_norm'].mean()
index_hospitalizations_mean
###Output
_____no_output_____
###Markdown
Checking for above-average hospitalizations as a function of air quality: This calculation checks whether months with hospitalizations above the period average are more frequent depending on the air quality. Based on it, we can compare the two groups and verify that there is an increase in the number of hospitalizations in the months when the air quality is worse, roughly 13% higher.
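Stated as a formula (a reading aid added here, not in the original), the two indices computed below are conditional proportions of months, with $\bar{h}$ the mean normalized hospitalization count:\begin{equation}\text{index}_{\text{bad}} = P(h \geq \bar{h} \mid \text{share of bad readings} > 0), \qquad \text{index}_{\text{good}} = P(h \geq \bar{h} \mid \text{share of bad readings} = 0)\end{equation}and the reported difference of roughly 13% is $\text{index}_{\text{bad}} - \text{index}_{\text{good}}$.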
###Code
df_air_with_hospitalizations_air_bad = df_air_with_hospitalizations[df_air_with_hospitalizations['air_quality_bad_normalizated']>0].reset_index(drop=True)
df_air_with_hospitalizations_air_good = df_air_with_hospitalizations[df_air_with_hospitalizations['air_quality_bad_normalizated']==0].reset_index(drop=True)
index_air_bad = df_air_with_hospitalizations_air_bad[df_air_with_hospitalizations_air_bad[f'{county}_norm']>=index_hospitalizations_mean]['air_quality_bad_normalizated'].count()/df_air_with_hospitalizations_air_bad['air_quality_bad_normalizated'].count()
index_air_good = df_air_with_hospitalizations_air_good[df_air_with_hospitalizations_air_good[f'{county}_norm']>=index_hospitalizations_mean]['air_quality_bad_normalizated'].count()/df_air_with_hospitalizations_air_good['air_quality_bad_normalizated'].count()
print(f'index_air_bad : {index_air_bad}')
print(f'index_air_good : {index_air_good}')
print(f'diff : {index_air_bad-index_air_good}')
###Output
index_air_bad : 0.5135135135135135
index_air_good : 0.38461538461538464
diff : 0.12889812889812885
###Markdown
Estimating text loss in The Old Norse fornaldarsögur This Python notebook is a derivative of the one which accompanies the following publication:> Mike Kestemont and Folgert Karsdorp, "Het Atlantis van de Middelnederlandse ridderepiek. Een schatting van het tekstverlies met methodes uit de ecodiversiteit". *Spiegel der letteren* (2020).Adaptation to the current study is still a work in progress. All figures and numbers were prepared using the code below. Future updates of the code and data will be managed in an open [Github repository](https://github.com/mikekestemont/chivalric_diversity). The code block below loads all (third-party) packages and modules necessary to run the module. These can be installed from the file `requirements.txt`: pip install -r requirements.txt
###Code
from functools import partial
from itertools import product
import numpy as np
np.random.seed(12345)
from scipy.special import erfinv
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("tufte.mplstyle")
plt.rcParams["text.usetex"] = False
%matplotlib inline
import scipy.stats as stats
from scipy.special import gammaln
###Output
_____no_output_____
###Markdown
Data We load the data from the spreadsheet file `mnl.xlsx`:
###Code
mnl = pd.read_excel('mnl.xlsx', header=None, names=('text', 'witness'))
mnl.head(10)
###Output
_____no_output_____
###Markdown
We are only interested in the count data, i.e. the number of witnesses per text (the technical term is "abundance data").
###Code
mnl.groupby('text').size().sort_values(ascending=False).head()
###Output
_____no_output_____
###Markdown
The counts per text can be plotted as follows:
###Code
fig, ax = plt.subplots(figsize=(10,18))
mnl.groupby('text').size().sort_values(ascending=True).plot.barh(ax=ax);
ax.set(xlabel='number of manuscripts', ylabel='',
title='Distribution of texts of ON legendary sagas (known to me)')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig('output/Fig1.jpeg', dpi=300, transparent=True)
###Output
_____no_output_____
###Markdown
Yet a different perspective is to list the size of the frequency bins that we can distinguish within the manuscript counts:
###Code
types = mnl.groupby('text').size().sort_values(ascending=False).value_counts().sort_index()
types = types.to_frame(name='number of texts')
types['number of manuscripts'] = types.index
types.to_excel('output/Tab1.xlsx')
types
###Output
_____no_output_____
###Markdown
Finally, we define the auxiliary function `species_richness` to count the number of unique texts in the data (i.e. the number of non-zero counts):
###Code
def species_richness(counts):
return np.sum(counts > 0)
print('# unique texts:', species_richness(mnl.groupby('text').size()))
print('# witnesses:', len(mnl))
###Output
# unique texts: 32
# witnesses: 87
###Markdown
Jackknife The following function computes the first-order Jackknife estimate, on the basis of the abundance data in our data frame, as well as a confidence interval (.95 by default). This approach is detailed in the following paper:> K. Burnham & W. Overton, "Robust Estimation of Population Size When Capture Probabilities Vary Among Animals". *Ecology* (1979), 927-936.
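As a reading aid (this closed-form expression is not in the original notebook), the leave-one-out procedure implemented below is equivalent to the abundance-based first-order jackknife estimator, with $S_{obs}$ the observed number of texts, $f_1$ the number of texts attested by exactly one witness, and $n$ the total number of witnesses:\begin{equation}\hat{S}_{jack1} = S_{obs} + f_1\,\frac{n-1}{n}\end{equation}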
###Code
def jackknife(data, conf_lvl=0.95):
jack_stat = species_richness(data)
x = np.array(sum([[i] * c for i, c in enumerate(data, 1)], []))
index = np.arange(x.shape[0])
vals = []
for i in range(x.shape[0]):
t = x[index != i]
vals.append(species_richness(np.bincount(t)))
mean_jack_stat = np.mean(vals)
bias = (x.shape[0] - 1) * (mean_jack_stat - jack_stat)
estimate = jack_stat - bias
std_err = np.sqrt(
(x.shape[0] - 1) *
np.mean((mean_jack_stat - vals) *
(mean_jack_stat - vals), axis=0)
)
z_score = np.sqrt(2.0) * erfinv(conf_lvl)
conf_interval = estimate + z_score * np.array((-std_err, std_err))
return estimate, std_err, conf_interval
results = jackknife(mnl.groupby('text').size())
print('jackknife-estimate (order=1):', results[0], results[-1])
###Output
jackknife-estimate (order=1): 42.873563218390935 [36.83294758 48.91417886]
###Markdown
This implementation is verbose and uses an explicit `for`-loop, which iteratively leaves out observations and tracks the drops in diversity that follow from this operation. In the code blocks below we show that the same estimate can also be obtained in a fully analytical fashion. First we calculate the frequency counts for each unique text:
###Code
num_per_text = mnl.groupby('text').size()
num_per_text
###Output
_____no_output_____
###Markdown
Next, we store the species richness (the number of unique texts) in $t$:
###Code
t = species_richness(num_per_text)
t
###Output
_____no_output_____
###Markdown
Then we set $s$ to the number of texts that are only attested in a single witness:
###Code
s = sum(num_per_text == 1)
s
###Output
_____no_output_____
###Markdown
Only when one of the $s$ witnesses that carry a singleton text is left out does the species richness drop during the iterative jackknife procedure. Writing $n$ for the total number of witnesses, we can therefore predict the mean richness over the $n$ leave-one-out samples:
###Code
n = num_per_text.sum()
mu = (((n - s) * t) + (s * (t - 1))) / n
mu
###Output
_____no_output_____
###Markdown
That means that we can calculate the bias as follows:
###Code
bias = (n - 1) * (mu - t)
bias
###Output
_____no_output_____
###Markdown
To account for this bias, we can subtract it from the original species richness in the observed data:
###Code
t - bias
###Output
_____no_output_____
###Markdown
Simple example
###Code
counts = [5, 4, 3, 3, 1, 1, 1, 1, 1]
names = 'ABCDEFGHI'
data = zip(counts, names)
df = pd.DataFrame(zip(names, counts), columns=('name', 'mss'))
df.to_excel('output/Tab2.xlsx')
df
print('total # of witnesses:', df['mss'].sum())
species_richness(df['mss'])
jackknife(df['mss'])
data = np.array(df['mss'])
x = np.array(sum([[i]*c for i, c in enumerate(data, 1)], []))
tradition = [names[i - 1] for i in x]
print(tradition)
bootstrap = []
for i in range(len(tradition)):
tradition_ = [tradition[j] for j in range(len(tradition)) if i != j]
bootstrap.append((
(i + 1), tradition[i], ''.join(tradition_),
len(set(tradition_)), len(set(tradition_)) - len(set(tradition))))
df = pd.DataFrame(bootstrap, columns=('iteration', 'leftout', 'imputed tradition', 'richness', 'error'))
df.to_excel('output/Tab3.xlsx')
df
mean_estimate = np.mean(df['richness'])
print('Average estimate:', mean_estimate)
print('Mean deviation from observed richness:', mean_estimate - 9)
bias = 19 * (mean_estimate - 9)
bias
corrected = 9 - bias
corrected
conf_lvl = .95
std_err = np.sqrt(
19 * np.mean((mean_estimate - df['richness']) *
(mean_estimate - df['richness']), axis=0))
z_score = np.sqrt(2.0) * erfinv(conf_lvl)
conf_interval = corrected + z_score * np.array((-std_err, std_err))
conf_interval
###Output
_____no_output_____
###Markdown
Chao1 In the paper we eventually opt for the more recent, non-parametric formula "Chao1", which is described in this paper:> A. Chao & L. Jost, "Estimating diversity and entropy profiles via discovery rates of new species". *Methods in Ecology and Evolution* (2015), 873-882.Because we have "doubletons" in our data, we can use the following formula, where:- $\hat{f_0}$ is the (theoretical) number of non-observed species/texts;- $f_1$ is the number of species/texts attested exactly once ("singletons");- $f_2$ is the number of species/texts attested exactly twice ("doubletons");- $n$ is the total number of individuals/manuscripts in the observed data.\begin{equation}\hat{f_0} = \frac{(n - 1)}{n} \frac{f_1^2}{2f_2}\end{equation}The code block below returns the full, theoretical species richness as estimated by Chao1, i.e. it adds the estimated $\hat{f_0}$ to the species richness that was observed in the sample:
###Code
def chao_richness(x):
x, n = x[x > 0], x.sum()
t = x.shape[0]
f1, f2 = (x == 1).sum(), (x == 2).sum()
return t + (n - 1) / n * ((f1 ** 2 / 2 / f2) if f2 > 0 else (f1 * (f1 - 1) / 2))
###Output
_____no_output_____
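###Markdown
As a quick worked example (my own illustration, not part of the original notebook), the formula can be applied by hand to the toy abundance vector from the "Simple example" section above; since that vector contains no doubletons, the bias-corrected variant of $\hat{f_0}$ applies:
```python
import numpy as np

toy = np.array([5, 4, 3, 3, 1, 1, 1, 1, 1])  # 9 observed texts, 20 witnesses
n, t = toy.sum(), (toy > 0).sum()            # n = 20, t = 9
f1, f2 = (toy == 1).sum(), (toy == 2).sum()  # f1 = 5, f2 = 0
f0 = (n - 1) / n * (f1 ** 2 / (2 * f2) if f2 > 0 else f1 * (f1 - 1) / 2)
print(t + f0)  # 9 + 9.5 = 18.5 estimated texts, matching chao_richness(toy)
```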
###Markdown
If we apply this function to our data, we obtain an even higher (but arguably more realistic) estimate of the loss in textual diversity for this literature. Note, however, that this estimate is still a theoretical *minimum estimate*, since the original loss could still be higher.
###Code
chao_richness(num_per_text)
###Output
_____no_output_____
###Markdown
Instead of reporting just this number, we apply a bootstrapped procedure in which we sample from the material using a multinomial distribution (see the Appendix of Chao and Jost, 2015) and apply Chao1 to the resulting samples. This procedure allows us to calculate a .95 confidence interval for this value.
###Code
def bt_prob(x):
x, n = x[x > 0], x.sum()
f1, f2 = (x == 1).sum(), (x == 2).sum()
C = 1 - f1 / n * (((n - 1) * f1 / ((n - 1) * f1 + 2 * f2)) if f2 > 0 else
((n - 1) * (f1 - 1) / ((n - 1) * (f1 - 1) + 2)) if f1 > 0 else
0)
W = (1 - C) / np.sum(x / n * (1 - x / n) ** n)
p = x / n * (1 - W * (1 - x / n) ** n)
f0 = np.ceil(((n - 1) / n * f1 ** 2 / (2 * f2)) if f2 > 0 else
((n - 1) / n * f1 * (f1 - 1) / 2))
p0 = (1 - C) / f0
p = np.hstack((p, np.array([p0 for i in np.arange(f0)])))
return p
def bootstrap(x, n_iter=1000, conf=.95):
# define a multinomial probability distribution
# for the bootstrap procedure to sample from:
p, n = bt_prob(x), x.sum()
data_bt = np.random.multinomial(n, p, n_iter)
pro = np.array([chao_richness(row) for row in data_bt])
pro_mean = pro.mean(0)
lci_pro = -np.quantile(pro, (1 - conf) / 2, axis=0) + pro_mean
uci_pro = np.quantile(pro, 1 - (1 - conf) / 2, axis=0) - pro_mean
sd_pro = np.std(pro, axis=0)
pro = pro_mean - pro
return (lci_pro, uci_pro, sd_pro, pro)
def chao_estimate(x, n_iter=1000, conf=0.95):
pro = chao_richness(x)
(lci_pro, uci_pro, sd_pro, bt_pro) = bootstrap(x, n_iter=n_iter, conf=conf)
lci_pro, uci_pro = pro - lci_pro, pro + uci_pro
bt_pro = pro - bt_pro
return (lci_pro, uci_pro, bt_pro, pro)
###Output
_____no_output_____
###Markdown
The following block applies this bootstrapped procedure to obtain the final estimates:
###Code
lci_pro, uci_pro, bt_pro, pro = chao_estimate(num_per_text, n_iter=10000)
print('pro:', pro)
print('lci_pro:', lci_pro)
print('uci_pro:', uci_pro)
###Output
pro: 39.47557471264368
lci_pro: 28.554685801360193
uci_pro: 63.66962833009582
###Markdown
The array `bt_pro` contains the estimates that were collected during the bootstrap (1,000 iterations by default; 10,000 were used above). Below, we plot the distribution of these numbers using a rainplot (note: the `rain_alpha=.3` argument of `pt.RainCloud()` may have to be removed if your version of ptitprince reports it as invalid):
###Code
import ptitprince as pt
fig, ax = plt.subplots(figsize=(8, 6))
d = list([(x, 'bootstrap') for x in bt_pro])
bt = pd.DataFrame(d, columns=('bootstrap', 'type'))
pt.RainCloud(
data=bt, x="type", y="bootstrap", ax=ax,
orient="h", alpha=.8, bw=.2, rain_alpha=.3, palette="Greys"
)
ax.axvline(pro, c='black', ls='--')
ax.axvline(uci_pro, c='darkgrey', ls='--')
ax.axvline(lci_pro, c='darkgrey', ls='--')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.set_yticks([])
ax.set_ylabel('')
plt.savefig('output/Fig2.png', dpi=300, transparent=True)
###Output
_____no_output_____
###Markdown
[These findings refer to the original study of Dutch romances and have not yet been analyzed for the FAS] The idea that there were at least 100 texts is not completely unlikely, but it is a very conservative estimate, at the very bottom of the probability continuum. The estimate of ~148 manuscripts (or more) is much more plausible, which would mean that *at least half of the chivalric texts have been lost*. Just as 100 is an extremely optimistic estimate, ~219 is the most pessimistic estimate: in that case, only a third of the chivalric epics ever available would have survived through time, which is quite a dramatic, but not entirely unrealistic figure. Species accumulation curve In what preceded, we have investigated how many unique texts may have been lost, or, more positively, how many unique texts we may not yet have seen. In this concluding section, we investigate how many texts would have to be retrieved before we arrive at this diversity estimate. This new estimate provides us with information about the total population size, i.e. the total number of text witnesses. We follow Hsieh, Ma and Chao (2016) to compute this estimate using "Rarefaction Extrapolation". For details about this method, see:> Hsieh, Ma and Chao (2016): iNEXT: an R package for rarefaction and extrapolation of species diversity. *Methods in Ecology and Evolution*, 7, 1451–1456.
###Code
def bootstrap_re(x, fn=chao_richness, n_iter=1000, conf=.95):
# define a multinomial probability distribution
# for the bootstrap procedure to sample from:
p, n = bt_prob(x), x.sum()
data_bt = np.random.multinomial(n, p, n_iter)
Dq = fn(x)
pro = np.array([fn(row) for row in data_bt])
error = stats.norm.ppf(1 - (1 - conf) / 2) * np.std(pro, 0)
lci_pro = Dq - error
uci_pro = Dq + error
sd_pro = np.std(pro, axis=0)
return (lci_pro, uci_pro, sd_pro, Dq, )
def rarefaction_extrapolation(x, max_steps):
x, n = x[x > 0], x.sum()
def _sub(m):
if m <= n:
return np.sum(1 - np.array(
[np.exp(gammaln(n - i + 1) + gammaln(n - m + 1) -
gammaln(n - i - m + 1) - gammaln(n + 1)) if i <= (n - m) else
0 for i in x]))
else:
S = (x > 0).sum()
f1, f2 = (x == 1).sum(), (x == 2).sum()
f0 = ((n - 1) / n * f1 * (f1 - 1) / 2) if f2 == 0 else ((n - 1) / n * f1**2 / 2 / f2)
A = n * f0 / (n * f0 + f1)
return S if f1 == 0 else (S + f0 * (1 - A**(m - n)))
return np.array([_sub(mi) for mi in range(1, max_steps)])
counts = np.bincount(mnl.groupby('text').size())[1:] # ignore zero
x = np.array(sum([[i] * c for i, c in enumerate(counts, 1)], []))
###Output
_____no_output_____
###Markdown
Here too we use a bootstrap method with 100 samples:
###Code
max_steps = 1000
lci_pro, uci_pro, sd_pro, Dq = bootstrap_re(
x,
fn=partial(rarefaction_extrapolation, max_steps=max_steps),
n_iter=100
)
steps = np.arange(1, max_steps)
interpolated = np.arange(1, max_steps) < x.sum()
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(steps[interpolated], Dq[interpolated], color='C0')
ax.plot(x.sum(), Dq[x.sum() - 1], 'o')
ax.plot(steps[~interpolated], Dq[~interpolated], '--', color='C0')
ax.fill_between(steps, lci_pro, uci_pro, alpha=0.3)
ax.grid()
ax.set(xlabel='# of manuscripts', ylabel='# texts', title='Species Accumulation Curve')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig('output/Fig3.png', dpi=300, transparent=True)
###Output
_____no_output_____
###Markdown
Statistics:
- Annual PM2.5 Average in India
- Annual PM2.5 Average in New Delhi
- Heat Map of PM2.5 Average in New Delhi for 2016
- Heat Map of PM2.5 Averages in India for 2016
- Most polluted and least polluted cities in India
- Most polluted and least polluted neighborhoods in New Delhi
###Code
f = open('id-mappings/city-ids.txt', 'r')
cities = f.readlines()
cities = map(lambda elem: elem.split(","), cities)[1:]
cities = map(lambda elem: [elem[0], elem[1], elem[2], elem[3][:-1]], cities)
cities = filter(lambda elem: elem[2].isdigit(), cities)
india_data = {}
for elem in cities:
_, stateName, _, cityName = elem
f = open('data/{}_{}.txt'.format(stateName, cityName), 'r')
city_data = f.readlines()
city_data = filter(lambda elem: elem != "\n", city_data)
city_data = map(lambda elem: elem.split(","), city_data)
city_data = filter(lambda elem: elem[0] == "2016", city_data)
city_data = map(lambda elem: float(elem[1].rstrip("\n")), city_data)
if len(city_data) > 0:
print city_data
india_data[(stateName, cityName)] = np.mean(city_data)
india_data = [[k, v] for (k, v) in india_data.iteritems()]
india_data = sorted(india_data, key=lambda x: x[1])
gmaps.configure(api_key=os.environ["GOOGLE_API_KEY"])
def decode_address_to_coordinates(address):
params = {
'address' : address,
'sensor' : 'false',
}
url = 'http://maps.google.com/maps/api/geocode/json'
r = requests.get(url, params = params)
return r.json()['results'][0]['geometry']['location']
locations = []
for (state, city), val in india_data:
locations.append([(state, city), decode_address_to_coordinates("{}, {}".format(city, state)).values()])
india_coordinates = decode_address_to_coordinates("India").values()
fig = gmaps.figure(center=india_coordinates, zoom_level=4)
weights = map(lambda x: x[1], india_data)
coordinates = map(lambda x: x[1], locations)
heatmap_layer = gmaps.heatmap_layer(coordinates, weights=weights)
heatmap_layer.max_intensity = 200
heatmap_layer.point_radius = 2.0
heatmap_layer.dissipating = False
fig.add_layer(heatmap_layer)
info_box_template = """
<div>
<p><b>City:</b> {0}, {1}</p>
<p><b>PM2.5:</b> {2:.2f}</p>
</div>
"""
city_info = [info_box_template.format(city_data[0][1], city_data[0][0], city_data[1]) for city_data in india_data]
marker_layer = gmaps.marker_layer(coordinates, info_box_content=city_info)
fig.add_layer(marker_layer)
fig
embed_minimal_html('national-aq.html', views=[fig])
for d in india_data[:5]:
print "{0}: {1:.2f} ug/m3".format(d[0][1], d[1])
for d in india_data[-5:]:
print "{0}: {1:.2f} ug/m3".format(d[0][1], d[1])
coordinates = np.array(coordinates)
x = coordinates[:, 0]
y = coordinates[:, 1]
# Interpolating and plotting again
rbfi = Rbf(x, y, weights, function = "inverse")
# sleep(0.05)
data = open('indian-cities.csv', 'r').readlines()
data = map(lambda x: x.split("\r"), data)[0]
cities = []
for city in data:
cities.append(decode_address_to_coordinates("{}, India".format(city)).values())
sleep(1)
cities = np.array(cities)
print cities
aq = rbfi(cities[:, 0], cities[:, 1])
aq = map(lambda x: x if x > 0.0 else 0.0, aq)
print aq
fig = gmaps.figure(center=india_coordinates, zoom_level=4)
info_box_template = """
<div>
<p><b>City:</b> {0}</p>
<p><b>PM2.5:</b> {1:.2f}</p>
</div>
"""
city_info = [info_box_template.format(data[i], aq[i]) for i in range(0, len(data))]
marker_layer = gmaps.marker_layer(cities, info_box_content=city_info)
fig.add_layer(marker_layer)
fig
embed_minimal_html('national-aq-interp.html', views=[fig])
delhi_coordinates = decode_address_to_coordinates("New Delhi").values()
f = open('data/Delhi_Delhi.txt', 'r')
delhi_data = f.readlines()
delhi_data = filter(lambda elem: elem != "\n", delhi_data)
delhi_data = map(lambda elem: elem.split(","), delhi_data)
filtered_delhi_data = []
station = -1
for line in delhi_data:
if len(line) > 2 and "station" in line[2]:
station = line[2].split(":")[1].rstrip("\n")
if line[0] == "2016":
filtered_delhi_data.append([station, line[1].rstrip("\n")])
delhi_data = filtered_delhi_data
f = open('id-mappings/station-ids.txt', 'r')
stations = f.readlines()
stations = map(lambda elem: elem.split(","), stations)
stations = filter(lambda elem: elem[2] == "85" and elem[4].isdigit(), stations)
stations = {station[4]:station[5].rstrip("\n") for station in stations}
print stations, delhi_data
delhi_station_coordinates = []
for station in delhi_data:
delhi_station_coordinates.append(decode_address_to_coordinates("{}, Delhi".format(stations[station[0]])).values())
sleep(1)
print delhi_station_coordinates
delhi_coordinates = decode_address_to_coordinates("New Delhi, Delhi").values()
fig = gmaps.figure(center=delhi_coordinates, zoom_level=11)
weights = np.array(delhi_data)[:, 1]
heatmap_layer = gmaps.heatmap_layer(delhi_station_coordinates, weights=weights)
heatmap_layer.max_intensity = 200
heatmap_layer.point_radius = 35.0
fig.add_layer(heatmap_layer)
info_box_template = """
<div>
<p><b>Station:</b> {0}</p>
<p><b>PM2.5:</b> {1}</p>
</div>
"""
print delhi_data
station_info = [info_box_template.format(stations[d[0]], d[1]) for d in delhi_data]
marker_layer = gmaps.marker_layer(delhi_station_coordinates, info_box_content=station_info)
fig.add_layer(marker_layer)
fig
embed_minimal_html('delhi-aq-pm25.html', views=[fig])
###Output
_____no_output_____
###Markdown
AnalysisNotebook author: Martin Saveski ([email protected])Copyright (c) Facebook, Inc. and its affiliates.This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.
###Code
# load libraries
suppressMessages(library(tidyverse))
suppressMessages(library(cowplot))
suppressMessages(library(ggsci))
suppressMessages(library(scales))
# setup
setwd("~/code/social-catalysts")
source("scripts/utils.R")
dta_root <- "data/"
plt_root <- "figs_cscw/"
theme_set(theme_light())
colors = c("Catalyst" = "#ee0001", "Matched" = "#3b4992")
###Output
_____no_output_____
###Markdown
Posts Analysis Topics (Fig 2)
###Code
df_empath <- readRDS(str_c(dta_root, "post_analysis/empath.rds"))
df_empath <- df_empath %>%
ungroup() %>%
mutate(post_cm = str_to_title(post_cm))
# (A) counts
plt_empath <- df_empath %>%
ggplot(aes(
x = fct_reorder(topic, m),
y = m,
fill = fct_rev(post_cm)
)) +
geom_bar(stat = "identity",
position = position_dodge(),
width = 0.7) +
geom_errorbar(
aes(ymin = low_ci, ymax = high_ci),
position = position_dodge(width = 0.7),
color = "black",
size = 0.3,
width = 0.5
) +
labs(x = NULL, y = "Average Count", fill = NULL) +
scale_fill_manual(values = colors) +
coord_flip() +
theme(
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
legend.position = "none"
)
# (B) differences
df_empath_dd <- df_empath %>%
select(post_cm, topic, m, se) %>%
mutate(post_cm = str_to_lower(post_cm)) %>%
multi_spread(post_cm, c(m, se)) %>%
mutate(
d_m = catalyst_m - matched_m,
d_se = sqrt(catalyst_se^2 + matched_se^2),
d_ci = 1.96 * d_se,
d_m_low = d_m - d_ci,
d_m_high = d_m + d_ci
) %>%
arrange(desc(d_m))
# variables for coloring the CIs
df_empath_dd <- df_empath_dd %>%
mutate(
b_start = case_when(
d_m_low > 0 & d_m_high > 0 ~ d_m,
d_m_low < 0 & d_m_high > 0 ~ d_m_low,
d_m_low < 0 & d_m_high < 0 ~ d_m_low
),
b_end = case_when(
d_m_low > 0 & d_m_high > 0 ~ d_m_high,
d_m_low < 0 & d_m_high > 0 ~ d_m_high,
d_m_low < 0 & d_m_high < 0 ~ d_m
),
w_start = case_when(
d_m_low > 0 & d_m_high > 0 ~ d_m_low,
d_m_low < 0 & d_m_high > 0 ~ 0,
d_m_low < 0 & d_m_high < 0 ~ d_m
),
w_end = case_when(
d_m_low > 0 & d_m_high > 0 ~ d_m,
d_m_low < 0 & d_m_high > 0 ~ d_m,
d_m_low < 0 & d_m_high < 0 ~ d_m_high
)
)
plt_empath_dd <- df_empath_dd %>%
ggplot(aes(x = fct_reorder(topic, d_m), y = d_m)) +
geom_bar(stat = "identity", width = 0.6) +
geom_errorbar(
aes(ymin = b_start, ymax = b_end),
width = 0,
color = "black",
size = 0.8
) +
geom_errorbar(
aes(ymin = w_start, ymax = w_end),
width = 0,
color = "white",
size = 0.8
) +
labs(x = "", y = "Catalyst - Matched") +
scale_x_discrete(position = "top") +
scale_y_continuous(breaks = pretty_breaks(), limits = c(-0.01, 0.04)) +
coord_flip() +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank())
p_row <- plot_grid(plt_empath, plt_empath_dd, ncol = 2, align = "h")
legend <- get_legend(
plt_empath +
guides(fill = guide_legend(reverse = T)) +
theme(legend.position="bottom")
)
plt_empath_full <- plot_grid(p_row, legend, ncol=1, rel_heights = c(1, .08))
options(repr.plot.width=8, repr.plot.height=6.5)
print(plt_empath_full)
###Output
_____no_output_____
###Markdown
User Analysis Ego networks (Fig 3)
###Code
df_ego_stats <- readRDS(str_c(dta_root, "user_ego_nets_sample/ego_stats.rds"))
df_ego_stats_inc <- df_ego_stats %>%
ungroup() %>%
select(is_catalyst, field, m, se) %>%
multi_spread(is_catalyst, c(m, se)) %>%
group_by(field) %>%
do(
per_change_delta_se(
.$catalyst_m, .$matched_m,
.$catalyst_se, .$matched_se
)
)
ego_stats_fields <- c(
"n_nodes",
"density",
"avg_clust",
"avg_degree",
"var_degrees",
"deg_assortativity",
"fiedler",
"modularity"
)
df_ego_stats_inc <- df_ego_stats_inc %>%
ungroup() %>%
filter(field %in% ego_stats_fields) %>%
mutate(
field = case_when(
field == "n_nodes" ~ "Number of Nodes (Friends)",
field == "density" ~ "Density",
field == "avg_degree" ~ "Degree Average",
field == "var_degrees" ~ "Degree Variance",
field == "deg_assortativity" ~ "Degree Assortativity",
field == "fiedler" ~ "Algebraic Connectivity",
field == "avg_clust" ~ " Average Clustering Coefficient",
field == "modularity" ~ "Modularity"
),
field = factor(
field,
levels = rev(c(
"Number of Nodes (Friends)",
"Number of Edges",
"Density",
"Degree Average",
"Degree Variance",
"Degree Assortativity",
" Average Clustering Coefficient",
"Algebraic Connectivity",
"Modularity"
)
))
)
plt_ego_stats_inc <- df_ego_stats_inc %>%
ggplot(aes(
x = field,
y = mean,
ymin = lower95,
ymax = upper95
)) +
geom_point(size = 2) +
geom_errorbar(width = 0, size = 0.6) +
geom_hline(aes(yintercept = 0), linetype = "dashed") +
labs(x = NULL, y = "Catalyst vs Matched users (% increase)") +
scale_y_continuous(labels = percent_format(accuracy = 1),
limits = c(-0.013, 0.3)) +
scale_color_aaas() +
coord_flip() +
theme(
axis.ticks = element_blank(),
strip.text.y = element_text(color = "black", angle = 0),
strip.background.y = element_rect(fill = "grey90"),
legend.position = "none"
)
# k-core
df_ego_k_core <- readRDS(str_c(dta_root, "user_ego_nets_sample/k_core.rds"))
df_ego_k_core <- df_ego_k_core %>% ungroup() %>% filter(threshold < 16)
plt_ego_k_core <- df_ego_k_core %>%
ggplot(aes(x = threshold, y = m, color=is_catalyst)) +
geom_line() +
geom_point() +
geom_errorbar(aes(ymin=low_ci, ymax=high_ci), width=0.2) +
scale_x_continuous(trans = log2_trans(),
breaks = c(2, 4, 8),
labels = c(expression(2^1), expression(2^2), expression(2^3))) +
labs(x = "k", y = "Components in k-core", color = NULL) +
expand_limits(y = 1) +
guides(color = guide_legend(reverse = T)) +
scale_color_aaas() +
theme(
panel.grid.minor.y = element_blank(),
legend.position="bottom"
)
# k brace
df_ego_k_truss <- readRDS(str_c(dta_root, "user_ego_nets_sample/k_truss.rds"))
df_ego_k_truss <- df_ego_k_truss %>% ungroup() %>% filter(threshold < 16)
plt_ego_k_truss <- df_ego_k_truss %>%
ggplot(aes(x = threshold, y = m, color=is_catalyst)) +
geom_line() +
geom_point() +
geom_errorbar(aes(ymin=low_ci, ymax=high_ci), width=0.2) +
scale_x_continuous(trans = log2_trans(),
breaks = c(2, 4, 8),
labels = c(expression(2^1), expression(2^2), expression(2^3))) +
scale_y_continuous(breaks = seq(1, 8, by = 1)) +
labs(x = "k", y = "Components in k-brace", color = NULL) +
expand_limits(y = 1) +
guides(color = guide_legend(reverse = T)) +
scale_color_aaas() +
theme(
panel.grid.minor.y = element_blank(),
legend.position="bottom"
)
# group figures
plt_ego_nets_all <- plot_grid(
plt_ego_stats_inc,
plt_ego_k_core + theme(legend.position="none"),
plt_ego_k_truss + theme(legend.position="right"),
labels = c('A', 'B', 'C'),
nrow = 1,
rel_widths = c(1, 0.615, 0.82),
align = "h"
)
options(repr.plot.width=11, repr.plot.height=3)
print(plt_ego_nets_all)
###Output
_____no_output_____
###Markdown
Survey Questions Overlap (Fig 5)
###Code
df_overlap <- readRDS(str_c(dta_root, "survey/overlap.rds"))
df_overlap <- df_overlap %>%
mutate(col = ifelse(v > 0.345, "white", "black"))
plt_overlap <- df_overlap %>%
ggplot(aes(q_i, fct_rev(q_j), fill = v)) +
geom_tile() +
geom_tile(color = "black", linetype = 1, size = 0.2) +
geom_text(aes(label = v, color = col)) +
scale_fill_material("grey", na.value = 'white') +
scale_color_manual(values = c("black", "white")) +
scale_x_discrete(position = "top") +
labs(
x = expression(paste("Question ", italic("i"))),
y = expression(paste("Question ", italic("j")))
) +
guides(
color = F,
fill = guide_colourbar(
draw.ulim = FALSE,
draw.llim = FALSE,
label.theme = element_text(colour = "black", size = 8, margin = margin(l=5))
)) +
theme(
axis.ticks = element_blank(),
panel.border = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
legend.title = element_blank(),
legend.position = "right"
)
options(repr.plot.width=4, repr.plot.height=3)
print(plt_overlap)
###Output
Warning message:
“Removed 4 rows containing missing values (geom_text).”
###Markdown
Nominated Percentiles (Fig 6)
###Code
df_nom_percentiles <- readRDS(
str_c(
dta_root,
"survey_nominated_percentiles/nominated_percentile_per_nomination.rds"
)
)
df_nom_percentiles <- df_nom_percentiles %>%
ungroup() %>%
filter(field != 'catalyst comments (per post)') %>%
mutate(
field = case_when(
field == "posts" ~ "Number of Posts",
field == "catalyst comments (total)" ~ "Number of Catalyst Comments",
field == "mutual friends" ~ "Number of Mutual Friends",
field == "friends" ~ "Number of Friends"
),
field = factor(
field,
levels = c(
"Number of Posts",
"Number of Friends",
"Number of Mutual Friends",
"Number of Catalyst Comments"
)
),
nomination_number = case_when(
nomination_number == "0" ~ "1",
nomination_number == "1" ~ "2",
nomination_number == "2" ~ "3"
),
question_code = str_to_upper(question_code)
)
plt_nom_percentiles <- df_nom_percentiles %>%
ggplot(aes(x = question_code, y = m, color = nomination_number)) +
geom_point(size = 2, position = position_dodge(0.9)) +
geom_errorbar(aes(ymin = low_ci, ymax = high_ci),
width = 0.8,
position = position_dodge(0.9)) +
geom_hline(aes(yintercept = 0.5), linetype = "dashed") +
facet_wrap(~ field, ncol = 4) +
labs(x = NULL, y = "Mean Percentile Rank of Nominated Users", color = "Nomination Number") +
scale_y_continuous(labels = percent_format(accuracy = 1),
breaks = seq(0, 0.8, 0.1)) +
expand_limits(y = 0) +
scale_color_aaas() +
theme(
panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
strip.text.x = element_text(color = "black", size = 8, face = "bold"),
strip.background.x = element_rect(fill = "grey95"),
legend.position = "bottom"
)
options(repr.plot.width=11, repr.plot.height=4)
print(plt_nom_percentiles)
###Output
_____no_output_____
###Markdown
Percent Increase in Mean Catalystness (Fig 7)
###Code
df_tot_cat_per_q <- readRDS(str_c(
dta_root,
"survey_catalystness_per_question/tot_catalystness.rds"
))
df_tot_cat_per_q <- df_tot_cat_per_q %>%
ungroup() %>%
mutate(question_code = str_to_upper(question_code)) %>%
rename(
mean = avg_total_cat,
std = std_total_cat
) %>%
multi_spread(is_nominated, c(mean, std, n))
df_tot_cat_per_q_inc <- df_tot_cat_per_q %>%
group_by(question_code) %>%
do(
per_change_delta(
.$nominated_mean, .$matched_mean,
.$nominated_n, .$matched_n,
.$nominated_std, .$matched_std
)
)
plt_cat_total_inc <- df_tot_cat_per_q_inc %>%
ggplot(aes(x = fct_rev(question_code), y = mean)) +
geom_point(size = 3) +
geom_errorbar(aes(ymin = lower95, ymax = upper95), width = 0, size = 0.6) +
geom_hline(aes(yintercept = 0), linetype = "dashed") +
labs(x = NULL, y = "Catalysts Comments of Nominated vs Matched users \n (% increase)") +
scale_y_continuous(labels = percent) +
expand_limits(y = 0) +
coord_flip() +
theme(
axis.ticks = element_blank()
)
options(repr.plot.width=4.5, repr.plot.height=3)
print(plt_cat_total_inc)
###Output
_____no_output_____
###Markdown
Schelling Segregation Model BackgroundThe Schelling (1971) segregation model is a classic of agent-based modeling, demonstrating how agents following simple rules lead to the emergence of qualitatively different macro-level outcomes. Agents are randomly placed on a grid. There are two types of agents, one constituting the majority and the other the minority. All agents want a certain number (generally, 3) of their 8 surrounding neighbors to be of the same type in order for them to be happy. Unhappy agents will move to a random available grid space. While individual agents do not have a preference for a segregated outcome (e.g. they would be happy with 3 similar neighbors and 5 different ones), the aggregate outcome is nevertheless heavily segregated. ImplementationThis is a demonstration of running a Mesa model in an IPython Notebook. The actual model and agent code are implemented in Schelling.py, in the same directory as this notebook. Below, we will import the model class, instantiate it, run it, and plot the time series of the number of happy agents.
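To make the rule concrete, below is a minimal sketch of the happiness check described above (illustrative only; the actual model code lives in Schelling.py and may differ in details):

```python
# Minimal sketch of the Schelling happiness rule (not the Schelling.py implementation):
# an agent is happy if at least `homophily` of its 8 surrounding cells hold an agent
# of the same type.
def is_happy(grid, pos, homophily=3):
    x, y = pos
    own_type = grid[pos]
    similar = 0
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if (dx, dy) == (0, 0):
                continue
            neighbor = grid.get((x + dx, y + dy))  # None means the cell is empty
            if neighbor == own_type:
                similar += 1
    return similar >= homophily

# Toy neighborhood: agent types 0 and 1; empty cells are simply absent from the dict.
toy_grid = {(0, 0): 0, (0, 1): 0, (1, 0): 1, (1, 1): 0, (2, 2): 0}
print(is_happy(toy_grid, (1, 1)))  # True: 3 of the 4 occupied neighbors share type 0
```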
###Code
import matplotlib.pyplot as plt
%matplotlib inline
from model import SchellingModel
###Output
_____no_output_____
###Markdown
Now we instantiate a model instance: a 10x10 grid, with an 80% chance of an agent being placed in each cell, approximately 20% of agents set as minorities, and agents wanting at least 3 similar neighbors.
###Code
model = SchellingModel(10, 10, 0.8, 0.2, 3)
###Output
_____no_output_____
###Markdown
We want to run the model until all the agents are happy with where they are. However, there's no guarantee that a given model instantiation will *ever* settle down. So let's run it for either 100 steps or until it stops on its own, whichever comes first:
###Code
while model.running and model.schedule.steps < 100:
model.step()
print(model.schedule.steps) # Show how many steps have actually run
###Output
46
###Markdown
The model has a DataCollector object, which checks and stores how many agents are happy at the end of each step. It can also generate a pandas DataFrame of the data it has collected:
###Code
model_out = model.datacollector.get_model_vars_dataframe()
model_out.head()
###Output
_____no_output_____
###Markdown
Finally, we can plot the 'happy' series:
###Code
model_out.happy.plot()
###Output
_____no_output_____
###Markdown
For testing purposes, here is a table giving each agent's x and y values at each step.
###Code
x_positions = model.datacollector.get_agent_vars_dataframe()
x_positions.head()
###Output
_____no_output_____
###Markdown
Effect of Homophily on segregationNow, we can do a parameter sweep to see how segregation changes with homophily.First, we create a function which takes a model instance and returns what fraction of agents are segregated -- that is, have no neighbors of the opposite type.
###Code
from mesa.batchrunner import BatchRunner
def get_segregation(model):
'''
Find the % of agents that only have neighbors of their same type.
'''
segregated_agents = 0
for agent in model.schedule.agents:
segregated = True
for neighbor in model.grid.neighbor_iter(agent.pos):
if neighbor.type != agent.type:
segregated = False
break
if segregated:
segregated_agents += 1
return segregated_agents / model.schedule.get_agent_count()
###Output
_____no_output_____
###Markdown
Now, we set up the batch run, with a dictionary of fixed and changing parameters. Let's hold everything fixed except for Homophily.
###Code
parameters = {"height": 10, "width": 10, "density": 0.8, "minority_pc": 0.2,
"homophily": range(1,9)}
model_reporters = {"Segregated_Agents": get_segregation}
param_sweep = BatchRunner(SchellingModel, parameters, iterations=10,
max_steps=200, model_reporters=model_reporters)
param_sweep.run_all()
df = param_sweep.get_model_vars_dataframe()
plt.scatter(df.homophily, df.Segregated_Agents)
plt.grid(True)
###Output
_____no_output_____
###Markdown
This is a heading This is a subheading!This is normal text!*** I'm excited!! ***| Tables | Are | Cool || ------------- |:-------------:| -----:|| col 3 is | right-aligned | $1600 || col 2 is | centered | $12 || zebra stripes | are neat | $1 |
###Code
name = report["First Name"]
name.describe()
doctors = report["Attnd. Phys."]
doctors.describe()
###Output
_____no_output_____
###Markdown
Given that there are only 6 attending phys., there is probably only one hospital. It's called "central" so we'll assume that it is between the river and the park.
###Code
sectors = report['Sector']
sectors.describe()
###Output
_____no_output_____
###Markdown
- Sector 22 is the most infected! - There are patients from only 18 unique sectors...
###Code
sector_names = [sector for sector in np.unique(sectors)]
sector_names
###Output
_____no_output_____
###Markdown
###Code
report["Admission"].unique()
report.where(report["Admission"] == '2018-03-04').groupby("Sector").size()
report[report["Admission"]=='2018-03-04']
###Output
_____no_output_____
###Markdown
The aforementioned is the earliest recorded infected patient -- we assume patient 0.- Named Lila DeVoulier- Female- Aged 12- Patient reports from sector 26.- Maybe an immigrant? No SIN.
###Code
report["DOD"].unique()
###Output
_____no_output_____
###Markdown
We may assume that the earliest possible submission date of the report was March 16th 2018, since that is the last reported date in the data.
###Code
report[report["SIN"].isnull()]
report[report["Last Name"]=="Heskey"]
report[report["Last Name"]=="Lila"]
###Output
_____no_output_____
###Markdown
We have found that there are two people without a SIN -- both are from sector 26, and both are children. We were not able to identify any relations that were admitted. DeVoulier Lila - Aged 12, Female > Patient Zero, alive. Moises Heskey -- Aged 0, Male, deceased
###Code
report["Age "].describe()
report.hist(column="Age ", figsize=(9,6), bins=20)
plt.show()
report["Gender"].describe()
###Output
_____no_output_____
###Markdown
Looking at age and gender of admitted patients does not appear to reveal anything statistically significant.
###Code
report["Gender"].where(report["Status"]=="DECEASED").describe()
###Output
_____no_output_____
###Markdown
52% of infected females are dead. 51% of infected males are dead.
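Those rates can be reproduced directly from the report; a one-line sketch, assuming the same `Gender` and `Status` columns used above:

```python
# Death rate by gender: share of admitted patients per gender whose Status is DECEASED.
report.groupby("Gender")["Status"].apply(lambda s: (s == "DECEASED").mean())
```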
###Code
report["Sector"].value_counts()
report["Sector"].where(report["Status"] == "DECEASED").value_counts()
###Output
_____no_output_____
###Markdown
EMF database of the Bundesnetzagentur (German Federal Network Agency)
###Code
%matplotlib inline
import json
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
def to_gpd(df, lat='lat', lng='lng'):
geometry = [Point(xy) for xy in zip(df[lng], df[lat])]
crs = {'init': 'epsg:4326'}
return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
germany = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
germany = germany[germany['name'] == 'Germany']
germany
def get_data():
with open('data/positions.jsonl') as f:
for line in f:
yield json.loads(line)
df = pd.DataFrame.from_records(get_data())
df.head()
df['kind'].value_counts()
###Output
_____no_output_____
###Markdown
Amateur radio approvals (Amateurfunk-Freigaben)
###Code
afu_df = pd.DataFrame.from_records(df[df['kind'] == 'GetAFuFreigabe']['position'].values)
afu_df.head()
afu_df = to_gpd(afu_df, lat='Lat', lng='Lng')
base = germany.plot(color='white', edgecolor='black')
afu_df.plot(ax=base, alpha=0.2)
###Output
_____no_output_____
###Markdown
Antennas
###Code
def get_antennas(df):
for _, p in df.iterrows():
if not isinstance(p['antennas'], list):
continue
for antenna in p['antennas']:
antenna['datum'] = p['datum']
antenna['standortbescheinigung_nr'] = p['standortbescheinigung_nr']
antenna.update(p['position'])
yield antenna
a_df = pd.DataFrame.from_records(get_antennas(df[df['kind'] == 'GetStandorteFreigabe']))
a_df['datum'] = pd.to_datetime(a_df['datum'], format='%d.%m.%Y')
a_df.head()
a_df.to_csv('data/antennen.csv', index=False)
ag_df = to_gpd(a_df, lat='Lat', lng='Lng')
base = germany.plot(color='white', edgecolor='black')
ag_df.plot(ax=base, alpha=0.01, markersize=0.5)
###Output
_____no_output_____
###Markdown
Data Cleaning
###Code
df.dropna(inplace=True)
# converting string time to a timestamp
df['time'] = pd.to_datetime(df['time'], errors='coerce')
# Getting hour and month from the time column
df['hour'] = df['time'].dt.hour
df['month'] = df['time'].dt.month
df.head(5)
###Output
_____no_output_____
###Markdown
KPI card texts
###Code
# Total tweet card
total_tweet = len(df)
print(f'TOTAL TWEETS: {total_tweet}')
# Average impression card
avg_impression = round(df.impressions.sum()/len(df.impressions), 1)
print(f'Avg impression: {avg_impression}')
# engagements rate card
likes_retweet = df['likes'].sum()+df['retweets'].sum()
avg_engagement = round(likes_retweet/len(df), 1)
print(f'Avg engagement: {avg_engagement}')
# Media Engagement rate card
media_engagement = int(df['media engagements'].sum()/len(df['media engagements']))
print(f'Media Engagement Per Tweet: {media_engagement}')
###Output
Media Engagement Per Tweet: 52
###Markdown
Analysis Total tweet card
###Code
area_chart_total_tweet = df[['Tweet','month']]
tweet_count_db = area_chart_total_tweet.value_counts('month').reset_index(name='tweet_count').sort_values('month')
tweet_count_db
x = ['Jun', 'Jul', 'Aug', 'Sept', 'Oct']
fig = px.area(
x = x,
y = tweet_count_db['tweet_count'],
markers = True,
)
fig.update_layout(
hovermode = 'closest',
piecolorway = ['#0f52d9'],
margin = dict(
t = 40,
b = 20,
l = 30,
r = 30
),
)
fig.update_xaxes(
showgrid = False,
zeroline = False,
visible = False
)
fig.update_yaxes(
showgrid = False,
zeroline = False,
visible = False
)
fig.show()
###Output
_____no_output_____
###Markdown
Avg impression area plot
###Code
area_chart_avg_impression = df[['impressions','month']]
impression_count_db = area_chart_avg_impression.value_counts('month').reset_index(name='impressions').sort_values('month')
impression_count_db
x = ['Jun', 'Jul', 'Aug', 'Sept', 'Oct']
fig = px.area(
x = x,
y = impression_count_db['impressions'],
markers = True,
)
fig.update_layout(
hovermode = 'closest',
piecolorway = ['#0f52d9'],
margin = dict(
t = 40,
b = 20,
l = 30,
r = 30
),
)
fig.update_xaxes(
showgrid = False,
zeroline = False,
visible = False
)
fig.update_yaxes(
showgrid = False,
zeroline = False,
visible = False
)
fig.show()
###Output
_____no_output_____
###Markdown
Engagement rate
###Code
area_chart_avg_engagements = df[['media engagements','month']]
engagements_count_db = area_chart_avg_engagements.value_counts('month').reset_index(name='media engagements').sort_values('month')
engagements_count_db
x = ['Jun', 'Jul', 'Aug', 'Sept', 'Oct']
fig = px.area(
x = x,
y = engagements_count_db['media engagements'],
markers = True,
)
fig.update_layout(
hovermode = 'closest',
piecolorway = ['#0f52d9'],
margin = dict(
t = 40,
b = 20,
l = 30,
r = 30
),
)
fig.update_xaxes(
showgrid = False,
zeroline = False,
visible = False
)
fig.update_yaxes(
showgrid = False,
zeroline = False,
visible = False
)
fig.show()
###Output
_____no_output_____
###Markdown
Bikers on the Fremont bridgeExample adapted from the [Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html) Set up: Download (and load) data
###Code
# Download data (you can download it by uncommenting and running this line of code)
#!curl -o FremontBridge.csv https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD
import matplotlib.pyplot as plt # for making plots
import numpy as np # for doing numerical operations
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler # scaling data
from sklearn.model_selection import train_test_split # splitting data
from sklearn.neighbors import KNeighborsRegressor # regressor
from sklearn.model_selection import GridSearchCV # for grid search
from sklearn.pipeline import make_pipeline # for making pipelines
%matplotlib inline
# Aggregate data to the daily level
counts = pd.read_csv('FremontBridge.csv', index_col='Date', parse_dates=True)
# This operation shows us the first 10 rows of this dataset
counts.head(10)
# The next few operations sum up the total number of bikers that have crossed
# the Fremont Bridge on a given day and makes a new dataframe that gives us
# date and total number of bikers that crossed that day.
daily = counts.resample('d').sum()
daily['Total'] = daily.sum(axis=1)
daily = daily[['Total']] # remove other columns
daily.head(10)
###Output
_____no_output_____
###Markdown
Data Prep: Adding Features
###Code
# Load weather data (downloaded from: https://www.ncdc.noaa.gov/cdo-web/search?datasetid=GHCND)
weather = pd.read_csv('weather.csv', index_col='DATE', parse_dates=True)
# Create dry_day column
# This basically creates a new feature called dry day by checking if the
# value of the precipitation('PRCP') column is 0 for that row. If it is,
# it means it is a dry day and is assigned a numerical value of 1. Else,
# it is assigned a numerical value of zero.
weather['dry_day'] = (weather['PRCP'] == 0).astype(int)
weather.head(10)
# Join selected weather columns
# We are joining the four columns relevant to us in the weather dataset to our
# daily dataframe which has total number of bikers for the given day.
# We choose the 3 columns that are pre-existent in the weather dataset,
# namely, precipitation, minimum and maximum temperature for the day and
# our own feature which we made, which is the dry day column.
daily = daily.join(weather[['PRCP', 'dry_day', 'TMIN', 'TMAX']])
daily.head(10)
# Compute hours of daylight
# Below is a function that calculates the hours of daylight for a given date
# (Due to the complex nature, we won't get into it this session)
def hours_of_daylight(date, axis=23.44, latitude=47.61):
"""Compute the hours of daylight for the given date"""
days = (date - pd.datetime(2000, 12, 21)).days
m = (1. - np.tan(np.radians(latitude))
* np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))
return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.
# We are basically adding a new feature named daylight_hrs to our daily
# dataframe. It gives the hours of daylight for that particular day
daily['daylight_hrs'] = list(map(hours_of_daylight, daily.index))
daily[['daylight_hrs']].plot()
plt.ylim(8, 17)
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:3: FutureWarning: The pandas.datetime class is deprecated and will be removed from pandas in a future version. Import from datetime instead.
This is separate from the ipykernel package so we can avoid doing imports until
###Markdown
Feature Generation: Categorical Variable(s)
###Code
# Get dummy variables from categorical columns (alternative: sklearn OneHotEncoding)
# Make each day of the week its own column; the actual day of the week gets assigned
# a value of one and the rest a value of 0.
daily['day_of_week'] = daily.index.dayofweek.astype("str")
# Plot: daily[["day", "Total"]].groupby("day").sum().plot()
daily = pd.get_dummies(daily)
daily.head(10)
###Output
_____no_output_____
###Markdown
Abbreviated EDA We now see how various features are correlated with the number of bikers that crossed the Fremont bridge. We want to be able to see how much influence features have on our target variable, so that we can get a better idea of which features we can use in our ML model.
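Before plotting, a quick numeric sketch of the same question, using the weather and daylight columns already added to `daily` (Pearson correlations with the daily total):

```python
# Pearson correlation of each feature with the daily total number of bikers.
daily[['Total', 'TMAX', 'TMIN', 'PRCP', 'daylight_hrs']].corr()['Total']
```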
###Code
# What is the relationship between bikers and temperature?
plt.scatter(daily.TMAX, daily.Total, alpha=.2)
# What is the relationship between bikers and date?
plt.figure(figsize=(15, 3))
daily.Total.plot()
# What is the relationship between bikers and (min) temperature?
plt.scatter(daily.TMIN, daily.Total, alpha=.2)
# What is the distribution of bikers on dry/wet days?
plt.figure()
plt.hist(daily.Total[daily.dry_day == True], label="Dry Day", alpha = .3)
plt.hist(daily.Total[daily.dry_day == False], label="Wet Day", alpha = .3)
plt.legend()
# How does the number of bikers vary by temperature and wet/dry?
# We want to see the correlation between temperature and dry day on the bikers
# We see the number of bikers for various maximum temperatures for dry and wet
# days. While we might not see a necessarily linear correlation, we do see that
# there is a larger positive correlation between max temperature on dry days and
# number of bikers as compared to that on a wet day.
plt.figure()
plt.scatter(daily.TMAX[daily.dry_day == True], daily.Total[daily.dry_day == True], alpha = .3, label="Dry")
plt.scatter(daily.TMAX[daily.dry_day == False], daily.Total[daily.dry_day == False], alpha = .3, label="Wet")
plt.legend()
###Output
_____no_output_____
###Markdown
Modeling: KNN Regressor What is the KNN Regressor model: * **KNN** stands for K Nearest Neighbors. This is one of the standard models data scientists use to model their data.* *Basically, it is an algorithm that checks the distance of a new point from the nearest data points on the plot, so we can classify the point accordingly*. * **For example**, on the previous plot, if we draw a vertical line for a given value of temperature, we would see that there are multiple data points around it. We want to look at the nearest points (basically the number of bikers for that temperature) and be able to predict what our target value might be, given our value on the X-axis.* We define K: the number of nearest neighbors we want the algorithm to look at while making our prediction. The larger we make K, the more we tend to avoid overfitting the data. Why do we use KNN?* It works great where the correlation between features and the target isn't necessarily linear, hence it would be more appropriate than using an OLS regression model.* It is simple to implement and handles non-linearity well.* Fitting the model also tends to be quick: the computer doesn't have to calculate any particular parameters or values
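As a small illustration of the idea (toy numbers, not the bridge data), the prediction for a new point is simply the average of the k nearest training targets:

```python
# Toy illustration of KNN regression: predict riders from temperature by averaging
# the targets of the 3 nearest training points (all values are made up).
import numpy as np
from sklearn.neighbors import KNeighborsRegressor

temps = np.array([[50], [55], [60], [70], [80]])   # hypothetical max temperatures
riders = np.array([1200, 1500, 2000, 3200, 3500])  # hypothetical daily bike counts

toy_knn = KNeighborsRegressor(n_neighbors=3).fit(temps, riders)
print(toy_knn.predict([[65]]))  # average of the 3 closest days' counts
```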
###Code
# Split data into training and testing data
# We hold out 30% of the days as a test set.
train_features, test_features, train_outcome, test_outcome = train_test_split(
daily.drop("Total", axis=1),
daily.Total,
test_size=0.30,
random_state=11
)
###Output
_____no_output_____
###Markdown
What is a scaler?It basically scales our data. What does that mean? * Some models tend to produce better and more accurate results when all the input data(features) are relatively on the same scale. As in, the values of the data in our features are somewhat on the same scale.* For example, if one of our features has values ranging from 10 to 100 and other features have values from 1 million to 10 million, our features **are not** in the same relative scale. * Therefore, a scaler will perform appropriate transformations on our data in the features and try and keep them in the same relative scale. ***The distribution of the data still remains the same***
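A tiny numeric sketch of the transformation (made-up values): each value becomes (x - min) / (max - min), so the column ends up between 0 and 1 while keeping its shape:

```python
# Minimal MinMaxScaler illustration on a single made-up column.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

col = np.array([[10.0], [40.0], [100.0]])
print(MinMaxScaler().fit_transform(col))  # [[0.], [0.333...], [1.]]
```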
###Code
# Create a scaler and your classifier
# We will use a MinMaxScaler() for our transformations where,
# from each value in the columns the min value of the column is subtracted,
# followed by dividing it by the range of the column(max - min).
scaler = MinMaxScaler()
knn_reg = KNeighborsRegressor()
# Define a pipeline that uses your scaler and classifier
pipe = make_pipeline(scaler, knn_reg)
# Define a grid to search through
# We are simply making a dictionary of the parameters here. We need to choose the
# best parameters from these available ones to use for our model.
param_grid = {'kneighborsregressor__n_neighbors':range(1, 5), 'kneighborsregressor__weights':["uniform", "distance"]}
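# Note on the key names above: because the regressor sits inside a pipeline, grid-search
# parameters are addressed as "<step name>__<parameter>", where the step name is the
# lowercased class name that make_pipeline assigns automatically.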
# Perform a grid search of your pipeline
# Luckily, the grid search of our pipeline will output our best parameters for
# us below, which we can use. It does so according to the scoring we want to
# use for our model, the available parameters we have given from above and
# the regressor and scaler we have chosen to use from above.
# The more parameters there are to search over, the longer this takes to run!
grid = GridSearchCV(pipe, param_grid, scoring="neg_mean_absolute_error")
grid.fit(train_features, train_outcome)
grid.score(test_features, test_outcome)
grid.best_params_
# Compare prediction to (test) data
plt.scatter(grid.predict(test_features), test_outcome, alpha=.4)
grid.score(test_features, test_outcome)
###Output
_____no_output_____
###Markdown
Feature Generation: Polynomial Transformations
###Code
# Add a polynomial transformation to the pipeline
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures()
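# For intuition about what this transformation does (illustrative values): with degree=2,
# an input row [a, b] is expanded to [1, a, b, a^2, a*b, b^2], e.g.
# PolynomialFeatures(degree=2).fit_transform([[2, 3]]) -> [[1., 2., 3., 4., 6., 9.]]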
# Define a pipeline that includes the polynomial transformation
pipe = make_pipeline(poly, scaler, knn_reg)
# Define a grid to search through (including the degree of polynomial)
param_grid = {'polynomialfeatures__degree':range(1, 3),
'kneighborsregressor__n_neighbors':range(1, 5),
'kneighborsregressor__weights':["uniform", "distance"]}
# Perform a grid search of your pipeline
grid = GridSearchCV(pipe, param_grid, scoring="neg_mean_absolute_error")
grid.fit(train_features, train_outcome)
grid.score(test_features, test_outcome)
plt.scatter(grid.predict(test_features), test_outcome)
# Visualize time trends
test_data = test_features.join(test_outcome)
test_data['preds'] = grid.predict(test_features)
plt.figure(figsize=(15, 3))
test_data.Total.plot(label="Actual", alpha = .8)
test_data.preds.plot(label="Predicted", alpha = .8)
plt.legend()
###Output
_____no_output_____
###Markdown
Error assessment: find systematic errors
###Code
# Why are we getting this wrong?
# Assess error by day of the week
test_data['day'] = test_data.index.dayofweek
test_data['err'] = test_data.Total - test_data.preds
sns.violinplot(y="err", x="day", data=test_data)
# Assess error by temperature and dry_day
plt.figure(figsize=(10, 5))
plt.scatter(test_data.TMIN[test_data.dry_day == True], test_data.err[test_data.dry_day == True], alpha=.4, label="Dry")
plt.scatter(test_data.TMIN[test_data.dry_day == False], test_data.err[test_data.dry_day == False], alpha=.4, label="Wet")
plt.legend()
# Assess error by precipitation
plt.figure(figsize=(10, 5))
plt.scatter(test_data.PRCP, test_data.err, c=test_data.TMAX)
###Output
_____no_output_____
###Markdown
Feature Selection: Select best featuresAs a form of dimensionality reduction, only select the top percentile features that have a certain threshold of variance.
###Code
# Create a percentile selector, add it to the pipeline
# (alternatives a K selectors, PCA, or others)
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import VarianceThreshold
selecter = SelectPercentile()
threshold = VarianceThreshold(.1)
pipe = make_pipeline(poly, threshold, scaler, selecter, knn_reg)
# Define a grid to search through (including the degree of polynomial AND percentile of best features)
param_grid = {
'polynomialfeatures__degree':range(1, 3),
'selectpercentile__percentile':range(10, 30, 5),
'kneighborsregressor__n_neighbors':range(1, 5),
'kneighborsregressor__weights':["uniform", "distance"]
}
grid = GridSearchCV(pipe, param_grid, scoring="neg_mean_absolute_error")
grid.fit(train_features, train_outcome)
grid.score(test_features, test_outcome)
###Output
_____no_output_____
###Markdown
Analysis to aid in development of a Wordle challenge helper --- Notebook goal: using data analysis, discover trends in 5-letter English words to develop the logic needed to automate a Wordle solver. First step of analysis: determine the best **first guess** to use in a game of Wordle We can look at the most-used letters in five-letter words to see if there are any stand-out first guesses
###Code
from english_words import english_words_lower_alpha_set
from matplotlib import pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
allWords = list(english_words_lower_alpha_set)
words = [i for i in allWords if len(i) == 5]
words.remove('u.s.a') # 'u.s.a' considered a word for some reason
numWords = len(words)
letters = {
'a' : 0, 'b' : 0,
'c' : 0, 'd' : 0,
'e' : 0, 'f' : 0,
'g' : 0, 'h' : 0,
'i' : 0, 'j' : 0,
'k' : 0, 'l' : 0,
'm' : 0, 'n' : 0,
'o' : 0, 'p' : 0,
'q' : 0, 'r' : 0,
's' : 0, 't' : 0,
'u' : 0, 'v' : 0,
'w' : 0, 'x' : 0,
'y' : 0, 'z' : 0
}
for word in words:
usedLetters = []
for letter in word:
if letter not in usedLetters:
letters[letter] += 1
usedLetters.append(letter)
x = []
y = []
for key in letters:
x.append(key)
y.append((letters[key]/numWords) * 100)
x_pos = [i for i, _ in enumerate(x)]
plt.bar(x_pos, y, color='green')
plt.xlabel('Letter')
plt.ylabel('Percent Used')
plt.title('Unique Letters Used in Five-Letter Words')
plt.xticks(x_pos, x)
plt.show()
###Output
_____no_output_____
###Markdown
It appears as though the most common letters are:* e* a* r* o* i* l* s* tWhile a, e, r, o, and i don't come together to make a five-letter word, we can find the best combo of letters by assigning a value to every five-letter combination that can be made from the above letters. We assign a value by adding together the % used value of each of the five letters in the combination.Once we know the values for all combinations, we can start from the highest-value combos and work our way down. Each combo will then be tested for whether there is a word that contains all five letters. If there is such a word, it is the optimal starting Wordle word!
###Code
from itertools import combinations
# Create all combinations of 5 letters and store them in a dictionary
topLetters = ['a', 'e', 'r', 'o', 'i', 'l', 's', 't']
combos = combinations(topLetters, 5)
values = {}
for i in list(combos):
values[''.join(i)] = 0
# Assign values to each of the combinations
for key in values:
for i in key:
values[key] += (letters[i]/numWords)
# Sort the values dictionary by value
values = dict(sorted(values.items(), key=lambda item: item[1], reverse=True))
def findWord(testLetters):
'''
Given a list of test letters, will try to find a word with all five
'''
print(f' Testing combo {testLetters}')
firstWord = None
for word in words:
goodLetters = [i for i in testLetters]
for letter in word:
if letter in goodLetters:
goodLetters.remove(letter)
if len(goodLetters) == 0:
print(f'Best word: {word}')
firstWord = word
return True, firstWord
return False, None
optimized = False
for key in values:
if not optimized:
check, word = findWord([char for char in key])
if check:
print(f'Best combination of letters: {key}')
print(f'Best word to use: {word}')
optimized = True
###Output
Testing combo ['a', 'e', 'r', 'o', 'i']
Testing combo ['a', 'e', 'r', 'o', 'l']
Testing combo ['a', 'e', 'r', 'o', 's']
Best word: arose
Best combination of letters: aeros
Best word to use: arose
###Markdown
Replica-based ("official") metrics Up until February, active editors were calculated using the following procedure. For the initial run (probably some time in 2018), the procedure was run for all previous time. Then, each month, it was re-run to add data for the previous month.First, we built an [editor month dataset](https://meta.wikimedia.org/wiki/Research:Editor_month_dataset) by running [the update_editor_month query](https://github.com/wikimedia-research/Editing-movement-metrics/blob/3f5322cc1302419114ea7f647fdf4592063c6a35/queries/update_editor_month.sql) (or [a rewritten version](https://github.com/wikimedia-research/Editing-movement-metrics/blob/f6db91f3c64ffc05ae6eeda599755af744928803/queries/update_editor_month.sql) in January 2018) on [a specific selection of wikis](https://github.com/neilpquinn/wmfdata/blob/b0548529c4d39fc37f40fb637025e8a9b428a33f/wmfdata/mariadb.pyL94) sequentially.Then, the active editor numbers were calculated using [an SQL query](https://github.com/wikimedia-research/Editing-movement-metrics/blob/f6db91f3c64ffc05ae6eeda599755af744928803/queries/active_editors.sql) on that editor-month table.This gave the following, which is our currently accepted version of reality:
###Code
metrics_url = "https://raw.githubusercontent.com/wikimedia-research/Editing-movement-metrics/75b3251727f8c766e4872f775f57a09632df6500/metrics/metrics.tsv"
metrics_stream = StringIO(requests.get(metrics_url).text)
official_ae = pd.read_csv(
metrics_stream,
sep="\t",
parse_dates=["month"]
).set_index("month")["active_editors"].to_frame()
official_ae.tail()
###Output
_____no_output_____
###Markdown
Data Lake-based ("new") metrics For February's metrics, we switched to calculating these based on the `mediawiki_history` dataset in the Data Lake. First, we built an editor-month table using the following SQL: ```sqlinsert into neilpquinn.editor_monthselect trunc(event_timestamp, "MONTH") as month, wiki_db, event_user_id as local_user_id, max(event_user_text) as user_name, -- Some rows incorrectly have a null `event_user_text` count(*) as edits, coalesce( sum(cast(page_namespace_is_content_historical as int)), 0 ) as content_edits, NULL as mobile_web_edits, NULL as mobile_app_edits, NULL as visual_edits, NULL as ve_source_edits, ( max(event_user_is_bot_by_name) or max(array_contains(event_user_groups, "bot")) or max(array_contains(event_user_groups_historical, "bot")) ) as bot, min(event_user_creation_timestamp) as user_registrationfrom wmf.mediawiki_historywhere event_timestamp between "{start}" and "{end}" and event_entity = "revision" and event_type = "create" and snapshot = "{mwh_snapshot}"group by trunc(event_timestamp, "MONTH"), wiki_db, event_user_id```
###Code
new_ae = (
hive.run("""
select
month,
count(*) as active_editors
from (
select
cast(month as date) as month,
user_name,
sum(content_edits) as content_edits,
max(bot) as bot
from neilpquinn.editor_month
where
month < "2019-02-01" and
local_user_id != 0
group by month, user_name
) global_edits
where
content_edits >= 5 and
(not bot or user_name in ("Paucabot", "Niabot", "Marbot"))
group by month
""")
.assign(month=lambda df: pd.to_datetime(df["month"]))
.set_index("month")
)
###Output
_____no_output_____
###Markdown
These differ a LOT from the replica-based metrics.
###Code
(new_ae - official_ae).plot(title="Deviation of 'new' active editors from 'official'");
###Output
_____no_output_____
###Markdown
Load editor month datasets for comparisons Let's directly compare the official version of the dataset with the new one (tweaked to eliminate a few obvious differences).
###Code
staging_host = !analytics-mysql -d staging --print-target
staging_host = staging_host[0]
jdbc_uri = "jdbc:mysql://" + staging_host + "/staging"
cnf_path = "/etc/mysql/conf.d/research-client.cnf"
sqoop_query = """
select
convert(wiki using utf8) as wiki,
cast(month as datetime) as month,
local_user_id,
convert(user_name using utf8) as user_name,
edits,
content_edits,
bot_flag,
user_registration
from editor_month
where $CONDITIONS
"""
!sqoop import --connect {jdbc_uri} --connection-param-file {cnf_path} --query '{sqoop_query}' \
--split-by local_user_id --target-dir /user/neilpquinn-wmf/editor_month_official_raw \
--hive-import --hive-table neilpquinn.editor_month_official \
--map-column-hive month=timestamp,user_registration=timestamp
hive.run(["""
CREATE TABLE IF NOT EXISTS neilpquinn.editor_month_new (
`wiki` STRING,
`month` TIMESTAMP, -- Hive 1.1 does not support the DATE type
`local_user_id` BIGINT,
`user_name` STRING,
`edits` BIGINT,
`content_edits` BIGINT,
`bot_flag` BOOLEAN,
`user_registration` TIMESTAMP
)
STORED AS PARQUET
""", """
insert into neilpquinn.editor_month_new
select
wiki_db as wiki,
trunc(event_timestamp, "MONTH") as month,
event_user_id as local_user_id,
max(event_user_text) as user_name, -- Some rows incorrectly have a null `event_user_text`
count(*) as edits,
coalesce(
sum(cast(page_namespace_is_content_historical as int)),
0
) as content_edits,
(
max(array_contains(event_user_groups, "bot")) or
max(array_contains(event_user_groups_historical, "bot"))
) as bot,
min(event_user_creation_timestamp) as user_registration
from wmf.mediawiki_history
where
event_timestamp < "2019-02-01" and
event_entity = "revision" and
event_type = "create" and
snapshot = "2019-03"
group by
trunc(event_timestamp, "MONTH"),
wiki_db,
event_user_id
"""])
###Output
_____no_output_____
###Markdown
Let's make sure these datasets have the same active editors discrepancy.
###Code
ae_query = """
select
month,
count(*) as active_editors
from (
select
cast(month as date) as month,
user_name,
sum(content_edits) as content_edits,
max(bot_flag) as bot_flag
from neilpquinn.{table}
where
local_user_id != 0
group by month, user_name
) global_edits
where
content_edits >= 5 and
not bot_flag and
user_name not regexp "bot\\b"
group by month
"""
emo_ae_query = ae_query.format(table="editor_month_official")
emn_ae_query = ae_query.format(table="editor_month_new")
emo_ae = hive.run(emo_ae_query).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
emn_ae = hive.run(emn_ae_query).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
###Output
_____no_output_____
###Markdown
Yup, it's the same. So we can proceed to compare just these two datasets.
###Code
(emn_ae - emo_ae)["2001":].plot(
title="Deviation of 'new' active editors from 'official'"
);
###Output
_____no_output_____
###Markdown
Wiki inclusion It looks like there are discrepancies in which wikis are included.
###Code
emo_wikis = hive.run("""
select distinct wiki
from neilpquinn.editor_month_official
""")
emo_wikis = set(emo_wikis["wiki"].unique())
emn_wikis = hive.run("""
select distinct wiki
from neilpquinn.editor_month_new
""")
emn_wikis = set(emn_wikis["wiki"].unique())
###Output
_____no_output_____
###Markdown
The extra wikis included in `editor_month_official` seem to be a miscellaneous collection that are [mistakenly not included](https://phabricator.wikimedia.org/T220456) in `mediawiki_history`.
###Code
extra_emo_wikis = emo_wikis - emn_wikis
len(extra_emo_wikis)
###Output
_____no_output_____
###Markdown
The extra wikis included in `editor_month_new` are a mixture of test wikis, infrastructure wikis (`donatewiki`, `loginwiki`), and affiliate wikis that are only there because I forgot to port the logic excluding them from the replicas-based pipeline to the Data Lake-based one.
###Code
extra_emn_wikis = emn_wikis - emo_wikis
len(extra_emn_wikis)
###Output
_____no_output_____
###Markdown
Let's see what the discrepancy looks like when we exclude these extra wikis.
###Code
ae_same_wikis_sql = """
select
month,
count(*) as active_editors
from (
select
cast(month as date) as month,
user_name,
sum(content_edits) as content_edits,
max(bot_flag) as bot_flag
from neilpquinn.{table}
where
local_user_id != 0 and
wiki not in {excluded_wikis!r}
group by month, user_name
) global_edits
where
content_edits >= 5 and
not bot_flag and
user_name not regexp "bot\\b"
group by month
"""
emo_ae_same_wikis_sql = ae_same_wikis_sql.format(
table="editor_month_official",
excluded_wikis=tuple(extra_emo_wikis)
)
emn_ae_same_wikis_sql = ae_same_wikis_sql.format(
table="editor_month_new",
excluded_wikis=tuple(extra_emn_wikis)
)
emo_ae_same_wikis = hive.run(emo_ae_same_wikis_sql).assign(
month=lambda df: pd.to_datetime(df["month"])
).set_index("month")
emn_ae_same_wikis = hive.run(emn_ae_same_wikis_sql).assign(
month=lambda df: pd.to_datetime(df["month"])
).set_index("month")
###Output
_____no_output_____
###Markdown
Basically no change, which makes sense when you consider that all the wikis involved are tiny.
###Code
(emn_ae_same_wikis - emo_ae_same_wikis)["2001":].plot(
title="Deviation of 'new' active editors from 'official'"
);
###Output
_____no_output_____
###Markdown
Unmatched rows If we use the same selection of wikis, `editor_month_new` has about 476,000 more rows than `editor_month_official`.
###Code
row_count_sql = """
select count(*)
from neilpquinn.{table}
where wiki not in {excluded_wikis!r}
"""
emo_row_count_sql = row_count_sql.format(
table="editor_month_official",
excluded_wikis=tuple(extra_emo_wikis)
)
emn_row_count_sql = row_count_sql.format(
table="editor_month_new",
excluded_wikis=tuple(extra_emn_wikis)
)
hive.run(emn_row_count_sql)
hive.run(emo_row_count_sql)
###Output
_____no_output_____
###Markdown
Let's find the rows that don't match up.
###Code
unmatched_rows_sql = """
select *
from neilpquinn.editor_month_official emo
full outer join neilpquinn.editor_month_new emn
on
emo.month = emn.month and
emo.wiki = emn.wiki and
emo.local_user_id = emn.local_user_id
where
coalesce(emo.wiki not in {extra_emo_wikis!r}, true) and
coalesce(emn.wiki not in {extra_emn_wikis!r}, true) and
(emo.local_user_id is null or emn.local_user_id is null)
""".format(
extra_emo_wikis=tuple(extra_emo_wikis),
extra_emn_wikis=tuple(extra_emn_wikis)
)
unmatched_rows = hive.run([
"set hive.resultset.use.unique.column.names=true",
unmatched_rows_sql
]).rename(columns=lambda x: x.replace(".", "_"))
###Output
_____no_output_____
###Markdown
This gives us about 522,000 unmatched rows, which is about 46,000 more than the overall row count discrepancy. That suggests that we're mostly talking about rows that don't appear at all in one dataset, not about rows that appear in both datasets but didn't match up.Out of these unmatched rows, 96% are found only in `editor_month_new` and 4% only in `editor_month_official`.
###Code
len(unmatched_rows)
emn_only = unmatched_rows.query("~emn_wiki.isnull()")
len(emn_only)
emo_only = unmatched_rows.query("~emo_wiki.isnull()")
len(emo_only)
###Output
_____no_output_____
###Markdown
The rows found only in `editor_month_new` correspond to revisions [imported](https://www.mediawiki.org/wiki/Manual:Importing_XML_dumps) from one wiki to another. The number dropped substantially in the past few years, when `editor_month_official` was being periodically built a month after the fact (unlike the `mediawiki_history`, which is rebuilt completely every month). This suggests that these rows are at least partly a case of "history" being changed gradually after the fact.
###Code
emn_only.groupby("emn_month")["emn_wiki"].count().plot();
###Output
_____no_output_____
###Markdown
Best viewed locally in a Jupyter notebook or online in Jupyter Notebook Viewer Analysis of Noun Semantics in the Hebrew Bible Cody KinghamIn this notebook, I compare the syntactic contexts of the top 200 most frequent nouns in the Hebrew Bible. This notebook essentially walks through my process and includes limited commentary throughout. Full descriptions borrowed from the paper will soon be transferred to here as well.
###Code
! echo "last updated:"; date
from pathlib import Path
# ETCBC's BHSA data
from tf.fabric import Fabric
from tf.app import use
# stats & data-containers
import collections, math, re, random, csv
import pandas as pd
pd.set_option('display.max_rows', 100)
import numpy as np
from kneed import KneeLocator # https://github.com/arvkevi/kneed
# data visualizations
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['font.serif'] = ['Times New Roman']
from bidi.algorithm import get_display
from IPython.display import HTML, display, Image
from adjustText import adjust_text # fixes overlapping scatterplot annotations
# custom modules
from pyscripts.contextparameters import deliver_params
from pyscripts.deliver_data import deliver_data
from pyscripts.pca import apply_pca, plot_PCA
import pyscripts.significance as my_stats
# prep the Hebrew syntax data
name = 'noun_semantics'
hebrew_data = ['~/github/etcbc/{}/tf/c'.format(direc) for direc in ('bhsa','lingo/heads', 'heads', 'phono')] # data dirs
load_features = '''
typ phono lex_utf8 lex
voc_lex_utf8 voc_lex gloss
freq_lex pdp sp ls
language
rela number function
vs vt
code label
head obj_prep sem_set nhead
heads noun_heads
'''
# Text Fabric load statements
TF = Fabric(locations=hebrew_data)
api = TF.load(load_features)
B = use('bhsa', api=api, hoist=globals(), silent=True) # Bhsa functions for search and visualizing text
# configure paths for figures and data
plot_path = Path('results/plots/')
table_path = Path('results/tables')
fisher_data = table_path.joinpath('fisher_scores.csv')
def savefig(name):
plt.savefig(plot_path.joinpath(name), format='svg', bbox_inches='tight')
def savecsv(name, df):
df.to_csv(table_path.joinpath(name))
def reverse_hb(heb_text):
'''
Reverses order of left-to-right text
for good matplotlib formatting.
'''
return ''.join(reversed(heb_text))
def show_word_list(word_nodes, joiner=' |', title=''):
'''
Displays Hebrew for a pipe-separated list of word nodes
Good for seeing lexemes without taking up screen space.
'''
formatted = joiner.join(T.text(node) for node in word_nodes)
display(HTML(formatted))
def show_subphrases(phrase, direction=L.d):
'''
A simple function to print subphrases
and their relations to each other.
'''
for sp in direction(phrase, 'subphrase'):
mother = E.mother.f(sp)[0] if E.mother.f(sp) else ''
mother_text = T.text(mother)
print('-'*7 + str(sp) + '-'*16)
print()
print(f'{T.text(sp)} -{F.rela.v(sp)}-> {mother_text}')
print(f'nodes: {sp} -{F.rela.v(sp)}-> {mother}')
print(f'slots: {L.d(sp, "word")} -{F.rela.v(sp)}-> {L.d(mother or 0, "word")}')
print('-'*30)
###Output
_____no_output_____
###Markdown
Corpus SizeBelow is the number of words included in the corpus of BHSA.
###Code
len(list(F.otype.s('word')))
###Output
_____no_output_____
###Markdown
Define a Target Noun Set*Insert discussion about the semantic relationship between iconicity and frequency with regards to the most frequent noun lexemes in the HB.*
###Code
raw_search = '''
lex language=Hebrew sp=subs
'''
raw_nouns = B.search(raw_search)
###Output
0.02s 3706 results
###Markdown
Now we order the results on the basis of lexeme frequency.
###Code
raw_terms_ordered = sorted(((F.freq_lex.v(res[0]), res[0]) for res in raw_nouns), reverse=True)
###Output
_____no_output_____
###Markdown
Below we have a look at the top 50 terms from the selected set. Pay attention to the feature `ls`, i.e. "lexical set." This feature gives us some rudimentary semantic information about the nouns and their usual functions, and it suggests that some additional restrictions are necessary for the noun selection procedure. Note especially that several of these nouns are used in adjectival or prepositional roles (e.g. כל ,אחד, אין, תחת).
###Code
raw_nnodes = [res[1] for res in raw_terms_ordered] # isolate the word nodes of the sample
B.displaySetup(extraFeatures={'ls', 'freq_lex'}) # config B to display ls and freq_lex
# display lexeme data
for i, node in enumerate(raw_nnodes[:50]):
print(T.text(node), end=' | ')
###Output
כֹּל | בֵּן | אֱלֹהִים | מֶלֶךְ | אֶרֶץ | יֹום | אִישׁ | פָּנֶה | בַּיִת | עַם | יָד | דָּבָר | אָב | עִיר | אֶחָד | עַיִן | שָׁנָה | שֵׁם | עֶבֶד | אַיִן | אִשָּׁה | שְׁנַיִם | נֶפֶשׁ | כֹּהֵן | אַחַר | דֶּרֶךְ | אָח | שָׁלֹשׁ | לֵב | רֹאשׁ | בַּת | מַיִם | מֵאָה | הַר | גֹּוי | אָדָם | חָמֵשׁ | קֹול | תַּחַת | פֶּה | אֶלֶף | עֹוד | שֶׁבַע | צָבָא | קֹדֶשׁ | אַרְבַּע | עֹולָם | מִשְׁפָּט | שַׂר | שָׁמַיִם |
###Markdown
Based on the nouns that are present, we should make some key exclusions. Many substantives have more functional or adjectival roles. Undesirable categories include copulative nouns (`nmcp`, e.g. אין), cardinal numbers (`card`), potential prepositions (`ppre`, e.g. תחת). The `ls` category of potential adverb (`padv`) contains desirable nouns like יום, but also more functionally adverbial nouns like עוד. Thus we can see that there is a range of adverbial tendencies found in this category. Due to the potentially interesting possibility of seeing these tendencies play out in the data, we decide to keep these instances. To be sure, the very phenomenon of "functional" versus "nominal" is worthy of further, quantitative investigation. The `ls` feature is an experimental and incomplete feature in the ETCBC, and this is precisely the kind of shortcoming the present work seeks to address. Nouns and adverbs likely sit along a sliding scale of adverbial tendencies, with adverbs nearly always functioning in such a role, and nouns exhibiting various statistical tendencies. But due to the scope of this investigation, we limit ourselves to mainly nominal words with a small inclusion of some adverbial-like substantives.We can eliminate more functional nouns by restricting the possible lexical set (`ls`) values. Below we apply those restrictions to the search template. In the case of certain quantifiers such as כל there is an `ls` feature of distributive noun (`nmdi`), yet this feature is likewise applied to nouns such as אח ("brother"). So it is undesirable to exclude all of these cases. Thus we depend, instead, on an additional filter list that excludes quantifiers.A few terms such as דרך and עבר would be eliminated because the ETCBC labels them as potential prepositions. This is a speculative classification. So we define a separate parameter in the template that saves these instances.
###Code
exclude = '|'.join(('KL/', 'M<V/', 'JTR/', 'M<FR/', 'XYJ/')) # exclude quantifiers
include = '|'.join(('padv', 'nmdi')) # ok ls features
keep = '|'.join(('DRK/', '<BR/'))
'''
Below is a TF search query for three cases:
One is a lexeme with included ls features.
The second is a lexeme with a null ls feature.
The third is lexemes we want to prevent from being excluded.
For all cases we exclude excluded lexemes.
'''
select_noun_search = f'''
lex language=Hebrew
/with/
sp=subs ls={include} lex#{exclude}
/or/
sp=subs ls# lex#{exclude}
/or/
sp=subs lex={keep}
/-/
'''
select_nouns = B.search(select_noun_search)
noun_dat_ordered = sorted(((F.freq_lex.v(res[0]), res[0]) for res in select_nouns), reverse=True)
nnodes_ordered = list(noun_dat[1] for noun_dat in noun_dat_ordered)
filtered_lexs = list(node for node in raw_nnodes if node not in nnodes_ordered)
print(f'\t{len(raw_nouns) - len(select_nouns)} results filtered out of raw noun list...')
print('\tfiltered lexemes shown below:')
show_word_list(filtered_lexs)
###Output
0.02s 3658 results
48 results filtered out of raw noun list...
filtered lexemes shown below:
###Markdown
Plot the Nouns in Order of FrequencyNow that we have obtained a filtered noun-set, we must decide a cut-off point at which to limit the present analysis. Below we plot the attested nouns and their respective frequencies.
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,4))
y_freqs = [lex_data[0] for lex_data in noun_dat_ordered]
x_rank = [i+1 for i in range(0, len(y_freqs))]
# first plot: raw frequencies
ax1.plot(x_rank[:1000], y_freqs[:1000], color='black', linewidth=1)
ax1.set_xlabel('noun rank', size=10)
ax1.set_ylabel('noun freq.', size=10)
ax1.axvline(200, color='red', linewidth=0.8, linestyle='--')
ax1.set_title('raw frequencies', size=10)
# second plot log x log
ax2.plot(np.log(x_rank[:1000]), np.log(y_freqs[:1000]), color='black', linewidth=1)
ax2.set_xlabel('log noun rank', size=10)
ax2.set_ylabel('log noun freq.', size=10)
ax2.set_title('log frequencies', size=10)
savefig('noun_frequencies1-1000.svg')
###Output
_____no_output_____
###Markdown
These curves are typical of Zipf's law:> Zipf's law states that given some corpus of natural language utterances, the frequency of any word is inversely proportional to its rank in the frequency table ([wikipedia](https://en.wikipedia.org/wiki/Zipf%27s_law))The curve sharply "elbows" at around rank 15. Between ranks 50-100 there is still an appreciable drop-off. The curve flattens significantly after rank 200. We thus choose an arbitrary cut-off point at rank 200, since the curve has largely flattened out by this point and shows no notable change afterwards.
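To make the claim concrete: Zipf's law in its simplest form says $f(r) \propto 1/r^{\alpha}$ with $\alpha \approx 1$, i.e. $\log f(r) \approx C - \alpha \log r$ for some constant $C$, which is why the log-transformed plot above is close to a straight line.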
###Code
target_nouns = nnodes_ordered[:200]
tnoun_instances = set(word for lex in target_nouns for word in L.d(lex, 'word'))
show_word_list(target_nouns) # temporary comment out while bug is fixed
print(f'\n{len(tnoun_instances)} nouns ready for searches')
nouns_text_freqs = sorted(
((F.voc_lex_utf8.v(L.d(noun,'word')[0]), F.freq_lex.v(noun))
for noun in target_nouns), key=lambda k: k[-1], reverse=True
)
', '.join(f'{noun}' for noun, freq in nouns_text_freqs)
###Output
_____no_output_____
###Markdown
Strategy for Context SelectionSee [pyscripts/contextparameters.py](pyscripts/contextparameters.py) for the full delineation of these patterns and to see how they've been selected and tokenized.
###Code
contexts = deliver_params(tnoun_instances, tf=api)
print('done!')
context_data = deliver_data(contexts, tf=TF)
###Output
running query on template [ T.function→ st.verb.lex ]...
19884 results found.
running query on template [ T.prep.funct→ st.verb.lex ]...
15009 results found.
running query on template [ lex.PreC→ T.Subj ]...
2525 results found.
running query on template [ lex.prep.PreC→ T.Subj ]...
1136 results found.
running query on template [ T.PreC→ lex.Subj ]...
930 results found.
running query on template [ T.prep.PreC→ lex.Subj ]...
1504 results found.
running query on template [ lex.coord→ T ]...
4217 results found.
running query on template [ T.coord→ lex ]...
4336 results found.
running query on template [ lex.atr→ T ]...
1588 results found.
running query on template [ lex.coord→ T (phrase atoms) ]...
704 results found.
running query on template [ T.coord→ lex (phrase atoms) ]...
600 results found.
running query on template [ lex.appo→ T ]...
1410 results found.
running query on template [ T.appo→ lex ]...
3640 results found.
###Markdown
Let's have a look at the first example...
###Code
context_data[0]
###Output
_____no_output_____
###Markdown
Now we put the data into a dataframe. We also export the dataframe for reference.
###Code
data_df = pd.DataFrame(context_data)
data_df.set_index('clause', inplace=True)
data_df.to_csv('dataset.csv') # export dataset
data_df.head()
###Output
_____no_output_____
###Markdown
Now we'll build the co-occurrence counts.
###Code
raw_counts = pd.pivot_table(
data_df,
index='target',
columns='basis',
fill_value=0,
aggfunc='size'
)
# sort by size, first by noun sum, then by basis sum
raw_counts = raw_counts.loc[raw_counts.sum(1).sort_values(ascending=False).index]
raw_counts = raw_counts[raw_counts.sum().sort_values(ascending=False).index]
raw_counts.head()
###Output
_____no_output_____
###Markdown
Removing OutliersWe will apply two primary adjustments:1. We drop co-occurrences that are unique to a noun. The dropped observations will thus be considered outliers. While these items are useful for describing the uniqueness of a given lexeme, they are unhelpful for drawing comparisons between our sets. 2. We convert the counts into a measure of statistical significance. For this we use Fisher's exact test, which is ideal for datasets that have counts that are less than 5. Our matrix is likely to have many such counts. The resulting p-values, of which <0.05 represents a statistically significant colexeme, will be log-transformed. Values that fall below expected frequencies will be negatively transformed.
###Code
raw_counts.sum(1).sort_values().head(10)
###Output
_____no_output_____
###Markdown
We note that the term נאם only occurs 7 times in the entire dataset, far fewer than the other terms. We will therefore drop that term due to a lack of representative examples. Remove Co-occurrence OutliersWe will remove colexemes/bases that occur with only one target noun. This is done by subtracting the column total from each item in the column. A 0 value in a column means that that column holds a unique colexeme which only occurs with one target noun (we will call that a `hapax_colex` here). We will remove these columns further down. Drop the outliers
###Code
# drop נאם and any context counts left empty as a result
count_df = raw_counts.drop('נאם.n1', axis=0)
empties = count_df.loc[:, (count_df == 0).all(0)]
count_df = count_df.drop(empties.columns, axis=1)
# drop all hapax legomena
colex_counts = count_df.sum(0)
remaining_counts = count_df.sub(colex_counts, axis=1) # subtract colex_counts
hapax_colex = remaining_counts.loc[:,(remaining_counts == 0).any(0)] # select columns that have a 0 value anywhere
count_df = count_df.drop(labels=hapax_colex.columns, axis=1)
print(f'New data dimensions: {count_df.shape}')
print(f'New total observations: {count_df.sum().sum()}')
print(f'Observations removed: {raw_counts.sum().sum() - count_df.sum().sum()}')
###Output
New data dimensions: (199, 4045)
New total observations: 45424
Observations removed: 12059
###Markdown
Let's look at the sorted minimum values to make sure no terms have been left featureless.
###Code
count_df.sum().sort_values().head(5)
count_df.sum(1).sort_values().head(5)
###Output
_____no_output_____
###Markdown
How many zero counts are there?The raw count matrix has a lot of sparsity. Here's how many zeros there are. We also count other values.
###Code
# unique_values, value_counts = np.unique(data.values, return_counts=True)
# unique_counts = pd.DataFrame.from_dict(dict(zip(unique_values, value_counts)), orient='index', columns=['count'])
# display(HTML('<h5>Top 10 Unique Values and Their Counts in Dataset</h5>'))
# unique_counts.head(10)
# zero = unique_counts.loc[0.0][0]
# non_zero = unique_counts[unique_counts.index > 0].sum()[0]
# non_zero_ratio, zero_ratio = non_zero / (non_zero+zero), zero / (non_zero+zero)
# print(f'Number of zero count variables: {zero} ({round(zero_ratio, 2)})')
# print(f'Number of non-zero count variables: {non_zero} ({round(non_zero_ratio, 2)})')
###Output
_____no_output_____
###Markdown
Below the number of observed counts is given:
###Code
count_df.sum().sum()
###Output
_____no_output_____
###Markdown
Data DistributionThe basic unit of analysis is the level of the clause. We have selected a subset of all clauses from the Hebrew Bible. Let's see if the observed frequencies within the dataset exist above or below the expected frequencies.If they are below, then how much so?
###Code
from pyscripts.feature_formatting import book2sbl
all_clauses = collections.Counter()
for cl in F.otype.s('clause'):
lang = F.language.v(L.d(cl,'word')[0])
if lang != 'Hebrew':
continue
book, chapter, verse = T.sectionFromNode(cl)
book = book2sbl[book]
all_clauses[book] += 1
expected_freq = pd.Series(all_clauses)
expected_freq.head()
# get samples from dataset
sample_df = data_df.loc[
(data_df.target.isin(count_df.index)) & (data_df.basis.isin(count_df.columns))
]
# calculate deviation of proportions (Gries 2008; Levshina 2015)
# for observed frequencies, we only want to consider each clause once
# thus we create a dataframe that only keeps the first clause entry
non_duplicated_clauses = sample_df[~sample_df.index.duplicated(keep='first')]
observed_freq = non_duplicated_clauses.book.value_counts()
observed_prop = observed_freq.div(observed_freq.sum())
expected_prop = expected_freq.div(expected_freq.sum())
deviation_prop = observed_prop - expected_prop
###Output
_____no_output_____
###Markdown
Let's compare the overall number of clauses with observed clauses.
###Code
# overall clauses
expected_freq.sum()
# observed clauses
observed_freq.sum()
###Output
_____no_output_____
###Markdown
Let's make that a proportion...
###Code
observed_freq.sum() / expected_freq.sum()
###Output
_____no_output_____
###Markdown
We see that our dataset consists of 32% of all clauses in the Hebrew Bible. This raises the further question: have any particular books become over/under represented in the sample? We can answer this question by calculating the deviation of proportions (above), which tells how much the observed proportions differ from the expected proportions. In this case, let the total number of clauses in the Hebrew Bible be $NC$ and let the total number of clauses in the sample be $SC$. For a given book $b$, with $NC_b$ clauses in the Hebrew Bible and $SC_b$ clauses in the sample, the deviation of proportions is $\frac{SC_b}{SC} - \frac{NC_b}{NC}$, which is exactly what the code above computes as `observed_prop - expected_prop`. Plot Book Representations in Sample
###Code
fig, ax = plt.subplots(figsize=(8, 5))
observed_prop.sort_values().plot(kind='bar', ax=ax, color='lightgrey', edgecolor='black', linewidth=0.8)
ax.grid(axis='y')
ax.set_axisbelow(True)
ax.axhline(0, color='black', linewidth=0.5)
ax.set_ylim((0, 0.1))
ax.set_ylabel('observed ratio')
ax.set_xlabel('book')
savefig('sample_book_proportions.svg')
###Output
_____no_output_____
###Markdown
Plot deviated proportions in sample from expected proportion (entire Hebrew Bible)
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 3))
for ax in (ax1, ax2):
deviation_prop.sort_values().plot(kind='bar', ax=ax, color='lightgrey', edgecolor='black', linewidth=0.8)
ax.grid(axis='y')
ax.set_axisbelow(True)
ax.axhline(0, color='black', linewidth=0.5)
ax.set_ylabel('deviation of sample ratio')
ax.set_xlabel('book')
ax1.set_title('sample deviation from expected ratio (at scale)')
ax1.set_ylim((-1, 1))
ax2.set_title('sample deviation from expected ratio (zoomed)')
savefig('sample_deviation_proportions.svg')
deviation_prop.sort_values().head(10)
deviation_prop.sort_values(ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Here we see a 2% underrepresentation of Isaiah, as well as some smaller underrepresentation of Psalms, Job, and Proverbs. In general, the underrepresented portions are more poetic/prophetic in nature, while more narratival books are represented very slightly higher. Meanwhile there is a slightly higher sampling of 2 Chronicles (1.3%), 1 Kings (1%), and Deuteronomy (1%). These differences are very small, and thus we can say that the sample dataset is distributed across the Hebrew Bible essentially as evenly as the full set of clauses. Context Type DistributionLooking at the distribution of the various contexts
###Code
context_counts = sample_df.context_type.value_counts()
context_props = context_counts.div(context_counts.sum())
context_counts
context_props
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
for ax, data in zip((ax1, ax2), (context_counts, context_props)):
data.plot(kind='bar', ax=ax, color='white', edgecolor='black', linewidth=0.8, width=0.7)
ax.grid(axis='y')
ax.set_axisbelow(True)
ax.axhline(0, color='black', linewidth=0.5)
ax.set_xlabel('context')
ax1.set_ylabel('count')
ax2.set_ylabel('ratio')
savefig('context_counts.svg')
###Output
_____no_output_____
###Markdown
Look at function distribution
###Code
function_count = sample_df.function.value_counts()
function_prop = function_count.div(function_count.sum())
function_count
function_prop
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
for ax, data in zip((ax1, ax2), (function_count, function_prop)):
data.plot(kind='bar', ax=ax, color='lightgrey', edgecolor='black', linewidth=0.8, width=0.6)
ax.grid(axis='y')
ax.set_axisbelow(True)
ax.axhline(0, color='black', linewidth=0.5)
ax.set_xlabel('context')
ax1.set_ylabel('count')
ax2.set_ylabel('proportion')
savefig('function_counts.svg')
###Output
_____no_output_____
###Markdown
Examining the DatasetBelow we look at the number of dimensions in the data:
###Code
count_df.shape
###Output
_____no_output_____
###Markdown
And number of observations..
###Code
count_df.size
###Output
_____no_output_____
###Markdown
Apply Fisher's Exact TestNow we apply the Fisher's exact test to the data set. This involves supplying values to a 2x2 contingency table that is fed to `scipy.stats.fisher_exact` Number of Datapoints To Iterate OverThe Fisher's exact test takes some time to run. That is because it must iterate over a lot of pairs. The number is printed below.
###Code
count_df.size
###Output
_____no_output_____
###Markdown
Apply the TestsThe whole run takes 5.5-6.0 minutes on a 2017 Macbook pro.
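The heavy lifting below is done by the project's `my_stats.apply_fishers` helper, which is not shown in this notebook. Purely as an illustration of the idea described above (and assuming `count_df` holds raw co-occurrence counts with nouns as rows and context features as columns), a per-cell version of the test could look like the following sketch:
###Code
# Illustrative sketch only -- not the my_stats.apply_fishers implementation.
# Build a 2x2 contingency table for one noun/context pair, run Fisher's exact
# test, and log10-transform the p-value, flipping the sign when the observed
# count falls below its expected value.
import numpy as np
from scipy import stats

def fisher_log10(count_df, noun, context):
    a = count_df.loc[noun, context]              # noun with this context
    b = count_df.loc[noun].sum() - a             # noun with any other context
    c = count_df[context].sum() - a              # other nouns with this context
    d = count_df.values.sum() - (a + b + c)      # everything else
    _, p = stats.fisher_exact([[a, b], [c, d]])
    expected = (a + b) * (a + c) / count_df.values.sum()
    sign = 1 if a >= expected else -1
    return sign * -np.log10(p)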
###Code
run = False
if run:
fisherdata, odds_ratios = my_stats.apply_fishers(count_df, sample_axis=0, feature_axis=1)
fisherdata.to_csv(fisher_data)
else:
fisherdata = pd.read_csv(fisher_data, index_col=0)
odds_ratios = pd.read_csv('results/tables/fisher_odds.csv')
fisherdata.head(10)
###Output
_____no_output_____
###Markdown
The Fisher's test has produced p-values of 0, indicating a very high degree of attraction between a lexeme and a colexeme. A log-transformed zero equals `infinity`. Below those values are isolated.
###Code
inf_nouns = fisherdata[(fisherdata == np.inf).any(1) | (fisherdata == -np.inf).any(1)]
inf_nouns
###Output
_____no_output_____
###Markdown
In this case the Fisher's test has returned a zero value. A p-value of 0 means that the likelihood that אלהים and יהוה are *not* dependent variables is essentially null. We can thus reject the null hypothesis that the two values are not related. There is, rather, a maximum level of confidence that these two values *are* interrelated. The `np.inf` value that resulted from `log10(0)` is not viable for calculating vector distances. Thus, we need to substitute an arbitrary but appropriate value. Below we access the lowest non-zero p-values in the dataset.
###Code
fisherdata.max().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
The largest non-infinite value is ~189. We make the substitution below.
###Code
# set the infinite context equal to max non-infinite value
fisherdata.loc['אלהים.n1', 'T.appo→ יהוה.n1'] = fisherdata.max().sort_values(ascending=False)[1]
###Output
_____no_output_____
###Markdown
Below we double-check to ensure that all infinite values have been removed. The test should read `False`.
###Code
# infinites in dataset?
bool(len(fisherdata[(fisherdata == np.inf).any(1)].index))
fisherdata.loc[:, (fisherdata > 50).any()]
###Output
_____no_output_____
###Markdown
Examine the Spread of Fisher ScoresThe scores vary widely, and it seems that some relations are unduly influencing the model.
###Code
scores = pd.Series(fisherdata.values.flatten())
scores = scores[scores != 0]
scores.shape
fig, ax = plt.subplots(figsize=(8, 5))
scores.plot(kind='box', ax=ax)
ax.set_xticklabels(['Fisher score'])
# zoom on scores between -2 and 4
# +/-log10(0.5) = +/- 1.3
fig, ax = plt.subplots(figsize=(8, 5))
scores[(scores > -2) & (scores < 4)].plot(kind='box', ax=ax)
ax.set_xticklabels(['Fisher score'])
plt.show()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
sns.distplot(scores, ax=ax1)
ax1.set_title('log10 Fisher scores distribution\n', size=10)
ax1.set_xlabel('log10 Fisher scores')
subset_scores = scores[(scores < 10) & (scores > -5)]
sns.distplot(subset_scores, ax=ax2)
ax2.set_xlabel('log10 Fisher scores')
ax2.set_title('log10 Fisher scores distribution\nwhere -5 < score < 10', size=10)
subset_scores.plot(kind='box', ax=ax3)
ax3.set_xticklabels(['log10 Fisher -5 < score < 10'])
savefig('fisher_score_dist.svg')
plt.show()
###Output
_____no_output_____
###Markdown
Note that the vast majority of the data falls between a score of $-2$ to $5$.
###Code
extreme_values = ((fisherdata > -2) & (fisherdata < 5)).any()
extreme_values.size
extreme_values
fisherdata.loc[:,extreme_values].T.head(20)
fisherdata.columns.size
###Output
_____no_output_____
###Markdown
Adjust scores
###Code
fish_mean = fisherdata.copy()
def replace_score(score):
if score < -2:
return -2
elif score > 5:
return 5
else:
return score
fish_mean = fisherdata.apply(lambda x: pd.Series(replace_score(y) for y in x), result_type='broadcast')
fish_mean
fish_capped = fisherdata.copy()
def replace_score(score):
if score < -1.3:
return -1.3
elif score > 1.3:
return 1.3
else:
return score
fish_capped = fish_capped.apply(lambda x: pd.Series(replace_score(y) for y in x), result_type='broadcast')
fish_capped.head()
###Output
_____no_output_____
###Markdown
Comparing the NounsThe nouns are now ready to be compared. Principal Component Analysis: we have a semantic space with ~4k dimensions, which is a lot of potential angles from which to compare the vectors. One method that is commonly used in semantic space analysis is principal component analysis or **PCA**. PCA is a dimensionality reduction method that projects the high-dimensional vectors onto the directions of greatest variance; keeping the first two components gives each noun a pair of coordinates that can be plotted on an X and Y axis. PCA AnalysisWe want to apply PCA in order to plot the nouns in this reduced space. The goal is to use the visualization to identify patterns and groups amongst the 199 target nouns. Nouns that are more similar should fall within the same general areas relative to the origin (0, 0). Fisher with adjusted max/min scores
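The notebook itself calls the project's `apply_pca` helper in the cells below. As an illustration only of the underlying operation (assuming the input is the noun-by-context score matrix, with nouns as rows), an equivalent computation with scikit-learn might look like:
###Code
# Illustrative sketch only -- the notebook uses the project's apply_pca helper.
from sklearn.decomposition import PCA
import pandas as pd

def pca_sketch(score_df, n_components=2):
    pca = PCA(n_components=n_components)
    coords = pca.fit_transform(score_df.values)        # noun coordinates
    pcs = [f'PC{i+1}' for i in range(n_components)]
    pca_df = pd.DataFrame(coords, index=score_df.index, columns=pcs)
    loadings = pd.DataFrame(pca.components_, index=pcs, columns=score_df.columns)
    return pca_df, loadings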
###Code
def plot_nouns(df, ax, family='serif', weight='heavy', **kwargs):
for noun in df.index:
x,y = df.loc[noun]
lex, sp = noun.split('.')
noun_text = get_display(lex).replace('\u05C1','')
ax.text(x, y, noun_text, family=family, weight=weight, **kwargs)
# store PCA experiments by normalization type
exp2pca = collections.defaultdict(dict)
experiments = {
'fish_raw': fisherdata,
'fish_capped': fish_capped,
'fish_mean': fish_mean,
}
for exp_name, data in experiments.items():
print(exp_name)
pca_df, loadings_df = apply_pca(
data,
sample_axis=0,
feature_axis=1,
components=5
)
exp2pca[exp_name]['pca'] = pca_df.loc[:, :'PC2']
exp2pca[exp_name]['loadings'] = loadings_df.iloc[:2].T
fig, axes = plt.subplots(1, 3, figsize=(18,5))
for x, ax in zip(exp2pca, axes):
pca_df = exp2pca[x]['pca']
ax.scatter(pca_df['PC1'], pca_df['PC2'], s=10, color='', edgecolor='black')
ax.axhline(0, linewidth=0.7, color='black')
ax.axvline(0, linewidth=0.7, color='black')
ax.set_title(x)
fig, axes = plt.subplots(1, 3, figsize=(20,5))
for x, ax in zip(exp2pca, axes):
pca_df = exp2pca[x]['pca']
ax.scatter(pca_df['PC1'], pca_df['PC2'], s=11, color='')
plot_nouns(pca_df, ax, size=8)
ax.axhline(0, linewidth=0.7, color='black')
ax.axvline(0, linewidth=0.7, color='black')
ax.set_title(x)
savefig('pca_text.svg')
###Output
_____no_output_____
###Markdown
Examining Fish Mean
###Code
fm_pca = exp2pca['fish_mean']['pca']
fm_loads = exp2pca['fish_mean']['loadings']
fm_pca_subset = fm_pca.loc[fm_pca.abs().max(1) < 11]
fig, ax = plt.subplots(figsize=(8,8))
ax.scatter(fm_pca_subset['PC1'], fm_pca_subset['PC2'], s=11, color='')
plot_nouns(fm_pca_subset, ax)
ax.axhline(0, linewidth=0.7, color='black')
ax.axvline(0, linewidth=0.7, color='black')
savefig('pca_fish_mean_zoomed.svg')
###Output
_____no_output_____
###Markdown
Quadrant I
###Code
# terms in Q1
def sort_on_mean(df, axis=1):
"""Sort on the mean of the absolute value across rows"""
return df.loc[df.abs().mean(axis).sort_values(ascending=False).index]
def get_quad(bound1, bound2, sort=True):
words = fm_pca[(bound1) & (bound2)]
if sort:
words = sort_on_mean(words)
return words
quad_1 = get_quad(fm_pca.PC1 > 0, fm_pca.PC2 > 0)
quad_1
# influences on Q1
def get_qloads(bound1, bound2, sort=True):
loads = fm_loads[(bound1) & (bound2)]
if sort:
loads = sort_on_mean(loads)
return loads
q1_loads = get_qloads(fm_loads[1] > 0, fm_loads[2] > 0)
q1_loads.head(20)
# sort by influence on PC2
q1_loads.sort_values(by=2, ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Quadrant IV
###Code
quad_4 = get_quad(fm_pca.PC1 > 0, fm_pca.PC2 < 0)
quad_4
q4_loads = get_qloads(fm_loads[1] > 0, fm_loads[2] < 0)
q4_loads.head(20)
###Output
_____no_output_____
###Markdown
Quadrant II
###Code
quad_2 = get_quad(fm_pca.PC1 < 0, fm_pca.PC2 > 0)
quad_2
q2_loads = get_qloads(fm_loads[1] < 0, fm_loads[2] > 0)
q2_loads.head(20)
###Output
_____no_output_____
###Markdown
Quadrant III
###Code
quad_3 = get_quad(fm_pca.PC1 < 0, fm_pca.PC2 < 0)
quad_3.head(40)
q3_loads = get_qloads(fm_loads[1] < 0, fm_loads[2] < 0)
q3_loads.head(20)
# what of JWM?
# raw log10 fisher scores below
fisherdata.loc['יום.n1'].sort_values(ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Alicia [email protected] 12/03/17 Discourse Analysis of the Australian Radio Talkback CorpusThis file starts where [process-art-corpus.ipynb](https://github.com/Data-Science-for-Linguists/Discourse-Analysis-ART-Corpus/blob/master/process-art-corpus.ipynb) left off, and is the analysis portion of this project. Table of Contents- [About the Data](about-the-data)- [Reading in Data Frames](reading-in-data-frames) - [Data Frames Summary](data-frames-summary) - [Splitting Speakers by Role](splitting-speakers-by-role) - [All Presenters](all-presenters)- [Distribution of Speakers](distribution-of-speakers) - [How many Speakers are there for each Role?](how-many-speakers-are-there-for-each-role) - [How many Males vs. Females?](how-many-males-vs-females?) - [How are Males and Females distributed across Roles?](wow-are-males-and-females-distributed-across-roles) - [Gender Equality](gender-equality)- [Comparison by Speaker Type](comparison-by-speaker-type) - [Analysis](analysis)- [Comparison by Gender](comparison-by-gender)- [Back Channels](back-channels) - [What are the Back Channels? Which ones are most common?](what-are-the-back-channels-which-ones-are-most-common) - [What Speaker Type has the most Back Channels?](what-speaker-type-has-the-most-back-channels) - [What Speaker Type has the most number of Back Channels uttered during their lines?](what-speaker-type-has-the-most-number-of-back-channels-uttered-during-their-lines) - [What Gender utters the most Back Channels?](what-gender-utters-the-most-back-channels) - [What Gender has the most Back Channels uttered while they are speaking?](what-gender-has-the-most-back-channels-uttered-while-they-are-speaking) - [Are Men more likely to utter Back Channels when a Women or Man is speaking? How about the other way around?](are-men-more-likely-to-utter-back-channels-when-a-women-or-man-is-speaking-how-about-the-other-way-around) - [How do Male and Female Most Common Back Channels Compare?](how-do-male-and-female-most-common-back-channels-compare)- [Presenter Gender Analysis](presenter-gender-analysis) - [Making Data Frames](making-data-frames) - [Presenter Distribution](presenter-distribution) - [Presenter Gender Statistics](presenter-gender-statistics)- [Caller Gender Back Channel Analysis](caller-gender-back-channel-analysis) - [Data Frame of Caller Back Channels](data-frame-of-caller-back-channels) - [Data Frame of Caller Lines with Back Channels](data-frame-of-caller-lines-with-back-channels) - [How Gender Alone Affects Back Channels](how-gender-alone-affects-back-channels)- [Conclusion](conclusion) About the Data- 27 transcribed recordings of samples of national, regional and commercial Australian talkback radio from 2004 to 2006.- *raw files* and text files- Closed Data
###Code
%pprint
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import nltk
import pandas as pd
# visualization tools
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use('classic')
###Output
_____no_output_____
###Markdown
Reading in Data Frames
###Code
# reading in data frames
speaker_df=pd.read_csv("data_files/Speakers.csv")
art_df=pd.read_csv("data_files/Texts.csv")
bk_df=pd.read_csv("data_files/Back_Channels.csv")
# speaker_df.head()
# art_df.head()
# bk_df.head()
###Output
_____no_output_____
###Markdown
Data Frames Summary- speaker_df - data frame of all unique speakers- art_df - data frame of each line of text- bk_df - data frame of all back channels
###Code
# modifying data frame column names
speaker_df.columns = ["Speaker","Segment","Speaker_Type","Gender","Name","Number_of_Utterances"]
speaker_df = speaker_df.set_index("Speaker")
print("Speaker Data Frame:")
speaker_df.head()
art_df.columns = ["Speaker","Utterance_Number","Segment","Speaker_Type","Gender","Text","Word_Toks","Num_Words","Avg_Word_Length","Sents","Num_Sents"]
art_df = art_df.set_index(keys=["Speaker","Utterance_Number"])
print("Lines of Text Data Frame:")
art_df.head()
bk_df.columns = ["","Speaker","Speaker_Type","Speaker_Gender","Back_Channel","Line_Speaker","Segment_Utterance_Number","Segment","Line_Speaker_Type","Line_Speaker_Gender"]
bk_df = bk_df.set_index("")
print("Back Channel Data Frame:")
bk_df.head()
###Output
Speaker Data Frame:
###Markdown
Splitting Speakers by Role
###Code
# dataframe of presenters
P_df=speaker_df.loc[speaker_df["Speaker_Type"]=='P',:]
# dataframe of callers
C_df=speaker_df.loc[speaker_df["Speaker_Type"]=='C',:]
# dataframe of experts
E_df=speaker_df.loc[speaker_df["Speaker_Type"]=='E',:]
###Output
_____no_output_____
###Markdown
All Presenters
###Code
P_df
###Output
_____no_output_____
###Markdown
Distribution of Speakers How many Speakers are there for each Role?- 31 Presenters- 362 Callers- 37 Experts
###Code
# Number of Speakers per Role
figure=speaker_df["Speaker_Type"].value_counts().reindex(["P","C","E"]).plot.bar()
speaker_df["Speaker_Type"].value_counts().reindex(["P","C","E"])
plt.title("Number of Speakers per Role")
plt.xlabel("Speaker Type")
plt.ylabel("Number of Speakers")
plt.show()
# saving the figure
figure.figure.savefig("images/role_totals.png")
###Output
_____no_output_____
###Markdown
There are a lot of Callers, but very few Presenters and Experts in the corpus. This is because many Callers call in during the show, while Presenters and Experts have a more steady position in each show. How many Males vs. Females?- 218 Males- 212 FemalesThe numbers of Males and Females are about equal, with slightly more Males.
###Code
figure=speaker_df["Gender"].value_counts().reindex(["M","F"]).plot.bar()
speaker_df["Gender"].value_counts().reindex(["M","F"])
plt.title("Number of Males and Females")
plt.xlabel("Gender")
plt.ylabel("Number of Speakers")
plt.show()
# saving the figure
figure.figure.savefig("images/gender_totals.png")
###Output
_____no_output_____
###Markdown
How are Males and Females distributed across Roles?
###Code
# Presenters:
fig1 = P_df["Gender"].value_counts().reindex(["M","F"]).plot.bar()
P_df["Gender"].value_counts().reindex(["M","F"])
plt.title("Male vs. Female Presenters")
plt.xlabel("Gender")
plt.ylabel("Number of Speakers")
plt.show()
# saving the figure
fig1.figure.savefig("images/presenter_genders.png")
# Callers:
fig2 = C_df["Gender"].value_counts().reindex(["M","F"]).plot.bar()
C_df["Gender"].value_counts().reindex(["M","F"])
plt.title("Male vs. Female Callers")
plt.xlabel("Gender")
plt.ylabel("Number of Speakers")
plt.show()
# saving the figure
fig2.figure.savefig("images/caller_genders.png")
# Experts:
fig3 = E_df["Gender"].value_counts().reindex(["M","F"]).plot.bar()
E_df["Gender"].value_counts().reindex(["M","F"])
plt.title("Male vs. Female Experts")
plt.xlabel("Gender")
plt.ylabel("Number of Speakers")
plt.show()
# saving the figure
fig3.figure.savefig("images/expert_genders.png")
###Output
_____no_output_____
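###Markdown
The male-to-female ratio within each role can be computed directly from the role data frames defined above; the short snippet below only makes that arithmetic explicit.
###Code
# Male-to-female ratio for each speaker role, from the role data frames defined above.
for label, role_df in [("Presenters", P_df), ("Callers", C_df), ("Experts", E_df)]:
    counts = role_df["Gender"].value_counts()
    print(label, round(counts["M"] / counts["F"], 2), ": 1")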
###Markdown
Gender Equality**Ratio of Gender by Role (Male : Female):**- Presenters: 2.1 : 1- Callers: .895 : 1- Experts: 2.36: 1There are about twice as many Male Presenters and Experts as compared to Females, but about equal numbers of Male and Female Callers, with slightly more Females.**Conclusion:** Presenters and Experts are predominantly Male and Callers are more equally distributed but with more Females than Males.Presenters are the show's hosts, hired by the program. Experts are professionals talking about their line of work. Callers however can be anyone who calls the radio station. Presenters and Experts are the people chosen by the radio to talk, and they are mostly males. Why are there more men than women working in this radio station? Is radio a generally predominantly male industry across the US?**Further Analysis Needed:** I will do further research and analysis on gender equality before making conclusions about the Australian Radio Talkback Corpus. Comparison by Speaker Type- Number of Turns- Number of Sentences- Number of Words- Average Word Length- Average Sentence Length- Average Number of Turns
###Code
# Comparing Presenter, Caller, and Expert Data Frames
# this gives a table of all the information
P_df.describe()
C_df.describe()
E_df.describe()
# SHOULD THIS SUMMARY BE HERE OR IN THE MARKDOWN CELL BELOW?
print("Summary of Important Information:")
print("Presenters:")
print("Total Number of Turns:\t",str(P_df["Number_of_Utterances"].sum())) # 1470
print("Average Number of Turns:",str(P_df["Number_of_Utterances"].mean())) # 122.5
print("Standard Deviation:\t",str(P_df["Number_of_Utterances"].std())) # 129.38
print("\nCallers:")
print("Total Number of Turns:\t",C_df["Number_of_Utterances"].sum()) # 1505
print("Average Number of Turns:",str(C_df["Number_of_Utterances"].mean())) # 11.23
print("Standard Deviation:\t",str(C_df["Number_of_Utterances"].std())) # 7.97
print("\nExperts:")
print("Total Number of Turns:\t",str(E_df["Number_of_Utterances"].sum())) # 1464
print("Average Number of Turns:",str(E_df["Number_of_Utterances"].mean())) # 91.5
print("Standard Deviation:\t",str(E_df["Number_of_Utterances"].std())) # 86.82
# data frames for Presenter, Caller, and Expert Lines
P_art_df=art_df.loc[art_df["Speaker_Type"]=='P',:]
C_art_df=art_df.loc[art_df["Speaker_Type"]=='C',:]
E_art_df=art_df.loc[art_df["Speaker_Type"]=='E',:]
# Presenter vs. Caller vs. Experts
# utterances
print("Turns:")
art_df["Speaker_Type"].value_counts().reindex(["P","C","E"])
# sentences
print("Number of Sentences:")
P_art_df["Num_Sents"].sum()
C_art_df["Num_Sents"].sum()
E_art_df["Num_Sents"].sum()
# words
print("Number of Words:")
P_art_df["Num_Words"].sum()
C_art_df["Num_Words"].sum()
E_art_df["Num_Words"].sum()
# avg word length
print("Average Word Length:")
P_art_df["Avg_Word_Length"].mean()
C_art_df["Avg_Word_Length"].mean()
E_art_df["Avg_Word_Length"].mean()
# avg sent length
print("Average Sentence Length:")
P_art_df["Num_Words"].sum()/P_art_df["Num_Sents"].sum()
C_art_df["Num_Words"].sum()/C_art_df["Num_Sents"].sum()
E_art_df["Num_Words"].sum()/E_art_df["Num_Sents"].sum()
print("Average Number of Turns:")
P_df["Number_of_Utterances"].mean()
C_df["Number_of_Utterances"].mean()
E_df["Number_of_Utterances"].mean()
###Output
Turns:
###Markdown
AnalysisSummary of Numbers Above:- Total Number of Turns: - Presenters > Callers > Experts - Total Number of Sentences: - Presenters > Callers > Experts- Total Number of Words: - Experts > Presenters > Callers- Average Word Length: - About Equal- Average Sentence Length: - Expert > Caller > Presenter - Presenter and Callers are about equal - Average Number of Turns: - Presenters > Experts > CallersImportant Discoveries:- More Callers with fewer turns- Fewer Presenters with more turns- Experts have the longest sentencesOn average, Presenters speak the most throughout the Australian Radio Talkback Corpus. There are many Callers in each show, but they do not speak for very long. The Presenters probably talk the most because they are leading the show. Across the corpus, **the Presenters have the most turns and sentences, followed by Callers and then Experts.** Average sentence length is much more indicative of speaker type than word length. Based on the Australian Radio Talkback Corpus, **Experts' sentences are the longest** with an average of about 23 words per sentence, while Callers have on average 17 words per sentence and Presenters about 16 words per sentence. Without a statistical analysis, I cannot be certain whether or not this finding is significant for this data. However, this finding makes sense, because Experts will talk at length about their topic, so they may have longer, more complicated sentences. Comparison by Gender- Number of Turns- Number of Sentences- Number of Words- Average Word Length- Average Sentence Length- Average Number of Turns
###Code
# Males vs. Females
# utterances
print("Utterances:")
art_df["Gender"].value_counts().reindex(["M","F"])
# data frames for male and female lines
M_art_df=art_df.loc[art_df["Gender"]=='M',:]
F_art_df=art_df.loc[art_df["Gender"]=='F',:]
# sentences
print("Number of Sentences:")
M_art_df["Num_Sents"].sum()
F_art_df["Num_Sents"].sum()
# words
print("Number of Words:")
M_art_df["Num_Words"].sum()
F_art_df["Num_Words"].sum()
# avg word length
print("Average Word Length:")
M_art_df["Avg_Word_Length"].mean()
F_art_df["Avg_Word_Length"].mean()
# avg sent length
print("Average Sentence Length:")
M_art_df["Num_Words"].sum()/M_art_df["Num_Sents"].sum()
F_art_df["Num_Words"].sum()/F_art_df["Num_Sents"].sum()
# building male and female data frames from speaker_df
M_df=speaker_df.loc[speaker_df["Gender"]=='M',:]
F_df=speaker_df.loc[speaker_df["Gender"]=='F',:]
print("Average Number of Turns:")
M_df["Number_of_Utterances"].mean()
F_df["Number_of_Utterances"].mean()
###Output
Utterances:
###Markdown
Summary of Key Information:- Number of Turns: - Males > Females- Number of Sentences: - Males > Females- Number of Words: - Males > Females- Average Word Length: - About Equal- Average Sentence Length: - Males > Females- Average Number of Turns: - Males > Females As previously stated, Presenters and Experts are predominantly male, while Callers have slightly more Females. Thus, men are in roles that talk more throughout the corpus, meaning that **men have more opportunities to talk in the corpus because of their roles.** Back Channels What are the Back Channels? Which ones are most common?
###Code
bk_df["Back_Channel"].value_counts()[:20]
bk_df["Back_Channel"].value_counts()[-20:]
figure = bk_df["Back_Channel"].value_counts()[:20].plot.bar()
plt.title("Top 20 Back Channels")
plt.xlabel("Back Channel")
plt.ylabel("Number of Occurances")
plt.show()
# saving the figure
figure.figure.savefig("images/top_20_back_channels.png")
###Output
_____no_output_____
###Markdown
Unfortunately, a lot of the back channels were inaudible: 303 in total. However, the top back channels make sense and I do not expect that the inaudible utterances would impact the results greatly. Laughter is marked as both plural and singular because when 2 speakers laughed at the same time, the format was .
###Code
# What speakers uttered the most Back Channels?
bk_df["Speaker"].value_counts()[:20]
###Output
_____no_output_____
###Markdown
What Speaker Type has the most Back Channels?
###Code
# number of back channels per speaker type
bk_df["Speaker_Type"].value_counts().reindex(["P","C","E"])
figure = bk_df["Speaker_Type"].value_counts().reindex(["P","C","E"]).plot.bar()
plt.title("Back Channels by Speaker Type")
plt.xlabel("Speaker Type")
plt.ylabel("Number of Back Channels")
plt.show()
# saving the figure
figure.figure.savefig("images/back_channel_speaker_types.png")
###Output
_____no_output_____
###Markdown
Callers (closely followed by Presenters) utter the most Back Channels.**Conclusions:** - There are many Callers with few lines each, so they're constantly hearing new information upon being introduced to the show.- Presenters stay throughout the entire show, so they have plenty of opportunities to utter back channels.- Experts have the fewest number of turns and sentences, and their biggest purpose is to explain a complicated topic. This means that the other speakers will be uttering more back channels for the complicated topics. What Speaker Type has the most number of Back Channels uttered during their lines?
###Code
bk_df["Line_Speaker_Type"].value_counts().reindex(["P","C","E"])
figure = bk_df["Line_Speaker_Type"].value_counts().reindex(["P","C","E"]).plot.bar()
plt.title("Back Channels by Line Speaker Type")
plt.xlabel("Line Speaker Type")
plt.ylabel("Number of Back Channels")
plt.show()
# saving the figure
figure.figure.savefig("images/back_channel_line_speaker_types.png")
###Output
_____no_output_____
###Markdown
Presenter Lines have the most number of Back Channels, closely followed by Expert Lines. There are over 1000 fewer Caller lines containing back channels. I believe Experts and Presenters have more back channels uttered while they are talking, because:- Experts have the longest sentences and are giving detailed information for many of their lines, so Presenters and Callers would utter back channels to show they are listening (and maybe understanding).- Presenters are taking many turns and uttering more sentences, so there is more information coming from the Presenters.**Potential Conclusion:** More turns and sentences and longer sentences lead to more back channels. What Gender utters the most Back Channels?For my back channel gender analysis, please visit my [Final Report](https://github.com/Data-Science-for-Linguists/Discourse-Analysis-ART-Corpus/blob/master/final_report.md72-kieran-snyder-men-interrupt-more-than-women) for information about the Language Log article [*Men Interrupt More Than Women*](http://languagelog.ldc.upenn.edu/nll/?p=13422) by Kieran Snyder.
###Code
# number of back channels per gender
bk_df["Speaker_Gender"].value_counts().reindex(["M","F"])
figure = bk_df["Speaker_Gender"].value_counts().reindex(["M","F"]).plot.bar()
plt.title("Back Channels by Speaker Gender")
plt.xlabel("Speaker Gender")
plt.ylabel("Number of Back Channels")
plt.show()
# saving the figure
figure.figure.savefig("images/back_channel_speaker_genders.png")
###Output
_____no_output_____
###Markdown
What Gender has the most Back Channels uttered while they are speaking?
###Code
# number of lines with back channels per gender
bk_df["Line_Speaker_Gender"].value_counts().reindex(["M","F"])
figure = bk_df["Line_Speaker_Gender"].value_counts().reindex(["M","F"]).plot.bar()
plt.title("Back Channels by Line Speaker Gender")
plt.xlabel("Line Speaker Gender")
plt.ylabel("Number of Back Channels")
plt.show()
# saving the figure
figure.figure.savefig("images/back_channel_line_speaker_genders.png")
###Output
_____no_output_____
###Markdown
**Conclusion:** Men produced more back channels, and more back channels were uttered while they were talking. Are Men more likely to utter Back Channels when a Women or Man is speaking? How about the other way around?
###Code
# Male Back Channels
M_bk_df=bk_df.loc[bk_df["Speaker_Gender"]=='M',:]
# Female Back Channels
F_bk_df=bk_df.loc[bk_df["Speaker_Gender"]=='F',:]
# peaking at the data frames
M_bk_df.head()
F_bk_df.head()
print("The Gender of the Line's Speaker during All Instances of Male Back Channels:")
M_bk_df["Line_Speaker_Gender"].value_counts().reindex(["M","F"])
print("The Gender of the Line's Speaker during All Instances of Female Back Channels:")
F_bk_df["Line_Speaker_Gender"].value_counts().reindex(["M","F"])
# creating bar graphs
fig1 = M_bk_df["Line_Speaker_Gender"].value_counts().reindex(["M","F"]).plot.bar()
plt.title("Male Back Channels by Line Speaker Gender")
plt.xlabel("Line Speaker Gender")
plt.ylabel("Number of Back Channels")
plt.show()
fig2 = F_bk_df["Line_Speaker_Gender"].value_counts().reindex(["M","F"]).plot.bar()
plt.title("Female Back Channels by Line Speaker Gender")
plt.xlabel("Line Speaker Gender")
plt.ylabel("Number of Back Channels")
plt.show()
# saving the figures
fig1.figure.savefig("images/male_back_channel_line_speaker_genders.png")
fig2.figure.savefig("images/female_back_channel_line_speaker_genders.png")
###Output
The Gender of the Line's Speaker during All Instances of Male Back Channels:
###Markdown
**Conclusion:** *Men* produce more Back Channels when *other men* are talking, and *Women* produce slightly more back channels when *other women* are talking. How do Male and Female Most Common Back Channels Compare?
###Code
# Most Common Male and Female Back Channels
print("Most Common Male Back Channels:")
M_bk_df["Back_Channel"].value_counts()[:20]
print("Most Common Female Back Channels:")
F_bk_df["Back_Channel"].value_counts()[:20]
# creating graphs
fig1 = M_bk_df["Back_Channel"].value_counts()[:10].plot.bar()
plt.title("Top 10 Male Back Channels")
plt.xlabel("Back Channel")
plt.ylabel("Number of Back Channels")
plt.show()
fig2 = F_bk_df["Back_Channel"].value_counts()[:10].plot.bar()
plt.title("Top 10 Female Back Channels")
plt.xlabel("Back Channel")
plt.ylabel("Number of Back Channels")
plt.show()
# saving the figures
fig1.figure.savefig("images/top_10_male_back_channels.png")
fig2.figure.savefig("images/top_10_female_back_channels.png")
###Output
Most Common Male Back Channels:
###Markdown
Female and Male Back Channels appear to be about the same. Next Question: How does Speaker Type Affect Men and Women's Back Channels? Presenter Gender Analysis
###Code
# Presenter Data Frame:
P_df
P_df["Name"].value_counts().sum()
len(P_df["Name"].unique())
###Output
_____no_output_____
###Markdown
Making Data Frames
###Code
# Male and Female Presenter Data Frames:
M_P_df=P_df.loc[P_df["Gender"]=='M',:]
F_P_df=P_df.loc[P_df["Gender"]=='F',:]
M_P_df
F_P_df
print("Number of Uninque Male IDs:")
M_P_df["Name"].value_counts().sum()
print("Number of Unique Male Presenters")
len(M_P_df["Name"].unique())
print("Number of Uninque Female IDs:")
F_P_df["Name"].value_counts().sum()
print("Number of Unique Female Presenters")
len(F_P_df["Name"].unique())
###Output
_____no_output_____
###Markdown
Presenter Distribution- 31 Unique Speaker Ids - 21 Male Ids - 10 Female Ids- 25 Unique Speakers - 15 Males - 10 Females There are 10 unique Female IDs and 10 unique Female Presenters - **no Female presents twice.**There are 21 unique Male IDs and 15 unique Male Presenters - multiple Males present twice and 1 presents 3 times.Therefore not only are there **more males hired by the show,** but **only males present multiple times.**
###Code
# Male and Female Presenter Lines Data Frames:
M_P_art_df=P_art_df.loc[P_art_df["Gender"]=='M',:]
F_P_art_df=P_art_df.loc[P_art_df["Gender"]=='F',:]
M_P_art_df.head()
F_P_art_df.head()
###Output
_____no_output_____
###Markdown
Presenter Gender Statistics
###Code
# utterances
print("Number of Utterances:")
P_art_df["Gender"].value_counts().reindex(["M","F"])
# sentences:
print("Number of Sentences:")
M_P_art_df["Num_Sents"].sum()
F_P_art_df["Num_Sents"].sum()
# words
print("Number of Words:")
M_P_art_df["Num_Words"].sum()
F_P_art_df["Num_Words"].sum()
# avg word length
print("Average Word Length:")
M_P_art_df["Avg_Word_Length"].mean()
F_P_art_df["Avg_Word_Length"].mean()
# avg sent length
print("Average Sentence Length:")
M_P_art_df["Num_Words"].sum()/M_P_art_df["Num_Sents"].sum()
F_P_art_df["Num_Words"].sum()/F_P_art_df["Num_Sents"].sum()
print("Average Number of Turns:")
M_P_df["Number_of_Utterances"].mean()
F_P_df["Number_of_Utterances"].mean()
###Output
Number of Utterances:
###Markdown
Because there are about twice as many Male Presenters as Female Presenters, I cannot compare their raw scores directly. However, looking at Average Word Length, Average Sentence Length, and Average Number of Turns, it seems that Women talk more on average than Men, because **Females have a longer average sentence length and a higher average number of turns.** Caller Gender Back Channel AnalysisI can look at Caller Gender Back Channels to compare back channels by gender in a more equal distribution of males and females. Data Frame of Caller Back Channels
###Code
# Caller's saying back channels
print("Callers Uttering Back Channels:")
C_bk_df=bk_df.loc[bk_df["Speaker_Type"]=='C',:]
C_bk_df.head()
print("All Instances of Male and Female Callers Contributing Back Channels:")
C_bk_df["Speaker_Gender"].value_counts().reindex(["M","F"])
print("All Male and Female Lines that Contained Caller Back Channels")
C_bk_df["Line_Speaker_Gender"].value_counts().reindex(["M","F"])
###Output
All Instances of Male and Female Callers Contributing Back Channels:
###Markdown
**Observations:** - Female Callers uttered twice as many back channels as compared to Male Callers.- More Females Lines had Caller Back Channels.**Conclusion:** *Female Callers* utter more back channels and have more back channels uttered while they are talking as compared to Male callers. Data Frame of Caller Lines with Back Channels
###Code
# Caller lines that include back channels
print("Caller Lines that Include Back Channels:")
C_line_bk_df=bk_df.loc[bk_df["Line_Speaker_Type"]=='C',:]
C_line_bk_df.head()
print("All Caller Lines that Contained Back Channels (by Any Speaker Type):")
C_line_bk_df["Speaker_Gender"].value_counts().reindex(["M","F"])
print("All Instances of Male and Female Caller Lines that Contained Back Channels:")
C_line_bk_df["Line_Speaker_Gender"].value_counts().reindex(["M","F"])
###Output
All Caller Lines that Contained Back Channels (by Any Speaker Type):
###Markdown
**Observations**- Of the Caller Lines that contained Back Channels, more of those back channels came from Males. - **Conclusion:** *Males* are more likely to contribute a back channel to a *Caller* than Females are.- Of the Caller Lines that contained Back Channels, more Female Caller Lines contained back channels. - **Conclusion:** *Speakers* are more likely to contribute a back channel to a *Female Caller* than a Male Caller. The real question is: **Are Male and Female Callers contributing more back channels to speakers of the same gender or different genders?** How Gender Alone Affects Back ChannelsBecause Callers are about equally male and female, I can negate the affect of the speaker role. Thus, this analysis shows the affect of *gender* instead of a combination of gender and speaker type.
###Code
print("Male Callers Uttering Back Channels during Male and Female Lines:")
# Back Channels by Male Callers
CM=C_bk_df.loc[(C_bk_df["Speaker_Gender"]=='M') & (C_bk_df["Speaker_Type"]=="C"),:]
# uttered during Male and Female Lines
CM["Line_Speaker_Gender"].value_counts().reindex(["M","F"])
print("Female Callers Uttering Back Channels during Male and Female Lines:")
# Back Channels by Female Callers
CF=C_bk_df.loc[(C_bk_df["Speaker_Gender"]=='F') & (C_bk_df["Speaker_Type"]=="C"),:]
# uttered during Male and Female Lines
CF["Line_Speaker_Gender"].value_counts().reindex(["M","F"])
# creating bar graphs
fig1 = CM["Line_Speaker_Gender"].value_counts().reindex(["M","F"]).plot.bar()
plt.title("Male Caller Back Channels by Line Speaker Gender")
plt.xlabel("Line Speaker Gender")
plt.ylabel("Number of Back Channels")
plt.show()
fig2 = CF["Line_Speaker_Gender"].value_counts().reindex(["M","F"]).plot.bar()
plt.title("Female Caller Back Channels by Line Speaker Gender")
plt.xlabel("Line Speaker Gender")
plt.ylabel("Number of Back Channels")
plt.show()
# saving the figures
fig1.figure.savefig("images/male_caller_back_channel_line_speaker_genders.png")
fig2.figure.savefig("images/female_caller_back_channel_line_speaker_genders.png")
###Output
Male Callers Uttering Back Channels during Male and Female Lines:
###Markdown
Screen data to tabular form
###Code
text = open('list.txt', 'r', encoding = 'utf-8')
to_analyse = text.read()
to_analyse_list = to_analyse.split('\n')
display(to_analyse_list)
import pandas as pd
objectives = []
subsection = []
standards = []
subjects = []
include = ['คณิตศาสตร์', 'วิทยาศาสตร์', 'สังคมศึกษา', 'สุขศึกษาและพลศึกษา', 'ศิลปะ', 'การงานอาชีพ', 'ภาษาต่างประเทศ']
for i in to_analyse_list:
tmp = ' '
if i == '': continue
elif i[0].isnumeric():
subjects.append(i.split()[1])
elif subjects[-1] in include and i[:4] == 'สาระ':
subsection.append({'วิชา': subjects[-1], 'สาระ': tmp.join(i.split()[2:])})
elif i[:7] == 'มาตรฐาน' and subjects[-1] in include:
standards.append({'วิชา': subjects[-1], 'สาระ': subsection[-1]['สาระ'], 'มาตรฐาน': tmp.join(i.split()[3:])})
elif i[:7] == 'มาตรฐาน':
standards.append({'วิชา': subjects[-1], 'มาตรฐาน': tmp.join(i.split()[3:])})
elif i[:1] == '\t':
if i[2] == ' ' or i[1].isnumeric():
tmp2 = i[1:].split()
objectives.append({'มาตรฐาน' : standards[-1]['มาตรฐาน'], 'ตัวชี้วัด' : tmp.join(tmp2[1:]) if tmp2[0][0].isnumeric() else tmp.join(tmp2[3:])})
import pandas as pd
object_pd = pd.read_csv(r'Std51\objectives.csv', encoding='UTF-8')
standard_pd = pd.read_csv(r'Std51\standards.csv', encoding='UTF-8')
subject_pd = pd.read_csv(r'Std51\subjects.csv', encoding='UTF-8')
subsection_pd = pd.read_csv(r'Std51\subsection.csv', encoding='UTF-8')
import pythainlp
from pythainlp import tokenize, spell
obj_word = []
for i in object_pd['ตัวชี้วัด']:
obj_word.append(tokenize.word_tokenize(i, engine='deepcut', keep_whitespace = False))
import ast
def ordered_sublist(list1, list2):
    # Return (True, [start indices]) when list1 occurs as a contiguous
    # sub-list of list2, otherwise (False, -1).
    if list1[0] not in list2: return False, -1
    idx_list = []
    for start in range(len(list2) - len(list1) + 1):
        if list2[start:start + len(list1)] == list1:
            idx_list.append(start)
    if idx_list == []: return False, -1
    return True, idx_list
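# Example (illustrative): ordered_sublist(['b', 'c'], ['a', 'b', 'c', 'b', 'c'])
# returns (True, [1, 3]); a list1 that never occurs contiguously returns (False, -1).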
def join_list(joindict, testlist):
for i,j in joindict.items():
tmp = ast.literal_eval(i)
condition = ordered_sublist(tmp, testlist)
if condition[1] != -1: condition[1].reverse()
if condition[0]:
for idx in condition[1]:
testlist[idx:idx+len(tmp)] = j
return testlist
dict_freq = {}
for i in range(len(obj_word)):
tmp = set(obj_word[i])
for j in list(tmp):
try:
dict_freq[j].append(i)
except:
dict_freq[j] = [i]
dict_count = {}
for i, j in dict_freq.items():
dict_count[i] = len(j)
sort_value = pd.Series(dict_count).to_frame().reset_index().sort_values(0, ascending = False)
from pythainlp.tag import pos_tag
removal = sort_value[sort_value.apply(lambda x: pos_tag([x['index']])[0][1] not in ['NCMN', 'NPRP', 'VACT'], axis = 1)]['index'].tolist()
for i in removal:
del dict_freq[i]
newdf = pd.merge(subject_pd, pd.merge(subsection_pd, pd.merge(standard_pd, object_pd), how='right'), how='left')
universal = {}
for i, j in dict_freq.items():
for k in j:
value = newdf.iloc[k]['สาระ'] if newdf.iloc[k]['วิชา'] == 'วิทยาศาสตร์' else newdf.iloc[k]['วิชา']
try: universal[i].add(value)
except: universal[i] = set(); universal[i].add(value)
universal
filtration = {}
for i, j in universal.items():
cond_set = {subject_pd['วิชา'].iloc[0], subject_pd['วิชา'].iloc[7]}
if j == cond_set:
filtration[i] = len(dict_freq[i])
print(cond_set)
import json
to_json = {'elements': [], 'connection': []}
subjects = {'ภาษาไทย':'Thai Linguistics', 'คณิตศาสตร์':'Mathematics', 'ชีววิทยา':'Biology', 'เคมี':'Chemistry', 'ฟิสิกส์':'Physics',
'โลก ดาราศาสตร์ และอวกาศ':'Geology, Astronomy, and Cosmology', 'เทคโนโลยี':'Technology', 'สังคมศึกษา':'Social Science',
'สุขศึกษาและพลศึกษา':'Health Science & Physical Education', 'ศิลปะ':'Arts', 'การงานอาชีพ':'Home Economics',
'ภาษาต่างประเทศ':'Foreign Linguistics'}
for i, j in universal.items():
tmp_elements = {}
tmp_elements['label'] = i
tmp_elements['count'] = dict_count[i]
tmp_elements['tags'] = []
for k in j: tmp_elements['tags'].append(subjects[k])
to_json['elements'].append(tmp_elements)
with open('course.json', 'w', encoding = 'utf8') as json_file:
json.dump(to_json, json_file, ensure_ascii = False)
to_json
###Output
_____no_output_____
###Markdown
Agent-Based Traffic Model

Background

This model is a looped implementation of the cellular automaton (CA) described by Nagel and Schreckenberg (NaSch). The NaSch CA model splits agent (vehicle) actions into four stages:

1. Acceleration
2. Braking
3. Randomisation
4. Vehicle Movement

In this implementation the 4th action is separated from the other actions to simulate simultaneous activation of the agents. This isn't strictly necessary for non-multithreaded processes, but it ensures that vehicle positions wouldn't cause conflicts if it were multithreaded.

Implementation

The model is written in Python using the Mesa ABM framework, which allows for easy visualisation. This is a demonstration of running a Mesa model in an IPython Notebook, which is an alternative to running it using the javascript visualisation in a webpage. The actual model and agent code are implemented in model.py, in the same directory as this notebook. Below, we will import the model class, instantiate it, run it, and plot the average speed of the agents.
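As an aside before running the Mesa model, here is a minimal sketch of one NaSch update step on a circular single-lane road. It is purely illustrative: the real implementation lives in model.py, and the slowdown probability `p_slow` below is a placeholder rather than the value the model uses.
###Code
# Illustrative sketch of one synchronous NaSch update (not the model.py code).
import random

def nasch_step(positions, speeds, road_length, max_speed, p_slow=0.3):
    order = sorted(range(len(positions)), key=lambda i: positions[i])
    new_speeds = list(speeds)
    for k, i in enumerate(order):
        ahead = order[(k + 1) % len(order)]
        gap = (positions[ahead] - positions[i] - 1) % road_length  # free cells ahead
        v = min(speeds[i] + 1, max_speed)      # 1. acceleration
        v = min(v, gap)                        # 2. braking
        if v > 0 and random.random() < p_slow:
            v -= 1                             # 3. randomisation
        new_speeds[i] = v
    # 4. movement: applied only after all speeds are decided (simultaneous activation)
    new_positions = [(p + v) % road_length for p, v in zip(positions, new_speeds)]
    return new_positions, new_speeds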
###Code
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = [10, 6]
plt.rcParams['figure.dpi'] = 100
from model import NaSchTraffic
###Output
_____no_output_____
###Markdown
Now we instantiate a model instance: a 1x60 grid with 5 vehicles and a max vehicle speed of 4.
###Code
model = NaSchTraffic(1, 60, 5, 4, seed=1)
###Output
_____no_output_____
###Markdown
We want to run the model until it settles, but it's hard to tell when that is, so let's just run it for 100 steps:
###Code
while model.running and model.schedule.steps < 100:
model.step()
print(model.schedule.steps) # Show how many steps have actually run
###Output
100
###Markdown
The model has a DataCollector object, which checks and stores the average speed of the agents at every step.It also collects the individual speed and position of each agent at each step.It can also generate a pandas DataFrame of the data it has collected.
###Code
model_out = model.datacollector.get_model_vars_dataframe()
###Output
_____no_output_____
###Markdown
The dataframe for the model:
###Code
model_out.head()
###Output
_____no_output_____
###Markdown
Finally, we can plot the 'Average_Speed' series:
###Code
plt.plot(model_out.Average_Speed)
plt.xlabel('Step Number')
plt.ylabel('Average Speed')
plt.show()
###Output
_____no_output_____
###Markdown
For testing purposes, here is the dataframe for the agents giving each agent's x position and speed at each step.*commented out as not yet reimplemented*
###Code
# agent_out = model.datacollector.get_agent_vars_dataframe()
# agent_out.head()
###Output
_____no_output_____
###Markdown
Effect of speed limit and vehicle quantity on average traffic speedNow, we can do a parameter sweep to see how the average speed changes with the number of vehicles and the maximum speed. First we make a new function to collect the average speed during the second half of the simulation.
###Code
from mesa.batchrunner import BatchRunner
import itertools
def get_averages(model):
    """
    Find the average speed of all the agents over the second half of the run
    (the last 60 recorded per-step averages, given max_steps=120).
    """
    recorded = list(model.averages)       # assumes one recorded value per step
    selected_averages = recorded[-60:]
    return sum(selected_averages) / len(selected_averages)
model_reporters={"Average_Speed": get_averages}
###Output
_____no_output_____
###Markdown
Now, we set up the batch run, with a dictionary of fixed and changing parameters.Let's vary the maximum speed, and the number of vehicles.
###Code
fixed_params = {"height": 1, "width": 60}
variable_parms = {"general_max_speed": range(1, 6), "vehicle_quantity": range(1, 20+1)}
###Output
_____no_output_____
###Markdown
Then we create a batch runner object to conduct the parameter sweep.The number of iterations is the number of runs it does of the whole parameter space.
###Code
param_sweep = BatchRunner(NaSchTraffic,
variable_parameters=variable_parms, fixed_parameters=fixed_params,
iterations=10,
max_steps=120,
model_reporters=model_reporters)
###Output
_____no_output_____
###Markdown
Then we run the parameter sweep (this can take a few minutes).
###Code
param_sweep.run_all()
###Output
1000it [00:30, 32.38it/s]
###Markdown
Now we create the dataframe for the data collected like we did for the single model run.
###Code
df = param_sweep.get_model_vars_dataframe()
df.head()
###Output
_____no_output_____
###Markdown
A scatter plot can be used to show how the parameters affect each other. We have varied more than one parameter, so we should try to visualise the interactions. One way of achieving this is with coloured data points:
###Code
plt.scatter(df.Average_Speed, df.general_max_speed, c=df.vehicle_quantity, cmap=plt.cm.coolwarm)
plt.xlabel('Average Speed')
plt.ylabel('Max Speed')
bar = plt.colorbar()
bar.set_label('Number of Vehicles')
plt.grid(True)
###Output
_____no_output_____
###Markdown
If coloured data points aren't showing the trends clearly enough, another option is a 3D scatter plot:
###Code
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
# fig.tight_layout(pad=4)
ax = Axes3D(fig)
ax.scatter(df.vehicle_quantity, df.general_max_speed, df.Average_Speed, c=df.vehicle_quantity, cmap=plt.cm.coolwarm)
ax.set_zlabel('Average Speed')
plt.xlabel('Number of Vehicles')
plt.ylabel('Max Speed')
plt.show()
###Output
_____no_output_____
###Markdown
Report on the final assignment of the Data Structures course (2017/2)

Fernando Correa Gomes (00274317) and Daniel de Souza Novaes (00290193)

This report analyses the performance of a program written in C as the final assignment of this course. The analysis starts from the example file: what are the performance differences between the splay tree and the BST (ABP) implementations when converting the file TheGodfather-MarioPuzo-Chapter1-English.txt (78K) from ASCII to Morse code?
###Code
read_csv()
###Output
_____no_output_____
###Markdown
We can see that, between the splay tree and the BST (ABP), the total processing time is very close, but the number of comparisons performed during the conversion is much smaller for the splay tree than for the BST. The number of comparisons should not change between runs, but because the execution time is measured with C's clock function, it would be more reliable to repeat the execution of the program several times on the same file and use the mean time for comparison.
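As a sketch of that idea (assuming the data.csv columns defined by read_csv further down: filename, tree, total_time), the repeated runs could be summarised per file and per tree with pandas. The helper `mean_times` is hypothetical and not part of the notebook.

```python
import pandas as pd

def mean_times(df: pd.DataFrame) -> pd.DataFrame:
    """Average the repeated time measurements per (filename, tree) pair."""
    return (df.groupby(["filename", "tree"])["total_time"]
              .agg(["mean", "std", "count"])
              .reset_index())

# Hypothetical usage: mean_times(df) gives one averaged row per (file, tree) pair.
```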
###Code
time_histogram(df)
###Output
_____no_output_____
###Markdown
The time distribution tends to be very similar between the two trees, at least for this file. With a larger number of characters to convert, the time differences should become more significant, so let's repeat the same process with a 331K file.
###Code
time_histogram(df_insect)
###Output
_____no_output_____
###Markdown
In this example it is already possible to notice a much more pronounced difference between the execution-time distributions of the BST (ABP) and the splay tree. So perhaps with even larger files the time difference will be even more visible. Let's run the same test with files of 1.5M and 3.2M, respectively:
###Code
time_histogram(df_history)
time_histogram(df_miserables)
###Output
_____no_output_____
###Markdown
The difference became quite clear in the last example. It is worth noting that all the files used so far contained English text; none was generated artificially. Generating .txt files with artificial text can favour the performance of one tree over the other. Below are the histograms of two artificially generated files, each of which favours a specific type of tree.
###Code
time_histogram(df_splay_biased)
time_histogram(df_bst_biased)
comparison_list = [df.at[800, "comparisons"], df.at[801, "comparisons"], df.at[1000, "comparisons"], df.at[1001, "comparisons"]]
file_sizes = ["98K, Splay", "98K, BST"]
comparison_bar(comparison_list, file_sizes)
###Output
_____no_output_____
###Markdown
To conclude, let's show the overall differences between the two implementations.
###Code
m_splay, m_bst, c_splay, c_bst = lists_of_values(df)
file_sizes = [78, 331, 1.5*1024, 3.2*1024]
f, (ax1, ax2) = plt.subplots(2)
ax1.plot(file_sizes, m_splay, 'o', label="Splay")
ax1.plot(file_sizes, m_bst, 'o', label="ABP")
ax1.set_title('Tempo gasto em relação ao tamanho do arquivo')
ax1.legend()
ax1.set_ylabel("Tempo em ms")
ax1.set_xlabel("Tamanho em KB")
ax2.set_title('Comparações em relação ao tamanho do arquivo')
ax2.plot(file_sizes, c_splay, label="Splay")
ax2.plot(file_sizes, c_bst, label="ABP")
ax2.set_ylabel("Número de comparações")
ax2.set_xlabel("Tamanho em KB")
plt.tight_layout()
! rm data.csv
! ./txtToMorse -t TabelaMorse.txt -i test-files/TheGodfather-MarioPuzo-Chapter1-English.txt -o saida.txt -s -c
! ./txtToMorse -t TabelaMorse.txt -i test-files/TheGodfather-MarioPuzo-Chapter1-English.txt -o saida.txt -c
! rm data.csv
for i in range(100):
convert("test-files/TheGodfather-MarioPuzo-Chapter1-English.txt")
df = read_csv()
for i in range(100):
convert("test-files/insect_adventures.txt")
df = read_csv()
for i in range(100):
convert("test-files/history_modern_philosophy.txt")
df = read_csv()
df_history = df.iloc[400:600]
for i in range(100):
convert("test-files/les_miserables.txt")
df = read_csv()
df_miserables = df.iloc[600:800]
create_splay_biased();
for i in range(100):
convert("./splay_biased.txt")
df = read_csv()
df_splay_biased = df.iloc[800:1000]
create_bst_biased();
for i in range(100):
convert("./bst_biased.txt")
df = read_csv()
df_bst_biased = df.iloc[1000:1200]
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def read_csv():
df = pd.read_csv("data.csv", header=None,
names=["filename", "tree", "total_time", "comparisons", "converted_chars", "tree_height"])
df["tree"] = df["tree"].replace(trees)
return df
def convert(filename):
! ./txtToMorse -t TabelaMorse.txt -i {filename} -o saida.txt -s -c
! ./txtToMorse -t TabelaMorse.txt -i {filename} -o saida.txt -c
def time_histogram(dataframe):
total_time = []
total_time.append(dataframe[dataframe['tree']=="Splay"]['total_time'].tolist())
total_time.append(dataframe[dataframe['tree']=="ABP"]['total_time'].tolist())
colors = ['red', 'blue']
labels = ['Splay', 'ABP']
plt.hist(total_time, 15, histtype='step', color=colors, label=labels)
plt.title('Frequência de tempo da execução')
plt.xlabel("Tempo em ms")
plt.ylabel("Frequência")
plt.legend()
plt.show()
def comparison_bar(values, file_size):
N = len(values)//2
labels = ['Splay', 'ABP']
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, values[0::2], width, color='red')
rects2 = ax.bar(ind + width, values[1::2], width, color='blue')
# add some text for labels, title and axes ticks
ax.set_title('Comparações durante execução')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(file_size)
ax.legend((rects1[0], rects2[0]), labels)
plt.show()
trees = {0: "ABP",
1: "Splay"}
import random
def create_splay_biased():
with open("./splay_biased.txt", 'w') as file:
for i in range(100000):
file.write("Y")
def create_bst_biased():
random.seed()
char = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890.,? '!/()&:;=-_\"$@"
with open("./bst_biased.txt", 'w') as file:
for i in range(100000):
file.write(char[random.randint(0, len(char) - 1)])
def lists_of_values(dataframe):
mean_splay = [0, 0, 0, 0]
mean_bst = [0, 0, 0, 0]
comparisons_splay = []
comparisons_bst = []
for i in range(800):
        if (i % 2) == 0:
mean_splay[i//200] += dataframe["total_time"][i]
else:
mean_bst[i//200] += dataframe["total_time"][i]
for i in range(4):
mean_splay[i] = mean_splay[i] / 100
comparisons_splay.append(dataframe["comparisons"][i * 200])
mean_bst[i] = mean_bst[i] / 100
comparisons_bst.append(dataframe["comparisons"][(i * 200) + 1])
return mean_splay, mean_bst, comparisons_splay, comparisons_bst
###Output
_____no_output_____
###Markdown
EE5907/EE5027 Programming Assignment CA1

> by: SUN Shuo A0162488U
>
> "You may just run the code blocks all the way till the end"

Data Processing
###Code
%matplotlib inline
import scipy.io
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
# Load mat data
mat_data = scipy.io.loadmat('data/spamData.mat')
#print(mat_data)
x_train = mat_data['Xtrain']
y_train = mat_data['ytrain']
x_test = mat_data['Xtest']
y_test = mat_data['ytest']
#x_train = np.array([(1,0), (1,1), (0,0)]).reshape(-1,2)
#y_train = np.array([1, 1, 0]).reshape(-1,1)
#x_test = np.array([(1,0), (1,0)]).reshape(-1,2)
#y_test = np.array([1, 1]).reshape(-1,1)
# Check data shapes and types
print("X train", type(x_train), "shape:", x_train.shape, "dtype:", x_train.dtype)
print("y train", type(y_train), "shape:", y_train.shape, "dtype:", y_train.dtype)
print("X test", type(x_test), "shape:", x_test.shape, "dtype:", x_test.dtype)
print("y test", type(y_test), "shape:", y_test.shape, "dtype:", y_test.dtype)
# Binarization
x_train_bin = (x_train > 0) * 1
x_test_bin = (x_test > 0) * 1
#print(x_train_bin)
#print(x_test_bin)
# Log Transform
x_train_log = np.log(x_train + 0.1)
x_test_log = np.log(x_test + 0.1)
#print(x_train_log)
#print(x_test_log)
###Output
X train <class 'numpy.ndarray'> shape: (3065, 57) dtype: float64
y train <class 'numpy.ndarray'> shape: (3065, 1) dtype: uint8
X test <class 'numpy.ndarray'> shape: (1536, 57) dtype: float64
y test <class 'numpy.ndarray'> shape: (1536, 1) dtype: uint8
###Markdown
Q1. Beta-binomial Naive Bayes (24%)
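The classifier implemented below uses the Beta posterior predictive for each binarised feature. In the notation of the code, with `N` training e-mails of class c, `N_1` of them having feature j switched on, and a symmetric Beta prior a = b = α, the quantity returned by `beta()` and the class decision are:

$$
p(x_j = 1 \mid \mathcal{D}, y = c) = \frac{N_1 + \alpha}{N + 2\alpha},
\qquad
\hat{y} = \arg\max_{c \in \{0,1\}} \Big[\, \log p(y=c) + \sum_{j} \log p(x_j \mid \mathcal{D}, y=c) \,\Big]
$$

where the class prior p(y = 1) is the maximum-likelihood estimate λ_ML computed from the training labels.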
###Code
def beta(N, N_1, a, b):
"""
    Posterior predictive probability that a feature is 1, given `N_1` ones
    out of `N` class examples and a Beta(`a`, `b`) prior
"""
if (N + a + b) > 0:
return (N_1 + a)/(N + a + b)
else:
return 0
def computeFeatureLikelihood(X_train, Y_train, alpha):
"""
Compute the feature likelihood term on all training data
Class: `c`, Feature: `j`: p(x_test_j| x_i_j, y_test=c)
"""
eta = np.zeros((X_train.shape[1], 2))
for j in range(X_train.shape[1]):
X_train_j = X_train[:, j].reshape(-1, 1)
N_1 = (X_train_j[Y_train == 1] == 1).sum()
N_0 = (X_train_j[Y_train == 0] == 1).sum()
#print("N:", N, "N_1:", N_1)
eta[j,1] = beta((Y_train == 1).sum(), N_1, alpha, alpha)
eta[j,0] = beta((Y_train == 0).sum(), N_0, alpha, alpha)
return eta
def lookUpfeatureLikelihood(eta, j, x_j, c):
"""
Look up for the feature likelihood term for one (x_test, y_test=c) data point
Class: `c`, Feature: `j`: p(x_test_j| x_i_j, y_test=c)
"""
if x_j == 1:
return eta[j, c]
else:
return 1 - eta[j, c]
def posteriorPredictiveDistribution(X_test, i, c, eta):
"""
Compute the posterior predictive distribution of test feature
SUM of log(p(x_test_j | x_i_j, y_test=c))
"""
p_sum = 0
# For its j-th feature
for j in range(X_test.shape[1]):
p = lookUpfeatureLikelihood(eta, j, X_test[i][j], c)
if p > 0:
p_sum += np.log(p)
#print("Term(", i, ",", j, ") is:", p)
return p_sum
def betaBinomialNaiveBayes(X_train, Y_train, X_test, alpha):
"""
Fit a Beta Binomial Naive Bayes Classifier on the `X_train`, `Y_train` data,
and predict the results `Y_pred` with the given `alpha`
"""
# Class label prior lambda
lambda_ml = (Y_train == 1).sum() / Y_train.shape[0]
#print("lambda_ml:", lambda_ml)
eta = computeFeatureLikelihood(X_train, Y_train, alpha)
Y_pred = np.zeros((X_test.shape[0], 1), dtype=int)
# For the i-th test data
for i in range(Y_pred.shape[0]):
P_0 = np.log(1 - lambda_ml) + posteriorPredictiveDistribution(X_test, i, 0, eta)
P_1 = np.log(lambda_ml) + posteriorPredictiveDistribution(X_test, i, 1, eta)
#print(P_0)
#print(P_1)
if P_0 < P_1:
Y_pred[i][0] = 1
#print(Y_pred)
#print("y predict", type(Y_pred), "shape:", Y_pred.shape, "dtype:", Y_pred.dtype)
return Y_pred
def computeErrorRate(X_train, Y_train, X_test, Y_test, alpha):
"""
Compute the Error Rate based on the `Y_pred` result and the given ground truth `Y_test`,
with a given alpha values
"""
Y_pred = betaBinomialNaiveBayes(X_train, Y_train, X_test, alpha)
num_error = (Y_pred != Y_test).sum()
return num_error/Y_test.shape[0]
def compareAlphas(X_train, Y_train, X_test, Y_test, alphas):
"""
Compute the Error Rate based on the `Y_pred` result and the given ground truth `Y_test`,
with varying alpha values
"""
error_rates = np.zeros((alphas.shape[0], 1))
for i in tqdm(range(alphas.shape[0])):
error_rates[i] = computeErrorRate(X_train, Y_train, X_test, Y_test, alphas[i])
return error_rates
###Output
_____no_output_____
###Markdown
Compute and Plot Results
###Code
# Set experimenting alpha values
alphas = np.arange(0, 100.5, 0.5)
print("Plotting error rates on the training set:")
train_error_rates = compareAlphas(x_train_bin, y_train, x_train_bin, y_train, alphas)
print("Plotting error rates on the test set:")
test_error_rates = compareAlphas(x_train_bin, y_train, x_test_bin, y_test, alphas)
# Plotting
fig, ax = plt.subplots()
line1, = ax.plot(alphas, train_error_rates, label='training')
line2, = ax.plot(alphas, test_error_rates, dashes=[6, 2], label='test')
ax.set(xlabel='α', ylabel='error rate', title='Q1. Beta-binomial Naive Bayes')
ax.legend()
ax.grid()
fig.savefig("pics/q1.png")
plt.show()
# Print some results
print("On the training set, the error rates for α = 1, 10, 100 are respectively:",
train_error_rates[2], ",", train_error_rates[20], ",", train_error_rates[-1])
print("On the test set, the error rates for α = 1, 10, 100 are respectively:",
test_error_rates[2], ",", test_error_rates[20], ",", test_error_rates[-1])
###Output
Plotting error rates on the training set:
###Markdown
Q2. Gaussian Naive Bayes (24%)
###Code
def gaussian(x, mu, sigma_sq):
"""
Compute the gaussian(`mu`, `sigma_sq`) distribution of `x`
"""
if sigma_sq > 0:
return 1/np.sqrt(2*np.pi*sigma_sq) * np.exp(-0.5*np.power((x - mu), 2)/sigma_sq)
else:
return 0
def paramMLEstimate(X_train, Y_train, c):
"""
Compute the ML estimate of `mean` and `var` for each feature
"""
row_idxs = []
for i in range(Y_train.shape[0]):
if Y_train[i][0] == c:
row_idxs.append(i)
X_train_c = X_train[np.array(row_idxs), :]
#print("X_train_c:", X_train_c.shape)
mean = np.mean(X_train_c, axis=0)
var = np.var(X_train_c, axis=0)
#print("Mean:", mean[0], "shape:", mean.shape)
#print("Var:", var[0], "shape:", var.shape)
return mean, var
def featureLikelihood(x_j, mu, sigma_sq):
"""
Compute the feature likelihood term for one (x_test, y_test=c) data point
Class: `c`, Feature: `j`: p(x_test_j| x_i_j, y_test=c)
"""
return gaussian(x_j, mu, sigma_sq)
def sumFeatureLikelihood(X_test, Means, Vars, i, c):
"""
Compute the sum of test feature likelihood:
SUM(log(p(x_test_j | x_i_j, y_test=c)))
"""
p_sum = 0
for j in range(X_test.shape[1]):
p_sum += np.log(featureLikelihood(X_test[i][j], Means[c][j], Vars[c][j]))
return p_sum
def GaussianNaiveBayes(X_train, Y_train, X_test):
"""
Fit a Beta Binomial Naive Bayes Classifier on the `X_train`, `Y_train` data,
and predict the results `Y_pred` with the given `alpha`
"""
# Class label prior lambda
lambda_ml = (Y_train == 1).sum() / Y_train.shape[0]
#print("lambda_ml:", lambda_ml)
Means = np.zeros((2, X_train.shape[1]))
Vars = np.zeros((2, X_train.shape[1]))
for i in range(2):
Means[i], Vars[i] = paramMLEstimate(X_train, Y_train, i)
#print("Means:", Means, "shape:", Means.shape)
#print("Vars:", Vars, "shape:", Vars.shape)
Y_pred = np.zeros((X_test.shape[0], 1), dtype=int)
# For the i-th test data
for i in range(Y_pred.shape[0]):
P_0 = np.log(1 - lambda_ml) + sumFeatureLikelihood(X_test, Means, Vars, i, 0)
P_1 = np.log(lambda_ml) + sumFeatureLikelihood(X_test, Means, Vars, i, 1)
#print(P_0)
#print(P_1)
if P_0 < P_1:
Y_pred[i][0] = 1
#print(Y_pred)
#print("y predict", type(Y_pred), "shape:", Y_pred.shape, "dtype:", Y_pred.dtype)
return Y_pred
def computeErrorRate(X_train, Y_train, X_test, Y_test):
"""
Compute the Error Rate based on the `Y_pred` result and the given ground truth `Y_test`
"""
Y_pred = GaussianNaiveBayes(X_train, Y_train, X_test)
num_error = (Y_pred != Y_test).sum()
return num_error/Y_test.shape[0]
###Output
_____no_output_____
###Markdown
Compute Results
###Code
# Compute the train and test error rate
train_error_rate = computeErrorRate(x_train_log, y_train, x_train_log, y_train)
test_error_rate = computeErrorRate(x_train_log, y_train, x_test_log, y_test)
# Print some results
print("On the training set, the error rate is:", train_error_rate)
print("On the test set, the error rate is:", test_error_rate)
###Output
/var/folders/fg/jctl91s50mlb_ynfb3z_7xx80000gn/T/ipykernel_35686/4275886289.py:42: RuntimeWarning: divide by zero encountered in log
p_sum += np.log(featureLikelihood(X_test[i][j], Means[c][j], Vars[c][j]))
###Markdown
Q3. Logistic regression (24%)
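The code below minimises the l2-regularised negative log-likelihood with a damped Newton update (learning rate η and a tolerance-based stopping rule). With μ_i = sigm(wᵀx_i) and the bias term excluded from the penalty, the quantities it computes are:

$$
\mathrm{NLL}_{\text{reg}}(w) = -\sum_{i}\big[\,y_i \log \mu_i + (1-y_i)\log(1-\mu_i)\,\big] + \tfrac{\lambda}{2}\, w^{\top} w,
\qquad \mu_i = \mathrm{sigm}(w^{\top} x_i)
$$

$$
g_{\text{reg}} = X^{\top}(\mu - y) + \lambda w, \qquad
H_{\text{reg}} = X^{\top} S X + \lambda I, \quad S = \mathrm{diag}\big(\mu_i(1-\mu_i)\big), \qquad
w \leftarrow w - \eta\, H_{\text{reg}}^{-1}\, g_{\text{reg}}
$$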
###Code
def sigmoid(x):
"""
Compute the sigmoid(`x`)
"""
return 1/(1 + np.exp(-x))
def mu(w, X):
"""
Compute the sigmoid(`-w^Tx`) for N feature vectors
"""
mu_ = []
for i in range(X.shape[0]):
mu_.append(sigmoid(np.transpose(w).dot(X[i])))
return np.array(mu_)
def NLL(w, X, Y):
"""
Compute the Negative Log Likelihood, NLL(`w`)
"""
nll_sum = 0
mu_ = mu(w, X)
for i in range(Y.shape[0]):
        nll_sum += Y[i]*np.log(mu_[i]) + (1 - Y[i])*np.log(1 - mu_[i])
return -nll_sum
def NLLReg(w_, X_, Y, _lambda):
"""
Compute the Negative Log Likelihood with l2 regularization, NLL_reg(`w_`)
"""
return NLL(w_, X_, Y) + 0.5*_lambda*np.transpose(w_).dot(w_)
def g(mu_, X_, Y):
"""
Compute NLL's first derivatives `g`
"""
return np.transpose(X_).dot(mu_ - Y)
def hessian(mu_, X_):
"""
Compute NLL's second derivatives, the Hessian Matrix `H`
"""
for i in range(mu_.shape[0]):
mu_[i] = mu_[i]*(1 - mu_[i])
S = np.diag(mu_.reshape(mu_.shape[0]))
return np.transpose(X_).dot(S).dot(X_)
def newtonsMethod(w, X, Y, k_max, _lambda, eta, tolerance):
"""
Optimization with Newton's Method
"""
w_ = np.insert(w, 0, 0.0, axis=0)
X_ = np.insert(X, 0, 1.0, axis=1)
w_curr = w_
diag_v = np.ones(w_.shape[0])
diag_v[0] = 0.0
for i in range(k_max):
v = np.copy(w_curr)
v[0] = 0.0
mu_ = mu(w_curr, X_)
g_reg = g(mu_, X_, Y) + _lambda*v
H_reg = hessian(mu_, X_) + _lambda*np.diag(diag_v)
d = np.linalg.inv(H_reg).dot(-g_reg)
if np.average((d/w_curr)) < tolerance:
# print("Optimization finished at iteration:", i)
break
else:
w_next = w_curr + eta*d
w_curr = w_next
return w_next
def logisticRegressionPredict(w_, X_test):
"""
Predict the results `Y_pred` based on the given weights `w_`
"""
X_ = np.insert(X_test, 0, 1.0, axis=1)
# Start Prediction
mu_test = mu(w_, X_)
Y_pred = np.zeros((X_test.shape[0], 1), dtype=int)
for i in range(mu_test.shape[0]):
P_1 = np.log(mu_test[i])
P_0 = np.log(1 - mu_test[i])
if P_1 > P_0:
Y_pred[i][0] = 1
return Y_pred
def computeErrorRate(w_, X_test, Y_test):
"""
Compute the Error Rate based on the `Y_pred` result and the given ground truth `Y_test`,
with a given alpha values
"""
Y_pred = logisticRegressionPredict(w_, X_test)
num_error = (Y_pred != Y_test).sum()
return num_error/Y_test.shape[0]
def compareLambdas(X_train, Y_train, X_test, Y_test, lambdas, lr, tol, k_max):
"""
Compute the Error Rate based on the `Y_pred` result and the given ground truth `Y_test`,
with varying lambda values
"""
train_error_rates = np.zeros((lambdas.shape[0], 1))
test_error_rates = np.zeros((lambdas.shape[0], 1))
for i in tqdm(range(lambdas.shape[0])):
# Start Training
        w = np.zeros((X_train.shape[1], 1))
        w = newtonsMethod(w, X_train, Y_train, k_max, lambdas[i], lr, tol)
# Start Prediction
train_error_rates[i] = computeErrorRate(w, X_train, Y_train)
test_error_rates[i] = computeErrorRate(w, X_test, Y_test)
return train_error_rates, test_error_rates
###Output
_____no_output_____
###Markdown
Train, Predict and Plot
###Code
# Hyperparameters
lr = 0.01
tol = 1e-6
k_max = 1000
# Set experimenting lambda values
lambdas = np.concatenate((np.arange(1, 10, 1), np.arange(10, 105, 5)), axis=None)
print(lambdas)
print("Plotting error rates...")
train_error_rates, test_error_rates = compareLambdas(x_train_log, y_train, x_test_log, y_test, lambdas, lr, tol, k_max)
# Plotting
fig, ax = plt.subplots()
line1, = ax.plot(lambdas, train_error_rates, label='training')
line2, = ax.plot(lambdas, test_error_rates, dashes=[6, 2], label='test')
ax.set(xlabel='λ', ylabel='error rate', title='Q3. Logistic Regression')
ax.legend()
ax.grid()
fig.savefig("pics/q3.png")
plt.show()
# Print some results
print("On the training set, the error rates for λ = 1, 10, 100 are respectively:",
train_error_rates[0], ",", train_error_rates[9], ",", train_error_rates[-1])
print("On the test set, the error rates for λ = 1, 10, 100 are respectively:",
test_error_rates[0], ",", test_error_rates[9], ",", test_error_rates[-1])
###Output
[ 1 2 3 4 5 6 7 8 9 10 15 20 25 30 35 40 45 50
55 60 65 70 75 80 85 90 95 100]
Plotting error rates...
###Markdown
Q4. K-Nearest Neighbors (24%)
###Code
def euclideaniDist(a, b):
"""
Compute the euclidean distance between two feature vectors
"""
dist = 0
for j in range(a.shape[0]):
dist += np.power((a[j] - b[j]), 2)
return np.sqrt(dist)
def sortNN(X_train, Y_train, x_test):
"""
sort the training data based on the euclidean distance to the test data `x_test`
"""
dists = []
for i in range(X_train.shape[0]):
dists.append(euclideaniDist(X_train[i], x_test))
idxs = np.argsort(np.array(dists))
return idxs
def KNNPredict(X_train, Y_train, X_test, Ks):
"""
Predict class labels for `X_test` using `K` Nearest Neighbors
"""
Y_pred = np.zeros((X_test.shape[0], Ks.shape[0]), dtype=int)
# loop through all test data
for i in tqdm(range(X_test.shape[0])):
idxs = sortNN(X_train, Y_train, X_test[i])
# for each K value, estimate the resulted class labels
for k in range(Ks.shape[0]):
k_1 = (Y_train[idxs[:Ks[k]]] == 1).sum()
P_1 = k_1/Ks[k]
if P_1 > 0.5:
Y_pred[i][k] = 1
return Y_pred
def computeErrorRate(X_train, Y_train, X_test, Y_test, Ks):
"""
Compute the Error Rate based on the `Y_pred` result and the given ground truth `Y_test`,
with a given K value
"""
error_rates = np.zeros((Ks.shape[0], 1))
Y_pred = KNNPredict(X_train, Y_train, X_test, Ks)
for k in tqdm(range(Ks.shape[0])):
y_pred = Y_pred[:, k].reshape(-1, 1)
num_error = (y_pred != Y_test).sum()
#print("y_pred", y_pred)
error_rates[k] = num_error/Y_test.shape[0]
return error_rates
###Output
_____no_output_____
###Markdown
Predict and Plot Results
###Code
# Set experimenting lambda values
Ks = np.concatenate((np.arange(1, 10, 1), np.arange(10, 105, 5)), axis=None)
print("K values:", Ks)
print("Plotting error rates...")
train_error_rates = computeErrorRate(x_train_log, y_train, x_train_log, y_train, Ks)
test_error_rates = computeErrorRate(x_train_log, y_train, x_test_log, y_test, Ks)
# Plotting
fig, ax = plt.subplots()
line1, = ax.plot(Ks, train_error_rates, label='training')
line2, = ax.plot(Ks, test_error_rates, dashes=[6, 2], label='test')
ax.set(xlabel='K', ylabel='error rate', title='Q4. K-Nearest Neighbors')
ax.legend()
ax.grid()
fig.savefig("pics/q4.png")
plt.show()
# Print some results
print("On the training set, the error rates for K = 1, 10, 100 are respectively:",
train_error_rates[0], ",", train_error_rates[9], ",", train_error_rates[-1])
print("On the test set, the error rates for K = 1, 10, 100 are respectively:",
test_error_rates[0], ",", test_error_rates[9], ",", test_error_rates[-1])
###Output
K values: [ 1 2 3 4 5 6 7 8 9 10 15 20 25 30 35 40 45 50
55 60 65 70 75 80 85 90 95 100]
Plotting error rates...
###Markdown
Normalize columns

First, let's normalize all of the statistics in the dataset. We want to map all variables onto the range 0 to 1, which will make it easier to do a radar plot. First, we'll look at the distribution of one metric.
###Code
layer_scores['layer4-alexa-rankings'].plot.hist()
###Output
_____no_output_____
###Markdown
When we normalize it, we should see the same distribution.
###Code
def normalize (df: pd.DataFrame) -> pd.DataFrame:
'''Convert to values 0 to 1.'''
return (df - df.min()) / (df.max() - df.min())
normalize(layer_scores['layer4-alexa-rankings']).plot.hist()
###Output
_____no_output_____
###Markdown
One more issue here is that network interference events are power-law distributed.
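The analysis below keeps the plain min-max normalization, but one common way to tame a power-law-distributed metric before scaling would be a log transform first. This is only an aside, not what the notebook actually applies; the column name is taken from the dataset above.

```python
import numpy as np

def log_normalize(series):
    """Compress a heavy-tailed series with log1p, then min-max scale to 0-1."""
    logged = np.log1p(series)
    return (logged - logged.min()) / (logged.max() - logged.min())

# Hypothetical usage:
# log_normalize(layer_scores['layer3-network-interference-rate']).plot.hist()
```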
###Code
normalize(layer_scores['layer3-network-interference-rate']).plot.hist()
###Output
_____no_output_____
###Markdown
We have the same problem with data laws
###Code
normalize(layer_scores['layer5-discrete-categories-data-laws']).plot.hist()
###Output
_____no_output_____
###Markdown
We also want to "flip" IPv6 adoption, such that higher IPv6 signals lower fragmentation.
###Code
def flip (df):
return 1 - df
flip(normalize(layer_scores['layer2-ipv6-adoption'])).plot.hist()
###Output
_____no_output_____
###Markdown
Simplify dataset

Let's perform this mapping for all of our metrics to produce a simplified dataset.
###Code
data = pd.DataFrame({
'Country': layer_scores['Country'],
'Alpha-2 code': layer_scores['Alpha-2 code'],
'layer2 (ipv6)': flip(normalize(layer_scores['layer2-ipv6-adoption'])),
'layer3 (network interference)': normalize(layer_scores['layer3-network-interference-rate']).fillna(value=0),
# 'layer3 (network interference)': normalize(layer_scores['layer3-network-interference-rate']).fillna(value=0),
'layer4 (popular website locality)': normalize(layer_scores['layer4-alexa-rankings']),
'layer5 (data laws)': normalize(layer_scores['layer5-discrete-categories-data-laws']),
})
data.head()
###Output
_____no_output_____
###Markdown
Produce radar plots

Now we'll want to produce radar plots for each country (or for sets of countries).
###Code
def find_country (alpha2: str) -> pd.DataFrame:
return data[data['Alpha-2 code']==alpha2]
find_country('CN')
# number of variable
def radar_plot (df, filename=None):
non_numeric_cols = ['Country','Alpha-2 code']
categories=list(df.drop(columns=non_numeric_cols))
N = len(categories)
# # We are going to plot the first line of the data frame.
# # But we need to repeat the first value to close the circular graph:
# # values=df.loc[0].drop(columns=['Country','Alpha-2 code']).values.flatten().tolist()
# values= df.values.flatten().tolist()
# values += values[:1]
# # values
# What will be the angle of each axis in the plot? (we divide the plot / number of variable)
angles = [n / float(N) * 2 * pi for n in range(N)]
angles += angles[:1]
# Initialise the spider plot
ax = plt.subplot(111, polar=True)
# If you want the first axis to be on top:
ax.set_theta_offset(pi / 2)
ax.set_theta_direction(-1)
# Draw one axe per variable + add labels labels yet
plt.xticks(angles[:-1], categories, color='grey', size=8)
# Draw ylabels
ax.set_rlabel_position(0)
    plt.yticks([0.25, 0.5, 0.75], ["0.25", "0.5", "0.75"], color="grey", size=7)  # radial ticks within the 0-1 range
plt.ylim(0,1)
# # Plot data
# ax.plot(angles, values, linewidth=1, linestyle='solid')
# # Fill area
# ax.fill(angles, values, 'b', alpha=0.1)
for i in range(len(df)):
values=df.iloc[i].drop(non_numeric_cols).values.flatten().tolist()
values += values[:1]
ax.plot(angles, values, linewidth=1, linestyle='solid', label=df.iloc[i]['Country'])
ax.fill(angles, values, 'b', alpha=0.05)
plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
if filename:
plt.savefig(filename,
dpi=300,
bbox_inches='tight')
# radar_plot(
# find_country('DK')\
# .append(find_country('SE'))\
# .append(find_country('NO'))
# )
###Output
_____no_output_____
###Markdown
Findings

Making observations about 'profiles' of groups of countries.

The Scandinavians

Let's start with the Scandinavians (Denmark, Norway and Sweden). These countries are very similar to one another in a variety of ways. Norway stands out as being outside the EU, and exceptionally wealthy. However, the three share culture, basic systems of governance, and arguably a single language. [fn:4]
###Code
radar_plot(
find_country('DK')\
.append(find_country('SE'))\
.append(find_country('NO'))
, filename="writing/figures/scandinavians.png"
)
###Output
_____no_output_____
###Markdown
Immediately, Norway stands out as having higher IPv6 adoption, and no laws about cross-border data flow. Denmark also has much less "fragmentation" (locality) at the content layer than Sweden.

Five eyes
###Code
radar_plot(
find_country('US')\
.append(find_country('GB'))\
# .append(find_country('CA'))\
# .append(find_country('AU'))\
.append(find_country('NZ'))\
)
###Output
_____no_output_____
###Markdown
Let's start by looking at the United States, the United Kingdom, and New Zealand. The UK and New Zealand are similar, but the United States has very little network interference, and more fragmentation on layers 2 and 3.
###Code
radar_plot(
find_country('US')\
.append(find_country('GB'))\
.append(find_country('NZ'))\
.append(find_country('CA'))\
.append(find_country('AU'))\
, filename="writing/figures/five-eyes-individual.png"
)
###Output
_____no_output_____
###Markdown
Now let's add in Canada and Australia. These two countries have laws about cross-border data flow, and Australia's network interference is near zero. Other than that, these countries are all similar to one another, except for the United States. The United States has lower transport-layer fragmentation and higher network- and content-layer fragmentation than the rest of the pack.

Regardless, let's look at an "average" of the 5-eyes countries.
###Code
def create_block (
block_name: str,
alpha2s: list,
) -> pd.DataFrame:
'''Produces a mean of all countries in a block.'''
block = find_country(alpha2s[0])
for alpha2 in alpha2s[1:]:
# print(find_country(alpha2))
block = block.append(find_country(alpha2))
block = block.mean().to_frame().T
block['Alpha-2 code'] = ''
block['Country'] = block_name
return block
# five_eyes=\
five_eyes = create_block('Five eyes', [
'US', 'GB', 'NZ', 'CA', 'AU'
])
five_eyes
radar_plot(five_eyes)
###Output
_____no_output_____
###Markdown
How do five-eyes compare with the G7?
###Code
g7 = create_block('G7', [
'CA', 'FR', 'DE', 'IT', 'JP', 'US', 'GB'
])
radar_plot(g7.append(five_eyes))
###Output
_____no_output_____
###Markdown
G7 and five-eyes countries are roughly similar, though the G7 has higher fragmentation across the board.

China, belt-and-road

That was all warmup. Let's look at China.
###Code
belt_and_road = create_block('Belt and road', [
# asia
'CN', # china
'LA', # laos
'ID', # indonesia
'MN', # mongolia
# beyond asia
'PK', # pakistan
'DJ', # djbouti
'AR', # argentina
'SD', # sudan
'JM', # jamaica
])
radar_plot(belt_and_road.append(five_eyes), filename='writing/figures/belt-and-road-vs-fiveeyes.png')
radar_plot(
belt_and_road\
.append(find_country('CN'))
, filename='writing/figures/china-vs-belt-and-road.png'
)
###Output
/home/ffff/.local/lib/python3.6/site-packages/pandas/core/frame.py:6211: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version
of pandas will change to not sort by default.
To accept the future behavior, pass 'sort=False'.
To retain the current behavior and silence the warning, pass 'sort=True'.
sort=sort)
###Markdown
I'm showing China here with Five Eyes, for comparison. We can see a qualitatively different profile here. There's more network interference, but the real stand-out is content-layer fragmentation. This fragmentation is extremely high, probably the result of both censorship and language/culture. As a sanity check, how does China compare with its East Asian neighbors?
###Code
radar_plot(
find_country('CN')\
.append(find_country('KR').iloc[1])\
.append(find_country('JP'))\
)
###Output
_____no_output_____
###Markdown
Overall, its US-allied neighbors have slightly lower content-layer fragmentation, and higher IPv6 uptake.

Now, what happens when we average China with all of the belt-and-road countries?

*Note*: there is no authoritative list of the belt-and-road countries, as many Western and South American countries have signed various belt-and-road-related treaties. I'm including a highly (let's call it) "intuitive" list of developing Asian and African countries that have developed diplomatic relationships with China and/or accepted loans for large infrastructure projects, without major outcry (i.e., excluding Malaysia).

Other "high-touch" countries
###Code
islamic_theocracies = create_block('Gulf states with Islamic theocracies', [
'SA', # saudi arabia
'AE', # UAE
'KW', # kuwait
'BH', # bahrain
'IR', # iran
])
radar_plot(
belt_and_road\
.append(islamic_theocracies)\
.append(five_eyes)\
, filename='writing/figures/three-bloc.png'
)
###Output
_____no_output_____
###Markdown
'Best buddies' Europe - similar and different
###Code
radar_plot(
find_country('DE')\
.append(find_country('FR'))\
)
radar_plot(
find_country('UA')\
.append(find_country('BY'))\
)
layer_scores.sort_values(by='layer3-network-interference-rate', ascending=False)
radar_plot(
find_country('NO')\
.append(islamic_theocracies)\
, filename='writing/figures/no-vs-islamic.png'
)
radar_plot(
find_country('SE')\
.append(find_country('DK'))\
)
###Output
_____no_output_____
###Markdown
South and Southeast Asia
###Code
radar_plot(
find_country('IN')\
.append(find_country('MY'))\
)
radar_plot(
find_country('IN')\
.append(find_country('BD'))\
, filename='writing/figures/in-bd.png'
)
radar_plot(
find_country('BR')\
.append(find_country('MX'))\
)
find_country('BD')
###Output
_____no_output_____
###Markdown
South America
###Code
radar_plot(
find_country('BR')\
.append(find_country('PY'))\
.append(find_country('AR'))\
)
###Output
_____no_output_____
###Markdown
Caribbean
###Code
radar_plot(
find_country('JM')\
.append(find_country('BS'))\
.append(find_country('CU'))\
.append(find_country('VG'))\
)
###Output
/home/ffff/.local/lib/python3.6/site-packages/matplotlib/projections/polar.py:63: RuntimeWarning: invalid value encountered in less
mask = r < 0
###Markdown
A more complex picture
###Code
radar_plot(
find_country('BH')\
.append(find_country('CN'))\
.append(find_country('US'))\
# .append(find_country('DK'))\
.append(find_country('DE'))\
, filename='writing/figures/us-cn-bh-de.png'
)
###Output
_____no_output_____
###Markdown
Analysis of the DLR Knowledge Exchange Workshop Series on Software Engineering

The following Jupyter notebook gives an overview of the six workshops and their participants since 2014. First, we show a basic overview of the workshop and participant data. Then, we identify the two main groups and check their attendance behavior for every workshop. Finally, we consider how many participants attended the next workshop.

Basic overview

Workshop data

The following data set contains the basic information about every workshop, including its main topic, number of participants, date, location, and the number of employees currently working at the workshop location. In addition, we calculate the total number of workshop series participants and the average number of participants.
###Code
import pandas as pd
workshops = pd.read_csv("data/workshops.csv", index_col="id")
total_num_participants = workshops.num_participants.sum()
average_num_participants = total_num_participants / len(workshops)
print("Total number of participants:", total_num_participants)
print("Average number of participants:", average_num_participants)
workshops.head(6)
###Output
Total number of participants: 320
Average number of participants: 53.333333333333336
###Markdown
Participant data

The participant data has been pre-processed as follows:

- The registration lists formed the basis; they have been further cleaned up by removing duplicates and double-checking them against the available attendance lists.
- The data has been anonymized by removing the participants' names.
- Specific helper fields have been calculated to support the later analysis.

The resulting data set only contains the unique participants of the workshop series. I.e., every entry represents a unique participant and indicates the total number of workshops visited, the specific workshops visited, whether the participant still works for the DLR, and whether we consider the participant a non-regular visitor. The last field (non-regular) indicates whether the participant visited more than one workshop but skipped more than one workshop in a row while still working at DLR. The field is later used to differentiate the core participant group.
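As an illustration of how such a helper field could be derived, here is a simplified sketch. It is not the actual pre-processing script: it ignores the "still working at DLR" condition and assumes the per-workshop boolean columns "1" to "6" shown below.

```python
def is_non_regular(row, workshop_cols=("1", "2", "3", "4", "5", "6")):
    """True if the participant attended more than one workshop but left a gap
    of two or more workshops between two attended ones."""
    attended = [i for i, col in enumerate(workshop_cols) if row[col]]
    if len(attended) < 2:
        return False
    gaps = [b - a - 1 for a, b in zip(attended, attended[1:])]
    return any(gap > 1 for gap in gaps)

# Hypothetical usage:
# participants["non_regular"] = participants.apply(is_non_regular, axis=1)
```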
###Code
participants = pd.read_csv("data/participants.csv", index_col="id")
total_unique_participants = len(participants)
print("Total number of unique participants:", total_unique_participants)
participants.head(10)
###Output
Total number of unique participants: 223
###Markdown
Location Data

The location data for each workshop lists, for each participant, their location of origin as well as whether this was the only workshop he or she attended. The data has been anonymized by removing the participants' names. The IDs used are not related to each other or to the IDs of the participant data set.
###Code
locations_ws1 = pd.read_csv("data/ws1_location.csv", index_col="id")
locations_ws2 = pd.read_csv("data/ws2_location.csv", index_col="id")
locations_ws3 = pd.read_csv("data/ws3_location.csv", index_col="id")
locations_ws4 = pd.read_csv("data/ws4_location.csv", index_col="id")
locations_ws5 = pd.read_csv("data/ws5_location.csv", index_col="id")
locations_ws6 = pd.read_csv("data/ws6_location.csv", index_col="id")
locations = pd.concat([locations_ws1, locations_ws2, locations_ws3, locations_ws4, locations_ws5, locations_ws6], keys=['BS', 'KP', 'OP', 'BA', 'HB', 'JE'])
# Amount of unique locations from which people attended
unique_locations = locations["location"].drop_duplicates().count()
print ("Unique locations from which people attended the workshop series:", unique_locations)
locations.head(10)
###Output
Unique locations from which people attended the workshop series: 16
###Markdown
Analysis of the attendance behavior

Definition of the core group and the group of non-regular visitors

We consider participants that continually attend the workshops as part of the core group. We include participants in this group if:

- they attended more than one workshop, and
- they did not skip more than one workshop in a row while still working at DLR.

Otherwise we consider them non-regular workshop visitors.
###Code
participants_more_one_workshop = participants[participants.num_workshops_visited > 1]
num_participants_more_one_workshop = len(participants_more_one_workshop)
core_group = participants_more_one_workshop[participants_more_one_workshop.non_regular == False] # See definition of non_regular
num_core_group = len(core_group)
num_core_group_still_there = len(core_group[core_group.currently_works_for_DLR == True])
non_regular_group = participants_more_one_workshop[participants_more_one_workshop.non_regular == True] # See definition of non_regular
num_non_regular_group = len(non_regular_group)
num_non_regular_group_still_there = len(non_regular_group[non_regular_group.currently_works_for_DLR == True])
print("Number of participants visiting more than one workshop:", num_participants_more_one_workshop)
print("Number of core group members:", num_core_group)
print("Number of core group members still working at DLR:", num_core_group_still_there)
print("Number of non-regular visitors:", num_non_regular_group)
print("Number of non-regular visitors still working at DLR:", num_non_regular_group_still_there)
###Output
Number of participants visiting more than one workshop: 49
Number of core group members: 33
Number of core group members still working at DLR: 25
Number of non-regular visitors: 16
Number of non-regular visitors still working at DLR: 15
###Markdown
Definition of the group of one-time participants

One-time participants are participants attending only one workshop.
###Code
one_time_participants = participants[participants.num_workshops_visited == 1]
num_one_time_participants = len(one_time_participants)
print("Number of one-time participants:", num_one_time_participants)
###Output
Number of one-time participants: 174
###Markdown
Trend of the attendance rates of the two main groups

In the following, we calculate for every group the attendance rate for every workshop. I.e., we want to find out how many core group members and how many one-time participants attended every workshop.
###Code
# Define attendance data and calculates corresponding attendance rates
attendance_data = {
"workshop": [1, 2, 3, 4, 5, 6],
"num_participants": workshops.num_participants.values,
"num_core_group": [
core_group["1"].sum(),
core_group["2"].sum(),
core_group["3"].sum(),
core_group["4"].sum(),
core_group["5"].sum(),
core_group["6"].sum()],
"num_one_time_participants": [
one_time_participants["1"].sum(),
one_time_participants["2"].sum(),
one_time_participants["3"].sum(),
one_time_participants["4"].sum(),
one_time_participants["5"].sum(),
one_time_participants["6"].sum()],
"num_non_regular_participants": [
non_regular_group["1"].sum(),
non_regular_group["2"].sum(),
non_regular_group["3"].sum(),
non_regular_group["4"].sum(),
non_regular_group["5"].sum(),
non_regular_group["6"].sum()]
}
attendance_data = pd.DataFrame(attendance_data)
attendance_data["rate_core_to_num_participants"] = attendance_data["num_core_group"] / attendance_data["num_participants"] * 100
attendance_data["rate_one_time_to_num_participants"] = attendance_data["num_one_time_participants"] / attendance_data["num_participants"] * 100
attendance_data["rate_non_regular_participants_to_num_participants"] = attendance_data["num_non_regular_participants"] / attendance_data["num_participants"] * 100
attendance_data = attendance_data.set_index("workshop")
# Calculate the average attendance rate for every group
average_attendance_rate_core = attendance_data["rate_core_to_num_participants"].sum() / len(workshops)
average_attendance_rate_one_time = attendance_data["rate_one_time_to_num_participants"].sum() / len(workshops)
average_attendance_rate_non_regular = attendance_data["rate_non_regular_participants_to_num_participants"].sum() / len(workshops)
print("Average attendance rate of the core group:", average_attendance_rate_core)
print("Average attendance rate of one-time participants:", average_attendance_rate_one_time)
print("Average attendance rate of non-regular participants:", average_attendance_rate_non_regular)
attendance_data.head(6)
# Plot rates trend
%matplotlib inline
attendance_rate_data = attendance_data.drop(
columns=["num_core_group", "num_one_time_participants", "num_non_regular_participants", "num_participants", "rate_non_regular_participants_to_num_participants"])
ax = attendance_rate_data.plot.line()
# Adjust x and y axis as well as the legend
ax.set_xbound(0.5, 6.5)  # six workshops on the x-axis
ax.set_xticks([1, 2, 3, 4, 5, 6])
ax.set_xlabel("Workshop")
ax.set_ybound(15, 70)
ax.set_ylabel("Attendance Rate [%]")
ax.legend(["Core group", "One-time participants"], loc="upper left")
# Print values
for index, value in enumerate(attendance_rate_data["rate_core_to_num_participants"]):
if index < len(attendance_rate_data) - 2:
ax.text(index + 1, value, str(round(value, 2)) + "%")
else:
ax.text(index + 1, value, str(round(value, 2)) + "%*")
for index, value in enumerate(attendance_rate_data["rate_one_time_to_num_participants"]):
if index < len(attendance_rate_data) - 2:
ax.text(index + 1, value, str(round(value, 2)) + "%")
else:
ax.text(index + 1, value, str(round(value, 2)) + "%*")
###Output
_____no_output_____
###Markdown
\* The data points of the indicated workshops are usually subject to larger changes because:

* From the current point of time, it is unclear whether the identified one-time participants of the last workshop will attend the next workshop and become members of the core group or not.
* It is unclear whether the trend of new core group members who joined at the fifth workshop will last.

How many participants attended the next workshop as well?
###Code
# Define the attended next data
attended_next_data = {
"workshop": [1, 2, 3, 4, 5],
"num_attended_next": [
len(participants[(participants["1"] == True) & (participants["2"] == True)]),
len(participants[(participants["2"] == True) & (participants["3"] == True)]),
len(participants[(participants["3"] == True) & (participants["4"] == True)]),
len(participants[(participants["4"] == True) & (participants["5"] == True)]),
len(participants[(participants["5"] == True) & (participants["6"] == True)])
],
"num_participants": workshops.num_participants.values[:-1]
}
attended_next_data = pd.DataFrame(attended_next_data)
attended_next_data["attended_next_rate"] = attended_next_data["num_attended_next"] / attended_next_data["num_participants"] * 100
attended_next_data = attended_next_data.set_index("workshop")
attended_next_data.head(6)
# Plot the attended next data
attended_next_plot = attended_next_data.drop(columns=["attended_next_rate"])
attended_next_plot = attended_next_plot[["num_participants", "num_attended_next"]] # Ensure the right column order
ax = attended_next_plot.plot.bar(figsize=(9, 5))
ax.set_xlabel("Workshop")
ax.set_ylabel("Number of participants")
ax.set_ybound(0, 70)
ax.legend(["Total number of participants", "Number of participants attending the next workshop"])
# Print attended next rate values
num_pairs = 5
attended_next_rates = ["{}%".format(int(value)) for value in attended_next_data.attended_next_rate.values]
make_pairs = zip(*[ax.get_children()[:num_pairs], ax.get_children()[num_pairs:num_pairs*2]])
for index, (left, right) in enumerate(make_pairs):
ax.text(index + 0.15, min(left.get_bbox().y1, right.get_bbox().y1) + 1, attended_next_rates[index], horizontalalignment="center")
###Output
_____no_output_____
###Markdown
Analysis of the attendance origin location

In the following we analyse the origin locations of the workshop participants and how they influence attendance.

Where do the participants of the workshop series originate from?
###Code
# Count the amount of participants of the workshop series per location
location_distribution = locations.location.value_counts()
location_distribution_filt = location_distribution[location_distribution >= 5]
location_distribution_filt
# Plot the locatio dictribution data
ax = location_distribution_filt.plot(kind='bar', figsize=(10,7),
fontsize=13);
ax.set_xlabel("Locations")
ax.set_ylabel("Number of Participants");
# set individual bar lables using above list
for i in ax.patches:
# get_x pulls left or right; get_height pushes up or down
ax.text(i.get_x()+0.1, i.get_height()+1, \
str(i.get_height()))
###Output
_____no_output_____
###Markdown
Do more people attend a workshop if it is run at their home location?
###Code
# From which locations are the participants for each workshop?
location_distribution_per_ws = pd.DataFrame({"1: BS": locations_ws1.location.value_counts(),
"2: KP": locations_ws2.location.value_counts(),
"3: OP": locations_ws3.location.value_counts(),
"4: BA": locations_ws4.location.value_counts(),
"5: HB": locations_ws5.location.value_counts(),
"6: JE": locations_ws6.location.value_counts()})
# Filter chart down to locations where a workshop took place
location_distribution_per_ws_filtered = location_distribution_per_ws.loc[['BS', 'KP', 'OP', 'BA', 'HB', 'JE']]
# Create a heatmap of the location-scpecific distribution of participant data
# X axis: location names of the workshops ordered chronologically
# Y axis: number of participants from the indicated locations
import matplotlib.pyplot as plt
import seaborn
fig, ax = plt.subplots(figsize=(8, 6))
seaborn.heatmap(location_distribution_per_ws_filtered, cmap="Greens", annot=True)
###Output
_____no_output_____
###Markdown
Some effects and (possible) explanations:

- The diagonal shows the number of participants from a location when the workshop took place there. Usually, the largest participant group comes from the location at which the workshop takes place. Exception: HB.
- BS row: 11 => The topic "Embedded Systems" was a driver for some specific departments to attend the workshop more or less jointly.
- BS row: columns "1:BS" (27) and "2:KP" (14) => Two BS institutes were involved in the initial workshop series setup. Thus, there are quite a few BS participants for BS/KP.
- JE row: No one from JE attended the first 4 workshops. Reason: the JE location did not exist at that time.
###Code
# Plot the same data as bar chart
ax = location_distribution_per_ws_filtered.plot(kind='bar', figsize=(10,7), fontsize=13)
ax.set_xlabel("Origin location of participants")
ax.set_ylabel("Number of participants")
###Output
_____no_output_____
###Markdown
How many more people attend a workshop if it is run at their location?

Next, we calculate the factor by which the number of participants increases when the workshop is run at their home location, in comparison to their participation in external workshops.
###Code
def calc_factor(location):
local_ws = location_distribution_per_ws.loc[location].max() # Max can be used since all local workshops were visited by the most participants in comparison to external workshops
external_ws = location_distribution_per_ws.loc[location].sum() - local_ws
external_ws_avg = (external_ws / 5)
diff = local_ws - external_ws_avg
factor = local_ws / external_ws_avg
print ("The workshop in", location, "was visited by", diff, "more participants from", location, "than an average external one.")
print ("The workshop in", location, "was visited by", factor, "times more participants from", location, "than an average external one.\n")
for location in locations.index.levels[0]:
calc_factor(location)
###Output
The workshop in BS was visited by 17.6 more participants from BS than an average external one.
The workshop in BS was visited by 2.872340425531915 times more participants from BS than an average external one.
The workshop in KP was visited by 15.4 more participants from KP than an average external one.
The workshop in KP was visited by 10.625 times more participants from KP than an average external one.
The workshop in OP was visited by 24.6 more participants from OP than an average external one.
The workshop in OP was visited by 3.6170212765957444 times more participants from OP than an average external one.
The workshop in BA was visited by 10.6 more participants from BA than an average external one.
The workshop in BA was visited by 2.962962962962963 times more participants from BA than an average external one.
The workshop in HB was visited by 6.2 more participants from HB than an average external one.
The workshop in HB was visited by 8.75 times more participants from HB than an average external one.
The workshop in JE was visited by 13.4 more participants from JE than an average external one.
The workshop in JE was visited by 23.333333333333336 times more participants from JE than an average external one.
###Markdown
Analysis of the relation between one-time participants and origin location

More people attend a workshop if it is run at their origin location. The resulting question is: are those the one-time participants we identified earlier? What seems to be more important: the topic or the location?

Do one-time participants attend local or external workshops?
###Code
from collections import OrderedDict
one_time = []
one_time_local = []
local = []
for location in locations.index.levels[0]:
location_data = locations.loc[location]
one_time.append(location_data.one_time_participant.value_counts()[True])
one_time_local.append(location_data[location_data.location == location].one_time_participant.value_counts()[True])
local.append(len(location_data[location_data.location == location]))
data = OrderedDict()
data["Workshop"] = [1, 2, 3, 4, 5, 6]
data["Local One-Time Participants"] = one_time_local
data["Local Participants"] = local
data["One-Time Participants"] = one_time
local_one_timers = pd.DataFrame(data)
local_one_timers = local_one_timers.set_index("Workshop")
# plot data
ax = local_one_timers.plot(kind='bar', figsize=(10,7), fontsize=13)
ax.set_xlabel("Workshop")
ax.set_ylabel("Number of participants");
###Output
_____no_output_____
###Markdown
Capstone Project Submission

- Student name: Bronwen Cohn-Cort
- Student pace: self-paced
- Scheduled project review date/time: July 27, 2020
- Instructor name: Jeff Herman
- Blog post URL: https://bronwencc.github.io/pandas_dataframe_quick-start_guide_python

Abstract

The problem was to see whether the text of highly-cited abstracts authored by Nobel Prize winners differed from the text of highly-cited abstracts authored by scientists who were listed as an author on one of the Nobel Prize winners' highly-cited publications. The publications were from Google Scholar and the information about the Nobel Prize winners was from the Nobel Prize website. Publications may be books as well as journal articles, and abstract text was assumed to be in English, although there is at least one suspected instance of French.

Then, the publications that had abstract text (a non-null `bib_abstract`) were put into one dataframe and the text was stripped of punctuation, tokenized and lemmatized with `nltk`. After that, I found frequency distributions and subsequently TF-IDF (term frequency - inverse document frequency) values for each word (or name or number) in all the abstracts, for each abstract. These values were reduced to two dimensions with `sklearn`'s `TSNE` and plotted with varying color codes: author name, field of study (as related to the field in which the connected Nobel Prize was won), and whether the author of the publication was a Nobel Prize winner.

The TF-IDF values were put through Sequential models to get predictions for field of study (4 possibilities, 19% accuracy) and for coauthor (Nobel Prize winner or not, 58% accuracy). A t-test found the average TF-IDF values were not significantly different between abstracts from a non-Prize-winner's author page and those from a Prize-winner's author page.

Import Statements
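In addition to the project imports below, the following self-contained sketch illustrates the TF-IDF and t-SNE steps summarized in the abstract. The tiny `abstract_texts` list and all parameter values are illustrative assumptions, not the project's actual data or settings.

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE

# Hypothetical stand-in for the cleaned abstract texts.
abstract_texts = [
    "graphene electronic transport measurements",
    "neural circuits of spatial memory in the hippocampus",
    "directed evolution of enzymes for chemical synthesis",
]

# Term frequency - inverse document frequency matrix over all abstracts.
tfidf = TfidfVectorizer(stop_words="english")
tfidf_matrix = tfidf.fit_transform(abstract_texts)

# Reduce each abstract's TF-IDF vector to two dimensions for plotting.
tsne = TSNE(n_components=2, perplexity=2, random_state=321)
coords = tsne.fit_transform(tfidf_matrix.toarray())
print(coords.shape)  # (number of abstracts, 2)
```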
###Code
import pandas as pd
import numpy as np
from keras.layers import Input, Dense, LSTM, Embedding
from keras.layers import Dropout, Activation, Bidirectional, GlobalMaxPool1D
from keras.models import Sequential
from keras import initializers, regularizers, constraints, optimizers, layers
from keras.preprocessing import sequence
from sklearn.manifold import TSNE
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer, CountVectorizer
from scipy import stats
from statsmodels.stats.multicomp import pairwise_tukeyhsd
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use("seaborn")
from nltk.tokenize import word_tokenize
from nltk import FreqDist
from nltk.corpus import stopwords
import string
#use WordNet database to reduce words to roots (lemmatize)
from nltk.stem.wordnet import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import nltk
nltk.download("punkt") #download Punkt sentence tokenizer
nltk.download('wordnet') #download wordnet for WordNetLemmatizer
np.random.seed(321)
###Output
_____no_output_____
###Markdown
Obtain

The bulk of the obtaining is in the obtain.ipynb notebook, where results were retrieved and parsed through my modified `scholarly` package. This section reads in the .CSV files that were created by putting together information from `newscholarly` search results for filled publication information. It also creates a new dataframe with additional informative columns, consisting of those records that have abstract text (where `bib_abstract` is not null).
###Code
#all filled publications that were first five results for Nobel Prize winners
publidf = pd.read_csv("csvdata\publications.csv",index_col=[0])
publidf.reset_index(drop=True, inplace=True)
publidf.info()
#all first five filled publications for at most three authors that were first
#listed on first five results for Nobel Prize winners
coapublidf = pd.read_csv("csvdata\copublications.csv",index_col=[0])
coapublidf.reset_index(drop=True,inplace=True)
coapublidf.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 75 entries, 0 to 74
Data columns (total 23 columns):
bib_eprint 38 non-null object
bib_cites 75 non-null int64
citations_link 75 non-null object
url_scholarbib 0 non-null float64
url_add_sclib 0 non-null float64
bib_abstract 44 non-null object
bib_author_list 0 non-null float64
bib_venue 0 non-null float64
bib_year 75 non-null int64
bib_gsrank 0 non-null float64
bib_title 75 non-null object
bib_url 44 non-null object
bib_author 75 non-null object
bib_listauthors 75 non-null object
bib_journal 64 non-null object
bib_volume 65 non-null float64
bib_number 60 non-null object
bib_publisher 61 non-null object
bib_pages 69 non-null object
source 75 non-null object
id_citations 75 non-null object
cites_per_year 75 non-null object
fileID 75 non-null object
dtypes: float64(6), int64(2), object(15)
memory usage: 13.6+ KB
###Markdown
Create author identifier from fileID (an index number plus the last four characters of an author's name) for both dataframes listing filled publications.
###Code
publidf["authID"]=[ident[:-1] for ident in publidf["fileID"]]
coapublidf["authID"]=[ident[:-1] for ident in coapublidf["fileID"]]
#how many publications that have an abstract are there for "co-authors"
coapublidf[coapublidf.bib_abstract.isna()==False].authID.value_counts()
#add coauthor binary column
coapublidf["coauthor"]=1
publidf["coauthor"]=0
###Output
_____no_output_____
###Markdown
Scrub
Remove the author's publications known to be an incorrect search result and combine the dataframes into one containing all publications that have abstracts.
###Code
#drop rows at indices where authID is "1Bado"
coapublidf.drop(index=coapublidf[coapublidf.authID=="1Bado"].index,inplace=True)
coapublidf.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 70 entries, 0 to 74
Data columns (total 25 columns):
bib_eprint 34 non-null object
bib_cites 70 non-null int64
citations_link 70 non-null object
url_scholarbib 0 non-null float64
url_add_sclib 0 non-null float64
bib_abstract 40 non-null object
bib_author_list 0 non-null float64
bib_venue 0 non-null float64
bib_year 70 non-null int64
bib_gsrank 0 non-null float64
bib_title 70 non-null object
bib_url 40 non-null object
bib_author 70 non-null object
bib_listauthors 70 non-null object
bib_journal 59 non-null object
bib_volume 60 non-null float64
bib_number 55 non-null object
bib_publisher 57 non-null object
bib_pages 64 non-null object
source 70 non-null object
id_citations 70 non-null object
cites_per_year 70 non-null object
fileID 70 non-null object
authID 70 non-null object
coauthor 70 non-null int64
dtypes: float64(6), int64(3), object(16)
memory usage: 14.2+ KB
###Markdown
With 44 of the 75 prize-winner publications and 40 of the remaining 70 co-author publications having a non-null abstract, put together a dataframe of those records with abstracts, keeping track of whether each came from the coauthor or prize-winner list. This excludes 1Bado from the `coapublidf`, which I know to be information for a different author than intended.

Matching names was one of the more difficult aspects of getting the desired search results.
###Code
#concatenate where abstract is not null
abstractdf = pd.concat([coapublidf[coapublidf.bib_abstract.isna()==False],publidf[publidf.bib_abstract.isna()==False]])
abstractdf.info()
#reset index and not add as a column the one being replaced (drop=True)
abstractdf.reset_index(drop=True,inplace=True)
abstractdf.tail()
#save this dataframe
abstractdf.to_csv("csvdata\pubs_with_abstracts.csv")
abstractdf = pd.read_csv("csvdata\pubs_with_abstracts.csv", index_col=[0])
abstractdf.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 84 entries, 0 to 83
Data columns (total 25 columns):
bib_eprint 60 non-null object
bib_cites 84 non-null int64
citations_link 84 non-null object
url_scholarbib 0 non-null float64
url_add_sclib 0 non-null float64
bib_abstract 84 non-null object
bib_author_list 0 non-null float64
bib_venue 0 non-null float64
bib_year 84 non-null int64
bib_gsrank 0 non-null float64
bib_title 84 non-null object
bib_url 84 non-null object
bib_author 84 non-null object
bib_listauthors 84 non-null object
bib_journal 70 non-null object
bib_volume 74 non-null float64
bib_number 70 non-null object
bib_publisher 75 non-null object
bib_pages 76 non-null object
source 84 non-null object
id_citations 84 non-null object
cites_per_year 84 non-null object
fileID 84 non-null object
authID 84 non-null object
coauthor 84 non-null int64
dtypes: float64(6), int64(3), object(16)
memory usage: 17.1+ KB
###Markdown
Add field from Nobel Prize winner information to authorinfo.csv.
###Code
authdf = pd.read_csv("csvdata/authorinfo.csv",index_col=[0])
authdf.reset_index(drop=True, inplace=True)
authdf.info()
#checking all fileID's are unique to each record
sum(authdf.fileID.value_counts())
#get sciences Nobel Prize winner information from 2010-2019
sciwindf = pd.read_csv("files/science10years.csv",index_col=[0])
sciwindf.info()
sciwindf["thor"]=[name[-4:] for name in sciwindf.name1] #last four characters of authorname
#be sure last four characters are unique
sum(sciwindf.thor.value_counts())
#create new empty column for winning area in authdf
authdf["area"]=np.nan
#add field if sciwindf.thor matches last 4 characters from fileID from authdf
for idx, nameid in zip(authdf.index,authdf.fileID):
last4 = nameid[-4:]
winarea = sciwindf[sciwindf.thor==last4].field
area = None
try:
for i in winarea:
area = i
except:
continue
authdf.iloc[idx,-1]=area #last column is "area"
#where area is missing, the author should also not be a Nobel Prize winner
authdf.head()
#save new file of author information with winning areas
authdf.to_csv("files/authors.csv")
#create column of last four characters of an author name from fileID to compare with abstractdf
authdf["name4"] = [fileid[-4:] for fileid in authdf["fileID"]]
###Output
_____no_output_____
###Markdown
Use information from `authdf` to fill in `abstractdf` with Nobel Prize winning field for that author or coauthor.
###Code
#initialize column to be null
abstractdf["area"]=None
#if an author in bib_listauthors matches an authdf.name4 and area is not NaN
for idx, authors in zip(abstractdf.index,abstractdf.bib_listauthors):
authlist = authors.strip("[]").split(", ")#expecting string with appearance of a list: ['Name H Itisi', 'I M Here']
area = None
hasfield=False
for auth in authlist:
authname = auth.strip("'")#remove trailing or leading apostrophes
abslast4 = authname[-4:] #find whether last four characters match a name in authdf and add that field
winarea = authdf[authdf.name4==abslast4].area #find the winning subject area for that match
try:
for i in winarea: #winarea is a Series with an index, so iterating gives the desired value
area = i
hasfield = True
except:
continue
if hasfield:#the first author listed in bib_listauthors that matches an author in authdf and has a useful result
abstractdf.iloc[idx,-1]=area #last column is "area", that record will be classified in that area
break #and leave the loop since it found an area for this list of authors for this record
else:
continue
#create list with one dataframe for each authID and concatenate back together
#after filling with nearby values in area
concatlist = []
for authorID in abstractdf.authID.value_counts().index: #set of authID values
tempdf = abstractdf[abstractdf.authID==authorID].copy() #separate out each dataframe by authID
tempdf.area.fillna(method="ffill",inplace=True) #fill forwards
tempdf.area.fillna(method="bfill",inplace=True) #fill backwards
concatlist.append(tempdf)
newabsdf = pd.concat(concatlist)
newabsdf[newabsdf.coauthor==0].tail()
#some values for area are missing for Nobel Prize winners (where the coauthor column is 0)
#add area if last four characters from newabsdf.authID matches fileID from authdf
for idx, nameid in zip(newabsdf.index,newabsdf.authID):
if newabsdf.iloc[idx,-2]==0: #if the coauthor column is 0, the record is for a Nobel winner
last4 = nameid[-4:]
winarea = authdf[authdf.name4==last4].area #get area value from author info dataframe
absarea = None
hasarea = False
try:
            for i in winarea:
                absarea = i
                hasarea = True #record that a matching area was found; otherwise the assignment below never runs
        except:
            continue #go on to next record
        if hasarea and pd.isna(newabsdf.iloc[idx,-1]): #only fill rows where area is still missing
newabsdf.iloc[idx,-1]=absarea #"area" was last column added
# the value at the corresponding index for this authID is set to the area from authdf
continue
newabsdf.area.value_counts()
###Output
_____no_output_____
###Markdown
This method of using the last four characters did not work completely well and left some records in `area` empty, so in my rough draft notebook I manually searched for which authors were coauthors of Nobel Prize winners and then looked up in `sciwindf` the field in which those winners won.
###Code
#example lookup for gham having alij as a coauthor
sciwindf[sciwindf.thor=="gham"]
#since gham won in chemistry, alij's publications will also be classified as such
prefindx = newabsdf[newabsdf.authID=="1alij"].index
newabsdf.loc[prefindx,"area"]="chemistry"
###Output
_____no_output_____
###Markdown
After manually adding the rest of the values for `area`, I saved the resulting `newabsdf` as "abstractsinfo.csv", which can be read in below as `infodf`.
###Code
infodf = pd.read_csv("csvdata/abstractsinfo.csv", index_col=[0])
infodf.info()
#sorting by index column, then dropping it
infodf.set_index(infodf["index"], inplace=True)
infodf.sort_index(inplace=True)
infodf.drop(columns=["index"],inplace=True)
infodf.tail()
sum(infodf.area.isna())
infodf.area.value_counts()
###Output
_____no_output_____
###Markdown
Possible duplicates
###Code
infodf[infodf.citations_link=="/scholar?cites=10229049581137351676"]
###Output
_____no_output_____
###Markdown
Before truly beginning the analysis: some publications may have been added more than once, so I will remove those with sufficiently similar information using `drop_duplicates`, keeping one of each publication.
###Code
infodf.drop_duplicates().info()
#there are no records that match completely, so I will specify where the abstract, eprint and citations_link are the same
finaldf = infodf.drop_duplicates(subset=["bib_abstract","citations_link","bib_eprint"])
finaldf.info()
#reset index and save as abstracts76
finaldf.reset_index(drop=True,inplace=True)
finaldf.to_csv("csvdata/abstracts76.csv")
finaldf.tail()
###Output
_____no_output_____
###Markdown
ExploreWith dataframe of records that have abstracts, analyze abstract text for each with `nltk` and get TF-IDF values for each.
###Code
def cleantokens(textstring):
'''
A function to remove punctuation, various other symbol characters, newlines, and English stopwords from textstring.
It returns words or sequences of numbers (and/or letters) from textstring as tokens using nltk.word_tokenize.
Args
textstring (str) : A string of text, assumed to be in English, with words separated by spaces.
Returns
        cleanedtokens (list) : lowercase words (including acronyms) from textstring not in stopwords.words("english")
'''
stopwordspunct = stopwords.words("english")
stopwordspunct += list(string.punctuation)
stopwordspunct += ["…","\+","''","‘","’","“","”","—","\*","à"] #other punctuation that was unaccounted for
for symbol in "\+,.?!;-\\:\∼\~'\n\'…‘’“”—\*":#replace punctuation with an empty string, including ~ and ∼
textstring = textstring.replace(symbol, '').lower() #put into lowercase
tokenized = word_tokenize(textstring)
    #keep only tokens that are not stopwords or leftover punctuation
    cleanedtokens = [word.lower() for word in tokenized if word not in stopwordspunct]
return cleanedtokens
###Output
_____no_output_____
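###Markdown
As a quick sanity check of the helper (the sentence below is made up for illustration, not taken from the abstracts), `cleantokens` should drop stopwords and punctuation and lowercase the rest:
###Code
sample = "The cells were imaged at 37 degrees and β-catenin levels increased!"
print(cleantokens(sample))
#expected output is roughly ['cells', 'imaged', '37', 'degrees', 'βcatenin', 'levels', 'increased']
###Output
_____no_output_____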
###Markdown
The next code blocks get a list of lists of all words in all abstracts.
###Code
abstractext = [abstract for abstract in finaldf.bib_abstract]#each entry in bib_abstract is one string
tokenslists = []
abstractlist = []
for each in abstractext: #a list of Strings
tokens = cleantokens(each) #get tokens
lemmlist = []
for token in tokens:
lemtoken = lemmatizer.lemmatize(token)
lemmlist.append(lemtoken) #lemmatize each token, add as item to list
abstractlist.append(lemtoken) #add to one big list with no distinction between abstracts
tokenslists.append(lemmlist) #each list of lemmatized tokens is an item in the list
#freqdist for all words in all abstracts
absfreq = FreqDist(abstractlist)
absfreq.most_common(10)
len(absfreq)
###Output
_____no_output_____
###Markdown
The most common token is "cell", although there are 2500 unique words, numbers, and likely names in total across all the abstract text.

Now, I will get the TF-IDF values for each word for each abstract.
###Code
#get frequency dictionaries for each list of words
freqlist = []
for tokenslist in tokenslists:
freqdistr = FreqDist(tokenslist)
freqlist.append(freqdistr)
###Output
_____no_output_____
###Markdown
The functions below are based on the TF-IDF lab comparing song lyrics (https://github.com/learn-co-curriculum/ds-word-vectorization-lab/) and compute term frequency - inverse document frequency.
###Code
#get term frequency:
def proportional_freq(freqdict):
'''
If given a frequency dictionary, would return a new dictionary with the same keys and the frequencies relative to the
total sum of the frequencies (total number of times all keys were counted): a key's given value / sum of all values.
Args
freqdict (dict or similar) : has words as keys and their frequencies in the text as the values
Returns
propor (dict) : same keys as freqdict, values are freqdict's value divided by the total sum of freqdict's values
'''
totalcount = sum(freqdict.values()) #the total number of words, summing up all the frequency counts
propor=dict()
for item, freq in freqdict.items(): #iterate over each item-frequency pair
propor[item] = freq/totalcount
return propor
#find inverse document frequency given a list of frequency dictionaries
def calc_inverse_freq(freqdlist):
'''
Takes in a list of frequency dictionaries and finds the inverse document frequency for every key. Returns one dict
containing all of the set of keys across all freqdlist's dictionaries with the inverse document frequency as the values
Args
freqdlist (list) : list of frequency dictionaries that are part of the same corpus
Returns
inversedict (dict) : the keys are from the dictionaries in freqdlist's keys and the values are log-transformed
inverse document frequency (the number of dictionaries / how many dicts the key appears in)
'''
listlen = len(freqdlist)
totfreqd = dict()
for freqd in freqdlist: #create frequency dictionary with every key across all dictionaries in freqdlist
for key in freqd.keys():
totfreqd[key]=totfreqd.get(key,0)+1 #if key does not exist already, creates it with value of 1
#totfreqd contains how many times a word appears at least once in a freqd from freqdlist (the number of
#documents a word appears in)
inversedict=dict()
for word, freq in totfreqd.items():#for every token-frequency pair in totfreqd
#inversedict contains log of (the total number of dictionaries (documents) / how many documents word appears in)
inversedict[word] = np.log(listlen/float(freq)) #the log of this quotient (to divide well, needs float type)
# (so the difference between 10 and 20 is bigger than between 100 and 120)
return inversedict
#for term frequency, I used FreqDist object from nltk
def termf_invdf(listofdicts):
'''
Finds term frequency-inverse document frequency for every key in each given frequency dictionary in listofdicts and
returns the values in tfidf_dictslist list of dictionaries, each having every key from across listofdicts.
Args
listofdicts (list) : list of frequency dictionaries
Returns
tfidf_dictslist (list) : list of dictionaries, each with keys of all of the keys across listofdicts' dictionaries
and the values are term frequency * inverse document frequency derived from same
'''
idfdict = calc_inverse_freq(listofdicts)
newdict = {i:0 for i in list(idfdict.keys())} #all the words from all dictionaries in the list
tfidf_dictslist=[]
for eachdict in listofdicts:
tfidf_dict = newdict.copy() #dictionary with keys as all the tokens in the corpus and all have values of 0
profreq = proportional_freq(eachdict) #gets term frequency
for word in profreq.keys():
tfidf_dict[word] = profreq[word]*idfdict[word] #multiply proportional frequency by inverse frequency
tfidf_dictslist.append(tfidf_dict) #each dictionary lists every word in the corpus
#if a word is not in eachdict, it has a value of 0 in its corresponding tfidf_dict
return tfidf_dictslist
###Output
_____no_output_____
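###Markdown
As a small illustration of what these functions return (the two toy word lists below are made up, not from the data), every dictionary in the result contains every token in the corpus, with zeros for tokens absent from that document:
###Code
toyfreqs = [FreqDist(["cell", "cell", "protein"]), FreqDist(["market", "protein"])]
toytfidf = termf_invdf(toyfreqs)
#"cell" appears only in the first toy document, so it gets a positive value there and 0 in the second
print(toytfidf[0]["cell"], toytfidf[1]["cell"])
#"protein" appears in both documents, so its inverse document frequency is log(2/2)=0 and its TF-IDF is 0
print(toytfidf[0]["protein"], toytfidf[1]["protein"])
###Output
_____no_output_____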
###Markdown
Use the FreqDist frequency distributions (similar to dictionaries) to get a list of dictionaries of TF-IDF values, one per abstract.
###Code
tfidflist = termf_invdf(freqlist)
#put all TF-IDF values into dataframe format
tfidframe = pd.DataFrame(tfidflist)
#tfidflist is a list of dictionaries where each dictionary is every word's tfidf value for an abstract
tfidflist[41]["cell"]
###Output
_____no_output_____
###Markdown
As a dataframe, each record is for one abstract.
###Code
tfidframe.head()
#interesting note: there are two different strings for micrometers
"µm"=="μm"
tfidframe.info()
#save TF-IDF values to .CSV file
tfidframe.to_csv("csvdata/tfidfvalues.csv")
tfidframe.describe()
#quick calculation for average tokens per abstract
totalwords=0
for i in tokenslists: #list of lists of tokens
totalwords+=len(i) #how many tokens in each abstract = length of list
totalwords/finaldf.shape[0] #with number of rows in finaldf
###Output
_____no_output_____
###Markdown
Most values are zero, likely indicating that most tokens do not appear in most abstracts, since there are only about 70 tokens on average in each abstract. Of the tokens shown by `.describe()` at a glance, β is the most distinctive, with a maximum TF-IDF value of 0.2 (the closest to 1).
###Code
statsframe = tfidframe.describe()
statsframe.info()
#finding the highest values for which tokens
statsframeT = statsframe.T
statsframeT[statsframeT["max"]>.37]
###Output
_____no_output_____
###Markdown
There are ten tokens in the dataset with TF-IDF values above 0.37. The highest, or most distinctive, is "decision" at 0.58, although six share the same `max` value of 0.481193.

Plot TF-IDF values
Use t-SNE (t-distributed Stochastic Neighbor Embedding) from scikit-learn to graph the values.
###Code
#list of lists of values from the list of tf-idf dictionaries
tfidfvalues = []
for tfidfdict in tfidflist:
tfidfvalues.append(list(tfidfdict.values())) #convert values of each to list
def graphdims(transformed, dims=3):
'''
Transpose a list of lists or similar iterable from one N-length list of dims-length lists -> one list of dims
number of N-length lists. It's like switching a matrix coordinate from (i, j) to be (j, i) except using a list of lists
Args
transformed (list or arraylike) : an arraylike of an arraylike of dimension dims x N (dims lists each of size N)
dims (int) : defaults to 3. The desired dimension of the returned list of lists
Returns
coordlist (list) : a list of lists of dimension N x dims (N lists each of size dims)
'''
coordlist = [[] for i in range(0,dims)] #list of lists
for coord in transformed:
for j in range(0,dims):
coordlist[j].append(coord[j])
return coordlist
#reduce to two dimensions to graph on x-y axes
dim2TSNE = TSNE(n_components=2) #using sklearn
dim2data = dim2TSNE.fit_transform(tfidfvalues)
dim2abstr = graphdims(dim2data,2) #kind of transpose list of lists
#check graphdims changed shape as expected
print(dim2data.shape, len(dim2abstr), len(dim2abstr[0]))
#add coordinates as columns to finaldf
finaldf["xcoord"] = dim2abstr[0]
finaldf["ycoord"] = dim2abstr[1]
finaldf.describe()
#save with coordinates
finaldf.to_csv("csvdata/abs76coords.csv")
finaldf = pd.read_csv("csvdata/abs76coords.csv", index_col=[0])
###Output
_____no_output_____
###Markdown
Match individual colors to each `authID` for graph.
###Code
authidlist = finaldf.authID
authidset = set(authidlist)
len(authidset)
#create list of 26 color names to be for each authID
colors = ["darkgrey","dimgray","brown","red","tomato","sienna",
"chocolate","darkorange","tan","gold","darkkhaki","y",
"yellowgreen","green","c","royalblue","slateblue","navy",
"blue","mediumpurple","violet","fuchsia","deeppink",
"lawngreen","cyan","darkgoldenrod"]
len(colors)==len(authidset)
#create dictionary for color names
colordict = dict()
for authid,color in zip(authidset,colors):
colordict[authid]=color
#graph with the authID's for the corresponding abstracts
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
for xcoord, ycoord, authid in zip(dim2abstr[0], dim2abstr[1], authidlist):
colr = colordict[authid]
ax.scatter(xcoord, ycoord, c=colr, label=authid)
pts = 18 #font pt size
plt.rc('axes', titlesize=pts, labelsize=pts) # font size of the axes titles and labels
plt.rc('xtick', labelsize=pts-2) # font size of the tick labels
plt.rc('ytick', labelsize=pts-2) # font size of the tick labels
plt.rc('figure', titlesize=30) #title font size, slightly larger than the other text
plt.title('Abstract TF-IDF Color-coded by Author')
#plt.savefig("images/dots-by-author.png")
#ax.legend(ncol=8) #with the legend, it was hard to see which was a datapoint and which might be in the key
plt.show()
###Output
_____no_output_____
###Markdown
Match four colors for Nobel Prize field to graph and look for clusters.
###Code
fourcolors = ["darkgreen","blue","goldenrod","magenta"]
fieldset = ["physics","chemistry","economics","medicine"]
#sort by area
areasort = finaldf.sort_values(by=["area"])
phys = areasort[areasort["area"]=="physics"]
chem = areasort[areasort["area"]=="chemistry"]
econ = areasort[areasort["area"]=="economics"]
medi = areasort[areasort["area"]=="medicine"]
areadflist = [phys,chem,econ,medi]
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(111)
for areadf, colr, field in zip(areadflist,fourcolors,fieldset):
ax.scatter(areadf.xcoord,areadf.ycoord,color=colr,label=field)
ax.legend()
pts = 18 #font pt size
plt.rc('axes', titlesize=pts, labelsize=pts) # font size of the axes titles and labels
plt.rc('xtick', labelsize=pts-2) # font size of the tick labels
plt.rc('ytick', labelsize=pts-2) # font size of the tick labels
plt.rc('figure', titlesize=30) #title font size, slightly larger than the other text
plt.title('Abstract TF-IDF Color-coded by Field')
#plt.savefig("images/dots-by-field.png")
plt.show()
###Output
_____no_output_____
###Markdown
The most noticeable group is about 10 Physics publications near center, at about coordinate pair (50, 100).
###Code
#see how coauthor column compares
winners = finaldf[finaldf["coauthor"]==0]
coauths = finaldf[finaldf["coauthor"]==1]
coautcatlist=[winners,coauths]
labellist = ["Winners","Other Authors"]
twocolors = ["blue","goldenrod"]
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(111)
for df, colr, each in zip(coautcatlist,twocolors,labellist):
ax.scatter(df.xcoord,df.ycoord,color=colr,label=each)
ax.legend()
pts = 18 #font pt size
plt.rc('axes', titlesize=pts, labelsize=pts) # font size of the axes titles and labels
plt.rc('xtick', labelsize=pts-2) # font size of the tick labels
plt.rc('ytick', labelsize=pts-2) # font size of the tick labels
plt.rc('figure', titlesize=30) #title font size, slightly larger than the other text
plt.title('Abstract TF-IDF Color-coded by Nobel Prize Winner')
#plt.savefig("images/dots-by-winner.png")
plt.show()
###Output
_____no_output_____
###Markdown
There appear to be a few clusters; one in particular, made up of non-winners' publication text, sits near the left center of the plot around coordinates (-100, 100). In the other graphs, the dots in this cluster have different colors, indicating that the publications are not by the same author, nor are they in the same field.

Model
Using `keras`, build a 7-layer Sequential model on the TF-IDF values as X and predict each target y: `area` and `coauthor`.
###Code
#get dummies to create two targets: coauthor and field
ycoa = pd.get_dummies(finaldf["coauthor"]).values
yarea = pd.get_dummies(finaldf["area"]).values
tfidframe.shape
X = [row for row in tfidframe.values] #list of rows (numpy arrays)
X = np.array(X) #Sequential prefers numpy arrays to lists
type(X[0][0])
###Output
_____no_output_____
###Markdown
Model for `coauthor` as Target
###Code
#train test split
X_train, X_test, yc_train, yc_test = train_test_split(X, ycoa, test_size=0.33, random_state=321)
modelc = Sequential()
embedsize = 32 #there are 84 records
numwords = tfidframe.shape[1] #and 2500 tokens
modelc.add(Embedding(numwords, embedsize))
modelc.add(LSTM(10, return_sequences=True))
modelc.add(GlobalMaxPool1D())
modelc.add(Dropout(0.5))
modelc.add(Dense(20, activation='relu'))
modelc.add(Dropout(0.5))
modelc.add(Dense(2, activation='softmax')) #coauthor has two possibilities
modelc.compile(loss="categorical_crossentropy",optimizer="SGD",metrics=["accuracy"])
modelc.fit(X_train, yc_train,epochs=10,batch_size=15)
###Output
Epoch 1/10
4/4 [==============================] - 9s 2s/step - loss: 0.6983 - accuracy: 0.3800
Epoch 2/10
4/4 [==============================] - 9s 2s/step - loss: 0.6947 - accuracy: 0.4800
Epoch 3/10
4/4 [==============================] - 9s 2s/step - loss: 0.6926 - accuracy: 0.5400
Epoch 4/10
4/4 [==============================] - 9s 2s/step - loss: 0.6981 - accuracy: 0.4000
Epoch 5/10
4/4 [==============================] - 9s 2s/step - loss: 0.6917 - accuracy: 0.4400
Epoch 6/10
4/4 [==============================] - 9s 2s/step - loss: 0.6981 - accuracy: 0.3800
Epoch 7/10
4/4 [==============================] - 9s 2s/step - loss: 0.6926 - accuracy: 0.5800
Epoch 8/10
4/4 [==============================] - 9s 2s/step - loss: 0.6930 - accuracy: 0.5400
Epoch 9/10
4/4 [==============================] - 9s 2s/step - loss: 0.6925 - accuracy: 0.5000
Epoch 10/10
4/4 [==============================] - 9s 2s/step - loss: 0.6944 - accuracy: 0.5200
###Markdown
The loss seems large, at .7, but the accuracy improved over the epochs from .38 to .52.
###Code
yc_preds = modelc.predict(X_test) #I can compare with .evaluate more conveniently
traincresults = modelc.evaluate(X_train, yc_train)
testcresults = modelc.evaluate(X_test, yc_test)
###Output
2/2 [==============================] - 0s 215ms/step - loss: 0.6932 - accuracy: 0.5000
1/1 [==============================] - 0s 0s/step - loss: 0.6919 - accuracy: 0.5769
###Markdown
The accuracy on the test set is about 57%. The loss is lower than 1, but still rather high, and it improved minimally from the first epoch.

Model for `area` as the target
###Code
#train test split
X_train, X_test, ya_train, ya_test = train_test_split(X, yarea, test_size=0.33, random_state=321)
modela = Sequential()
embedsize = 32 #there are 84 records
numwords = tfidframe.shape[1] #and 2500 tokens
modela.add(Embedding(numwords, embedsize))
modela.add(LSTM(10, return_sequences=True))
modela.add(GlobalMaxPool1D())
modela.add(Dropout(0.5))
modela.add(Dense(20, activation='relu'))
modela.add(Dropout(0.5))
modela.add(Dense(4, activation='softmax')) #area has 4 possibilities
modela.compile(loss="categorical_crossentropy",optimizer="SGD",metrics=["accuracy"])
modela.fit(X_train, ya_train,epochs=10,batch_size=15)
###Output
Epoch 1/10
4/4 [==============================] - 9s 2s/step - loss: 1.3882 - accuracy: 0.2400
Epoch 2/10
4/4 [==============================] - 9s 2s/step - loss: 1.3825 - accuracy: 0.3200
Epoch 3/10
4/4 [==============================] - 9s 2s/step - loss: 1.3885 - accuracy: 0.2000
Epoch 4/10
4/4 [==============================] - 9s 2s/step - loss: 1.3874 - accuracy: 0.3400
Epoch 5/10
4/4 [==============================] - 9s 2s/step - loss: 1.3826 - accuracy: 0.3200
Epoch 6/10
4/4 [==============================] - 9s 2s/step - loss: 1.3829 - accuracy: 0.3400
Epoch 7/10
4/4 [==============================] - 9s 2s/step - loss: 1.3876 - accuracy: 0.2600
Epoch 8/10
4/4 [==============================] - 9s 2s/step - loss: 1.3915 - accuracy: 0.1800
Epoch 9/10
4/4 [==============================] - 9s 2s/step - loss: 1.3838 - accuracy: 0.3200
Epoch 10/10
4/4 [==============================] - 9s 2s/step - loss: 1.3837 - accuracy: 0.2600
###Markdown
The accuracy improved a little over the epochs, but perhaps it needs more of them to converge.
###Code
ya_preds = modela.predict(X_test)
trainaresults = modela.evaluate(X_train, ya_train)
testaresults = modela.evaluate(X_test, ya_test)
###Output
2/2 [==============================] - 0s 213ms/step - loss: 1.3824 - accuracy: 0.3000
1/1 [==============================] - 0s 0s/step - loss: 1.3945 - accuracy: 0.1923
###Markdown
The accuracy is 19% for the test data and 30% for the training data, both with loss values near 1.4.

T-test for significance for `coauthor` feature
I'd like to see whether there is a significant difference between TF-IDF values for abstract text by a Nobel winner (0) or a coauthor (1). As a hypothesis test:

$H_0$ : The mean difference between the TF-IDF values for non-coauthors and coauthors is zero. Rephrased, $\mu_{nobel} = \mu_{coauthor}$

$H_1$ : The mean difference between the TF-IDF values for non-coauthors and coauthors is nonzero. Rephrased, $\mu_{nobel} \ne \mu_{coauthor}$

To test the alternative hypothesis, I can use a two-tailed test. With the $p$ and $t$ values returned by `ttest_ind`, I can reject the null hypothesis when $p < 0.05$.
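As a minimal sketch of that decision rule (the two arrays and the 0.05 threshold below are illustrative assumptions, not the project data):
###Code
#illustrative sketch of the rejection decision with made-up average TF-IDF values
toy_a = np.array([0.010, 0.012, 0.011, 0.013])
toy_b = np.array([0.011, 0.012, 0.012, 0.014])
tstat, pval = stats.ttest_ind(toy_a, toy_b) #two-tailed by default
print(tstat, pval, "reject H0" if pval < 0.05 else "fail to reject H0")
###Output
_____no_output_____
###Markdown
The same rule is applied to the real averaged TF-IDF values below.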
###Code
finaldf["tfidf_avg"] = [np.mean(i) for i in tfidframe.values] #average each row
#create lists of average TF-IDF values for whether coauthor/Nobel winner
coauth1tfidf = [tival for tival in finaldf[finaldf["coauthor"]==1].tfidf_avg]
coauth0tfidf = [tival for tival in finaldf[finaldf["coauthor"]==0].tfidf_avg]
ttestresults = stats.ttest_ind(coauth1tfidf,coauth0tfidf)
ttestresults
###Output
_____no_output_____
###Markdown
That appears not to be significant; a publication authored by a Nobel Prize winner and one authored by a collaborator do not have abstracts that are significantly different.

T-test for significance for `area` feature
I'd like to see whether there is a significant difference between TF-IDF values for abstract text from one field or another. To test the alternative hypothesis, these would be two-tailed, pairwise tests comparing the four possibilities for `area` (each pair would be field A compared to field B).

$H_0$ : The mean difference between the TF-IDF values for field A and field B is zero. Rephrased, $\mu_{A} = \mu_{B}$

$H_1$ : The mean difference between the TF-IDF values for field A and field B is nonzero. Rephrased, $\mu_{A} \ne \mu_{B}$

With the $p$ and $t$ values from a two-tailed test, I can reject the null hypothesis for a pair when $p < 0.05$.
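Since four fields give six pairwise comparisons, running six separate t-tests would inflate the family-wise error rate, so the cell below uses Tukey's HSD, which adjusts for multiple comparisons. A quick sketch of the pair count:
###Code
from itertools import combinations
fieldnames = ["physics", "chemistry", "economics", "medicine"]
fieldpairs = list(combinations(fieldnames, 2)) #C(4,2) = 6 pairs of fields
print(len(fieldpairs), fieldpairs)
###Output
_____no_output_____
###Markdown
Each of these six pairs appears as one row in the Tukey HSD table below.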
###Code
phystfidf = [tival for tival in finaldf[finaldf["area"]=="physics"].tfidf_avg]
chemtfidf = [tival for tival in finaldf[finaldf["area"]=="chemistry"].tfidf_avg]
econtfidf = [tival for tival in finaldf[finaldf["area"]=="economics"].tfidf_avg]
meditfidf = [tival for tival in finaldf[finaldf["area"]=="medicine"].tfidf_avg]
#pairwise Tukey test:
#information needs to be in numpy arrays
area_arrays = np.concatenate([np.array(phystfidf),np.array(chemtfidf),np.array(econtfidf),np.array(meditfidf)])
#and labels are important for it to handle the data properly
areanames = ["physics"]*len(phystfidf) + ["chemistry"]*len(chemtfidf) + ["economics"]*len(econtfidf) + ["medicine"]*len(meditfidf)
#get tukeyhsd test results with 0.05 for significant p-value
print(pairwise_tukeyhsd(area_arrays,areanames,0.05))
###Output
Multiple Comparison of Means - Tukey HSD,FWER=0.05
==================================================
group1 group2 meandiff lower upper reject
--------------------------------------------------
chemistry economics 0.0 -0.0 0.0001 False
chemistry medicine 0.0 -0.0 0.0001 False
chemistry physics -0.0001 -0.0001 -0.0 True
economics medicine 0.0 -0.0001 0.0001 False
economics physics -0.0001 -0.0002 -0.0 True
medicine physics -0.0001 -0.0002 -0.0 True
--------------------------------------------------
###Markdown
By field, three combinations' null hypotheses can be rejected: chemistry-physics, economics-physics, and medicine-physics. Conversely, the pairs that do not involve physics are not significantly different from each other. However, all values for `meandiff`, `lower` and `upper` are very close to 0.

Interpret
Ultimately, the results rely on too little data. The model has relatively high loss values and accuracy metrics that barely improve over the last few epochs of the Sequential model from `keras`. The t-test comparing TF-IDF values for co-authors and those for Nobel Prize winners did not find significance. Pairwise tests comparing the four different fields (`area`) found significance for half of the pairs (those pairing physics with chemistry, economics, or medicine).

Comparing the TF-IDF values for abstract text gives closeness based on the words' importance to each abstract. With many abstracts containing specialized jargon from different disciplines, it was surprising that, when graphing the values, the abstracts were not more clustered by `area`.

Comparison to `sklearn`'s `TfidfTransformer` with `CountVectorizer`
Using the tokenized abstracts and a `set` of the words from all the abstracts, get TF-IDF values from `sklearn` methods.
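Note that `TfidfTransformer`'s defaults (a smoothed idf plus L2 row normalization) differ from the hand-rolled formula above, so the raw values are not expected to match exactly. A minimal sketch of the two idf formulas, assuming 76 documents (the deduplicated abstract count) and a made-up document frequency:
###Code
#sketch only: compare sklearn's default (smooth_idf=True) idf with the hand-rolled idf above
n_docs = 76 #assumed number of abstracts after dropping duplicates
df_t = 10 #made-up document frequency for one token
sklearn_idf = np.log((1 + n_docs) / (1 + df_t)) + 1 #TfidfTransformer default formula
handrolled_idf = np.log(n_docs / df_t) #calc_inverse_freq above
print(sklearn_idf, handrolled_idf)
###Output
_____no_output_____
###Markdown
`TfidfTransformer` also divides each document row by its L2 norm by default, so per-token values are only comparable within one approach, not across the two.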
###Code
#take tokenslists and put each list into one String, each word separated by spaces
corpus = []
vocab = [] #vocab will be set of all words from all tokens
for tokenlist in tokenslists:
tokenstring=""
lasttoken = tokenlist[-1]
for token in tokenlist[:-1]:
vocab.append(token)
tokenstring+=token
tokenstring+=" " #last token does not need space after it
tokenstring+=lasttoken #each separated by spaces
vocab.append(lasttoken)
corpus.append(tokenstring)
len(corpus)
vocabul = set(vocab)
len(vocabul)
tokenslists[10][-1]
corpus[10]
#without a pipeline
vectorizer = CountVectorizer()
vectorized = vectorizer.fit_transform(corpus)
vectorized[10]
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(vectorized)
tfidf
vectarr = vectorized.toarray()
tfidfarr = tfidf.toarray()
vectdf = pd.DataFrame(vectarr, columns=vectorizer.get_feature_names())
vectdf.head()
tfidfdf = pd.DataFrame(tfidfarr, columns=vectorizer.get_feature_names())
tfidfdf.head()
###Output
_____no_output_____
###Markdown
Data Structuring and Pruning
###Code
# Load datasets
import json
import pathlib
import importlib
from collections import defaultdict, Counter
import pyupset as pyu
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import re
from matplotlib.gridspec import GridSpec
import csv
import requests
import pickle
import os
from vicckb import model as viccdb
from vicckb.definitions import DATA_ROOT, PROJECT_ROOT
%matplotlib inline
REPOPATH = PROJECT_ROOT.parent
OUTPATH = REPOPATH / 'out'
FIGPATH = OUTPATH / 'figures'
os.makedirs(FIGPATH, exist_ok=True)
# reload module and load data
importlib.reload(viccdb)
vdb = viccdb.ViccDb()
vdb.report_groups()
# for now, omit brca from analysis
brca = vdb.select(lambda x: x['source'] == 'brca')
core_vdb = vdb - brca
core_vdb.report_groups()
# remove biological associations
oncokb_biological = core_vdb.select(lambda x: x['source'] == 'oncokb' and 'biological' in x['raw'])
oncokb_biological.report_groups(core_vdb)
core_vdb = core_vdb - oncokb_biological
###Output
oncokb: 3801 (93.9% of superset)
Total: 3801 (22.5% of superset)
###Markdown
Evidence Uniqueness
This section deals with non-unique entries from the database. This is a temporary measure until the importers are fixed. As such, it uses private variables and non-standard methods to hack around the built-in uniqueness assumptions that are violated by these data. Remove this entire section once the source hash checks pass.
###Code
core_vdb.report_groups()
# Non-unique raw entries
raw_duplicates = core_vdb.select(lambda x: len(core_vdb._hashed[hash(x)]) > 1)
raw_duplicates.report_groups(core_vdb)
cgi_dups = raw_duplicates.by_source('cgi')
cgi_clean = [x for x in cgi_dups if x['raw']['Drug status']]
test = viccdb.ViccDb([x for x in core_vdb if x not in cgi_dups] + list(cgi_clean))
test.report_groups(core_vdb)
# Test matches expectation, moving to core
core_vdb = test
pmkb_dups = raw_duplicates.by_source('pmkb')
len(pmkb_dups._hashed)
len(pmkb_dups)
merged_associations = list()
for hash_key, equivalent_associations in pmkb_dups._hashed.items():
root_association = equivalent_associations.pop()
for other_association in equivalent_associations:
root_association['features'].append(other_association['features'][0])
merged_associations.append(root_association)
test = viccdb.ViccDb([x for x in core_vdb if x not in merged_associations] + list(merged_associations))
test.report_groups(core_vdb)
x = len(core_vdb.by_source('pmkb')) - len(pmkb_dups) + len(pmkb_dups._hashed)
print("Expecting {} associations for PMKB".format(x))
# Test matches expectation, moving to core
core_vdb = test
###Output
_____no_output_____
###Markdown
Evidence filtering
Removal of all evidence without associated publications, followed by removal of all associations without evidence.
###Code
def clean_refs(association):
evidences = association['association']['evidence']
evidence_indices_to_delete = list()
for i, evidence in enumerate(evidences):
        publications = [x for x in evidence['info']['publications'] if x]
        assert isinstance(publications, list)
evidence['info']['publications'] = publications
if not publications:
evidence_indices_to_delete.append(i)
for index in sorted(evidence_indices_to_delete, reverse=True):
del association['association']['evidence'][index]
#map() is lazy in Python 3, so apply the cleanup explicitly to each association
for association in core_vdb:
    clean_refs(association)
core_missing_ref = core_vdb.select(lambda x: not any(x.publications))
core_missing_ref.report_groups(core_vdb)
core_vdb = core_vdb - core_missing_ref
core_vdb.report_groups()
# All associations should have an evidence level
core_vdb.select(lambda x: not x.evidence_level).report_groups(core_vdb)
###Output
Total: 0 (0.0% of superset)
###Markdown
Feature coordinates
What follows is a detailed look at associations without start and end coordinates after normalization, and a set of regular expression filters to separate out these associations into chunks that can be annotated with gene- or exon-level coordinates, as appropriate.
###Code
# Associations with more than 1 feature
x = core_vdb.select(lambda x: len(x.features) > 1)
x.report_groups(vdb)
# Associations without at least 1 complete and valid feature
no_features = core_vdb.select(lambda x: len(x.features) == 0)
no_features.report_groups(vdb)
vdb[0]['association']['phenotype']
# Associations with coordinate features
coord_featured = core_vdb - no_features
coord_featured.report_groups(core_vdb)
###Output
cgi: 1063 (99.2% of superset)
civic: 3323 (99.5% of superset)
jax: 5736 (99.8% of superset)
molecularmatch: 2063 (99.2% of superset)
oncokb: 245 (99.2% of superset)
pmkb: 369 (99.5% of superset)
Total: 12799 (99.6% of superset)
###Markdown
Fix PMKB features
###Code
with open('vicckb/data/gene_strand.pkl', 'rb') as f:
gene_strand = pickle.load(f)
COMPLEMENT = {
'A': 'T',
'T': 'A',
'G': 'C',
'C': 'G',
'-': '-'
}
complement_map = str.maketrans(COMPLEMENT)
def get_gene_strand(gene, trx_id):
strand = gene_strand.get((gene, trx_id), None)
if strand is None:
cmd = '''wget -q -O - 'http://grch37.ensembl.org/biomart/martservice?query=<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE Query>
<Query virtualSchemaName = "default" formatter = "CSV" header = "0" uniqueRows = "0" count = "" datasetConfigVersion = "0.6" >
<Dataset name = "hsapiens_gene_ensembl" interface = "default" >
<Filter name = "hgnc_symbol" value = "{}"/>
<Attribute name = "ensembl_transcript_id" />
<Attribute name = "strand" />
</Dataset>
</Query>' | grep '{}'
'''.format(gene, trx_id)
result = !{cmd}
strand = result[0].split(',')[-1]
gene_strand[(gene, trx_id)] = strand
return strand
return strand
for association in core_vdb.by_source('pmkb'):
del(association._features)
for feature in association['features']:
gene = feature['geneSymbol']
trx_id = feature['attributes']['transcript']['string_value']
strand = get_gene_strand(gene, trx_id)
if strand == '-1':
# feature['ref'] = feature['ref'][::-1].translate(complement_map)
try:
feature['alt'] = feature['alt'][::-1].translate(complement_map)
except KeyError:
continue
with open('vicckb/data/gene_strand.pkl', 'wb') as f:
pickle.dump(gene_strand, f)
###Output
_____no_output_____
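###Markdown
A quick illustration of the reverse-complement step above (the allele string is made up for this sketch):
###Code
toy_alt = "ATG-C" #hypothetical alternate allele reported for a minus-strand gene
print(toy_alt[::-1].translate(complement_map)) #reverse, then complement each base: 'G-CAT'
###Output
_____no_output_____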
###Markdown
Remainder of section is inactivated code for identifying associations without coordinates:

    import re
    def feature_filter(re_obj, associations):
        # report matches and return non-matches
        found = list(filter(lambda x: re_obj.search(x['feature_names']) is not None, associations))
        not_found = list(filter(lambda x: re_obj.search(x['feature_names']) is None, associations))
        report_groups(found)
        return(not_found)

    amp_re = re.compile(r'(amplification)|(loss)|(amp)', re.IGNORECASE)
    remainder = feature_filter(amp_re, no_partial_coord_featured_with_feature_names)

    fusion_re = re.compile(r'(\w{2,}-\w{2,})|(fusion)', re.IGNORECASE)
    r2 = feature_filter(fusion_re, remainder)

    ppm_re = re.compile(r'\w+(:| )[a-z]\d+[a-z]?(fs\*?)?$', re.IGNORECASE)
    r3 = feature_filter(ppm_re, r2)

    indel_re = re.compile(r'\w+(:| )\w+(ins\w+)|(del($|ins\w+))|(dup$)')
    r4 = feature_filter(indel_re, r3)

    bucket_re = re.compile(r'[A-Z0-9]+( (in)?act)?( oncogenic)? mut((ant)|(ation))?$')
    r5 = feature_filter(bucket_re, r4)

    exon_re = re.compile(r'exon', re.IGNORECASE)
    r6 = feature_filter(exon_re, r5)

    expression_re = re.compile(r'(exp)|(^\w+ (pos(itive)?)|(neg(ative)?)|(biallelic inactivation)$)|(truncating)|(deletion)', re.IGNORECASE)
    r7 = feature_filter(expression_re, r6)

    report_groups(r7)
    get_feature_names([x for x in r7 if x['source'] == 'cgi'])

Diseases
###Code
disease_missing = core_vdb.select(lambda x: x.disease is None)
disease_missing.report_groups(core_vdb)
disease_missing[0]['association']
# Fix DOID
for association in core_vdb.select(lambda x: x.disease and x.disease.name == 'CNS Cancer'):
association['association']['phenotype']['type']['term'] = 'central nervous system cancer'
association['association']['phenotype']['type']['id'] = 'DOID:3620'
mismatched_do_cgi = core_vdb.select(lambda x: x.disease and \
x.disease.source.lower().endswith('doid') and \
not x.disease.id.startswith('DOID') and \
x.source == 'cgi'
)
mismatched_do_cgi.report_groups(core_vdb)
mismatched_do_cgi[0]['association']['phenotype']
mismatched_do_cgi[0]['raw']
from vicckb.definitions import DATA_ROOT
from vicckb.harmonizers import DiseaseHarmonizer
adh = DiseaseHarmonizer(map_file=(DATA_ROOT / 'disease_alias.tsv'),
disease_ontology='DOID')
disease = mismatched_do_cgi[0]['raw']['Primary Tumor type']
adh.harmonize(disease)
for cgi_association in mismatched_do_cgi:
disease = cgi_association['raw']['Primary Tumor type']
harmonized = adh.harmonize(disease)
cgi_association['association']['phenotype'] = {
'description': disease,
'type': {
'id': harmonized['id'],
'source': harmonized['ontology'],
'term': harmonized['term']
}
}
mismatched_do_cgi = core_vdb.select(lambda x: x.disease and \
x.disease.source.lower().endswith('doid') and \
not x.disease.id.startswith('DOID') and \
x.source == 'cgi'
)
mismatched_do_cgi.report_groups(core_vdb)
core_vdb.select(lambda x: not x.disease and x.source == 'cgi').report_groups()
other_do_cgi = core_vdb.select(lambda x: x.disease and \
x.source == 'cgi' and \
not x.disease.source.lower().endswith('doid'))
other_do_cgi.report_groups()
for cgi_association in other_do_cgi:
disease = cgi_association['raw']['Primary Tumor type']
harmonized = adh.harmonize(disease)
cgi_association['association']['phenotype'] = {
'description': disease,
'type': {
'id': harmonized['id'],
'source': harmonized['ontology'],
'term': harmonized['term']
}
}
core_vdb.select(lambda x: x.disease and \
x.source == 'cgi' and \
not x.disease.source.lower().endswith('doid')).report_groups()
###Output
0 total associations
###Markdown
Drugs
###Code
drugs_missing = core_vdb.select(lambda x: len(x.drugs) == 0)
drugs_missing.report_groups(core_vdb)
###Output
cgi: 112 (10.4% of superset)
civic: 1261 (37.8% of superset)
jax: 457 (8.0% of superset)
molecularmatch: 120 (5.8% of superset)
oncokb: 8 (3.2% of superset)
pmkb: 371 (100.0% of superset)
Total: 2329 (18.1% of superset)
###Markdown
Genes
###Code
ambiguous = list()
for a in core_vdb:
a.genes
# assert len(ambiguous) == 0 # Ensure there are no ambiguous genes from knowledgebases
###Output
/Users/awagner/Workspace/git/vicckb/vicckb/model.py:257: UserWarning: Ambiguous gene symbol MLL2 in assertion 235677252030682
warn('Ambiguous gene symbol {} in assertion {}'.format(g, self))
###Markdown
Cache
Saving core_vdb to cache for testing.
###Code
core_vdb.cache_data()
###Output
_____no_output_____
###Markdown
Knowledgebase Comparison
Publications
All publications
###Code
x = core_vdb.plot_element_by_source('publications', min_bound=4)
f = x['figure']
# f.savefig('out/publications.pdf')
# x['input_data']
## For Sidi
ebs = x['input_data']
df_dict = dict()
for source in ebs:
fe = list(filter(lambda x: bool(x), ebs[source]))
df_dict[source] = pd.DataFrame(fe, columns=['attribute'])
with open('example.pkl', 'wb') as f:
pickle.dump(df_dict, f)
with open('example.pkl', 'rb') as f:
df_dict = pickle.load(f)
!open .
# Publications uniquely cited
data = x['input_data']
total = 0
for source in data:
publications_from_elsewhere = set()
for source2 in data:
if source == source2:
continue
publications_from_elsewhere.update(data[source2])
unique = data[source] - publications_from_elsewhere
print("{}: {} resource-specific publications".format(source, len(unique)))
total += len(unique)
print("{} ({:.2%}) total resource-specific publications".format(total, total / len(set.union(*(data.values())))))
p_sets = core_vdb.get_element_by_source('publications')
len((p_sets['civic'] & p_sets['jax']) - p_sets['pmkb'] - p_sets['oncokb'] - p_sets['molecularmatch'] - p_sets['cgi'])
x = core_vdb.element_by_source_stats('publications')
x['ubiquitous'] # Bose et al. Cancer Discovery 2013
###Output
3696 / 4354 (84.89%) of publications are represented in only 1 resource.
203 / 4354 (4.66%) of publications are represented in the majority of (3) resources.
1 / 4354 (0.02%) of publications are represented across all resources.
###Markdown
PMIDs
###Code
x = core_vdb.plot_element_by_source('publications', lambda x: x.pmid, min_bound=3)
# f = x['figure']
# f.savefig('out/pmids.pdf')
x = core_vdb.element_by_source_stats('publications', lambda x: x.pmid)
x['ubiquitous'] # Bose et al. Cancer Discovery 2013
###Output
3146 / 3800 (82.79%) of publications are represented in only 1 resource.
203 / 3800 (5.34%) of publications are represented in the majority of (3) resources.
1 / 3800 (0.03%) of publications are represented across all resources.
###Markdown
Genes
###Code
g_set = core_vdb.get_element_by_source('genes')
x = g_set['civic']
for n, s in g_set.items():
if n == 'civic':
continue
x = x - s
len(x)
no_genes = core_vdb.select(lambda x: not x.genes)
no_genes.report_groups(core_vdb)
with_genes = core_vdb - no_genes
x = with_genes.plot_element_by_source('genes')
# f = x['figure']
# f.savefig('out/genes.pdf')
x = with_genes.element_by_source_stats('genes')
x['ubiquitous']
stats.fisher_exact([
[203, 4151],
[97, 318]
])
###Output
_____no_output_____
###Markdown
Features
###Code
# suddenly stopped working? maybe a dependency error?
# x = core_vdb.plot_element_by_source('features', min_bound=5)
# f = x['figure']
# f.savefig(str(FIGPATH / 'misc_figures' / 'feature_upset.pdf'))
x = core_vdb.select(lambda x: x.evidence_level == 'A').element_by_source_stats('features')
f_sets = core_vdb.get_element_by_source('features')
# all_features = set.update()
count = Counter()
for s in f_sets.values():
count.update(s)
# non-scientific-notation calculations
sources = list(f_sets)
for source in sources:
s = set()
for other in sources:
if source == other:
continue
s.update(f_sets[other])
print(f'{source}: {len(f_sets[source] - s)}')
cgi_and_okb = f_sets['oncokb'] & f_sets['cgi']
others = f_sets['pmkb'] | f_sets['molecularmatch'] | f_sets['civic'] | f_sets['jax']
print(f'cgi and oncokb: {len(cgi_and_okb - others)}')
count_of_counts = Counter()
count_of_counts.update(count.values())
np.asarray(list(count_of_counts.values()))
labels = sorted(count_of_counts.keys())
values = [count_of_counts[x] for x in labels]
values
fig1, ax1 = plt.subplots()
pie, _, _ = ax1.pie(values, radius=1,
labels=labels, autopct='%1.1f%%', pctdistance=2)
ax1.axis('equal')
plt.setp(pie, edgecolor='white')
# plt.savefig(str(FIGPATH / 'misc_figures' / 'feature_overlap.pdf'))
plt.show()
ubiquitous_features = list(x['ubiquitous'])
sorted([x.name for x in ubiquitous_features])
def element_uniqueness_across_kbs(element, as_proportion=False):
e_sets = core_vdb.get_element_by_source(element)
count = Counter()
for s in e_sets.values():
count.update(s)
count_of_counts = Counter()
count_of_counts.update(count.values())
labels = sorted(count_of_counts.keys())
if as_proportion:
d = sum(count_of_counts.values())
values = [count_of_counts[x]/d for x in labels]
else:
values = [count_of_counts[x] for x in labels]
return dict(zip(labels, values))
element_uniqueness_across_kbs('features', as_proportion=True)
g_prop = element_uniqueness_across_kbs('genes', as_proportion=True)
f_prop = element_uniqueness_across_kbs('features', as_proportion=True)
di_prop = element_uniqueness_across_kbs('disease', as_proportion=True)
dr_prop = element_uniqueness_across_kbs('drugs', as_proportion=True)
pub_prop = element_uniqueness_across_kbs('publications', as_proportion=True)
labels = ['Genes', 'Features', 'Diseases', 'Drugs*', 'Publications']
N = len(labels)
ind = np.arange(N)
value_sets = [g_prop, f_prop, di_prop, dr_prop, pub_prop]
width = 0.5
plot_sets = [[x.get(i, 0) for x in value_sets] for i in range(1,7)]
plots = list()
b_sums = np.zeros(N)
for plot_set in plot_sets:
p = plt.bar(ind, plot_set, width, bottom=b_sums)
b_sums += np.array(plot_set)
plots.append(p)
plt.ylabel('Proportion')
plt.xticks(ind, labels)
plt.legend([p[0] for p in plots], range(1,7), bbox_to_anchor=(1.2, 0.5, 0, 0.5))
# plt.savefig(str(FIGPATH / 'misc_figures' / 'elements_overlap.pdf'))
plt.show()
###Output
_____no_output_____
###Markdown
Tier 1 variants
###Code
tier1 = core_vdb.select(lambda x: x.evidence_level in ['A', 'B'])
genes = set()
features = set()
for a in tier1:
genes.update(a.genes)
features.update(a.features)
print(len(genes))
print(len(features))
###Output
236
1512
###Markdown
Hierarchical search
The existing method is to find an exact match of any features for an association. Below we demonstrate gains through hierarchical search of GenomicFeatures, a core result of this effort.
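As a rough sketch of the idea (a simplified illustration only, not the actual `ViccDb.search_by_features` implementation; the 'regional' and 'gene' tier names here are placeholders, while 'exact' and 'positional' are match types reported later in this notebook):
###Code
#simplified illustration of falling back to coarser match tiers when an exact match fails
def toy_match_type(query, feature):
    same_position = (query['chromosome'] == feature['chromosome'] and
                     query['start'] == feature['start'] and query['end'] == feature['end'])
    overlaps = (query['chromosome'] == feature['chromosome'] and
                query['start'] <= feature['end'] and feature['start'] <= query['end'])
    if same_position and query.get('alt') == feature.get('alt'):
        return 'exact' #same coordinates and same alternate allele
    if same_position:
        return 'positional' #same coordinates, different or missing allele
    if overlaps:
        return 'regional' #placeholder name: overlapping coordinates only
    if query.get('geneSymbol') == feature.get('geneSymbol'):
        return 'gene' #placeholder name: same gene only
    return None

toy_query = {'chromosome': '7', 'start': 140453136, 'end': 140453136, 'alt': 'T', 'geneSymbol': 'BRAF'}
toy_feature = {'chromosome': '7', 'start': 140453136, 'end': 140453136, 'alt': 'A', 'geneSymbol': 'BRAF'}
print(toy_match_type(toy_query, toy_feature)) #falls back to 'positional'
###Output
_____no_output_____
###Markdown
Ranking candidate matches by a tier order like this is the intuition behind the hierarchical search demonstrated below.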
###Code
hits = core_vdb.search_by_feature(chromosome=7, start=140453136, end=140453136, reference_name='GRCh37')
v600k = [x['best_match']['p'] for x in hits if x['best_match']['feature'].name.endswith('V600K')]
len(v600k)
unique_features = set()
x = [x.features for x in core_vdb]
for fset in x:
unique_features.update(fset)
unique_features = list(unique_features)
len(unique_features)
# This is a computationally expensive operation (~1 minute for the 2800 searches). Could be sped up through indexed searching.
# feature_hits = dict()
# for feature in unique_features:
# feature_hits[feature] = core_vdb.search_by_feature(genomic_feature=feature)
# New method
feature_hits = core_vdb.search_by_features(genomic_features=unique_features)
ranking = viccdb.ViccDb.MATCH_RANKING
hits_by_type = Counter()
sources_by_type = defaultdict(Counter)
for match_type in ranking:
typed_hits = [x for x in feature_hits if ranking.index(x['best_match']['type']) <= ranking.index(match_type)]
hits_by_type[match_type] = len(typed_hits)
typed_associations_by_query = defaultdict(set)
for hit in typed_hits:
typed_associations_by_query[hit['query']].add(hit['association'])
for associations in typed_associations_by_query.values():
sources = {association.source for association in associations}
sources_by_type[len(sources)][match_type] += 1
hits_by_type
sources_by_type
fig, ax = plt.subplots()
source_counts = sorted(sources_by_type)
groups = viccdb.ViccDb.MATCH_RANKING
width = 0.15
plot_elements = list()
ind = np.arange(len(groups))
for i, source_count in enumerate(source_counts):
type_counts = sources_by_type[source_count]
x = [type_counts[k] for k in groups]
p = ax.bar(ind + width*(i-2.5), x, width, label=source_count)
plot_elements.append(p)
ax.set_xticks(ind)
ax.set_xticklabels(groups)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, title='Sources')
plt.ylabel('Queries')
plt.show()
# fig.savefig(str(FIGPATH / 'misc_figures' / 'search_performance.pdf'), format='pdf')
# feature_hits = core_vdb.search_by_features(genomic_features=ubiquitous_features)
q = {hit['query'] for hit in feature_hits}
len(q)
c = Counter()
for f in unique_features:
if len(f) > 9 and len(f) < 100:
l = '10-99'
elif len(f) >= 100:
l = '100+'
else:
l = len(f)
c[l] += 1
c
###Output
_____no_output_____
###Markdown
Sequence Ontology
###Code
count = Counter()
for association in core_vdb:
for feature in association.features:
count[feature.so.get('name', 'Uncategorized')] += 1
count.most_common(10)
(5052 + 3263) / sum(count.values())
# Associations lacking any sequence ontology ID
def no_soid(association):
x = [feature.so.get('name', 'Uncategorized') == 'Uncategorized' for feature in association.features]
return all(x)
no_soid_group = core_vdb.select(no_soid)
no_soid_group.report_groups(core_vdb)
# Associations lacking at least one sequence ontology ID
def missing_soid(association):
x = [feature.so.get('name', 'Uncategorized') == 'Uncategorized' for feature in association.features]
return any(x)
missing_soid_group = core_vdb.select(missing_soid)
missing_soid_group.report_groups(core_vdb)
partial_soid_group = missing_soid_group - no_soid_group
partial_soid_group.report_groups(core_vdb)
partial_soid_group[0].features[2].so
###Output
_____no_output_____
###Markdown
Project GENIE feature match
###Code
# Loading Disease graph here for use in GENIE disease matching
import obonet
import networkx
url = 'https://raw.githubusercontent.com/DiseaseOntology/HumanDiseaseOntology/v2018-05-11/src/ontology/HumanDO.obo'
graph = obonet.read_obo(url)
assert networkx.is_directed_acyclic_graph(graph)
so_by_name = {
"3'Flank": {'name': 'downstream_transcript_variant', 'soid': 'SO:0001987'},
"3'UTR": {'name': '3_prime_UTR_variant', 'soid': 'SO:0001624'},
"5'Flank": {'name': 'upstream_transcript_variant', 'soid': 'SO:0001986'},
"5'UTR": {'name': '5_prime_UTR_variant', 'soid': 'SO:0001623'},
"Frame_Shift_Del": {'name': 'frameshift_truncation', 'soid': 'SO:0001910'},
"Frame_Shift_Ins": {'name': 'frameshift_elongation', 'soid': 'SO:0001909'},
"In_Frame_Del": {'name': 'inframe_deletion', 'soid': 'SO:0001822'},
"In_Frame_Ins": {'name': 'inframe_insertion', 'soid': 'SO:0001821'},
"Intron": {'name': 'intron_variant', 'soid': 'SO:0001627'},
"Missense_Mutation": {'name': 'missense_variant', 'soid': 'SO:0001583'},
"Nonsense_Mutation": {'name': 'stop_gained', 'soid': 'SO:0001587'},
"Nonstop_Mutation": {'name': 'stop_lost', 'soid': 'SO:0001578'},
"Silent": {'name': 'synonymous_variant', 'soid': 'SO:0001819'},
"Splice_Region": {'name': 'splice_region_variant', 'soid': 'SO:0001630'},
"Splice_Site": {'name': 'splice_site_variant', 'soid': 'SO:0001629'},
"Translation_Start_Site": {'name': 'initiator_codon_variant', 'soid': 'SO:0001582'}
}
alias_to_doids = defaultdict(list)
def map_to_doid(graph, doid):
for _id in graph.predecessors(doid):
map_to_doid(graph, _id)
xrefs = graph.node[doid].get('xref', [])
for xref in xrefs:
source, xref_id = xref.split(':')
alias_to_doids[(source, xref_id)].append(doid)
map_to_doid(graph, 'DOID:162')
oncotree_to_aliases = dict()
oncotree_types_url = 'http://oncotree.mskcc.org/api/tumorTypes'
resp = requests.get(oncotree_types_url, params={'version': 'oncotree_2018_05_01'})
resp.raise_for_status()
oncotree_types = resp.json()
for o_type in oncotree_types:
oncotree_to_aliases[o_type['code']] = []
for source, terms in o_type['externalReferences'].items():
for term in terms:
oncotree_to_aliases[o_type['code']].append((source, term))
oncotree_to_doids = dict()
for o_term, aliases in oncotree_to_aliases.items():
if not aliases:
oncotree_to_doids[o_term] = None
continue
doids = set()
for alias in aliases:
alias_doids = alias_to_doids.get(alias, False)
if alias_doids:
doids.update(alias_doids)
if doids:
oncotree_to_doids[o_term] = list(doids)
else:
oncotree_to_doids[o_term] = None
patched_doids = {
# 'MAAP': 'DOID:3608',
# 'SCCNOS': 'DOID:1749',
# 'MACR': 'DOID:0050861',
# 'OCSC': 'DOID:0050866',
# 'UDMN': 'DOID:162',
# 'CUP': 'DOID:162',
# 'CUPNOS': 'DOID:162',
# 'MYF': 'DOID:4971',
# 'HGSOC': 'DOID:0050933',
# 'LGSOC': 'DOID:0050933',
# 'SOC': 'DOID:0050933',
# 'PANET': 'DOID:1798',
# 'IMT': 'DOID:0050905',
# 'OPHSC': 'DOID:0050921',
# 'MDS': 'DOID:0050908',
'ACYC': 'DOID:0080202',
'HL': 'DOID:8567',
'SEM': 'DOID:4440'
}
sample_to_patient = dict()
patient_to_samples = defaultdict(list)
sample_to_doid = dict()
sample_oncotree_code = dict()
required_patches = set()
optional_patches = set()
no_patch = set()
with open(DATA_ROOT / 'GENIE_v3' / 'data_clinical_sample_3.0.0.txt') as f:
for _ in range(4):
f.readline() # get past info lines
reader = csv.DictReader(f, delimiter="\t")
for row in reader:
patient = row['PATIENT_ID']
sample = row['SAMPLE_ID']
sample_to_patient[sample] = patient
patient_to_samples[patient].append(sample)
oncotree_code = row['ONCOTREE_CODE'].upper()
doids = oncotree_to_doids[oncotree_code]
if not doids:
required_patches.add(oncotree_code)
doid = patched_doids.get(oncotree_code, None)
elif len(doids) > 1:
optional_patches.add(oncotree_code)
doid = patched_doids[oncotree_code]
else:
no_patch.add(oncotree_code)
doid = doids[0]
sample_to_doid[sample] = doid
sample_oncotree_code[sample] = oncotree_code
patient_to_samples = dict(patient_to_samples)
# Load genie variants
classification_counter = Counter()
genie_features = list()
genie_features_by_patient = defaultdict(list)
genie_features_by_variant = defaultdict(list)
genie_features_by_sample = defaultdict(list)
unfiltered_patients_with_variants = set()
EXCLUDED_CLASSIFICATIONS = [
'Silent',
"3'Flank",
"3'UTR",
"5'Flank",
"5'UTR",
'Intron',
'Splice_Region'
]
with open(DATA_ROOT / 'GENIE_v3' / 'data_mutations_extended_3.0.0.txt', 'r') as maf:
sample_list = maf.readline().strip().split(' ')[1:]
maf_reader = csv.DictReader(maf, delimiter="\t")
for row in maf_reader:
start = row['Start_Position']
end = row['End_Position']
chromosome = row['Chromosome']
patient = sample_to_patient[row['Tumor_Sample_Barcode']]
unfiltered_patients_with_variants.add(patient)
if row['Variant_Classification'] in EXCLUDED_CLASSIFICATIONS:
continue
if row['Reference_Allele'] != row['Tumor_Seq_Allele1']:
alt = row['Tumor_Seq_Allele1']
else:
alt = row['Tumor_Seq_Allele2']
reference = row['NCBI_Build']
feature = viccdb.GenomicFeature(
chromosome=chromosome,
start=start,
end=end,
referenceName=reference,
name=':'.join([row['Tumor_Sample_Barcode'], row['HGVSp_Short']]),
geneSymbol=row['Hugo_Symbol'],
sequence_ontology=so_by_name[row['Variant_Classification']],
alt=alt
)
genie_features.append(feature)
genie_features_by_patient[patient].append(feature)
genie_features_by_variant[(reference, chromosome, start, end, alt)].append(feature)
genie_features_by_sample[row['Tumor_Sample_Barcode']].append(feature)
len(genie_features)
# Do a GENIE feature search across knowledgebase. Huge search operation takes ~5.5 min to complete
from timeit import default_timer
tick = default_timer()
genie_search_results = core_vdb.search_by_features(genie_features)
tock = default_timer()
print(tock-tick)
featured_patients = set(genie_features_by_patient)
print(f'Avg. queries / second: {len(genie_features)/(tock-tick)}')
print(f'Search results: {len(genie_search_results)}')
print(f'Avg. search results / query: {len(genie_search_results)/len(genie_features)}')
genie_feature_lengths = Counter()
for feature in genie_features:
genie_feature_lengths[len(feature)] += 1
result_size = defaultdict(Counter)
for result in genie_search_results:
for match_type in ranking:
if ranking.index(result['best_match']['type']) <= ranking.index(match_type):
result_size[result['query']][match_type] += 1
print('Percentage of queries with results: {:.1%}'.format(len(result_size) / len(genie_features)))
exact_match_features = [x for x in result_size
if result_size[x]['exact'] +
result_size[x]['positional'] > 0]
len(exact_match_features)
print('Percentage of queries with exact results: {:.1%}'.format(len(exact_match_features) / len(genie_features)))
data = defaultdict(list)
for feature in genie_features:
length = len(feature)
for match_type in ranking:
data[match_type].append([length, result_size[feature][match_type]])
colors = dict(zip(ranking, ['blue', 'green', 'orange', 'red']))
pcs = list()
labels = list()
plt.figure()
plt.xscale('log')
plt.yscale('log')
for t in reversed(ranking):
coord_pairs = data[t]
a = np.array(coord_pairs) + 1
pc = plt.scatter(a[:,0], a[:,1], marker='.', color=colors[t], alpha=0.05)
pcs.append(pc)
labels.append(t)
plt.xlabel('Feature Size')
plt.ylabel('Interpretations')
plt.title('Feature v Result Size of GENIE Variants')
plt.legend(pcs, labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.tight_layout() # drops legend :-/
# plt.savefig(str(FIGPATH / 'feature_v_result_size_genie.pdf'))
# plt.savefig(str(FIGPATH / 'feature_v_result_size_genie.png'))
plt.show()
for t in reversed(ranking):
data = [result_size[q][t] for q in genie_features
if result_size[q][t] > 0
]
plt.hist(data, alpha=1, label=t, color=colors[t], bins=150)
plt.xlabel('Interpretations Matched to Variant Query')
plt.ylabel('Queries')
plt.title('Counts of Interpretations per Variant Query by Search Strategy')
plt.legend()
# plt.savefig(str(FIGPATH / 'genie_interpretation_v_query_all.pdf'))
plt.show()
notables = Counter()
for t in reversed(ranking[:3]):
data = [result_size[q][t] for q in genie_features
if result_size[q][t] > 0
]
for q in genie_features:
if result_size[q][t] > 90:
notables[(q,t)] += 1
plt.hist(data, alpha=1, label=t, color=colors[t], bins=150)
plt.title('Counts of Interpretations per Variant Query by Search Strategy')
plt.xlabel('Interpretations Matched to Variant Query')
plt.ylabel('Queries')
plt.legend()
# plt.savefig(str(FIGPATH / 'genie_interpretation_v_query_no_regional.pdf'))
plt.show()
coord_q = dict()
for notable in notables.most_common(len(notables)):
if notable[1] > 950:
feature = notable[0][0]
t = notable[0][1]
queries = notable[1]
HGVSp = feature.name.split(':')[1]
coord_q[(result_size[feature][t], queries)] = (feature, t)
else:
break
for coord in sorted(coord_q):
feature, t = coord_q[coord]
HGVSp = feature.name.split(':')[1]
queries = coord[1]
print(f'({result_size[feature][t]}, {queries}): {feature.gene_symbol} {HGVSp} ({t})')
###Output
(92, 1425): KRAS p.G12V (exact)
(94, 998): KRAS p.G12C (exact)
(111, 1062): PIK3CA p.E545K (exact)
(113, 1062): PIK3CA p.E545K (positional)
(117, 1062): PIK3CA p.E545K (focal)
(160, 1080): PIK3CA p.H1047R (exact)
(161, 1720): KRAS p.G12D (exact)
(174, 1080): PIK3CA p.H1047R (positional)
(178, 1080): PIK3CA p.H1047R (focal)
(205, 998): KRAS p.G12C (positional)
(308, 1425): KRAS p.G12V (positional)
(308, 1720): KRAS p.G12D (positional)
(343, 998): KRAS p.G12C (focal)
(457, 1425): KRAS p.G12V (focal)
(457, 1720): KRAS p.G12D (focal)
(565, 1439): BRAF p.V600E (exact)
(584, 1439): BRAF p.V600E (positional)
(655, 1439): BRAF p.V600E (focal)
###Markdown
Disease
###Code
oncotree_codes = {x['code'] for x in oncotree_types}
print(f'There are {len(oncotree_codes)} diseases in oncotree.')
no_xrefs = {x for x, v in oncotree_to_aliases.items() if not v}
print(f'Of these, {len(no_xrefs)} have no xrefs.')
print(f'{len(required_patches)} of {len(required_patches | no_patch)} oncotree diseases from GENIE do not automatically map to doid.')
print(f'Of these, {len(required_patches & no_xrefs)} have no xrefs.')
a = np.array([
[len(required_patches & no_xrefs), len(required_patches - no_xrefs)],
[len(oncotree_codes & no_xrefs), len(oncotree_codes - no_xrefs)]
])
print(a)
stats.fisher_exact(a)
c = Counter()
for x in core_vdb:
try:
c[x.disease.source] += 1
except AttributeError:
c[None] += 1
c
doid = core_vdb.select(lambda x: x.disease is not None and x.disease.id.startswith('DOID:'))
# Note: a substantial number of JAX-CKB entries report the generic term "cancer" (DOID:162) as the disease, which inflates this group
doid.select(lambda x: x.disease.id == 'DOID:162').report_groups(core_vdb)
id_to_name = {id_: data['name'] for id_, data in graph.nodes(data=True)}
c = Counter()
for x in doid:
try:
c[x.disease.id] += 1
except AttributeError:
c[None] += 1
for k, v in c.most_common(20):
print(f'{id_to_name[k]}: {v}')
organ_system_ids = graph.predecessors('DOID:0050686')
benign_and_premalignant_ids = ['DOID:0060072', 'DOID:0060071']
cell_type_cancer_id = 'DOID:0050687'
cancer_id = 'DOID:162'
organ_system = dict()
benign_and_premalignant = dict()
def assign_to_id(ids, id_, d):
if len(ids) == 0:
return
for i in ids:
d[i] = id_
assign_to_id(graph.predecessors(i), id_, d)
return
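# Sketch of the intent here (inferred from the traversal direction): in this DO
# graph, predecessors of a term are its child (more specific) terms, so
# assign_to_id maps every descendant of id_ to id_ in the dictionary d.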
for organ_id in (organ_system_ids + [cell_type_cancer_id]):
assign_to_id(graph.predecessors(organ_id), organ_id, organ_system)
organ_system[organ_id] = organ_id
for id_ in benign_and_premalignant_ids:
assign_to_id(graph.predecessors(id_), id_, benign_and_premalignant)
benign_and_premalignant[id_] = id_
return_id = organ_system['DOID:0050615']
graph.node[return_id]
normalized_disease = core_vdb.select(lambda x: x.disease is not None)
normalized_disease.report_groups(core_vdb)
do_sourced = normalized_disease.select(lambda x: x.disease.source in ['DOID', 'http://purl.obolibrary.org/obo/doid'])
do_sourced.report_groups(core_vdb)
cancer_organ_interpretations = do_sourced.select(lambda x: organ_system.get(x.disease.id, False))
benign_premalignant_interpretations = do_sourced.select(lambda x: benign_and_premalignant.get(x.disease.id, False))
cancer_interpretations = do_sourced.select(lambda x: x.disease.id == cancer_id)
other_interpretations = do_sourced - cancer_organ_interpretations - benign_premalignant_interpretations - cancer_interpretations
cancer_organ_interpretations.report_groups(do_sourced)
benign_premalignant_interpretations.report_groups(do_sourced)
cancer_interpretations.report_groups(do_sourced)
other_interpretations.report_groups(do_sourced)
x = other_interpretations.select(lambda x: x.disease.id.split(':')[0] != 'DOID')
x.report_groups(do_sourced)
c = Counter()
for i in x:
c[i.disease.id.split(':')[0]] += 1
c
entry = x.by_source('civic')[0]
print(entry.disease.source)
print(entry.disease.id)
print(entry.disease.term)
print(entry['feature_names'])
c = Counter()
for association in cancer_organ_interpretations:
disease_id = association.disease.id
organ_id = organ_system[disease_id]
c[organ_id] += 1
for k, v in c.most_common(13):
print(f'{id_to_name[k]}: {v}')
p_cancer = cancer_organ_interpretations.select(lambda x: id_to_name[organ_system[x.disease.id]] == 'peritoneum cancer')[0]
print(p_cancer.disease)
print(p_cancer.source)
print(p_cancer.description)
len(set(organ_system.values()))
with open(DATA_ROOT / 'TopNodes_DOcancerslim_3_18.json', 'r') as f:
result = json.load(f)
nodes = result['graphs'][0]['nodes']
nodes[0]['id'].split('/')[-1].replace('_', ':')
topnodes_docancerslim = list()
doid_re = re.compile(r'DOID:\d+')
for node in nodes:
doid = node['id'].split('/')[-1].replace('_', ':')
if doid_re.match(doid):
topnodes_docancerslim.append(doid)
len(topnodes_docancerslim)
def assign_to_nearest_id(ids, id_, d, terminals):
if len(ids) == 0:
return
for i in ids:
if i in terminals:
assignment = i
else:
assignment = id_
d[i] = assignment
assign_to_nearest_id(graph.predecessors(i), assignment, d, terminals)
return
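# Like assign_to_id, but a term is re-assigned whenever a 'terminal' id is met
# on the way down; used below to map every cancer DOID to its nearest
# TopNodes_DOcancerslim ancestor, defaulting to DOID:162 itself.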
topnode_map = dict()
assign_to_nearest_id(['DOID:162'], 'DOID:162', topnode_map, topnodes_docancerslim)
cancer_counts = Counter()
cancer_associations = do_sourced.select(
lambda x: topnode_map.get(x.disease.id, False))
for association in cancer_associations:
disease_id = association.disease.id
topnode_id = topnode_map[disease_id]
cancer_counts[topnode_id] += 1
cancer_associations.report_groups(do_sourced)
other_associations = do_sourced - cancer_associations
other_associations.report_groups(do_sourced)
len(cancer_counts)
for k, v in cancer_counts.most_common(48):
print(f'{id_to_name[k]}: {v}')
def write_disease_counts(file_handle, disease_counter):
writer = csv.writer(file_handle)
s = sum(disease_counter.values())
writer.writerow(['DOID', 'Disease Name', 'Interpretations', 'Percentage'])
for k, v in disease_counter.most_common(len(disease_counter)):
writer.writerow([k, id_to_name[k], v, '{:.2%}'.format(v/s)])
with open('out/interpretation_disease_topnode_counts.csv', 'w') as f:
write_disease_counts(f, cancer_counts)
# Benign
benign_id = 'DOID:0060072'
benign = dict()
assign_to_id(graph.predecessors(benign_id), benign_id, benign)
benign[benign_id] = benign_id
benign_associations = other_associations.select(
lambda x: benign.get(x.disease.id, False))
other_associations = other_associations - benign_associations
benign_associations.report_groups(do_sourced)
# pre-malignant
premalignant_id = 'DOID:0060071'
premalignant = dict()
assign_to_id(graph.predecessors(premalignant_id), premalignant_id, premalignant)
premalignant[premalignant_id] = premalignant_id
premalignant_associations = other_associations.select(
lambda x: premalignant.get(x.disease.id, False))
premalignant_associations.report_groups(do_sourced)
# Make data
common_cancers = list(filter(lambda x: x[0] != "DOID:162", cancer_counts.most_common(6)))
interpretation_group_names=[id_to_name[x[0]] for x in common_cancers] + ['other cancers'] + ['benign', 'other disease']
common_cancer_values = [x[1] for x in common_cancers]
interpretation_group_sizes=common_cancer_values + [sum(cancer_counts.values()) - sum(common_cancer_values),
len(benign_associations),
len(other_associations)]
incidence_by_topnode = Counter()
mortality_by_topnode = Counter()
p = (len(benign_associations) + len(other_associations)) / len(do_sourced)
with open(DATA_ROOT / 'Cancer Incidence and Mortality 2018.csv', 'r') as f:
reader = csv.DictReader(f)
for row in reader:
incidence_by_topnode[topnode_map[row['DOID']]] += int(row['New Cases'])
mortality_by_topnode[topnode_map[row['DOID']]] += int(row['Estimated Deaths'])
def select_by_percent(counter, percent=5):
assert percent <= 100
s = sum(counter.values())
out = Counter()
for k, v in counter.most_common(len(counter)):
if k == "DOID:162":
continue
p = v/s
if p*100 < percent:
break
out[k] = v
print(f'{id_to_name[k]}: {p}')
return out
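# Helper semantics: keep categories in descending frequency until one falls
# below `percent` percent of the total, skipping the generic root DOID:162;
# used below to pick the prevalent incidence and mortality groups.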
with open('out/NCI_disease_topnode_counts.csv', 'w') as f:
writer = csv.writer(f)
s1 = sum(incidence_by_topnode.values())
s2 = sum(mortality_by_topnode.values())
writer.writerow(
['DOID', 'Disease Name',
'Estimated New Cases, 2018, US', 'Percentage',
'Estimated Deaths, 2018, US', 'Percentage'
])
for k, v in incidence_by_topnode.most_common(len(incidence_by_topnode)):
v2 = mortality_by_topnode[k]
writer.writerow(
[k, id_to_name[k],
v, '{:.2%}'.format(v/s1),
v2, '{:.2%}'.format(v2/s2)
])
prevalent_incidence = select_by_percent(incidence_by_topnode)
prevalent_mortality = select_by_percent(mortality_by_topnode)
len(incidence_by_topnode)
incidence_group_names = [id_to_name[x] for x in prevalent_incidence] + ['other cancers']
incidence_group_sizes = list(prevalent_incidence.values()) + [
sum(incidence_by_topnode.values()) - sum(prevalent_incidence.values())
]
mortality_group_names = [id_to_name[x] for x in prevalent_mortality] + ['other cancers']
mortality_group_sizes = list(prevalent_mortality.values()) + [
sum(mortality_by_topnode.values()) - sum(prevalent_mortality.values())
]
RADIUS=1
greens = plt.cm.Greens
grays = plt.cm.Greys
blues = plt.cm.Blues
reds = plt.cm.Reds
colors = [
greens(.85),
greens(.7),
greens(.55),
greens(.4),
greens(.25),
greens(.1),
grays(.5),
grays(.25)
]
fig1, ax1 = plt.subplots()
pie, _ = ax1.pie(interpretation_group_sizes, colors=colors, radius=RADIUS,
labels=interpretation_group_names)
ax1.axis('equal')
plt.setp(pie, edgecolor='white')
# plt.savefig(str(FIGPATH / 'disease_interpretations.pdf'))
plt.show()
colors = [
blues(.85),
blues(.75 * 5/6 + .1),
blues(.75 * 4/6 + .1),
blues(.75 * 3/6 + .1),
blues(.75 * 2/6 + .1),
blues(.75 * 1/6 + .1),
blues(.1)
]
fig1, ax1 = plt.subplots()
pie, _ = ax1.pie(incidence_group_sizes, colors=colors, radius=RADIUS,
labels=incidence_group_names)
ax1.axis('equal')
plt.setp(pie, edgecolor='white')
# plt.savefig(str(FIGPATH / 'disease_incidence.pdf'))
plt.show()
colors = [
reds(.85),
reds(.7),
reds(.55),
reds(.4),
reds(.25),
reds(.1)
]
fig1, ax1 = plt.subplots()
pie, _ = ax1.pie(mortality_group_sizes, colors=colors, radius=RADIUS,
labels=mortality_group_names)
ax1.axis('equal')
plt.setp(pie, edgecolor='white')
# plt.savefig(str(FIGPATH / 'disease_mortality.pdf'))
plt.show()
b = benign_associations[0]
print(b.description)
print(b.disease)
print(b.disease.id)
print(b.source)
# MAKE A DISEASE/GENE PLOT: heatmap of ubiquitous gene x disease, heat = tier 1 evidence
###Output
_____no_output_____
###Markdown
Export disease counts for supplementary table
###Code
# d = cancer_associations.get_element_by_source('disease')
# d2 = benign_associations.get_element_by_source('disease')
d = defaultdict(Counter)
d2 = defaultdict(Counter)
d3 = defaultdict(Counter)
for association in cancer_associations:
disease = association.disease
source = association.source
d[source][disease] += 1
for association in benign_associations:
disease = association.disease
source = association.source
d2[source][disease] += 1
for association in other_associations:
disease = association.disease
source = association.source
d3[source][disease] += 1
sorted(d.keys())
diseases = set()
cancer_diseases = set()
benign_diseases = set()
other_diseases = set()
for x in d.values():
cancer_diseases.update(x)
diseases.update(x)
for x in d2.values():
benign_diseases.update(x)
diseases.update(x)
for x in d3.values():
other_diseases.update(x)
diseases.update(x)
###Output
_____no_output_____
###Markdown
with open(FIGPATH / 'Data' / 'disease_counts.csv', 'w') as f:
    header = ['disease', 'doid', 'TopNode_disease', 'TopNode_doid'] + sorted(d.keys())
    writer = csv.DictWriter(f, fieldnames=header)
    writer.writeheader()
    counts = dict()
    for s, v in d.items():
        counts[s] = Counter(v)
    for s, v in d2.items():
        counts[s].update(Counter(v))
    for s, v in d3.items():
        counts[s].update(Counter(v))
    for disease in diseases:
        if disease in cancer_diseases:
            tn_id = topnode_map[disease.id]
            tn_name = id_to_name[tn_id]
        elif disease in benign_diseases:
            tn_id = benign[disease.id]
            tn_name = id_to_name[tn_id]
        elif disease in other_diseases:
            tn_id = None
            tn_name = 'other'
        else:
            raise ValueError
        if not disease.id.startswith('DOID'):
            continue
        try:
            row = {
                'disease': id_to_name[disease.id],
                'doid': disease.id,
                'TopNode_disease': tn_name,
                'TopNode_doid': tn_id
            }
        except KeyError:
            print(f'Failed to find name for {disease.id}: {disease.name}')
            continue
        for s in d:
            row[s] = counts[s][disease]
        writer.writerow(row)
Clinical actionability improvement
###Code
# Stacked bar (Actionability type):
# Group 1: Average actionability, variant only (SD whiskers?)
# Group 2: Aggregate actionability, variant only (p-value bar?; with narrow search)
# Group 3: Aggregate actionability, variant only (p-value bar?; with broad search)
# Group 4: Average actionability, variant + disease
# Group 5: Aggregate actionability, variant + disease (p-value bar?; with narrow search)
# Group 6: Aggregate actionability, variant + disease (p-value bar?; with broad search)
# Actionability by disease type
from collections import defaultdict
genie_search_results[0]['query']
genie_features_by_patient['GENIE-NKI-01CH']
genie_search_results_by_query = defaultdict(list)
for result in genie_search_results:
genie_search_results_by_query[result['query']].append(result)
###Output
_____no_output_____
###Markdown
Interpretations and Actionability
###Code
tier1_disease_gene = Counter()
all_disease_gene = Counter()
for association in core_vdb:
try:
disease = id_to_name[topnode_map[association.disease.id]]
except (KeyError, AttributeError):
continue
level = association.evidence_level
for gene in association.genes:
k = (gene.gene_symbol, disease)
if level in ['A', 'B']:
tier1_disease_gene[k] += 1
all_disease_gene[k] += 1
(gene, disease), count = tier1_disease_gene.most_common(30)[0]
import seaborn as sns
f, ax = plt.subplots(figsize=(3.75, 1.5))
genes = Counter()
diseases = interpretation_group_names[:5]
i = 0
for (gene, disease), count in all_disease_gene.most_common(len(all_disease_gene)):
i += 1
if disease not in diseases:
continue
genes[gene] += count
genes = [x[0] for x in genes.most_common(15)]
heat_array_all = np.zeros((len(diseases),len(genes)))
for i in range(len(genes)):
for j in range(len(diseases)):
heat_array_all[j][i] = all_disease_gene.get((genes[i], diseases[j]), 0)
sns.heatmap(heat_array_all, xticklabels=genes, yticklabels=diseases, robust=True, cmap='Greens')
# f.savefig(str(FIGPATH / 'misc_figures' / 'all_disease_gene_heatmap.pdf'), format='pdf')
# genes = [x.gene_symbol for x in with_genes.element_by_source_stats('genes')['ubiquitous']]
f, ax = plt.subplots(figsize=(3.75, 1.5))
genes = Counter()
diseases = interpretation_group_names[:5]
i = 0
for (gene, disease), count in tier1_disease_gene.most_common(len(tier1_disease_gene)):
i += 1
if disease not in diseases:
continue
genes[gene] += count
genes = [x[0] for x in genes.most_common(15)]
heat_array_tier1 = np.zeros((len(diseases),len(genes)))
for i in range(len(genes)):
for j in range(len(diseases)):
heat_array_tier1[j][i] = tier1_disease_gene.get((genes[i], diseases[j]), 0)
sns.heatmap(heat_array_tier1, xticklabels=genes, yticklabels=diseases, robust=True, cmap='Greens')
# f.savefig(str(FIGPATH / 'misc_figures' / 'tier1_disease_gene_heatmap.pdf'), format='pdf')
all_df = pd.DataFrame.from_dict(all_disease_gene, orient='index')
tier1_df = pd.DataFrame.from_dict(tier1_disease_gene, orient='index')
merged_df = all_df.merge(tier1_df, left_index=True, right_index=True, how='outer').fillna(0)
merged_df.columns = ['all_interpretations', 'tier1_interpretations']
sns.set_style("ticks")
f, ax = plt.subplots(figsize=(5, 4))
ax = sns.regplot(x='all_interpretations', y='tier1_interpretations', fit_reg=False, data=np.log2(merged_df + 1),
marker='.'
)
tdf = np.log2(merged_df + 1)
tdf.loc[(tdf['all_interpretations'] > 8) & (tdf['tier1_interpretations'] < 4)]
with open(DATA_ROOT / 'GENIE_v3' / 'data_clinical_sample_3.0.0.txt') as f, open(OUTPATH / 'genie_mapping.csv', 'w') as out_f:
for _ in range(4):
f.readline() # get past info lines
reader = csv.DictReader(f, delimiter="\t")
writer = csv.writer(out_f)
header = ['PATIENT_ID',
'SAMPLE_ID',
'ONCOTREE_CODE',
'SPECIFIC_CANCER_TYPE',
'CANCER_TYPE',
'DOID',
'DO_NAME',
'TOPNODE_DOID',
'TOPNODE_NAME'
]
writer.writerow(header)
for row in reader:
patient = row['PATIENT_ID']
sample = row['SAMPLE_ID']
oncotree_code = row['ONCOTREE_CODE'].upper()
cancer_type_detailed = row['CANCER_TYPE_DETAILED']
cancer_type = row['CANCER_TYPE']
doid = sample_to_doid[sample]
do_name = id_to_name.get(doid, None)
topnode_id = topnode_map.get(doid, None)
topnode_name = id_to_name.get(topnode_id, None)
writer.writerow([
patient,
sample,
oncotree_code,
cancer_type_detailed,
cancer_type,
doid,
do_name,
topnode_id,
topnode_name
])
dd_score = dict()
def disease_dist(doid_1, doid_2):
try:
key = tuple(sorted([doid_1, doid_2]))
except TypeError:
return -1
if key in dd_score:
return dd_score[key]
best_score = None
queue = [(doid_1, doid_2, 0, False), (doid_2, doid_1, 0, False)]
while queue:
current, target, distance, topnode_hit = queue.pop()
if current == target:
if best_score is None or distance < best_score:
best_score = distance
else:
if topnode_hit:
distance += 1
topnode_hit = False
if current in topnode_map and current == topnode_map[current]:
topnode_hit = True
try:
for successor in graph.successors(current):
queue.append((successor, target, distance, topnode_hit))
except networkx.NetworkXError:
pass
if best_score is None:
best_score = -1
dd_score[key] = best_score
return best_score
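# Semantics of the score, inferred from the traversal above: the walk only
# follows successors (parent terms), so a non-negative value means one DOID is
# an ancestor of the other; 0 means no TopNode boundary is crossed on the way
# up, larger values count TopNode crossings, and -1 means no ancestor
# relationship was found (or a DOID was missing).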
SOURCES = tuple(sorted(core_vdb.sources))
patient_actionability = dict()
disease_actionability_count = Counter()
sample_disease_count = Counter()
disease_feature_count = Counter()
patients_with_topnode = set()
cgi_diseases_with_patient_match = defaultdict(set)
ALL_PATIENT_COUNT = len(patient_to_samples)
for patient in featured_patients:
samples = patient_to_samples[patient]
actionable = np.zeros(40) # [0..5]: sources (exact), 6: combined, 7: deprecated.
# +8 for +disease +16 for +dis/tier1
# +24 for broad variant, +32 for broad disease
for sample in samples:
sample_disease = sample_to_doid[sample]
features = genie_features_by_sample[sample]
evidence_level = None
for feature in features:
results = genie_search_results_by_query.get(feature, [])
for result in results:
best = result['best_match']
dd = disease_dist(result['association'].disease.id, sample_disease)
level = result['association']['association']['evidence_label']
tier1 = level in ['A', 'B']
idx = SOURCES.index(result['association']['source'])
if best['type'] in ['exact', 'positional']:
actionable[idx] = 1
actionable[6] = 1
if result['association'].source == 'cgi' and sample_disease:
cgi_diseases_with_patient_match[patient].add((result['association'].disease.id, sample_disease))
if dd == 0:
actionable[14] = 1
actionable[idx + 8] = 1
if tier1:
actionable[22] = 1
actionable[idx + 16] = 1
if evidence_level is None or level < evidence_level:
evidence_level = level
actionable[idx + 24] = 1
actionable[30] = 1
if dd >= 0:
actionable[idx + 32] = 1
actionable[38] = 1
try:
top_disease = id_to_name[topnode_map[sample_disease]]
sample_disease_count[top_disease] += 1
disease_feature_count[top_disease] += len(features)
except KeyError:
top_disease = None
if evidence_level is not None and top_disease is not None:
disease_actionability_count[(top_disease, evidence_level)] += 1
if top_disease is not None:
patients_with_topnode.add(sample_to_patient[sample])
patient_actionability[patient] = actionable
actionability_grid = np.array(list(patient_actionability.values()))
actionability_sum = actionability_grid.sum(axis=0)
###Output
_____no_output_____
###Markdown
investigating CGI discrepancy
###Code
# len(cgi_diseases_with_patient_match)
patient_disease_match = dict()
for patient, matches in cgi_diseases_with_patient_match.items():
best_score = -1
for cgi_diseases in matches:
score = disease_dist(*cgi_diseases)
if score >= 0:
if best_score == -1:
best_score = score
elif score < best_score:
best_score = score
if best_score < 0:
patient_disease_match[patient] = {'total': 0}
patient_diseases = set([x[1] for x in matches])
for disease in patient_diseases:
patient_disease_match[patient][disease] = Counter()
for cgi_diseases in matches:
patient_disease_match[patient][cgi_diseases[1]][cgi_diseases[0]] += 1
patient_disease_match[patient]['total'] += 1
# for patient in sorted(patient_disease_match, key=lambda x: patient_disease_match[x]['total'], reverse=True):
# print(patient, patient_disease_match[patient]['total'])
disease_counter = Counter()
for patient in patient_disease_match:
for disease in patient_disease_match[patient]:
disease_counter[disease] += 1
disease_counter.most_common(30)
cgi_disease_counter = Counter()
for patient in patient_disease_match:
if 'DOID:3008' in patient_disease_match[patient]:
for cgi_disease in patient_disease_match[patient]['DOID:3008']:
cgi_disease_counter[cgi_disease] += 1
cgi_disease_counter.most_common(30)
###Output
_____no_output_____
###Markdown
disease match analysis
###Code
x = (0, 2, 4, 6, 8, 10) * 4
y = list()
for i in (6, 4, 2, 0):
y.extend((i,) * 6)
levels = ['A','B','C','D']
proportion = np.zeros((4,6))
for i, level in enumerate(levels):
for j, disease in enumerate(diseases):
proportion[i][j] = disease_actionability_count[(disease, level)] / sample_disease_count[disease]
proportion[:,5] = (.5,.25,.1,.01)
f, ax = plt.subplots(figsize=(5, 4))
ax.set_xlim(-1,11)
ax.set_ylim(-1,7)
plt.scatter(x, y, s=proportion*4000)
# f.savefig(str(FIGPATH / 'misc_figures' / 'disease_actionability.pdf'), format='pdf')
diseases
interpretable_disease_counts = np.zeros((4,2))
total_counts = np.zeros(2)
genie_topnode_diseases = set([
x[0] for x in disease_actionability_count
])
for disease in genie_topnode_diseases:
for i, level in enumerate(levels):
if disease in diseases:
j = 0
else:
j = 1
interpretable_disease_counts[i][j] += disease_actionability_count[(disease, level)]
total_counts[j] += sample_disease_count[disease]
test_matrix = np.zeros((2,2))
test_matrix[0,:] = interpretable_disease_counts.sum(axis=0)
test_matrix[1,:] = total_counts - test_matrix[0,:]
print(test_matrix[0,:] / total_counts)
print(test_matrix)
stats.fisher_exact(test_matrix)
interpretable_disease_counts.sum(axis=0)
test_matrix = np.zeros((2,2))
test_matrix[0,:] = interpretable_disease_counts[:2,:].sum(axis=0)
test_matrix[1,:] = total_counts - test_matrix[0,:]
print(test_matrix[0,:] / total_counts)
print(test_matrix)
result = stats.fisher_exact(test_matrix)
print(result)
fig, ax = plt.subplots(figsize=(7.5,3))
counts = actionability_sum
groups = ['Variant', 'Variant + Disease', 'Variant + Disease + Tier I']
subs = SOURCES + ('aggregate',)
width = 0.15
plot_elements = list()
resource_ind = np.arange(3)
for i, source in enumerate(subs):
x = np.array([actionability_sum[i + 24], actionability_sum[i + 32], 0]) / ALL_PATIENT_COUNT
ind = np.array((i, i+12, i+24)) * width
p = ax.bar(ind, x, width, label=source, color='black')
plot_elements.append(p)
for i, source in enumerate(subs):
x = np.array([actionability_sum[i], actionability_sum[i + 8], actionability_sum[i + 16]]) / ALL_PATIENT_COUNT
ind = np.array((i, i+12, i+24)) * width
p = ax.bar(ind, x, width, label=source)
plot_elements.append(p)
ax.set_xticks(ind - 3*width)
# ax.set_xticklabels(groups, rotation='vertical')
handles, labels = ax.get_legend_handles_labels()
half_idx = len(labels) // 2
ax.legend(handles[half_idx:], labels[half_idx:], title='Search Type')
plt.ylabel('% Cohort with Interpretations')
plt.show()
# fig.savefig(str(FIGPATH / 'misc_figures' / 'genie_actionability.pdf'), format='pdf')
(actionability_sum / ALL_PATIENT_COUNT).reshape((5,8))
# Average individual KB variant matching, exact searching
(actionability_sum[:6] / ALL_PATIENT_COUNT).mean()
# Average individual KB variant+disease matching, exact searching
(actionability_sum[8:14] / ALL_PATIENT_COUNT).mean()
# Average individual KB variant+disease+tier I matching, exact searching
(actionability_sum[16:22] / ALL_PATIENT_COUNT).mean()
# Average individual KB variant matching, broad searching
(actionability_sum[24:30] / ALL_PATIENT_COUNT).mean()
# Average individual KB variant+disease matching, broad searching
(actionability_sum[32:38] / ALL_PATIENT_COUNT).mean()
c = defaultdict(Counter)
for a in core_vdb:
c[a.source][a.evidence_level] += 1
c
for source, counts in c.items():
tier1 = (counts['A'] + counts['B']) / sum(counts.values())
print("{}: Tier 1 is {:.1%} of total.".format(source, tier1))
s = len(patients_with_topnode)
s / ALL_PATIENT_COUNT
len(sample_oncotree_code)
code_patient_count = Counter()
for patient, samples in patient_to_samples.items():
patient_codes = set()
for sample in samples:
patient_codes.add(sample_oncotree_code[sample])
for code in patient_codes:
code_patient_count[code] += 1
patient_count = len(patient_to_samples)
for code, count in code_patient_count.most_common():
f = count / patient_count
print('{}: {:.1%}, {}'.format(code, f, count))
cancer_rank = {pair[0]: rank for rank, pair in enumerate(
code_patient_count.most_common()
) if pair[0] not in patched_doids} # Exclude manual patching for analysis
mapped = list()
unmapped = list()
for code, rank in cancer_rank.items():
doids = oncotree_to_doids.get(code, False)
if doids:
mapped.append(rank)
else:
unmapped.append(rank)
print(mapped)
print(unmapped)
print(len(mapped) / (len(unmapped) + len(mapped)))
stats.mannwhitneyu(mapped, unmapped, alternative='two-sided')
cancer_rank
len(unmapped)
np.mean(mapped)
np.mean(unmapped)
###Output
_____no_output_____
###Markdown
Interpretation gene intersection search
###Code
a = core_vdb[0]
f = genie_features[0]
e_level = defaultdict(dict)
gene_diseases = defaultdict(lambda: defaultdict(set))
for association in core_vdb:
source = association.source
for gene in association.genes:
try:
disease = association.disease.id
except AttributeError:
continue
key = (gene.symbol, disease)
gene_diseases[source][gene.symbol].add(disease)
gene_diseases['aggregate'][gene.symbol].add(disease)
current = e_level[source].get(key, None)
new = association.evidence_level
if current is None or new < current:
e_level[source][key] = new
current = e_level['aggregate'].get(key, None)
if current is None or new < current:
e_level['aggregate'][key] = new
patient_gene_actionability = dict()
for patient in featured_patients:
samples = patient_to_samples[patient]
actionable = np.zeros(24) # [0..5]: sources (exact), 6: combined, 7: deprecated.
# +8 for +disease +16 for +dis/tier1
aggregate_gene_diseases = gene_diseases['aggregate']
aggregate_e_level = e_level['aggregate']
for sample in samples:
sample_disease = sample_to_doid[sample]
features = genie_features_by_sample[sample]
evidence_level = None
for feature in features:
feature_gene = feature.gene_symbol
if feature_gene not in aggregate_gene_diseases:
continue
actionable[6] = 1
for interpretation_disease in aggregate_gene_diseases[feature_gene]:
if disease_dist(interpretation_disease, sample_disease) >= 0:
actionable[14] = 1
if aggregate_e_level[(feature_gene, interpretation_disease)] in ['A', 'B']:
actionable[22] = 1
break
for i, source in enumerate(SOURCES):
source_e_level = e_level[source]
source_gene_diseases = gene_diseases[source]
if feature_gene not in source_gene_diseases:
continue
actionable[i] = 1
if not actionable[14]:
continue
for interpretation_disease in source_gene_diseases[feature_gene]:
if disease_dist(interpretation_disease, sample_disease) >= 0:
actionable[i + 8] = 1
if source_e_level[(feature_gene, interpretation_disease)] in ['A', 'B']:
actionable[i + 16] = 1
break
patient_gene_actionability[patient] = actionable
gene_actionability_grid = np.array(list(patient_gene_actionability.values()))
gene_actionability_sum = gene_actionability_grid.sum(axis=0)
(gene_actionability_sum / ALL_PATIENT_COUNT).reshape((3,8))
fig, ax = plt.subplots(figsize=(7.5,3))
counts = gene_actionability_sum
groups = ['Variant', 'Variant + Disease', 'Variant + Disease + Tier I']
subs = SOURCES + ('aggregate',)
width = 0.15
plot_elements = list()
resource_ind = np.arange(3)
# for i, source in enumerate(subs):
# x = np.array([gene_actionability_sum[i + 24], gene_actionability_sum[i + 32], 0]) / gene_actionability_grid.shape[0]
# ind = np.array((i, i+12, i+24)) * width
# p = ax.bar(ind, x, width, label=source, color='black')
# plot_elements.append(p)
for i, source in enumerate(subs):
x = np.array([gene_actionability_sum[i], gene_actionability_sum[i + 8], gene_actionability_sum[i + 16]]) / ALL_PATIENT_COUNT
ind = np.array((i, i+12, i+24)) * width
p = ax.bar(ind, x, width, label=source)
plot_elements.append(p)
ax.set_xticks(ind - 3*width)
# ax.set_xticklabels(groups, rotation='vertical')
handles, labels = ax.get_legend_handles_labels()
# half_idx = len(labels) // 2
half_idx = 0
lgd = ax.legend(handles[half_idx:], labels[half_idx:], title='Search Type',
bbox_to_anchor=(1,1)
)
plt.ylabel('% Cohort with Interpretations')
plt.show()
# fig.savefig(str(FIGPATH / 'misc_figures' / 'genie_gene_actionability.pdf'), format='pdf',
# bbox_extra_artists=(lgd, ), bbox_inches='tight'
# )
###Output
_____no_output_____
###Markdown
Quantitative value of harmonization Genes
###Code
raw_genes = list()
###Output
_____no_output_____
###Markdown
CGI
###Code
def create_raw_cgi_genes(vdb):
cgi = vdb.by_source('cgi')
genes = list()
for interpretation in cgi:
for gene in interpretation['raw']['Gene'].split(';'):
genes.append(gene)
return genes
cgi_genes = create_raw_cgi_genes(core_vdb)
raw_genes.extend(set(cgi_genes))
###Output
_____no_output_____
###Markdown
CIViC
###Code
def create_raw_civic_genes(vdb):
civic = vdb.by_source('civic')
genes = list()
for interpretation in civic:
genes.append(interpretation['raw']['entrez_name'])
return genes
civic_genes = create_raw_civic_genes(core_vdb)
raw_genes.extend(set(civic_genes))
###Output
_____no_output_____
###Markdown
JAX-CKB
###Code
def create_raw_jax_genes(vdb):
jax = vdb.by_source('jax')
genes = list()
for interpretation in jax:
for gene in interpretation['genes']:
genes.append(gene.strip())
return genes
jax_genes = create_raw_jax_genes(core_vdb)
raw_genes.extend(set(jax_genes))
###Output
_____no_output_____
###Markdown
MolecularMatch
###Code
def create_raw_molecularmatch_genes(vdb):
mm = vdb.by_source('molecularmatch')
genes = list()
for interpretation in mm:
genes2 = interpretation['raw'].get('includeGene0', None)
if not genes2:
genes2 = interpretation['raw'].get('includeGene1', None)
if genes2:
genes.extend(genes2)
return genes
mm_genes = create_raw_molecularmatch_genes(core_vdb)
raw_genes.extend(set(mm_genes))
###Output
_____no_output_____
###Markdown
OncoKB
###Code
def create_raw_okb_genes(vdb):
okb = vdb.by_source('oncokb')
genes = list()
for interpretation in okb:
gene = interpretation['raw']['clinical']['gene']
genes.append(gene)
return genes
okb_genes = create_raw_okb_genes(core_vdb)
raw_genes.extend(set(okb_genes))
###Output
_____no_output_____
###Markdown
PMKB
###Code
def create_raw_pmkb_genes(vdb):
pmkb = vdb.by_source('pmkb')
genes = list()
for interpretation in pmkb:
gene = interpretation['raw']['variant']['gene']['name']
genes.append(gene)
return genes
pmkb_genes = create_raw_pmkb_genes(core_vdb)
raw_genes.extend(set(pmkb_genes))
###Output
_____no_output_____
###Markdown
Harmonized genes
###Code
h_genes = list()
for source in core_vdb.sources:
unique_source_genes = set()
for interpretation in core_vdb.by_source(source):
for gene in interpretation.genes:
unique_source_genes.add(gene.symbol)
print(f'{source}: {len(unique_source_genes)}')
h_genes.extend(unique_source_genes)
###Output
molecularmatch: 109
civic: 296
pmkb: 42
oncokb: 44
jax: 107
cgi: 182
###Markdown
Comparison
###Code
raw_start = len(raw_genes)
raw_uniq = len(set(raw_genes))
h_start = len(h_genes)
h_uniq = len(set(h_genes))
1 - (h_uniq / h_start)
1 - (raw_uniq / raw_start)
###Output
_____no_output_____
###Markdown
Variants
###Code
raw_features = list()
###Output
_____no_output_____
###Markdown
CGI
###Code
hgvs_re = re.compile(r'(.*):g.(\d+)(\w+)>(\w+)')
def hgvs_to_coords(hgvs):
match = hgvs_re.match(hgvs)
if not match:
return None
groups = match.groups()
return (str(groups[0]), int(groups[1]), int(groups[1]) + len(groups[2]) - 1)
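# Quick sanity check of the parser on a single-base substitution in HGVS g.
# notation (the coordinate is illustrative only): substitutions collapse to a
# one-position interval.
assert hgvs_to_coords('chr7:g.140453136A>T') == ('chr7', 140453136, 140453136)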
def create_raw_cgi_features(vdb):
cgi = vdb.by_source('cgi')
features = list()
for interpretation in cgi:
for f in interpretation['raw']['gDNA']:
if not f:
continue
coords = hgvs_to_coords(f)
if coords:
features.append(coords)
return features
cgi_features = create_raw_cgi_features(core_vdb)
len(set(cgi_features))
raw_features.extend(set(cgi_features))
###Output
_____no_output_____
###Markdown
CIViC
###Code
def create_raw_civic_features(vdb):
civic = vdb.by_source('civic')
features = list()
harmonized_features = list()
for interpretation in civic:
coordinates = interpretation['raw']['coordinates']
gf = (
coordinates['chromosome'],
coordinates['start'],
coordinates['stop'],
# coordinates['variant_bases']
)
if not all(gf[:3]):
continue
features.append(gf)
return features
civic_features = create_raw_civic_features(core_vdb)
len(set(civic_features))
raw_features.extend(set(civic_features))
###Output
_____no_output_____
###Markdown
JAX-CKB There is nothing to do for this resource: without harmonization or inference routines, we cannot ascribe variant names to coordinates. MolecularMatch
###Code
def create_raw_molecularmatch_features(vdb):
features = list()
mm = vdb.by_source('molecularmatch')
for interpretation in mm:
for mutation in interpretation['raw']['mutations']:
try:
coords = mutation['GRCh37_location'][0] # Take first coord only
except IndexError:
continue
try:
start = int(coords['start'])
stop = int(coords['stop'])
except TypeError:
continue
assert start <= stop
chromosome = coords['chr']
alt = coords['alt']
if not all([chromosome, start, stop]):
continue
f = (chromosome, start, stop)
features.append(f)
return features
mm_features = create_raw_molecularmatch_features(core_vdb)
len(set(mm_features))
raw_features.extend(set(mm_features))
###Output
_____no_output_____
###Markdown
OncoKB All mutations at protein level. PMKB
###Code
def create_raw_pmkb_features(vdb):
features = list()
pmkb = vdb.by_source('pmkb')
for interpretation in pmkb:
coordinates = interpretation['raw']['variant']['coordinates']
for coordinate in coordinates.split(', '):
chromosome, r = coordinate.split(':')
start, stop = r.split('-')
features.append((chromosome, int(start), int(stop)))
return features
pmkb_features = create_raw_pmkb_features(core_vdb)
len(set(pmkb_features))
raw_features.extend(set(pmkb_features))
###Output
_____no_output_____
###Markdown
Harmonized features
###Code
h_features = list()
for source in core_vdb.sources:
unique_source_features = set([x[0] for x in core_vdb.by_source(source).features])
print(f'{source}: {len(unique_source_features)}')
h_features.extend(unique_source_features)
len(h_features)
###Output
_____no_output_____
###Markdown
Comparison
###Code
raw_start = len(raw_features)
raw_uniq = len(set(raw_features))
h_start = len(h_features)
h_uniq = len(set(h_features))
1 - (h_uniq / h_start)
1 - (raw_uniq / raw_start)
###Output
_____no_output_____
###Markdown
Diseases
###Code
raw_diseases = list()
###Output
_____no_output_____
###Markdown
CGI
###Code
def create_raw_cgi_diseases(vdb):
cgi = vdb.by_source('cgi')
diseases = list()
for interpretation in cgi:
for d in interpretation['raw']['Primary Tumor type'].split(';'):
diseases.append(d.lower())
return diseases
cgi_diseases = create_raw_cgi_diseases(core_vdb)
raw_diseases.extend(set(cgi_diseases))
###Output
_____no_output_____
###Markdown
CIViC
###Code
def create_raw_civic_diseases(vdb):
civic = vdb.by_source('civic')
diseases = list()
for interpretation in civic:
disease = interpretation['raw']['evidence_items'][0]['disease']['display_name']
diseases.append(disease.lower())
return diseases
civic_diseases = create_raw_civic_diseases(core_vdb)
raw_diseases.extend(set(civic_diseases))
###Output
_____no_output_____
###Markdown
JAX-CKB
###Code
def create_raw_jax_diseases(vdb):
jax = vdb.by_source('jax')
diseases = list()
for interpretation in jax:
disease = interpretation['raw']['indication']['name']
diseases.append(disease.lower())
return diseases
jax_diseases = create_raw_jax_diseases(core_vdb)
raw_diseases.extend(set(jax_diseases))
###Output
_____no_output_____
###Markdown
MolecularMatch
###Code
def create_raw_mm_diseases(vdb):
mm = vdb.by_source('molecularmatch')
diseases = list()
for interpretation in mm:
disease = interpretation['raw']['includeCondition1'][0]
diseases.append(disease.lower())
return diseases
mm_diseases = create_raw_mm_diseases(core_vdb)
raw_diseases.extend(set(mm_diseases))
###Output
_____no_output_____
###Markdown
OncoKB
###Code
def create_raw_okb_diseases(vdb):
okb = vdb.by_source('oncokb')
diseases = list()
for interpretation in okb:
disease = interpretation['raw']['clinical']['cancerType']
diseases.append(disease.lower())
return diseases
okb_diseases = create_raw_okb_diseases(core_vdb)
raw_diseases.extend(set(okb_diseases))
###Output
_____no_output_____
###Markdown
PMKB
###Code
def create_raw_pmkb_diseases(vdb):
pmkb = vdb.by_source('pmkb')
diseases = list()
for interpretation in pmkb:
for tissue in interpretation['raw']['tissues']:
disease = ' '.join([tissue['name'], interpretation['raw']['tumor']['name']])
diseases.append(disease.lower())
return diseases
pmkb_diseases = create_raw_pmkb_diseases(core_vdb)
raw_diseases.extend(set(pmkb_diseases))
###Output
_____no_output_____
###Markdown
Harmonized diseases
###Code
h_diseases = list()
for source in core_vdb.sources:
unique_source_diseases = set([x.disease.term for x in core_vdb.by_source(source) if x.disease])
print(f'{source}: {len(unique_source_diseases)}')
h_diseases.extend(unique_source_diseases)
len(h_diseases)
###Output
_____no_output_____
###Markdown
Comparison
###Code
raw_start = len(raw_diseases)
raw_uniq = len(set(raw_diseases))
h_start = len(h_diseases)
h_uniq = len(set(h_diseases))
1 - (h_uniq / h_start)
1 - (raw_uniq / raw_start)
###Output
_____no_output_____
###Markdown
Drugs
###Code
raw_drugs = list()
###Output
_____no_output_____
###Markdown
CGI
###Code
def create_raw_cgi_drugs(vdb):
cgi = vdb.by_source('cgi')
drugs = list()
for interpretation in cgi:
for drug in interpretation['raw']['Drug full name'].split('+'):
drugs.append(drug.strip().lower())
return drugs
cgi_drugs = create_raw_cgi_drugs(core_vdb)
raw_drugs.extend(set(cgi_drugs))
###Output
_____no_output_____
###Markdown
CIViC
###Code
def create_raw_civic_drugs(vdb):
civic = vdb.by_source('civic')
drugs = list()
for interpretation in civic:
for drug in interpretation['raw']['evidence_items'][0]['drugs']:
drugs.append(drug['name'].lower())
return drugs
civic_drugs = create_raw_civic_drugs(core_vdb)
raw_drugs.extend(set(civic_drugs))
###Output
_____no_output_____
###Markdown
JAX-CKB
###Code
def create_raw_jax_drugs(vdb):
jax = vdb.by_source('jax')
drugs = list()
for interpretation in jax:
for drug in interpretation['raw']['therapy']['therapyName'].split('+'):
drugs.append(drug.strip().lower())
return drugs
jax_drugs = create_raw_jax_drugs(core_vdb)
raw_drugs.extend(set(jax_drugs))
###Output
_____no_output_____
###Markdown
MolecularMatch
###Code
def create_raw_mm_drugs(vdb):
mm = vdb.by_source('molecularmatch')
drugs = list()
for interpretation in mm:
try:
for drug in interpretation['raw']['includeDrug1']:
for drug2 in drug.split('+'):
drugs.append(drug2.strip().lower())
except KeyError:
continue
return drugs
mm_drugs = create_raw_mm_drugs(core_vdb)
raw_drugs.extend(set(mm_drugs))
###Output
_____no_output_____
###Markdown
OncoKB
###Code
def create_raw_okb_drugs(vdb):
okb = vdb.by_source('oncokb')
drugs = list()
for interpretation in okb:
for drug in interpretation['raw']['clinical']['drug'].split(','):
for drug2 in drug.strip().split('+'):
drugs.append(drug2.strip().lower())
return drugs
okb_drugs = create_raw_okb_drugs(core_vdb)
raw_drugs.extend(set(okb_drugs))
###Output
_____no_output_____
###Markdown
PMKB PMKB does not provide drug fields. Harmonized drugs
###Code
h_drugs = list()
for source in core_vdb.sources:
unique_source_drugs = set()
for interpretation in core_vdb.by_source(source):
for drug in interpretation.drugs:
unique_source_drugs.add(drug.term)
print(f'{source}: {len(unique_source_drugs)}')
h_drugs.extend(unique_source_drugs)
###Output
molecularmatch: 110
civic: 313
pmkb: 0
oncokb: 77
jax: 542
cgi: 200
###Markdown
Comparison
###Code
raw_start = len(raw_drugs)
raw_uniq = len(set(raw_drugs))
h_start = len(h_drugs)
h_uniq = len(set(h_drugs))
1 - (h_uniq / h_start)
1 - (raw_uniq / raw_start)
Counter([x['raw']['clinicalSignificance'] for x in core_vdb.by_source('molecularmatch')])
Counter([x['raw']['clinicalSignificance'] for x in vdb.by_source('molecularmatch')])
###Output
_____no_output_____
###Markdown
Export for Somatic Reference Sample project
###Code
import csv
with open('out/srs_export.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow([
'chromosome',
'start',
'stop',
'ref',
'alt',
'feature_label',
'evidence_level',
'drugs',
'disease_context',
'disease_id',
'interpretation_source',
'pmids',
])
for a in core_vdb:
pmids = '|'.join([str(x.pmid) for x in a.publications if x.pmid])
drugs = '|'.join([str(x) for x in a.drugs if x])
if a.evidence_level not in ['A', 'B']:
continue
for drug in a.drugs:
assert '|' not in str(drug)
for feature in a.features:
try:
feature_name = feature.name
except AttributeError:
feature_name = ''
try:
disease_name = a.disease.name
except AttributeError:
disease_name = a['association']['phenotype'].get('description', '')
try:
disease_id = a.disease.id
except AttributeError:
continue
out = [feature.chromosome, feature.start, feature.end,
feature.ref, feature.alt, feature_name,
a.evidence_level, drugs,
disease_name, disease_id, a.source, pmids]
if not (feature.ref and feature.alt):
continue
writer.writerow(out)
###Output
_____no_output_____
###Markdown
Troubleshooting
###Code
x = core_vdb[0]
f = x.features[0]
x.disease.id
len(unfiltered_patients_with_variants)
len(unfiltered_patients_with_variants) / len(patient_to_samples)
brca.report_groups()
###Output
0 total associations
###Markdown
Analyze the experimental results and generate a report
Authors:
* Juan Carlos Alfaro Jiménez
* Juan Ángel Aledo Sánchez
* José Antonio Gámez Martín
In this notebook, we analyze the experimental results and generate a report (`HTML` format). Below, we detail the steps.
1. Arguments
First, we add the command line arguments:
###Code
library(argparser)
description <- "Analysis of experimental results and report generation."
parser <- arg_parser(description)
###Output
_____no_output_____
###Markdown
* The path to the tables:
###Code
arg <- "--source"
default <- "tables"
help <- "Path to the tables"
parser <- add_argument(parser, arg, help, default = default)
###Output
_____no_output_____
###Markdown
* The path to the rendered file:
###Code
arg <- "--destination"
default <- "reports"
help <- "Path to the rendered file"
parser <- add_argument(parser, arg, help, default = default)
###Output
_____no_output_____
###Markdown
* The name of the target output variable:
###Code
arg <- "--output"
default <- "test_score"
help <- "Name of the target output variable"
parser <- add_argument(parser, arg, help, default = default)
###Output
_____no_output_____
###Markdown
* The optimization strategy of the target output variable:
###Code
arg <- "--rank"
default <- "max"
help <- "Optimization strategy of the target output variable"
parser <- add_argument(parser, arg, help, default = default)
###Output
_____no_output_____
###Markdown
* The number of decimal digits for the numeric output:
###Code
arg <- "--digits"
default <- 3
help <- "Number of decimal digits for the numeric output"
parser <- add_argument(parser, arg, help, default = default)
###Output
_____no_output_____
###Markdown
* The short title for the document:
###Code
arg <- "--title"
help <- "Short title for the document"
default <- "Report"
parser <- add_argument(parser, arg, help, default = default)
###Output
_____no_output_____
###Markdown
* The significance level used for the testing procedure:
###Code
arg <- "--alpha"
default <- 0.05
help <- "Significance level used for the testing procedure"
parser <- add_argument(parser, arg, help, default = default)
###Output
_____no_output_____
###Markdown
* The subset of methods to filter:
###Code
arg <- "--methods"
default <- ".*"
help <- "Subset of methods to filter"
parser <- add_argument(parser, arg, help, default = default)
###Output
_____no_output_____
###Markdown
* The subset of problems to filter:
###Code
arg <- "--problems"
default <- ".*"
help <- "Subset of problems to filter"
parser <- add_argument(parser, arg, help, default = default)
###Output
_____no_output_____
###Markdown
Now, we parse the command line arguments:
###Code
argv <- readLines(con = "arguments.txt", n = 1)
argv <- strsplit(argv, split = " ")
argv <- parse_args(parser, argv = argv)
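# arguments.txt is expected to hold a single line of command-line style flags;
# an illustrative example matching the defaults above would be:
# --source tables --destination reports --output test_score --rank max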
###Output
_____no_output_____
###Markdown
And rename the variables:
###Code
source <- argv$source
source
destination <- argv$destination
destination
output <- argv$output
output
rank <- argv$rank
rank
digits <- argv$digits
digits
title <- argv$title
title
alpha <- argv$alpha
alpha
methods <- argv$methods
methods
problems <- argv$problems
problems
###Output
_____no_output_____
###Markdown
2. Load Second, we get the file with the table (`source`):
###Code
source <- file.path("work", source, output, "mean.csv")
data <- read.csv(source, row.names = 1, check.names = FALSE)
###Output
_____no_output_____
###Markdown
And include a column with the `methods`:
###Code
data <- cbind(rownames(data), data)
colnames(data)[1] <- "method"
###Output
_____no_output_____
###Markdown
Then, we initialize the report document (`title`) and create the experiment object from a tabular representation:
###Code
library(exreport)
report <- exreport(title)
experiment <- expCreateFromTable(data, output, title)
###Output
_____no_output_____
###Markdown
Finally, we filter the methods (`methods`) and problems (`problems`):
###Code
library(stringr)
rows <- rownames(data)
cols <- colnames(data)
methods <- str_subset(rows, methods)
problems <- str_subset(cols, problems)
subset <- list(method = methods, problem = problems)
experiment <- expSubset(experiment, subset)
experiment <- expInstantiate(experiment)
###Output
_____no_output_____
###Markdown
3. Analyze Third, we summarize the experiment with a table and a plot for the given target output variable (`output`) according to the optimization strategy (`rank`) and number of decimal digits (`digits`):
###Code
tabular_exp_summary <- tabularExpSummary(experiment, output, rank, digits = digits)
plot_exp_summary <- plotExpSummary(experiment, output, columns = 5, freeScale = TRUE)
report <- exreportAdd(report, tabular_exp_summary)
report <- exreportAdd(report, plot_exp_summary)
###Output
_____no_output_____
###Markdown
Now, we perform a multiple comparison statistical test for the given experiment. In particular, we apply a *Friedman test* and a *post-hoc test* with the *Shaffer procedure*:
###Code
test_multiple_pairwise <- testMultiplePairwise(experiment, output, rank, alpha)
tabular_test_pairwise <- tabularTestPairwise(test_multiple_pairwise)
report <- exreportAdd(report, test_multiple_pairwise)
report <- exreportAdd(report, tabular_test_pairwise)
###Output
_____no_output_____
###Markdown
And the *Holm procedure*:
###Code
metrics <- c("rank", "pvalue", "wtl")
test_multiple_control <- testMultipleControl(experiment, output, rank, alpha)
tabular_test_summary <- tabularTestSummary(test_multiple_control, metrics)
plot_rank_distribution <- plotRankDistribution(test_multiple_control)
report <- exreportAdd(report, test_multiple_control)
report <- exreportAdd(report, tabular_test_summary)
report <- exreportAdd(report, plot_rank_distribution)
###Output
_____no_output_____
###Markdown
4. Generate Fourth, we generate the report (`destination`):
###Code
destination <- file.path("work", destination, output)
dir.create(destination, showWarnings = FALSE, recursive = TRUE)
exreportRender(report, destination, target = "html", safeMode = FALSE, visualize = FALSE)
###Output
_____no_output_____
###Markdown
Finally, we write the destination directory to a file for the `HTML` export:
###Code
writeLines(destination, "destination.txt")
###Output
_____no_output_____
###Markdown
Setup
###Code
import pandas as pd
import numpy as np
import regex as re
###Output
_____no_output_____
###Markdown
Load and transform data
###Code
df_original = pd.read_csv('data.csv')
df_original.info()
df = df_original.rename({
"Message Id": "msg_id", "Time": "time", "Sender Name": "sender",
"Reply Id": "reply_id", "Message": "msg"
}, axis=1)
df.reply_id = df.reply_id.fillna(-1).astype(np.int64)
df.time = df.time.astype(np.datetime64)
df.set_index('msg_id', inplace=True)
df.msg.fillna('', inplace=True)
###Output
_____no_output_____
###Markdown
Anonymize
###Code
def gen_names_map(names):
new_name = iter(["Michael","Christopher","Jessica","Matthew","Ashley","Jennifer","Joshua","Amanda","Daniel","David","James","Robert","John","Joseph","Andrew","Ryan","Brandon","Jason","Justin","Sarah","William","Jonathan","Stephanie","Brian","Nicole","Nicholas","Anthony","Heather","Eric","Elizabeth","Adam","Megan","Melissa","Kevin","Steven","Thomas","Timothy","Christina","Kyle","Rachel","Laura","Lauren","Amber","Brittany","Danielle","Richard","Kimberly","Jeffrey","Amy","Crystal","Michelle","Tiffany","Jeremy","Benjamin","Mark","Emily","Aaron","Charles","Rebecca","Jacob","Stephen","Patrick","Sean","Erin","Zachary","Jamie","Kelly","Samantha","Nathan","Sara","Dustin","Paul","Angela","Tyler","Scott","Katherine","Andrea","Gregory","Erica","Mary","Travis","Lisa","Kenneth","Bryan","Lindsey","Kristen","Jose","Alexander","Jesse","Katie","Lindsay","Shannon","Vanessa","Courtney","Christine","Alicia","Cody","Allison","Bradley","Samuel","Shawn","April","Derek","Kathryn","Kristin","Chad","Jenna","Tara","Maria","Krystal","Jared","Anna","Edward","Julie","Peter","Holly","Marcus","Kristina","Natalie","Jordan","Victoria","Jacqueline","Corey","Keith","Monica","Juan","Donald","Cassandra","Meghan","Joel","Shane","Phillip","Patricia","Brett","Ronald","Catherine","George","Antonio","Cynthia","Stacy","Kathleen","Raymond","Carlos","Brandi","Douglas","Nathaniel","Ian","Craig","Brandy","Alex","Valerie","Veronica","Cory","Whitney","Gary","Derrick","Philip","Luis","Diana","Chelsea","Leslie","Caitlin","Leah","Natasha","Erika","Casey","Latoya","Erik","Dana","Victor","Brent","Dominique","Frank","Brittney","Evan","Gabriel","Julia","Candice","Karen","Melanie","Adrian","Stacey","Margaret","Sheena","Wesley","Vincent","Alexandra","Katrina","Bethany","Nichole","Larry","Jeffery","Curtis","Carrie","Todd","Blake","Christian","Randy","Dennis","Alison","Trevor","Seth","Kara","Joanna","Rachael","Luke","Felicia","Brooke","Austin","Candace","Jasmine","Jesus","Alan","Susan","Sandra","Tracy","Kayla","Nancy","Tina","Krystle","Russell","Jeremiah","Carl","Miguel","Tony","Alexis","Gina","Jillian","Pamela","Mitchell","Hannah","Renee","Denise","Molly","Jerry","Misty","Mario","Johnathan","Jaclyn","Brenda","Terry","Lacey","Shaun","Devin","Heidi","Troy","Lucas","Desiree","Jorge","Andre","Morgan","Drew","Sabrina","Miranda","Alyssa","Alisha","Teresa","Johnny","Meagan","Allen","Krista","Marc","Tabitha","Lance","Ricardo","Martin","Chase","Theresa","Melinda","Monique","Tanya","Linda","Kristopher","Bobby","Caleb","Ashlee","Kelli","Henry","Garrett","Mallory","Jill","Jonathon","Kristy","Anne","Francisco","Danny","Robin","Lee","Tamara","Manuel","Meredith","Colleen","Lawrence","Christy","Ricky","Randall","Marissa","Ross","Mathew","Jimmy"])
result = {}
for name in names:
result[name] = next(new_name)
return result
names_map = gen_names_map(df.sender.unique())
df.sender.replace(names_map, inplace=True)
df.msg.replace(names_map, inplace=True)
# Replace @mentions that match a known sender with that sender's anonymized
# name; mask all other mentions with '*' (handles ending in 'bot' are skipped).
df.msg = df.msg.str.replace(r'(?<!\w@)\b(?<=@)(\w+)(?<!bot)\b', flags=re.I,
    repl=lambda m: names_map[m[0]] if m[0] in names_map else re.sub('.', '*', m[0]))
# Mask the digits of anything that looks like a phone number (optionally with
# the +55 country code).
df.msg = df.msg.str.replace(r'((?<![\d\=\-_]|(?<!\\)[A-z])(?:\(?\+?55\)?)? ?(?:\(?0?[2-9]\d\)?)? ?(?:9[ \.]?)?[1-9]\d{3}[ \-]?\d{4}\b)', flags=re.I,
    repl=lambda m: re.sub('\d', '*', m[0]))
df.info()
df.head()
df.to_csv('out.csv')
###Output
_____no_output_____
###Markdown
...
###Code
msg_cnt = df.sender.value_counts()
df = df[~df.sender.isin(msg_cnt[msg_cnt == 1].index)]
reply_df = df[df.reply_id != -1]
replied_df = df[df.index.isin(reply_df.reply_id)]
links_df = df[df.msg.str.contains(r'https?://[\w\-\.]+')]
has_emoji_df = df[df.msg.str.match(r"[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F1E0-\U0001F1FF]")]
members_df = pd.DataFrame(index=df.sender.unique())
members_df['reply_percent'] = (
reply_df.sender.value_counts() / df.sender.value_counts()
).fillna(0).apply(
lambda p: '< 1%' if p < 0.01 else '>= 1%')
members_df.reply_percent.value_counts()
members_df['replied_percent'] = (
replied_df.sender.value_counts() / df.sender.value_counts()
).fillna(0).apply(
lambda p: '< 10%' if p < 0.10 else '>= 10%')
members_df.replied_percent.value_counts()
members_df['link_percent'] = (
links_df.sender.value_counts() / df.sender.value_counts()
).fillna(0).apply(
lambda p: '< 5%' if p < 0.05 else '>= 5%')
# Removing the links so that it will not affect the size of the messages.
df.msg = df.msg.str.replace(r'https?://.+', '')
members_df.link_percent.value_counts()
df['msg_size'] = df.msg.str.len()
members_df['msg_mean_size'] = df.groupby('sender').msg_size.mean().apply(
lambda s: '< 10 characters' if s < 10 else '< 200 characters' if s < 200 else '>= 200 characters')
members_df.msg_mean_size.value_counts()
members_df['msg_cnt'] = df.sender.value_counts().apply(
lambda c: '< 5' if c < 5 else '>= 5')
members_df.msg_cnt.value_counts()
members_df['uses_emoji'] = (has_emoji_df.sender.value_counts() / df.sender.value_counts()).apply(
lambda p: 'Yes' if p > 0 else "No")
members_df.uses_emoji.value_counts()
import plotly.graph_objects as go
# Create dimensions
msg_cnt_dim = go.parcats.Dimension(
values=members_df.msg_cnt,label="Number of messages"
)
msg_mean_size_dim = go.parcats.Dimension(
values=members_df.msg_mean_size,label="Messages size"
)
replied_percent_dim = go.parcats.Dimension(
values=members_df.replied_percent,label="Percent of replied messages"
)
reply_percent_dim = go.parcats.Dimension(
values=members_df.reply_percent,label="Percent of reply messages"
)
link_percent_dim = go.parcats.Dimension(
values=members_df.link_percent,label="Percent of messages with hyperlinks"
)
uses_emoji_dim = go.parcats.Dimension(
values=members_df.uses_emoji,
categoryorder='category ascending', label="Uses emoji"
)
group_one = np.int32(members_df.uses_emoji == 'Yes')
group_two = ((members_df.msg_cnt == '< 5') &
(members_df.replied_percent == '< 10%') &
(members_df.reply_percent == '< 1%'))
color = np.int32(group_one) + np.int32(group_two)*2
colorscale = [[0, 'lightsteelblue'], [0.5, 'mediumseagreen'], [1, 'lightsalmon']];
fig = go.Figure(data = [
go.Parcats(
dimensions=[msg_cnt_dim, replied_percent_dim,
reply_percent_dim, uses_emoji_dim],
line={'color': color, 'colorscale': colorscale},
labelfont={'size': 18, 'family': 'Times'},
tickfont={'size': 16, 'family': 'Times'},
arrangement='freeform')])
fig.layout = {
'title': ('The relation between the use of emotes and the type of interaction' +
' over a Telegram Group')
}
fig.show()
group_one = ((members_df.msg_cnt == '>= 5') &
(members_df.replied_percent == '>= 10%') &
(members_df.reply_percent == '>= 1%'))
group_two = ((members_df.msg_cnt == '< 5') &
(members_df.replied_percent == '< 10%') &
(members_df.reply_percent == '< 1%'))
color = np.int32(group_one) + np.int32(group_two)*2
colorscale = [[0, 'lightsteelblue'], [0.5, 'peru'], [1, 'lightsalmon']];
fig = go.Figure(data = [
go.Parcats(
dimensions=[msg_cnt_dim, replied_percent_dim,
reply_percent_dim],
line={'color': color, 'colorscale': colorscale},
labelfont={'size': 18, 'family': 'Times'},
tickfont={'size': 16, 'family': 'Times'},
arrangement='freeform'
)])
fig.layout = {
'title': ('The relation between the proportion of replies and the replied' +
' messages over a Telegram Group')
}
fig.show()
fig = go.Figure(data = [
go.Parcats(
dimensions=[msg_cnt_dim, replied_percent_dim,
reply_percent_dim, uses_emoji_dim, link_percent_dim, msg_mean_size_dim],
labelfont={'size': 18, 'family': 'Times'},
tickfont={'size': 16, 'family': 'Times'},
arrangement='freeform')])
fig.layout = {
'title': ('The interaction' +
' over a Telegram Group')
}
fig.show()
###Output
_____no_output_____
###Markdown
Sample Analysis
Coy Zimmermann
Last updated 8/6/2021
The purpose of this notebook is to run you through a typical analysis of data generated from link.py. Use this as a starting point to retrieve the data you need for your purposes.
I really like [seaborn](https://seaborn.pydata.org/index.html) for plotting data in Python. If there are any plots you want to make, look through the documentation!
###Code
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import numpy as np
from scipy import signal, fftpack
from scipy.stats import linregress
import glob
import cv2
import trackpy as tp
# Plot defaults
mpl.rcParams['svg.fonttype'] = 'none'
mpl.rcParams['font.sans-serif'] = "Arial"
mpl.rcParams['font.family'] = "sans-serif"
mpl.rcParams['figure.figsize'] = 10, 8
sns.set_context('talk', font_scale=1.3)
df = pd.read_csv('linked_results/210806_1141AM.csv')
df.drop(labels=['Unnamed: 0'], inplace=True, axis=1) # drop unused column
###Output
_____no_output_____
###Markdown
Alright, what does the data we generated from link.py look like? `df.head()` will display the first 5 rows of our dataframe.
###Code
df.head()
###Output
_____no_output_____
###Markdown
Description of columns
* `Area` - area of particle in pix^2 for each point in time
* `x`, `y` - location in pixels. The origin is the top left corner of the video.
* `XM`, `YM` - location in µm.
* `Major`, `Minor` - major and minor axes of the fit ellipse, taken directly from the ImageJ particle analysis. [More information](https://imagej.nih.gov/ij/docs/menus/analyze.html)
* `frame` - frame number
* `particle` - particle number *in the given video*
* `dx`, `dy` - centered difference derivative of position, i.e. velocity in pix/frame
* `time` - time in seconds.
* `dv` - magnitude of the velocity in pix/frame, $\sqrt{dx^2 + dy^2}$
* `dv_m` - same as `dv`, but in µm/s
* `Area_m` - area in microns^2
* `dx_m` - horizontal velocity in µm/s
* `filename` - name of the video
* `particle_u` - identifier for a wheel that is a combination of the `filename` and `particle`. Allows analysis across all videos.
Time Series Plots
###Code
# Example plot, plot velocity over time for each particle
# sns.lineplot(data=df, x='time', y='dv_m', hue='particle_u', legend=False)
# ax = plt.gca()
# # ax.set_xlim(left=0)
# ax.set_ylim(bottom=0)
# ax.set(xlabel='Time (s)', ylabel='Velocity (µm/s)')
###Output
_____no_output_____
###Markdown
Checking trajectories
It's often useful to look at the trajectories overlaid on a frame of the video to see if the tracking is working correctly or just following dust. `video_wheels.py` will generate an entire video, but that will take a while on a slow computer. This next cell will just show a single frame of the video.
###Code
def show_trajectories(VIDEO_NAME, FRAME):
"""
Given the filename of the video and a valid frame, display a frame in the video with trajectories overlaid and annotated.
"""
# video always indexes at 0, it is unaware of any clipping in imagej.
df_check = df[df['filename'] == VIDEO_NAME]
f_start, f_end = [df_check['frame'].unique().min(), df_check['frame'].unique().max()]
chosen_frame_img = FRAME - f_start
vid_path = f'original_video/{VIDEO_NAME}/*.tif'
img_names = glob.glob(vid_path)
loaded_img = cv2.imread(img_names[chosen_frame_img])
fig, ax = plt.subplots()
plt.imshow(loaded_img)
tp.plot_traj(df_check.query(f'frame<={FRAME}'), label=True)
return fig
#fig = show_trajectories(VIDEO_NAME='05182021_2', FRAME=481)
###Output
_____no_output_____
###Markdown
FFT
So above you can see that the `df` dataframe we generated from link.py is time series data for every particle. You might notice there is some periodicity in the data; we can *sometimes* use this to extract a good guess of the rotation rate using the fast Fourier transform (FFT). See `ffttest.ipynb` for the functions and tests for using the FFT to get the rotation rate of a µwheel.
The next cell contains the functions to do this. I really encourage you to walk through this code and try to understand what's going on. This is sometimes tricky and may require some debugging. For example, midway through the function you could ask it to save or return a plot of the actual spectrum, like what is shown in `ffttest.ipynb`.
Also, I've had the most success using the `Angle` of the fit ellipse to get a reliable rotation rate. I've found that one period of the `Angle` corresponds to one *half rotation*. However, this may not work for you. One of the arguments to the function is `y`, which is the column of the data it will take the FFT with respect to.
###Code
def compute_fft(data, y, particle_u):
"""Compute the finite fourier transform of the chosen particle.
Compute the fft using scipy. Use the a detrended column to determine the frequency at which the
eccentricity of the wheel changes. I used this video for a background on the discrete fourier transform.
https://www.youtube.com/watch?v=mkGsMWi_j4Q
Args:
data: dataframe
y: column to take the fft with respect to
particle_u: particle to take the fft of
Returns:
Dataframe column containing the fft of y
"""
data = data[data['particle_u'] == particle_u] # pull only the data matching input `particle_u`
num_frames = int(len(data)) # the number of frames, or sample points
# sample spacing
spf = data['time'].values[1] - data['time'].values[0] # seconds per frame
y = data[y]
detrend_y = signal.detrend(y) # Detrend
fft_particle = fftpack.fft(detrend_y) # Compute fft.
t_fi = pd.DataFrame() # Initialize empty dataframe to store xf and yf columns
# Actually calculating the FFT (formulas from examples and youtube video)
t_fi['yf'] = 2.0 / num_frames * np.abs(fft_particle[:num_frames // 2])
# 'np.abs' computes the magnitude of the complex number. fft_particle[:N//2] selects the first N/2 points.
# Its multiplied by 2 to account for the mirrored half of the spectrum, then normalized by N.
t_fi['xf'] = np.linspace(start=0.0, stop=1.0 / (2.0 * spf),
num=num_frames // 2) # The double divison sign is integer division.
t_fi['particle'] = np.full(len(t_fi), particle_u)
highest_peak_frequency = t_fi.loc[t_fi['yf'].idxmax(), 'xf'] # pulls out the frequency that corresponds to the highest peak
return highest_peak_frequency
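# --- Illustrative self-check (made-up data, not part of the original analysis) ---
# Build a synthetic particle whose 'Angle' oscillates at a known 0.5 Hz and feed it to
# compute_fft; the recovered peak frequency should come back as roughly 0.5.
_t = np.arange(0, 20, 0.05)  # 20 s of data sampled at 20 frames per second
_demo = pd.DataFrame({'time': _t,
                      'Angle': 45 * np.sin(2 * np.pi * 0.5 * _t),
                      'particle_u': 'demo'})
# compute_fft(data=_demo, y='Angle', particle_u='demo')  # expected: ~0.5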
def batch_fft(data, data_grouped):
"""
For each particle in the `data` dataframe, run the above compute_fft function, which outputs the frequency which corresponds to the highest peak.
Take this peak for each particle and combine it with a grouped dataframe.
"""
temp = []
for p in data['particle_u'].unique(): # run compute_fft for every particle in the input data
peak = compute_fft(data=data, y='Angle', particle_u=p)
temp.append([p, peak])
# Assemble it all into a dataframe that has a column for particle_u and a column for the frequency guess.
dfpeaks = pd.DataFrame(temp)
dfpeaks.rename(columns={0: 'particle_u', 1:'freq_guess'}, inplace=True)
dfpeaks.set_index('particle_u', inplace=True)
# Merge with a grouped dataframe. I.E. the dataframe only contains one entry for each particle_u. Most often the mean of each particle over time.
dfmerge = data_grouped.merge(dfpeaks, on='particle_u')
dfmerge['omega'] = dfmerge['freq_guess'] * np.pi
return dfmerge
###Output
_____no_output_____
###Markdown
Selecting Wheels
Using the Excel file `chosenwheels.xls`, enter particles that you either want to blacklist or whitelist. Specify which type you'd like to use with the `filter_type` keyword argument (kwarg) when calling the function.
###Code
dfilter = pd.read_excel('chosenwheels.xls')
def filterwheels(a, b, filter_type="whitelist"):
"""
Filter dataframe `a` using columns from the filter dataframe `b`.
Based on `filter_type`
"""
if filter_type == "whitelist":
# Only take particle_u's
df1 = a[a.particle_u.isin(b.particle_u)]
elif filter_type == "blacklist":
# Take everything BUT particle_u's
df1 = a[~a.particle_u.isin(b.particle_u)]
return df1
else:
raise Exception("Invalid filter type. filter_type must be whitelist or blacklist.")
# Only take the frames specified if not NaN
# TODO Something in here is removing some of the particles I want
res = pd.DataFrame()
for p in df1.particle_u.unique():
df_sub = df1[df1.particle_u == p] # create subset dataframe of df1 with only particle_u `p`'s data
chosen_frames = b[b.particle_u == p]['frames'].values[0] # extract the `frames` column value from the filter dataframe
# Extract the chosen frames for the given particle_u `p`
if str(chosen_frames) != "nan": # if there's an entry
first, last = chosen_frames.split(':') # look at the range
# convert wildcard ! to the first or last frame
if first == "!":
first = df_sub['frame'].min()
else:
first = int(first)
if last == "!":
last = df_sub['frame'].max()
else:
last = int(last)
else: # take all frames
first = df_sub['frame'].min()
last = df_sub['frame'].max()
# Execute the filtering
clipped = df_sub[df_sub.frame.isin(np.arange(first, last+1))] # last+1 because arange does not include the last value
if len(clipped) < 100:
print(f"Warning: Particle {p} with chosen_frames {first} and {last} is {len(clipped)} frames.")
res = pd.concat([res, clipped]) # add to result
return res
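# --- Illustrative sketch of the filter file layout (made-up particle ids, not read from disk) ---
# filterwheels() expects the filter dataframe to carry a 'particle_u' column and a 'frames'
# column holding "first:last" ranges, where "!" stands in for the particle's first/last frame
# and an empty cell keeps every frame.
_example_filter = pd.DataFrame({'particle_u': ['05182021_2-3', '05182021_2-7'],
                                'frames': ['!:250', np.nan]})
# filterwheels(df, _example_filter, filter_type="whitelist")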
df_filtered = filterwheels(df, dfilter, filter_type="blacklist")
# df_filtered['particle_u'].unique()
###Output
_____no_output_____
###Markdown
Grouped Data
Since time series data is a bit messy, we can take each trajectory and compute the mean of the columns of interest. We expect the mean of all of the velocity data to be the steady-state velocity.
###Code
df_grouped = df_filtered.groupby('particle_u')[['dx_m', 'Area_m', 'dv_m', 'Major', 'Minor']].mean()
df_grouped.head()
df_grouped.describe()
###Output
_____no_output_____
###Markdown
Now, using the above FFT functions, we can feed in the time series data in `df` as well as our new grouped dataframe to get a new column for our frequency guess (1/s) and calculated `omega` (radians/s).
###Code
dfg = batch_fft(df_filtered, df_grouped)
dfg['filename'] = dfg.index.str.split('-').str[0]
dfg.head()
###Output
_____no_output_____
###Markdown
Lastly, we can add a few columns that are useful for plotting.
###Code
CAMBER_ANGLE = 30 # degrees
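# The wheel is tilted by the camber angle, so the measured area is foreshortened in the image;
# dividing by cos(CAMBER_ANGLE) below estimates the full (un-tilted) wheel area.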
dfg['area_flat'] = dfg['Area_m'] / np.cos(np.radians(CAMBER_ANGLE))
dfg['R'] = np.sqrt(dfg['area_flat'] / np.pi)
dfg.describe()
###Output
_____no_output_____
###Markdown
Plotting
Here, we plot the data. Seaborn has a variety of features to allow for great plots. Here I'll walk you through a typical process for plotting according to an experimental condition.
The basic process is using a scatterplot command like:
```
sns.scatterplot(data=dfg, x='R', y='dv_m', edgecolor='k', hue='filename', alpha=0.7)
```
By using the `hue` parameter, we can show a third variable in our scatterplot. Here I'll name the column I'd like to use and Seaborn will do the rest.
See this tutorial (https://pandas.pydata.org/docs/getting_started/intro_tutorials/10_text_data.html) to figure out how to extract experimental conditions from the filename into a new column.
###Code
sns.scatterplot(data=dfg, x='R', y='dv_m', edgecolor='k', hue='filename', alpha=0.7)
ax = plt.gca()
ax.set_xlim(left=0)
ax.set_ylim(bottom=0)
ax.set(xlabel="R (µm)", ylabel="Velocity (µm/s)")
# uncomment this to save the figure
# plt.savefig('plotnamehere.png', dpi=400)
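# --- Hypothetical sketch (the filename layout below is an assumption) ---
# If filenames encoded an experimental condition, pandas string methods could split it
# into a new column to pass as `hue=` in the scatterplot above.
_toy = pd.DataFrame({'filename': ['05182021_2pctFicoll', '05192021_5pctFicoll']})
_toy['condition'] = _toy['filename'].str.split('_').str[1]
# _toy['condition'] -> ['2pctFicoll', '5pctFicoll']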
sns.histplot(data=dfg, x='dv_m', kde='True', stat='density', common_norm=False)
ax = plt.gca()
ax.set_xlim(left=0)
ax.set(xlabel='Mean µWheel Velocity (µm)')
sns.scatterplot(data=dfg, x='R', y='omega')
ax = plt.gca()
ax.set_xlim(left=0)
ax.set_ylim(bottom=0)
ax.set(xlabel="R (µm)", ylabel="ω (1/s)")
###Output
_____no_output_____
###Markdown
Generate new data
###Code
%%bash
data_dir="./data/clean/wd50k"
cp -r ${data_dir}/statements ${data_dir}/statements_switch
data_dir = "./data/clean/wd50k/statements_switch"
modify.aug_switch(data_dir + "/train.txt", data_dir + "/train_switch.txt")
%%bash
data_dir="./data/clean/wd50k/statements_switch"
mv ${data_dir}/train_switch.txt ${data_dir}/train.txt
a=torch.randn(5)
print(a)
print(torch.argsort(a, descending=True))
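# Taking argsort of the (descending) argsort gives, for each element of `a`, its rank in descending order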
torch.argsort(torch.argsort(a, descending=True))
###Output
tensor([-0.2562, -0.5812, -1.1616, -0.9717, -0.4676])
tensor([0, 4, 1, 3, 2])
###Markdown
Solving the [8-puzzle](https://8-puzzle.readthedocs.io/en/latest/) (with a slight twist)
Heuristic search vs uninformed search, admissibility, performance tests, empirical results, and some charts! Fun stuff ahead.
Instead of only being able to slide tiles vertically and horizontally (with a cost of 1), when the empty tile is at a corner, the tiles on the same row (or on the same column) can wrap around (if you have more than two rows) with a cost of 2. Similarly, the diagonal tiles (both the adjacent one and the opposing corner) can slide into an empty corner with a cost of 3. We use the number 0 to represent the empty tile.
###Code
# you need to install these in your env to run this notebook
import numpy as np
from tqdm import tqdm
import time
import matplotlib.pyplot as plt
import copy
# local modules, no installs needed
from board import Board
from node import Node
import search
from heuristics import hamming_distance, manhattan_distance, row_col_out_of_place, euclidean_distance, permutation_inversion
###Output
_____no_output_____
###Markdown
We have implemented a bunch of heuristics, but we don't know which are the best for our problem. We know from theory that Hamming distance and Manhattan distance are both [admissible](https://en.wikipedia.org/wiki/Admissible_heuristic) and that permutation inversion is not. But we came up with two new heuristics and would like to know about their admissibility.
1. Rows and columns out of place: the sum of all tiles that are out of their goal row position and all tiles that are out of their goal column position. For example, if a tile is out of both its row and column position it adds 2 to the running sum of the heuristic, if it's out of its row XOR its column it adds 1 to the sum, and if the tile is in its goal position it adds 0 to the heuristic sum. We expect it to be admissible because, when conceptually compared with Hamming and Manhattan distance, this heuristic seems to be more optimistic than Manhattan distance but slightly less optimistic than Hamming distance. It is more optimistic than Manhattan because it essentially has only three states per tile: in place, out of place along one axis, or out of place along both axes. Manhattan distance will add more estimated cost to tiles that are further away from their goal position, making it less optimistic than this heuristic. (A short sketch of this heuristic follows the puzzle-generation code below.)
2. Euclidean distance: the sum of the Euclidean distances between each tile and its goal position. Euclidean distance should be more optimistic than Manhattan distance for the following reason: given any two coordinates on the board, Manhattan distance counts the number of steps the tile has to move, but the tile is restricted to moves that are parallel to the x-axis or the y-axis. If you were to take any two tiles and draw the Euclidean and Manhattan distances between them, you would get a right triangle where the Euclidean distance draws the hypotenuse and the Manhattan distance draws the opposite and adjacent sides. The hypotenuse is always smaller than the sum of the two other sides, which is why we expect Euclidean distance to be admissible.
In order to validate our intuition, we will generate 100 random [8-puzzles](https://8-puzzle.readthedocs.io/en/latest/) in 2x4 and 3x3 format and run the A* and Greedy Best First Search algorithms using these heuristics. Of course, to verify whether we get the shortest-cost solution path to those puzzles, we will also run Uniform Cost Search to obtain the shortest paths with certainty.
###Code
HEURISTICS = [hamming_distance, manhattan_distance, row_col_out_of_place, euclidean_distance, permutation_inversion]
NUMBER_PUZZLES = 50
two_by_two = [np.random.permutation(8).reshape(2,4) for _ in range(NUMBER_PUZZLES)]
three_by_three = [np.random.permutation(9).reshape(3,3) for _ in range(NUMBER_PUZZLES)]
print(f"Here are the {NUMBER_PUZZLES} random 2x4 puzzles")
for puzzle in two_by_two:
print(puzzle)
print(f"Here are the {NUMBER_PUZZLES} random 3x3 puzzles")
for puzzle in three_by_three:
print(puzzle)
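# --- Illustrative sketch only; the project's real implementation lives in heuristics.py ---
# "Rows and columns out of place" counts, for every non-blank tile, whether its current row
# and/or column differ from its goal row/column. The goal layout is assumed here to be
# 1..n-1 in reading order with the blank (0) last, which may differ from the project's.
def row_col_out_of_place_sketch(board):
    rows, cols = board.shape
    goal = {v: divmod(i, cols) for i, v in enumerate(list(range(1, rows * cols)) + [0])}
    total = 0
    for r in range(rows):
        for c in range(cols):
            tile = board[r, c]
            if tile == 0:
                continue  # the blank tile never contributes
            goal_r, goal_c = goal[tile]
            total += (r != goal_r) + (c != goal_c)
    return total
# row_col_out_of_place_sketch(two_by_two[0])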
def build_experiment_object():
'''Build's the structure that will run the experiment and hold results'''
experiment = {}
for func in HEURISTICS:
experiment[func.__name__] = {
'func': func,
'algos': {
'GBF': {
'func': search.greedy_best_first,
'shape': {
(2, 4): {'results': []},
(3, 3): {'results': []}
}
},
'A*': {
'func': search.a_star,
'shape': {
(2, 4): {'results': []},
(3, 3): {'results': []}
}
}
},
}
return experiment
experiment = build_experiment_object()
def run_experiment(experiment: dict, puzzles: list):
for heurist_name in tqdm(experiment):
heuristic_func = experiment[heurist_name]['func']
for algo_name in experiment[heurist_name]['algos']:
algo_func = experiment[heurist_name]['algos'][algo_name]['func']
for puzzle in puzzles:
b = Board(puzzle)
result: dict = algo_func(board=b, H=heuristic_func)
experiment[heurist_name]['algos'][algo_name]['shape'][puzzle.shape]['results'].append(result)
return experiment
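# Once run_experiment() has been called, results can be pulled out of the nested dict,
# e.g. (illustrative path): experiment['manhattan_distance']['algos']['A*']['shape'][(2, 4)]['results']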
###Output
_____no_output_____
###Markdown
Let's Run A* and Greedy Best First search on the 50 2x4 puzzles
###Code
print('\nRunning 2 search algorithms on 50 puzzles, 5 different times to test the 5 heuristics:')
start = time.time()
run_experiment(experiment,two_by_two)
elapsed = round(time.time()-start, 2)
print(f'\n\nTotal of 2x50x5 = {2*50*5} puzzles solved in {elapsed} seconds')
###Output
0%| | 0/5 [00:00<?, ?it/s]
Running 2 search algorithms on 50 puzzles, 5 different times to test the 5 heuristics:
100%|██████████| 5/5 [01:21<00:00, 16.29s/it]
Total of 2x50x5 = 500 puzzles solved in 81.45 seconds
###Markdown
Imports for the Analysis
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import scipy
from scipy import stats
###Output
_____no_output_____
###Markdown
Reading in the Data
###Code
# DataFrame for Crime Data Between 1996-2007
df1 = pd.read_csv('seattle_data/seattle-crime-stats-by-1990-census-tract-1996-2007.csv')
# DataFrame for Crime Data from 2008-Present
df2 = pd.read_csv('seattle_data/seattle-crime-stats-by-police-precinct-2008-present.csv')
###Output
_____no_output_____
###Markdown
What kinds of information does each set of years tell us? To find out, let's take a look through some of the rows in the DataFrames.
###Code
df1.head()
df2.loc[3:20]
df2.loc[21:41]
###Output
_____no_output_____
###Markdown
Purpose of the Analysis
The goal of this exploratory data analysis is to understand trends in crime for the City of Seattle over roughly the past 24 years. The outcome of asking and answering these questions about the data will hopefully lead us to suggestions for how urban communities in Seattle and elsewhere can improve.
Question 1: How has the amount of crime in Seattle changed over the years 1996-Present?
###Code
def count_annual_crimes(df):
"""Return a dict of all the years in a DataFrame, with the total number of crimes that occured in that year."""
# figure out which df we have, and which years we have data for in the df
if 'Report_Year' in df.columns:
years = df['Report_Year'].unique()
else:
reported_years = pd.Series([int(date[:4]) for date in df['REPORT_DATE']])
years = sorted(reported_years.unique())
# iterate over rows of dataset, and count up the total crimes in each year
year_crimes = dict()
# separate algorithms for one df over the other
# condition for 1996-2007
if 'Report_Year' in df.columns:
for index, year in enumerate(years):
# grab all the rows with this year, and sum crime numbers
crimes_ls = (df.loc[df['Report_Year'] == year])['Report_Year_Total'].dropna()
# print(crimes_ls)
crimes = sum([int(crime) for crime in crimes_ls])
year_crimes[year] = crimes
# condition for 2008-present
else:
for year in years:
crimes = len([date for date in df['REPORT_DATE'].dropna().values if str(year) in date])
year_crimes[year] = crimes
return year_crimes
annual_crimes_1996 = count_annual_crimes(df1)
annual_crimes_2008 = count_annual_crimes(df2)
# combine the two dictionaries
annual_crime_counts = annual_crimes_1996.copy()
annual_crime_counts.update(annual_crimes_2008)
###Output
_____no_output_____
###Markdown
Line Graph Representation
###Code
years, annual_crimes = (
list(annual_crime_counts.keys()),
list(annual_crime_counts.values())
)
plt.title("Annual Crimes in Seattle, 1996-2014")
plt.xlabel('Year')
plt.ylabel('Crimes')
plt.plot(years, annual_crimes)
plt.show()
###Output
_____no_output_____
###Markdown
Question 2: What Kinds of Crime Were Most Prevalent over 1996-2007? 2008-2014?
###Code
def make_pie_chart(x, labels, title):
"""Creates a pie chart to represent the data, x.
Depends on Matplotlib.
Parameters:
x(list of numbers): data to plot
labels(list of str): must be same length as x,
and index position of each str corresponds
with whichever group in x that it labels
title(str): descriptive name for the plot as a whole
Returns: None
"""
plt.pie(x, labels=labels, autopct='%1.1f%%')
plt.title(title)
plt.show()
# Store variables for column names
ct, reported = 'Crime_Type', 'Report_Year_Total'
# For each DataFrame, we can determine all the different crime types
crime_types_96 = df1[ct].unique()
# For 1996-2007, count amounts of different crime types
crime_counts_by_type = list()
for crime in crime_types_96:
counts = df1.loc[df1[ct] == crime, [reported]].values
counts = counts.reshape(1, -1)[0]
crime_count = sum(counts)
crime_counts_by_type.append(crime_count)
# Pie Chart Representation
make_pie_chart(crime_counts_by_type, crime_types_96,
"Types of Crime in Seattle, 1996-2007")
# Find the Crime Types for 2008-2014
crime_types_08 = df2[ct.upper()].unique()
# Create a Pie Chart for the different crime types
crime_counts_by_type = list()
for crime in crime_types_08:
crime_count = df2[ct.upper()].value_counts()[crime]
crime_counts_by_type.append(crime_count)
# Pie Chart Representation
make_pie_chart(crime_counts_by_type, crime_types_08,
"Types of Crime in Seattle, 2008-2014")
###Output
_____no_output_____
###Markdown
Interesting! So it looks like all the crime types levelled off in Seattle to roughly equivalent amounts. But will this equality also hold for different regions of the city?
Question 3: Which precincts of the city saw the most crime in Seattle, from 2008-2014?
###Code
df2['Precinct'].hist()
plt.title("Distribution of Crime in Seattle Precincts, 2008-2014")
plt.show()
###Output
_____no_output_____
###Markdown
Okay, so the police may need to focus on some areas more than others. How should they best prepare for the criminals in each precinct of Seattle? Question 4: Which Crime Type Was the Most Common, for each Precinct between the years 2008-2014?
###Code
# store each precinct with its most common crime type
precincts_most_common = dict()
# iterate over the city precincts
precincts = df2['Precinct'].unique()
for precinct in precincts:
# find the most common
crimes_in_precinct = df2[df2['Precinct'] == precinct]['CRIME_TYPE'].value_counts()
precincts_most_common[precinct] = crimes_in_precinct
# Print the results!
# print(precincts_most_common.items())
print('For the years 2008-2014:\n')
for p in precincts:
conclusion = precincts_most_common[p]
print(f'Here is the breakdown of crime types in precinct {p}: \n{conclusion}.\n')
###Output
For the years 2008-2014:
Here is the breakdown of crime types in precinct SE:
Motor Vehicle Theft 684
Rape 684
Assault 684
Robbery 684
Burglary 684
Larceny-Theft 684
Homicide 684
Name: CRIME_TYPE, dtype: int64.
Here is the breakdown of crime types in precinct W:
Assault 913
Robbery 913
Burglary 912
Motor Vehicle Theft 912
Rape 912
Larceny-Theft 911
Homicide 911
Name: CRIME_TYPE, dtype: int64.
Here is the breakdown of crime types in precinct E:
Motor Vehicle Theft 684
Rape 684
Assault 684
Robbery 684
Burglary 684
Larceny-Theft 684
Homicide 684
Name: CRIME_TYPE, dtype: int64.
Here is the breakdown of crime types in precinct SW:
Motor Vehicle Theft 456
Rape 456
Assault 456
Robbery 456
Burglary 456
Larceny-Theft 456
Homicide 456
Name: CRIME_TYPE, dtype: int64.
Here is the breakdown of crime types in precinct N:
Burglary 1139
Motor Vehicle Theft 1139
Rape 1139
Assault 1139
Robbery 1139
Larceny-Theft 1139
Homicide 1139
Name: CRIME_TYPE, dtype: int64.
###Markdown
get all settlements links
###Code
driver.get('https://www.gov.il/he/departments/news/?OfficeId=104cb0f4-d65a-4692-b590-94af928c19c0&limit=10&topic=3ef9cac8-a1a9-4352-91d4-860efd3b720d&subTopic=626a30f9-8b50-495a-9b9f-e4ce4b433ca5')
settlements_url_list_of_lists = []
while True:
series = pd.Series(driver.page_source.split(' '))
settlements_url_list = series[series.str.contains('departments/news/') & ~series.str.contains('\?')].apply(lambda x: x[6:-1]).tolist()
print('number of settlements:', len(settlements_url_list))
settlements_url_list_of_lists.append(settlements_url_list)
element = driver.find_elements_by_xpath("//div[contains(@class, 'button-gov blue xs-pl-5 xs-pr-5')]")[1]
if element.is_displayed():
element.click()
time.sleep(0.5)
else:
break
all_settlements_list = list(itertools.chain.from_iterable(settlements_url_list_of_lists))
print(len(all_settlements_list))
temp_settlements_series = pd.Series(all_settlements_list)
all_settlements_series = pd.Series(temp_settlements_series.unique())
display(all_settlements_series.shape)
display(all_settlements_series.head())
all_settlements_csv_path = './all_settlements_links.csv'
all_settlements_series.to_csv(all_settlements_csv_path, index=False, header=False)
# all_settlements_series = pd.read_csv(all_settlements_csv_path, header=None)[0]
display(all_settlements_series.head())
display(all_settlements_series.shape)
###Output
_____no_output_____
###Markdown
get settlements datums
###Code
def get_settlement_data(driver):
main_xpath = '//div[@class=\'margin-for-ul txt dark-gray-txt lg-mb-30 tbl-accesabilty tbl-responsive sub-links-permanent-underline\']//'
xpath = f'{main_xpath}h3 | {main_xpath}h2 | {main_xpath}p | {main_xpath}li'
elements_list = driver.find_elements_by_xpath(xpath)
datum_list = [element.text for element in elements_list]
b = pd.Series(datum_list)
datum_clean_list = b[:b[b.str.contains('הנחיות לציבור')].index[0]].tolist()
return datum_clean_list
all_settlements_dict = {}
for index, settlement_link in enumerate(all_settlements_series):
if not re.search('[a-zA-Z]', settlement_link.split('/')[-1]):
continue
driver.get(settlement_link)
time.sleep(0.25)
clear_output(wait=True)
settlement_datum_list = get_settlement_data(driver)
all_settlements_dict[settlement_link] = {'title': driver.title, 'datum': settlement_datum_list}
print(f'{index + 1} / {len(all_settlements_series)}: {settlement_link}')
print(all_settlements_dict[settlement_link])
json_path = 'all_settlements_dict.json'
with open(json_path, 'w') as fp:
json.dump(all_settlements_dict, fp)
# with open(json_path, 'r') as fp:
# all_settlements_dict = json.load(fp)
###Output
_____no_output_____
###Markdown
extract incident date
###Code
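# Note: 'עדכון' / 'עודכן' mean "update" / "updated"; rows containing them carry the publication
# date that is later forward-filled onto the incident rows listed beneath them (see get_settlement_df).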
def get_update_date_if_update(row):
if 'עדכון' in re.findall('עדכון|עודכן', row['raw']):
return get_date_from_string(row['raw'])
else:
return None
def get_date_from_string(string):
date_strings_list = re.findall('\d{1,2}\.\d{1,2}\.\d{1,4}|\d{1,2}/\d{1,2}/\d\d', string)
if date_strings_list:
if date_strings_list[0] == '30.30.2020':
date_strings_list[0] = '30.3.2020'
# print(string, date_strings_list[0])
date = pd.to_datetime(date_strings_list[0], dayfirst=True)
else:
date_strings_list_2 = re.findall('\d{1,2}\.\d{1,2}', string)
if date_strings_list_2:
if date_strings_list_2[0] == '24.32':
date_strings_list_2[0] = '24.3'
date = pd.to_datetime(date_strings_list_2[0] + '.20', dayfirst=True)
else:
date = None
return date
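# Quick illustrative check of the parser on made-up strings (not taken from the scraped pages):
print(get_date_from_string('case reported on 5.4.2020'))  # -> 2020-04-05 (dayfirst)
print(get_date_from_string('case reported on 24.3'))      # -> 2020-03-24 (year assumed to be 2020)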
def get_settlement_df(settlement_link, settlement_dict):
settlement_name = " ".join(settlement_dict['title'].split()[:-9])
df = pd.Series(settlement_dict['datum']).to_frame('raw')
df['update_date_temp'] = df.apply(get_update_date_if_update, axis=1)
df['update_date'] = df['update_date_temp'].ffill()
clean_df = df[~df['update_date_temp'].notna()][['raw', 'update_date']].dropna(subset=['update_date'])
clean_df['incident_day'] = clean_df['raw'].apply(get_date_from_string)
clean_df['settlement_name'] = settlement_name
clean_df['settlement_link'] = settlement_link
return clean_df
temp_df = [get_settlement_df(key, value) for key, value in all_settlements_dict.items()]
incidents_df = pd.concat(temp_df).reset_index(drop=True)
incidents_df.head()
all_settlements_links_series = pd.Series(list(all_settlements_dict.keys()))
all_settlements_links_series.head()
all_settlements_links_set = set(all_settlements_links_series)
incidents_settlement_links_set = set(incidents_df['settlement_link'])
all_settlements_links_set.difference(incidents_settlement_links_set)
incidents_df['incident_day'].describe()
incidents_df[incidents_df['incident_day'] == '2020-04-29']
incidents_df[incidents_df['incident_day'] == '2020-07-04']
incidents_csv_path = './incidents.csv'
incidents_df.to_csv(incidents_csv_path, index=False)
# incidents_df = pd.read_csv(incidents_csv_path, parse_dates=['update_date', 'incident_day']).dropna()
incidents_df.head()
###Output
_____no_output_____
###Markdown
statistics
###Code
incidents_stat_df = incidents_df.groupby('incident_day').size().to_frame('size').reset_index()
incidents_clean_stat_df = incidents_stat_df[(incidents_stat_df['incident_day'] >= '2020-03-01') & (incidents_stat_df['incident_day'] <= '2020-05-02')]
incidents_clean_stat_df.tail(10)
%matplotlib notebook
plt.rcParams['figure.figsize'] = [10, 5]
incidents_clean_stat_df.plot(x='incident_day', y='size', marker='*')
plt.title('Incidents per day')
plt.xlabel('date')
plt.ylabel('number of incidents')
###Output
_____no_output_____
###Markdown
Cities
###Code
def check_name(df, name):
settlement_names_series = df['settlement_name']
return settlement_names_series[settlement_names_series.str.contains(name)].unique()
def show_stat(df, settlement_name):
print(check_name(df, settlement_name))
settlement_df = df[df['settlement_name'].str.contains(settlement_name)]
stat_df = settlement_df.groupby('incident_day').size().to_frame('size').reset_index()
stat_df.plot(x='incident_day', y='size', marker='*')
plt.title('Incidents per day')
plt.xlabel('date')
plt.ylabel('number of incidents')
show_stat(incidents_df, 'בני ברק')
show_stat(incidents_df, 'פתח')
show_stat(incidents_df, 'כפר סבא')
show_stat(incidents_df, 'תל אביב')
show_stat(incidents_df, 'ירושלים')
show_stat(incidents_df, 'חיפה')
incidents_df['incident_to_update_days_delay'] = incidents_df['update_date'] - incidents_df['incident_day']
incidents_df.head()
incidents_df['incident_to_update_days_delay'].describe()
incidents_df.loc[incidents_df['incident_to_update_days_delay'].idxmin()]
incidents_df.loc[incidents_df['incident_to_update_days_delay'].idxmax()]
a = incidents_df['incident_to_update_days_delay'].dt.days
b = a[(a >= 0) & (a < 50)]
b.hist(bins=20)
plt.title('delay in days distribution')
plt.xlabel('delay in days')
plt.ylabel('count')
c = incidents_df.groupby('incident_day')['incident_to_update_days_delay'].apply(lambda x: x.mean()).reset_index()
c['incident_to_update_days_delay'] = c['incident_to_update_days_delay'].dt.days
c.head()
d = c['incident_to_update_days_delay']
e = c[(d < 50) & (d >= 0)]
e.head()
e.plot(x='incident_day', y='incident_to_update_days_delay')
plt.title('average delay in days between incident and report')
plt.xlabel('date')
plt.ylabel('average delay in days')
###Output
_____no_output_____
###Markdown
Maryland schools star ratings analysis
By [Christine Zhang](mailto:[email protected])
An analysis of data from the [Maryland State Department of Education Report Card](http://mdreportcard.org/) for a December 4, 2018 Baltimore Sun story titled ["Maryland releases first star ratings for every public school; 60 percent earn four or five stars out of five"](https://www.baltimoresun.com/news/maryland/education/k-12/bs-md-star-rating-release-20181203-story.html) by Liz Bowie and Talia Richman.
Here are the key findings:
- Only 35 of the state's more than 1,300 schools received one star, the lowest rating, while 219 received five stars.
- In Baltimore City, 23 schools earned one star.
- More than half of the city’s schools received one- or two-star ratings.
- Howard County had 91 percent of its schools rated four and five stars, while Baltimore County had 96 of its 160 schools rated as four or five stars.
- In Harford County, 70 percent of schools earned either a four- or five-star rating.
- Fourteen school systems in the state had no one- or two-star schools.
How we did it
Import R data analysis libraries
###Code
suppressMessages(library('tidyverse'))
suppressMessages(library('janitor'))
###Output
_____no_output_____
###Markdown
Read in the scores data for analysis.
###Code
scores <- suppressMessages(read_csv('input/accountability_schools_download_file.csv', na = 'na') %>% clean_names())
###Output
_____no_output_____
###Markdown
Finding: Only 35 of the state's more than 1,300 schools received one star, the lowest rating, while 219 received five stars. Print the number of schools in the state dataset.
###Code
print(paste("There were", length(scores$school_name),
"public schools in the Maryland school system in the 2017-18 school year."))
###Output
[1] "There were 1319 public schools in the Maryland school system in the 2017-18 school year."
###Markdown
Use `table()` to view the breakdown of schools by star rating.
###Code
table(scores$star_rating)
###Output
_____no_output_____
###Markdown
Finding: In Baltimore City, 23 schools earned one star. Use `filter()` and `table()` to view the breakdown of schools in Baltimore City by star rating.
###Code
scores %>% filter(lea_name == 'Baltimore City') %>% select(star_rating) %>% table()
###Output
_____no_output_____
###Markdown
Finding: More than half of the city’s schools received one- or two-star ratings. Use `group_by()` and `summarise()` to calculate the percentage breakdown of schools by star rating. Save this into a dataframe called `scores.sum`.
###Code
scores.sum <- scores %>% group_by(lea_name, star_rating) %>%
summarise(n = n()) %>%
mutate(percent = n/sum(n) * 100)
###Output
_____no_output_____
###Markdown
Use `filter()` to look just at Baltimore City.
###Code
scores.sum %>% filter(lea_name == 'Baltimore City')
###Output
_____no_output_____
###Markdown
Print the percentage of Baltimore City schools receiving one- or two-star ratings.
###Code
print(paste(round(scores.sum[scores.sum$star_rating == 1 & scores.sum$lea_name == 'Baltimore City', ]$percent +
scores.sum[scores.sum$star_rating == 2 & scores.sum$lea_name == 'Baltimore City', ]$percent),
"percent of the city's schools (more than half) received one- or two-star ratings."))
###Output
[1] "60 percent of the city's schools (more than half) received one- or two-star ratings."
###Markdown
Finding: Howard County had 91 percent of its schools rated four and five stars, while Baltimore County had 96 of its 160 schools rated as four or five stars. Use `filter()` on the `scores.sum` dataframe to view the number and percentage of schools in Howard and Baltimore County rated four or five stars.
###Code
scores.sum %>% filter(lea_name == 'Howard' | lea_name == 'Baltimore County')
###Output
_____no_output_____
###Markdown
Print the percentage of Howard County schools and the number of Baltimore County schools receiving four- or five-star ratings.
###Code
print(paste(round(scores.sum[scores.sum$star_rating == 4 & scores.sum$lea_name == 'Howard', ]$percent +
scores.sum[scores.sum$star_rating == 5 & scores.sum$lea_name == 'Howard', ]$percent),
"percent of Howard County schools received four- or five-star ratings."))
print(paste(scores.sum[scores.sum$star_rating == 4 & scores.sum$lea_name == 'Baltimore County', ]$n +
scores.sum[scores.sum$star_rating == 5 & scores.sum$lea_name == 'Baltimore County', ]$n,
"Baltimore County's", sum(scores.sum[scores.sum$lea_name == 'Baltimore County', ]$n),
"schools received four- or five-star ratings."))
###Output
[1] "91 percent of Howard County schools received four- or five-star ratings."
[1] "96 Baltimore County's 160 schools received four- or five-star ratings."
###Markdown
Finding: In Harford County, 70 percent of schools earned either a four- or five-star rating. Use `filter()` on the `scores.sum` dataframe to view the number and percentage of schools in Harford County rated four or five stars.
###Code
scores.sum %>% filter(lea_name == 'Harford')
###Output
_____no_output_____
###Markdown
Print the percentage of Harford County schools receiving four- or five-star ratings.
###Code
print(paste(round(scores.sum[scores.sum$star_rating == 4 & scores.sum$lea_name == 'Harford', ]$percent +
scores.sum[scores.sum$star_rating == 5 & scores.sum$lea_name == 'Harford', ]$percent),
"percent of Harford County schools received four- or five-star ratings."))
###Output
[1] "70 percent of Harford County schools received four- or five-star ratings."
###Markdown
Finding: Fourteen school systems in the state had no one- or two-star schools. Use `group_by()` and `mutate()` to create a column, `lowest_rating`, which gives the lowest rating received by a school in the LEA (local education agency). Use `filter()` to keep LEAs whose lowest rating is three stars or above, meaning they had no one- or two-star schools. Use `select()`, `distinct()`, `ungroup()` and `mutate()` to print out and tally the LEAs, aka school systems, with no one- or two-star schools.
###Code
scores.sum %>% group_by(lea_name) %>%
mutate(lowest_rating = min(star_rating)) %>%
filter(lowest_rating >= 3) %>%
select(lea_name) %>%
distinct() %>%
ungroup() %>% mutate(row_number = row_number())
###Output
_____no_output_____
###Markdown
Distribution of star ratings
Ratings are assigned to schools based on the number of points a school receives as a percentage of the total possible points it could earn:
- Less than 30% = one star
- 30% or more and less than 45% = two stars
- 45% or more and less than 60% = three stars
- 60% or more and less than 75% = four stars
- 75% or more = five stars
For more information, read the [story](https://www.baltimoresun.com/news/maryland/education/k-12/bs-md-star-rating-release-20181203-story.html). Below is a histogram of star ratings, showing the schools that fell into each "earned points percent" bucket.
Statewide
###Code
scores.grouped.points.md <- scores %>% group_by(total_earned_points_percent) %>%
summarise(n = n()) %>%
mutate(perc = n/sum(n) * 100) %>% mutate(lea_name = 'Statewide')
options(repr.plot.width = 6, repr.plot.height = 4)
ggplot(scores.grouped.points.md, aes(x = total_earned_points_percent,
y = perc)) +
geom_bar(stat = 'identity', fill = '#2484C6') +
scale_y_continuous(breaks = seq(0, 10, 2))+
geom_vline(xintercept = 29, size = .3)+
geom_vline(xintercept = 44.9, size = .3)+
geom_vline(xintercept = 59.9, size = .3)+
geom_vline(xintercept = 74.9, size = .3)+
geom_bar(stat = 'identity', fill = '#2484C6')+
labs(x = '', y ='') +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(color = 'lightgrey', size = .1),
panel.background = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(fill = NA, colour = "grey")) +
facet_wrap(~lea_name) + scale_x_continuous(breaks = seq(0, 100, 10)) + expand_limits(x = 0)
###Output
_____no_output_____
###Markdown
Baltimore region, by county
###Code
scores.grouped.points <- scores %>% group_by(lea_name, total_earned_points_percent) %>%
summarise(n = n()) %>%
mutate(perc = n/sum(n) * 100)
scores.grouped.filter <- scores.grouped.points %>% filter(lea_name == 'Baltimore City' |
lea_name == 'Baltimore County' |
lea_name == 'Anne Arundel' |
lea_name == 'Carroll' |
lea_name == 'Harford' |
lea_name == 'Howard')
options(repr.plot.width = 6, repr.plot.height = 3)
ggplot(scores.grouped.filter, aes(x = total_earned_points_percent,
y = perc)) +
geom_bar(stat = 'identity', fill = '#2484C6') +
scale_y_continuous(breaks = seq(0, 10, 2))+
geom_vline(xintercept = 29, size = .3)+
geom_vline(xintercept = 44.9, size = .3)+
geom_vline(xintercept = 59.9, size = .3)+
geom_vline(xintercept = 74.9, size = .3)+
geom_bar(stat = 'identity', fill = '#2484C6')+
labs(x = '', y ='') +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(color = 'lightgrey', size = .1),
panel.background = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(fill = NA, colour = "grey")) +
facet_wrap(~lea_name) + scale_x_continuous(breaks = seq(0, 100, 10))
###Output
_____no_output_____
###Markdown
Analysis
Do analysis across a number of files.
###Code
# ignore whitespace warnings
%env SPACY_WARNING_IGNORE=W008
import ipywidgets as widgets
import itertools
import pandas as pd
import plotly.offline as py
import plotly.graph_objs as go
# offline mode
py.init_notebook_mode(connected=False)
###Output
_____no_output_____
###Markdown
Re-run this cell when Python code in the repository changes.
###Code
import importlib
import fismatic.core as fismatic
import fismatic.helpers as helpers
importlib.reload(fismatic)
importlib.reload(helpers);
###Output
_____no_output_____
###Markdown
Load files
###Code
path_widget = widgets.Text(description="Path:", value=".")
display(path_widget)
files = fismatic.get_files(path_widget.value)
control_sets = [fismatic.control_set_for(f) for f in files]
###Output
_____no_output_____
###Markdown
Compare files
###Code
stats = [fismatic.stats_for(cs) for cs in control_sets]
df = pd.DataFrame(stats)
df.set_index("Filename", inplace=True)
df
control_token_counts = helpers.flatten([cs.implementation_token_counts() for cs in control_sets])
data = [go.Histogram(x=control_token_counts)]
layout = go.Layout(
title="Control token counts",
xaxis={
"title": "Number of tokens"
},
yaxis={
"title": "Number of controls"
}
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='basic histogram')
from collections import Counter
control_names = helpers.flatten([cs.control_names() for cs in control_sets])
counter = Counter(control_names)
top_controls = counter.most_common(20)
pd.DataFrame(top_controls, columns=["Control", "# occurrences"])
###Output
_____no_output_____
###Markdown
Loading libraries
###Code
%matplotlib inline
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import statsmodels.api as sm
from sklearn import metrics
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
# special matplotlib argument for improved plots
from matplotlib import rcParams
###Output
_____no_output_____
###Markdown
Loading the dataset
###Code
from sklearn.datasets import load_boston
boston = load_boston()
bos = pd.DataFrame(boston.data)
bos.columns = boston.feature_names
bos['PRICE'] = boston.target
print(bos.head())
###Output
CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX \
0 0.00632 18.0 2.31 0.0 0.538 6.575 65.2 4.0900 1.0 296.0
1 0.02731 0.0 7.07 0.0 0.469 6.421 78.9 4.9671 2.0 242.0
2 0.02729 0.0 7.07 0.0 0.469 7.185 61.1 4.9671 2.0 242.0
3 0.03237 0.0 2.18 0.0 0.458 6.998 45.8 6.0622 3.0 222.0
4 0.06905 0.0 2.18 0.0 0.458 7.147 54.2 6.0622 3.0 222.0
PTRATIO B LSTAT PRICE
0 15.3 396.90 4.98 24.0
1 17.8 396.90 9.14 21.6
2 17.8 392.83 4.03 34.7
3 18.7 394.63 2.94 33.4
4 18.7 396.90 5.33 36.2
###Markdown
Basic analyses
###Code
print(bos.describe())
###Output
CRIM ZN INDUS CHAS NOX RM \
count 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000
mean 3.613524 11.363636 11.136779 0.069170 0.554695 6.284634
std 8.601545 23.322453 6.860353 0.253994 0.115878 0.702617
min 0.006320 0.000000 0.460000 0.000000 0.385000 3.561000
25% 0.082045 0.000000 5.190000 0.000000 0.449000 5.885500
50% 0.256510 0.000000 9.690000 0.000000 0.538000 6.208500
75% 3.677083 12.500000 18.100000 0.000000 0.624000 6.623500
max 88.976200 100.000000 27.740000 1.000000 0.871000 8.780000
AGE DIS RAD TAX PTRATIO B \
count 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000
mean 68.574901 3.795043 9.549407 408.237154 18.455534 356.674032
std 28.148861 2.105710 8.707259 168.537116 2.164946 91.294864
min 2.900000 1.129600 1.000000 187.000000 12.600000 0.320000
25% 45.025000 2.100175 4.000000 279.000000 17.400000 375.377500
50% 77.500000 3.207450 5.000000 330.000000 19.050000 391.440000
75% 94.075000 5.188425 24.000000 666.000000 20.200000 396.225000
max 100.000000 12.126500 24.000000 711.000000 22.000000 396.900000
LSTAT PRICE
count 506.000000 506.000000
mean 12.653063 22.532806
std 7.141062 9.197104
min 1.730000 5.000000
25% 6.950000 17.025000
50% 11.360000 21.200000
75% 16.955000 25.000000
max 37.970000 50.000000
###Markdown
Splitting the dataset into training and test sets
###Code
from sklearn.model_selection import train_test_split
X = bos.drop('PRICE', axis = 1)
Y = bos['PRICE']
X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y, test_size = 0.33, random_state = 5)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
###Output
(339, 13)
(167, 13)
(339,)
(167,)
###Markdown
Running the regression
###Code
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train, Y_train)
Y_pred = lm.predict(X_test)
plt.scatter(Y_test, Y_pred)
plt.xlabel("Preços originais:")
plt.ylabel("Preços preditos:")
plt.title("Preços originais vs Preços preditos:")
diff = pd.DataFrame({'Original': Y_test, 'Predito': Y_pred})
diff1 = diff.head(20)
print(diff.head(15))
diff1.plot(kind='bar',figsize=(10,8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
diff.describe()
print('Mean absolute error:', metrics.mean_absolute_error(Y_test, Y_pred))
print('Mean squared error:', metrics.mean_squared_error(Y_test, Y_pred))
print('Root mean squared error:', np.sqrt(metrics.mean_squared_error(Y_test, Y_pred)))
###Output
Mean absolute error: 3.4550349322483482
Mean squared error: 28.530458765974583
Root mean squared error: 5.341391089030514
###Markdown
Starbucks Capstone Challenge
Overview
This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. Not all users receive the same offer, and that is the challenge to solve with this data set.
The task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.
Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.
Transactional data shows user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. It also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer.
Example
To give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.
However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer.
Tips
Take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. Therefore, try to assess what a certain demographic group will buy when not receiving any offers.
Objectives
In this notebook, I will develop heuristics to determine which offer should be sent and how spend varies across customer demographics. Based on the findings, two machine learning models will be built:
- a classification model to predict whether or not a customer will respond to an offer
- a regression model to predict customer spend based on demographics and offer type
Data Sets
The data is contained in three files:
* portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)
* profile.json - demographic data for each customer
* transcript.json - records for transactions, offers received, offers viewed, and offers completed
Here is the schema and explanation of each variable in the files:
**portfolio.json**
* id (string) - offer id
* offer_type (string) - type of offer, i.e. BOGO, discount, informational
* difficulty (int) - minimum required spend to complete an offer
* reward (int) - reward given for completing an offer
* duration (int) - time for offer to be open, in days
* channels (list of strings)
**profile.json**
* age (int) - age of the customer
* became_member_on (int) - date when customer created an app account
* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)
* id (str) - customer id
* income (float) - customer's income
**transcript.json**
* event (str) - record description (i.e. transaction, offer received, offer viewed, etc.)
* person (str) - customer id
* time (int) - time in hours since start of test. The data begins at time t=0
* value - (dict of strings) - either an offer id or transaction amount depending on the record
Load packages
###Code
# data analysis
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import seaborn as sns
import math
# utils
import os
import json
import pickle
from tqdm import tqdm
import datetime
%matplotlib inline
# Check if xgboost package exists
# or install it
try:
import xgboost
except ImportError as e:
!pip install xgboost
# modeling
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# Oversampling & undersampling
import imblearn
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
from collections import Counter
print(imblearn.__version__)
###Output
0.9.0
###Markdown
Table of contents
This project is divided into two Jupyter Notebooks.
- wrangling.ipynb
- analysis.ipynb
This notebook (`analysis.ipynb`) continues the data wrangling work that can be found in `wrangling.ipynb`.
3. Explore data: Part1 [link](explore-part1) <- to skip data cleaning
4. Explore data: Part2 [link](explore-part2)
5. Feature engineering [link](feature)
6. Modeling [link](model)
7. Build the final model [link](final)
8. Conclusion [link](conclude)
---
SECTION 3 Explore data: Part 1
Here we explore the individual datasets first. In the next section (Part 2) we will continue the analysis with the merged dataset.
###Code
# Load the clean dataset
portfolio = pd.read_csv('data/portfolio_clean.csv')
profile = pd.read_csv('data/profile_clean.csv')
transcript = pd.read_csv('data/transcript_clean.csv')
transaction = pd.read_csv('data/transactions_pivoted.csv')
portfolio.head()
###Output
_____no_output_____
###Markdown
3a. Offer types and channels
###Code
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(12,5))
sns.heatmap(portfolio.iloc[:, :4].groupby('offer_type').mean(),
annot=True, fmt='.2f', square=True, cmap='Blues', ax=ax1);
sns.heatmap(portfolio.iloc[:, 3:].groupby('offer_type').mean(),
annot=True, fmt='.2f', cmap='Greens', ax=ax2);
ax1.set(title ='How does each offer type look?', xlabel='', ylabel='');
ax2.set(title ='Which channel does each offer use?\n(0: not at all - 1: always)',
xlabel='', ylabel='')
plt.tight_layout()
# Save figure
plt.show()
###Output
_____no_output_____
###Markdown
Offers
- **discount** offer requires the highest spend to redeem (the most difficult) and has the longest duration - the longest engagement with customers.
- **bogo** offer gives the most reward with comparatively lower spend required - as the name (Buy One Get One For Free) suggests.
- **informational** offer carries no reward and requires no spend.
Channels
Regardless of offer type, **email** is always used when communicating the offer. **bogo** uses all four channels more intensively. The analysis will be more meaningful when offer type is explored together with customer profile and transaction data, which will be done later after merging the datasets.
3b. Demographics
###Code
profile['became_member_on'] = pd.to_datetime(profile['became_member_on'])
profile.head()
base_colors = sns.color_palette()[:4]
fig, axes = plt.subplots(2, 2, figsize=(16,8))
fig1 = sns.countplot(x=profile.gender, color=base_colors[0], ax=axes[0,0])
fig2 = sns.countplot(x=profile.became_member_on.dt.year, color=base_colors[1], ax=axes[0,1])
fig3 = sns.histplot(x=profile.age, color=base_colors[2], kde=True, element='step', ax=axes[1,0])
fig4 = sns.histplot(x=profile.income, color=base_colors[3], kde=True, element='step', ax=axes[1,1])
fig1.set(title='Count by gender', xlabel='', ylabel='')
fig2.set(title='New customers by years', xlabel='', ylabel='')
fig3.set(title='Age distribution of members', xlabel='', ylabel='')
fig4.set(title='Income distribution of members\n(unit: US dollars)', xlabel='', ylabel='')
plt.tight_layout(pad=3.5)
plt.show()
###Output
_____no_output_____
###Markdown
**Gender:** slightly more male respondents (57%) than female, with some identifying as other. **New customers:** The number of new customers is growing each year, assuming other factors, such as total annual visitor volume, are consistent. **Age:** The minimum age is 18, likely due to an age restriction for members. With a median of 55, 50% of the members fall between 42 and 66. A few members are above 100, which may be correct but is more likely a survey error.**Income:** Median income is $64,000. The distribution is skewed to the right, which seems natural for an income distribution. 3c. Offer completionUsing the cleaned, pivoted dataframe saved in `transactions_pivoted.csv`
###Code
print('Number of unique customers: ', transaction.person.nunique())
print('Number of unique offer types: ', transaction.offer_id.nunique())
###Output
Number of unique customers: 17000
Number of unique offer types: 10
###Markdown
The total number of records is 169,940, which I interpret as the full mix of possible customer-offer combinations.
###Code
# Map label number to name
label_num_to_name = {1: 'complete', 2: 'inactive', 3: 'active', 4: 'indifferent', 5: 'not received'}
transaction['label_descr'] = transaction['label'].map(label_num_to_name)
# Set color for the plots
label_order = label_num_to_name.values()
fig = sns.countplot(x=transaction.label_descr, palette="rocket", order=label_order);
fig.set(title='Counts by Label', xlabel='label type', ylabel='count');
plt.xticks()
plt.show()
# Proportions of each label
transaction['label_descr'].value_counts() / transaction.shape[0]
###Output
_____no_output_____
###Markdown
17,000 unique customers and 10 unique offers are represented in this dataset, which gives 170,000 possible customer-offer combinations. Around 63% of offers are `unsent` (label 5), most likely because- offers were not relevant for customers- opportunities were misjudged and missed
###Code
count_total_sent = transaction.loc[transaction.label != 5, 'label'].shape[0]
transaction.loc[transaction.label != 5, 'label_descr'].value_counts() / count_total_sent
###Output
_____no_output_____
###Markdown
Among those offers that were sent, over 47% led to completion. In other words, either the offer led to a purchase, or the offer was informational and was viewed. Additionally,- 24% of customers viewed offers but did not redeem them- 15% were not responsive to the offer- 13% purchased without viewing
###Code
# Get subset data for informational and transactional offers
info_offer_ids = portfolio.loc[portfolio['offer_type'] == 'informational', 'id'].to_list()
offer_informational = transaction[transaction['offer_id'].isin(info_offer_ids)]
offer_transactional = transaction[~transaction['offer_id'].isin(info_offer_ids)]
print('# informational offers:', len(offer_informational))
print('# transactional offers:', len(offer_transactional))
# Visualization
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(16,5), sharey=True)
# Set order for plots
label_order = label_num_to_name.values()
fig1 = sns.countplot(x=offer_informational.label_descr, ax=ax1, palette="Blues", order=label_order);
fig2 = sns.countplot(x=offer_transactional.label_descr, ax=ax2, palette="Greens", order=label_order);
ax1.set(title='Offer completion for information offers', xlabel='Labels');
ax2.set(title='Offer completion for transactional offers', xlabel='Labels', ylabel='');
plt.tight_layout(pad=1.2)
# Save figure
plt.show()
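# The hard-coded denominators below match the subset sizes printed above
# (rows for informational vs. transactional offers).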
offer_informational['label'].value_counts() / 34000
offer_transactional['label'].value_counts() / 136000
###Output
_____no_output_____
###Markdown
Around 20% of the total dataset is informational and the remaining 80% is transactional. - informational offers have a higher completion rate- unsent offers were proportionally similar between informational and transactional offers, but still dominant in counts- no inactive or active labels occur for informational offers (they involve no purchase) SECTION 4 Explore data - Part2In this section, we go deeper into offer completion by offer type and demographics. Completion is measured only for offers that were actually sent. 4a. Completion by offer type
###Code
transaction.head(3)
# Subset transactions with offers sent
transactions_sent = transaction[transaction['label'] != 5].copy()
# Create a dataframe that shows
# number of completed and incomplete offers by offer id
completion_by_offer = transactions_sent.groupby(['offer_id', 'label_descr']).size().unstack()
# Compute completion rate
completion_by_offer['completion_rate'] = completion_by_offer['complete'] / completion_by_offer.sum(axis=1)
# Sort by completed rate
completion_by_offer.sort_values('completion_rate', ascending=False, inplace=True)
# Merge the data sets
completion_by_offer = pd.merge(completion_by_offer.reset_index(), portfolio, left_on='offer_id', right_on='id', how='left')
# Drop duplicated id column
completion_by_offer.drop(columns='id', inplace=True)
# show the prepared dataset
completion_by_offer
# Get custom offer names
# offer_type + reward + difficulty + duration
# Instantiate a list of offer names
offer_names = list()
# Subset the dataframe
offset_subset = completion_by_offer[['offer_type', 'reward', 'difficulty', 'duration']]
# Iterate by row
for idx, values in offset_subset.iterrows():
# Instantiate name
name = ''
for item in values:
# If item is string, get the first 4 letters
if type(item) == str:
item = item[:4]
# If item is integer, check if the value < 10,
# then add '0' in front and transform it to string
if type(item) == int and item < 10:
item = '0' + str(item)
else:
item = str(item)
# Concatenate item
name += item
# Add completed name to a list of offer names
offer_names.append(name)
# Add the custom column name
completion_by_offer['offer_name'] = offer_names
completion_by_offer['completion_rate'].plot(kind='bar', stacked=True, figsize=(16,5));
plt.title('Completion rate by offer type')
plt.xlabel('offer type')
plt.ylabel('completion rate')
plt.xticks(ticks=range(10), labels=offer_names, rotation=0)
plt.yticks(ticks=np.arange(0, 1+0.1, 0.1), labels=['{:.0f}%'.format(n*100) for n in np.arange(0, 1+0.1, 0.1)])
plt.show()
###Output
_____no_output_____
###Markdown
The informational offer with a 3-day duration has nearly 90% completion. Amongst transactional offers (discount, bogo), the disc021010 and disc030707 offers have comparatively higher completion- disc021010 : discount offer, reward 2, difficulty 10, duration 10 - disc030707 : discount offer, reward 3, difficulty 7, duration 7Although **bogo** offers give relatively more rewards than the two discount offers, their completion rate is lower.
###Code
channels_by_offer = completion_by_offer.iloc[:, -5:].set_index('offer_name')
plt.figure(figsize=(10,6))
sns.heatmap(channels_by_offer.T, cmap='YlGn', annot=True, cbar=False, square=True)
plt.xlabel('offer name')
plt.ylabel('channel')
plt.show()
###Output
_____no_output_____
###Markdown
For transactional offers, a higher completion rate could be associated with the number of channels. Not communicating through the **social** media channel is very likely to lead to a lower completion rate. Let's make it more granular by offer type.
###Code
## Informational offers
# Subset dataset
channels = ['mobile', 'web', 'social', 'email']
completion_info = completion_by_offer.query('offer_type == "informational"')
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
# Figure1 - heatmap for binary
sns.heatmap(completion_info[channels].T, linewidths = 0.30, annot = True,
cmap='YlGn', cbar=False, ax=ax1);
# Labels
ax1.set_title('Channels for informational offer\n(1: Yes, 0: No)')
ax1.set(xlabel='', ylabel='')
ax1.set_xticklabels(labels=completion_info['offer_name'], rotation=0)
# Figure2 - bar
sns.barplot(x='offer_name', y='completion_rate', data=completion_info, ax=ax2, color='green')
# Labels
ax2.set(title='Completion rate by informational offer')
ax2.set_xticks(ticks=np.arange(len(completion_info)))
ax2.set_xticklabels(labels=completion_info['offer_name'], rotation=0)
ax2.set_yticks(ticks=np.arange(0, 1+0.1, 0.1))
ax2.set_yticklabels(labels=['{:.0f}'.format(n*100) + '%' for n in np.arange(0, 1+0.1, 0.1)])
plt.tight_layout(pad=1.2)
## Discount offers
# Subset dataset
channels = ['mobile', 'web', 'social', 'email']
completion_discount = completion_by_offer.query('offer_type == "discount"')
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
# Figure1 - heatmap for binary
sns.heatmap(completion_discount[channels].T, linewidths = 0.30, annot = True,
cmap='YlGn', cbar=False, ax=ax1);
# Labels
ax1.set_title('Channels for discount offer\n(1: Yes, 0: No)')
ax1.set_xticklabels(labels=completion_discount['offer_name'], rotation=0)
ax1.set(xlabel='', ylabel='')
# Figure2 - bar
sns.barplot(x='offer_name', y='completion_rate', data=completion_discount, ax=ax2, color='green')
# Labels
ax2.set(title='Completion rate by discount offer')
ax2.set_xticks(ticks=np.arange(len(completion_discount)))
ax2.set_xticklabels(labels=completion_discount['offer_name'], rotation=0)
ax2.set_yticks(ticks=np.arange(0, 1+0.1, 0.1))
ax2.set_yticklabels(labels=['{:.0f}'.format(n*100) + '%' for n in np.arange(0, 1+0.1, 0.1)])
plt.tight_layout(pad=1.2)
## Bogo offers
# Subset dataset
channels = ['mobile', 'web', 'social', 'email']
completion_bogo = completion_by_offer.query('offer_type == "bogo"')
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
# Figure1 - heatmap for binary
sns.heatmap(completion_bogo[channels].T, linewidths = 0.30, annot = True,
cmap='YlGn', cbar=False, ax=ax1);
# Labels
ax1.set_title('Channels for bogo offer\n(1: Yes, 0: No)')
ax1.set(xlabel='', ylabel='')
ax1.set_xticklabels(labels=completion_bogo['offer_name'], rotation=0)
# Figure2 - bar
sns.barplot(x='offer_name', y='completion_rate', data=completion_bogo, ax=ax2, color='green')
# Labels
ax2.set(title='Completion rate by bogo offer')
ax2.set_xticks(ticks=np.arange(len(completion_bogo)))
ax2.set_xticklabels(labels=completion_bogo['offer_name'], rotation=0)
ax2.set_yticks(ticks=np.arange(0, 1+0.1, 0.1))
ax2.set_yticklabels(labels=['{:.0f}'.format(n*100) + '%' for n in np.arange(0, 1+0.1, 0.1)])
plt.tight_layout(pad=1.2)
###Output
_____no_output_____
###Markdown
4c. Offer completion by demographics There are 5 possible behaviours identified through the funnels and each will be labeled as follows.1. complete2. inactive : incomplete, no purchase after offer viewed3. active : incomplete, but purchased without offer viewed4. indifferent: incomplete, no purchase and no view5. not received For this analysis, label 5 (not received) is not considered.
###Code
# Prepare dataset for analysis
# Merge with profile_v1 dataset
completion_demo = pd.merge(transactions_sent, profile, left_on='person', right_on='id', how='left')
# Drop the duplicated id column
completion_demo = completion_demo.drop(columns='id')
# Set the label order
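# ([:-1] drops the last label, 'not received', which is excluded from this analysis)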
label_order = list(label_num_to_name.values())[:-1]
completion_demo.label_descr = pd.Categorical(completion_demo.label_descr, categories=label_order)
completion_demo.head()
# Pivot completion by gender
completion_gender = completion_demo.groupby(['label_descr', 'gender']).size().unstack()
completion_gender_perc = completion_gender / completion_gender.sum(axis=0)
# Plot
completion_gender_perc.T.plot(kind='bar', stacked=True, figsize=(10,5));
# Annotation
for i, _type in enumerate(completion_gender.columns):
compl_rate = completion_gender_perc.loc['complete', _type]
plt.text(i-0.05, 0.2, '{:.0f}%'.format(compl_rate * 100),
color='#fff', fontsize=12)
plt.title('Offer completion by gender\n(arrow represents completion rate)')
plt.xticks(rotation=0, ticks=np.arange(3), labels=['Female', 'Male', 'Others'])
plt.ylabel('Percentage')
plt.legend(loc=8, ncol=len(completion_gender.index), bbox_to_anchor=(0.50, -0.30))
plt.tight_layout(pad=1.2)
plt.show()
###Output
_____no_output_____
###Markdown
Female customers generally have a higher completion rate than male customers. Male customers are less responsive to promotional offers - viewing an offer is less likely to lead to a purchase. Completion by age
###Code
# Divide each record with age group
bin_edges = np.arange(10, 100+10, 10)
bin_label = [str(n)+ 's' for n in bin_edges[:-1]]
completion_demo['age_group'] = pd.cut(completion_demo.age, bins=bin_edges, labels=bin_label)
# Pivot completion by age
completion_age = completion_demo.groupby(['label_descr', 'age_group']).size().unstack()
completion_age_perc = completion_age / completion_age.sum(axis=0)
# Plot
completion_age_perc.T.plot(kind='bar', stacked=True, figsize=(10,5));
# Annotation
for i, val in enumerate(completion_age_perc.columns):
compl_rt = completion_age_perc[val][0]
plt.text(i-0.14, 0.2, '{:.0f}%'.format(compl_rt * 100),
color='#fff', fontsize=10)
plt.title('Offer completion by age group\n(box represents completion rate)')
plt.xticks(rotation=0)
plt.ylabel('percent')
plt.legend(loc=8, ncol=len(completion_age_perc.index), bbox_to_anchor=(0.50, -0.30))
plt.tight_layout(pad=1.2)
plt.show()
###Output
_____no_output_____
###Markdown
Completion rate is low (lower than incomplete) for customers below 30s whereas those above 40s have completion rate higher than 50%. Completion by income
###Code
# Divide each record with income group
bin_edges = np.arange(completion_demo.income.min(), completion_demo.income.max() + 10000, 10000)
bin_label = ['$' + str(int(n))[:-3] + 'k' for n in bin_edges[:-1]]
completion_demo['income_group'] = pd.cut(completion_demo.income, bins=bin_edges, labels=bin_label)
# Pivot completion by income
completion_income = completion_demo.groupby(['label_descr', 'income_group']).size().unstack()
completion_income_perc = completion_income / completion_income.sum(axis=0)
completion_income_perc
# Plot
completion_income_perc.T.plot(kind='bar', stacked=True, figsize=(10,5));
# Annotation
for i, val in enumerate(completion_income_perc.columns):
compl_rt = completion_income_perc[val][0]
plt.text(i-.17, 0.2, '{:.0f}%'.format(compl_rt * 100),
color='#fff', fontsize=10)
plt.title('Offer completion by income group\n(box represents completion rate)')
plt.xticks(rotation=0)
plt.ylabel('percent')
plt.legend(loc=8, ncol=len(completion_income_perc.index), bbox_to_anchor=(0.50, -0.30))
plt.show()
###Output
_____no_output_____
###Markdown
Completion rate is lower for customers with income less than \\$50k. The higest completion rate is observed in the income group between \\$80k and \\$100k. --- SECTION5 Feature engineeringNow merge the cleaned dataset to prepare for a classifer model. Load the cleand dataset again to ensure that the correct data is used.
###Code
# Load dataset
portfolio = pd.read_csv('data/portfolio_clean.csv')
profile = pd.read_csv('data/profile_clean.csv')
transactions = pd.read_csv('data/transactions_pivoted.csv')
###Output
_____no_output_____
###Markdown
5a. Merging the dataset
###Code
df = pd.merge(transactions, profile, left_on='person', right_on='id', how='left')
df.head()
df = pd.merge(df, portfolio, left_on='offer_id', right_on='id', how='left')
df.head()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 170000 entries, 0 to 169999
Data columns (total 23 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 person 170000 non-null object
1 offer_id 170000 non-null object
2 offer received 63288 non-null float64
3 offer viewed 49135 non-null float64
4 transaction 0 non-null float64
5 offer completed 28996 non-null float64
6 label 170000 non-null float64
7 amount 28996 non-null float64
8 reward_x 28996 non-null float64
9 gender 148250 non-null object
10 age 148250 non-null float64
11 id_x 148250 non-null object
12 became_member_on 148250 non-null object
13 income 148250 non-null float64
14 reward_y 170000 non-null int64
15 difficulty 170000 non-null int64
16 duration 170000 non-null int64
17 offer_type 170000 non-null object
18 id_y 170000 non-null object
19 web 170000 non-null int64
20 email 170000 non-null int64
21 social 170000 non-null int64
22 mobile 170000 non-null int64
dtypes: float64(9), int64(7), object(7)
memory usage: 31.1+ MB
###Markdown
5b. Clean data After merging, drop unnecessary columns:- offer received, offer viewed, transaction, offer completed : replaced by **label**- reward_y, id_x, id_y: duplicated
###Code
# Drop unncessary columns
dropcols = ['offer received', 'offer viewed', 'transaction', 'offer completed',
'reward_y', 'id_x', 'id_y']
df = df.drop(columns=dropcols)
df = df.rename(columns={'reward_x': 'reward'})
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 170000 entries, 0 to 169999
Data columns (total 16 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 person 170000 non-null object
1 offer_id 170000 non-null object
2 label 170000 non-null float64
3 amount 28996 non-null float64
4 reward 28996 non-null float64
5 gender 148250 non-null object
6 age 148250 non-null float64
7 became_member_on 148250 non-null object
8 income 148250 non-null float64
9 difficulty 170000 non-null int64
10 duration 170000 non-null int64
11 offer_type 170000 non-null object
12 web 170000 non-null int64
13 email 170000 non-null int64
14 social 170000 non-null int64
15 mobile 170000 non-null int64
dtypes: float64(5), int64(6), object(5)
memory usage: 22.0+ MB
###Markdown
Further feature cleaning- **amount** and **reward** are null when there is no transaction: fill the missing values with 0- demographic information is missing for some persons (21,750, 12.7% of the total data): drop them as there is not enough data to use for imputation
###Code
# Fill missing amount and reweard values with 0
df['amount'] = df['amount'].fillna(0)
df['reward'] = df['reward'].fillna(0)
# Drop records with missing demo values
df = df[df['gender'].notnull()]
df.shape
df.isnull().sum()
###Output
_____no_output_____
###Markdown
Continue with further cleaning**target variable** : `label` - Create the subset dataframe excluding the **unsent** offer status**predictors**- drop the **person** column: too specific to use an individual person as a feature- drop **offer_id** : shares the same information as **offer_type**- convert **gender** and **offer_type** into dummy variables (avoiding redundancy, i.e. the [dummy variable trap](https://www.geeksforgeeks.org/ml-dummy-variable-trap-in-regression-models/))- transform **became_member_on** into a numeric value: The most recent record is 2018-07-26. Create a **recency** variable - supposing that this analysis was performed on '2019-01-01', calculate the difference in days from this reference date
###Code
# Futher processing for predictors
# Drop person, offer_id features
df = df.drop(columns=['person', 'offer_id'])
# Make dummy variables for gender and offer_type
df = pd.concat([df, pd.get_dummies(df.gender, drop_first=True)], axis=1)
df = pd.concat([df, pd.get_dummies(df.offer_type, drop_first=True)], axis=1)
# Convert into clear feature names
df = df.rename(columns={'M' : 'gender_male', 'O': 'gender_other'})
df = df.drop(columns=['gender', 'offer_type'])
# Compute recency
def calculate_recency(date):
ref_date = datetime.date(2019,1,1)
date_obj = datetime.datetime.strptime(date, '%Y-%m-%d').date()
recency = ref_date - date_obj
recency = int(recency.days)
return recency
df['recency'] = df['became_member_on'].apply(calculate_recency)
df = df.drop(columns='became_member_on')
df.info()
# Save the final data
df.to_csv('data/starbucks_data_final.csv', index=False)
###Output
_____no_output_____
###Markdown
5c. Create train, test setNormally, train and test sets are randomly split. However, in this project, a different approach will be taken. Label 5 corresponds to **not received**. In other words, these records represent unsent offers and cannot tell us anything about customer behaviour. I will set these records aside and use them later as an out-of-bag set for prediction. This process is not ideal as it reduces the training set significantly given the large proportion of unsent offers. However, using unsent offers in training does not make sense anyway.
###Code
# Reload the final dataset
df = pd.read_csv('data/starbucks_data_final.csv')
# Divide data for dev_set and out of bag sets
dev_set = df.query('label != 5')
oob_set = df.query('label == 5')
print('Development data size:', dev_set.shape[0])
print('Out of bag data size:', oob_set.shape[0])
X = dev_set.drop(columns='label')
y = dev_set['label']
# Train and test split with data set for development (dev_set)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
print('X train size:', X_train.shape[0])
print('y train size:', y_train.shape[0])
print('X test size:', X_test.shape[0])
print('y test size:', y_test.shape[0])
###Output
X train size: 44177
y train size: 44177
X test size: 11045
y test size: 11045
###Markdown
5d. Handle imbalanced labelsThe completion will be classified into 4 classes. In order to address the label imbalance issue, resampling will be introduced. If the labels are not balanced, the model may end up performing poorly on minority labels and will produce biased classification results.
###Code
# Plot to see the imbalance issue
sns.countplot(x = y_train);
###Output
_____no_output_____
###Markdown
The labels are highly imbalanced, with Label 1 (completed) dominating the training set. Before modeling, I will create two different train sets with oversampling and undersampling using the **imbalanced-learn** Python library.- Oversampling: [Reference](https://machinelearningmastery.com/multi-class-imbalanced-classification/)SMOTE (Synthetic Minority Oversampling Technique) oversamples (synthesizes) minority classes so all labels share the same number of examples as the class with the most examples.- Undersampling:[Reference](https://machinelearningmastery.com/undersampling-algorithms-for-imbalanced-classification/)
###Code
# Function to apply SMOTE
def oversample_data(X, y, return_results=True):
oversample = SMOTE()
X_train, y_train = oversample.fit_resample(X, y)
# summarize distribution
counter = Counter(y_train)
for k, v in counter.items():
per = v / len(y_train) * 100
print('Class=%d, n=%d (%.3f%%)' % (k, v, per))
if return_results:
# plot the distribution
plt.bar(counter.keys(), counter.values())
plt.show()
return X_train, y_train
# Oversample training set
X_train_over, y_train_over = oversample_data(X_train, y_train)
def undersample_data(X, y, return_results=True):
# define the undersampling method
undersample = NearMiss(version=1)
X_train, y_train = undersample.fit_resample(X, y)
counter = Counter(y_train)
for k, v in counter.items():
per = v / len(y_train) * 100
print('Class=%d, n=%d (%.3f%%)' % (k, v, per))
if return_results:
# plot the distribution
plt.bar(counter.keys(), counter.values())
plt.show()
return X_train, y_train
# Undersample training set
X_train_under, y_train_under = undersample_data(X_train, y_train)
###Output
Class=1, n=6500 (25.000%)
Class=2, n=6500 (25.000%)
Class=3, n=6500 (25.000%)
Class=4, n=6500 (25.000%)
###Markdown
--- SECTION 6 ModelingI would like to build a classifier model to predict offer completion given a set of variables, such as demographics, offer types, channels, etc. Each model will use a different classification algorithm and will be fit with two different data sets:- oversampled labels (_over)- undersampled labels (_under)
###Code
# Functions to scale predictors, make predictions and evaluate models
def scale_predictors(X_train, X_test):
''' Using the statistics from X_train,
standardize the values in the training sets.
INPUT:
X_train: X predictors in the train set
X_test: X predictors in the test set
OUTPUT:
X_train, X_test: scaled predictors
'''
# Instantiate scaler
scaler = StandardScaler()
# Fit and tranform X_train
X_train_scaled = scaler.fit_transform(X_train) # fitting training set only
# Transform X_test
X_test_scaled = scaler.transform(X_test)
return X_train_scaled, X_test_scaled
def model_predict(model, X_test):
''' Run predictions for a fitted classifer model
INPUT:
model: a fitted classifer model
X_test: test features used for prediction
OUTPUT:
y_pred: the predicted target variable
'''
# Make predictions
y_pred = model.predict(X_test)
return y_pred
def evaluate_model(y_test, y_pred):
''' Show test scores for classification as one go
by combining accuracy score and classification report.
INPUT:
y_test: the true y test values
y_pred: the predicted y values
OUTPUT:
None
'''
# Get test scores
print(f'Accuracy score: {accuracy_score(y_test, y_pred) * 100:.2f}%')
print(f'Classfication report:\n')
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
6a. Scale predictorsEach feature has different units and ranges of values. Scaling will help the models train more effectively.
###Code
# Scale predictors for both train and test set
X_train_over, X_test_over = scale_predictors(X_train_over, X_test)
X_train_under, X_test_under = scale_predictors(X_train_under, X_test)
print('X train size (oversampled):', X_train_over.shape[0])
print('X test size (oversampled):', X_test_over.shape[0])
print('X train size (undersampled):', X_train_under.shape[0])
print('X test size (undersampled):', X_test_under.shape[0]) # same as X_test_over
###Output
X train size (oversampled): 89220
X test size (oversampled): 11045
X train size (undersampled): 26000
X test size (undersampled): 11045
###Markdown
6b. model_01 logistic regression
###Code
# Fitting the model
# model_01o : oversampled
model_01o = LogisticRegression(random_state=123, max_iter=1000)
model_01o.fit(X_train_over, y_train_over)
# Make predictions on train and test set
ypred_01o_train = model_predict(model_01o, X_train_over)
ypred_01o_test = model_predict(model_01o, X_test_over)
# Evaluate the model performance
evaluate_model(y_train_over, ypred_01o_train)
evaluate_model(y_test, ypred_01o_test)
# Fitting the model
# model_01u : undersampled
model_01u = LogisticRegression(random_state=123, max_iter=1000)
model_01u.fit(X_train_under, y_train_under)
# Make predictions on train and test set
ypred_01u_train = model_predict(model_01u, X_train_under)
ypred_01u_test = model_predict(model_01u, X_test_under)
# Evaluate the model performance
evaluate_model(y_train_under, ypred_01u_train)
evaluate_model(y_test, ypred_01u_test)
###Output
Accuracy score: 78.45%
Classfication report:
precision recall f1-score support
1.0 0.69 0.72 0.70 6500
2.0 0.92 0.82 0.87 6500
3.0 0.74 0.68 0.71 6500
4.0 0.81 0.91 0.86 6500
accuracy 0.78 26000
macro avg 0.79 0.78 0.78 26000
weighted avg 0.79 0.78 0.78 26000
Accuracy score: 69.02%
Classfication report:
precision recall f1-score support
1.0 0.86 0.57 0.69 5573
2.0 0.93 0.82 0.87 2203
3.0 0.48 0.69 0.56 1606
4.0 0.49 0.91 0.64 1663
accuracy 0.69 11045
macro avg 0.69 0.75 0.69 11045
weighted avg 0.76 0.69 0.70 11045
###Markdown
Not a bad score across metrics for the first model (test accuracy around 73%). However, overfitting exists and there is still room to improve, given the lower f1-scores for label 3 and 4 classification on the test set. Between the two sampling methods, the model with the oversampled dataset produced slightly higher accuracy for both train and test sets. Nevertheless, training accuracy is still only around 80%, which suggests the model itself can be improved further. Let's use other machine learning classifiers. Feature importanceWhen fitting the logistic regression model, features were selected based on availability and relevance. Let's see how the model evaluates the importance of each feature by comparing their coefficients.
###Code
# Get the feature names
feature_names = X.columns.tolist()
def plot_feature_importances(series, c):
plt.barh(series.index, series.Importance, color=c)
plt.axvline(x=0, color='.5')
plt.xlabel('coefficient')
plt.ylabel('feature')
def feature_importance(model, feature_names, plot_result=True):
''' Find the importance of each feature by its coefficient.
The highest coefficient corresponds to the most important feature here.
The computed result is proportional to the highest coefficient value
among the features, so the most important feature scores 100.0.
If plot_result=True, a horizontal barplot is shown instead of returning a dataframe.
'''
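# For this multiclass model, coef_ has one row of coefficients per class;
# the first row (class 1, 'complete') is used here.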
coefs = model.coef_[0]
coefs_prop = 100.0 * (coefs / coefs.max())
result = pd.DataFrame(index=feature_names, data=coefs_prop)
result.columns = ['Importance']
result = result.sort_values(by='Importance', ascending=False)
if plot_result:
pos_data = result[result.Importance >= 0]
neg_data = result[result.Importance < 0]
plt.figure(figsize=(10,6))
plot_feature_importances(pos_data, 'b')
plot_feature_importances(neg_data, 'r')
plt.show()
else:
return result
feature_importance(model_01o, feature_names)
###Output
_____no_output_____
###Markdown
The feature importance chart shows that the type and channels of each offer contribute more to predicting completion than demographic features like age or income, even though the earlier exploratory analysis suggested these demographic factors would play some role. Let's continue with another model and evaluate the feature importance. 6c. model_02 decision tree
###Code
# Fitting the model
# model_02o : oversampled
model_02o = DecisionTreeClassifier(max_depth=12)
model_02o.fit(X_train_over, y_train_over)
# Make predictions on train and test set
ypred_02o_train = model_predict(model_02o, X_train_over)
ypred_02o_test = model_predict(model_02o, X_test_over)
# Evaluate the model performance
evaluate_model(y_train_over, ypred_02o_train)
evaluate_model(y_test, ypred_02o_test)
# Fitting the model
# model_02u : undersampled
model_02u = DecisionTreeClassifier(max_depth=12)
model_02u.fit(X_train_under, y_train_under)
# Make predictions on train and test set
ypred_02u_train = model_predict(model_02u, X_train_under)
ypred_02u_test = model_predict(model_02u, X_test_under)
# Evaluate the model performance
evaluate_model(y_train_under, ypred_02u_train)
evaluate_model(y_test, ypred_02u_test)
###Output
Accuracy score: 83.56%
Classfication report:
precision recall f1-score support
1.0 0.73 0.83 0.78 6500
2.0 0.91 0.91 0.91 6500
3.0 0.82 0.70 0.76 6500
4.0 0.89 0.90 0.90 6500
accuracy 0.84 26000
macro avg 0.84 0.84 0.84 26000
weighted avg 0.84 0.84 0.84 26000
Accuracy score: 66.34%
Classfication report:
precision recall f1-score support
1.0 0.83 0.55 0.66 5573
2.0 0.90 0.84 0.87 2203
3.0 0.43 0.63 0.51 1606
4.0 0.48 0.85 0.61 1663
accuracy 0.66 11045
macro avg 0.66 0.72 0.66 11045
weighted avg 0.73 0.66 0.67 11045
###Markdown
With the decision tree model, test accuracy increased from 73% to 76% only when the oversampling technique is applied. The undersampling technique produced a zero-division error for precision, implying zero instances of positive prediction for label 2. Feature importanceScikit-learn's decision tree API has a built-in feature importance attribute.
###Code
# The built-in method produces feature importances that are > 0
feature_result = pd.DataFrame(index=feature_names, data=model_02o.feature_importances_)
feature_result.columns = ['Importance']
feature_result = feature_result.sort_values(by='Importance', ascending=False)
plt.figure(figsize=(10,6))
plot_feature_importances(feature_result, c='b')
plt.show()
###Output
_____no_output_____
###Markdown
In the decision tree, demographic factors like income and age contributed more to predicting the offer completion. **reward**, **social** and **duration**, however, are seen more important predictors. 5d. Model_03 random forecast classifier
###Code
# Fitting the model
# model_03o : oversampled
model_03o = RandomForestClassifier(max_depth=12)
model_03o.fit(X_train_over, y_train_over)
# Make predictions on train and test set
ypred_03o_train = model_predict(model_03o, X_train_over)
ypred_03o_test = model_predict(model_03o, X_test_over)
# Evaluate the model performance
evaluate_model(y_train_over, ypred_03o_train)
evaluate_model(y_test, ypred_03o_test)
# Fitting the model
# model_03u : undersampled
model_03u = DecisionTreeClassifier(max_depth=12)
model_03u.fit(X_train_under, y_train_under)
# Make predictions on train and test set
ypred_03u_train = model_predict(model_03u, X_train_under)
ypred_03u_test = model_predict(model_03u, X_test_under)
# Evaluate the model performance
evaluate_model(y_train_under, ypred_03u_train)
evaluate_model(y_test, ypred_03u_test)
###Output
Accuracy score: 83.57%
Classfication report:
precision recall f1-score support
1.0 0.73 0.83 0.78 6500
2.0 0.91 0.91 0.91 6500
3.0 0.82 0.70 0.76 6500
4.0 0.89 0.90 0.90 6500
accuracy 0.84 26000
macro avg 0.84 0.84 0.84 26000
weighted avg 0.84 0.84 0.84 26000
Accuracy score: 66.36%
Classfication report:
precision recall f1-score support
1.0 0.82 0.55 0.66 5573
2.0 0.90 0.85 0.87 2203
3.0 0.43 0.62 0.51 1606
4.0 0.48 0.85 0.62 1663
accuracy 0.66 11045
macro avg 0.66 0.72 0.66 11045
weighted avg 0.73 0.66 0.67 11045
###Markdown
The random forest model produced slightly better test accuracy (77%) with the oversampling technique than logistic regression (73%) and the decision tree (76%). Feature importance
###Code
feature_result = pd.DataFrame(index=feature_names, data=model_03o.feature_importances_)
feature_result.columns = ['Importance']
feature_result = feature_result.sort_values(by='Importance', ascending=False)
plt.figure(figsize=(10,6))
plot_feature_importances(feature_result, c='b')
plt.show()
###Output
_____no_output_____
###Markdown
reward, amount, social, difficulty and duration play an important role in prediction when the random forest model is used. Offer type and channel are deemed more important than demographic factors, just as the logistic regression model indicated. 6e. Model_04 gradient boosting classifier
###Code
# Fitting the model
# model_04o : oversampled
model_04o = GradientBoostingClassifier()
model_04o.fit(X_train_over, y_train_over)
# Make predictions on train and test set
ypred_04o_train = model_predict(model_04o, X_train_over)
ypred_04o_test = model_predict(model_04o, X_test_over)
# Evaluate the model performance
evaluate_model(y_train_over, ypred_04o_train)
evaluate_model(y_test, ypred_04o_test)
# Fitting the model
# model_04u : undersampled
model_04u = GradientBoostingClassifier()
model_04u.fit(X_train_under, y_train_under)
# Make predictions on train and test set
ypred_04u_train = model_predict(model_04u, X_train_under)
ypred_04u_test = model_predict(model_04u, X_test_under)
# Evaluate the model performance
evaluate_model(y_train_under, ypred_04u_train)
evaluate_model(y_test, ypred_04u_test)
###Output
Accuracy score: 80.29%
Classfication report:
precision recall f1-score support
1.0 0.71 0.73 0.72 6500
2.0 0.91 0.85 0.88 6500
3.0 0.75 0.72 0.74 6500
4.0 0.84 0.90 0.87 6500
accuracy 0.80 26000
macro avg 0.80 0.80 0.80 26000
weighted avg 0.80 0.80 0.80 26000
Accuracy score: 68.06%
Classfication report:
precision recall f1-score support
1.0 0.86 0.54 0.66 5573
2.0 0.93 0.85 0.89 2203
3.0 0.44 0.72 0.54 1606
4.0 0.51 0.89 0.65 1663
accuracy 0.68 11045
macro avg 0.68 0.75 0.69 11045
weighted avg 0.76 0.68 0.69 11045
###Markdown
The gradient boosting classifier performed on par with random forest (77%) using the oversampling method. There is still room for the model to improve given the training accuracy of ~85%. Let's train the final classifier, XGBoost. 6f. Model_05 XGBoost classifier
###Code
# Fitting the model
# model_05o : oversampled
model_05o = XGBClassifier(max_depth=12)
model_05o.fit(X_train_over, y_train_over)
# Make predictions on train and test set
ypred_05o_train = model_predict(model_05o, X_train_over)
ypred_05o_test = model_predict(model_05o, X_test_over)
# Evaluate the model performance
evaluate_model(y_train_over, ypred_05o_train)
evaluate_model(y_test, ypred_05o_test)
# Fitting the model
# model_05u : undersampled
model_05u = GradientBoostingClassifier()
model_05u.fit(X_train_under, y_train_under)
# Make predictions on train and test set
ypred_05u_train = model_predict(model_05u, X_train_under)
ypred_05u_test = model_predict(model_05u, X_test_under)
# Evaluate the model performance
evaluate_model(y_train_under, ypred_05u_train)
evaluate_model(y_test, ypred_05u_test)
###Output
Accuracy score: 80.29%
Classfication report:
precision recall f1-score support
1.0 0.71 0.73 0.72 6500
2.0 0.91 0.85 0.88 6500
3.0 0.75 0.72 0.74 6500
4.0 0.84 0.90 0.87 6500
accuracy 0.80 26000
macro avg 0.80 0.80 0.80 26000
weighted avg 0.80 0.80 0.80 26000
Accuracy score: 68.07%
Classfication report:
precision recall f1-score support
1.0 0.86 0.54 0.66 5573
2.0 0.93 0.85 0.89 2203
3.0 0.44 0.72 0.54 1606
4.0 0.51 0.89 0.65 1663
accuracy 0.68 11045
macro avg 0.68 0.75 0.69 11045
weighted avg 0.76 0.68 0.69 11045
###Markdown
XGBoost classifier clearly overfits given the large difference in accuracy between training (99%) and test set (77%). XGBoost also clearly performs better with the oversampled dataset. --- SECTION 7 Build the final modelDuring the initial modeling phase, five machine learning classifiers were used: logistic regression, decision tree, random forest, gradient boosting and XGBoost. The test results are summarized as follows:| Model | Logistic Regression | Decision Tree | Random Forest | Gradient Boosting | XGBoost ||:---------------------------------: |:-----------------------: |:-----------------: |:-------------------: |:---------------------: |:-------: || Train accuracy - oversampled | 81% | 86% | 86% | 85% | 99% || Test accuracy - oversampled | 73% | 76% | **77%** | **77%** | **77%** || Train accuracy - undersampled | 78% | 84% | 84% | 80% | 80% || Test accuracy - undersampled | 69% | 67% | 67% | 68% | 68% | Random forest, gradient boosting and XGBoost share similar test accuracy. However, random forest will be used because: - it requires less computation than the other two- XGBoost overfits significantlyAlso, the oversampling method will be adopted as it performed better than undersampling for almost all classifiers. 7a. Functions to facilitate modeling
###Code
# Function to load the data for modeling
def load_data_for_modeling():
# Load the final dataset
df = pd.read_csv('data/starbucks_data_final.csv')
# Create X, y for training set
dev_set = df.query('label != 5')
X = dev_set.drop(columns='label')
y = dev_set['label']
return X, y
# Functions to preprocess data
def oversample_data(X, y, return_results=True):
oversample = SMOTE()
X_train, y_train = oversample.fit_resample(X, y)
# summarize distribution
counter = Counter(y_train)
for k, v in counter.items():
per = v / len(y_train) * 100
print('Class=%d, n=%d (%.3f%%)' % (k, v, per))
if return_results:
# plot the distribution
plt.bar(counter.keys(), counter.values())
plt.show()
return X_train, y_train
def scale_predictors(X_train, X_test):
''' Using the statistics from X_train,
standardize the values in the training sets.
This time the scaler used for the final model
will be saved for the future use.
INPUT:
X_train: X predictors in the train set
X_test: X predictors in the test set
OUTPUT:
X_train, X_test: scaled predictors
'''
# Instantiate scaler
scaler = StandardScaler()
# Fit and tranform X_train
X_train_scaled = scaler.fit_transform(X_train) # fitting training set only
# Transform X_test
X_test_scaled = scaler.transform(X_test)
# Save the scaler
# Date of build for the model name
date_built = datetime.date.today()
date_built = datetime.datetime.strftime(date_built, format='%Y-%m-%d').replace('-', '')
with open(f'models/final_model_{date_built}_scaler', 'wb') as scaler_pkl:
pickle.dump(scaler, scaler_pkl)
print(f'Scaler stored at the path: <models/final_model_{date_built}_scaler>')
return X_train_scaled, X_test_scaled
def preprocess_data(X, y):
# Set predictors and label
print('Preprocessing data...')
# Split train and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
# Oversample
print('Oversampling data...')
X_train, y_train = oversample_data(X_train, y_train)
# Standardize predictors
X_train, X_test = scale_predictors(X_train, X_test)
return X_train, X_test, y_train, y_test
# Function to build model
def build_final_model(model, X, y, param_grid=None, cv=5, scoring='accuracy', verbose=1):
''' Preprocess the data, then fit the given model.
INPUT:
model: an instantiated classifier
X, y: predictors and target from the development set
param_grid: a dictionary of hyperparameters. If None, skip parameter tuning;
otherwise run GridSearchCV and keep the best estimator.
cv, scoring, verbose: passed to GridSearchCV
OUTPUT:
final_model: the fitted model (or best estimator from the grid search)
cache: a dictionary holding the preprocessed train and test sets
'''
# Extract preprocessed data
X_train, X_test, y_train, y_test = preprocess_data(X, y)
# Save preprocessed train / test set
cache = {
'X_train': X_train,
'X_test': X_test,
'y_train': y_train,
'y_test': y_test
}
# Skip parameter tuning
if param_grid == None:
print('Training without parameter tuning')
final_model = model.fit(X_train, y_train)
print('Training has been completed.')
# Train the best estimator with parameter tuning
else:
print('Training with parameter tuning. This process may take time upto several minutes.')
gridCV = GridSearchCV(model, param_grid=param_grid, cv=cv, scoring=scoring, verbose=verbose)
gridCV.fit(X_train, y_train)
print('Training has been completed with the best hyperparameters found.', gridCV.best_params_)
final_model = gridCV.best_estimator_
return final_model, cache
# Functions for model evaluation
def model_predict(model, X_test):
''' Run predictions for a fitted classifer model
INPUT:
model: a fitted classifer model
X_test: test features used for prediction
OUTPUT:
y_pred: the predicted target variable
'''
# Make predictions
y_pred = model.predict(X_test)
return y_pred
def evaluate_model(y_test, y_pred):
''' Show test scores for classification as one go
by combining accuracy score and classification report.
INPUT:
y_test: the true y test values
y_pred: the predicted y values
OUTPUT:
None
'''
# Get test scores
print(f'Accuracy score: {accuracy_score(y_test, y_pred) * 100:.2f}%')
print(f'Classfication report:\n')
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
7b. Parameter tuning Exhaustive grid search may take a lot of time, so a randomized grid search is run instead. The range of hyperparameters replicates the workflow from the reference at [the link](https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74). Nonetheless, parameter tuning still takes time. Therefore, in this section, the actual code has been commented out and the tuned best hyperparameters are presented. To run the grid search again, uncomment the code below. The training is made compact with fewer iterations and cross-validation folds, but still takes up to 30 minutes (~2 minutes per run of 15 fits).
###Code
# # Load the dataset
# X, y = load_data_for_modeling()
# X_train, X_test, y_train, y_test = preprocess_data(X, y)
# # Instantiate the final model - random forest
# model = RandomForestClassifier()
# # Uncomment the below code to run the grid search again
# # below training takes up to 30 minutes (~2 minutes for run of 15 fits)
# # Set hyperparameters
# # Number of trees in random forest
# n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 5)]
# # Number of features to consider at every split
# max_features = ['auto', 'sqrt']
# # Maximum number of levels in tree
# max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
# max_depth.append(None)
# # Minimum number of samples required to split a node
# min_samples_split = [2, 5, 10]
# # Minimum number of samples required at each leaf node
# min_samples_leaf = [1, 2, 4]
# # Method of selecting samples for training each tree
# bootstrap = [True, False]
# # Create the random grid
# param_grid = {'n_estimators': n_estimators,
# 'max_features': max_features,
# 'max_depth': max_depth,
# 'min_samples_split': min_samples_split,
# 'min_samples_leaf': min_samples_leaf,
# 'bootstrap': bootstrap}
# # Randomized grid search
# randGridCV = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=5, cv=3, scoring='accuracy', verbose=3)
# randGridCV.fit(X_train, y_train)
# print('Best params:', randGridCV.best_params_)
# # Mean test accuracy for each iternation
# randGridCV.cv_results_['mean_test_score']
###Output
_____no_output_____
###Markdown
The random grid search produced the following combination of best hyperparameters: Best params: {'n_estimators': 1550, 'min_samples_split': 2, 'min_samples_leaf': 2, 'max_features': 'sqrt', 'max_depth': 60, 'bootstrap': False}
###Code
# Build the model
# with the best combination of hyperparameters
# Instantiate the final model - random forest
model = RandomForestClassifier(n_estimators=1550, min_samples_split=2, min_samples_leaf=2,
max_features='sqrt', max_depth=60, bootstrap=False)
# Reload the dataset
X, y = load_data_for_modeling()
# Build models
final_model, cache = build_final_model(model, X, y, param_grid=None)
# Make a prediction on train and test set
y_pred_train = model_predict(final_model, cache['X_train'])
y_pred_test = model_predict(final_model, cache['X_test'])
# Evaluate the model performance
evaluate_model(cache['y_train'], y_pred_train)
evaluate_model(cache['y_test'], y_pred_test)
###Output
Accuracy score: 99.57%
Classfication report:
precision recall f1-score support
1.0 1.00 1.00 1.00 22305
2.0 0.99 1.00 0.99 22305
3.0 1.00 1.00 1.00 22305
4.0 1.00 0.99 0.99 22305
accuracy 1.00 89220
macro avg 1.00 1.00 1.00 89220
weighted avg 1.00 1.00 1.00 89220
Accuracy score: 76.99%
Classfication report:
precision recall f1-score support
1.0 0.80 0.85 0.82 5573
2.0 0.89 0.88 0.89 2203
3.0 0.56 0.47 0.51 1606
4.0 0.68 0.65 0.66 1663
accuracy 0.77 11045
macro avg 0.73 0.71 0.72 11045
weighted avg 0.76 0.77 0.77 11045
###Markdown
The parameter tuning did not improve the model performance given that the test accuracy stays similar, while the overfitting got worse. Classification for labels 3 and 4 still performs poorly on the test set. The earlier random forest with near-default settings (only max_depth set to 12) had a test accuracy of 77%. I will try training the model with a set of lightweight hyperparameters (less computation), searched through grid search. 7c. Build the final model
###Code
# Rebuild the final model
# Instantiate the final model - random forest
model = RandomForestClassifier()
# Reload the dataset
X, y = load_data_for_modeling()
# Set params
param_grid = {
'max_depth': [2, 5, 10, 15, 20, 25],
'n_estimators': [5, 10, 50, 100]
}
# Build models
final_model, cache = build_final_model(model, X, y, param_grid=param_grid, verbose=3)
# Make a prediction on train and test set
y_pred_train = model_predict(final_model, cache['X_train'])
y_pred_test = model_predict(final_model, cache['X_test'])
# Evaluate the model performance
evaluate_model(cache['y_train'], y_pred_train)
evaluate_model(cache['y_test'], y_pred_test)
###Output
Accuracy score: 99.63%
Classfication report:
precision recall f1-score support
1.0 0.99 1.00 0.99 22305
2.0 1.00 1.00 1.00 22305
3.0 1.00 0.99 0.99 22305
4.0 1.00 1.00 1.00 22305
accuracy 1.00 89220
macro avg 1.00 1.00 1.00 89220
weighted avg 1.00 1.00 1.00 89220
Accuracy score: 77.04%
Classfication report:
precision recall f1-score support
1.0 0.80 0.84 0.82 5573
2.0 0.89 0.88 0.89 2203
3.0 0.56 0.49 0.52 1606
4.0 0.68 0.65 0.67 1663
accuracy 0.77 11045
macro avg 0.73 0.72 0.72 11045
weighted avg 0.77 0.77 0.77 11045
###Markdown
The final model was trained with a new set of lightweight hyperparameters. The model fits the training set almost perfectly, so model capacity is not the issue. However, the accuracy on the test set stays at 77% - no noticeable improvement compared with the earlier trainings. What is encouraging is that the model predicts labels 1.0 and 2.0 quite well, with F1-scores higher than 80%. In other words, this classification model works well for telling whether a potential customer will complete an offer or not, which is the original objective of this project. The limitation shows when conducting a more granular analysis of incomplete offers. For example, the model may not be so precise in telling whether customers actually viewed an offer or made a purchase when they didn't activate offers. 1. complete2. inactive : incomplete, no purchase after offer viewed3. active : incomplete, but purchased without offer viewed4. indifferent: incomplete, no purchase no viewRecall that we haven't used a lot of the data with unsent offers (93,028 records, vs 55,222 used for the training). Therefore, I suggest that we make predictions on these out-of-bag data for the next round of offer distributions. Then, a new model could be built with more data records, which might help improve the model performance at a more granular level and reduce overfitting. 7d. Store the final model
###Code
# Date of build for the model name
date_built = datetime.date.today()
date_built = datetime.datetime.strftime(date_built, format='%Y-%m-%d').replace('-', '')
print(date_built)
# Save the final model and train/set set
with open(f'models/final_model_{date_built}', 'wb') as model_pkl:
pickle.dump(final_model, model_pkl)
# Save the final model and train/set set
with open(f'models/final_model_{date_built}_cache', 'wb') as data_pkl:
pickle.dump(cache, data_pkl)
###Output
_____no_output_____
###Markdown
--- SECTION 8 ConclusionTo this point, we performed exploratory analysis on the individual datasets and trained machine learning classification models. We aimed to find how customers have interacted with different offers and, finally, to predict whether offers will lead to completion given a combination of offer types, distribution channels and customer demographics. Insights on offer completion OffersOffers were grouped into three categories: bogo, discount and informational. bogo and discount are transactional, following the funnel below, and offers are considered complete if customers were active at all stages : > offer received -> offer viewed -> purchased -> offer completed- Of those offers sent, 47% were completed, indicating that customers viewed offers and made purchases (bogo / discount) or were made aware of offers (informational).- Amongst the three offer categories, discount offers are the most difficult to redeem (on average 11.75) but have the longest duration, allowing customers to be engaged and influenced the most.- Two transactional offers marked completion rates above 60% and they both fall into the discount category. Although these two offers are more difficult to redeem (and give fewer rewards), they have longer durations and wider distribution across all existing channels, so customers had more chances to interact and be influenced. In the meantime, informational offers are considered completed when customers received and viewed offers (as they do not lead to transactions). - The two informational offers named **info000003** and **info000004** recorded high completion rates above 60%. In particular, **info000003** showed the highest completion rate (around 90%) despite a slightly shorter duration than the other. It was found that this difference may come from communication channels: other than email and mobile, **info000003** was distributed on social media as opposed to the web channel that **info000004** used. Channels- Regardless of offer type, email is always used - Completion is more likely to increase when more channels are used- Across all offer types, using social media led to more completion Demographics- Female customers have a slightly higher completion rate than male customers (53% vs 45%).- Offer completion rate is lower than 50% for age groups below 50.- Income groups between 80k and 100k have the highest completion rate (60%). Completion was at least 40% across all income groups. Model resultsThere are 5 possible behaviours identified through the funnels and each is labeled as follows.1. complete2. inactive : incomplete, no purchase after offer viewed3. active : incomplete, but purchased without offer viewed4. indifferent: incomplete, no purchase no view5. not received For this analysis, label 5 (not received) is not considered as it does not describe customer behaviour on completion at all. The final model used a random forest classifier, and was built using grid search to find the best combination of hyperparameters. The training accuracy reached 99% whereas the test accuracy stayed at 77%. The near-perfect training accuracy shows that model capacity is sufficient. However, the model does not generalize ideally to unseen data. The model, however, predicts completion fairly well. It does not sufficiently support a more granular analysis of incomplete offers - whether customers actually viewed an offer / made a purchase when offers were not activated. 
Given that the original dataset contains many unsent offers, I suggest that we re-utilize these out-of-bag data - by sending offers to those customers who are likely to complete them, and gaining more insights. The new set of insights can then feed another round of modeling using a workflow similar to the one presented in this notebook. Practically speaking, the costs of offer distribution might not be too significant when leveraging the existing digital media (social, web, email and mobile). Handling unsent offers 63% of offers are labeled unsent, possibly because previous business decisions identified that some offers would not match customers with certain characteristics, resources (time, costs, etc.) were limited, or just by chance. Therefore, in this conclusion section, we would also like to apply our classifier model to those unsent offers and find opportunities for future promotional activities.
###Code
# Reload the final dataset
df = pd.read_csv('data/starbucks_data_final.csv')
# Get the out of bag data (offer unsent)
oob_set = df.query('label == 5')
print('Out of bag data size:', oob_set.shape[0])
X = oob_set.drop(columns='label')
y = oob_set['label']
X.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 93028 entries, 0 to 148248
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 amount 93028 non-null float64
1 reward 93028 non-null float64
2 age 93028 non-null float64
3 income 93028 non-null float64
4 difficulty 93028 non-null int64
5 duration 93028 non-null int64
6 web 93028 non-null int64
7 email 93028 non-null int64
8 social 93028 non-null int64
9 mobile 93028 non-null int64
10 gender_male 93028 non-null int64
11 gender_other 93028 non-null int64
12 discount 93028 non-null int64
13 informational 93028 non-null int64
14 recency 93028 non-null int64
dtypes: float64(4), int64(11)
memory usage: 11.4 MB
###Markdown
The out-of-bag data do not need the entire preprocessing pipeline used for modeling. For example, splitting into train/test sets or oversampling is not necessary. Rescaling, however, is needed to standardize the ranges and units of values.
###Code
# Load scaler
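# Note: the date stamp in these filenames must match the date the scaler
# and model were saved earlier in this notebook.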
with open('models/final_model_20220223_scaler', 'rb') as scaler_pkl:
scaler = pickle.load(scaler_pkl)
# Load model
with open('models/final_model_20220223', 'rb') as model_pkl:
model = pickle.load(model_pkl)
# Scale the out of bag predictors
X_scaled = scaler.transform(X)
# Make predictions for offer completion
predictions = model.predict(X_scaled)
print('Number of predicted labels:', len(predictions))
print('Counts:', Counter(predictions))
# Plot the predicted labels
sns.countplot(x=predictions)
plt.xlabel('label')
plt.show()
###Output
_____no_output_____
###Markdown
California "Conservation-Consumption Score" analysisBy Ryan Menezes, Matt Stevens and Ben Welsh [A Los Angeles Times analysis published on Oct. 31, 2016](http://www.latimes.com/local/lanow/la-me-ln-water-conservation-backslide-20161018-snap-htmlstory.html), found that the overwhelming majority of California water districts increased their usage after the state eased its drought restrictions. Some of the most extreme increases were found in inland Northern California, led by the San Juan Water District near Folsom Lake.How did The Times come to that conclusion? Using the computer code that follows.**Here's how it worked.**We started by downloading data from California’s State Water Resources Control Board, which publishes a monthly accounting of each district’s water usage on its website.That data has been used by state regulators to monitor and enforce mandatory water-use reductions introduced as part of the state’s emergency drought response. Regulators ended mandatory conservation for the vast majority of urban water suppliers this spring.The state measures each district’s water savings by comparing the number of gallons it supplies to homes, businesses and institutions each month versus the same month in 2013, a baseline that precedes Gov. Jerry Brown’s proclamation of a drought State of Emergency.The code below calculates that statistic for three months this summer after restrictions were eased, then compares it against the same months in 2015. In total, 93% of 387 districts increased water usage this year. Nineteen districts were excluded because they did not report enough data to the state.California’s water districts vary greatly in size, from large urban areas like Los Angeles to small districts in the rural north. To compare suppliers and identify areas where residents use large amounts of water at home, state officials also track the total amount of water used by each district’s average resident each day.This code combines that measure with each district’s change in total summer water usage to create a ranking we’re calling a Conservation-Consumption Score. By including both factors, this statistic -- sometimes known as a z-score -- better identifies areas where residents account for increases.Some of the highest ranking districts by this score were found in Northern California and around Folsom Lake near Sacramento. The top score belonged to the San Juan Water District, the ultimate focus of our story. Import and configure analysis tools.
###Code
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from __future__ import division
%matplotlib inline
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option("display.max_columns", 500)
###Output
_____no_output_____
###Markdown
Import raw water usage data from the state
###Code
supplier_path = os.path.join(os.getcwd(), 'uw_supplier_data100516.xlsx')
SUPPLIER_TABLE = pd.read_excel(supplier_path)
###Output
_____no_output_____
###Markdown
Keep the columns we want
###Code
supplier_table = SUPPLIER_TABLE.iloc[:,[0,3,18,19,21]]
supplier_table.columns = [
'supplier_name',
'month',
'total_water_production_gallons',
'total_water_production_gallons_2013',
'residential_water_usage'
]
###Output
_____no_output_____
###Markdown
Clean them up
###Code
supplier_table['month'] = supplier_table['month'].astype(str)
supplier_table.info()
supplier_table.head()
###Output
_____no_output_____
###Markdown
Filter the data to only the three summer months in 2015 and 2016
###Code
target_months = ['2016-08-15', '2016-07-15', '2016-06-15', '2015-08-15', '2015-07-15', '2015-06-15',]
month_table = supplier_table[supplier_table['month'].isin(target_months)]
month_table.drop_duplicates(inplace=True)
"Total records: {}".format(len(supplier_table))
"Month records: {}".format(len(month_table))
###Output
_____no_output_____
###Markdown
Eliminate any suppliers who have fewer or greater than six months of data with those labels
###Code
supplier_counts = month_table.groupby("supplier_name")['supplier_name'].count().to_frame("count").reset_index()
incomplete_month_table = supplier_counts[supplier_counts['count'] != 6]
incomplete_month_table
complete_month_table = month_table[~month_table['supplier_name'].isin(incomplete_month_table['supplier_name'])]
"Complete month records: {}".format(len(complete_month_table))
###Output
_____no_output_____
###Markdown
Group and sum the total water production for each summer
###Code
summer_16_table = complete_month_table[complete_month_table['month'].isin(['2016-08-15', '2016-07-15', '2016-06-15',])]
summer_16_totals = summer_16_table.groupby("supplier_name")['total_water_production_gallons'].sum().to_frame("total_water_production_16").reset_index()
summer_16_totals.head(5)
"Summer 16 records: {}".format(len(summer_16_totals))
summer_15_table = complete_month_table[complete_month_table['month'].isin(['2015-08-15', '2015-07-15', '2015-06-15',])]
summer_15_totals = summer_15_table.groupby("supplier_name")['total_water_production_gallons'].sum().to_frame("total_water_production_15").reset_index()
summer_15_totals.head(5)
"Summer 15 records: {}".format(len(summer_15_totals))
summer_13_totals = summer_16_table.groupby("supplier_name")['total_water_production_gallons_2013'].sum().to_frame("total_water_production_13").reset_index()
summer_13_totals.head()
"Summer 13 records: {}".format(len(summer_13_totals))
###Output
_____no_output_____
###Markdown
Join those summer production totals into a combined table
###Code
summer_table = summer_16_totals.merge(summer_15_totals, on="supplier_name")
summer_table = summer_table.merge(summer_13_totals, on="supplier_name")
"Total summer records: {}".format(len(summer_table))
summer_table.head(5)
###Output
_____no_output_____
###Markdown
Calculate the percentage change of summers 15 and 16 versus the baseline of summer 2013
###Code
summer_table['savings_16'] = summer_table.apply(
lambda x: (x['total_water_production_16']-x['total_water_production_13'])/float(x['total_water_production_13']),
axis=1
)
summer_table['savings_15'] = summer_table.apply(
lambda x: (x['total_water_production_15']-x['total_water_production_13'])/float(x['total_water_production_13']),
axis=1
)
summer_table.sort_values('savings_16', ascending=False).head()
###Output
_____no_output_____
###Markdown
Calculate the difference between in that statistic between 15 and 16
###Code
summer_table['savings_change'] = summer_table.apply(
lambda x: x['savings_16']-x['savings_15'],
axis=1
)
summer_table.head(5)
###Output
_____no_output_____
###Markdown
Rank the cities that have regressed the most towards their 2013 baseline
###Code
summer_table.sort_values("savings_change", ascending=False).head()
###Output
_____no_output_____
###Markdown
Calculate the average monthly water usage per person (R-GPCD) in each district for the summer of 2016
###Code
summer_16_means = summer_16_table.groupby('supplier_name')['residential_water_usage'].mean().to_frame("residential_water_usage_mean_16").reset_index()
summer_16_means.head(5)
###Output
_____no_output_____
###Markdown
Join those water usage average to our combined table
###Code
summer_table = summer_table.merge(summer_16_means, on="supplier_name")
summer_table.head(5)
###Output
_____no_output_____
###Markdown
Calculate summary statistics to judge how many districts regressed in summer 2016
###Code
savings_16 = (summer_table.total_water_production_16.sum() - summer_table.total_water_production_13.sum()) / (summer_table.total_water_production_13.sum())
savings_15 = (summer_table.total_water_production_15.sum() - summer_table.total_water_production_13.sum()) / (summer_table.total_water_production_13.sum())
"State water use overall backslid {} percentage points".format((savings_16 - savings_15)*100)
pct_backslid = len(summer_table[summer_table['savings_change'] > 0]) / len(summer_table)
"{}% of urban districts in the state backslid".format(pct_backslid*100)
plt.figure(figsize=(16,8))
summer_table.savings_change.hist(bins=30)
plt.axvline(0, linewidth=3, c='red')
plt.axvline(savings_16 - savings_15, c='black', linewidth=3)
plt.annotate("Statewide backslide", (0.093, 48))
plt.annotate("Used less water\nin summer 2016", (-0.075, 46), color='red')
plt.annotate("Used more water\nin summer 2016", (0.25, 46), color='red')
plt.ylabel("Number of districts")
plt.xlabel("Change in water savings between summer '15 and summer '16")
###Output
_____no_output_____
###Markdown
Calculate a "Conservation-Consumption Score" that adjusts the savings change by the amount of water usage to surface the high-usage districts that regressed the most This indexed score:1. Accounts for how much a district's savings changed between the summers of 2015 and 2016 (in the numerator)2. Gives greater weight to districts with high residential water use (RGPCD). Positive scores indicate districts that backslid (in the denominator) $$CCS = \frac{SavingsChange}{\frac{1}{\sqrt{RGPCD16}}}$$
###Code
summer_table['cc_score'] = (summer_table['savings_change']) / np.sqrt(1/summer_table['residential_water_usage_mean_16'])
summer_table.sort_values("cc_score", ascending=False).head(10)
summer_table.sort_values("cc_score").head(10)
plt.figure(figsize=(16,8))
summer_table.cc_score.hist(bins=30)
plt.axvline(0, linewidth=3, c='red')
plt.annotate("Used less water\nin summer 2016", (-1.5,52), color='red')
plt.annotate("Used more water\nin summer 2016", (4.05,52), color='red')
plt.ylabel("Number of districts")
plt.xlabel("Conservation-Consumption Score")
###Output
_____no_output_____
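###Markdown
A quick worked example of how the score behaves (hypothetical numbers, not drawn from the data): the same ten-point backslide counts for more in a district whose average resident uses more water at home.
###Code
# Hypothetical inputs, not taken from the dataset: a savings_change of +0.10 at two
# different levels of residential use (R-GPCD).
for savings_change, rgpcd in [(0.10, 100), (0.10, 25)]:
    cc_score = savings_change / np.sqrt(1 / rgpcd)
    # prints 1.0 at 100 R-GPCD and 0.5 at 25 R-GPCD
    print("{} {} {}".format(savings_change, rgpcd, cc_score))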
###Markdown
Write the combined table out to a CSV
###Code
summer_table.sort_values("cc_score", ascending=False).to_csv("analysis.csv", index=False)
###Output
_____no_output_____
###Markdown
Count occurrences of specific words or types of words in both types of statements. Visualize the proportion
###Code
from collections import Counter
right=0
wrong=0
nlp = spacy.load("en_core_web_sm")
def visualizeFeature(name, tcount, fcount):
labels = ['Truthful', 'Deceptive']
sizes = [tcount, fcount]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90, textprops={'color': "black"})
printmd(
f"\n\n## **{name}:** {labels[0]} {tcount} {labels[1]} {fcount}")
plt.show()
###Output
_____no_output_____
###Markdown
Significance tests

H0: Words from the wordlist are equally likely to occur in truthful and deceptive statements.

HA: Words from the wordlist occur in truthful and deceptive statements with different probability.

**Sample**: measure the truthful/deceptive proportion in the data.

**Simulation**: Generate 1000 pairs of 160 docs, randomly inserting words of interest according to the probability with which they occur in the text under H0. Of course, this kind of simulation is rather naïve, since actual language places a lot more constraints on the simulation. Theoretically, an advanced generative model could be used to generate better data. However, such a simulation may be skewed, because generative models are typically trained on truthful data.

**Assumption**: If fewer than 1% of the simulations show a truthful/deceptive proportion equal to or less than (or greater than, for proportions > 1) the measured one, reject H0.

If H0 is rejected, the feature can be used to predict the veracity of a statement.
###Code
#generate a set of "truthful" and "deceptive" "documents" with the same number of words
#count "occurrences" of a "word" that has a probability prob to appear in a "document"
def simstat(numdoc, numwords, prob):
tcount = 0
for i in range(numdoc):
for j in range(numwords):
if(random.uniform(0, 1) < prob):
tcount += 1
return tcount
#simulation: 1000 times generate 2 random documents, each containing words_per_doc
def simulation(numdoc, numwords, prob):
sim = []
for k in range(1000):
sim.append(simstat(numdoc, int(words_per_doc), prob) /
simstat(numdoc, int(words_per_doc), prob))
return sim
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
def visualizeSimulation(sim, measured_proportion):
#sort sim
sim = sorted(sim)
bottom = sim[int(0.01*len(sim))]
top = sim[int(0.99*len(sim))]
#count members of sim that are below the measured proportion
below = sum(map(lambda x: x < measured_proportion, sim))
#above = sum(map(lambda x: x > 1/measured_proportion, sim))
alpha = 0.01
pfactor = below/len(sim)
plt.hist(sim, density=False, bins=30) # density=False would make counts
plt.ylabel('Frequency')
plt.xlabel('Proportion')
plt.show()
print(
f"Probability of getting a ratio at or below {measured_proportion:.2f}: {pfactor*100:.2f}%")
if pfactor < alpha:
printmd("Feature can be used for veracity assessment\n", color="green")
else:
printmd("Feature cannot be used for veracity assessment\n", color="red")
###Output
_____no_output_____
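###Markdown
As a small, self-contained illustration of the decision rule (made-up numbers, not results from this dataset): if only 4 of 1000 simulated proportions fall at or below the measured proportion, the estimated p-value is 0.004 < 0.01 and H0 is rejected.
###Code
# Made-up numbers, purely to illustrate the decision rule.
simulated = [0.69] * 4 + [0.80] * 996  # stand-in for 1000 simulated truthful/deceptive proportions
measured = 0.70
p_value = sum(s <= measured for s in simulated) / len(simulated)  # 4 / 1000 = 0.004
print(p_value, p_value < 0.01)  # 0.004 True -> reject H0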
###Markdown
Extract a few language features that Pennebaker claims can be used to assess the veracity of written text.

The built-in "count_by" method of spaCy cannot be used because we want the ability to count not just specific POS, but specific POS that are also part of a short list. For example, from the auxiliary verb group we only care about the modal verbs. As it turns out, further splitting that group into two yields really good results in terms of the distribution between deceptive and truthful statements.

The idea is to count the occurrences of members of each feature group in deceptive as well as truthful statements, and if they are unbalanced, perform a significance test.
###Code
import matplotlib.pyplot as plt
from spacy.tokens import Doc
def count_words(doc, type, wordlist):
alloftype = [token.lower_ for token in doc if token.pos_ == type]
if wordlist:
alloftype = [x for x in alloftype if x in wordlist]
return sum(Counter(alloftype).values())
featureTypes=[
{
'name':'i-words',
'POS':'PRON',
'wordlist': ['we','i', 'me', 'myself', 'my', 'mine'],
'tcount':0,
'fcount':0,
'indicates':None
},
{
'name': 'verbs',
'POS': 'VERB',
'wordlist': None,
'tcount': 0,
'fcount': 0,
'indicates': None
},
{
'name': 'articles',
'POS': 'DET',
'wordlist': ['a', 'an', 'the'],
'tcount':0,
'fcount':0,
'indicates':None
},
{
'name': 'modal verbs 1',
'POS': 'AUX',
'wordlist': ["could", "should"],
'tcount': 0,
'fcount': 0,
'indicates':None
},
{
'name': 'modal verbs 2',
'POS': 'AUX',
'wordlist': ["would", "may"],
'tcount': 0,
'fcount': 0,
'indicates':None
},
{
'name': 'cognitive verbs',
'POS': 'VERB',
'wordlist': ['realize' , 'think', 'understand', 'figure', 'derive', "know", "believe", "recognize", "appreciates"],
'tcount': 0,
'fcount': 0,
'indicates':None
},
{
'name': 'interjections',
'POS': 'INTJ',
'wordlist': None,
'tcount': 0,
'fcount': 0,
'indicates': None
}
]
twcount = 0
fwcount = 0
docs = []
labels = []
for index, row in df.iterrows():
text = row['Transcription']
doc = nlp(text)
docs.append(doc)
labels.append(row['Type'])
cdoc = Doc.from_docs(docs)
for doc, label in zip(docs, labels):
for feature in featureTypes:
if label == 'Truthful':
feature['tcount'] += count_words(doc, feature['POS'], feature['wordlist'])
else:
feature['fcount'] += count_words(doc, feature['POS'], feature['wordlist'])
if label == 'Truthful':
twcount += len(doc)
else:
fwcount += len(doc)
numdocs = len(docs)
total_wordcount = twcount + fwcount
words_per_doc = total_wordcount/len(docs)
for feature in featureTypes:
listlen = len(feature['wordlist']) if feature['wordlist'] else 1
global_occurences = feature['tcount'] + feature['fcount']
visualizeFeature(feature['name'], feature['tcount'], feature['fcount'])
prob = global_occurences/total_wordcount
sim = simulation(numdocs, words_per_doc, prob)
measured_proportion = feature['tcount']/feature['fcount']
if measured_proportion > 1. :
measured_proportion = 1./measured_proportion
feature['indicates'] = 'truthful'
else:
feature['indicates'] = 'deceptive'
visualizeSimulation(sim, measured_proportion)
###Output
_____no_output_____
###Markdown
Next steps: construct features by checking for the presence of multiple significant words in a statement. Perhaps add a score: either 1 for each significant word present, or different weights based on the calculated significance.
###Code
def pos_list(doc, pos):
pos_list = [token.lemma for token in doc if token.pos_ == pos]
return Counter(pos_list)
def rwratio(lieword, trueword, counter):
right = 0
wrong = 0
if counter[lieword] > counter[trueword] and label == 'Truthful':
wrong += 1
elif counter[lieword] < counter[trueword] and label == 'Deceptive':
wrong += 1
elif counter[lieword] != counter[trueword]:
right += 1
return right, wrong
MINSCORE = 0
def veval(doc, features):
tscore = 0
fscore = 0
for feature in features:
fcount = count_words(doc, feature['POS'], feature['wordlist'])
if feature['indicates'] == 'truthful':
tscore += fcount
else:
fscore += fcount
#print(f"{feature['name']} {fcount}")
score = tscore - fscore/2 #compensate for the feature set being unbalanced
if score == 0:
return score
elif score > 0:
return 'Truthful'
else:
return 'Deceptive'
right = 0
wrong = 0
usable_features = ['modal verbs 1', 'modal verbs 2', 'cognitive verbs']
features = [x for x in featureTypes if x['name'] in usable_features]
for doc, label in zip(docs, labels):
score = veval(doc, features)
if score != 0:
if score == label:
right += 1
else:
wrong += 1
print(f"Right: {right}")
print(f"Wrong: {wrong}")
print(f"Accuracy: {right/(right+wrong)*100:.2f}%")
###Output
Right: 126
Wrong: 94
Accuracy: 57.27%
###Markdown
Importing the required libraries
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
df = pd.read_csv('data/owid-covid-latest.csv')
df.info()
df.head(10)
###Output
_____no_output_____
###Markdown
Continent specific visualisations
###Code
continent_obj = df.groupby('continent')
asia_df = continent_obj.get_group('Asia')
na_df = continent_obj.get_group('North America')
sa_df = continent_obj.get_group('South America')
###Output
_____no_output_____
###Markdown
ASIA
###Code
asia_df.head()
asia_df.drop(['last_updated_date','new_cases_smoothed','new_deaths_smoothed','new_cases_smoothed_per_million','new_deaths_smoothed_per_million','icu_patients','hosp_patients','weekly_icu_admissions','weekly_hosp_admissions','new_tests','new_tests_per_thousand','new_tests_smoothed','new_tests_smoothed_per_thousand','new_vaccinations','new_vaccinations_smoothed','total_vaccinations_per_hundred','people_vaccinated_per_hundred','people_fully_vaccinated_per_hundred','new_vaccinations_smoothed_per_million'],axis=1,inplace=True)
###Output
/Users/thegeorgejoseph/opt/anaconda3/envs/proton/lib/python3.8/site-packages/pandas/core/frame.py:4167: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
return super().drop(
###Markdown
Dealing with Null Data
###Code
asia_df.shape
#logic
asia_df.dropna(axis=0,subset=['total_cases','total_deaths'],inplace=True,how='any')
df.dropna(axis=0,subset=['total_cases','total_deaths'],inplace=True,how='any')
print(asia_df.columns)
###Output
Index(['iso_code', 'continent', 'location', 'total_cases', 'new_cases',
'total_deaths', 'new_deaths', 'total_cases_per_million',
'new_cases_per_million', 'total_deaths_per_million',
'new_deaths_per_million', 'reproduction_rate',
'icu_patients_per_million', 'hosp_patients_per_million',
'weekly_icu_admissions_per_million',
'weekly_hosp_admissions_per_million', 'total_tests',
'total_tests_per_thousand', 'positive_rate', 'tests_per_case',
'tests_units', 'total_vaccinations', 'people_vaccinated',
'people_fully_vaccinated', 'stringency_index', 'population',
'population_density', 'median_age', 'aged_65_older', 'aged_70_older',
'gdp_per_capita', 'extreme_poverty', 'cardiovasc_death_rate',
'diabetes_prevalence', 'female_smokers', 'male_smokers',
'handwashing_facilities', 'hospital_beds_per_thousand',
'life_expectancy', 'human_development_index'],
dtype='object')
###Markdown
Bubble Maps
###Code
fig = px.scatter_geo(df, locations="iso_code",
size="total_cases", # size of markers, "pop" is one of the columns of gapminder
)
fig.show()
###Output
_____no_output_____
###Markdown
![](svd.gif)
###Code
v = array(ex.metrics['val_loss'])
v = v-v.mean()
v = v/v.std()
s = array(srs[1:])
s = s-s.mean()
s = s/s.std()
###Output
_____no_output_____
###Markdown
pyloess is used for smoothing. It can be found on [github](https://github.com/joaofig/pyloess)
###Code
sys.path.append(os.path.abspath('../others/pyloess/'))
from pyloess.Loess import Loess
###Output
_____no_output_____
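###Markdown
A minimal smoothing sketch on synthetic data, assuming the `Loess(x, y)` constructor and `estimate(x, window=...)` method that are used later in this notebook:
###Code
import numpy as np

# Synthetic noisy signal, purely for illustration.
x_demo = np.linspace(0, 10, 50)
y_demo = np.sin(x_demo) + np.random.normal(0, 0.2, size=x_demo.size)
loess_demo = Loess(x_demo, y_demo)
y_smooth = np.array([loess_demo.estimate(x, window=10) for x in x_demo])
print(y_smooth[:5])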
###Markdown
Import data: death data, 20th century
###Code
project_path = pathlib.Path.cwd()
dataset_path = project_path / 'Datasets'
figure_path = project_path / 'Figures'
metadata = []
description = []
data = {}
other = {}
datset_path_20th = dataset_path / '20thcenturymortality'
for f in datset_path_20th.glob('*'):
if f.suffix in ['.xls', '.xlsx']:
icd_v = f.stem.split('_')[0].lower()
print(f.stem)
excel_data = pd.read_excel(f, sheet_name=None, dtype=str)
for k, v in excel_data.items():
if k == 'metadata':
metadata.append(v)
elif k == 'description':
#Add the ICD version to the data
v['ICD_V'] = icd_v
v.rename(columns = {'CODE': 'icdcode', 'DESCRIPTION': 'description1', 'description': 'description1'}, inplace=True)
v.icdcode = v.icdcode.str.strip('*')
description.append(v)
elif 'icd' in k.lower():
icd_col = v.columns[0]
v['ICD_V'] = icd_v
v.rename(columns = {'yr':'year', 'sex':'gender', 'ndths':'numdeaths', icd_col:'icdcode'}, inplace=True)
v.icdcode = v.icdcode.str.strip('*')
data[k.lower()] = v.astype({'year':int, 'gender':int, 'numdeaths':int, 'age':str, 'icdcode':str, 'ICD_V':str})
#print(data[k].dtypes)
else:
#print(f.parent.stem, k)
other[f'{f.parent.stem}_{k}'] = v
description = pd.concat(description)#.set_index(['ICD_V', 'icdcode'])
def get_category(x):
if any(word in x.description for word in ['fever']):
return 'infectious'
elif x.description == 'Unknown':
return 'error'
else:
return ''
def get_description(x):
try:
result = ICD_key[ICD_key.icdcode == x]['description1'].values[0]
return result
except IndexError:
print(f'{icd} contains unknown code: {x}')
return 'Unknown'
data['icd1'].icdcode = data['icd1'].icdcode.str.lstrip('0')
for icd, dat in data.items():
ICD_id = f"icd{icd.split('_')[0][-1]}"
ICD_key = description[description.ICD_V == ICD_id]
print(icd, ICD_id)
dat['description'] = dat.icdcode.apply(get_description)
data[icd] = dat.astype({'year':int, 'gender':int, 'numdeaths':int, 'age':str, 'icdcode':str, 'ICD_V':str})
print("done")
keysets = [s.split('_') for s in data.keys()]
keylists = defaultdict(list)
for v in keysets:
if len(v) > 1:
keylists[v[0]].append(v[1])
else:
keylists[v[0]] = []
for icd, vl in keylists.items():
dat = []
for v in vl:
dat.append(data.pop(f'{icd}_{v}'))
if dat:
data[icd] = pd.concat(dat)
###Output
_____no_output_____
###Markdown
Death data 21st century
###Code
excel_data = pd.read_excel(dataset_path/'21stcenturymortality2019final.xls', sheet_name=None)
dats = []
for k in [j for j in excel_data.keys() if '20' in j]:
dat = excel_data[k]
dat.columns = dat.iloc[0].to_list()
dat['ICD_V'] = 'icd10'
dat.rename(columns = {'ICD-10': 'icdcode', 'YEAR': 'year', 'YR': 'year', 'SEX': 'gender', 'AGE':'age', 'ICD10': 'icdcode', 'Year': 'year', 'Sex': 'gender', 'Age':'age', 'NDTHS': 'numdeaths'}, inplace=True)
dats.append(dat[['icdcode', 'year', 'gender', 'age', 'numdeaths', 'ICD_V']][1:])
#print(dat.columns)
dats = pd.concat(dats)
data['icd10'] = dats
###Output
_____no_output_____
###Markdown
Population data
###Code
excel_data = pd.read_excel(dataset_path/'ukpopulationestimates_1851-2014.xlsx', sheet_name=None)#, dtype=str)
totals = excel_data['UK Total Pop 1851-2014'].iloc[0:149]
totals.columns = totals.iloc[0].to_list()
totals = totals[['Year', 'Total Population']].iloc[34:].astype({'Year':int, 'Total Population':int})
totals.rename(columns={'Year':'year', 'Total Population': 'total_pop'}, inplace=True)
initial = excel_data['UK Quinary 1953-1970'].iloc[:20]
initial_male = excel_data['UK Quinary 1953-1970'].iloc[23:42]
initial_female = excel_data['UK Quinary 1953-1970'].iloc[45:64]
initial_columns = [s[4:] if 'Mid' in s else s for s in initial.iloc[0].to_list()]
initial.columns = initial_columns
initial_male.columns = initial_columns
initial_female.columns = initial_columns
initial.drop(['Code', 'Name'], inplace=True, axis=1)
initial_male.drop(['Code', 'Name'], inplace=True, axis=1)
initial_female.drop(['Code', 'Name'], inplace=True, axis=1)
initial = initial.iloc[1:].set_index('Age')
initial_male = initial_male.iloc[1:].set_index('Age')
initial_female = initial_female.iloc[1:].set_index('Age')
initial_corrected_columns = initial.columns[:12]
initial[initial_corrected_columns] = initial[initial_corrected_columns]*1000
initial_male[initial_corrected_columns] = initial_male[initial_corrected_columns]*1000
initial_female[initial_corrected_columns] = initial_female[initial_corrected_columns]*1000
initial = initial.astype(int)
initial_male = initial_male.astype(int)
initial_female = initial_female.astype(int)
totals['Male'] = totals.total_pop/ 2
totals['Female'] = totals.total_pop/ 2
total_male = excel_data['UK Quinary 1953-1970'].iloc[23].values.copy()
total_male = total_male[3:]
total_male[:12] = total_male[:12]*1000
total_female = excel_data['UK Quinary 1953-1970'].iloc[45].values.copy()
total_female = total_female[3:]
total_female[:12] = total_female[:12]*1000
totals.loc[(totals.year > 1952) & (totals.year < 1971), 'Male'] = total_male
totals.loc[(totals.year > 1952) & (totals.year < 1971), 'Female'] = total_female
total_male = excel_data['UK SYOA 1971-2014'].iloc[96].values.copy()
total_female = excel_data['UK SYOA 1971-2014'].iloc[191].values.copy()
total_male = total_male[3:]
total_female = total_female[3:]
totals.loc[totals.year > 1970, 'Male'] = total_male
totals.loc[totals.year > 1970, 'Female'] = total_female
totals = totals.astype({'Male':int, 'Female':int})
final = excel_data['UK SYOA 1971-2014'].iloc[:93]
final_male = excel_data['UK SYOA 1971-2014'].iloc[97:188]
final_female = excel_data['UK SYOA 1971-2014'].iloc[192:283]
final_columns = [s[4:] if 'Mid' in s else s for s in final.iloc[0].to_list()]
final.columns = final_columns
final_male.columns = final_columns
final_female.columns = final_columns
final.drop(['Code', 'Name'], inplace=True, axis=1)
final_male.drop(['Code', 'Name'], inplace=True, axis=1)
final_female.drop(['Code', 'Name'], inplace=True, axis=1)
final.Age = final.Age.astype(str)
final_male.Age = final_male.Age.astype(str)
final_female.Age = final_female.Age.astype(str)
final.Age[87] = '85+'
final_male.Age[182] = '85+'
final_female.Age[277] = '85+'
final = final.iloc[2:].set_index('Age').replace(':', '0').astype(int)
final_male = final_male.set_index('Age').replace(':', '0').astype(int)
final_female = final_female.set_index('Age').replace(':', '0').astype(int)
final.iloc[86] = final.iloc[86:].sum()
final_male.iloc[85] = final_male.iloc[85:].sum()
final_female.iloc[85] = final_female.iloc[85:].sum()
final = final.iloc[:86]
final_male = final_male.iloc[:86]
final_female = final_female.iloc[:86]
excel_data = pd.read_excel(dataset_path/'nomis_2021_01_15_011854.xlsx', sheet_name=None)
final_next = excel_data['United Kingdom Total']
final_next_male = excel_data['United Kingdom Male']
final_next_female = excel_data['United Kingdom Female']
final_next.columns = [str(s)[:4] for s in final_next.iloc[6].values]
final_next_male.columns = final_next.columns
final_next_female.columns = final_next.columns
total_next = pd.DataFrame(final_next.iloc[7,-5:]).reset_index()
total_next['Male'] = final_next_male.iloc[7,-5:].values
total_next['Female'] = final_next_female.iloc[7,-5:].values
total_next.columns = totals.columns
totals = totals.append(total_next).reset_index(drop=True).astype(int)
final_next = final_next.iloc[8:]
final_next_male = final_next_male.iloc[8:]
final_next_female = final_next_female.iloc[8:]
final_next.Age = [a[4:].strip(' ') for a in final_next.Age]
final_next_male.Age = [a[4:].strip(' ') for a in final_next_male.Age]
final_next_female.Age = [a[4:].strip(' ') for a in final_next_female.Age]
final_next = final_next.set_index('Age').astype(int)
final_next_male = final_next_male.set_index('Age').astype(int)
final_next_female = final_next_female.set_index('Age').astype(int)
span = [str(s) for s in range(2015, 2020)]
final = final.join(final_next[span])
final_male = final_male.join(final_next_male[span])
final_female = final_female.join(final_next_female[span])
###Output
C:\Users\domhu\Anaconda3\lib\site-packages\pandas\core\frame.py:4312: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
errors=errors,
C:\Users\domhu\Anaconda3\lib\site-packages\pandas\core\frame.py:4312: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
errors=errors,
C:\Users\domhu\Anaconda3\lib\site-packages\pandas\core\generic.py:5491: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
self[name] = value
C:\Users\domhu\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py:3418: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
exec(code_obj, self.user_global_ns, self.user_ns)
###Markdown
Figure helper functions
###Code
def setup_ax(ax, xlabel, ylabel):
ax.yaxis.set_tick_params(labelsize=10, colors="dimgrey")
ax.xaxis.set_tick_params(labelsize=10, colors="dimgrey")
ax.set_facecolor("0.97")
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.yaxis.set_label_position('left')
ax.set_ylabel(ylabel,
rotation=90,
color="dimgrey",
size=15,
labelpad=10,
verticalalignment='center',
horizontalalignment='center')
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_label_position('bottom')
ax.set_xlabel(xlabel, color="dimgrey", size=15)
ax.grid(color='w')
ax.set_xticks(np.arange(1900, 2030, 10))
ax.set_xlim(1900, 2020)
def setup_colorbar(fig, ax):
cbar_ax = fig.add_axes([0.91, 0.125, 0.02, 0.75])
cb = fig.colorbar(ax.collections[0],
ax=ax,
orientation='vertical',
extend='both',
cax=cbar_ax)
cb.set_label('fit quality',
labelpad=15,
size=13,
color="dimgrey",
rotation=270)
cb.ax.tick_params(labelsize=10, colors="grey")
###Output
_____no_output_____
###Markdown
Basic analysis: population
###Code
fig = plt.figure(figsize=(15, 4), dpi=80)
ax = fig.add_subplot(1,1,1)
ax.plot(totals.year, totals.total_pop.values/1000000)
ax.set_ylim(0, 70)
setup_ax(ax, 'year', 'Total population (millions)')
deaths = []
for k, icd in data.items():
deaths.append(icd.groupby(['year']).numdeaths.sum())
deaths = pd.concat(deaths).reset_index()
deaths['proportion'] = deaths.apply(lambda x: x.numdeaths/totals.total_pop[totals.year==x.year].values[0], axis=1)
deaths['deaths per million'] = deaths.apply(lambda x: x.numdeaths/totals.total_pop[totals.year==x.year].values[0]*1000000, axis=1)
fig = plt.figure(figsize=(15, 4), dpi=80)
ax = fig.add_subplot(1,1,1)
ax.plot(deaths.year, deaths.numdeaths.values/1000000)
ax.set_ylim(0, 0.65)
setup_ax(ax, 'year', 'Deaths (millions)')
fig = plt.figure(figsize=(15, 4), dpi=80)
ax = fig.add_subplot(1,1,1)
deaths.plot('year', 'deaths per million', xlim=(1900, 2020), legend=False, ax=ax)
ax.set_ylim(0, 16000)
setup_ax(ax, 'year', 'deaths per million')
fig.savefig(figure_path/'deaths_per_million.png')
deaths_male = []
deaths_female = []
for k, icd in data.items():
deaths_male.append(icd[icd.gender==1].groupby(['year']).numdeaths.sum())
deaths_female.append(icd[icd.gender==2].groupby(['year']).numdeaths.sum())
deaths_male = pd.concat(deaths_male).reset_index()
deaths_female = pd.concat(deaths_female).reset_index()
deaths_male['proportion'] = deaths_male.apply(lambda x: x.numdeaths/totals['Male'][totals.year==x.year].values[0], axis=1)
deaths_male['deaths per million'] = deaths_male.apply(lambda x: x.numdeaths/totals['Male'][totals.year==x.year].values[0]*1000000, axis=1)
deaths_female['proportion'] = deaths_female.apply(lambda x: x.numdeaths/totals['Female'][totals.year==x.year].values[0], axis=1)
deaths_female['deaths per million'] = deaths_female.apply(lambda x: x.numdeaths/totals['Female'][totals.year==x.year].values[0]*1000000, axis=1)
fig = plt.figure(figsize=(15, 4), dpi=80)
ax = fig.add_subplot(1,1,1)
deaths_male.plot('year', 'deaths per million', xlim=(1900, 2020), label='Male', ax=ax)#, color='b')
deaths_female.plot('year', 'deaths per million', xlim=(1900, 2020), label='Female', ax=ax)
ax.set_ylim(0, 16500)
setup_ax(ax, 'year', 'deaths per million')
fig.savefig(figure_path/'deaths_per_million_gender.png')
###Output
_____no_output_____
###Markdown
Smoothed equivalents
###Code
loess = Loess(deaths.year.values, deaths['deaths per million'].values)
step = 0.25
year_s = np.arange(deaths.year.min(), deaths.year.max() + step, step)
deaths_s = np.empty_like(year_s)
for i in range(len(year_s)):
deaths_s[i] = loess.estimate(year_s[i], window=10)
fig = plt.figure(figsize=(15, 4), dpi=80)
ax = fig.add_subplot(1,1,1)
deaths.plot('year', 'deaths per million', xlim=(1900, 2020), label='Raw data', ax=ax)
ax.plot(year_s, deaths_s, label='2.5 year averaged')
ax.set_ylim(0, 16000)
setup_ax(ax, 'year', 'deaths per million')
fig.savefig(figure_path/'deaths_per_million_smoothed.png')
###Output
_____no_output_____
###Markdown
Residuals
###Code
difference = deaths['deaths per million'].values - deaths_s[0::int(1/step)]
fig = plt.figure(figsize=(15, 4), dpi=80)
ax = fig.add_subplot(1,1,1)
ax.bar(deaths.year, difference)
ax.set_ylim(-1000, 3100)
setup_ax(ax, 'year', 'residual deaths per million')
fig.savefig(figure_path/'deaths_per_million_residuals.png')
window = 5
VarValues = np.empty_like(difference)
for count in range(0, len(difference)):
window_min = count - window
window_max = count + window
if window_min < 0:
window_min = 0
VarValues[count] = np.var(difference[window_min: window_max])
fig = plt.figure(figsize=(15, 4), dpi=80)
ax = fig.add_subplot(1,1,1)
ax.plot(deaths.year.values, VarValues)
setup_ax(ax, 'year', 'variation in deaths')
fig.savefig(figure_path/'deaths_per_million_residuals_variation.png')
###Output
_____no_output_____
###Markdown
Death breakdowns
###Code
description[description.ICD_V=='icd2']
for k in data.keys():
print(k)
data_ageless= data[k].groupby(['icdcode', 'year']).numdeaths.sum()
print(set([k[0] for k in data_ageless[data_ageless > 10000].index]))#.sort_values(ascending=False).iloc[:20])
###Output
icd1
{'1670', '1050', '460', '380', '220', '470', '1060', '210', '1180', '390', '700', '1440', '890', '60', '120', '1660', '130', '990', '1070', '680', '760'}
icd2
{'91', '151A', '40', '28B', '79A', '151B', '154B', '28A', '81B', '79C', '120A', '89&90B', '92A', '104A', '92B', '10', '64E', '6'}
icd3
{'74a(1)', '11a(1)', '90(9)', '101a', '100', '45', '91b(1)', '113,114(3)', '99c,99d', '164(2)', '99a', '90(4)', '99b', '31', '90(2)', '91b(2)', '161(1)', '49', '44', '90(7)', '129'}
icd4
{'97(3)', '11a(1)', '162b', '82a(1)', '94', '186', '159', '107', '93c', '92(2)', '46', '106c', '93b(3)', '92(5)', '93b(2)', '108', '23', '131'}
icd5
{'93d', '47b', '94a', '162c', '106b', '107(2)', '93c(1)', '97', '46b', '13b', '46c', '83a', '106c', '93c(3)', '197', '106a', '83bc'}
icd6
{'1620', '4221', '4910', '4222', '3320', '1530', '4430', '5021', '1630', '5020', '4500', '1510', '3310', '4201', '0020'}
icd7
{'3310', '4221', '4910', '4222', '3320', '4430', '5021', '1700', '5020', '4500', '1510', '1621', '4201'}
icd8
{'4910', '4369', '4850', '4339', '1519', '4379', '1740', '4280', '4409', '4109', '1621', '4123', '4319'}
icd9
{'4340', '4140', '4960', '1749', '4850', '4919', '4310', '4360', '4860', '4149', '4100', '7970', '1629'}
icd10
{'R54 ', 'I64', 'I679', 'F03 ', 'C61', 'F019', 'J180', 'J449', 'F03', 'G309', 'C509', 'J189', 'I251', 'I259', 'I219', 'J440', 'C349', 'I64 ', 'C80 ', 'C61 '}
###Markdown
Age corrections
###Code
ageset = set()
for k in data.keys():
a = data[k].age.unique()
ageset = ageset.union(a)
ages = list(ageset)
ages.sort()
print(ages)
###Output
_____no_output_____
###Markdown
Assumptions for Calculations

Assumptions necessary for the calculation of trace completion times, i.e. the point in time at which all Spans of a trace have been written to a database:

1. All Traces are completed, i.e. all Spans that are part of a Trace are in the dataset
2. All Traces are sorted by trace id and consist of an equal number of spans
###Code
df_merged['span_visibility'] = df_merged.write_time - df_merged.StartTime
starttime = 0
completion_times = [0]*len(df_merged)
firstTraceIdx = 0
lastTraceIdx = 0
write_times_tmp = []
current_trace_id = df_merged['trace_id'].iloc[0]
#init with first trace id
for index, row in df_merged.iterrows():
if row['trace_id'] == current_trace_id:
write_times_tmp.append(row['write_time'])
else:
lastTraceIdx = index
trace_completion_time = max(write_times_tmp) - starttime
write_times_tmp = []
for i in range(firstTraceIdx, lastTraceIdx):
completion_times[i] = trace_completion_time
#use starttime of root span
if row['operation_name'] == 'svc01-parent':
starttime = row['StartTime']
firstTraceIdx = index
current_trace_id = row['trace_id']
#save the write time of current span in temp list
df_merged['trace_completion_time'] = completion_times
df_merged.head(10)
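# A vectorized sketch of the same computation (an alternative, not the method used above),
# under the stated assumptions: every trace is complete and has exactly one root span
# named 'svc01-parent'. Completion time = max(write_time) within the trace - root StartTime.
root_starts = (df_merged[df_merged['operation_name'] == 'svc01-parent']
               .set_index('trace_id')['StartTime'])
trace_ends = df_merged.groupby('trace_id')['write_time'].max()
df_merged['trace_completion_time_v2'] = df_merged['trace_id'].map(trace_ends - root_starts)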
#df_merged.drop(df_merged.index[:100], inplace=True)
df_roots = df_merged.loc[df_merged['operation_name'] == 'svc01-parent']
df_roots = df_roots.sort_values(by = ['StartTime'])
# use lists (not sets) so that columns, colors and categories stay aligned when zipped
colors = ['red', 'blue']
categories = ['req-resp-lat', 'trace-visibility-lat']
columns = ['duration', 'trace_completion_time']
scatterplot = plt.figure()
ax = scatterplot.add_subplot(1, 1, 1)
for column, color, cat in zip(columns, colors, categories):
x, y = df_roots['StartTime'], df_roots[column]
ax.scatter(x, y, alpha=0.8, c=color, edgecolors='none', s=30, label=cat)
plt.title('Scatter plot')
plt.yscale("log")
plt.legend(loc=2)
plt.show()
plt.savefig('shower.pdf')
#plt.title('Visibility Delay')
figure1, axes = plt.subplots(1, 2)
#apparently we have too much stuff going on at the righthand y-axis, so we need to add extra space
#figure1.subplots_adjust(right=0.8)
#axes[0].text('Latency [µs]')
label = 'exp'
#label = 'large-baggage'
axes[0].boxplot([df_roots['duration']], labels=['Request-Response'])
#plt.savefig('latency-high-baggage.pdf')
axes[1].boxplot([df_roots['trace_completion_time']], labels=['Trace Completion Time'])
axes[1].yaxis.tick_right()
figure1.savefig('latency-'+label+'.pdf')
#plt.savefig('completion-high-baggage.pdf')
#plt.savefig('completion'+label+'.pdf')
#plot = plt.boxplot([df_roots['duration'], df_roots['completion_time']], labels=['Request-Response','Trace Completion'])
df_roots['completion_ratio'] = df_roots['trace_completion_time'] / df_roots['duration']
df_roots['completion_delta'] = df_roots['trace_completion_time'] - df_roots['duration']
figure2, axes2 = plt.subplots(1, 2)
#apparently we have too much stuff going on at the righthand y-axis, so we need to add extra space
#figure1.subplots_adjust(right=0.8)
#axes[0].text('Latency [µs]')
axes2[0].boxplot([df_roots['completion_ratio']], labels=['Completion Ratio'])
axes2[1].boxplot([df_roots['completion_delta']], labels=['Completion Delta'])
axes2[1].yaxis.tick_right()
figure2.savefig('difference-'+label+'.pdf')
df_roots = df_roots[['duration', 'SpanDuration', 'span_visibility', 'trace_completion_time']]
numpy.round(df_roots.describe().T, 2).to_csv('summary-'+label+'.csv', sep=',')
###Output
_____no_output_____
###Markdown
Reading the Facebook Graph
###Code
g = nx.read_edgelist('/Users/user/Desktop/node2vec-master/graph/facebook_combined.txt', create_using=nx.Graph(), nodetype=int)
print(nx.info(g))
sp=nx.spring_layout(g)
plt.axis('off')
###Output
Name:
Type: Graph
Number of nodes: 4039
Number of edges: 88234
Average degree: 43.6910
###Markdown
Processing the embeddings from Node2Vec
###Code
all_values = pd.read_csv('facebook.emd')['4039 128'].values
fb_dict = {'node_id': [], 'embedding': []}
for v in all_values:
v_split = [float(x) for x in v.split(' ')]
fb_dict['embedding'] += [v_split[1:]]
fb_dict['node_id'] += [int(v_split[0])]
fb_df = pd.DataFrame.from_dict(fb_dict)
fb_df.head()
###Output
_____no_output_____
###Markdown
Experiment 1: Clustering

Q 1.1: Are the nodes who took the survey the k-means centers of the embedding space?
###Code
embeddings_X = np.array([np.array(x) for x in fb_df['embedding'].values])
kmeans = KMeans(n_clusters=10, random_state=0).fit(embeddings_X)
centroids = {}
for i in range(len(kmeans.cluster_centers_)):
c_id = 'c_id_' + str(i)
centroids[c_id] = []
for e in fb_df['embedding'].values:
centroids[c_id] += [np.linalg.norm(kmeans.cluster_centers_[i] - e)]
for k in centroids.keys():
fb_df[k] = centroids[k]
cluster_to_node = {}
for label in fb_df.dtypes.index.values:
if 'c_id' in label:
cluster_to_node[label] = fb_df.sort_values(label)['node_id'].values[0]
cluster_to_node.values()
cluster_to_node
survey_nodes = [107, 1684, 1912, 3437, 0, 348, 3980, 414, 686, 698]
survey_nodes
###Output
_____no_output_____
###Markdown
Q 1.2: Are the nodes who took the survey significantly closer to the centroids than other nodes?
###Code
def node_dist(emb1, emb2):
return np.linalg.norm(emb1 - emb2)
survey_to_centroid = []
for node in survey_nodes:
node_emb = fb_df.loc[fb_df['node_id'] == node]['embedding'].values[0]
survey_to_centroid += [min([node_dist(node_emb, centroid_i) for centroid_i in kmeans.cluster_centers_])]
average_to_centroid = []
for boot_strap in range(1000):
# sampling random nodes
random_nodes = []
while len(random_nodes) != 10:
r_node = random.choice(fb_df['node_id'].values)
if r_node not in survey_nodes:
random_nodes += [r_node]
# measuring average distance to closest centroid
random_to_centroid = []
for node in random_nodes:
node_emb = fb_df.loc[fb_df['node_id'] == node]['embedding'].values[0]
random_to_centroid += [min([node_dist(node_emb, centroid_i) for centroid_i in kmeans.cluster_centers_])]
average_to_centroid += [np.mean(random_to_centroid)]
# p-value of the null hypothesis: survey nodes are closer to the centroids than other nodes
p_value = np.mean(np.mean(survey_to_centroid) < average_to_centroid)
p_value
###Output
_____no_output_____
###Markdown
Experiment 2: Semantic interpretation of Embeddings
###Code
def node_sum(node1, node2):
"""Takes in two nodes and returns the node whose embedding is closest to the sum of their embeddings"""
node1_e = np.array(fb_df.loc[fb_df['node_id'] == node1]['embedding'].values[0])
node2_e = np.array(fb_df.loc[fb_df['node_id'] == node2]['embedding'].values[0])
sum_n1_n2 = node1_e + node2_e
fb_df['dist to n12'] = [node_dist(sum_n1_n2, n) for n in fb_df['embedding'].values]
closest_node = fb_df.sort_values('dist to n12')['node_id'].values[0]
return closest_node
def node_average(node1, node2):
"""Takes in two nodes and returns the node whose embedding is closest to the sum of their embeddings"""
node1_e = np.array(fb_df.loc[fb_df['node_id'] == node1]['embedding'].values[0])
node2_e = np.array(fb_df.loc[fb_df['node_id'] == node2]['embedding'].values[0])
av_n1_n2 = (node1_e + node2_e)*0.5
fb_df['midpoint'] = [node_dist(av_n1_n2, n) for n in fb_df['embedding'].values]
closest_node = fb_df.sort_values('midpoint')['node_id'].values[0]
return closest_node
def nodes_connected(u, v):
return u in g.neighbors(v)
###Output
_____no_output_____
###Markdown
Random interesting observation (a first-try observation which in turn motivated the statistical exploration of the idea)
###Code
node_average(5, 1987) in nx.shortest_path(g, 5, 1987)
###Output
_____no_output_____
###Markdown
Q 2.1: Does the node closest to the average point between two embeddings lie on the shortest path linking them?

a) Comparing it to the probability that a random node lies on two other nodes' shortest path
###Code
# average length on shortest_path
sp = []
for i in range(1000):
two_random_nodes = np.random.choice(4039, 2, replace=False)
sp += [len(nx.shortest_path(g, two_random_nodes[0], two_random_nodes[1]))]
av_sp_len = np.mean(sp)
# proportion of random nodes whose average point lies on the shortest path
mean_av_sp = []
mean_rd_sp = []
for bootstrap in range(100):
average_on_sp = []
random_on_sp = []
for simulation in range(100):
two_random_nodes = np.random.choice(4039, 3, replace=False)
node_1, node_2, node_3 = two_random_nodes[0], two_random_nodes[1], two_random_nodes[2]
sp_n1_n2 = nx.shortest_path(g, node_1, node_2)
while len(sp_n1_n2) < av_sp_len:
two_random_nodes = np.random.choice(4039, 3, replace=False)
node_1, node_2, node_3 = two_random_nodes[0], two_random_nodes[1], two_random_nodes[2]
sp_n1_n2 = nx.shortest_path(g, node_1, node_2)
average_on_sp += [node_average(node_1, node_2) in sp_n1_n2]
random_on_sp += [node_3 in sp_n1_n2]
mean_av_sp += [np.mean(average_on_sp)]
mean_rd_sp += [np.mean(random_on_sp)]
# average nodes are more likely to be on the shortest path than a random node
p_value = np.mean(np.mean(mean_av_sp) < mean_rd_sp)
p_value
###Output
_____no_output_____
###Markdown
b) Comparing it to the probability that a node connected to one of the two nodes lies on the two nodes' shortest path
###Code
for bootstrap in range(100):
average_on_sp = []
random_on_sp = []
for simulation in range(100):
two_random_nodes = np.random.choice(4039, 2, replace=False)
node_1, node_2 = two_random_nodes[0], two_random_nodes[1]
sp_n1_n2 = nx.shortest_path(g, node_1, node_2)
while len(sp_n1_n2) < av_sp_len:
two_random_nodes = np.random.choice(4039, 3, replace=False)
node_1, node_2, node_3 = two_random_nodes[0], two_random_nodes[1], two_random_nodes[2]
sp_n1_n2 = nx.shortest_path(g, node_1, node_2)
average_on_sp += [node_average(node_1, node_2) in sp_n1_n2]
random_on_sp += [random.choice(list(g.neighbors(node_1))) in sp_n1_n2]
mean_av_sp += [np.mean(average_on_sp)]
mean_rd_sp += [np.mean(random_on_sp)]
# average nodes are more likely to be on the shortest path than a random node
p_value = np.mean(np.mean(mean_av_sp) < mean_rd_sp)
p_value
###Output
_____no_output_____
###Markdown
Analysis of the results
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import myscripts.helper as H
from myscripts.myplot import *
# matplotlib settings
%matplotlib inline
plt.style.use('/Users/sst/visual.mplstyle')
# pandas settings
pd.set_option('float_format', '{:.2e}'.format)
###Output
_____no_output_____
###Markdown
Data information
###Code
DATA = pd.read_csv('meta/old_data.csv')
DATA
###Output
_____no_output_____
###Markdown
Characteristic functions

Which characteristic functions are the best? What do they tell us?
###Code
CSV = pd.read_csv('meta/csv.csv').loc[85:]
FGR = pd.read_csv('meta/fgr.csv').loc[85:]
FGR['c'] = DATA.set_index('sample').loc[FGR['name'], 'c'].to_list()
index = pd.DataFrame({
'Two SphericalCFs': [85, 86, 94, 87, 95], # Two SphericalCFs
'SpheroidalCF': [88, 89, 96, 90, 97], # SpheroidalCF
'LognormalSphericalCF': [91, 92, 98, 93, 99], # LognormalSphericalCF
}, dtype=int)
###Output
_____no_output_____
###Markdown
Fitted PDFs

All three characteristic functions are indistinguishable by their fitted PDFs. The $R_w$ and residuals are basically the same. All functions fit the data well.
###Code
plt.figure(figsize=(24, 16))
ax = plt.subplot(131)
fitting = index.iloc[:, 0]
fgr = FGR.loc[fitting]
plot_fgr(fgr['file'], names=fgr['name'], colors=fgr['c'], normal=True, auto_rw=True)
plt.title(fitting.name)
plt.subplot(132, sharey=ax)
fitting = index.iloc[:, 1]
fgr = FGR.loc[fitting]
plot_fgr(fgr['file'], names=fgr['name'], colors=fgr['c'], normal=True, auto_rw=True)
plt.title(fitting.name)
plt.subplot(133, sharey=ax)
fitting = index.iloc[:, 2]
fgr = FGR.loc[fitting]
plot_fgr(fgr['file'], names=fgr['name'], colors=fgr['c'], normal=True, auto_rw=True)
plt.title(fitting.name)
plt.show()
###Output
_____no_output_____
###Markdown
Fitting Results

2Spherical: Nothing to say.

Spheroidal: JBNP31 prefers the polar direction while the others tend to be longer along the equatorial direction.

Lognormal: The average particle size is small.
###Code
res_dct = {}
num_rows = [4, 3, 3]
for (cf, index), num_row in zip(index.iteritems(), num_rows):
csv = CSV.loc[index]
res = H.join_result_with_std(csv['file'], column_names=csv['name'])
res = res.iloc[2:]
res_dct[cf] = res
print(res.head(num_row).to_string(col_space=16), end='\n\n')
###Output
JBNP31 JBNP32 JBNP32L JBNP33 JBNP33L
frac_b1 0.7+/-0.4 0.5+/-0.4 0.4+/-1.1 0.71+/-0.32 0.7+/-0.6
psize_Bronze_1 46+/-14 43+/-18 (4+/-5)e+01 52+/-14 53+/-23
psize_Bronze_2 21+/-16 19+/-10 22+/-23 21+/-17 24+/-28
scale_Bronze 0.54+/-0.09 0.53+/-0.11 0.30+/-0.14 0.52+/-0.09 0.39+/-0.09
JBNP31 JBNP32 JBNP32L JBNP33 JBNP33L
erad_Bronze 14.6+/-3.5 29+/-13 26+/-21 38+/-22 37+/-28
prad_Bronze (0.7+/-2.9)e+02 8+/-4 8+/-8 12+/-5 12+/-7
scale_Bronze 0.52+/-0.07 0.53+/-0.11 0.30+/-0.16 0.50+/-0.07 0.39+/-0.07
JBNP31 JBNP32 JBNP32L JBNP33 JBNP33L
psize_Bronze 25+/-21 17+/-20 (2+/-4)e+01 27+/-25 29+/-34
psig_Bronze 10.0+/-2.2 8.2+/-2.2 8.0+/-2.4 11.8+/-1.7 11.8+/-3.2
scale_Bronze 0.54+/-0.09 0.53+/-0.13 0.30+/-0.17 0.51+/-0.08 0.39+/-0.08
###Markdown
Linear Regression
###Code
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size = 0.8, random_state=10)
from sklearn.linear_model import LinearRegression
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print (linreg.intercept_)
print (linreg.coef_)
predict_result = linreg.predict(X_test)
print(predict_result)
acc = linreg.score(X_test,y_test)
print(acc)
y = y_test.flatten()
y_hat = predict_result.flatten()
fig, ax = plt.subplots(figsize=(12,6))
ax.scatter(y, y_hat)
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.show()
fig.savefig('layer.png')
house1 = [2,2,90,6,0,0,1,2000,0,1,0,0]
house2 = [2,1,83,6,0,0,1,1998,0,0,1,0]
houses = [house1, house2]  # avoid shadowing the built-in name `list`
predict_result = linreg.predict(houses)
print(predict_result)
neighbor_dict = {}
for i in range(len(df)):
location = re.findall("-(\w+)",df.location[i])[0]
if(location in neighbor_dict):
neighbor_dict[location] = neighbor_dict[location] + 1
else:
neighbor_dict[location] = 1
neighbor_dict
#countdf = pd.DataFrame(data = neighbor_dict)
countdf = pd.Series(neighbor_dict)
countdf = countdf.sort_values(ascending=False)
countdf = countdf.to_frame()
countdf
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['Arial Unicode MS']
fig = plt.figure(figsize=(12,6))
plt.bar(range(len(neighbor_dict)), (neighbor_dict.values()))
plt.xticks(range(len(neighbor_dict)), (neighbor_dict.keys()))
# # for python 2.x:
# plt.bar(range(len(D)), D.values(), align='center') # python 2.x
# plt.xticks(range(len(D)), D.keys()) # in python 2.x
plt.xticks(rotation=90)
plt.show()
fig.savefig('neighbor_name.png')
new_neighbor_dict = {}
for key,value in neighbor_dict.items():
if value >=30:
new_neighbor_dict.update({key:value})
print(new_neighbor_dict)
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['Arial Unicode MS']
fig = plt.figure(figsize=(12,6))
plt.bar(range(len(new_neighbor_dict)), (new_neighbor_dict.values()))
plt.xticks(range(len(new_neighbor_dict)), (new_neighbor_dict.keys()))
# # for python 2.x:
# plt.bar(range(len(D)), D.values(), align='center') # python 2.x
# plt.xticks(range(len(D)), D.keys()) # in python 2.x
plt.xticks(rotation=90)
plt.show()
fig.savefig('new_neighbor_name.png')
neighbor_name = []
for key,value in neighbor_dict.items():
if value >=30:
neighbor_name.append(key)
print(neighbor_name)
dic = {}
a = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
co = [-6.32730677e+04, -6.30612421e+04, -6.28865505e+04, -6.36125907e+04, -6.28302585e+04,
-6.32471820e+04, -6.29959605e+04, -6.27682295e+04, -6.28805346e+04, -6.31831148e+04,
-6.31075775e+04, -6.28579271e+04,-6.33238003e+04, -6.27960561e+04, -6.27852625e+04]
for i in range(15):
dic[neighbor_name[i]] = co[i]
print(sorted(dic.items(), key = lambda kv:(kv[1], kv[0])))
rate = []
for i in range(15):
rate.append(co[i]/-62768.2295)
rate
col_names = ['title','number_of_rooms','number_of_halls','size','layer', 'high','mid','low',
'completed_year']
col_names = col_names+neighbor_name+['price']
new_house_df = pd.DataFrame(columns = col_names)
new_house_df
for i in range(len(df)):
title = df.title[i]
d = re.findall("\d+",df.detail[i])
if(len(d)!=5):
room = int(d[0])
hall = 0
size = int(d[1])
layer = int(d[2])
completed = int(d[3])
else:
room = int(d[0])
hall = int(d[1])
size = int(d[2])
layer = int(d[3])
completed = int(d[4])
high = 0
mid = 0
low = 0
height = re.findall("\w+",df.detail[i])[2]
if(height == "高层"):
high = 1
elif(height == "中层"):
mid = 1
else:
low = 1
neighbor = re.findall("-(\w+)",df.location[i])[0]
dj = 0
mf = 0
kd = 0
jbh = 0
ws = 0
yh = 0
hcz = 0
wd = 0
rmgc = 0
xwz = 0
tsy = 0
gd = 0
drf =0
gl = 0
dmk =0
if(neighbor == "东津世纪城"):
dj = 1
elif(neighbor == "民发世界城"):
mf = 1
elif(neighbor == "凯地广场"):
kd = 1
elif(neighbor == "嘉佰惠广场"):
jbh = 1
elif(neighbor == "武商沃尔玛"):
ws = 1
elif(neighbor == "悦活荟"):
yh = 1
elif(neighbor == "火车站"):
hcz = 1
elif(neighbor == "万达广场"):
wd = 1
elif(neighbor == "人民广场"):
rmgc = 1
elif(neighbor == "新五中"):
xwz = 1
elif(neighbor == "铁四院"):
tsy = 1
elif(neighbor == "广电中心"):
gd = 1
elif(neighbor == "大润发广场"):
drf = 1
elif(neighbor == "鼓楼"):
gl = 1
elif(neighbor == "东门口"):
dmk = 1
else:
continue
p = re.findall("\d+",df.price[i])[0]
price = int(p)*10000
location = df.location[i]
new_house_df.loc[i] = [title,room,hall,size,layer,high,mid,low,completed,
dj,mf,kd,jbh,ws,yh,hcz,wd,rmgc,xwz,tsy,gd,drf,gl,dmk,price]
y1 = new_house_df[['price']]
x_names = ['number_of_rooms','number_of_halls','size','layer', 'high','mid','low',
'completed_year']
x_names = x_names+neighbor_name
X1 = new_house_df[x_names]
X1 = normalization(X1)
y1 = normalization(y1)
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
X_train1, X_test1, y_train1, y_test1 = train_test_split(X1, y1, random_state=10)
from sklearn.linear_model import LinearRegression
linreg1 = LinearRegression()
linreg1.fit(X_train1, y_train1)
print (linreg1.intercept_)
print (linreg1.coef_)
predict_result1 = linreg1.predict(X_test1)
acc1 = linreg1.score(X_test1,y_test1)
print(acc1)
y1 = y_test1.flatten()
y_hat1 = predict_result1.flatten()
fig, ax = plt.subplots(figsize=(12,6))
ax.scatter(y1, y_hat1)
ax.plot([y1.min(), y1.max()], [y1.min(), y1.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.show()
fig.savefig('newplot.png')
###Output
_____no_output_____
###Markdown
Data clustering
###Code
import numpy as np # Numpy: library for manipulating vectors and matrices
import pandas as pd # Pandas: library for manipulating tables
data = pd.read_csv('shenzhenPlatoons13.csv')
print(data.shape)
print(data.head())
#X = data[['lat', 'long', 'hour']]
X = data[['long', 'hour']]
X
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.impute import SimpleImputer
pca = PCA(n_components=2)
imputer = SimpleImputer()
Xpca = pca.fit_transform(imputer.fit_transform(X))
plt.scatter(Xpca[:, 0], Xpca[:, 1])
plt.show()
# Random seeds for reproducibility of the experiments
seeds = [11156, 28750, 3509, 20838, 5907,
20167, 10632, 26137, 12628, 13922,
1124, 32301, 17230, 21, 7432,
16445, 29820, 28931, 11104, 2741]
# Ideally, 100 bootstraps would be used
###Output
_____no_output_____
###Markdown
Determining the number of clusters
###Code
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score
from sklearn.utils import resample
result = {}
for k in range(2, 20):
result[f'k={k}'] = []
for seed in seeds:
imputer = SimpleImputer(strategy='mean')
scaler = StandardScaler()
alg = KMeans(n_clusters=k, random_state=seed)
#alg = AgglomerativeClustering(n_clusters=k, linkage='single')
Xb = resample(X, random_state=seed) # Resample the dataset (bootstrapping)
Xb = scaler.fit_transform(imputer.fit_transform(Xb))
clusters = alg.fit_predict(Xb)
result[f'k={k}'].append(silhouette_score(Xb, clusters))
result = pd.DataFrame.from_dict(result)
print(result)
result.apply(lambda x: "{:.2f} ± {:.2f}".format(x.mean(), x.std()))
import matplotlib.pyplot as plt
plt.plot(range(2, 20), result.mean())
plt.errorbar(range(2, 20), result.mean(), result.std())
plt.show()
###Output
_____no_output_____
###Markdown
Best clusters (deploying)
###Code
imputer = SimpleImputer(strategy='mean')
scaler = StandardScaler()
Xfixed = imputer.fit_transform(scaler.fit_transform(X))
alg = KMeans(n_clusters=4, random_state=seed)
clusters = alg.fit_predict(Xfixed)
for k in range(4):
plt.scatter(Xpca[np.where(clusters == k), 0], Xpca[np.where(clusters == k), 1], color=['red', 'blue', 'green', 'yellow', 'gray'][k])
plt.show()
k = 4
algorithms = {
'kmeans': KMeans(n_clusters=k),
'single': AgglomerativeClustering(n_clusters=k, linkage='single'),
'average': AgglomerativeClustering(n_clusters=k, linkage='average'),
'complete': AgglomerativeClustering(n_clusters=k, linkage='complete'),
'dbscan': DBSCAN(eps = 0.09, min_samples = 2, metric = 'euclidean'),
}
result = {}
for key, alg in algorithms.items():
result[key] = []
for seed in seeds:
imputer = SimpleImputer(strategy='mean')
scaler = StandardScaler()
Xb = scaler.fit_transform(imputer.fit_transform(resample(X, random_state=seed)))
clusters = alg.fit_predict(Xb)
result[key].append(silhouette_score(Xb, clusters))
result = pd.DataFrame.from_dict(result)
print(result)
result.apply(lambda x: "{:.2f} ± {:.2f}".format(x.mean(), x.std()))
import matplotlib.pyplot as plt
plt.boxplot([ scores for alg, scores in result.iteritems() ])
plt.xticks(1 + np.arange(result.shape[1]), result.columns)
plt.show()
###Output
_____no_output_____
###Markdown
ModCloth
###Code
dataset = 'modcloth'
df_review = pd.read_csv('./data/df_'+dataset+'.csv')
df_review['timestamp'] = pd.to_datetime(df_review['timestamp'])
df_review['fit_score'] = 0.0
df_review['fit_score'].loc[df_review['fit'] == 'Just right'] = 1.0
df_review['fit_score'].loc[df_review['fit'].isna()] = None
df_review['timestamp'] = pd.to_datetime(df_review['timestamp'])
###Output
/home/mengting/anaconda3/lib/python3.7/site-packages/pandas/core/indexing.py:190: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
self._setitem_with_indexer(indexer, value)
###Markdown
Product Selection vs. Marketing Bias (ModCloth) Chi2 test of contingency table
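The helpers `contingency_table` and `chi2_test_by_year` used below appear to be defined earlier in the notebook (not shown here); as a rough, hedged sketch of the underlying test, the cell below runs a chi-square test of independence on a small contingency table with `scipy.stats.chi2_contingency`. The counts are made up, not taken from the data.
###Code
# Hedged sketch: chi-square test of independence on a hypothetical
# customer-segment x product-size contingency table (all counts are made up).
import numpy as np
from scipy.stats import chi2_contingency

counts = np.array([[120,  80],
                   [ 60, 140]])
chi2, p_value, dof, expected = chi2_contingency(counts)
print('chi2 = {:.2f}, p = {:.4f}, dof = {}'.format(chi2, p_value, dof))
###Output
_____no_output_____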
###Code
contingency_table(df_review, ['Small', 'Large', 'All'])
chi2_test_by_year(df_review)
###Output
_____no_output_____
###Markdown
Consumer Satisfaction vs. Marketing Bias (ModCloth) 2-way ANOVA on rating score
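The `two_way_anova` helper is defined elsewhere in the notebook; the sketch below shows what a two-way ANOVA on the rating could look like with `statsmodels`. The factor columns `user_attr` and `model_attr` are assumed names and may not match the actual columns of `df_review`.
###Code
# Hedged sketch of a two-way ANOVA with statsmodels. The factor columns
# 'user_attr' and 'model_attr' are assumptions; substitute the real column names.
import statsmodels.api as sm
from statsmodels.formula.api import ols

factor_cols = ['user_attr', 'model_attr']  # assumed column names
if all(c in df_review.columns for c in factor_cols):
    model = ols('rating ~ C(user_attr) * C(model_attr)', data=df_review).fit()
    print(sm.stats.anova_lm(model, typ=2))
###Output
_____no_output_____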
###Code
_ = two_way_anova(df_review, 'rating')
plot_avg_by_segment(df_review, 'rating', (2.2,2), ['Small', 'Large'], dataset, dump=False)
###Output
_____no_output_____
###Markdown
2-way ANOVA on clothing fit feedback
###Code
_ = two_way_anova(df_review, 'fit_score')
plot_avg_by_segment(df_review, 'fit_score', (2.2,2), ['Small', 'Large'], dataset, dump=False)
###Output
_____no_output_____
###Markdown
Amazon Electronics
###Code
dataset = 'electronics'
df_review = pd.read_csv('./data/df_'+dataset+'.csv')
df_review['timestamp'] = pd.to_datetime(df_review['timestamp'])
###Output
_____no_output_____
###Markdown
Product Selection vs. Marketing Bias (Electronics) Chi2 test of contingency table
###Code
contingency_table(df_review)
###Output
contingency table
###Markdown
Consumer Satisfaction
###Code
_ = two_way_anova(df_review, 'rating')
plot_avg_by_segment(df_review, 'rating', (3.2,2), [], dataset, dump=False)
###Output
_____no_output_____
###Markdown
This notebook contains a collection of methods, including clustering, dimensionality reduction, and regression, used for exploratory testing.
###Code
data = pd.read_csv(folder + '/match_total.csv')
data = data[data.columns[~data.columns.str.contains('gk')]]
###Output
/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py:2718: DtypeWarning:
Columns (933,934,976,977) have mixed types.Specify dtype option on import or set low_memory=False.
###Markdown
**Get the label**
###Code
y = data['home_team_goal'] - data['away_team_goal']
###Output
_____no_output_____
###Markdown
Get selected feature data
###Code
'''
feature_list = ['overall_rating', # overall rating
                'potential', # potential for the overall rating to improve
                'ball_control', # ability to control a ball passed to the player
                'dribbling', # dribbling
                'agility', # agility (changing direction)
                'sprint_speed', # sprint speed
                'short_passing', # short passing
                'long_passing', # long passing
                'crossing', # crossing
                'vision', # awareness of the surroundings, which helps long passing
                'volleys', # volleys
                'curve', # curve (banana kick)
                'finishing', # finishing (scoring)
                'sliding_tackle', # sliding tackles
                'standing_tackle', # standing tackles
                'interceptions', # interceptions
                'marking', # marking
                'stamina', # stamina
                'reactions', # reaction to the surrounding play
                'positioning', # positioning
                ]
'''
feature_list = ['long_passing', 'curve', 'short_passing', 'crossing',
'overall_rating', 'ball_control', 'potential', 'reactions',
'vision', 'finishing', 'volleys', 'positioning',
'marking', 'sliding_tackle', 'standing_tackle', 'interceptions',
'dribbling', 'agility', 'sprint_speed', 'stamina']
def get_data(feat, try_data=None):
if try_data is None:
home = data[data.columns[data.columns.str.contains(feat) & data.columns.str.contains('home')]]
away = data[data.columns[data.columns.str.contains(feat) & data.columns.str.contains('away')]]
return home, away
else:
        home = try_data[try_data.columns[try_data.columns.str.contains(feat) & try_data.columns.str.contains('home')]]
        away = try_data[try_data.columns[try_data.columns.str.contains(feat) & try_data.columns.str.contains('away')]]
return home, away
home_dict = {}
away_dict = {}
home_mean = pd.DataFrame()
away_mean = pd.DataFrame()
for feat in feature_list:
a, b = get_data(feat)
home_dict[feat] = a
away_dict[feat] = b
home_mean[feat] = a.mean(axis=1)
away_mean[feat] = b.mean(axis=1)
mean_df = pd.concat([home_mean, away_mean], axis=0)
corr_mat = mean_df.corr()
corr_mat
sns.heatmap(mean_df.corr())
sns.heatmap(mean_df[np.random.permutation(mean_df.columns)].corr())
sns.heatmap(mean_df.corr('spearman'))
###Output
_____no_output_____
###Markdown
Flat Clustering
###Code
dissimilarity = 1 - np.abs(mean_df.corr())
hierarchy = linkage(squareform(dissimilarity), method='average')
labels = fcluster(hierarchy, 0.3, criterion='distance')
labels
mean_df.columns
c = Counter(labels)
c
temp = rearrange(mean_df.copy(), dict(zip(mean_df.columns, labels)))
sns.heatmap(temp)
temp.columns
hierarchy
###Output
_____no_output_____
###Markdown
DBSCAN
###Code
from sklearn.cluster import DBSCAN
dist = mean_df.corr()
X = 1 - dist.values
clustering = DBSCAN(eps=0.5, min_samples=3).fit(X)
clustering.labels_
temp = rearrange(mean_df.copy(), dict(zip(mean_df.columns, clustering.labels_.tolist())))
sns.heatmap(temp)
Counter(clustering.labels_)
temp.columns
###Output
_____no_output_____
###Markdown
Block Modeling Clustering (old version)
###Code
n_variables = 20
n_clusters = 5
cluster_size = n_variables // n_clusters
C = mean_df.corr().values
belongs_to_cluster = np.repeat(range(n_clusters), cluster_size)
def score(C):
'''
Function to assign a score to an ordered covariance matrix.
High correlations within a cluster improve the score.
    High correlations between clusters decrease the score.
'''
score = 0
for cluster in range(n_clusters):
inside_cluster = np.arange(cluster_size) + cluster * cluster_size
outside_cluster = np.setdiff1d(range(n_variables), inside_cluster)
# Belonging to the same cluster
score += np.sum(C[inside_cluster, :][:, inside_cluster])
# Belonging to different clusters
score -= np.sum(C[inside_cluster, :][:, outside_cluster])
score -= np.sum(C[outside_cluster, :][:, inside_cluster])
return score
initial_C = C
initial_score = score(C)
initial_ordering = np.arange(n_variables)
plt.figure()
plt.imshow(C, interpolation='nearest')
plt.title('Initial C')
print('Initial ordering:', initial_ordering)
print('Initial covariance matrix score:', initial_score)
# Pretty dumb greedy optimization algorithm that continuously
# swaps rows to improve the score
def swap_rows(C, var1, var2):
'''
Function to swap two rows in a covariance matrix,
updating the appropriate columns as well.
'''
D = C.copy()
D[var2, :] = C[var1, :]
D[var1, :] = C[var2, :]
E = D.copy()
E[:, var2] = D[:, var1]
E[:, var1] = D[:, var2]
return E
current_C = C
current_ordering = initial_ordering
current_score = initial_score
max_iter = 1000
for i in range(max_iter):
# Find the best row swap to make
best_C = current_C
best_ordering = current_ordering
best_score = current_score
for row1 in range(n_variables):
for row2 in range(n_variables):
if row1 == row2:
continue
option_ordering = best_ordering.copy()
option_ordering[row1] = best_ordering[row2]
option_ordering[row2] = best_ordering[row1]
option_C = swap_rows(best_C, row1, row2)
option_score = score(option_C)
if option_score > best_score:
best_C = option_C
best_ordering = option_ordering
best_score = option_score
if best_score > current_score:
# Perform the best row swap
current_C = best_C
current_ordering = best_ordering
current_score = best_score
else:
# No row swap found that improves the solution, we're done
break
# Output the result
plt.figure()
plt.imshow(current_C, interpolation='nearest')
plt.title('Best C')
print('Best ordering:', current_ordering)
print('Best score:', current_score)
print()
print('Cluster [variables assigned to this cluster]')
print('------------------------------------------------')
for cluster in range(n_clusters):
meaning_list = [mean_df.columns[i] for i in current_ordering[cluster*cluster_size:(cluster+1)*cluster_size]]
print('Cluster %02d %s' % (cluster + 1, meaning_list))
###Output
Best ordering: [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19]
Best score: -345.768936391987
Cluster [variables assigned to this cluster]
------------------------------------------------
Cluster 01 ['long_passing', 'curve', 'short_passing', 'crossing']
Cluster 02 ['overall_rating', 'ball_control', 'potential', 'reactions']
Cluster 03 ['vision', 'finishing', 'volleys', 'positioning']
Cluster 04 ['marking', 'sliding_tackle', 'standing_tackle', 'interceptions']
Cluster 05 ['dribbling', 'agility', 'sprint_speed', 'stamina']
###Markdown
Block Modeling Clustering (new version)
###Code
n_variables = 20
n_clusters = 5
cluster_size = n_variables // n_clusters
columns_num_dict = dict(zip(list(mean_df.columns), np.repeat(range(n_clusters), cluster_size)))
class_count = Counter(columns_num_dict.values())
reg = sum([i ** 2 for i in class_count.values()])
df = abs(mean_df.corr())
lamda = 2
def score(df, columns_num_dict, verbose=False):
'''
Function to assign a score to an ordered covariance matrix.
High correlations within a cluster improve the score.
    High correlations between clusters decrease the score.
'''
m, n = df.shape
C = df.values
assert m == n
score = 0
for cluster in range(n_clusters):
is_inside = np.array([columns_num_dict[i] == cluster for i in df.columns])
inside_cluster = np.arange(m)[is_inside]
outside_cluster = np.setdiff1d(range(m), inside_cluster)
# Belonging to the same cluster
score += np.sum(C[inside_cluster, :][:, inside_cluster])
# Belonging to different clusters
score -= np.sum(C[inside_cluster, :][:, outside_cluster])
score -= np.sum(C[outside_cluster, :][:, inside_cluster])
if verbose:
return score, - lamda * reg, score - lamda * reg
return score - lamda * reg
def recombinant(var1, var2, columns_num_dict):
'''
Function to swap two rows in a covariance matrix,
updating the appropriate columns as well.
'''
p1 = columns_num_dict[var1]
p2 = columns_num_dict[var2]
columns_num_dict[var1] = p2
columns_num_dict[var2] = p1
return columns_num_dict
def metamorphosis(var, j, columns_num_dict):
global reg
columns_num_dict[var] = j
class_count = Counter(columns_num_dict.values())
reg = sum([i ** 2 for i in class_count.values()])
if len(class_count.values()) != 5:
return False, columns_num_dict
return True, columns_num_dict
def rearrange(mean_df, column_dict):
col_list = []
for i in column_dict.keys():
col_list.append((column_dict[i], i))
col_list.sort()
cols = [i for _, i in col_list]
return mean_df[cols].corr()
def print_cluster(column_dict):
for i in range(n_clusters):
print('Cluster', i, end='')
print(':', end=' ')
for j in column_dict.keys():
if column_dict[j] == i:
print(j, end='\t')
print()
initial_df = df
initial_score = score(df, columns_num_dict)
intial_dict = columns_num_dict
plt.figure()
sns.heatmap(mean_df.corr())
plt.title('Initial df')
print_cluster(intial_dict)
import copy
current_df = df
current_dict = copy.deepcopy(intial_dict)
current_score = initial_score
max_iter = 1000
for p in range(max_iter // 30):
# if p == 0:
# var = np.random.choice(df.columns)
# j = np.random.randint(0, 5)
# res, d = metamorphosis(var, j, copy.deepcopy(current_dict))
# if not res:
# continue
# current_dict = d
for i in range(max_iter):
# Find the best row swap to make
best_dict = copy.deepcopy(current_dict)
best_score = current_score
for row1 in range(n_variables):
for row2 in range(n_variables):
if row1 == row2:
continue
option_dict = recombinant(df.columns[row1], df.columns[row2], copy.deepcopy(best_dict))
option_score = score(df, option_dict)
if option_score > best_score:
best_dict = copy.deepcopy(option_dict)
best_score = option_score
if best_score > current_score:
# Perform the best row swap
current_C = best_C
current_dict = copy.deepcopy(best_dict)
current_score = best_score
print_cluster(best_dict)
print(score(df, best_dict, True))
else:
# No row swap found that improves the solution, we're done
break
current_score
df = rearrange(mean_df, current_dict)
current_dict
sns.heatmap(df)
print_cluster(best_dict)
###Output
_____no_output_____
###Markdown
LDA Data Preprocessing
###Code
'''
From Zeng Xin
'''
# def avg_max_data(data, k):
# result_control = []
# for i in range(len(data)):
# curr_arr = data.iloc[i].values
# curr_arr = curr_arr[~np.isnan(curr_arr)]
# result_control.append(np.nanmean(sorted(curr_arr, reverse=True)[:k]))
# return np.array(result_control)
# for feat in tqdm(feature_list, total=len(feature_list)):
# a, b = get_data(feat)
# home_mean[feat] = avg_max_data(a, 6)
# away_mean[feat] = avg_max_data(b, 6)
# home_mean.to_csv(folder + '/home_mean.csv')
# away_mean.to_csv(folder + '/away_mean.csv')
home_mean = pd.read_csv(folder + '/home_mean.csv', index_col=0)
away_mean = pd.read_csv(folder + '/away_mean.csv', index_col=0)
y = data['home_team_goal'] - data['away_team_goal']
y.name = 'label'
lda_data = pd.concat([home_mean - away_mean, y], axis=1)
lda_data = lda_data.loc[lda_data.label != 0]
lda_data['label'] = lda_data['label'] > 0
lda_data['label']
feature_cluster_list = [df.columns[i * 4:i * 4 + 4] for i in range(5)]
ans = pd.DataFrame()
trans_list = []
minmax_list = []
for i in range(5):
val = lda_data[df.columns[i * 4:i * 4 + 4]].values
y = lda_data['label']
minmax = MinMaxScaler()
minmax.fit(val)
val = minmax.transform(val)
lda = LinearDiscriminantAnalysis(n_components=1)
lda.fit(val, y)
trans_list.append(lda)
minmax_list.append(minmax)
ans['feat' + str(i)] = lda.transform(val)[:, 0]
ans.feat0.plot(kind='hist')
trans_list[0].coef_
for i in range(5):
for j in range(4):
print(round(trans_list[i].coef_[0][j], 3), '*', df.columns[i * 4 + j], sep=' ', end=' + ')
print()
for i in range(5):
print(trans_list[i].explained_variance_ratio_)
###Output
[1.]
[1.]
[1.]
[1.]
[1.]
###Markdown
Build Evaluation System For Classification
###Code
y = lda_data['label'].astype(int)
team_features = ['buildUpPlaySpeed', 'buildUpPlayPassing', 'chanceCreationPassing', 'chanceCreationCrossing',
'chanceCreationShooting', 'defencePressure', 'defenceAggression', 'defenceTeamWidth']
mean_df
system_df = lda_data[df.columns]
system_df
sys_lda_df = pd.DataFrame()
name_list = ['passing', 'overall', 'shot', 'tackle', 'physical']
for i in range(5):
mmtrans = minmax_list[i]
ldatrans = trans_list[i]
val = system_df.iloc[:, i * 4:i * 4 + 4].values
val = ldatrans.transform(mmtrans.transform(val))
sys_lda_df[name_list[i]] = np.squeeze(val)
sys_lda_df.passing.plot(kind='hist')
X = np.array(sys_lda_df.iloc[:, [0, 2, 3, 4]]).reshape(-1, 4)
reg = sm.Logit(y, sm.add_constant(X)).fit()
reg.summary()
X = np.array(sys_lda_df.iloc[:]).reshape(-1, 5)
reg = sm.Logit(y, sm.add_constant(X)).fit()
reg.summary()
def get_team_data(feat):
home = data[data.columns[data.columns.str.endswith(feat) & data.columns.str.contains('home')]]
away = data[data.columns[data.columns.str.endswith(feat) & data.columns.str.contains('away')]]
return home, away
team_df = pd.DataFrame()
for feat in team_features:
home, away = get_team_data(feat)
team_df[feat] = (home.values - away.values)[:, 0]
print((home.values - away.values).shape)
team_df
team_df.dropna(how='all')
###Output
_____no_output_____
###Markdown
For Regression
###Code
y = data['home_team_goal'] - data['away_team_goal']
system_df = home_mean[df.columns] - away_mean[df.columns]
system_df
sys_lda_df = pd.DataFrame()
name_list = ['passing', 'general', 'shot', 'tackle', 'quality']
for i in range(5):
mmtrans = minmax_list[i]
ldatrans = trans_list[i]
val = system_df.iloc[:, i * 4:i * 4 + 4].values
val = ldatrans.transform(mmtrans.transform(val))
sys_lda_df[name_list[i]] = np.squeeze(val)
X = np.array(sys_lda_df.iloc[:, [0, 2, 3, 4]]).reshape(-1, 4)
reg = sm.OLS(y, sm.add_constant(X)).fit()
reg.summary()
X = np.array(sys_lda_df.iloc[:]).reshape(-1, 5)
reg = sm.OLS(y, sm.add_constant(X)).fit()
reg.summary()
sys_lda_df
sys_lda_df.quality.plot(kind='hist')
home_mean_df = home_mean[df.columns]
home_sys_lda_df = pd.DataFrame()
name_list = ['passing', 'general', 'shot', 'tackle', 'quality']
for i in range(5):
mmtrans = minmax_list[i]
ldatrans = trans_list[i]
val = home_mean_df.iloc[:, i * 4:i * 4 + 4].values
val = ldatrans.transform(mmtrans.transform(val))
home_sys_lda_df[name_list[i]] = np.squeeze(val)
home_score_df = home_sys_lda_df.copy()
home_score_df['passing'] = home_score_df['passing'].apply(lambda x: np.clip(7 * x - 13, 60, 100))
home_score_df['general'] = home_score_df['general'].apply(lambda x: np.clip(7 * x - 27, 60, 100))
home_score_df['shot'] = home_score_df['shot'].apply(lambda x: np.clip(7 * x + 1, 60, 100))
home_score_df['tackle'] = home_score_df['tackle'].apply(lambda x: np.clip(7 * x + 1, 60, 100))
home_score_df['quality'] = home_score_df['quality'].apply(lambda x: np.clip(7 * x - 27, 60, 100))
away_mean_df = away_mean[df.columns]
away_sys_lda_df = pd.DataFrame()
name_list = ['passing', 'general', 'shot', 'tackle', 'quality']
for i in range(5):
mmtrans = minmax_list[i]
ldatrans = trans_list[i]
val = away_mean_df.iloc[:, i * 4:i * 4 + 4].values
val = ldatrans.transform(mmtrans.transform(val))
away_sys_lda_df[name_list[i]] = np.squeeze(val)
away_score_df = away_sys_lda_df.copy()
away_score_df['passing'] = away_score_df['passing'].apply(lambda x: np.clip(7 * x - 13, 60, 100))
away_score_df['general'] = away_score_df['general'].apply(lambda x: np.clip(7 * x - 27, 60, 100))
away_score_df['shot'] = away_score_df['shot'].apply(lambda x: np.clip(7 * x + 1, 60, 100))
away_score_df['tackle'] = away_score_df['tackle'].apply(lambda x: np.clip(7 * x + 1, 60, 100))
away_score_df['quality'] = away_score_df['quality'].apply(lambda x: np.clip(7 * x - 27, 60, 100))
away_score_df.to_csv(folder + '/away_score.csv')
home_score_df.iloc[:, 3].plot(kind='hist')
away_mean_df = away_mean[df.columns]
home_score_df = home_sys_lda_df.applymap(lambda x: np.clip(80 + 5 * x, 60, 100))
home_sys_lda_df.quality.plot(kind='hist')
home_score_df
score_df.to_csv(folder + '/score.csv')
###Output
_____no_output_____
###Markdown
PCA
###Code
pca_data = MinMaxScaler().fit(mean_df).transform(mean_df)
pca_data = pd.DataFrame(pca_data, columns=mean_df.columns)
pca = PCA(n_components=10)
pca = pca.fit(pca_data)
var_exp_list = pca.explained_variance_ratio_
var_exp_list
plt.plot((1 - var_exp_list.cumsum()))
pca.components_.shape
wgt = abs(pd.DataFrame(pca.components_, columns=mean_df.columns))
sns.heatmap(wgt)
###Output
_____no_output_____
###Markdown
**potential**
###Code
home_potential, away_potential = get_data('potential')
home_potential
###Output
_____no_output_____
###Markdown
Take the average score
###Code
mean_home = home_potential.mean(axis=1)
mean_away = away_potential.mean(axis=1)
mean_diff = mean_home - mean_away
X = np.array(mean_diff).reshape(-1, 1)
reg = sm.OLS(y, sm.add_constant(X)).fit()
reg.summary()
y_pred = reg.predict(sm.add_constant(X))
plt.scatter(X, y)
plt.plot(X, y_pred, color='r')
plt.xlabel('mean_potential_difference')
plt.ylabel('score_difference')
plt.show()
###Output
_____no_output_____
###Markdown
Square, then take the average
###Code
mean_home = home_potential.apply(lambda x: x ** 2).mean(axis=1)
mean_away = away_potential.apply(lambda x: x ** 2).mean(axis=1)
mean_diff = mean_home - mean_away
X = np.array(mean_diff).reshape(-1, 1)
reg = sm.OLS(y, sm.add_constant(X)).fit()
reg.summary()
y_pred = reg.predict(sm.add_constant(X))
plt.scatter(X, y)
plt.plot(X, y_pred, color='r')
plt.xlabel('mean_potential_difference')
plt.ylabel('score_difference')
plt.show()
###Output
_____no_output_____
###Markdown
Cube, then take the average
###Code
mean_home = home_potential.apply(lambda x: x ** 3).mean(axis=1)
mean_away = away_potential.apply(lambda x: x ** 3).mean(axis=1)
mean_diff = mean_home - mean_away
X = np.array(mean_diff).reshape(-1, 1)
reg = sm.OLS(y, sm.add_constant(X)).fit()
reg.summary()
y_pred = reg.predict(sm.add_constant(X))
plt.scatter(X, y)
plt.plot(X, y_pred, color='r')
plt.xlabel('mean_potential_difference')
plt.ylabel('score_difference')
plt.show()
###Output
_____no_output_____
###Markdown
Apply the exponential, then take the average
###Code
mean_home = home_potential.apply(lambda x: np.exp(x - 100)).mean(axis=1)
mean_away = away_potential.apply(lambda x: np.exp(x - 100)).mean(axis=1)
mean_diff = mean_home - mean_away
X = np.array(mean_diff).reshape(-1, 1)
reg = sm.OLS(y, sm.add_constant(X)).fit()
reg.summary()
y_pred = reg.predict(sm.add_constant(X))
plt.scatter(X, y)
plt.plot(X, y_pred, color='r')
plt.xlabel('mean_potential_difference')
plt.ylabel('score_difference')
plt.show()
###Output
_____no_output_____
###Markdown
###Code
import matplotlib
from matplotlib.font_manager import *
import numpy as np
import matplotlib.pyplot as plt
matplotlib.rcParams['axes.unicode_minus']=False
#======= user settings start ============
# labels
labels = np.array(['Passing', 'Overall', 'Shot', 'Tackle', 'Physical'])
# number of data points
dataLenth = 5
# data
data = away_score_df.iloc[471]# data = np.array([60,70,80,90,100])
labels = [labels[i] + '(' + str(round(data[i])) + ')' for i in range(5)]
#======== user settings end ============
angles = np.linspace(0, 2*np.pi, dataLenth, endpoint=False)
data = np.concatenate((data, [data[0]])) # close the loop (append the first value)
angles = np.concatenate((angles, [angles[0]])) # close the loop
fig = plt.figure()
ax = fig.add_subplot(121, polar=True)# polar axes; 121 = 1 row, 2 columns, position 1
ax.plot(angles, data, 'bo-', linewidth=1)# plot the outline: x, y, marker/color style, line width
ax.fill(angles, data, facecolor='r', alpha=0.5)# fill color and transparency
ax.set_thetagrids(angles * 180/np.pi , labels)
ax.set_rlim(0,100)
ax.grid(True)
home_score_df.iloc[471]
away_score_df.iloc[471]
data.iloc[471].home_team_id
data.iloc[471].away_team_id
data.iloc[471].home_team_goal
data.iloc[471].away_team_goal
###Output
_____no_output_____
###Markdown
Analysis of crimes in Phoenix, AZ

The analysis of crime datasets has become a standard practice among people learning data science. Not only are these datasets rich in terms of their features, but they also offer an opportunity to study a region with much more information when combined with other datasets. Finally, these studies can be used to make a community safer using the tools of data science.

The city of Phoenix started to publish its crime dataset in November 2015 (other datasets are also [available](https://www.phoenix.gov/opendata)). The dataset is a CSV file (under the _Neighborhood and Safety_ category) which is updated daily by 11 am and includes incidents from November 1st, 2015 forward through 7 days before the posting date. The dataset used for this analysis was downloaded on 6 Feb 2017. In this analysis, I try to break down the crimes into different categories and study their daily, monthly and weekly trends.

Cleaning

I use the following packages in `Python`:

* `numpy`
* `pandas`
* `matplotlib`
* `seaborn`

I use `seaborn` only once to create a heatmap. If you don't have `seaborn` installed, the code still works without producing the heatmap.
###Code
import numpy as np
import pandas as pd
try:
# module exists
import seaborn as sns
seaborn_exists = True
except ImportError:
# module doesn't exist
    seaborn_exists = False
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
%matplotlib inline
# custom features of plots
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Helvetica Neue'
plt.rcParams['font.monospace'] = 'Helvetica Neue'
plt.rcParams['font.size'] = 12
plt.rcParams['axes.labelsize'] = 12
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 12
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['figure.titlesize'] = 12
df = pd.read_csv('./data/cleaneddataset.csv')
print (df['crime'].unique())
df.head(5)
# replace long names with short names
crimemap = {
'MOTOR VEHICLE THEFT': 'VEHICLE THEFT',
'LARCENY-THEFT': 'LARCENY THEFT',
'MURDER AND NON-NEGLIGENT MANSLAUGHTER' : 'MURDER',
'AGGRAVATED ASSAULT': 'ASSAULT'
}
df['crime'].replace(crimemap, inplace=True)
###Output
_____no_output_____
###Markdown
Less safe zipcodes

Let's see how many crimes have happened in each zipcode during the last 15 months. Only zipcodes with more than 50 crimes are plotted.
###Code
cutoff = 50
plt.figure(figsize=(15,8))
sd = df['zip'].value_counts(sort=True,ascending=True)
sd.index = sd.index.astype(int)
sd = sd[~(sd<cutoff)]
ax = sd.plot.bar()
ax.set_ylabel('Number of Incidents')
ax.set_xlabel('Zipcodes with more than '+str(cutoff)+' crimes')
plt.show()
###Output
_____no_output_____
###Markdown
Crime monthly
###Code
crime_year = pd.crosstab([df['year'],df['month']],df['crime'])
"""fig, ax = plt.subplots(nrows=1, ncols=1,figsize=(12,6))
crime_year.plot(kind='bar', stacked=False, grid=False,ax=ax)
ax.set_ylabel("number of incidents")
ax.set_xlabel("year")
ax.legend(loc = (1,0.1))
ax.set_ylim(0,3000)
plt.tight_layout()
plt.show()"""
"""ax = crime_year.plot()
ax.set_ylabel("number of incidents")
ax.set_xlabel("year")
ax.legend(loc = (1,0.1))
ax.set_ylim(0,3000)
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
plt.tight_layout()
plt.show()"""
#sns.heatmap(crime_year.T)
#plt.show()
# a set of colors to plot the bars
color_sequence = ['#1f77b4', '#ff7f0e', '#2ca02c','#d62728','#8c564b',
'#377eb8','#4daf4a','#984ea3','#f781bf']
# create the figure
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12,12), sharex=True)
k=0
for i in range(0,3):
for j in range(0,3):
ax = axes[i,j]
        # select the kth column
        crime_year_col = crime_year.iloc[:,k]
#plot the data with a selected color
crime_year_col.plot(kind='bar', ax=ax, color=color_sequence[k])
ax.legend(loc = (0,1))
# rotate the x-axis ticks
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
ax.set_xlabel('')
k+=1
plt.tight_layout()
plt.show(fig)
#df.time = pd.to_datetime(df['datetime'], format='%m/%d/%Y %H:%M')
#df.head(5)
df.groupby(['year','month'])['crime'].count().plot(kind='bar')
plt.show()
###Output
_____no_output_____
###Markdown
Weekly trends

To see the weekly trends:

| Crime | Highest | Lowest |
| --- | --- | --- |
| ARSON | Saturday (59) | Tuesday (27) |
| ASSAULT | Sunday (801) | Wednesday (636) |
| BURGLARY | Friday (2274) | Sunday (1383) |
| DRUG OFFENSE | Wednesday (1029) | Sunday (411) |
| LARCENY THEFT | Friday (5424) | Sunday (4655) |
| MURDER | Sunday (28) | Wednesday (15) |
| RAPE | Saturday (155) | Thursday (118) |
| ROBBERY | Wednesday (465) | Thursday (394) |
| VEHICLE THEFT | Friday (1221) | Thursday (1115) |

Assault increases going towards the weekend, while drug offense decreases. In fact, drug offense has its peak on Wednesdays.

Heatmap
###Code
crime_weekday = pd.crosstab(df['weekday'],df['crime'])
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12,8), sharex=True)
if seaborn_exists:
daysOfWeekList = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
#daysOfWeekList = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
crime_weekday=crime_weekday.reindex(daysOfWeekList)
ax=sns.heatmap(crime_weekday.T,annot=True, fmt="d",linewidths=0.5,cmap='RdBu_r')
ax.set_xticklabels(ax.get_xticklabels(),rotation=30)
plt.tight_layout()
plt.savefig('heatmap.png')
plt.show()
fig, axes = plt.subplots(nrows=3, ncols=3,figsize=(12,12),sharex=True)
print ('| Crime | Highest | Lowest |')
print ('| --- | --- | --- |')
k=0
for i in range(0,3):
for j in range(0,3):
ax = axes[i,j]
        # select the kth column
        crime_weakday_col = crime_weekday.iloc[:,k]
crime_name = crime_weakday_col.name
crime_max_label,crime_max_val = crime_weakday_col.idxmax(), crime_weakday_col.max()
crime_min_label,crime_min_val = crime_weakday_col.idxmin(), crime_weakday_col.min()
print ('| {} | {} ({}) | {} ({}) |'.format(crime_name,crime_max_label,crime_max_val,crime_min_label,crime_min_val))
crime_weakday_col.plot(kind='line',ax=ax,color='r',marker='o')
#crime_weakday_col.plot(kind='bar',ax=ax,color='r')
ax.legend(loc = (0,1))
ax.set_xticklabels(ax.get_xticklabels(),rotation=60)
ax.set_xlabel('')
k+=1
plt.tight_layout()
plt.show(fig)
###Output
| Crime | Highest | Lowest |
| --- | --- | --- |
| ARSON | Saturday (128) | Wednesday (92) |
| ASSAULT | Saturday (1660) | Thursday (1366) |
| BURGLARY | Friday (4297) | Sunday (2686) |
| DRUG OFFENSE | Thursday (1868) | Sunday (963) |
| LARCENY THEFT | Friday (10747) | Sunday (9360) |
| MURDER | Saturday (62) | Wednesday (31) |
###Markdown
Month Days trend
###Code
crime_monthday = pd.crosstab(df['day'],df['crime'])
fig, axes = plt.subplots(nrows=3, ncols=3,figsize=(12,12),sharex=True)
#print ('| Crime | Highest | Lowest |')
#print ('| --- | --- | --- |')
k=0
for i in range(0,3):
for j in range(0,3):
ax = axes[i,j]
        # select the kth column
        crime_monthday_col = crime_monthday.iloc[:,k]
'''crime_name = crime_weakday_col.name
crime_max_label,crime_max_val = crime_weakday_col.idxmax(), crime_weakday_col.max()
crime_min_label,crime_min_val = crime_weakday_col.idxmin(), crime_weakday_col.min()
print ('| {} | {} ({}) | {} ({}) |'.format(crime_name,crime_max_label,crime_max_val,crime_min_label,crime_min_val))'''
crime_monthday_col.plot(kind='line',ax=ax,color='r',marker='o')
ax.legend(loc = (0,1))
ax.set_xticklabels(ax.get_xticklabels(),rotation=0)
ax.set_xlabel('')
k+=1
plt.tight_layout()
plt.show(fig)
dg = pd.crosstab(df['date'],df['crime'])
for col in dg.columns:
print (col)
print (dg.sort_values(by=col,ascending=False).index[0:3])
###Output
ARSON
Index(['2017-12-27', '2018-01-16', '2016-11-14'], dtype='object', name='date')
ASSAULT
Index(['2018-01-01', '2017-08-10', '2017-04-16'], dtype='object', name='date')
BURGLARY
Index(['2017-05-01', '2016-12-13', '2017-01-20'], dtype='object', name='date')
DRUG OFFENSE
Index(['2017-01-19', '2016-03-03', '2017-02-02'], dtype='object', name='date')
LARCENY THEFT
Index(['2017-10-01', '2015-12-19', '2016-12-14'], dtype='object', name='date')
MURDER
Index(['2017-06-29', '2016-02-23', '2017-10-06'], dtype='object', name='date')
RAPE
Index(['2016-01-01', '2016-03-01', '2017-04-01'], dtype='object', name='date')
ROBBERY
Index(['2017-04-25', '2017-07-13', '2017-10-28'], dtype='object', name='date')
VEHICLE THEFT
Index(['2017-10-13', '2016-12-26', '2016-05-06'], dtype='object', name='date')
###Markdown
To do: check the zipcodes, see which crimes occur more often in each, and relate them to local businesses. For example, does the location of bars have any correlation with car theft or rape?
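A possible sketch of such a follow-up is shown below; the `bars_per_zip` counts are a made-up placeholder, since no business-location data is part of this data set.
###Code
# Hedged sketch: correlate vehicle thefts per zipcode with local business counts.
# 'bars_per_zip' is a made-up placeholder; a real analysis would join actual
# business-location data for Phoenix on the zipcode.
thefts_by_zip = (df[df['crime'] == 'VEHICLE THEFT']
                 .groupby('zip').size()
                 .rename('vehicle_thefts'))
bars_per_zip = pd.Series(np.random.poisson(5, size=thefts_by_zip.size),
                         index=thefts_by_zip.index, name='n_bars')  # placeholder counts
merged = pd.concat([thefts_by_zip, bars_per_zip], axis=1)
print(merged.corr(method='spearman'))
###Output
_____no_output_____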
###Code
daysOfWeekList = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
"""wdf = pd.crosstab(df['crime'],df['weekday'])[daysOfWeekList]
wdf.to_json('crime_weekly.json')
wdf.to_csv('crime_weekly.csv')"""
def save_crime(names):
#make sure there is no white space in the filename
for name in names:
wdf = pd.crosstab(df['weekday'],df['crime'])[name]
        wdf = pd.DataFrame(wdf).reindex(daysOfWeekList)
wdf.columns = ['count']
wdf.to_csv('./crime_weekly/'+name.replace(" ", "_")+'.csv',sep=',')
save_crime(sorted(df.crime.unique())) # for all types of crimes, rem
sorted(df.crime.unique())
###Output
_____no_output_____
###Markdown
Introduction

Task description

Data source: Wikipedia Clickstream https://meta.wikimedia.org/wiki/Research:Wikipedia_clickstream

Prepare a Jupyter Notebook that shows how to:

1. Determine which links people click on most frequently in a given article.
2. Determine the most common referrers for a given article.
3. Determine what percentage of all visitors clicked on a link within a given article.
4. Determine and visualize the most popular articles people accessed from all external search engines.

Requirements:

1. You should create a Jupyter Notebook that can be re-run to validate your tasks. Any comments in the notebook or an additional readme file are welcome.
2. You are free to choose any libraries you need.
3. It would be great if you share your code, dependencies and notebook on GitHub or any other similar platform.

Notebook technical aspects

Most libraries used in this notebook should be present in the latest Anaconda bundle distribution (**Anaconda 5.1.0**) from [here](https://repo.continuum.io/archive/). List of main packages used in the analysis:

| Package | Version | Description |
|-------------|-------------|--------------------------------------------------------|
| jupyter | >=1.0.0 | Code interpreter in browser environment engine |
| jupyterlab | >=0.31.4 | Next generation notebook environment |
| numpy | >=1.14.0 | Efficient, vectorized matrix and vector computations |
| pandas | >=0.22.0 | Data manipulation tool (tabular display, grouping) |
| matplotlib | >=2.1.2 | Basic visualization tool (2D plots) |
| seaborn | >=0.8.1 | Statistical visualizations tool |
| watermark | >=1.6.0 | Jupyter system info display |

Due to the archive size, I chose the English Wikipedia clickstream data file from June 2018, which is 314 MB in compressed form: https://dumps.wikimedia.org/other/clickstream/2018-06/clickstream-enwiki-2018-06.tsv.gz

Packages installation
###Code
# Manual for each package
#-q quite, -U upgrade if package exists and newer is avaliable
!pip install -q -U jupyterlab numpy pandas matplotlib seaborn watermark
# Tool to list basic properties of the system and python environment
%load_ext watermark
%watermark -a "Michal Dyzma" -d -m -v -p jupyterlab,numpy,pandas,matplotlib,seaborn,watermark
###Output
Michal Dyzma 2018-07-19
CPython 3.6.5
IPython 6.4.0
jupyterlab 0.32.1
numpy 1.14.3
pandas 0.23.0
matplotlib 2.2.2
seaborn 0.8.1
watermark 1.6.0
compiler : MSC v.1900 64 bit (AMD64)
system : Windows
release : 10
machine : AMD64
processor : Intel64 Family 6 Model 60 Stepping 3, GenuineIntel
CPU cores : 4
interpreter: 64bit
###Markdown
Analysis Data description

https://meta.wikimedia.org/wiki/Research:Wikipedia_clickstream

For each release, and for several Wikipedia language versions, we take one month's worth of requests for articles in the main namespace. Referrers are mapped to a fixed set of values, based on this scheme:

* an article in the main namespace -> the article title
* a page from any other Wikimedia project -> other-internal
* an external search engine -> other-search
* any other external site -> other-external
* an empty referrer -> other-empty
* anything else -> other-other

The current data includes the following 4 fields:

* prev: the result of mapping the referer URL to the fixed set of values described above
* curr: the title of the article the client requested
* type: describes (prev, curr)
  - link: if the referrer and request are both articles and the referrer links to the request
  - external: if the referrer host is not en(.m)?.wikipedia.org
  - other: if the referrer and request are both articles but the referrer does not link to the request. This can happen when clients search or spoof their referrer.
* n: the number of occurrences of the (referrer, resource) pair

Load data
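Before loading the full dump, here is a tiny sketch (with made-up rows) of how these four fields map onto a pandas DataFrame:
###Code
# Minimal sketch: two hypothetical clickstream rows parsed into the
# (prev, curr, type, n) schema described above; the values are made up.
import pandas as pd
from io import StringIO

sample = StringIO("other-search\tSome_Article\texternal\t1500\n"
                  "Some_Article\tAnother_Article\tlink\t42\n")
demo = pd.read_csv(sample, sep='\t', header=None, names=['prev', 'curr', 'type', 'n'])
print(demo)
###Output
_____no_output_____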
###Code
import pandas as pd
import numpy as np
# Download may take a while (314 MB), please be patient
data = pd.read_csv('https://dumps.wikimedia.org/other/clickstream/2018-06/clickstream-enwiki-2018-06.tsv.gz', compression='gzip', sep='\t', header=None)
data.columns= ['prev', 'curr', 'type', 'n']
# alternatively you can download via web browser and place in the folder with notebook
# data = pd.read_csv('clickstream-enwiki-2018-06.tsv.gz', compression='gzip', sep='\t', header=None)
# data.columns= ['prev', 'curr', 'type', 'n']
###Output
_____no_output_____
###Markdown
Descriptive statistics and exploratory data analysis
###Code
data.head(10)
data.describe(include='all')
# Another way of getting table (data frame object) basic composition is info method
data.info()
# Any missing data?
pd.isnull(data).sum().sum()
###Output
_____no_output_____
###Markdown
Total amount of missing data in any of the columns is very low.
###Code
pd.isnull(data).sum().sum()/len(data) * 100
# 45 missing values; let's see which rows they are
data[data.isnull().any(axis=1)]
###Output
_____no_output_____
###Markdown
Anyway, I can safely remove 45 NaN's from the dataframe
###Code
data.dropna(axis=0, inplace=True)
data.isnull().sum().sum()
data.info()
# Lets check if all examples of prev, curr and type are strings
data[['prev', 'curr', 'type']].applymap(type).eq(str).all()
# maximal value in n column?
print('idx: {}, value = {}, type: {}'.format(data.n.idxmax(), data.n.max(), type(data.n.max())))
#Still less than np.int32 range, which is 2147483647
395349956<2147483647
# We can optimize a bit memory usage
data['n'] = data['n'].astype('int32')
data.info()
print('idx: {}, value = {}, type: {}'.format(data.n.idxmax(), data.n.max(), type(data.n.max())))
###Output
idx: 17841134, value = 395349956, type: <class 'numpy.int32'>
###Markdown
Determine which links people click on most frequently in a given article. According to the wiki metadata, all internal sources have type `link` or `other`; the only distinction is that `link` rows are articles requested and served where the referrer links to the request, while `other` rows are requests whose referrer does not link to them. So, for a link to be clickable by people, it must connect two articles and be of type `link`.
###Code
df_link = data[data['type'] == 'link']
# is there any 'other-*' source left in prev, or only unique wiki titles?
df_link['prev'].str.startswith('other-').any()
df_link.head()
###Output
_____no_output_____
###Markdown
Now that I have the links clicked from article to article, I can group them and count frequencies. I will group by the curr column, since this column refers to the requested article, and sum up all the events recorded in the n column.
###Code
df_link.groupby('curr').sum().sort_values('n', ascending=False)[:5]
###Output
_____no_output_____
###Markdown
The most frequently clicked link in June 2018 was ... **2018_FIFA_World_Cup**. Determine the most common referrers for a given article. A referrer is any element of the prev column; no matter where it came from, it always leads to the wiki article in the curr column. I will simply group the entire dataset by prev and sum all the events denoted in the n column.
###Code
data.groupby('prev').sum().sort_values('n', ascending=False)[:10]
###Output
_____no_output_____
###Markdown
The analysis says that in June 2018 the most common referrer to Wikipedia articles (2.86 bln) was the group of **search engines**. It is also worth noting that it was the 2018 World Cup season, which can be seen in the Wikipedia clickstream results: World Cup related wiki pages also came up very high (7th, 8th and 9th) among the top ten referrers. Determine what percentage of all visitors clicked on a link within a given article. I already extracted the link events served from articles into df_link. Calculating the percentage is as easy as taking all events from that data frame and dividing by the total number of visitors (the sum of n over the entire data set).
###Code
'Percentage of all visitors, who clicked on a link within a given article is {:.2%}'.format(df_link.n.sum()/data.n.sum())
###Output
_____no_output_____
###Markdown
Answer: **22.88%** Determine and visualise the most popular articles people accessed from all external search engines. I need to get all rows containing 'other-search' in the prev column, then group by the curr column (requested articles) and sort by n in descending order.
###Code
df_search = data[data['prev'].str.contains('other-search')]
# Just pre-view of the data-set
df_search.head()
group = df_search.groupby('curr').sum().sort_values('n', ascending=False)[:20]
group = group.reset_index()
group
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="whitegrid")
fig, ax = plt.subplots(figsize=(15, 20))
sns.set_color_codes("pastel")
sns.barplot(x="n", y="curr", data=group,
color="b")
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(16)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(16)
ax.set_title('20 Most popular articles accessed from search engines\n in June 2018', fontdict={'fontsize':30, 'fontweight': 'bold'})
ax.set_xlabel("Number of visits", fontsize=24)
ax.set_ylabel('')
sns.despine(left=True, bottom=True)
fig.tight_layout()
fig.savefig('popular_20.png', dpi=72)
###Output
_____no_output_____
###Markdown
Real-Time TDDFT*Roberto Di Remigio* Running the simulationsYou will, of course, need the appropriate version of ReSpect. The following are sample files for the water molecule using HF and uncontracted cc-pVDZ basis set. Change as needed. SCF step```scf: geometry: O 0.000000 0.000000 0.000000 H 0.000000 0.000000 0.940000 H 0.903870 0.000000 -0.258105 method: hf initmo: atomic nc-model: point charge: 0 multiplicity: 1 maxiterations: 30 convergence: 1.e-7 basis: H: ucc-pvdz O: ucc-pvdz```Assuming the input file is named `scf.inp`, run with:```bashrespect --scf --inp=scf```the `scf.out_scf` output file and `scf.50` checkpoint file will be generated. If the calculation ends successfully, that is. TDSCF step(s)The TDSCF trajectory will, most likely, be calculated in batches of $n_\mathrm{steps}$ points. The first batch of points will restart from the SCF step, perform $n_\mathrm{steps}$, and save the results to the corresponding `.out_tdscf` output and `.50` checkpoint file.The template TDSCF step input is:```tdscf: spectroscopy: eas solver: magnus time-steps: nsteps x 0.005 maxiterations: 6 convergence: 1.0e-7 checkpoint: 3 field: model: delta amplitude: 0.2 direction: 1.0 0.0 0.0```this one is for the perturbation applied in the $x$ direction. We will thus have three templates, one per direction, named `tdscf_x.inp`, `tdscf_y.inp`, and `tdscf_z.inp`, respectively.Before starting the simulation, we need to substitute `nsteps` with the number of actual steps to be run in the calculation. We'll just look at the $x$ direction, but the gist should be evident. To keep everything in order:```bashnsteps_previous=0nsteps_next=5cp tdscf_x.inp ${nsteps_next}_tdscf_x.inpsed -i -e "s/nsteps/${nsteps_next}/g" ${nsteps_next}_tdscf_x.inp```The launcher script will look for a similarly named checkpoint file to start the calculation from. Instead of copying the one from SCF (or a previous TDSCF batch of points) we use symlinks:```bashif [ "$nsteps_previous" -eq "0" ]; then ln -sf scf.50 ${nsteps_next}_tdscf_x.50else ln -sf ${nsteps_previous}_tdscf_x.50 ${nsteps_next}_tdscf_x.50fi ```We are finally ready to roll with:```bashif [ "$nsteps_previous" -eq "0" ]; then respect --tdscf --inp=${nsteps_next}_tdscf_x else respect --tdscf --inp=${nsteps_next}_tdscf_x --restartfi ``` Obtain raw data from simulation outputOnce we have the output files, we are ready to extract the data from it. The Python code is inspired by the `spectrum.py` script shipped with ReSpect. We are now reading from some reference output files contained in this repository. Change paths accordingly!**WARNING** This code is written with Python 3 in mind.
###Code
# All imports at the top
import pathlib
import numpy as np
import collections
import re
import sys
import scipy.constants
import scipy.signal
from scipy import interpolate
from scipy.interpolate import BSpline
import matplotlib.pyplot as plt
from io import StringIO
def read_data_from_output(fname, what):
"""Read time-dependent signal from output file.
Parameters
----------
fname: Path
The output file to read.
what: str
What to extract from the output, e.g. "Step EAS" for EAS data.
Returns
-------
A tuple with field strength, size to of the time step, and the data into a NumPy array.
"""
timestep = 0.0
field = 0.0
nsteps = 0
data = []
with fname.open() as handle:
for line in handle:
if what in line:
data.append(np.loadtxt(StringIO(line), dtype=float, usecols=(4, 5, 6, 7)))
elif 'time step length:' in line:
timestep = float(line.split()[-1])
elif 'field strength:' in line:
field = float(line.split()[-1])
elif 'number of time steps:' in line:
nsteps = int(line.split()[-1])
signalTD = np.array(data)
return field, timestep, signalTD
###Output
_____no_output_____
###Markdown
The data is read in using an intrinsic NumPy function. The return value is an array of size number of time steps _times_ 4, the columns comprising the energy and the $x$, $y$, and $z$ components of the detected signal.
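As a hedged usage sketch, one could inspect the parsed signal for a single field direction as below; the file name is a guess based on the naming convention used later in the notebook.
###Code
# Hedged usage sketch: inspect the parsed signal for one field direction.
# The file name is an assumption based on the naming convention used below.
fname = pathlib.Path('ref/20_tdscf_x.out_tdscf')
if fname.exists():
    field, dt, signalTD = read_data_from_output(fname, 'Step EAS')
    print(field, dt, signalTD.shape)  # signalTD has shape (nsteps, 4)
###Output
_____no_output_____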
###Code
def check_equal(iterator):
"""Checks that the contents of iterator are all equal.
"""
iterator = iter(iterator)
try:
first = next(iterator)
except StopIteration:
return True
return all(first == rest for rest in iterator)
def reader(data_dir, root_name):
"""Read data from output files.
Parameters
----------
data_dir: Path
Path to the directory with the output files.
root_name: str
Common root of the output filenames.
Returns
-------
The filename of a NumPy compressed array. This is saved to the current working directory
and contains the data extracted from the plain-text files.
Notes
-----
It is assumed that simulations in the three Cartesian directions have been performed
and the output is saved to filenames `root_name`_{x,y,z}.out_tdscf
"""
# Get out_tdscf files, sorted by direction (x, y, z)
out_tdscf = sorted(data_dir.glob('{}*.out_tdscf'.format(root_name)))
raw_signal = {}
field = 0.0
timestep = 0.0
what = 'Step EAS' # What to extract
for _, v in enumerate(out_tdscf):
# Get filename, split on underscore, get the last element in the list
# FIXME this is not a general way to extract this information
# FIXME (probably?) the field is assumed to always have the same strength!
direction = v.stem.replace(root_name + '_', '')
field, timestep, data = read_data_from_output(v, what)
raw_signal.update({direction : data})
# Check that sizes of raw data are matching, abort if not
error_message = ''
if not check_equal((v.shape for v in raw_signal.values())):
error_message += ' Shapes of TD signals in different directions DO NOT MATCH\n'
for k, v in raw_signal.items():
error_message += ' {0} direction has shape {1}'.format(k, v.shape)
error_message += '\n ( check whether the input file is formatted correctly )'
print(error_message)
sys.exit(-1)
# Save to compressed NumPy array the raw data
npz_name = '{}-raw_TD-data.npz'.format(root_name)
np.savez(npz_name, field=field,
timestep=timestep,
nsteps=raw_signal['x'].shape[0],
xdata=raw_signal['x'],
ydata=raw_signal['y'],
zdata=raw_signal['z'])
print('Raw data saved to {}'.format(npz_name))
return npz_name
###Output
_____no_output_____
###Markdown
We are now ready to run the analysis. At each time step $t_{i}$ we have an induced signal matrix $G^{(i)}_{uv}$ with $u, v \in {x, y, z}$. We will save the whole thing, even though to get the spectrum only the trace of the induced signal is needed: $G^{(i)} = \frac{1}{3}\mathrm{tr} \mathbf{G}^{(i)}$. This approach is possibly a bit wasteful of memory, but it could make it easier to extend to the extraction of higher order properties.
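As a small worked example of the isotropic average, the sketch below applies the trace formula to one made-up 3x3 signal matrix for a single time step.
###Code
# Worked sketch of the isotropic average G = tr(G_matrix) / 3 for a single
# time step, using a made-up 3x3 induced-signal matrix.
G_step = np.array([[0.10, 0.01, 0.00],
                   [0.02, 0.12, 0.01],
                   [0.00, 0.03, 0.08]])
G_iso = np.trace(G_step) / 3.0
print(G_iso)  # (0.10 + 0.12 + 0.08) / 3 = 0.10
###Output
_____no_output_____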
###Code
# Extract the diagonal components of the induced signal, i.e. t-direction signal from t-direction perturbation
# At each time step we have a signal matrix G_{ij}(\omega),
# so our data structure will be a list (which gives the step-indexing) of matrices.
# We define a custom data structure by means of a namedtuple
class Signal(collections.namedtuple('Signal', 'nsteps timestep field signal')):
__slots__ = ()
def __new__(cls, raw_data):
nsteps, timestep, signal = cls._signal_from_raw_data(raw_data)
field_strength = raw_data.f.field
return super(cls, Signal).__new__(cls, nsteps, timestep, field_strength, signal)
def _signal_from_raw_data(raw):
"""Read raw data into more amenable data structures
Parameters
----------
raw: dict
Raw signal.
Returns
-------
A tuple of data from the analysed trajectory.
"""
# Extract timestep
timestep = raw.f.timestep
# Extract number of steps
nsteps = raw.f.nsteps
# Stacking TD signals for the various direction into a 3-Dimensional array
# The first dimension is the time step,
# the second dimension is the direction of the perturbing field (x: 0, y: 1, z: 2),
# the third dimension is the detection direction for the induced signal (x: 0, y: 1, z: 2)
# Hence: signal_timestep_view[n] is the full 3x3 induced signal at time step n.
# NOTE: We are discarding the energy.
signal_timestep_view = np.stack((raw.f.xdata[:, 1:], raw.f.ydata[:, 1:], raw.f.zdata[:, 1:]), axis=1)
return nsteps, timestep, signal_timestep_view
def compute_spectrum(cls, prefactor, field_time=0.0, damping=None, scaling=1.0):
"""Compute the spectrum from the time-dependent signal using the Fast Fourier Transform.
Parameters
----------
prefactor: function
Prefactor for the spectroscopy, e.g. :math:`\frac{4\pi\omega}{3c}` for EAS.
field_time: float, optional
Center of perturbing field.
damping: float, optional
Damping factor for the computed signal.
scaling: float, optional
Scaling factor for the spectrum.
Returns
-------
A tuple with the spectrum and poles.
The `spectrum` is a tuple of NumPy arrays: (frequency, intensity).
The `poles` is a tuple of NumPy arrays: (frequency at pole, intensity at pole)
Notes
-----
https://docs.scipy.org/doc/numpy/reference/routines.fft.html
"""
# Compute isotropic average of the signal
signal_iso = np.array([np.trace(v)/3.0 for v in cls.signal])
signal_iso /= cls.field
# Generate array of time steps
time = np.array([i*cls.timestep for i in range(cls.nsteps)])
# Set damping factor
if damping is None:
damping = -np.log(0.005) / time[-1]
# Report settings
cls._report(time[-1], damping, field_time)
# Damp isotropic average of signal
signal_iso *= np.exp(-damping * time)
# Real discrete Fourier transform
# signal_iso is now the signal in the frequency domain
signal_iso = np.fft.rfft(signal_iso)
# Get the frequencies, only the positive part is relevant
freq = np.fft.rfftfreq(time.size)
# for an even number of points, make the last frequency positive
freq[-1] = abs(freq[-1])
freq *= 2 * np.pi / cls.timestep
# Correct the phase to get the factor d_k in \sum_k d_k \exp(-iw_k t) from real-FFT
signal_iso *= - np.exp(1j * field_time * freq)
# Normalize
signal_iso *= cls.timestep
# Detect poles by looking at the maxima of the imaginary (absorptive) part of the signal
pole_idx = sorted(scipy.signal.argrelmax(np.imag(signal_iso))[0])
# The frequency-dependent polarizability (FDP) as a tuple of NumPy arrays.
# alpha[0] is the array of frequencies (a.u.)
# alpha[1] is the complex FDP (a.u.)
alpha = (freq, signal_iso)
# The poles of the FDP as a tuple of NumPy arrays.
# alpha_poles[0] is the array of poles (a.u.)
# alpha_poles[1] is the complex FDP at the poles, scaled by the damping factor (a.u.)
alpha_poles = (freq[pole_idx], signal_iso[pole_idx] * damping)
# Get the spectrum, i.e. apply spectroscopy-specific prefactor and scaling to the FDP
factor = np.array([prefactor(omega) for omega in alpha[0]])
intensity = np.multiply(alpha[1].real, factor.real) + 1j * np.multiply(alpha[1].imag, factor.imag)
spectrum = (alpha[0], intensity * scaling)
# Get the poles, i.e. apply spectroscopy-specific prefactor and scaling to the poles of the FDP
poles = (spectrum[0][pole_idx], spectrum[1][pole_idx] * damping)
return spectrum, poles
def _report(cls, final_time, damping, field_time):
"""Print report on spectrum calculation.
"""
report = '''
RT-TDDFT Analysis --- Computing EAS
Number of steps = {nsteps:4d}
Time step = {timestep:.3E} a.u.
Field strength = {field:.3E} a.u.
Field centered at time = {fieldtime:.3E} a.u.
Damping = {damping:.3E} a.u.
Resolution = {resEh:14.8f} E_h ({reseV:14.8f} eV)
Hartree energy = {hartree2eV:20.8f} eV
Speed of light = {c:20.8f} a.u.
'''
# Resolution in E_h
resEh = 2 * np.pi / (final_time - field_time)
reseV = resEh * scipy.constants.value('Hartree energy in eV')
print(report.format(nsteps=cls.nsteps,
hartree2eV=scipy.constants.value('Hartree energy in eV'),
c=scipy.constants.value('inverse fine-structure constant'),
timestep=cls.timestep, field=cls.field, fieldtime=field_time,
damping=damping, resEh=resEh, reseV=reseV))
def eas_prefactor(omega):
"""
Prefactor for EAS spectroscopy.
Parameters
----------
omega: float
Frequency
Returns
-------
The prefactor for EAS spectroscopy as a complex number.
.. math::
f(\omega) = 1 + \mathrm{i}\frac{4\pi\omega}{3c}
Notes
-----
Why a complex number? Because the EAS spectrum is the imaginary (absorptive) part
of the frequency-dependent polarizability and the scaling need only be applied
to that part.
"""
c = scipy.constants.value('inverse fine-structure constant')
return np.complex(1.0, (4.0 * np.pi * omega) / (c))
###Output
_____no_output_____
###Markdown
Since the spectrum is only known at a discrete set of points, we will have to interpolate to get a smooth curve. We use B-spline interpolation as implemented in `scipy`.
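As a quick, hedged illustration of the scipy B-spline API used in the helper below, here it is applied to a noisy sine curve (synthetic data, unrelated to the spectra).
###Code
# Hedged sketch of scipy's smoothing B-spline on synthetic data
# (a noisy sine curve), unrelated to the actual spectra computed below.
x_demo = np.linspace(0.0, 2.0 * np.pi, 50)
y_demo = np.sin(x_demo) + 0.05 * np.random.randn(x_demo.size)
t, c, k = interpolate.splrep(x_demo, y_demo, s=0.5, k=3)
y_smooth = BSpline(t, c, k, extrapolate=False)(x_demo)
print(np.max(np.abs(y_smooth - np.sin(x_demo))))
###Output
_____no_output_____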
###Code
def cubic_spline_smoothing(x, y, s):
"""Smoothing cubic spline.
Parameters
----------
x: array_like
Array of x values
y: array_like
Array of corresponding y values
s: float
Spline smoothing parameter
Returns
-------
A tuple with the x values in the interval and the y values computed from the smoothing spline
"""
t, c, k = interpolate.splrep(x, y, s=s, k=3)
spline = BSpline(t, c, k, extrapolate=False)
x_smooth = np.linspace(x.min(), x.max(), 3 * x.shape[0])
return (x_smooth, spline(x_smooth))
def filter_spectrum_data(data, threshold):
"""
Filter the spectrum tuple based on its first dimension.
"""
indexing = np.where(data[0] <= threshold)
return (data[0][indexing], data[1][indexing])
# Where is the data? We use pathlib to manipulate paths (directories and files)
data_dir = pathlib.Path('ref').resolve()
print('Where is the data? {}\n'.format(data_dir))
root_name = '20_tdscf'
npz_file_name = reader(data_dir, root_name)
raw_signal = np.load(npz_file_name)
signal = Signal(raw_signal)
spectrum, poles = signal.compute_spectrum(eas_prefactor)
# We are now ready to plot the spectrum.
# The energy scale of the spectrum data structure is in Hartree, but we can easily convert it to electronvolt.
Hartree2eV = scipy.constants.value('Hartree energy in eV')
spectrum_eV = (spectrum[0] * Hartree2eV, spectrum[1])
# x is the excitation energy axis (eV). It is the same for all data sets
# y is the imaginary part of the intensity (arbitrary units)
# We interpolate with a cubic B-spline with smoothing factor 0.01
x, y = cubic_spline_smoothing(spectrum_eV[0], np.imag(spectrum_eV[1]), s=0.01)
plt.xlim(2.5, x.max())
plt.ylim(0.0, np.amax(y) + 0.5)
plt.plot(x, y, label='Vacuum')
plt.xlabel('Energy[eV]', fontsize=14)
plt.ylabel('$I(\omega)$[arb. units]', fontsize=14)
plt.legend(fontsize=14)
# Save plot to file
fname = str(pathlib.Path('RT-spectrum.svg').resolve())
plt.savefig(fname, format='svg', dpi=300, bbox_inches='tight')
plt.show()
print('Analysis for uranyl')
Hartree2eV = scipy.constants.value('Hartree energy in eV')
print('Computing TD signal and spectrum in vacuum')
root_name = 'UO2_2+_magnus-delta'
npz_file_name = reader(data_dir, root_name)
raw_signal = np.load(npz_file_name)
signal = Signal(raw_signal)
vacuum, poles = signal.compute_spectrum(eas_prefactor)
print('Computing TD signal and spectrum in water, delayed propagation')
root_name = 'UO2_2+_magnus-delta_pcm'
npz_file_name = reader(data_dir, root_name)
raw_signal = np.load(npz_file_name)
signal = Signal(raw_signal)
delayed, poles = signal.compute_spectrum(eas_prefactor)
print('Computing TD signal and spectrum in water, equilibrium propagation')
root_name = 'UO2_2+_magnus-delta_eq+pcm'
npz_file_name = reader(data_dir, root_name)
raw_signal = np.load(npz_file_name)
signal = Signal(raw_signal)
equilibrium, poles = signal.compute_spectrum(eas_prefactor)
# We are now ready to plot the spectrum.
# The energy scale of the spectrum data structure is in Hartree, but we can easily convert it to electronvolt.
vacuum_eV = (vacuum[0] * Hartree2eV, vacuum[1])
# Plot only below 16 eV
vacuum_eV = filter_spectrum_data(vacuum_eV, 16.0)
delayed_eV = (delayed[0] * Hartree2eV, delayed[1])
# Plot only below 16 eV
delayed_eV = filter_spectrum_data(delayed_eV, 16.0)
equilibrium_eV = (equilibrium[0] * Hartree2eV, equilibrium[1])
# Plot only below 16 eV
equilibrium_eV = filter_spectrum_data(equilibrium_eV, 16.0)
# x is the excitation energy axis (eV). It is the same for all data sets
# y is the imaginary part of the intensity (arbitrary units)
# We interpolate with a cubic B-spline with smoothing factor 0.01
x, y_vacuum = cubic_spline_smoothing(vacuum_eV[0], np.imag(vacuum_eV[1]), s=0.01)
_, y_delayed = cubic_spline_smoothing(delayed_eV[0], np.imag(delayed_eV[1]), s=0.01)
_, y_equilibrium = cubic_spline_smoothing(equilibrium_eV[0], np.imag(equilibrium_eV[1]), s=0.01)
plt.xlim(2.5, x.max())
plt.ylim(0.0,
max(np.amax(y_vacuum), np.amax(y_delayed), np.amax(y_equilibrium)) + 0.5)
plt.plot(x, y_vacuum, label='Vacuum')
plt.plot(x, y_delayed, label='Water, delayed')
plt.plot(x, y_equilibrium, label='Water, equilibrium')
plt.xlabel('Energy [eV]', fontsize=14)
plt.ylabel('$I(\omega)$ [arb. units]', fontsize=14)
plt.legend(fontsize=14)
fname = str(pathlib.Path('uranyl-RT-spectrum.svg').resolve())
plt.savefig(fname, format='svg', dpi=300, bbox_inches='tight')
plt.show()
###Output
Analysis for uranyl
Computing TD signal and spectrum in vacuum
Raw data saved to UO2_2+_magnus-delta-raw_TD-data.npz
RT-TDDFT Analysis --- Computing EAS
Number of steps = 10001
Time step = 2.000E-01 a.u.
Field strength = 5.000E-04 a.u.
Field centered at time = 0.000E+00 a.u.
Damping = 2.649E-03 a.u.
Resolution = 0.00314159 E_h ( 0.08548709 eV)
Hartree energy = 27.21138602 eV
Speed of light = 137.03599914 a.u.
Computing TD signal and spectrum in water, delayed propagation
Raw data saved to UO2_2+_magnus-delta_pcm-raw_TD-data.npz
RT-TDDFT Analysis --- Computing EAS
Number of steps = 10001
Time step = 2.000E-01 a.u.
Field strength = 5.000E-04 a.u.
Field centered at time = 0.000E+00 a.u.
Damping = 2.649E-03 a.u.
Resolution = 0.00314159 E_h ( 0.08548709 eV)
Hartree energy = 27.21138602 eV
Speed of light = 137.03599914 a.u.
Computing TD signal and spectrum in water, equilibrium propagation
Raw data saved to UO2_2+_magnus-delta_eq+pcm-raw_TD-data.npz
RT-TDDFT Analysis --- Computing EAS
Number of steps = 10001
Time step = 2.000E-01 a.u.
Field strength = 5.000E-04 a.u.
Field centered at time = 0.000E+00 a.u.
Damping = 2.649E-03 a.u.
Resolution = 0.00314159 E_h ( 0.08548709 eV)
Hartree energy = 27.21138602 eV
Speed of light = 137.03599914 a.u.
###Markdown
TüEyeQ Analysis

This notebook contains experiments and plots accompanying the TüEyeQ data set. Please refer to our paper when using this script: [citation]

The TüEyeQ data set can be downloaded at [link].

This notebook comprises the following parts:

1. Load Packages and Data
2. General Analysis of the Raw Data (prior to pre-processing)
3. Preprocessing
4. Distance Correlations of Features
5. Logistic Regression Model

---

This code is published under the MIT license.

*Copyright (c) 2020 Johannes Haug*

1. Load Packages and Data
###Code
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.linear_model import LogisticRegression
import shap
from lime import lime_tabular
import dcor
import pickle
tiq = pd.read_csv('./data/cft_full.csv') # !!! you may need to substitute this with a different path to the data
tiq.head()
tiq.shape
###Output
_____no_output_____
###Markdown
2. General Analysis
Next, we investigate frequencies and distributions of the raw (unprocessed) data set.
Solved Tasks
Here, we illustrate the **number of participants who managed to solve each task correctly**. In addition, we show the incremental mean of participants and the number of unsolved tasks. Note that the tasks are shown in the order of appearance. Each block of tasks is highlighted in a different color. We also indicate all tasks with an error during the data collection process.
###Code
color_palette = ['#fee090','#91cf60','#e0f3f8','#91bfdb']
tasks = tiq.groupby('task_id', sort=False)['cft_task'].sum() # sum of participants who solved the task
mean = pd.Series(tasks.values).expanding().mean() # incremental mean no. of participants with correct answer
# Count number of NaNs (i.e. no. of participants that did not solve the task)
unsolved = tiq[['task_id','cft_task']].copy()
unsolved['cft_task'] = unsolved['cft_task'].isnull()
unsolved = unsolved.groupby('task_id', sort=False)['cft_task'].sum()
itr = 0 # task iterator
clr = 0 # color indicator
group_size = [15,15,14,10] # no. tasks per group
fig = plt.figure(figsize=(20,5))
for tk in tasks.items():
if itr == group_size[clr]:
itr = 0 # reset iterator (next task group begins)
clr += 1 # increment color indicator
# Mark erroneous measurements with pattern
if tk[0][-1] == 'e':
pattern = '//'
else:
pattern = ''
plt.bar(tk[0], tk[1], color=color_palette[clr], hatch=pattern, label='_nolegend_')
itr += 1 # increment task iterator
# Line plot of incremental mean
plt.plot(unsolved.keys(), mean, color='black', lw=2, marker='.', markersize=12, label='Incremental Mean')
# Line plot of unsolved tasks
plt.plot(unsolved.keys(), unsolved.values, color='black', lw=2, ls=':', marker='x', markersize=8, label='Participants without an answer')
plt.ylabel('Participants with Correct Answer', size=12)
plt.xlabel('Task', size=12)
plt.xticks(rotation=90)
plt.xlim(-1, 54)
plt.ylim(0, 335)
plt.legend()
# Save histogram
plt.savefig('./figures/task_histogram.pdf', bbox_inches='tight', format='pdf')
plt.savefig('./figures/task_histogram.png', bbox_inches='tight', format='png')
plt.plot()
###Output
_____no_output_____
###Markdown
Bias
We **investigate whether the data is biased against age or gender**. To this end, we plot the distribution/frequency of both variables, as well as the normalized fraction of correctly solved tasks.
###Code
# Generic function to plot distribution histogram (bar chart)
def plot_bar(x, y_1, y_2, x_ticklabels, label_x, label_y, path, stacked=False):
wd = .4
if stacked:
plt.bar(x, y_1, color='#91bfdb')
plt.bar(x, y_2, color='#fc8d59', bottom=y_1)
else:
plt.bar(x - wd/2, y_1, width=wd, color='#91bfdb')
plt.bar(x + wd/2, y_2, width=wd, color='#fc8d59')
plt.ylim(0,1)
plt.xlabel(label_x, size=12)
plt.xticks(x, x_ticklabels)
plt.ylabel(label_y, size=12)
plt.xlim(min(x)-.5, max(x)+.5)
plt.legend(['Male','Female'], loc='upper right')
plt.savefig('./figures/{}.png'.format(path), bbox_inches='tight', format='png')
plt.savefig('./figures/{}.pdf'.format(path), bbox_inches='tight', format='pdf')
plt.show()
##############################################################################
# Plot histogram of age and gender
age_count_male = tiq[tiq['gender'] == 1]['age'].value_counts().sort_index() # count male participants per age
age_count_female = tiq[tiq['gender'] == 2]['age'].value_counts().sort_index() # count female participants per age
# Correctly solved tasks per age and gender
tasks_age_male = tiq[tiq['gender'] == 1].groupby('age', sort=False)['cft_task'].sum().sort_index()
tasks_age_female = tiq[tiq['gender'] == 2].groupby('age', sort=False)['cft_task'].sum().sort_index()
plot_bar(age_count_male.keys(), age_count_male.values, age_count_female.values,
age_count_male.keys().values.astype(int), 'Age', 'No. of Tasks', 'age_hist', True)
plot_bar(age_count_male.keys(), tasks_age_male.values / age_count_male.values, tasks_age_female.values / age_count_female.values,
age_count_male.keys().values.astype(int), 'Age', 'Norm. % of Tasks Solved Correctly', 'age_tasks_norm')
###Output
_____no_output_____
###Markdown
CFT Distribution
We investigate the histogram of the aggregated CFT score (cft_sum_full) and find that it is approximately normally distributed.
###Code
cft_unique = tiq[['subject', 'cft_sum_full']].drop_duplicates() # extract unique subjects
cft_unique.hist(bins=26, color='#91bfdb') # separate into 26 bins, since there are 26 unique aggr. CFT scores
plt.grid(False)
plt.title(None)
plt.xlabel('Aggregated CFT', size=12)
plt.ylabel('No. of Participants', size=12)
# Save histogram
plt.savefig('./figures/cft_histogram.png', bbox_inches='tight', format='png')
plt.savefig('./figures/cft_histogram.pdf', bbox_inches='tight', format='pdf')
plt.show()
###Output
_____no_output_____
###Markdown
3. Preprocessing
All subsequent analysis requires the data set to be preprocessed. For illustration, we consider a binary classification scenario for **cft_task (target)**. Specifically, we use the **positive class "correct answers (label 1)"** and the **negative class "wrong answers (label 0)"**. We ignore missing answers (NaN values) in this experiment (removes 1,248 observations).
Specifically, we apply the following pre-processing steps:
1. Specify the categorical and the continuous features (according to the paper).
2. Remove all observations with a missing target (i.e. NaN value in cft_task).
3. Remove subject and cft_sum_full to avoid information leakage due to high dependency with the target.
4. Impute NaN-values in categorical features with a new category.
5. Impute NaN-values in continuous features with the median.
6. Factorize the categorical features (to encode strings as integers).
7. Normalize the continuous features to the interval [0,1] (since we will be using l2-regularization).
8. Split the data into a training and a test set (20% holdout for testing).
###Code
# 1. Extract categorical and continuous features
ftr_cont = ['mean_grade_degree','grades_math', 'grades_german', 'grades_biology',
'grades_physics', 'grades_chemistry','grades_geography','grades_history','grades_art',
'gaming_hours_weekly_min', 'gaming_hours_weekly_max', 'cft_sum_full']
ftr_cat = tiq.iloc[:,~tiq.columns.isin(ftr_cont)].columns.tolist()
# Remove the target 'cft_task' from the list of categorical features
ftr_cat.remove('cft_task')
# 2. Remove all observations with missing target
tiq = tiq[tiq['cft_task'].notna()]
# 3. Remove subject and cft_sum_full, due to possible information leakage
# Note that we did NOT remove these variables for the computation of the distance correlation as reported in the paper
# However, it should be noted that none of these variables has a pairwise correlation above 0.5
tiq = tiq.drop(['subject','cft_sum_full'], axis=1)
ftr_cat.remove('subject')
ftr_cont.remove('cft_sum_full')
# 4. Impute NaN-values in categorical features with a new category
tiq[ftr_cat] = tiq[ftr_cat].fillna('new_category')
# 5. Impute NaN-values in continuous features with the median
medians = tiq[ftr_cont].median()
tiq[ftr_cont] = tiq[ftr_cont].fillna(medians)
# 6. Factorize the categorical features
tiq[ftr_cat] = tiq[ftr_cat].apply(lambda x: pd.factorize(x)[0])
# 7. Normalize the continuous features
tiq[ftr_cont] = MinMaxScaler().fit_transform(tiq[ftr_cont])
# 8. Train/Test split of the data
X_train, X_test, y_train, y_test = train_test_split(tiq.drop('cft_task', axis=1), tiq['cft_task'], test_size=.2, random_state=42)
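# Quick sanity check (a sketch, not part of the original pipeline): size of the split
# and class balance of the binary target after preprocessing.
print('Train:', X_train.shape, ' Test:', X_test.shape)
print(y_train.value_counts(normalize=True))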
###Output
_____no_output_____
###Markdown
4. Distance Correlations
We compute the pairwise Distance Correlation of features in a test set according to
Székely, Gábor J., Maria L. Rizzo, and Nail K. Bakirov. "Measuring and testing dependence by correlation of distances." The Annals of Statistics 35.6 (2007): 2769-2794.
###Code
# Compute Distance Correlation (on a sample of the data)
# !!! Note that we also provide pre-computed distance correlation scores, which can be loaded in the next cell !!!
dist_crl = []
combs = set() # save feature combinations
crl_sample = tiq.sample(frac=0.2, random_state=42) # sample a test set (as computation of distance correlation is costly)
for ftr1 in crl_sample:
for ftr2 in crl_sample:
# Check if feature combination was already evaluated (distance correlation is symmetric!)
if '{}-{}'.format(ftr1, ftr2) not in combs and '{}-{}'.format(ftr2, ftr1) not in combs and ftr1 != ftr2:
combs.add('{}-{}'.format(ftr1, ftr2)) # add feature pair to list of combinations
dist_crl.append([ftr1, ftr2, dcor.distance_correlation(crl_sample[ftr1], crl_sample[ftr2])])
print(dist_crl[-1])
dist_crl = pd.DataFrame(dist_crl, columns=['Feature 1', 'Feature 2', 'Dist. Correlation'])
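# For intuition, a minimal re-implementation of the distance correlation from its
# definition (double-centred pairwise distance matrices, Székely et al. 2007);
# dcor.distance_correlation should agree with this up to numerical precision for
# 1-D samples. This is only an illustrative sketch, not used by the analysis below.
def dcor_naive(x, y):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    a = np.abs(x[:, None] - x[None, :])                           # pairwise distances in x
    b = np.abs(y[:, None] - y[None, :])                           # pairwise distances in y
    A = a - a.mean(axis=0) - a.mean(axis=1)[:, None] + a.mean()   # double centring
    B = b - b.mean(axis=0) - b.mean(axis=1)[:, None] + b.mean()
    dcov2_xy, dcov2_xx, dcov2_yy = (A * B).mean(), (A * A).mean(), (B * B).mean()
    return np.sqrt(dcov2_xy / np.sqrt(dcov2_xx * dcov2_yy))
# Example (hypothetical pair of columns):
# dcor_naive(crl_sample['grades_math'], crl_sample['mean_grade_degree'])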
# Save/Load the Distance correlation object
#filehandler = open('./pre-computed/distance_correlation_full.obj', 'wb')
#pickle.dump(dist_crl, filehandler) # Save Object
filehandler = open('./pre-computed/distance_correlation.obj', 'rb')
dist_crl = pickle.load(filehandler) # Load Object
filehandler.close()
# Plot most strongly correlated features
top_crl = dist_crl.reindex(dist_crl['Dist. Correlation'].sort_values(ascending=False).index).reset_index(drop=True) # sort values
top_crl['Dist. Correlation'] = top_crl['Dist. Correlation'].round(5) # round scores
ltx_crl = top_crl.iloc[:54,:] # select top entries
print(ltx_crl.to_latex(index=False)) # plot in latex style
# Plot histogram of the distance correlation scores
def plot_hist(var, b, xmin, xlabel, ylabel, path):
var.hist(bins=b, color='#91bfdb')
plt.grid(False)
plt.xlim(xmin, 1)
plt.title(None)
plt.xlabel(xlabel, size=12)
plt.ylabel(ylabel, size=12)
# Save histogram
plt.savefig('./figures/{}_histogram.png'.format(path), bbox_inches='tight', format='png')
plt.savefig('./figures/{}_histogram.pdf'.format(path), bbox_inches='tight', format='pdf')
plt.show()
##############################################################################
plot_hist(dist_crl, 100, 0, 'Distance Correlation', 'No. of Feature Pairs', 'dist_corr')
# Plot correlation with cft_sum_full
cft_crl = top_crl[(top_crl['Feature 1'] == 'cft_sum_full') | ((top_crl['Feature 2'] == 'cft_sum_full'))].iloc[:20,:]
print(cft_crl.to_latex(index=False)) # plot in latex style
###Output
\begin{tabular}{llr}
\toprule
Feature 1 & Feature 2 & Dist. Correlation \\
\midrule
grades\_math & cft\_sum\_full & 0.34335 \\
mean\_grade\_degree & cft\_sum\_full & 0.29253 \\
grades\_chemistry & cft\_sum\_full & 0.24498 \\
grades\_physics & cft\_sum\_full & 0.22522 \\
native\_german\_father & cft\_sum\_full & 0.21195 \\
education\_father & cft\_sum\_full & 0.20961 \\
cft\_task & cft\_sum\_full & 0.20898 \\
native\_language\_father & cft\_sum\_full & 0.20001 \\
grades\_biology & cft\_sum\_full & 0.18949 \\
programming\_experience & cft\_sum\_full & 0.17870 \\
spreadsheet\_usage & cft\_sum\_full & 0.14477 \\
native\_german\_mother & cft\_sum\_full & 0.13621 \\
native\_german & cft\_sum\_full & 0.13383 \\
text\_editor\_usage & cft\_sum\_full & 0.13258 \\
education\_mother & cft\_sum\_full & 0.13026 \\
leisure\_travel\_tourism & cft\_sum\_full & 0.12710 \\
grades\_art & cft\_sum\_full & 0.12149 \\
age & cft\_sum\_full & 0.12012 \\
grades\_german & cft\_sum\_full & 0.11945 \\
training\_father & cft\_sum\_full & 0.11916 \\
\bottomrule
\end{tabular}
###Markdown
5. Logistic Regression Model
For illustration, we train a simple Logistic Regression model to predict the **cft_task (target)**. We also compute the **SHAP and LIME values** to quantify every feature's contribution to the prediction.
Specifically, we perform the following training and evaluation steps:
1. Train and test the Logistic Regression (LR) model
2. Plot the weights of the LR model
3. Compute Shapley values for the LR model
4. Compute LIME values for the LR model
###Code
# Function to plot ROC curve
def plot_roc(y_pred, path):
# Compute ROC-AUC Score
auc = roc_auc_score(y_test, y_pred)
# Plot ROC Curve
fpr, tpr, _ = roc_curve(y_test, y_pred)
plt.plot(fpr, tpr, color='#4575b4', label='ROC Curve (AUC = {})'.format(round(auc, 4)), lw=2)
plt.plot([0, 1], [0, 1], color='black', ls='--', label='_nolegend_', lw=2)
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('False Positive Rate', size=12)
plt.ylabel('True Positive Rate', size=12)
plt.legend()
plt.savefig('{}.pdf'.format(path), bbox_inches='tight', format='pdf') # save as PDF
plt.savefig('{}.png'.format(path), bbox_inches='tight', format='png') # save as PNG
plt.show()
# 1. Train and test the LR model
model = LogisticRegression(penalty='l2', random_state=42, max_iter=1000, solver='lbfgs')
model.fit(X_train, y_train)
y_pred = model.predict_proba(X_test)
plot_roc(y_pred[:,1], './figures/roc_lr')
# 2. Plot the weights of the LR model
coef = model.coef_.flatten()
coef_idx = abs(coef).argsort()[-20:] # get indices of top coefficients
coef_sorted = coef[coef_idx]
coef_names = X_test.columns[coef_idx] # get coefficient names
fig = plt.figure(figsize=(8,6))
plt.xlabel('Logistic Regression Coefficient', size=12)
plt.ylabel('Feature', size=12)
plt.barh(coef_names, coef_sorted, color='#4575b4')
plt.ylim(-1,20)
plt.vlines(0, -1, 21, ls='--')
plt.savefig('./figures/coef_lr.pdf', bbox_inches='tight', format='pdf') # save as PDF
plt.savefig('./figures/coef_lr.png', bbox_inches='tight', format='png') # save as PNG
plt.show()
# 3. Compute SHAP (LinearSHAP)
reference_X = shap.sample(X_train, 100)
explainer = shap.LinearExplainer(model, reference_X)
shap_values = explainer.shap_values(X_test)
shap.summary_plot(shap_values, X_test, show=False)
plt.savefig('./figures/shap_lr.png', bbox_inches='tight', format='png')
plt.savefig('./figures/shap_lr.pdf', bbox_inches='tight', format='pdf')
plt.show()
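# Follow-up sketch (not part of the original notebook): rank features by mean absolute
# SHAP value, for a direct comparison with the coefficient plot above and the LIME
# attributions computed next. Assumes shap_values is the (n_samples, n_features)
# array returned by LinearExplainer for this model.
mean_abs_shap = pd.Series(np.abs(shap_values).mean(axis=0), index=X_test.columns)
print(mean_abs_shap.sort_values(ascending=False).head(20))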
# 4. Compute LIME (TabularLIME)
lime_exp = lime_tabular.LimeTabularExplainer(
reference_X.to_numpy(),
feature_names=X_test.columns.values.tolist(),
discretize_continuous=False,
random_state=42
)
lime_mean = np.zeros(X_test.shape[1])
for xt in X_test.to_numpy(): # Compute average LIME scores
exp = lime_exp.explain_instance(xt, model.predict_proba, num_samples=100, num_features=X_test.shape[1], top_labels=2).local_exp
for itm in exp[1]: # collect explanations for positive class (label=1)
lime_mean[itm[0]] += itm[1] # sum local explanations
lime_mean /= X_test.shape[0] # compute mean attribution
lime_mean_idx = abs(lime_mean).argsort()[-20:] # get indices of top coefficients
lime_mean_sorted = lime_mean[lime_mean_idx]
coef_names = X_test.columns[lime_mean_idx] # get coefficient names
fig = plt.figure(figsize=(8,6))
plt.xlabel('Average LIME Attribution', size=12)
plt.ylabel('Feature', size=12)
plt.barh(coef_names, lime_mean_sorted, color='#4575b4')
plt.ylim(-1,20)
plt.vlines(0, -1, 21, ls='--')
plt.savefig('./figures/lime_lr.pdf', bbox_inches='tight', format='pdf') # save as PDF
plt.savefig('./figures/lime_lr.png', bbox_inches='tight', format='png') # save as PNG
plt.show()
###Output
_____no_output_____
###Markdown
Performing Analysis on Nutrient Data
- Using the New Zealand Food Composition Database
###Code
import math
import requests
import string
import pandas as pd
import numpy as np
# working directly with the API
food_url = "https://api.foodcomposition.co.nz/api/food"
# response = requests.get(food_url)
# foods_df = pd.json_normalize(response.text)
food_file = './food-data/nz-concise-13-edition.xlsx'
food_df = pd.read_excel(food_file)
renamed_columns = {}
for i, column in enumerate(food_df.columns):
unit = food_df.iloc[1][i]
if unit is not np.nan:
renamed_columns[food_df.columns[i]] = f'{column.replace(" "," ").title()}, {unit}'
food_df.rename(columns = renamed_columns, inplace=True) # columns have units
food_df.drop([0, 1, 2], inplace=True) # remove empty rows
food_df
food_df = food_df[food_df["FoodID"].notnull()]
food_df
# egg
# chicken_wings
# beef
# carrots
# strawberries
# blueberries
# honey
# banana
# apple
# mushrooms
list(food_df.columns)
# Eggs
egg_match = food_df[food_df['Short Food Name'].str.contains("^Egg,")]
egg_match
from typing import NamedTuple, Tuple
class Food(NamedTuple):
"""
Food
parameters
name (str): name of the food
TODO: add more params
protein (Tuple(float, str)): amount and unit e.g. 10 g
carbohydrate (Tuple(float, str)): amount and unit e.g. 10 g
fat (Tuple(float, str)): amount and unit e.g. 10 g
Example
egg = Food("egg",
"G1001"
(10, g), # protein 10 grams
...
(10, g), # fat
)
"""
food_id: str
food_name: str
water: Tuple[float, str]
energy: Tuple[float, str]
energy_nip: Tuple[float, str]
protein: Tuple[float, str]
fat: Tuple[float, str]
carbohydrate: Tuple[float, str]
dietary_fibre: Tuple[float, str]
sugars: Tuple[float, str]
    starch: Tuple[float, str]
sfa: Tuple[float, str]
mufa: Tuple[float, str]
pufa: Tuple[float, str]
    alpha_linolenic_acid: Tuple[float, str]
linoleic_acid: Tuple[float, str]
cholesterol: Tuple[float, str]
sodium: Tuple[float, str]
iodine: Tuple[float, str]
potassium: Tuple[float, str]
phosphorus: Tuple[float, str]
calcium: Tuple[float, str]
iron: Tuple[float, str]
zinc: Tuple[float, str]
selenium: Tuple[float, str]
vitamin_a: Tuple[float, str]
beta_carotene: Tuple[float, str]
thiamin: Tuple[float, str]
riboflavin: Tuple[float, str]
niacin: Tuple[float, str]
vitamin_b6: Tuple[float, str]
vitamin_b12: Tuple[float, str]
dietary_folate: Tuple[float, str]
vitamin_c: Tuple[float, str]
vitamin_d: Tuple[float, str]
vitamin_e: Tuple[float, str]
def __repr__(self):
INDENT = 10
return f"""
        {self.food_name}
        {'-' * len(self.food_name)}
{'Protein'.ljust(INDENT)}{self.protein[0]} ({self.protein[1]})
{'Carbs'.ljust(INDENT)}{self.carbohydrate[0]} ({self.carbohydrate[1]})
{'Fat'.ljust(INDENT)}{self.fat[0]} ({self.fat[1]})
"""
# eggs use G1001 as an example
food_df[food_df["FoodID"]=="G1001"]["Protein, g"].values[0]
food_ids = ["G1001"]
for food_id in food_ids:
for macro in food_df.columns:
if macro in ["FoodID", "Short Food Name"]:
macro_nutrient = food_df[food_df["FoodID"]==food_id][macro].values[0]
elif macro not in ["Measure, g"]:
macro_nutrient = (
food_df[food_df["FoodID"]==food_id][macro].values[0],
macro.split(',')[-1].replace(" ", "")
)
print(macro_nutrient)
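# Note: when the loop above reaches the "Measure, g" column, neither branch fires and the
# previous value is printed again, which is likely why the food name appears twice in the
# output. Below is a sketch (not from the original notebook) of assembling the collected
# values into the Food NamedTuple defined above; it assumes the spreadsheet columns
# (excluding FoodID, Short Food Name and "Measure, g") match the Food nutrient fields
# in number and order.
def row_to_food(food_id):
    row = food_df[food_df["FoodID"] == food_id].iloc[0]
    values = [row["FoodID"], row["Short Food Name"]]
    for macro in food_df.columns:
        if macro in ["FoodID", "Short Food Name", "Measure, g"]:
            continue
        values.append((row[macro], macro.split(',')[-1].replace(" ", "")))
    return Food(*values)
# Example: egg = row_to_food("G1001"); egg.protein -> (12.2, 'g')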
###Output
G1001
Egg, chicken, white & yolk, boiled
Egg, chicken, white & yolk, boiled
(76.9, 'g')
(568, 'kJ')
(568, 'kJ')
(12.2, 'g')
(9.5, 'g')
(0.6, 'g')
(0, 'g')
(0.6, 'g')
(0, 'g')
(2.6, 'g')
(4, 'g')
(0.9, 'g')
(nan, 'g')
(nan, 'g')
(395, 'mg')
(140, 'mg')
(46, 'µg')
(140, 'mg')
(190, 'mg')
(52, 'mg')
(1.8, 'mg')
(1.1, 'mg')
(23, 'µg')
(105, 'µg')
(0, 'µg')
(0.05, 'mg')
(0.44, 'mg')
(3.8, 'mg')
(0.06, 'mg')
(1.3, 'µg')
(66, 'µg')
(0, 'mg')
(1.8, 'µg')
(1.5, 'mg')
###Markdown
About this project
There is a lot of misinformation about guns in the media coming from both sides of the aisle. It's hard to get unbiased information from any one source - but luckily for us, data doesn't lie.
I have taken it upon myself to collect data from multiple sources and compile it in one place, so that we can do a comprehensive analysis and take a look at the *facts*.
I will not pretend to be unbiased about this issue - but I have tried to present the data in an unbiased manner, and since all of the data and code used to analyze it is freely available, I invite you to look at it yourself.
Before we get to the graphs and data, let's go over some definitions.
* Automatic weapon: The firearm will continue to fire bullets as long as the trigger is depressed.
* Semi-automatic weapon: The firearm will fire one bullet per pull of the trigger. By far the most popular kind of weapon in the United States, and most likely, the world.
* Assault weapon: a rather nebulous term used by the media to describe AR-15 style rifles. In military parlance, an assault rifle is a "select-fire" weapon, meaning it can switch between fully automatic and semi-automatic rates of fire. AR-15s, as purchasable by US citizens, are not "select-fire", as they only fire in semi-automatic mode.
All analyses are performed on data that is publicly available, from reputable sources - typically government agencies or the World Bank. Where I believe the sources are more spurious, I will tell you.
Gun Crime in the United States
Both gun homicides and general homicides have experienced steep declines since 1995. Rates of gun homicides dropped 50% from the high of 1995 to the low of 2014, and have only slightly rebounded since then, mirroring the overall homicide trend in the United States.
There has been a [lot](https://www.vox.com/policy-and-politics/2018/2/21/17028930/gun-violence-us-statistics-charts) of [talk](https://www.npr.org/2016/01/05/462017461/guns-in-america-by-the-numbers) about the increasing number of guns owned by civilians in the United States. Several of these pieces draw a direct link between the number of guns and rates of gun homicides, even going so far as to suggest that more guns = more gun homicides.
The data suggests otherwise.
###Code
plt.subplot(211)
plt.plot(data['year'], data['usa.guns.rate'], 'g-', label='Civilian firearms')
plt.ylabel('Civilian firearm ownership, per person', fontsize=8)
plt.title("Fig 1a. Gun ownership vs firearm homicides, over time, USA", fontsize=12, y=1.08)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlim(1995, 2016)
plt.subplot(212)
plt.plot(data['year'], data['usa.homicide.firearms.total'], label='Firearm homicides')
plt.ylabel('Firearm homicides, per 100k people', fontsize=8)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlim(1995, 2016)
plt.text(1992, 1.5, "Source: FBI Uniform Crime Reporting: https://ucr.fbi.gov/ucr-publications")
plt.text(1992, 1, "Source: Congressional Research Service: https://fas.org/sgp/crs/misc/RL32842.pdf")
plt.text(1992, 0.5, "Source: Hill, 2013:https://engagedscholarship.csuohio.edu/cgi/viewcontent.cgi?article=1679&context=urban_facpub")
plt.show()
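# Quick check of the "~50% drop from the 1995 high to the 2014 low" claim above
# (a sketch, reusing the column names already used in this notebook):
hom_1995 = data.loc[data['year'] == 1995, 'usa.homicide.firearms.total'].iloc[0]
hom_2014 = data.loc[data['year'] == 2014, 'usa.homicide.firearms.total'].iloc[0]
print('Firearm homicide rate change 1995 -> 2014: {:.0f}%'.format((hom_2014 - hom_1995) / hom_1995 * 100))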
x = data['usa.guns.rate']
y = data['usa.homicide.firearms.total']
z = data['year']
regr = linear_model.LinearRegression()
fit = regr.fit(x.values.reshape(-1, 1), y.values.reshape(-1, 1))
y_pred = regr.predict(x.values.reshape(-1, 1))
plt.plot(x, y, 'go')
plt.plot(x, y_pred, 'b--')
plt.title("Fig 1b. Inverse relationship between number of civilian guns and homicides", fontsize=12, y=1.05)
plt.ylabel("Firearm homicides per 100k people", fontsize=8)
plt.xlabel("Firearms per person", fontsize=8)
# annotate r-squared value
plt.text(0.9, 3.5, 'R²: {0}'.format(regr.score(x.values.reshape(-1,1), y.values.reshape(-1,1)).round(2)), color='blue')
# annotate first and last three years
for i in [0, 1, 2, len(x)-3, len(x)-2, len(x)-1]:
plt.text(x[i] + 0.01, y[i] + 0.01, z[i])
plt.show()
###Output
_____no_output_____
###Markdown
Are assault weapons really that deadly?
The rate of rifle homicides (including so-called "assault rifles") has dropped even more quickly than the general homicide rate. Despite all of the furor raised over civilian ownership of "assault rifles", fewer than 400 people have been killed with rifles (hunting, military, semiautomatic, etc.) for the last 10 years.
The answer seems to be no. The overwhelming majority of homicides committed with firearms involve handguns.
###Code
plt.plot(data['year'], data['usa.homicide.firearms.rifles'])
plt.title('Fig 2a. Homicide rate, rifles, US', fontsize=12, y=1.05)
plt.show()
pos = np.arange(len(data['year'])) + .5
bars = plt.barh(pos, data['usa.fbi.firearms.rifles'], align='center')
plt.title('Fig 2b. Total people killed with rifles in the US', fontsize=12, y=1.05)
plt.yticks(pos, data['year'], fontsize=10)
plt.ylim(0, len(data['year']))
plt.show()
###Output
_____no_output_____
###Markdown
Mass Shootings
Data on mass shootings was collected from the [Mother Jones US Mass Shooting Database](https://www.motherjones.com/politics/2012/12/mass-shootings-mother-jones-full-data/)
Mother Jones used the following criteria to define a mass shooting:
* The perpetrator took the lives of at least three people.
* The killings were carried out by a lone shooter. (Except in the case of the Columbine massacre and the Westside Middle School killings, which involved two shooters.)
* The shootings occurred in a public place. (Except in the case of a party on private property in Crandon, Wisconsin, and another in Seattle, where crowds of strangers had gathered.) Crimes primarily related to gang activity or armed robbery are not included, nor are mass killings that took place in private homes (often stemming from domestic violence).
* Perpetrators who died or were wounded during the attack are not included in the victim counts.
* "Spree Killings": more than one location over a short period of time, otherwise fitting the above criteria.
___There have been only 97 mass shootings in the United States since 1982___
Processing of the data was performed, with the following categorical terms lumped under semi-automatic rifles:
* Semi-automatic/semiautomatic rifle
* AR-15
* Assault rifle
Data on firearm homicides was collected from the [FBI Uniform Crime Reporting page](https://ucr.fbi.gov/ucr-publications)
I have selected the years since 1995 for this case study, since reliable data for some statistics tends to be harder to find for years prior to that (FBI crime reports, World Bank development indicators, homicide data, etc). 1996 was also the year that the Australian ban on semi-automatic weapons was implemented.
Semi-automatic rifle use in mass shootings
In the last 23 years (since 1995), there have been a total of 21 instances of mass shootings involving the use of semi-automatic rifles (also called "assault" rifles). In those 21 cases, a total of 229 people were killed. 160 (69%, expected: 19%) of those fatalities came from just four mass shootings:
1. Las Vegas, 2017: 58
2. Orlando nightclub, 2016: 49
3. Sandy Hook, 2012: 27
4. Texas First Baptist, 2017: 26
Semi-automatic handgun use in mass shootings
Interestingly, in the same time span there have been 36 instances of mass shootings where only semi-automatic handguns were used. In these cases, a total of 267 people were killed. 85 (25%, expected: 11%) of those fatalities came from four mass shootings:
1. Virginia Tech, 2007: 32
2. Killeen, Texas, 1996: 24
3. USPS, 1986: 15
4. Binghamton, 2009: 14
Are mass shootings increasing in frequency?
Looking at the data, there is an increase in the number of victims of mass shootings, and in the percentage of gun homicides from mass shootings. However, this increase is relatively small (accounting for approximately 0.5% of gun homicides), and is significantly smaller than the increase in defensive gun use by citizens.
###Code
x = data['year']
y = data['usa.mass_shootings.fatalities'] / data['usa.fbi.firearms.total'] * 100
z = data['usa.justifiable.firearms.total'] / data['usa.fbi.firearms.total'] * 100
regr = linear_model.LinearRegression()
fit = regr.fit(x.values.reshape(-1, 1), z.values.reshape(-1, 1))
z_pred = regr.predict(x.values.reshape(-1, 1))
plt.plot(x, z, 'bo', label='Percent of gun homicides that are justifiable')
plt.plot(x, z_pred, 'b--')
plt.text(1995, 2.5, 'R²: {0}'.format(regr.score(x.values.reshape(-1,1), z.values.reshape(-1,1)).round(2)), color='blue')
regr = linear_model.LinearRegression()
fit = regr.fit(x.values.reshape(-1, 1), y.values.reshape(-1, 1))
y_pred = regr.predict(x.values.reshape(-1, 1))
plt.plot(x, y, 'ro', label='Percent of gun homicides from mass shootings')
plt.plot(x, y_pred, 'r--')
plt.text(1995, 0.3, 'R²: {0}'.format(regr.score(x.values.reshape(-1,1), y.values.reshape(-1,1)).round(2)), color='red')
plt.title('Fig 3. Defensive gun use vs mass shootings', y=1.05)
plt.ylabel('Percent of gun homicides', fontsize=10)
plt.xlim(1994, 2017)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
###Output
_____no_output_____
###Markdown
Setup
###Code
#this line makes matplotlib plots show directly in the notebook
%matplotlib inline
import pandas
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Creating the Pandas DataFrame
###Code
#read in the input data. Skip the first line as a header line.
df = pandas.read_csv('datalog_Richards_Hall.csv', header=1, sep=',', index_col=0, parse_dates=True,infer_datetime_format=True, low_memory=False)
# Get one day of data
beginDate = '2017-03-04 00:00:00'
endDate = '2017-03-04 23:59:59'
df_sub = df[beginDate:endDate]
###Output
_____no_output_____
###Markdown
Create timeseries plot of FlowRate variable
###Code
#Plot just the FlowRate variable
df_sub.plot(y = 'FlowRate', marker='o')
#get current axes
ax = plt.gca()
# set the x and y-axis labels
ax.set_ylabel('Flow (gpm)')
ax.set_xlabel('Date/Time')
# set the x and y-axis limits
ax.set_xlim([df_sub.index[0], df_sub.index[-1]])
ax.grid(True)
# Add a legend with some customizations
legend = ax.legend(loc='upper right', shadow=True)
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
fig.savefig('test2png.png', dpi=100)
###Output
_____no_output_____
###Markdown
Resample the data from 1s to 1hr
###Code
#aggregate data to hourly time step
df_sub = df.resample(rule='1H', base=0).sum()
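# Note (editorial sketch, not part of the original exercise): .sum() adds up the
# instantaneous FlowRate readings in each hour. For a rate variable, .mean() is often
# the more interpretable hourly aggregate, while IncrementalVolume (summed in a later
# cell) is the natural column to total.
df_sub_mean = df.resample(rule='1H', base=0).mean()  # hourly average of each column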
###Output
_____no_output_____
###Markdown
Plot the 1hr FlowRate data
###Code
#Plot just the FlowRate variable
df_sub.plot(y = 'FlowRate', marker='o')
#get current axes
ax = plt.gca()
# set the x and y-axis labels
ax.set_ylabel('Flow (gpm)')
ax.set_xlabel('Date/Time')
# set the x and y-axis limits
ax.set_xlim([df_sub.index[0], df_sub.index[-1]])
ax.grid(True)
# Add a legend with some customizations
legend = ax.legend(loc='upper right', shadow=True)
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
fig.savefig('test2png.png', dpi=100)
###Output
_____no_output_____
###Markdown
Create a plot to show total volume of water used on an hourly timestep
The plot is for each hour of the day and across all days in the period of record, including error bars showing +/- 1 standard deviation in water use for that hour.
###Code
# First aggregate the incremental flow volume to a
# total volume for each hourly time step
hourlyTotVol = df['IncrementalVolume'].resample(rule='1H', base=0).sum()
# Calculate an average volume for each hour
# of the day by aggregating across days using
# the groupby function
hourlyAvgVol = hourlyTotVol.groupby(hourlyTotVol.index.hour).mean()
# Also calculate the standard deviation for each hour
hourlyStDevVol = hourlyTotVol.groupby(hourlyTotVol.index.hour).std()
# Generate a plot of the data with some indication of the variability in
# the hourly average values (e.g., add error bars with +- one Std. Dev.)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.errorbar(x=hourlyAvgVol.index, y=hourlyAvgVol,
yerr=hourlyStDevVol, capsize=3,
capthick=0.5, fmt='--',
label='Average Hourly Volumes', marker='o')
# Set the limits on the x-axis and the tick
# mark locations
ax.set_xlim(-0.5, 23.5)
xmarks = range(0, 23 + 1, 1)
plt.xticks(xmarks)
# Set the x and y-axis labels and title
ax.set_ylabel('Average Hourly Volume (gal)')
ax.set_xlabel('Hour of the Day')
ax.grid(False)
plt.title('Average Hourly Volume Estimates')
#set figure size and save figure
legend = ax.legend(loc='upper right', shadow=True)
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
fig.savefig('test2png.png', dpi=100)
###Output
_____no_output_____
###Markdown
Some tricks that may help with your Assignment 4
###Code
# add a new column to the dataset that in the day of the week (0 = Monday, 6 = Sunday)
df['weekday'] = df.index.weekday
#show the result of the above with the new weekday column
df.head()
#create a dataset that includes just the weekday (and not weekend) values
df_weekday = df[(df.weekday >= 0) & (df.weekday <= 4)]
#Plot this new weekday dataset to show the filter worked correctly
df_weekday.plot(y = 'FlowRate', marker='o')
#get current axes
ax = plt.gca()
# set the x and y-axis labels
ax.set_ylabel('Flow (gpm)')
ax.set_xlabel('Date/Time')
# set the x and y-axis limits
ax.set_xlim([df_sub.index[0], df_sub.index[-1]])
ax.grid(True)
# Add a legend with some customizations
legend = ax.legend(loc='upper right', shadow=True)
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
fig.savefig('test2png.png', dpi=100)
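# A complementary trick (a sketch, not part of the assignment): compare average hourly
# volumes on weekdays vs. weekends. Resample first, then split by day of week, so that
# empty weekend bins are not counted as zero-volume weekday hours.
hourly_vol = df['IncrementalVolume'].resample(rule='1H', base=0).sum()
is_weekday = hourly_vol.index.weekday <= 4
weekday_avg = hourly_vol[is_weekday].groupby(hourly_vol[is_weekday].index.hour).mean()
weekend_avg = hourly_vol[~is_weekday].groupby(hourly_vol[~is_weekday].index.hour).mean()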
###Output
_____no_output_____
###Markdown
The State of Open Data on School Bullying and Harassment
By [Two Sigma Data Clinic](https://www.twosigma.com/dataclinic)
On March 6th, 2018, the Two Sigma Data Clinic hosted "The State of Open Data on School Bullying and Harassment" as part of [NYC Open Data Week](https://www.open-data.nyc/), a weeklong celebration of the city's [open data portal](https://opendata.cityofnewyork.us/). The two-hour event featured a comparison of federal and local datasets about New York City schools, followed by a panel discussion on what open data can reveal—and conceal—about this important school safety issue.
This notebook documents our comparative analysis of the bullying/harassment data in the New York City Department of Education's School Survey of parents, students, and teachers in NYC public schools, and the federal Office for Civil Rights's Civil Rights Data Collection Survey, which is filled out by all public schools and districts nationwide, both for the 2013-14 school year.
Data Sources
The [raw Excel file](http://schools.nyc.gov/documents/misc/2014%20Public%20Data%20File%20SUPPRESSED.xlsx) of the NYC survey responses comes from the New York City Department of Education (NYCDOE). The full page for the 2014 NYC School Survey Results (representing the 2013-14 school year) is [here](http://schools.nyc.gov/Accountability/tools/survey/2014+NYC+School+Survey+Results). NYCDOE posts archived survey information [here](http://schools.nyc.gov/Accountability/tools/survey/SurveyArchives.htm).
The [raw file](https://inventory.data.gov/dataset/2acc601e-9806-4dff-b144-f8a5e7c095b8/resource/3dc84a95-526a-4b90-aacd-72f60d4fecbc/download/crdc201314csv.zip) of the federal civil rights survey responses comes from the U.S. Department of Education (ED)'s Office for Civil Rights (OCR), available on [Data.gov](https://catalog.data.gov/dataset/civil-rights-data-collection-2013-14). The full page for the Civil Rights Data Collection for the 2013-14 school year is [here](https://www2.ed.gov/about/offices/list/ocr/docs/crdc-2013-14.html). OCR also hosts a data portal with information from earlier years [here](https://ocrdata.ed.gov/).
For details on the pre-processing of these datasets, see the notebooks in the `processing/` folder referenced under the Load Data section below.
Import Python libraries and set working directories
###Code
from itertools import product
import os
import feather
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import ttest_ind
from sklearn import preprocessing
import statsmodels.api as sm
import statsmodels.formula.api as smf
import csv
import time
from sklearn.linear_model import LogisticRegression
%matplotlib inline
%load_ext rpy2.ipython
intermediate_dir = os.path.join(os.getcwd(), 'data', 'intermediate')
output_dir = os.path.join(os.getcwd(), 'data', 'output')
###Output
_____no_output_____
###Markdown
Load data
Let's read in the combined dataset, containing information from both surveys. The data is saved as a [feather](https://blog.cloudera.com/blog/2016/03/feather-a-fast-on-disk-format-for-data-frames-for-r-and-python-powered-by-apache-arrow/) file in the `data/output` folder. It is also available as a csv file in the same folder. For details on data cleaning and pre-processing, see the notebooks in the `processing/` folder, specifically `processing/01_combine_surveys.ipynb` and `processing/02_add_school_characteristics.ipynb`.
###Code
df = pd.read_feather(
os.path.join(output_dir, 'combined_data.feather')
)
###Output
_____no_output_____
###Markdown
NYC School Survey
Let's first look at the responses to the NYC School Survey, which is filled out by parents, teachers, and grade 6-12 students.
Plot the distribution of responses on the NYC School Survey
Let's plot (using the `ggplot2` library in R) the percent of respondents selecting each answer value to the two NYC School Survey questions about bullying/harassment: a general question about bullying/harassment, and a specific question about bullying/harassment based on differences. Since each group has four answer choices, we rank-order these from 1 to 4, going from weak to strong feelings, by creating a 4-point ordinal scale. Note we exclude "Don't Know" responses from Parents. For more information, see `processing/nyc_school_survey.ipynb`.
Set up [`rpy2`](https://rpy2.bitbucket.io/) in order to run R in this Jupyter notebook
###Code
%%R -i df
library('dplyr')
library('ggplot2')
library('tidyr')
survey.answers <- df %>% select(dbn, school_name, answer_code, grep('perc_harass', names(df)))
survey.answers.long <- survey.answers %>% gather(category, value, -answer_code, -dbn, -school_name,
-answer_code)
survey.answers.long$population <- ifelse(grepl('parents', survey.answers.long$category), 'parents',
ifelse(grepl('students', survey.answers.long$category), 'students',
'teachers'))
survey.answers.long$question <- ifelse(grepl('differences', survey.answers.long$category), 'differences', 'harass')
survey.answers.long.agg <- survey.answers.long %>%
group_by(answer_code, question, population) %>%
summarise(response = mean(value, na.rm = T))
survey.answers.long.agg$q <- ifelse(survey.answers.long.agg$question == 'differences',
'Students bully/harass each other based on differences',
'Students bully/harass each other')
survey.answers.long.agg$col <- ifelse(survey.answers.long.agg$answer_code == 3, '#80cdc1',
ifelse(survey.answers.long.agg$answer_code == 4, '#018571',
ifelse(survey.answers.long.agg$answer_code == 2, '#dfc27d', '#a6611a')))
highs <- survey.answers.long.agg %>% filter(answer_code >= 3)
lows <- survey.answers.long.agg %>% filter(answer_code < 3)
p <- ggplot() +
geom_bar(data = survey.answers.long.agg[order(survey.answers.long.agg$answer_code, decreasing = T),],
aes(x = population, y = response, fill = factor(answer_code, levels = c(
'4', '3', '2', '1'))),
stat="identity", width = 0.6) +
coord_flip() +
facet_wrap(~q, nrow = 2, scales = 'free') + labs(y = '', x = '') +
theme(legend.position = 'bottom') +
labs(fill = 'feelings scale:\n1 = weak; 4 = strong ') + guides(fill = guide_legend(reverse=T)) +
scale_fill_manual(values = c('#018571', '#80cdc1', '#dfc27d', '#a6611a'))
p + theme(panel.background = element_rect(fill = "white"), axis.text.x = element_text(size = 12),
axis.text.y = element_text(size = 12), strip.text = element_text(size = 16),
panel.spacing = unit(2, "lines"), axis.ticks.y = element_line(size = 0),
strip.background = element_rect(fill = "white")) + labs(x = '', y = '')
###Output
_____no_output_____
###Markdown
Calculate standardized scores for each school, based on responses to the NYC School Survey
Because we have an ordinal scale representing the responses, we can compute a "score" for each school by taking a weighted average of the response percentages in each category.
###Code
def condense_to_score(df_orig, weight_cols):
df = df_orig.copy()
df[weight_cols] = df[weight_cols] / 100.0
score_cols = [c + '_score' for c in weight_cols]
for w, s in zip(weight_cols, score_cols):
df[s] = df[w] * df['answer_code']
df = df[['dbn', 'school_name'] + score_cols].groupby(['dbn', 'school_name']).sum(min_count = 1)
return df
survey_cols = [''.join(tup) for tup in product(['perc_harass_'],
['', 'differences_'],
['parents', 'students', 'teachers'])]
unique_cols = [c for c in df.columns if c not in set(survey_cols + ['answer_code'])]
df_schools = df[unique_cols].drop_duplicates()
df_scores = condense_to_score(df, survey_cols).reset_index()
###Output
_____no_output_____
###Markdown
To understand where each school falls with respect to the citywide average, we can compute the citywide average and then calculate a "z-score" for each school that standardizes the school's score with respect to this average. Schools with higher-than-average feelings about bullying/harassment will have positive z-scores, and schools with lower-than-average feelings will have negative z-scores.
###Code
def calculate_z_score(df_orig):
df = df_orig.copy()
score_cols = [c for c in df_scores.columns if 'score' in c]
z_score_cols = [c + '_z' for c in score_cols]
for s, z in zip(score_cols, z_score_cols):
df[z] = (df[s] - df[s].agg('mean')) / df[s].agg('std')
return df
df_scores_z = calculate_z_score(df_scores)
df_merge = pd.merge(df_schools, df_scores_z, on = ['dbn', 'school_name'])
###Output
_____no_output_____
###Markdown
How correlated are responses? Let's generate a heatmap to see how correlated responses are between each of the questions.
###Code
m = df_scores_z[['perc_harass_parents_score_z', 'perc_harass_differences_parents_score_z',
'perc_harass_students_score_z', 'perc_harass_differences_students_score_z',
'perc_harass_teachers_score_z', 'perc_harass_differences_teachers_score_z']]
# Compute the correlation matrix
corr = m.corr()
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(10, 8))
# Draw the heatmap with the mask and correct aspect ratio
heatmap = sns.heatmap(corr, cmap='YlGnBu', annot_kws={"size": 18})
###Output
_____no_output_____
###Markdown
Within each response group, the two bullying/harassment questions are strongly correlated, as shown by the dark blue squares on the diagonal. So, students agree with students, parent with parents, and so on. Student + parent scores tend to be more correlated with each other than teachers + students or teachers + parents. Where do teachers and students appear to disagree on the NYC school survey? Let's look at teacher and student agreement within individual schools. Here we plot the difference in scores between teachers and students on the y axis where each dot represents a school. Schools are ranked by total enrollment along the x-axis. If there was perfect agreement between teachers and students, the orange trend line would be horizontal but interestingly, we notice a slight trend of disagreement that changes with school enrollment – in smaller schools, teachers tend to perceive more bullying and harassment than students but in larger schools, this is reversed. Note, however, that the correlation is small.
###Code
%%R -i df_merge -w 600 -h 600
library('ggplot2')
library('dplyr')
library('forcats')
df_merge$school_sorted <- as.numeric(fct_reorder(as.factor(df_merge$dbn),
df_merge$total_enrollment))
df_merge <- df_merge %>% mutate(students_teachers_diff = perc_harass_differences_teachers_score_z - perc_harass_differences_students_score_z)
plot <- ggplot(df_merge,
aes(x = log(total_enrollment), y = students_teachers_diff)) +
geom_point(color = '#00747A') +
labs(x = 'School enrollment (log values)',
y = 'Feelings about bullying/harassment\n(Top = Stronger; Bottom = Weaker)') +
theme_minimal() +
geom_smooth(method = 'lm', se = F, color = '#E37222', size = 1.5) +
theme(axis.text = element_blank(),
axis.title=element_text(size = 12),
axis.ticks.x = element_blank(), plot.title = element_text(size=16)) +
ggtitle('Teachers and students appear to disagree in the largest and smallest schools\n(though this correlation is small)')
print(cor(df_merge$students_teachers_diff, log(df_merge$total_enrollment), use = 'complete.obs', method = 'spearman'))
plot(plot)
###Output
_____no_output_____
###Markdown
Federal civil rights data
Now let's look at the responses to the federal Office for Civil Rights's Civil Rights Data Collection survey, which is filled out by the schools. When we refer to allegations, we mean either race, sex, or disability-based allegations as we did not examine each category independently. Also note that in addition to the number of allegations, the Office for Civil Rights collects the number of students reported to be bullied/harassed and the number of students disciplined for bullying/harassment. For ease of comparison with the NYC School Survey, we only used allegations in our analysis. For more information, see `processing/federal_civil_rights_survey.ipynb`.
###Code
df_merge['max_allegations'] = df_merge[['allegations_harass_sex',
'allegations_harass_race',
'allegations_harass_dis']].max(axis = 1)
df_merge['allegations_binary'] = np.where(df_merge['max_allegations'] == 0, 0, 1)
df_merge['allegations_binary'] = np.where(pd.isnull(df_merge['max_allegations']), np.nan, df_merge['allegations_binary'])
df = pd.merge(df, df_merge[['dbn', 'allegations_binary', 'max_allegations']], on = 'dbn', how = 'outer')
###Output
_____no_output_____
###Markdown
How many schools report bullying/harassment allegations? Let's plot (using the `ggplot2` library in R) the percent of NYC schools reporting 0, 1, and more than 1 allegation of bullying/harassment.
###Code
%%R -i df_merge
library('ggplot2')
library('dplyr')
library('forcats')
df_merge$allegations_cat <- ifelse(df_merge$max_allegations == 0, 'zero',
ifelse(df_merge$max_allegations == 1, 'one',
'multiple'))
bar <- df_merge %>% group_by(allegations_cat) %>% summarise(n = n()) %>% mutate(perc = n/sum(n) * 100) %>% filter(is.na(allegations_cat) == F)
bar <- bar %>%
mutate(allegations_cat = fct_reorder(allegations_cat, n, .desc = TRUE))
plot <- ggplot(bar, aes(x = allegations_cat, y = n)) + geom_bar(stat='identity',
width = .8,
fill = '#00747A') +
labs(x = '', y = '') + theme(panel.background = element_rect(fill = "white"),
axis.ticks.x = element_blank(),
axis.text.y = element_text(size = 12),
axis.text.x = element_text(size = 12),
) +
scale_y_continuous(limits = c(0, 1500), expand = c(0, 0)) +
theme(axis.line.x = element_line(color="darkgrey", size = 2), plot.title = element_text(size = 18)) +
ggtitle('Number of schools reporting ____ allegations of\nbullying/harassment\n')
print(paste(bar[bar$allegations_cat == 'zero',]$n, 'schools, or', round(bar[bar$allegations_cat == 'zero',]$perc),
"percent of NYC schools report zero allegations of bullying/harassment to the federal Office for Civil Rights."))
plot(plot)
###Output
_____no_output_____
###Markdown
Let's use the full dataset (created in `processing/federal_civil_rights_survey.ipynb`) to see how this compares with the percent of schools reporting zero allegations nationwide.
###Code
ocr_nationwide = pd.read_feather(os.path.join(intermediate_dir, 'federal_ocr_survey.feather'))
print(str(ocr_nationwide.allegations_harass_ind.value_counts()[0]) +
" schools, or " + str(round(ocr_nationwide.allegations_harass_ind.value_counts(normalize = True)[0] * 100)) +
" percent of schools nationwide report zero allegations of bullying/harassment to the federal Office for Civil Rights.")
###Output
76916 schools, or 81.0 percent of schools nationwide report zero allegations of bullying/harassment to the federal Office for Civil Rights.
###Markdown
Comparing the NYC School Survey and the federal civil rights data
Now let's compare the two surveys.
Characteristics of schools with 0 or 1+ allegations
Using this classification of zero versus one or more, let's explore the differences in school characteristics.
###Code
f = csv.writer(open(os.path.join(intermediate_dir, 't_stats.csv'), 'w'))
f.writerow(['variable', 't', 'p'])
for var in df_merge.drop(['allegations_binary'], axis = 1).columns:
try:
group1 = df_merge[df_merge['allegations_binary'] == 0][var].astype(float).dropna()
group2 = df_merge[df_merge['allegations_binary'] == 1][var].astype(float).dropna()
t, p = ttest_ind(group1, group2, equal_var=False)
f.writerow([var, t, p])
except:
continue
time.sleep(2)
t_stats = pd.read_csv(os.path.join(intermediate_dir, 't_stats.csv'))
t_stats = t_stats.loc[abs(t_stats['t']) > 1.96]
t_stats['variable'].tolist() # to get significant variables
grouped = df_merge.loc[df_merge['allegations_binary'].notnull()].groupby('allegations_binary')
grouped_means = grouped.mean()
grouped_means.reset_index(inplace = True)
grouped_means[['allegations_binary', 'total_enrollment', 'perc_students_with_disabilities', 'perc_english_language_learners',
'perc_harass_differences_parents_score_z', 'perc_harass_differences_students_score_z',
'perc_harass_differences_teachers_score_z']]
###Output
_____no_output_____
###Markdown
![characteristics](img/characteristics.PNG)
Schools with 1+ allegation of bullying & harassment have a significantly higher total school enrollment (which is to be expected given we are grouping based on counts) and percent of students with disabilities, but a lower percent of English language learners. Schools reporting allegations had on average much higher z-scores for the NYC survey, indicating that perceived harassment is generally higher in schools with at least one federal allegation of bullying, suggesting general agreement between the federal and local data sources.
How much do the two surveys (dis)agree?
Although the above section shows that survey agreement is generally in the "right" direction (namely federal allegations are associated with student, teacher, and parent perceptions of bullying), let's take a closer look at individual schools. Let's create a dataframe, `df_all_the_time`, that filters for the percentage of students/parents/teachers saying bullying is happening all of the time.
###Code
df_all_the_time = df.loc[df['answer_code'] == 4]
###Output
_____no_output_____
###Markdown
Now we can find outliers. For example, here are some schools with a high percentage of students saying bullying based on differences happens all of the time but that reported 0 allegations to the federal Civil Rights Data Collection. (Each row below is a separate school.)
###Code
df_all_the_time[['dbn',
'perc_harass_differences_students']].sort_values(by = 'perc_harass_differences_students', ascending = False).head()
###Output
_____no_output_____
###Markdown
As shown above, there is a school where nearly 35 percent of students said bullying based on differences was happening all of the time in the NYC School Survey but that reported 0 allegations to the federal Office for Civil Rights. On the other hand, here are some schools that reported 1 or more allegations of bullying to the federal Civil Rights Data Collection but where a low percentage of students said bullying was happening all of the time in the NYC School Survey. (Each row below is a separate school.)
###Code
df_all_the_time.loc[df['allegations_binary'] == 1][['dbn',
'perc_harass_differences_students', 'max_allegations']].sort_values(by = 'max_allegations', ascending = False).head()
###Output
_____no_output_____
###Markdown
As shown above, there is a school that reported 26 allegations of harassment to the federal Office for Civil Rights, but where less than 1 percent of students said bullying was happening all the time. Let's plot the distribution of students responding that bullying based on differences is happening all of the time among schools that report 0 and 1+ allegations. Here we will remove schools with question response rates < 75%. The dark teal (top plot) represents schools reporting 0 allegations and the light teal (bottom plot) represents schools reporting 1+ allegations.
###Code
df_all_the_time = df_all_the_time[~(df_all_the_time['perc_harass_differences_students'].isnull())]
df_all_the_time_rr = df_all_the_time[df_all_the_time['question_rr_harass_differences_students'] >= 75.0]
allegations_mags = [1.0 if x else -1.0 for x in df_all_the_time_rr['allegations_harass_ind'].values]
df_all_the_time_rr['mag_all'] = df_all_the_time_rr['perc_harass_differences_students'] * (allegations_mags)
df_all_the_time_rr['mag_color'] = df_all_the_time_rr['allegations_harass_ind'].apply(lambda i: '#63CECA' if i else '#00747A')
df_all_the_time_rr = df_all_the_time_rr.sort_values(['allegations_harass_ind', 'perc_harass_differences_students'], ascending=False)
base_plot_size = 15.0
for df in [df_all_the_time_rr]:
plt.style.use('seaborn-ticks')
f, ax = plt.subplots(figsize=(base_plot_size, base_plot_size * 2))
ax.barh(range(len(df)), df['perc_harass_differences_students'], 0.6, color=df['mag_color'])
ax.grid()
ticks = range(0, 41, 5)
plt.xticks(ticks, [str(abs(t)) + '%' for t in ticks], fontsize=16)
plt.yticks([])
plt.show()
###Output
_____no_output_____
###Markdown
Zones of (dis)agreement
Now let's try to see if we can better characterize what makes a school more or less likely to show agreement between data sources. To do this, we need to first define what agreement and disagreement are. For the federal data, we have a binary classification of 0 allegations or 1+ allegations. Similarly, for the NYC school survey we can categorize schools into those with a low perception of bullying on the bottom versus those with a high perception on the top. To figure out what constitutes "low" and "high" perceptions, we ranked schools using z-scores and used a threshold to define "low" perception that would bucket roughly the same percent of schools as those with zero allegations ("high" perception would be the rest). We then mapped schools into zones of agreement and disagreement.
- Agreement is defined as schools with zero allegations + low perception OR at least one allegation + high perception
- Conversely, disagreement refers to schools with zero allegations + high perception OR one plus allegation + low perception.
![zones](img/zones_disagreement.PNG)
Now let's take a look at the data for each school to demonstrate how to translate our conceptual framework into an agreement versus disagreement metric for analysis.
###Code
%%R -i df_merge -w 800 -h 600
library('ggplot2')
ggplot(df_merge, aes(x = max_allegations, y = perc_harass_differences_students_score_z)) +
geom_point(color = '#00747A', size = 3) +
labs(x = '', y = '') + theme_minimal() +
theme(axis.text = element_text(size = 14),
axis.ticks.x = element_blank())
###Output
_____no_output_____
###Markdown
Here we can see the outlier we discussed earlier with 26 allegations + low student perceptions of bullying. Classifying schools into agreement/disagreement zones ~75 percent of schools have 0 allegations and ~25 percent have 1 or more allegations. We take a look at the (bottom) 75th & (top) 25th percentile of scores for parents/students/teachers. For instance, for each question and survey population, we ask: What is the threshold/cutoff score of the bottom 75% of schools? Then, we look at each school's score. If it is lower than this number, we classify the school as having "low" bullying/harassment perceptions, and if it's higher than this number, we classify the school as having "high" bullying/harassment perceptions.
###Code
#determine what qualifies as low response rates
rr = ['question_rr_harass_parents', 'question_rr_harass_students', 'question_rr_harass_teachers',
'question_rr_harass_differences_parents', 'question_rr_harass_differences_students', 'question_rr_harass_differences_teachers']
df_merge[rr].quantile(q=[0.005, 0.01, .05, 0.1]) #response rate cut off 75% corresponds to <.05 of a percent of data.
#nan values where response rate <75%
df_merge['avg_score_parents_harass2'] = df_merge['perc_harass_parents_score']
df_merge['avg_score_parents_harass_differences2'] = df_merge['perc_harass_differences_parents_score']
df_merge['avg_score_students_harass2'] = df_merge['perc_harass_students_score']
df_merge['avg_score_students_harass_differences2'] = df_merge['perc_harass_differences_students_score']
df_merge['avg_score_teachers_harass2'] = df_merge['perc_harass_teachers_score']
df_merge['avg_score_teachers_harass_differences2'] = df_merge['perc_harass_differences_teachers_score']
df_merge.loc[df_merge.question_rr_harass_parents < 75, 'avg_score_parents_harass2'] = np.nan
df_merge.loc[df_merge.question_rr_harass_differences_parents < 75, 'avg_score_parents_harass_differences2'] = np.nan
df_merge.loc[df_merge.question_rr_harass_students < 75, 'avg_score_students_harass2'] = np.nan
df_merge.loc[df_merge.question_rr_harass_differences_students < 75, 'avg_score_students_harass_differences2'] = np.nan
df_merge.loc[df_merge.question_rr_harass_teachers < 75, 'avg_score_teachers_harass2'] = np.nan
df_merge.loc[df_merge.question_rr_harass_differences_teachers < 75, 'avg_score_teachers_harass_differences2'] = np.nan
#reclassify school type and grade category
df_merge['school_type2'] = df_merge['school_type'].map(lambda x: 'general' if x == 'General Academic' else 'Other' if x == 'Transfer School'
else 'Other' if x == 'Career Technical' else 'Other' if x == 'Special Education' else x)
df_merge['school_grade_category2'] = df_merge['school_grade_category'].map(lambda x: 'Elementary' if x == 'Elementary' else 'Elementary' if x == 'Early Childhood'
else 'Middle' if x == 'Junior High-Intermediate-Middle' else 'High' if x == 'High school' else 'High'
if x == 'Secondary School' else 'Mixed' if x == 'K-8' else 'Mixed' if x == 'K-12 all grades'
else 'Mixed' if x == 'Ungraded' else x)
#classify percentiles for agreement/disagreement metric
alleg = df_merge.allegations_binary.value_counts()
alleg / alleg.sum()
survey = ['perc_harass_parents_score_z', 'perc_harass_differences_parents_score_z',
'perc_harass_students_score_z', 'perc_harass_differences_students_score_z',
'perc_harass_teachers_score_z', 'perc_harass_differences_teachers_score_z']
df_merge[survey].quantile(q=[0.755172])
#create indicator to match percentiles between surveys
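# (the hard-coded cutoffs below are the ~75.5th-percentile z-scores produced by the quantile call above, one per survey question/population)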
df_merge['bin_parents_harass_z'] = df_merge.perc_harass_parents_score_z.map(lambda x: 0 if x <= 0.668263
else 1 if x > 0.668263 else x)
df_merge['bin_parents_harass_differences_z'] = df_merge.perc_harass_differences_parents_score_z.map(lambda x: 0 if x <= 0.700869
else 1 if x > 0.700869 else x)
df_merge['bin_students_harass_z'] = df_merge.perc_harass_students_score_z.map(lambda x: 0 if x <= 0.678322
else 1 if x > 0.678322 else x)
df_merge['bin_students_harass_differences_z'] = df_merge.perc_harass_differences_students_score_z.map(lambda x: 0 if x <= 0.706146
else 1 if x > 0.706146 else x)
df_merge['bin_teachers_harass_z'] = df_merge.perc_harass_teachers_score_z.map(lambda x: 0 if x <= 0.68863
else 1 if x > 0.68863 else x)
df_merge['bin_teachers_harass_differences_z'] = df_merge.perc_harass_differences_teachers_score_z.map(lambda x: 0 if x <= 0.650904
else 1 if x > 0.650904 else x)
surveyx = ['bin_parents_harass_z', 'bin_parents_harass_differences_z',
'bin_students_harass_z', 'bin_students_harass_differences_z',
'bin_teachers_harass_z', 'bin_teachers_harass_differences_z']
v = df_merge[surveyx].apply(lambda x: x.value_counts())
v / v.sum()
def f(row):
val = np.nan
if row.allegations_binary == 0:
if row.bin_parents_harass_differences_z == 0:
val = 1
elif row.bin_parents_harass_differences_z == 1:
val = 0
if row.allegations_binary == 1:
if row.bin_parents_harass_differences_z == 1:
val = 1
elif row.bin_parents_harass_differences_z == 0:
val = 0
return val
df_merge['agreement_parents_harass_diff'] = df_merge.apply(f, axis=1)
def f(row):
val = np.nan
if row.allegations_binary == 0:
if row.bin_students_harass_differences_z == 0:
val = 1
elif row.bin_students_harass_differences_z == 1:
val = 0
if row.allegations_binary == 1:
if row.bin_students_harass_differences_z == 1:
val = 1
elif row.bin_students_harass_differences_z == 0:
val = 0
return val
df_merge['agreement_students_harass_diff'] = df_merge.apply(f, axis=1)
def f(row):
val = np.nan
if row.allegations_binary == 0:
if row.bin_teachers_harass_differences_z == 0:
val = 1
elif row.bin_teachers_harass_differences_z == 1:
val = 0
if row.allegations_binary == 1:
if row.bin_teachers_harass_differences_z == 1:
val = 1
elif row.bin_teachers_harass_differences_z == 0:
val = 0
return val
df_merge['agreement_teachers_harass_diff'] = df_merge.apply(f, axis=1)
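# The three apply functions above share the same logic and differ only in which
# survey-bin column they read; an equivalent parameterised helper (a sketch, not
# part of the original analysis) could replace them:
def make_agreement(bin_col):
    def g(row):
        if row[bin_col] not in (0, 1) or row.allegations_binary not in (0, 1):
            return np.nan
        # agreement (1) when the federal allegations indicator and the survey bin point the same way
        return 1 if row.allegations_binary == row[bin_col] else 0
    return g
# e.g. df_merge['agreement_parents_harass_diff'] = df_merge.apply(make_agreement('bin_parents_harass_differences_z'), axis=1)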
###Output
_____no_output_____
###Markdown
Which factors are associated with survey (dis)agreement? Now that every school has been assigned either a 1 for agreement between local + federal surveys or a 0 for disagreement, we use this variable as the response in a first-pass simple logistic regression model to investigate the relationship between survey disagreement and various school characteristics. Let's run 3 different models - for parents, students, and teachers. Note we are using the bullying/harassment "based on differences" question here.
###Code
#final model variables
final = df_merge[['dk_parents_perc_harass_parents',
'dk_parents_perc_harass_differences_parents',
'survey_rr_parents', 'survey_rr_teachers',
'perc_allegations_harass_sex',
'perc_allegations_harass_race', 'perc_allegations_harass_dis',
'avg_class_size', 'pupil_teacher_ratio',
'total_enrollment', 'perc_female', 'perc_male', 'perc_asian',
'perc_black', 'perc_hispanic', 'perc_multiple_other', 'perc_white',
'perc_students_with_disabilities', 'perc_english_language_learners',
'perc_free_lunch', 'perc_harass_parents_score_z', 'perc_harass_differences_parents_score_z',
'perc_harass_students_score_z', 'perc_harass_differences_students_score_z',
'perc_harass_teachers_score_z', 'perc_harass_differences_teachers_score_z',
'avg_score_parents_harass2',
'avg_score_parents_harass_differences2',
'avg_score_students_harass2',
'avg_score_students_harass_differences2',
'avg_score_teachers_harass2',
'avg_score_teachers_harass_differences2',
'doe_or_charter', 'borough', 'school_type2', 'school_grade_category2',
'agreement_parents_harass_diff', 'agreement_teachers_harass_diff','allegations_binary']]
final_students = df_merge[['dk_parents_perc_harass_parents',
'dk_parents_perc_harass_differences_parents',
'survey_rr_parents',
'survey_rr_students', 'survey_rr_teachers',
'perc_allegations_harass_sex',
'perc_allegations_harass_race', 'perc_allegations_harass_dis',
'avg_class_size', 'pupil_teacher_ratio',
'total_enrollment', 'perc_female', 'perc_male', 'perc_asian',
'perc_black', 'perc_hispanic', 'perc_multiple_other', 'perc_white',
'perc_students_with_disabilities', 'perc_english_language_learners',
'perc_free_lunch', 'perc_harass_parents_score_z', 'perc_harass_differences_parents_score_z',
'perc_harass_students_score_z', 'perc_harass_differences_students_score_z',
'perc_harass_teachers_score_z', 'perc_harass_differences_teachers_score_z',
'avg_score_parents_harass2',
'avg_score_parents_harass_differences2',
'avg_score_students_harass2',
'avg_score_students_harass_differences2',
'avg_score_teachers_harass2',
'avg_score_teachers_harass_differences2',
'doe_or_charter', 'borough', 'school_type2', 'school_grade_category2',
'agreement_parents_harass_diff', 'agreement_students_harass_diff', 'agreement_teachers_harass_diff','allegations_binary']]
###Output
_____no_output_____
###Markdown
Parent model for school survey agreement
###Code
#school demographics:
parents = smf.logit(formula = 'agreement_parents_harass_diff ~ total_enrollment + perc_female + perc_black + perc_hispanic + \
perc_students_with_disabilities + perc_english_language_learners + perc_free_lunch + \
C(school_grade_category2, Treatment(reference="High")) + \
C(school_type2) + C(doe_or_charter)', data = final).fit()
parents.summary()
###Output
_____no_output_____
###Markdown
Student model for school survey agreement
###Code
#school demographics:
students = smf.logit(formula = 'agreement_students_harass_diff ~ total_enrollment + perc_female + perc_black + perc_hispanic + \
perc_students_with_disabilities + perc_english_language_learners + perc_free_lunch + \
C(school_grade_category2, Treatment(reference="High")) + \
C(school_type2) + C(doe_or_charter)', data = final_students).fit()
students.summary()
###Output
_____no_output_____
###Markdown
Teacher model for school survey agreement
###Code
#school demographics:
teacher = smf.logit(formula = 'agreement_teachers_harass_diff ~ total_enrollment + perc_female + perc_black + perc_hispanic + \
perc_students_with_disabilities + perc_english_language_learners + perc_free_lunch + \
C(school_grade_category2, Treatment(reference="High")) + \
C(school_type2) + C(doe_or_charter)', data = final).fit()
teacher.summary()
###Output
_____no_output_____
###Markdown
Course Co-occurrence AnalysisLearning Analytics, Visual Analytics @ UBCCraig Thompson, CTLT Academic programs often prescribe some number of official pathways. However, students may also choose to take combinations of courses other than those we intend. We'd like to reveal those patterns. ![desire path](https://live.staticflickr.com/3203/2847766967_8e7ae25768_h.jpg)[Desire path](https://flic.kr/p/5kDxUt) by [wetwebwork](https://www.flickr.com/photos/wetwebwork/), on Flickr [(CC BY 2.0)](https://creativecommons.org/licenses/by/2.0/) Data we have- For every student who earns a degree, we have a record of all the courses they took and counted towards their degree requirements.- We've limited the dataset to courses from a single department.- Our dataset is a two-dimensional **binary indicator matrix**: - Across the horizontal axis: all the courses offered by the department - Down the vertical axis: IDs for each student who earned a degree - For each student/course pair we indicate whether the student took the course - This is fake data
###Code
df.head(20)
###Output
_____no_output_____
###Markdown
Data we don't have- We don't have data for non-majors or anyone who did not complete their degree.- We don't have performance data, so we don't know how well any student did in any particular course.- We don't have any temporal information, so we don't know: - Which courses students took in sequence - Whether they took a pair of courses in back to back terms or with gaps - Which courses they took in concurrently About the analysis- We are doing an exploratory analysis. This is not experimental.- We will try to answer questions about *what* has happened, but we are unable to address *why*.- We'd like to say "students are discovering their own pathways through our planned degrees". The reality is that students may be taking these groupings of courses because they: - Fit nicely in their timetable - Are offered by instructors they like - Have a reputation of being easy or fun courses - Have free or inexpensive textbooks - Are the only courses left at registration time Analysis via clusteringCommon questions:- What does an "average" student look like, in terms of the courses they study?- If there were $N$ prototypical students, what would they look like?Answer:- We can formulate a *mathematically* average student, but there is no *pedagogically meaningful* average student.- This sort of analysis is messy and hard to interpret.- We'll do it anyway just to see!
###Code
im = plt.imshow(df.head(100), cmap=plt.cm.binary)
# most common courses
df.sum().sort_values(ascending=False).head(10)
# how many courses does each student take?
df.sum(axis=1).value_counts().sort_index().plot(kind='bar');
# helper functions for clustering students in course-space
def cluster(n, df):
kmedoids = KMedoids(n_clusters=n, metric='manhattan', random_state=0).fit(df)
nearest_medoid = kmedoids.predict(df)
distances = kmedoids.transform(df)
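    # pick, for each student (row), the distance to their assigned medoid via per-row fancy indexing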
nearest_distance = distances[[np.arange(distances.shape[0])],nearest_medoid].T
return (kmedoids, nearest_medoid, distances, nearest_distance)
def describe_clusters(kmedoids, nearest_medoid, distances, nearest_distance):
plt.figure(figsize=(10, 10))
for i in range(kmedoids.cluster_centers_.shape[0]):
print("cluster", i+1, "centroid:", list(df.columns[kmedoids.cluster_centers_[i,:] == 1]))
print("number of students in this cluster:", (nearest_medoid == i).sum())
cluster_member_distances = nearest_distance[nearest_medoid == i]
if cluster_member_distances.size > 0:
print("minimum distance to centroid:", cluster_member_distances.min())
print("maximum distance to centroid:", cluster_member_distances.max())
print("mean distance to centroid:", cluster_member_distances.mean())
print("median distance to centroid:", np.median(cluster_member_distances))
print()
plt.plot(sorted(cluster_member_distances))
describe_clusters(*cluster(4, df))
###Output
_____no_output_____
###Markdown
Lessons (re-)learned- Course enrolment datasets are big, and hard to construct a clear mental picture of.- We're working with (only) 50 students and 22 courses.- Within academic programs, there aren't usually clear, strong, non-prescribed patterns at the level of whole-enrolment historiesSo, let's try something different... HistoryWhat other domains work with similarly shaped data? Consumer purchases!- Each individual shopper collects a bunch of items- When a customer checks out, a sales invoice is generated listing all the items that were purchased togetherFrom all the sales invoices, we may wish to look for patterns in consumer behaviour:- Are there items that are **frequently** purchased together?- Are some items good **predictors** of other items being purchased?Why would someone care?- If consumers buy hot dogs whenever they buy hotdog buns, then grocery stores can attempt to manipulate custormers into buying hotdogs by putting hotdog buns on sale. **Profit!** Content warningThe following slides contain math. Set theory- a *set* is an unordered collection of distinct objects.- For this analysis, each student's course enrolment history is being treated as a set. Sets are often written like this: $\{a,b,c\}$- All the student enrolment histories are jointly represented as a collection of sets. - They are not a set-of-sets, because sets have distinct elements, and two students are able to have exactly the same course enrolment history. - So, this collection of sets is called a *multiset* or a *bag*, to denote that it may contain duplicate elements. Frequent itemsetsGiven a multiset (such as a stack of grocery store receipts, or a table of student-course enrolments), how do we find the frequently occurring subsets (or itemsets)?Example: Given $[\{a\},\{a,b\},\{a,c\},\{a,b,c,d\}]$We can see that:- $\{a\}$ occurs in all 4 sets- $\{a,b\}$ and $\{a,c\}$ each occur in 2 sets \begin{align}&\mathrm{Apriori}(T,\epsilon)\\&\quad L_1 \gets \{\textrm{large 1 item sets}\}\\&\quad k \gets 2\\&\quad \textbf{while}\ L_{k-1} \neq \emptyset\\&\quad \quad C_k \gets \{c = a \cup \{b\} \mid a \in L_{k-1} \land b \notin a, \{s \subseteq c \mid |s| = k - 1 \} \subseteq L_{k-1} \}\\&\quad \quad \textbf{for}\ \textrm{transactions}\ t \in T\\&\quad \quad \quad D_t \gets \{c \in C_k \mid c \subseteq t \}\\&\quad \quad \quad \textbf{for}\ \textrm{candidates}\ c \in D_t\\&\quad \quad \quad \quad count[c] \gets count[c] + 1\\&\quad \quad L_k \gets \{c \in C_k \mid count[c] \geq \epsilon \}\\&\quad \quad k \gets k + 1\\&\quad \textbf{return} \bigcup_k L_k\end{align}Rakesh Agrawal and Ramakrishnan Srikant. 1994. Fast Algorithms for Mining Association Rules in Large Databases. In Proceedings of the 20th International Conference on Very Large Data Bases (VLDB ’94). Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 487–499. ( > 26k citations!) Let $X$ be an itemset and $T$ be the set of transactions/records in the database\begin{align}&\textrm{Support}(X) = \frac{\mid \{t \in T \mid X \subseteq t \}\mid }{\mid T \mid}\end{align}*Support* indicates how frequently a given itemset appears in the transactions of the database.- A support of 1 indicates the itemset appears in every transaction.- A support of 0.5 indicates the itemset appears in half of the transactions.
###Code
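# Toy illustration (hypothetical data, not the course dataset): the multiset from the
# slides, [{a},{a,b},{a,c},{a,b,c,d}], encoded as a binary indicator frame. With these
# four transactions, {a} has support 1.0 while {a,b} and {a,c} each have support 0.5.
from mlxtend.frequent_patterns import apriori  # same apriori used on the real data below
toy = pd.DataFrame({'a': [1, 1, 1, 1],
                    'b': [0, 1, 0, 1],
                    'c': [0, 0, 1, 1],
                    'd': [0, 0, 0, 1]}).astype(bool)
toy_itemsets = apriori(toy, min_support=0.5, use_colnames=True)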
course_frequency = apriori(df, min_support=np.nextafter(0,1), max_len=1, use_colnames=True)
course_frequency['course'] = course_frequency['itemsets'].apply(lambda x: set(x).pop())
course_frequency['course_number'] = course_frequency['course'].apply(lambda x: x[4:])
course_frequency[['support', 'course']].sort_values(by='support',ascending=False)
cf = course_frequency[['support', 'course']].set_index('course').sort_values(by='support',ascending=False)
def f(limit):
cf.head(limit).plot(kind='bar')
i = interact(f, limit=(1,20))
frequent_itemsets = apriori(df, min_support=np.nextafter(0,1), max_len=2, use_colnames=True)
frequent_itemsets.sort_values(by='support',ascending=False)
###Output
_____no_output_____
###Markdown
Association rules\begin{equation*}X \Rightarrow Y, \textrm{where}\ X,Y \subseteq I\end{equation*}X is called the *antecedent* and Y is the *consequent*.$I$ is the set of all items (e.g. courses).example: $\textrm{Math110} \Rightarrow \textrm{Math210}$ would be read as "if Math110, then Math210".Now we have a notation for a relationship between two itemsets (in this case, the two itemsets each contain a single item), but we need to describe the *qualities* of that relationship...Rakesh Agrawal, Tomasz Imieliński, and Arun Swami. 1993. Mining association rules between sets of items in large databases. In Proceedings of the 1993 ACM SIGMOD international conference on Management of data (SIGMOD ’93). Association for Computing Machinery, New York, NY, USA, 207–216. DOI:https://doi.org/10.1145/170035.170072 (> 22k citations!) Metrics for quantifying association rules: Support- *Antecedent Support*: indicates how frequently the antecedent item set appears in the database.$$\textrm{Antecedent Support}(X \Rightarrow Y) = \frac{\mid \{t \in T \mid X \subseteq t \}\mid }{\mid T \mid}$$- *Consequent Support*: indicates how frequently the consequent item set appears in the database.$$\textrm{Consequent Support}(X \Rightarrow Y) = \frac{\mid \{t \in T \mid Y \subseteq t \}\mid }{\mid T \mid}$$- *(Rule) Support*: indicates how frequently the all them items of the antecedent and consequent jointly appear in the database.$$\textrm{Support}(X \Rightarrow Y) = \frac{\mid \{t \in T \mid X \cup Y \subseteq t \}\mid }{\mid T \mid}$$ Metrics for quantifying association rules: Confidence$$\textrm{Confidence}(X \Rightarrow Y) = \frac{ \textrm{Support}(X \Rightarrow Y) }{ \textrm{Support}(X) }$$*Confidence*: the ratio of rule support to antecedent support. - Or, given that the antecedent has been observed, how likely are we to also observe the consequent?If a rule has high confidence, and the antecedent is observed, then we can be fairly confident that the consequent will be observed as well. Metrics for quantifying association rules: Lift$$\textrm{Lift}(X \Rightarrow Y) = \frac{ \textrm{Confidence}(X \Rightarrow Y) }{ \textrm{Support}(Y) }$$*Lift*: ratio of confidence to consequent support. Lift is a measure of how much more often the antecedent and the consequent occur together than would be expected if they were statistically independent. When the antecedent of a rule with high lift is observed, we can be more confident that the consequent will also be observed.Confidence and lift are both descriptors of the "power" of a rule.
###Code
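# Worked check of the rule metrics for a single candidate rule X => Y, computed
# directly from the indicator matrix (a sketch; association_rules below does this
# for every frequent pair). Any two course columns of df can be substituted here.
X_col, Y_col = df.columns[0], df.columns[1]
support_X = df[X_col].mean()
support_Y = df[Y_col].mean()
support_XY = ((df[X_col] == 1) & (df[Y_col] == 1)).mean()
confidence = support_XY / support_X
lift = confidence / support_Y
print(f"support={support_XY:.3f} confidence={confidence:.3f} lift={lift:.3f}")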
rules = association_rules(frequent_itemsets, metric="support", min_threshold=np.nextafter(0,1))
rules['antecedent_course'] = rules['antecedents'].apply(lambda x: set(x).pop())
rules['consequent_course'] = rules['consequents'].apply(lambda x: set(x).pop())
rules['antecedent_course_number'] = rules['antecedent_course'].apply(lambda x: int(x[4:]))
rules['consequent_course_number'] = rules['consequent_course'].apply(lambda x: int(x[4:]))
rules['antecedent_year_level'] = rules['antecedent_course_number'].apply(lambda x: x//100 )
rules['consequent_year_level'] = rules['consequent_course_number'].apply(lambda x: x//100)
rules
pairwise_rules = rules[(rules['antecedent_year_level']==3) & (rules['consequent_year_level']==3)]
pairwise_support = pairwise_rules.pivot(index='antecedent_course',columns='consequent_course',values='support').fillna(0)
ax = sns.heatmap(pairwise_support, xticklabels=True, yticklabels=True, cmap='BuPu')
pairwise_rules = rules[(rules['antecedent_year_level']==3) & (rules['consequent_year_level']==3)]
pairwise_confidence = pairwise_rules.pivot(index='antecedent_course',columns='consequent_course',values='confidence').fillna(0)
ax = sns.heatmap(pairwise_confidence, xticklabels=True, yticklabels=True, cmap='BuPu')
pairwise_rules = rules[(rules['antecedent_year_level']==3) & (rules['consequent_year_level']==3)]
pairwise_lift = pairwise_rules.pivot(index='antecedent_course',columns='consequent_course',values='lift').fillna(0.1)
#pairwise_lift = pairwise_lift.applymap(lambda x: x if x >=1 else 0.01)
ax = sns.heatmap(pairwise_lift, xticklabels=True, yticklabels=True, cmap='BuPu', norm=LogNorm())
# exploring 'significant' rules
sig_rules = rules[
(rules['support'] > 0.01)
#& (rules['antecedent support'] > 0.01)
#& (rules['consequent support'] > 0.01)
& (rules['antecedent_year_level'] <= rules['consequent_year_level'])
& (rules['confidence'] > 0.5)
& (rules['lift'] > 1.5)
].sort_values(by='lift',ascending=False)
sig_rules
def plot_rules(sig_rules):
antecedents = sig_rules[['antecedent_course','antecedent_course_number']]
antecedents.columns = ['course','course_number']
consequents = sig_rules[['consequent_course','consequent_course_number']]
consequents.columns = ['course','course_number']
figure_courses = pd.concat([antecedents, consequents]).drop_duplicates()
dot = Digraph()
for course in figure_courses.itertuples():
dot.node(str(course.course_number),course.course)
for association in sig_rules.itertuples():
dot.edge(str(association.antecedent_course_number), str(association.consequent_course_number), label=f"{association.lift:.2f}")
dot.graph_attr['overlap'] = 'False'
dot.engine = 'neato'
return dot
dot = plot_rules(sig_rules)
dot
###Output
_____no_output_____
###Markdown
Fake News websites data analysisWe will use data downloaded from CrowdTangle's "historical data" feature rather than making multiple requests to the API. The latter option would end up taking longer due to API limitations. The data was downloaded as several .csv files, saved in `./data/in`. Time period for the analysis:* Start - 2019-01-01* End - 2021-03-27
###Code
import requests
import json
import pandas as pd
import numpy as np
from datetime import datetime
import timeit
import time
import glob
import matplotlib.pyplot as plt
import seaborn as sb
%matplotlib inline
###Output
_____no_output_____
###Markdown
Get list of pages on each categoryGiven that the .csv files generated by CrowdTangle do not specify which list they come from, it will be necessary to make API calls to get the IDs of pages related to each list. The lists are:* 'least-biased' : '1525935'* 'conspiracy-pseudoscience' : '1525936'* 'pro-science' : '1525937'
###Code
lists = {
'least-biased' : '1525935',
'conspiracy-pseudoscience' : '1525936',
'pro-science' : '1525937'
}
token = open('./ctoken').read()
def generate_account_list_url(listid, token=token):
'''
Generates the API URL for the get request with the lists of accounts.
ARGS:
ListId = The id of the list for which to retrieve accounts. This is provided as a path variable in the URL
Token = API Token
Returns:
STR - CrowdTangle API URL, for getting IDs of accounts in a list
'''
return 'https://api.crowdtangle.com/lists/{}/accounts?token={}&count=100'.format(listid, token)
platformid_to_list = dict()
for listname, listid in lists.items():
print(listname)
page = 0
nextpage = True
url = generate_account_list_url(listid)
while nextpage:
page += 1
print('DOWNLOADING PAGE', page)
re = requests.get(url)
for account in re.json()['result']['accounts']:
platformid_to_list[account['platformId']] = listname
if 'nextPage' in re.json()['result']['pagination']:
url = re.json()['result']['pagination']['nextPage']
time.sleep(10)
else:
nextpage = False
###Output
least-biased
DOWNLOADING PAGE 1
DOWNLOADING PAGE 2
DOWNLOADING PAGE 3
DOWNLOADING PAGE 4
conspiracy-pseudoscience
DOWNLOADING PAGE 1
DOWNLOADING PAGE 2
pro-science
DOWNLOADING PAGE 1
DOWNLOADING PAGE 2
###Markdown
Creates and cleans DFData was downloaded as several .csv files. Merge them into one single DF.*Note: yes, this will probably use up a lot of RAM. I have recently bought 32gb, though, so I am going to use it ;)*
###Code
path = './data/in'
files = glob.glob(path + '/*.csv')
df_list = []
for filename in files:
df = pd.read_csv(filename, index_col=None, low_memory=False, dtype={'Facebook Id' : str})
df_list.append(df)
df = pd.concat(df_list, axis=0, ignore_index=True)
###Output
_____no_output_____
###Markdown
CleaningRemove unnecessary columns and pages with under 100 average followers, the same threshold used by NYU researchers for [this article](https://medium.com/cybersecurity-for-democracy/far-right-news-sources-on-facebook-more-engaging-e04a01efae90).
###Code
df.columns
columns_to_drop = ['User Name', 'Page Category', 'Page Admin Top Country', 'Page Description', 'Sponsor Id',
'Page Created','Likes at Posting', 'Post Created Date', 'Post Created Time', 'Video Length',
'Total Interactions', 'Video Share Status', 'Is Video Owner?', 'Post Views', 'Total Views For All Crossposts',
'Overperforming Score (weighted — Likes 1x Shares 1x Comments 1x Love 1x Wow 1x Haha 1x Sad 1x Angry 1x Care 1x )']
df.drop(columns_to_drop, axis = 1, inplace=True)
# TURN ALL REACTIONS INTO ONE COLUMN
df['Reactions'] = df[['Likes', 'Love', 'Wow', 'Haha', 'Sad', 'Angry','Care']].sum(axis=1)
columns_to_drop = ['Likes', 'Love', 'Wow', 'Haha', 'Sad', 'Angry','Care']
df.drop(columns_to_drop, axis = 1, inplace=True)
'''
This part will recreate the Total Interactions column.
My computer is in PT-BR and CrowdTangle uses commas in their decimal separator.
The workaround for this is so dramatic that it is easier to just recreate the column.
'''
df['Total Interactions'] = df[['Reactions', 'Comments', 'Shares']].sum(axis=1)
###Output
_____no_output_____
###Markdown
Lists pages below the 100 avg. followers threshold
###Code
grouped_by_followers = df.groupby('Facebook Id').agg({'Followers at Posting' : 'mean'})
grouped_by_followers['Followers at Posting'].min()
###Output
_____no_output_____
###Markdown
None of the pages fall under the threshold, so no action is necessary. Renames columnsTo avoid mistakes later, all column names will be turned to lower case and will have no spaces.
###Code
column_names = list()
for c in df.columns:
column_names.append(c.lower().replace(' ', '_'))
df.columns = column_names
###Output
_____no_output_____
###Markdown
Add category column
###Code
def check_category(facebookid,
platformid_to_list=platformid_to_list):
'''
Checks the Facebook ID and finds it in the dictionary with
category names. Returns category.
ARGS:
facebookid - STR - id to be found
platformid_to_list - List of IDs and their categories
RETURN:
'least-biased'|'conspiracy-pseudoscience'|'pro-science'
'''
return platformid_to_list[facebookid]
df['category'] = df['facebook_id'].apply(lambda x: check_category(x))
# CHECKS FOR ERRORS
df[df['category'].isna()]
###Output
_____no_output_____
###Markdown
Removes duplicates
###Code
len(df)
df.drop_duplicates(inplace=True)
len(df)
###Output
_____no_output_____
###Markdown
Converts post creation date to datetime
###Code
df['post_created'] = pd.to_datetime(df['post_created'])
###Output
C:\Users\rapha\OneDrive\python\science-fakenews-facebook\env\lib\site-packages\dateutil\parser\_parser.py:1218: UnknownTimezoneWarning: tzname EST identified but not understood. Pass `tzinfos` argument in order to correctly return a timezone-aware datetime. In a future version, this will raise an exception.
category=UnknownTimezoneWarning)
C:\Users\rapha\OneDrive\python\science-fakenews-facebook\env\lib\site-packages\dateutil\parser\_parser.py:1218: UnknownTimezoneWarning: tzname EDT identified but not understood. Pass `tzinfos` argument in order to correctly return a timezone-aware datetime. In a future version, this will raise an exception.
category=UnknownTimezoneWarning)
###Markdown
Removes pages not available after 2021The analysis will focus on the trends for the past few months, so it makes no sense to include in the analysis pages that have no data after 2021.
###Code
# CHECKS IF THERE ARE PAGES THAT LACK 2021 DATA
posts_per_year = df.groupby('facebook_id').resample('Y', on='post_created').count()[['facebook_id']]
posts_per_year.columns = ['count']
posts_per_year.reset_index()['post_created'].value_counts()
january_first_21 = datetime.strptime('2021-01-01','%Y-%m-%d')
pages_before_2021 = df[df['post_created'] < january_first_21]['facebook_id'].unique()
pages_in_2021 = df[df['post_created'] > january_first_21]['facebook_id'].unique()
# GENERATES A LIST OF PAGES THAT ARE NOT IN 2021
pages_to_remove = list(np.setdiff1d(pages_before_2021, pages_in_2021))
df = df[~df['facebook_id'].isin(pages_to_remove)].reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Analysis
###Code
df.head()
df.columns
df.dtypes
df.describe()
###Output
_____no_output_____
###Markdown
Data distributionPlots histograms for the pages average of followers and interactions
###Code
df_page_average = df.groupby(['category','facebook_id'], as_index=False).mean()
df_page_average.head()
bins = np.arange(1000000, df_page_average['followers_at_posting'].max()+1000000, 1000000)
g = sb.FacetGrid(data = df_page_average, col = 'category', col_wrap = 3)
g.map(plt.hist, 'followers_at_posting', bins=bins)
g.set_titles('{col_name}');
g.fig.set_size_inches(20,5)
g = sb.FacetGrid(data = df_page_average, col = 'category', col_wrap = 3)
g.map(plt.hist, 'total_interactions', bins=30)
g.set_titles('{col_name}');
g.fig.set_size_inches(20,5)
df.sort_values(['total_interactions'], ascending=False).head()
df_page_average[df_page_average['followers_at_posting']>15000000]
df_page_average[df_page_average['total_interactions']>50000]
###Output
_____no_output_____
###Markdown
There are some outliers in the data. Some huge pages, such as the WHO, will be removed. The thresholds will be set to 15 million average followers and an average of over 50 thousand interactions per post.
###Code
pages_to_remove = list(df_page_average[df_page_average['followers_at_posting']>15000000]['facebook_id'])
df = df[~df['facebook_id'].isin(pages_to_remove)].reset_index(drop=True)
pages_to_remove = list(df_page_average[df_page_average['total_interactions']>50000]['facebook_id'])
df = df[~df['facebook_id'].isin(pages_to_remove)].reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Interaction per thousand followers
###Code
df['intactions_to_follow_ratio'] = (df['total_interactions'] / df['followers_at_posting'])*1000
###Output
_____no_output_____
###Markdown
Time comparison
###Code
df_general_agg = df.groupby('category').agg({'facebook_id' : 'nunique',
'followers_at_posting' : 'mean',
'comments' : 'mean',
'shares' : 'mean',
'reactions' : 'mean',
'intactions_to_follow_ratio' : 'mean'})
df_general_agg
df_2021_agg = df[df['post_created'] > '2021'].groupby('category').agg({'facebook_id' : 'nunique',
'followers_at_posting' : 'mean',
'comments' : 'mean',
'shares' : 'mean',
'reactions' : 'mean',
'intactions_to_follow_ratio' : 'mean'})
df_2021_agg
df_2020_agg = df[df['post_created'] < '2021'].groupby('category').agg({'facebook_id' : 'nunique',
'followers_at_posting' : 'mean',
'comments' : 'mean',
'shares' : 'mean',
'reactions' : 'mean',
'intactions_to_follow_ratio' : 'mean'})
df_2020_agg
# COMPARES
for k, v in lists.items():
ratio_2020 = df_2020_agg.loc[k]['intactions_to_follow_ratio']
ratio_2021 = df_2021_agg.loc[k]['intactions_to_follow_ratio']
print(k, 'ratio:', (ratio_2021-ratio_2020)/ratio_2020)
follows_2020 = df_2020_agg.loc[k]['followers_at_posting']
follows_2021 = df_2021_agg.loc[k]['followers_at_posting']
print(k, 'follows:', (follows_2021-follows_2020)/follows_2020)
print('')
df.groupby('category').resample('M', on='post_created').mean()[['intactions_to_follow_ratio']].unstack(level=0).plot(kind='line')
###Output
_____no_output_____
###Markdown
Exports to Excel to make this same graph a little prettier
###Code
df_to_export =df.groupby('category').resample('M', on='post_created').mean()[['intactions_to_follow_ratio',
'followers_at_posting', 'reactions',
'shares', 'comments']].unstack(level=0)
df_to_export.to_excel('./data/out/time_series.xlsx')
###Output
_____no_output_____
###Markdown
Vaccines and Covid analysisThis part of the analysis will focus only on posts about vaccines and covid.
###Code
df.columns
df['all_texts'] = df[['message','image_text',
'link_text',
'description']].apply(
lambda x: ','.join(x.dropna().astype(str)), axis =1)
df.head()
df_vaccine = df[df['all_texts'].str.contains('vaccine|vaccination')]
df_vaccine.groupby('category').resample('M', on='post_created').mean()[['intactions_to_follow_ratio']].unstack(level=0).plot(kind='line')
df_vaccine.groupby('category').resample('M', on='post_created').count()[['url']].unstack(level=0).plot(kind='line')
df_vaccine_agg = df_vaccine.groupby('category').agg({'facebook_id' : 'nunique',
'followers_at_posting' : 'mean',
'comments' : 'mean',
'shares' : 'mean',
'reactions' : 'mean',
'intactions_to_follow_ratio' : 'mean'})
df_vaccine_agg
df_vaccine_agg_21 = df_vaccine[df_vaccine['post_created'] > '2021'].groupby('category').agg({'facebook_id' : 'nunique',
'followers_at_posting' : 'mean',
'comments' : 'mean',
'shares' : 'mean',
'reactions' : 'mean',
'intactions_to_follow_ratio' : 'mean'})
df_vaccine_agg_21
df_vaccine_agg_20 = df_vaccine[df_vaccine['post_created'] < '2021'].groupby('category').agg({'facebook_id' : 'nunique',
'followers_at_posting' : 'mean',
'comments' : 'mean',
'shares' : 'mean',
'reactions' : 'mean',
'intactions_to_follow_ratio' : 'mean'})
df_vaccine_agg_20
# COMPARES
for k, v in lists.items():
ratio_2020 = df_vaccine_agg_20.loc[k]['intactions_to_follow_ratio']
ratio_2021 = df_vaccine_agg_21.loc[k]['intactions_to_follow_ratio']
print(k, 'ratio:', (ratio_2021-ratio_2020)/ratio_2020)
shares_2020 = df_vaccine_agg_20.loc[k]['shares']
shares_2021 = df_vaccine_agg_21.loc[k]['shares']
print(k, 'shares:', (shares_2021-shares_2020)/shares_2020)
print('')
###Output
least-biased ratio: -0.1289134174343198
least-biased shares: -0.516550744676836
conspiracy-pseudoscience ratio: -0.3727788691403627
conspiracy-pseudoscience shares: -0.26458470170750653
pro-science ratio: -0.30568180208326373
pro-science shares: -0.19035270269373356
###Markdown
Mentioning Covid
###Code
df_covid = df[df['post_created'] > '2020-01-01']
df_covid = df_covid[df_covid['all_texts'].str.contains('covid|corona|pandemic|lockdown|sars-cov')]
df_covid.groupby('category').resample('M', on='post_created').mean()[['intactions_to_follow_ratio']].unstack(level=0).plot(kind='line')
df_covid.groupby('category').resample('M', on='post_created').count()[['url']].unstack(level=0).plot(kind='line')
df_covid_agg = df_covid.groupby('category').agg({'facebook_id' : 'nunique',
'followers_at_posting' : 'mean',
'comments' : 'mean',
'shares' : 'mean',
'reactions' : 'mean',
'intactions_to_follow_ratio' : 'mean'})
df_covid_agg
df_covid_agg_21 = df_covid[df_covid['post_created'] > '2021'].groupby('category').agg({'facebook_id' : 'nunique',
'followers_at_posting' : 'mean',
'comments' : 'mean',
'shares' : 'mean',
'reactions' : 'mean',
'intactions_to_follow_ratio' : 'mean'})
df_covid_agg_21
df_covid_agg_20 = df_covid[df_covid['post_created'] < '2021'].groupby('category').agg({'facebook_id' : 'nunique',
'followers_at_posting' : 'mean',
'comments' : 'mean',
'shares' : 'mean',
'reactions' : 'mean',
'intactions_to_follow_ratio' : 'mean'})
df_covid_agg_20
# COMPARES
for k, v in lists.items():
ratio_2020 = df_covid_agg_20.loc[k]['intactions_to_follow_ratio']
ratio_2021 = df_covid_agg_21.loc[k]['intactions_to_follow_ratio']
print(k, 'ratio:', (ratio_2021-ratio_2020)/ratio_2020)
shares_2020 = df_covid_agg_20.loc[k]['shares']
shares_2021 = df_covid_agg_21.loc[k]['shares']
print(k, 'shares:', (shares_2021-shares_2020)/shares_2020)
print('')
###Output
least-biased ratio: -0.17223082853926036
least-biased shares: -0.537887201359583
conspiracy-pseudoscience ratio: -0.2819580565575689
conspiracy-pseudoscience shares: -0.48770243897102883
pro-science ratio: -0.21483026034165886
pro-science shares: -0.12395791149121203
###Markdown
Read the data from GP, XGB, and FFNN
###Code
df = pd.read_csv("comparison.csv")
###Output
_____no_output_____
###Markdown
Read the ChemProp data
###Code
cp_df = pd.read_csv("cp_comparison.csv")
###Output
_____no_output_____
###Markdown
Merge the two datasets
###Code
df = df.merge(cp_df,on=["dataset","split"])
###Output
_____no_output_____
###Markdown
Label the datasets with random and scaffold splits
###Code
df['random'] = [x.startswith("RND") for x in df.split]
###Output
_____no_output_____
###Markdown
Split the datasets into two dataframes, one for scaffold splits and one for random splits
###Code
df_rnd = df.query("random")
df_scaf = df.query("random == False")
df_rnd
df_scaf
###Output
_____no_output_____
###Markdown
A simple function to count the number of lines in a file; we'll use this to order the datasets from smallest to largest
###Code
def count_lines(file_name):
    with open(file_name) as f:
        return sum(1 for line in f)
###Output
_____no_output_____
###Markdown
Put the datasets in order from smallest to largest
###Code
line_counts = [[x.split("/")[-1].replace(".smi",""),count_lines(x)] for x in glob("data/*.smi")]
df_line_count = pd.DataFrame(line_counts,columns=['dataset','count'])
df_line_count.sort_values("count",inplace=True)
sort_order = df_line_count.dataset.values
###Output
_____no_output_____
###Markdown
Create the y-axis labels for the boxplots with the number of molecules in the dataset and the dataset name
###Code
target_labels = [f"{a} {b}" for a,b in df_line_count[["dataset","count"]].values]
###Output
_____no_output_____
###Markdown
A function to draw boxplots showing performance over multiple folds of cross validation
###Code
def draw_boxplots(df,title):
r2_cols = ["dataset"] + [x for x in df.columns if x.find("r2") > 0]
rms_cols = ["dataset"] + [x for x in df.columns if x.find("rms") > 0]
r2_df = df[r2_cols].melt(id_vars="dataset")
rms_df = df[rms_cols].melt(id_vars="dataset")
r2_df.columns = ['Dataset',"algorithm","R2"]
rms_df.columns = ['Dataset',"algorithm","RMSE"]
r2_df['Method'] = [x.split("_")[0].upper() for x in r2_df.algorithm]
rms_df['Method'] = [x.split("_")[0].upper() for x in rms_df.algorithm]
sns.set(rc={'figure.figsize': (15, 18)})
sns.set_context('talk')
ax = sns.boxplot(x="R2",y="Dataset",data=r2_df,orient="h",hue="Method",order=sort_order)
for i in range(0,len(df.dataset.unique())):
ax.axhline(0.5+i,linestyle="--",color="grey")
ax.set(xlabel="R${^2}$")
ax.set(yticklabels=target_labels)
ax.set(title=title)
plt.show()
plt.tight_layout()
ax.figure.savefig(title.replace(" ","_")+"_r2.png",bbox_inches='tight')
ax = sns.boxplot(x="RMSE",y="Dataset",data=rms_df,orient="h",hue="Method",order=sort_order)
for i in range(0,len(df.dataset.unique())):
ax.axhline(0.5+i,linestyle="--",color="grey")
ax.set(title=title)
ax.set(yticklabels=target_labels)
plt.show()
ax.figure.savefig(title.replace(" ","_")+"_rmse.png",bbox_inches='tight');
draw_boxplots(df_rnd,"Random Split")
draw_boxplots(df_scaf,"Scaffold Split")
###Output
_____no_output_____
###Markdown
ANALYSIS NOTEBOOK - DONNELLY 2019 PLOS ONE Patrick M. Donnelly University of Washington September 25, 2020
###Code
# import necessary databases and libraries
import pandas as pd
import numpy as np
from scipy import stats
# pull data from data folder in repository
data = pd.read_csv('data/data.csv')
###Output
_____no_output_____
###Markdown
Demographics TableT-tests and Wilcoxon signed rank tests for Demographics Table 1 Age
###Code
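# Note: corr_data and corr_data_cntrl are constructed in the "Correlation Analysis"
# section further down; run those cells first so the group comparisons here work.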
stats.wilcoxon(corr_data.visit_age, corr_data_cntrl.visit_age)
###Output
_____no_output_____
###Markdown
Gender
###Code
stats.wilcoxon(corr_data.gender, corr_data_cntrl.gender)
###Output
_____no_output_____
###Markdown
Norm-referenced Measures
###Code
# WJ Basic Reading Skills composite
stats.ttest_ind(corr_data.wj_brs, corr_data_cntrl.wj_brs)
# TOWRE-2 Index
stats.ttest_ind(corr_data.twre_index, corr_data_cntrl.twre_index)
# WASI-II FS-2 Composite
stats.ttest_ind(corr_data.wasi_fs2, corr_data_cntrl.wasi_fs2)
# CTOPP-2 Phonological Awareness composite
stats.ttest_ind(corr_data.ctopp_pa, corr_data_cntrl.ctopp_pa)
# CTOPP-2 Rapid Naming composite
stats.ttest_ind(corr_data.ctopp_rapid, corr_data_cntrl.ctopp_rapid)
###Output
_____no_output_____
###Markdown
Correlation Analysis
###Code
# look at difference scores and practice metrics
data_sifted = data[['record_id','int_session', 'gender', 'pigs_casecontrol', 'word_acc_diff',
'pseudo_acc_diff', 'first_acc_diff', 'second_rate_diff', 'pigs_practice_numstories',
'visit_age', 'wj_brs', 'twre_index', 'ctopp_rapid', 'wasi_fs2', 'ctopp_pa', 'ctopp_pm']]
# look just at intervention participants
corr_data = data_sifted[data_sifted['pigs_casecontrol'] == 1]
corr_data_cntrl = data_sifted[data_sifted['pigs_casecontrol'] == 0]
# Look just at session 2 for data clarity
corr_data = corr_data[corr_data['int_session'] == 2]
corr_data_cntrl = corr_data_cntrl[corr_data_cntrl['int_session'] == 2]
###Output
_____no_output_____
###Markdown
Growth and Practice - Intervention Group
###Code
stats.pearsonr(corr_data['word_acc_diff'], corr_data['pigs_practice_numstories'])
stats.pearsonr(corr_data['pseudo_acc_diff'], corr_data['pigs_practice_numstories'])
###Output
_____no_output_____
###Markdown
Growth and Practice - Control Group
###Code
stats.pearsonr(corr_data_cntrl['word_acc_diff'], corr_data_cntrl['pigs_practice_numstories'])
stats.pearsonr(corr_data_cntrl['pseudo_acc_diff'], corr_data_cntrl['pigs_practice_numstories'])
###Output
_____no_output_____
###Markdown
Real Word Decoding & Predictors
###Code
stats.pearsonr(corr_data['word_acc_diff'], corr_data['visit_age'])
stats.pearsonr(corr_data['word_acc_diff'], corr_data['wasi_fs2'])
stats.pearsonr(corr_data['word_acc_diff'], corr_data['ctopp_pa'])
stats.pearsonr(corr_data['word_acc_diff'], corr_data['ctopp_pm'])
###Output
_____no_output_____
###Markdown
Pseudo Word Decoding & Predictors
###Code
stats.pearsonr(corr_data['pseudo_acc_diff'], corr_data['visit_age'])
stats.pearsonr(corr_data['pseudo_acc_diff'], corr_data['wasi_fs2'])
stats.pearsonr(corr_data['pseudo_acc_diff'], corr_data['ctopp_pa'])
stats.pearsonr(corr_data['pseudo_acc_diff'], corr_data['ctopp_pm'])
###Output
_____no_output_____
###Markdown
Passage Reading Accuracy & Predictors
###Code
# resift data so that nan-removal is only affected by nans in accuracy
data_accuracy = data[['record_id','int_session', 'pigs_casecontrol', 'word_acc_diff',
'pseudo_acc_diff', 'first_acc_diff', 'pigs_practice_numstories',
'visit_age', 'wj_brs', 'twre_index', 'ctopp_rapid', 'wasi_fs2', 'ctopp_pa', 'ctopp_pm']]
# look just at intervention participants
corr_accuracy = data_accuracy[data_accuracy['pigs_casecontrol'] == 1]
corr_accuracy_cntrl = data_accuracy[data_accuracy['pigs_casecontrol'] == 0]
corr_accuracy = corr_accuracy[corr_accuracy['int_session'] == 2].dropna()
corr_accuracy_cntrl = corr_accuracy_cntrl[corr_accuracy_cntrl['int_session'] == 2].dropna()
stats.pearsonr(corr_accuracy['first_acc_diff'], corr_accuracy['visit_age'])
stats.pearsonr(corr_accuracy['first_acc_diff'], corr_accuracy['wasi_fs2'])
stats.pearsonr(corr_accuracy['first_acc_diff'], corr_accuracy['ctopp_pa'])
stats.pearsonr(corr_accuracy['first_acc_diff'], corr_accuracy['ctopp_pm'])
###Output
_____no_output_____
###Markdown
Passage Reading Rate & Predictors
###Code
# resift data so that nan-removal is only affected by nans in rate
data_rate = data[['record_id','int_session', 'pigs_casecontrol', 'word_acc_diff',
'pseudo_acc_diff', 'second_rate_diff', 'pigs_practice_numstories',
'visit_age', 'wj_brs', 'twre_index', 'ctopp_rapid', 'wasi_fs2', 'ctopp_pa', 'ctopp_pm']]
# look just at intervention participants
corr_rate = data_rate[data_rate['pigs_casecontrol'] == 1]
corr_rate_cntrl = data_rate[data_rate['pigs_casecontrol'] == 0]
corr_rate = corr_rate[corr_rate['int_session'] == 2].dropna()
corr_rate_cntrl = corr_rate_cntrl[corr_rate_cntrl['int_session'] == 2].dropna()
stats.pearsonr(corr_rate['second_rate_diff'], corr_rate['visit_age'])
stats.pearsonr(corr_rate['second_rate_diff'], corr_rate['wasi_fs2'])
stats.pearsonr(corr_rate['second_rate_diff'], corr_rate['ctopp_pa'])
stats.pearsonr(corr_rate['second_rate_diff'], corr_rate['ctopp_pm'])
###Output
_____no_output_____
###Markdown
Effect Size Analyses Real Word The most relevant data structures here are corr_data for the intervention group and corr_data_cntrl for the control group
###Code
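# The four effect-size cells below all apply the same pooled-SD Cohen's d formula;
# a helper that wraps it once (a sketch, equivalent to the inline computations) would be:
def cohens_d(x, y):
    pooled_sd = np.sqrt((np.std(x, ddof=1) ** 2 + np.std(y, ddof=1) ** 2) / 2.0)
    return (np.mean(x) - np.mean(y)) / pooled_sd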
x = corr_data.word_acc_diff
y = corr_data_cntrl.word_acc_diff
d = (np.mean(x) - np.mean(y)) / np.sqrt((np.std(x, ddof=1) ** 2 + np.std(y, ddof=1) ** 2) / 2.0)
print("Cohen's d: ",d)
###Output
Cohen's d: 0.5686864296949145
###Markdown
Pseudo Word
###Code
x = corr_data.pseudo_acc_diff
y = corr_data_cntrl.pseudo_acc_diff
d = (np.mean(x) - np.mean(y)) / np.sqrt((np.std(x, ddof=1) ** 2 + np.std(y, ddof=1) ** 2) / 2.0)
print("Cohen's d: ",d)
###Output
Cohen's d: 0.7429769651763583
###Markdown
Passage Accuracy
###Code
x = corr_accuracy.first_acc_diff
y = corr_accuracy_cntrl.first_acc_diff
d = (np.mean(x) - np.mean(y)) / np.sqrt((np.std(x, ddof=1) ** 2 + np.std(y, ddof=1) ** 2) / 2.0)
print("Cohen's d: ",d)
###Output
Cohen's d: 0.3635296154143578
###Markdown
Passage Rate
###Code
x = corr_rate.second_rate_diff
y = corr_rate_cntrl.second_rate_diff
d = (np.mean(x) - np.mean(y)) / np.sqrt((np.std(x, ddof=1) ** 2 + np.std(y, ddof=1) ** 2) / 2.0)
print("Cohen's d: ",d)
###Output
Cohen's d: 0.1270477151527451
###Markdown
What makes a movie a commercial success? A data science approach to dissect the economics of the movie-making business Section 1: Business UnderstandingIn this notebook, I will use the IMDb public dataset and visualization tools in order to answer important questions about what factors lead to the commercial success of a movie. The key questions that I will try to answer are as follows:1. Do certain movie genres make more money than others?2. Does the amount of funding for a movie have an impact on commercial success?3. What impact does duration have on the success of a movie?4. Do viewer ratings have an impact on how well a movie will do in the Box Office?5. Does the number of votes impact the success of a movie?Answering these questions could dramatically improve the decision processes underlying movie production. Section 2: Data UnderstandingThe IMDb movie dataset covers over 80000 movies and accounts for all the basic attributes one would expect such as duration, genre, country, language, and gross income. The dataset can be accessed here: https://www.imdb.com/interfaces/The following codeblock enables me to import the data and convert it into a Pandas DataFrame
###Code
#import necessary libraries
import pandas as pd
import os
import sys
import seaborn as sns
import matplotlib.pyplot as plt
from forex_python.converter import CurrencyRates
cwd = os.getcwd()
import re
import math
import numpy as np
pd.reset_option('^display.', silent=True)
np.set_printoptions(suppress=True)
imdb_df = pd.read_csv(cwd+'/datasets/imdb.csv') #import imdb csv
###Output
_____no_output_____
###Markdown
Section 3: Data PreparationIn this section, we will try to understand the dataset on a column-by-column basis, figure out which columns are valuable and how we could still make the most out of seemingly less valuable ones, and also address issues related to missing data.
###Code
imdb_df.columns #Printing columns to understand the dataset
imdb_df.isna().sum()/imdb_df.shape[0] * 100 #Let's get a macroview of which columns are useful and which ones aren't
#we will drop the metascore column since 84% of its values are missing (NaN rows for the income output columns are dropped next)
imdb_df = imdb_df.drop(columns=['metascore'])
#Then, we will remove the rows from imdb_df that do not have worldwide & usa revenue numbers as this is the output we are looking to compare with
imdb_df = imdb_df.dropna(subset=['worlwide_gross_income'])
#Genre column can serve as a great categorical variable
imdb_df['genre'] = imdb_df['genre'].str.replace(' ', '') # But first, we need to split the genres separated by commas
genre_encoded = imdb_df['genre'].str.get_dummies(sep=',') #We encode the genres by using get_dummies
genre_encoded
#Now, we will make use of the encoded genre column
imdb_df = pd.concat([imdb_df,genre_encoded], axis=1) #join the encoded data with original dataframe
imdb_df.drop(columns=['genre']) #drop the original genre column
#Next, we will attempt at converting the income related columns to one unified currency - USD
c = CurrencyRates() #instantiating the forex conversion module
def get_symbol(price):
""" function for reading in the price and returning the currency
inputs:
- price: amount in local currency
outputs:
- currency: currency of the local price
"""
import re
pattern = r'(\D*)\d*\.?\d*(\D*)'
g = re.match(pattern,price).groups()
return g[0]
def return_USD(budget):
""" function for reading in the currency and converting to USD
inputs:
- price: amount in local currency
outputs:
- price_in_USD: amount in USD
"""
if budget!='nan':
if '$' not in budget:
try:
return c.get_rate(get_symbol(budget).strip(), 'USD') * int(re.findall('\d+', budget)[0])
except:
return float('NaN')
else:
return int(re.findall('\d+', budget)[0])
else:
return float('NaN')
#lambda function for applying the USD conversion to the budget column
imdb_df['budget'] = imdb_df['budget'].apply(lambda x: return_USD(str(x)))
imdb_df
#similarly, we'll convert the worldwide_gross_income and usa_gross_icome to USD
imdb_df['worlwide_gross_income'] = imdb_df['worlwide_gross_income'].apply(lambda x: return_USD(str(x)))
imdb_df['usa_gross_income'] = imdb_df['usa_gross_income'].apply(lambda x: return_USD(str(x)))
imdb_df.to_csv(cwd+'/datasets/imdb_clean.csv') #we will save the cleaned up dataframe to a csv in order to create a milestone
###Output
_____no_output_____
###Markdown
We will now address the next few steps sequentially for each question Section 3.1 Prepare data for question 1First question - Do certain genres make more $ than others?
###Code
#We will extract the grenres columns and save them into a new dataframe
imdb_genres_df = imdb_df[['worlwide_gross_income','Animation', 'Biography', 'Comedy', 'Crime', 'Documentary', 'Drama',
'Family', 'Fantasy', 'Film-Noir', 'History', 'Horror', 'Music',
'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Sport', 'Thriller', 'War',
'Western' ]]
###Output
_____no_output_____
###Markdown
Section 4: Data ModelingWe are not applying any machine learning techniques so we will skip this section Section 5: Evaluating results Section 5.1 Evaluating results for Question 1Question 1 - Do certain genres make more $ than others?We will make use of seaborn to generate a heatmap of correlations between various genres and the worldwide gross income
###Code
#
fig, ax = plt.subplots(figsize=(15,15)) #instantiate the plot
income_corr = imdb_genres_df.corr() #calculate correlation
mask = np.zeros_like(income_corr[['worlwide_gross_income']], dtype=bool) #masking all the 1 values since they don't add any value
mask[np.triu_indices_from(mask)] = True #masking list
sns.heatmap(income_corr[['worlwide_gross_income']].sort_values(by=['worlwide_gross_income'],ascending=False), annot=True, fmt=".2f",linewidths=.5, ax=ax, vmin=-1, square=True, mask = mask, cmap='coolwarm');
###Output
_____no_output_____
###Markdown
Sci-fi, animation, and fantasy are clear winners. Drama and romance have a negative correlation, implying that these genres lead to unimpressive returns. Section 5.2 Evaluating results for Question 2Question 2 - Does the amount of funding for a movie have an impact on commercial success?We will generate a scatter plot of the budget vs worldwide_gross_income to assess the distribution
###Code
imdb_df.plot.scatter(x='budget',
y='worlwide_gross_income',
c='DarkBlue', figsize=(20,10),style='plain') #simple scatter plot generation function that defines the axes and size of plot
###Output
_____no_output_____
###Markdown
Based on the above plot, we can somewhat infer that budget and gross income are correlated. Let's see if we can draw a regression line to fit the plot
###Code
sns.lmplot(x='budget',y='worlwide_gross_income',data=imdb_df,fit_reg=True,line_kws={'color': 'red'},height=8, aspect=2)
#Clearly, there is correlation. Let's calculate the Pearson correlation between the two columns.
imdb_df['budget'].corr(imdb_df['worlwide_gross_income'])
###Output
_____no_output_____
###Markdown
Conclusion: Budget and worldwide gross income are highly correlated. Section 5.3 Evaluating results for Question 3Question 3 - What impact does duration have on the success of a movie?
###Code
#Let's get an idea what the duration distribution looks like
imdb_df['duration'].describe()
#We will generate a scatter plot of the duration vs worldwide_gross_income to assess the distribution
imdb_df.plot.scatter(x='duration',
y='worlwide_gross_income',
c='DarkBlue', figsize=(20,10))
#The average length of a movie in our database is 105 minutes. Anything less or more tends to taper off the commercial value.
#We will seperate the duration into buckets
imdb_df['duration_binned'] = pd.cut(imdb_df['duration'], [0,30,60,90,120,150,180,210,240,270,300])
#We will then generate a bar chart distribution
imdb_df.groupby('duration_binned')['worlwide_gross_income'].mean().plot.bar()
#It seems the ideal movie falls within the bucket of 180 minutes to 210 minutes.
imdb_df.groupby('duration_binned')['worlwide_gross_income'].count().plot.bar()
#Turns out that the average commercial value in the 180-210 bucket which is high seems to be driven by a small number of highly successful movies.
imdb_df.groupby('duration')['worlwide_gross_income'].mean().idxmax() #For context, let's understand which movie is the source of greatest success
###Output
_____no_output_____
###Markdown
Conclusion: For production studios, perhaps this means pushing to fall within this bucket if they’ve got other variables right. But if they want to play it safe, falling within the 120 to 150 minute range will be a safer bet. Section 5.4 Evaluating results for Question 4Question 4 - Do viewer ratings have an impact on how well a movie will do in the Box Office?
###Code
#We will generate a scatter plot of the avg_vote vs worldwide_gross_income to assess the distribution
imdb_df.plot.scatter(x='avg_vote',
y='worlwide_gross_income',
c='DarkBlue',figsize=(20,10))
#We will draw a regression line to see if there's some correlation
sns.lmplot(x='avg_vote',y='worlwide_gross_income',data=imdb_df,fit_reg=True,line_kws={'color': 'red'},height=8, aspect=2)
#Shocking! Let's calculate the Pearson correlation between the two columns.
imdb_df['avg_vote'].corr(imdb_df['worlwide_gross_income'])
###Output
_____no_output_____
###Markdown
Conclusion: The average user rating has very little to no impact on worldwide gross income. In fact, the correlation between the two is just 13%! This is yet another incentive for movie studios to continue doing what they do best, and care little about ratings they receive from the audience. Section 5.5 Evaluating results for Question 5Question 5 - Does the number of votes impact the success of a movie?Similar to my approach in Question 4, I will perform an analysis and derive the correlation between the number of votes and the corresponding worldwide gross income.
###Code
sns.lmplot(x='votes',y='worlwide_gross_income',data=imdb_df,fit_reg=True,line_kws={'color': 'red'},height=8, aspect=2)
#Clearly, there is correlation. Let's calculate the Pearson correltion between the two columns.
imdb_df['votes'].corr(imdb_df['worlwide_gross_income'])
###Output
_____no_output_____
###Markdown
Data analysis portion
###Code
import pandas as pd
import numpy as np
df = pd.read_csv("test.csv")
df = df.replace("--",np.nan)
df.head()
df.info()
df["Greencard Processing Time"] = df["Greencard Processing Time"].astype(str)
split_function = lambda x: x.split()[0]
df["Greencard Processing Time"] = df["Greencard Processing Time"].apply(split_function)
# df["Greencard Processing Time"]
df["Greencard Processing Time"].astype(float).describe()
###Output
_____no_output_____
###Markdown
Analyzing GOOG, AAPL & FB 2010-2020
###Code
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import datetime
import seaborn as sns
from pandas_datareader import data as pdr
from datetime import date, timedelta
import yfinance as yf
import os
from pathlib import Path
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from sklearn.metrics import mean_squared_error
yf.pdr_override()
STOCKS = ["AAPL", "GOOG", "FB"]
LAST_N_DAYS = 10 * 365
DATADIR = "data"
SIGNIFICANCE_LEVEL = 0.05
!mkdir -p $DATADIR
today = date.today()
start_date = today - timedelta(days=LAST_N_DAYS)
dfs = dict()
for stock in STOCKS:
filename = Path(DATADIR) / f"{stock}_{today}.csv"
if filename.exists():
dfs[stock] = pd.read_csv(filename).set_index("Date")
else:
dfs[stock] = pdr.get_data_yahoo(stock, start=start_date, end=today)
dfs[stock].to_csv(filename)
df = pd.DataFrame({k: v.Close for k, v in dfs.items()})
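# one-day-ahead simple returns: (close_{t+1} - close_t) / close_t, aligned at day t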
returns = (df.shift(-1) - df) / df
returns.head()
returns.describe()
fig = plt.figure(dpi=100)
for stock in STOCKS:
ax = sns.lineplot(x=df.index, y=df[stock])
fig.legend(labels=STOCKS)
plt.ylabel("Ticker")
plt.xlabel("Date")
ax.set_xticks(ax.get_xticks()[::500])
plt.title("Stock Dynamics 2010-2020")
plt.show()
for stock in STOCKS:
X = df[stock].dropna().values
result = adfuller(X)
if result[1] < SIGNIFICANCE_LEVEL:
print(f"{stock} is stationary by the Augmented Dickey-Fuller test.")
else:
print(f"{stock} is NOT stationary by the Augmented Dickey-Fuller test.")
###Output
AAPL is NOT stationary by the Augmented Dickey-Fuller test.
GOOG is NOT stationary by the Augmented Dickey-Fuller test.
FB is NOT stationary by the Augmented Dickey-Fuller test.
###Markdown
Autocorrelation Analysis
###Code
for stock in STOCKS:
X = df[stock].dropna()
plot_acf(X, title=f"{stock} Autocorrelation")
plt.show()
for stock in STOCKS:
X = df[stock].dropna()
plot_pacf(X, title=f"{stock} Partial Autocorrelation")
plt.show()
###Output
_____no_output_____
###Markdown
Profit & Loss
###Code
def model_pnl(n_models):
fig = plt.figure(dpi=140, figsize=(20, 10))
idx = 0
for model_idx in range(n_models):
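        # simulate a random trading signal (~N(0,1)) applied to the realised returns of each stock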
pnl = returns * np.random.normal(size=returns.shape)
for stock_idx, stock in enumerate(STOCKS):
idx += 1
plt.subplot(n_models, len(STOCKS) * 2, idx)
pnl[stock].plot(title=f"{stock} PNL")
idx += 1
plt.subplot(n_models, len(STOCKS) * 2, idx)
pnl[stock].cumsum().plot(title=f"{stock} Cum. PNL")
plt.tight_layout()
fig.autofmt_xdate()
plt.show()
model_pnl(3)
###Output
_____no_output_____
###Markdown
Scratchpad
###Code
def hit_rate(returns, prediction):
return (np.sign(prediction) == np.sign(returns)).mean(axis=1)
def approximate_sharpe_ratio(series):
return series.mean(axis=1) / series.std(axis=1)
def plot_sharpe_ratio(returns, n_draws=1000, acc="hit_rate"):
for stock in STOCKS:
stock_returns = returns[stock].dropna()
sim_stock_pnl = pd.DataFrame({draw: stock_returns for draw in range(n_draws)})
plt.title(f"{stock} SR vs {acc}")
plt.xlabel(acc)
plt.ylabel("SR")
if acc == "hit_rate":
data = pd.DataFrame(
{
"sr": approximate_sharpe_ratio(sim_stock_pnl),
"hit_rate": hit_rate(stock_returns, sim_stock_pnl),
},
index=sim_stock_pnl.index
)
elif acc == "rmse":
data = pd.DataFrame(
{
"sr": approximate_sharpe_ratio(sim_stock_pnl),
"rmse": mean_squared_error(stock_returns, sim_stock_pnl),
}
)
sns.boxplot(x="hit_rate", y="sr", data=data)
plt.show()
plot_sharpe_ratio(returns, acc="hit_rate")
from alpha_vantage.timeseries import TimeSeries
from ipython_secrets import get_secret
ALPHA_VANTAGE_API_KEY = get_secret("ALPHA_VANTAGE_API_KEY")
ts = TimeSeries(key=ALPHA_VANTAGE_API_KEY, output_format="pandas", indexing_type="date")
def build_df(stocks=STOCKS):
cols = dict()
for stock in stocks:
data, meta = ts.get_daily(symbol=stock, outputsize="full")
cols[stock] = data.rename(columns={"4. close": "close"})["close"]
return pd.DataFrame(cols)
df = build_df()
now = datetime.datetime.now()
train_df = df[now - df.index <= datetime.timedelta(days=LAST_N_DAYS)]
###Output
_____no_output_____
###Markdown
Questions - For each participant: - number of messages - words per message - most used words - number of emojis - most used emojis - Sentiment analysis: - group sentiment over time - sentiment index of each participant - pie chart of message sentiments - worst and best message and from whom - wordcloud Library Imports
###Code
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from nltk.corpus import stopwords
from textblob import TextBlob
import operator
from collections import Counter
import emoji
import requests
import json
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import asyncio
import concurrent
import re
from functools import partial
from pandas.api.types import CategoricalDtype
from datetime import datetime
from wordcloud import WordCloud
%matplotlib inline
###Output
_____no_output_____
###Markdown
Some details about the file.
###Code
data = "data/chat.txt"
language = 'portuguese'
language_code = 'pt'
table = pd.DataFrame(columns=('date', 'time', 'sender', 'message'))
###Output
_____no_output_____
###Markdown
Processing the file and building the dataframe:
###Code
with open(data) as f:
raw_message = ""
counter = 0
for line in tqdm(f):
if line.startswith('['):
# date
datePattern = '(\d+/\d+/\d+)'
try:
date = re.search(datePattern, raw_message).group(0)
except AttributeError:
date = "No date"
# time
timePattern = '(\d+:\d+:\d+)'
try:
time = re.search(timePattern, raw_message).group(0)
except AttributeError:
time = "No Time"
# sender
personPattern = '((?<=]).+?(?=:))'
try:
person = re.search(personPattern, raw_message).group(0).replace("] ", "")
except AttributeError:
person = "No Person"
# message
messagePattern = '(:\s).*'
try:
text = re.search(messagePattern, raw_message).group(0).replace(": ", "")
except AttributeError:
text = "No message"
table.loc[counter] = [date, time, person, text]
counter += 1
raw_message = line
else:
raw_message += line
###Output
13524it [00:19, 700.64it/s]
###Markdown
We clean the imported messages and print the head of the dataframe:
###Code
table = table[~table.message.str.contains("omitted")]
table = table[~table.message.str.contains("No message")]
table = table[~table.message.str.contains("Messages to this group are")]
table = table[~table.date.str.contains("No date")]
table.head()
###Output
_____no_output_____
###Markdown
Participant comparison
###Code
table.groupby('sender').message.count().plot(kind='barh')
plt.title('Total Messages send by each participant')
table['message_size'] = table.message.str.split().str.len()
table.groupby('sender').message_size.mean().plot(kind='barh')
plt.title('Words per message for each participant')
senders = list(set(table.sender))
raw_text = {}
for sender in senders:
raw_text[sender] = ""
for idx, row in table.iterrows():
sender = row.sender
message = row.message
raw_text[sender] += message + " "
for sender in senders:
# clean words and extract most used
bad_words = stopwords.words(language)
blob = list(TextBlob(raw_text[sender]).lower().words)
clean_blob = [word for word in blob if word not in bad_words]
top_words = Counter(clean_blob).most_common()[0:5]
print(f"\nTop words for {sender}:")
for element in top_words:
print(f"{element[0]} with {element[1]} uses.")
# get most used emojis
emoji_list = []
for item in clean_blob:
if item in emoji.UNICODE_EMOJI:
emoji_list.append(item)
top_emoji = Counter(emoji_list).most_common()[0:3]
print(f"\nTop emojis for {sender}:")
for element in top_emoji:
print(f"{element[0]} with {element[1]} uses.")
# create a cloud of words for each participant
print(f"\nCloud for {sender}:")
stopwords_set = set(bad_words)
wc = WordCloud(stopwords=stopwords_set).generate(raw_text[sender])
plt.figure()
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()
print('-' * 20)
###Output
Top words for Pai:
boa with 250 uses.
ló with 137 uses.
docas with 129 uses.
catocas with 116 uses.
é with 110 uses.
Top emojis for Pai:
❤ with 11 uses.
👍 with 9 uses.
🤙 with 5 uses.
Cloud for Pai:
###Markdown
Sentiment Analysis
###Code
def convert_to_weekday(date_string):
day_names = ['Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
datetime_object = datetime.strptime(date_string, '%d/%m/%Y')
return day_names[datetime_object.weekday()]
cats = ['Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
cat_type = CategoricalDtype(categories=cats, ordered=True)
table['weekday'] = [convert_to_weekday(element) for element in table['date']]
table.groupby('weekday').message.count().reindex(cats).plot(kind='bar')
plt.title('Total messages per weekday')
sender = senders[3]
a = table[(table['sender'] == sender)]
a.groupby('weekday').message.count().reindex(cats).plot(kind='bar')
plt.title(f"Total messages per weekday for {sender}")
def happy_message(message):
if '!' in message:
return True
else: return False
table['happy_message'] = [happy_message(message) for message in table['message']]
#table.groupby('happy_message').message.count().plot(kind='bar')
#plt.title(f"Are messages happy?")
happy_percentage = table['happy_message'].sum() / table['message'].count() * 100
sad_percentage = 100 - happy_percentage
data = [happy_percentage, sad_percentage]
labels = ['happy', 'not happy']
plt.pie(data, labels=labels, autopct='%1.1f%%')
happyness_ratios = []
for sender in senders:
personal = table[(table['sender']== sender)]
happyness_ratio = personal.happy_message.sum() / personal.message.count()
happyness_ratios.append(happyness_ratio)
plt.bar(senders, happyness_ratios)
plt.xticks(rotation=90)
plt.title('Happiness Ratios')
table['date_datetime'] = pd.to_datetime(table['date'], format='%d/%m/%Y')
table['time_datetime'] = pd.to_datetime(table['time'], format='%H:%M:%S')
# I could do mean
table.groupby(table.date_datetime.dt.month).happy_message.count().plot()
plt.title('Happy messages throughout the year')
table.groupby(table.date_datetime.dt.day).happy_message.sum().plot(label='Happy messages')
#table.groupby(table.date_datetime.dt.day).message.count().plot(label='Total messages')
plt.title('Happy messages throughout the month')
plt.legend()
table.groupby(table.date_datetime.dt.dayofweek).happy_message.sum().plot(label='Happy messages')
#table.groupby(table.date_datetime.dt.dayofweek).message.count().plot(label='Total messages')
plt.title('Happy messages throughout the week')
plt.legend()
plt.xticks(range(len(cats)), cats)
table.groupby(table.time_datetime.dt.hour).happy_message.sum().plot(label='Happy messages')
#table.groupby(table.time_datetime.dt.hour).message.count().plot(label='Total messages')
plt.title('Happy messages throughout the day')
plt.legend()
###Output
_____no_output_____
###Markdown
Estimating text loss in Middle Dutch chivalric epics This English-language, Python notebook accompanies the following publication: > Mike Kestemont and Folgert Karsdorp, "Het Atlantis van de Middelnederlandse ridderepiek. Een schatting van het tekstverlies met methodes uit de ecodiversiteit". *Spiegel der letteren* (2020). All figures and numbers were prepared using the code below. Future updates of the code and data will be managed in an open [Github repository](https://github.com/mikekestemont/chivalric_diversity). The code block below loads all (third-party) packages and modules necessary to run the notebook. These can be installed from the file `requirements.txt`: pip install -r requirements.txt
###Code
from functools import partial
from itertools import product
import numpy as np
np.random.seed(12345)
from scipy.special import erfinv
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("tufte.mplstyle")
plt.rcParams["text.usetex"] = False
%matplotlib inline
import scipy.stats as stats
from scipy.special import gammaln
###Output
_____no_output_____
###Markdown
Data We load the data from the spreadsheet file `mnl.xlsx`:
###Code
mnl = pd.read_excel('mnl.xlsx', header=None, names=('text', 'witness'))
mnl.head(10)
###Output
_____no_output_____
###Markdown
We are only interested in the count data, i.e. the number of witnesses per text (the technical term is "abundance data").
###Code
mnl.groupby('text').size().sort_values(ascending=False).head()
###Output
_____no_output_____
###Markdown
The counts per text can be plotted as follows:
###Code
fig, ax = plt.subplots(figsize=(10,18))
mnl.groupby('text').size().sort_values(ascending=True).plot.barh(ax=ax);
ax.set(xlabel='aantal handschriften', ylabel='',
title='Distributie van de (ons bekende) ridderepische teksten over tekstgetuigen')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig('output/Fig1.jpeg', dpi=300, transparent=True)
###Output
_____no_output_____
###Markdown
Yet a different perspective is to list the size of the frequency bins that we can distinguish within the manuscript counts:
###Code
types = mnl.groupby('text').size().sort_values(ascending=False).value_counts().sort_index()
types = types.to_frame(name='aantal teksten')
types['aantal handschriften'] = types.index
types.to_excel('output/Tab1.xlsx')
types
###Output
_____no_output_____
###Markdown
Finally, we define the auxiliary function `species_richness` to count the number of unique texts in the data (i.e. the number of non-zero counts):
###Code
def species_richness(counts):
return np.sum(counts > 0)
print('# unique texts:', species_richness(mnl.groupby('text').size()))
print('# witnesses:', len(mnl))
###Output
# unique texts: 74
# witnesses: 164
###Markdown
Jackknife The following function computes the first-order Jackknife estimate, on the basis of the abundance data in our data frame, as well as a confidence interval (.95 by default). This approach is detailed in the following paper: > K. Burnham & W. Overton, "Robust Estimation of Population Size When Capture Probabilities Vary Among Animals". *Ecology* (1979), 927-936.
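For reference, the estimator has a simple closed form (with $S_{obs}$ the observed number of texts, $f_1$ the number of texts preserved in exactly one witness, and $n$ the total number of witnesses): \begin{equation}\hat{S}_{jack1} = S_{obs} + \frac{n - 1}{n} f_1\end{equation}The function below obtains the same quantity by explicitly leaving one witness out at a time and correcting for the resulting bias.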
###Code
def jackknife(data, conf_lvl=0.95):
jack_stat = species_richness(data)
x = np.array(sum([[i] * c for i, c in enumerate(data, 1)], []))
index = np.arange(x.shape[0])
vals = []
for i in range(x.shape[0]):
t = x[index != i]
vals.append(species_richness(np.bincount(t)))
mean_jack_stat = np.mean(vals)
bias = (x.shape[0] - 1) * (mean_jack_stat - jack_stat)
estimate = jack_stat - bias
std_err = np.sqrt(
(x.shape[0] - 1) *
np.mean((mean_jack_stat - vals) *
(mean_jack_stat - vals), axis=0)
)
z_score = np.sqrt(2.0) * erfinv(conf_lvl)
conf_interval = estimate + z_score * np.array((-std_err, std_err))
return estimate, std_err, conf_interval
results = jackknife(mnl.groupby('text').size())
print('jackknife-estimate (order=1):', results[0], results[-1])
###Output
jackknife-estimate (order=1): 117.73170731707278 [106.64468284 128.8187318 ]
###Markdown
This implementation is verbose and uses an explicit `for`-loop, which iteratively leaves out observations and tracks the drops in diversity that follow from this operation. In the code blocks below we show that the same estimate can also be obtained in a fully analytical fashion. First we calculate the frequency counts for each unique text:
###Code
num_per_text = mnl.groupby('text').size()
num_per_text
###Output
_____no_output_____
###Markdown
Next, we store the species richness (the number of unique texts) in $t$:
###Code
t = species_richness(num_per_text)
t
###Output
_____no_output_____
###Markdown
Then we set $s$ to the number of texts that are only attested in a single witness:
###Code
s = sum(num_per_text == 1)
s
###Output
_____no_output_____
###Markdown
Only the $s$ texts that occur once will affect the species richness during the iterative Jackknife procedure: leaving out one of the $n$ witnesses lowers the richness by one only if that witness is the sole surviving copy of a text. We can therefore predict the mean of the leave-one-out values analytically:
###Code
n = num_per_text.sum()  # total number of witnesses
mu = (((n - s) * t) + (s * (t - 1))) / n
mu
###Output
_____no_output_____
###Markdown
That means that we can calculate the bias as follows:
###Code
bias = (n - 1) * (mu - t)
bias
###Output
_____no_output_____
###Markdown
To account for this bias, we can subtract it from the original species richness in the observed data:
###Code
t - bias
###Output
_____no_output_____
###Markdown
Simple example
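Given the closed form of the first-order jackknife, we expect an estimate of $9 + \frac{19}{20} \times 5 = 13.75$ for the toy tradition defined below (9 texts, 20 witnesses, of which 5 texts survive in a single copy); the cells that follow verify this step by step.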
###Code
counts = [5, 4, 3, 3, 1, 1, 1, 1, 1]
names = 'ABCDEFGHI'
data = zip(counts, names)
df = pd.DataFrame(zip(names, counts), columns=('naam', 'mss'))
df.to_excel('output/Tab2.xlsx')
df
print('total # of witnesses:', df['mss'].sum())
species_richness(df['mss'])
jackknife(df['mss'])
data = np.array(df['mss'])
x = np.array(sum([[i]*c for i, c in enumerate(data, 1)], []))
tradition = [names[i - 1] for i in x]
print(tradition)
bootstrap = []
for i in range(len(tradition)):
tradition_ = [tradition[j] for j in range(len(tradition)) if i != j]
bootstrap.append((
(i + 1), tradition[i], ''.join(tradition_),
len(set(tradition_)), len(set(tradition_)) - len(set(tradition))))
df = pd.DataFrame(bootstrap, columns=('iteration', 'leftout', 'imputed tradition', 'richness', 'error'))
df.to_excel('output/Tab3.xlsx')
df
mean_estimate = np.mean(df['richness'])
print('Average estimate:', mean_estimate)
print('Bias:', mean_estimate - 9)
bias = 19 * (mean_estimate - 9)
bias
corrected = 9 - bias
corrected
conf_lvl = .95
std_err = np.sqrt(
19 * np.mean((mean_estimate - df['richness']) *
(mean_estimate - df['richness']), axis=0))
z_score = np.sqrt(2.0) * erfinv(conf_lvl)
conf_interval = corrected + z_score * np.array((-std_err, std_err))
conf_interval
###Output
_____no_output_____
###Markdown
Chao1 In the paper we eventually opt for the more recent, non-parametric formula "Chao1", which is described in this paper: > A. Chao & L. Jost, "Estimating diversity and entropy profiles via discovery rates of new species". *Methods in Ecology and Evolution* (2015), 873-882. Because we have "doubletons" in our data, we can use the following formula, where:- $\hat{f_0}$ is the (theoretical) number of non-observed species/texts;- $f_1$ is the number of species/texts attested exactly once ("singletons");- $f_2$ is the number of species/texts attested exactly twice ("doubletons");- $n$ is the total number of individuals/manuscripts in the observed data.\begin{equation}\hat{f_0} = \frac{(n - 1)}{n} \frac{f_1^2}{2f_2}\end{equation}The code block below returns the full, theoretical species richness as estimated by Chao1, i.e. it adds the estimated $\hat{f_0}$ to the species richness that was observed in the sample:
###Code
def chao_richness(x):
x, n = x[x > 0], x.sum()
t = x.shape[0]
f1, f2 = (x == 1).sum(), (x == 2).sum()
return t + (n - 1) / n * ((f1 ** 2 / 2 / f2) if f2 > 0 else (f1 * (f1 - 1) / 2))
###Output
_____no_output_____
###Markdown
If we apply this function to our data, we obtain an even higher (but arguably more realistic) estimate of the loss in textual diversity for this literature. Note, however, that this estimate is still a theoretical *minimum estimate*, since the original loss could still be higher.
###Code
chao_richness(num_per_text)
###Output
_____no_output_____
###Markdown
Instead of reporting just this number, we apply a bootstrapped procedure in which we sample from the material using a multinomial distribution (see the Appendix of Chao and Jost, 2015) and apply Chao1 to the resulting samples. This procedure allows us to calculate a .95 confidence interval for this value.
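Concretely, the bootstrap first estimates the sample coverage, i.e. the probability mass of the observed texts; when doubletons are present this is \begin{equation}\hat{C} = 1 - \frac{f_1}{n}\left[\frac{(n - 1) f_1}{(n - 1) f_1 + 2 f_2}\right]\end{equation}The observed relative abundances are then shrunk so that a total mass of $1 - \hat{C}$ remains, and that remainder is divided equally over the $\hat{f_0}$ unseen texts (see `bt_prob` below).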
###Code
def bt_prob(x):
x, n = x[x > 0], x.sum()
f1, f2 = (x == 1).sum(), (x == 2).sum()
C = 1 - f1 / n * (((n - 1) * f1 / ((n - 1) * f1 + 2 * f2)) if f2 > 0 else
((n - 1) * (f1 - 1) / ((n - 1) * (f1 - 1) + 2)) if f1 > 0 else
0)
W = (1 - C) / np.sum(x / n * (1 - x / n) ** n)
p = x / n * (1 - W * (1 - x / n) ** n)
f0 = np.ceil(((n - 1) / n * f1 ** 2 / (2 * f2)) if f2 > 0 else
((n - 1) / n * f1 * (f1 - 1) / 2))
p0 = (1 - C) / f0
p = np.hstack((p, np.array([p0 for i in np.arange(f0)])))
return p
def bootstrap(x, n_iter=1000, conf=.95):
# define a multinomial probability distribution
# for the bootstrap procedure to sample from:
p, n = bt_prob(x), x.sum()
data_bt = np.random.multinomial(n, p, n_iter)
pro = np.array([chao_richness(row) for row in data_bt])
pro_mean = pro.mean(0)
lci_pro = -np.quantile(pro, (1 - conf) / 2, axis=0) + pro_mean
uci_pro = np.quantile(pro, 1 - (1 - conf) / 2, axis=0) - pro_mean
sd_pro = np.std(pro, axis=0)
pro = pro_mean - pro
return (lci_pro, uci_pro, sd_pro, pro)
def chao_estimate(x, n_iter=1000, conf=0.95):
pro = chao_richness(x)
(lci_pro, uci_pro, sd_pro, bt_pro) = bootstrap(x, n_iter=n_iter, conf=conf)
lci_pro, uci_pro = pro - lci_pro, pro + uci_pro
bt_pro = pro - bt_pro
return (lci_pro, uci_pro, bt_pro, pro)
###Output
_____no_output_____
###Markdown
The following block applies this bootstrapped procedure to obtain the final estimates:
###Code
lci_pro, uci_pro, bt_pro, pro = chao_estimate(num_per_text, n_iter=10000)
print('pro:', pro)
print('lci_pro:', lci_pro)
print('uci_pro:', uci_pro)
###Output
pro: 148.00750469043152
lci_pro: 106.21863495939421
uci_pro: 219.01578019221017
###Markdown
The array `bt_pro` contains the estimates that were collected during the bootstrap (1,000 iterations by default). Below, we plot the distribution of these numbers using a rainplot:
###Code
import ptitprince as pt
fig, ax = plt.subplots(figsize=(8, 6))
d = list([(x, 'bootstrap') for x in bt_pro])
bt = pd.DataFrame(d, columns=('bootstrap', 'type'))
pt.RainCloud(
data=bt, x="type", y="bootstrap", ax=ax,
orient="h", alpha=.8, bw=.2, rain_alpha=.3, palette="Greys"
)
ax.axvline(pro, c='black', ls='--')
ax.axvline(uci_pro, c='darkgrey', ls='--')
ax.axvline(lci_pro, c='darkgrey', ls='--')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.set_yticks([])
ax.set_ylabel('')
plt.savefig('output/Fig2.png', dpi=300, transparent=True)
###Output
_____no_output_____
###Markdown
The idea that there were at least 100 texts is not completely unlikely, but it is a very conservative estimate, at the very bottom of the probability continuum. The estimate of ~148 texts (or more) is much more plausible, which would mean that *at least half of the chivalric texts have been lost*. Just as 100 is an extremely optimistic estimate, ~219 is the most pessimistic estimate: in that case, only a third of the ever available chivalric epics would have persisted through time, which is quite a dramatic, but not entirely unrealistic figure. Species accumulation curve In what preceded, we have investigated how many unique texts may have been lost, or, more positively, how many unique texts we may have not yet seen. In this concluding section, we investigate how many manuscripts would have to be retrieved before we arrive at this diversity estimate. This new estimate provides us with information about the total population size, i.e. the total number of text witnesses. We follow Hsieh, Ma and Chao (2016) to compute this estimate using "Rarefaction Extrapolation". For details about this method, see: > Hsieh, Ma and Chao (2016): iNEXT: an R package for rarefaction and extrapolation of species diversity. *Methods in Ecology and Evolution*, 7, 1451–1456.
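For the extrapolated part of the curve (i.e. beyond the $n$ witnesses we actually have), the estimate follows \begin{equation}\hat{S}(n + m^*) = S_{obs} + \hat{f_0}\left[1 - \left(\frac{n \hat{f_0}}{n \hat{f_0} + f_1}\right)^{m^*}\right]\end{equation}where $m^*$ is the number of additional witnesses sampled and $\hat{f_0}$ is the Chao1 estimate of the number of unseen texts; the interpolated part of the curve is computed analytically in `rarefaction_extrapolation` below.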
###Code
def bootstrap_re(x, fn=chao_richness, n_iter=1000, conf=.95):
# define a multinomial probability distribution
# for the bootstrap procedure to sample from:
p, n = bt_prob(x), x.sum()
data_bt = np.random.multinomial(n, p, n_iter)
Dq = fn(x)
pro = np.array([fn(row) for row in data_bt])
error = stats.norm.ppf(1 - (1 - conf) / 2) * np.std(pro, 0)
lci_pro = Dq - error
uci_pro = Dq + error
sd_pro = np.std(pro, axis=0)
return (lci_pro, uci_pro, sd_pro, Dq, )
def rarefaction_extrapolation(x, max_steps):
x, n = x[x > 0], x.sum()
def _sub(m):
if m <= n:
return np.sum(1 - np.array(
[np.exp(gammaln(n - i + 1) + gammaln(n - m + 1) -
gammaln(n - i - m + 1) - gammaln(n + 1)) if i <= (n - m) else
0 for i in x]))
else:
S = (x > 0).sum()
f1, f2 = (x == 1).sum(), (x == 2).sum()
f0 = ((n - 1) / n * f1 * (f1 - 1) / 2) if f2 == 0 else ((n - 1) / n * f1**2 / 2 / f2)
A = n * f0 / (n * f0 + f1)
return S if f1 == 0 else (S + f0 * (1 - A**(m - n)))
return np.array([_sub(mi) for mi in range(1, max_steps)])
counts = np.bincount(mnl.groupby('text').size())[1:] # ignore zero
x = np.array(sum([[i] * c for i, c in enumerate(counts, 1)], []))
###Output
_____no_output_____
###Markdown
Here too we use a bootstrap method with 100 samples:
###Code
max_steps = 1000
lci_pro, uci_pro, sd_pro, Dq = bootstrap_re(
x,
fn=partial(rarefaction_extrapolation, max_steps=max_steps),
n_iter=100
)
steps = np.arange(1, max_steps)
interpolated = np.arange(1, max_steps) < x.sum()
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(steps[interpolated], Dq[interpolated], color='C0')
ax.plot(x.sum(), Dq[x.sum() - 1], 'o')
ax.plot(steps[~interpolated], Dq[~interpolated], '--', color='C0')
ax.fill_between(steps, lci_pro, uci_pro, alpha=0.3)
ax.grid()
ax.set(xlabel='# handschriften', ylabel='# teksten', title='Species Accumulation Curve')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig('output/Fig3.png', dpi=300, transparent=True)
###Output
_____no_output_____
###Markdown
SymPy 2022 Documentation Theme Survey Analysis Methodology SymPy ran a user survey about its documentation theme from February 5-19, 2022. The primary purpose of the survey was to guide the selection of a Sphinx theme for the SymPy Documentation at https://docs.sympy.org. A total of 22 people responded. The survey was done on Google Surveys and was shared on the SymPy public mailing list, the [@SymPy](https://twitter.com/SymPy) Twitter account, and a [SymPy discussion on GitHub](https://github.com/sympy/sympy/discussions/23055). The survey consisted of 14 questions, all of which were optional. The results of these responses are summarized here. We would like to thank everyone who took and shared the survey. Four themes were [chosen based on factors such as layout, navigation, performance, and accessibility](https://github.com/sympy/sympy/issues/22716) for evaluation by the SymPy community. Each theme was "prototyped" by - Applying the theme to the SymPy dev documentation - Removing SymPy Live, which has several problems, is [planned to be removed in the live documentation](https://github.com/sympy/sympy/issues/22835), and affects the formatting of the documentation site due to importing a style sheet - [Hosting them on GitHub Pages](https://bertiewooster.github.io/sympy-doc/) No attempt was made to customize the four themes because that is anticipated to be a time-consuming process with both technical (styling) and consensus-building components. Respondents were thus encouraged to focus on the layout, navigation, and interactive features, rather than the exact styling, for example colors. For each of the four themes, respondents were asked to - Rate the theme's usefulness on a scale of 1 (Not very useful) to 4 (Very useful) - Share what they liked and disliked Summary A detailed analysis of the responses is provided below. At a high level, there are three main takeaways from the results. 1. The themes can be divided into three ratings categories, where the rating scale was 1 (Not very useful) to 4 (Very useful): 1. Highest: Furo at 2.95. 2. Middle: PyData and Book, nearly tied at 2.85 and 2.86, respectively. 3. Lowest: Read the Docs (RTD) at 2.47. 2. Most comments about themes, both likes and dislikes, were about formatting, look and feel, and navigation. 3. We should proceed with the Furo theme, customizing it to address respondents' dislikes about its formatting. We can keep the PyData and Book themes in mind as backup options.
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas
import seaborn
import matplotlib.pyplot as plt
import textwrap
# Set the plot format to retina for better quality plots
from matplotlib_inline.backend_inline import set_matplotlib_formats
set_matplotlib_formats('retina')
%matplotlib inline
df = pandas.read_csv('theme-responses.csv')
# Set up columns, themes, colors
timestamp, experience_level, pydata_rating, pydata_like, pydata_dislike, book_rating, book_like, book_dislike, furo_rating, furo_like, furo_dislike, rtd_rating, rtd_like, rtd_dislike, other_comments, = df.columns
rating_cols = [pydata_rating, book_rating, furo_rating, rtd_rating]
themes = ["PyData", "Book", "Furo", "RTD"]
n_themes = len(themes)
theme_colors = ["blue", "red", "yellow", "green"]
n_responses = len(df)
###Output
_____no_output_____
###Markdown
Experience Level The first question asked the respondents to place their SymPy experience level on a scale of 1 to 5, with 1 being "novice user" and 5 being "expert user". Most respondents self-reported a mid-level experience with SymPy.
###Code
n_nr = df[experience_level].isna().sum()
experience_cat_nr = pandas.Series({'0': n_nr})
experience_cats_r = df[experience_level].dropna().astype(int).value_counts(sort=False).sort_index()
experience_cats = experience_cat_nr.append(experience_cats_r)
percents_experience_cats = ["%.1f%%" % i for i in experience_cats/n_responses*100]
ax = seaborn.countplot(df[experience_level].fillna(0).astype(int))
ax.bar_label(ax.containers[0], percents_experience_cats, label_type='center')
ax.set_xticklabels(['No response', "1 Novice user", "2", "3", "4", "5 Expert user"])
max_width = 10
# Split x tick labels across multiple lines
# https://stackoverflow.com/questions/57144682/split-string-xticks-into-multiple-lines-matplotlib
xtl = ax.set_xticklabels(textwrap.fill(x.get_text(), max_width) for x in ax.get_xticklabels())
###Output
_____no_output_____
###Markdown
Theme Ratings The survey asked respondents to rate the usefulness of four themes on a 1-4 scale, with 1 being Not very useful and 4 being Very useful. The mean and standard deviation of the rating for each theme are expressed numerically and graphically as:
###Code
df[rating_cols].describe().transpose()[["mean","std"]].round(2)
# So seaborn can automatically plot standard deviations as error bars,
# combine all ratings into one column, paired with theme
all_themes = []
for theme in themes:
this_theme = [theme] * n_responses
all_themes += this_theme
all_ratings = []
for col in rating_cols:
this_theme_ratings = list(df[col])
all_ratings += this_theme_ratings
df_combined = pandas.DataFrame(list(zip(all_themes, all_ratings)), columns = ['theme', 'rating'])
rating_min = 1
rating_max = 4
num_bins = rating_max - rating_min + 1
rating_values = range(rating_min, rating_max + 1)
t = seaborn.barplot(
data=df_combined,
x="theme",
y="rating",
capsize=0.2,
errwidth=0.5,
palette=theme_colors,
alpha=.6,
)
t.set_yticks(rating_values)
t.bar_label(t.containers[0], label_type = 'center', fmt='%.2f')
t.set(xlabel='', ylabel='How useful is each theme?\n1= Not very useful Very useful = 4')
t.set(ylim=(rating_min,rating_max))
t.grid(False)
###Output
_____no_output_____
###Markdown
Furo is the highest-rated theme by about 0.1 points. PyData and Book are virtually tied for second place. Read the Docs is rated lowest, about 0.5 points below Furo. Rating Distribution for Themes For each theme, a histogram displays the count of responses for each rating level, from 1 to 4, and the dashed vertical line indicates the mean rating.
###Code
## Functions to determine complimentary color
## https://stackoverflow.com/questions/40233986/python-is-there-a-function-or-formula-to-find-the-complementary-colour-of-a-rgb
# Sum of the min & max of (a, b, c)
def hilo(a, b, c):
if c < b: b, c = c, b
if b < a: a, b = b, a
if c < b: b, c = c, b
return a + c
# Get complimentary color
def complement(r, g, b):
k = hilo(r, g, b)
return tuple(k - u for u in (r, g, b))
import matplotlib
import matplotlib.ticker as mticker
fig, axes = plt.subplots(n_themes, figsize=(5, 15))
plt.subplots_adjust(hspace = 0)
for theme_num, theme in enumerate(themes):
graph = seaborn.histplot(
ax=axes[theme_num],
data=df,
x = rating_cols[theme_num],
bins = num_bins,
binrange=[rating_min,rating_max],
color=theme_colors[theme_num],
alpha = 0.6,
edgecolor="white"
)
# Add vertical line at mean of each theme
# Get RGB of bar's color
bar_rgb = matplotlib.colors.to_rgb(theme_colors[theme_num])
line_rgb = complement(*bar_rgb)
# https://datavizpyr.com/how-to-add-a-mean-median-line-to-a-seaborn-displot/
graph.axvline(x=df[rating_cols[theme_num]].mean(),
ls='--',
color=line_rgb,
ymax = 0.95
)
graph.set(ylabel=themes[theme_num] + " count")
graph.grid(False) # remove gridlines
graph.set(yticklabels=[]) # remove y-axis tick labels
# Add labels to bars: Percents
theme_cats = df[rating_cols[theme_num]].dropna().astype(int).value_counts(sort=False).sort_index()
# Ensure each bar has an entry in list
denom = sum(theme_cats) / 100
percents_cats = []
for cat in range(rating_min, rating_max + 1):
if cat in theme_cats:
pct = theme_cats[cat] / denom
percents_cats += ["%.0f%%" % pct]
else:
percents_cats += [""]
l = graph.bar_label(
graph.containers[0],
percents_cats,
label_type= 'center',
# color=line_rgb,
# color="black",
bbox=dict(
fc = "white",
lw = 1,
)
)
# Hide tick marks by making them zero length
graph.tick_params(length = 0)
if theme_num in range(1, n_themes - 2):
# For graphs in the middle (neither top nor bottom),
# remove x axis and tick labels
graph.set(xlabel='')
graph.set_xticklabels([])
else:
# For graphs at top and bottom,
# show x-axis title and tick labels
# Fixing yticks with matplotlib.ticker "FixedLocator"
# https://stackoverflow.com/questions/63723514/userwarning-fixedformatter-should-only-be-used-together-with-fixedlocator
label_format = '{:,.0f}'
ticks_loc = graph.get_xticks().tolist()
graph.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc))
graph.set_xticklabels([label_format.format(x) for x in ticks_loc])
# Center labels on bars (columns)
# https://stackoverflow.com/questions/63516973/how-can-i-mark-xticks-at-the-center-of-bins-for-a-seaborn-distplot
mids = [rect.get_x() + rect.get_width() / 2 for rect in graph.patches]
graph.set_xticks(mids)
graph.set(xlabel='How useful is each theme?')
graph.set_xticklabels(['1\nNot very','2\n','3\n','4\nVery'])
if theme_num == 0:
graph.xaxis.set_ticks_position("top")
graph.xaxis.set_label_position("top")
###Output
_____no_output_____
###Markdown
For Furo, the mode is 4, Very useful. The mode of the other three themes is 3. Furo theme Given that Furo is the highest-rated theme, it is worth considering other factors before deciding to proceed with it. Comments about Furo Here are consolidated lists of highlights from what respondents liked and disliked about Furo. - Like - Clean look and clear font - Left and right sidebars for site and page navigation, respectively - Has both light and dark themes, and is easy to switch between them - Dislike - Colors are distracting (for example, behind code blocks), too dark without enough contrast - Bold and highlighting seem a little cartoonish - Not more widely used in documentation for data science packages - Collapse of in-page navigation not optimal Other factors - Furo SymPy prototype gets excellent [Lighthouse scores](https://googlechrome.github.io/lighthouse/viewer/?psiurl=https%3A%2F%2Fbertiewooster.github.io%2Fsympy-doc%2Ffuro%2Fmodules%2Fassumptions%2Findex.html%23querying&strategy=desktop&category=performance&category=accessibility&category=best-practices&category=seo&category=pwa&utm_source=lh-chrome-ext) before any customization: - Performance: 100 - Accessibility: 98 - Best Practices: 100 - SEO (search engine optimization): 90 - Furo is well supported, having frequent updates - Furo's developer is very accessible, even [commenting on a SymPy thread](https://github.com/sympy/sympy/issues/22716#issuecomment-1013016667) without our asking them - ["Pretty much every color on the screen is customizable"](https://pradyunsg.me/furo/customisation/colors/defining-overrides-for-defaults) per Furo's developer so we should be able to address what respondents disliked about colors - Furo is the only theme that shows a fully expanded table of contents on the right sidebar - Furo was recommended by Joannah Nanjekye, who spent much time working on the documentation for [2021 Google Season of Docs](https://github.com/sympy/sympy/wiki/GSoD-2021-Report-Joannah-Nanjekye:-Reorganizing-the-SymPy-Documentation) Recommendation For the above reasons, we should proceed with Furo as the new Sphinx theme. Customizing the theme should address some of the deficits of the prototype, such as colors. Should there be some unexpected reason we cannot customize Furo as desired, we could try PyData or Book. Other comments from respondents Finally, nine people responded to "Are there any other comments you'd like to make?". Here is a summary of some things that stood out. - All four options are good. - Whichever theme you go with it’ll be an improvement. - Please use that nice dark mode Appendix: All comments from respondents For the sake of completeness, all comments are shown below.
###Code
def print_answers(col):
i = 1
for e, v in df[[experience_level,col]].iloc:
if pandas.isna(v):
continue
print(f"{i}. {v.strip()} (experience level: {int(e) if not pandas.isna(e) else 'N/A'})\n")
i += 1
tag = "---"
for name, values in df.iteritems():
if "like" in str(name).lower():
print(tag + " " + name + " " + tag)
print_answers(name)
###Output
--- What do you LIKE about the PyData theme? ---
1. search easy to find, updating position in doc on right (experience level: 5)
2. It is a neat theme (experience level: 4)
3. Clear, succint, very little clutter (experience level: 5)
4. I like the clean layout. (experience level: 4)
5. It is neat and simple, with left and right sidebars proving useful. The top sidebar is also quite convenient since scrolling on the left sidebar would be reduced as opposed to themes which lack the top sidebar. (experience level: 3)
6. clear view without distractions (experience level: 2)
7. Flat design, style uniformity accros sur the data exosystem, maintainability of a shared product (experience level: N/A)
8. The docstring rendering is easier to read than the old docs page. Better colors and less clutter. Fits on screen better. (experience level: 3)
9. Readable, not a huge shift (experience level: 3)
10. Desktop: Clean look. The categories across the top. The search box in a fixed position on the left navbar. Good highlighting format (for ?highlight=xxx in URL). Good differentiation between regular text, links (bold, blue), and code (pink). Good permalink setup: ¶ appears when link text moused over, then ¶ highlights when moused over. Code blocks are clearly delineated (using gray box) and their background is the same as rest of page. Easy to triple-click to select a line of code from a code block. Phone: Fairly easy to copy code from a code block. (experience level: 3)
11. It's very clean looking. Easy to navigate. (experience level: 3)
12. It looks neat and has a floating content menu. (experience level: 5)
13. Reminds me of scipy (experience level: 4)
14. it's clean (experience level: 4)
--- What do you DISLIKE about the PyData theme? ---
1. Looks kinda incomplete (experience level: 4)
2. 3 levels of nesting (top, left, right) was not instantly obvious for how to use. (experience level: 5)
3. I am bothered by the way useful links switch from side-to-side. I do understand the the links at right are to anchors on the particular page. However, sometimes having links on one side, but not the other did not seem natural and was initially confusing. (experience level: 4)
4. Left sidebar occupies a lot of space on the screen. (experience level: 3)
5. nothing (experience level: 2)
6. The first thing I did was click "API ref" in the right side menu, but it didn't take me to a new page. I would expect selecting from a menu on the right would then show new content about that topic.
It needs more sympy colors to feel like sympy's documentation.
The front page is quite boring with only four big headers. If you look at matplotlib, numpy, pandas, etc. the front page is very engaging with the graphics and big buttons.
You have to click too many times to drill down to seeing an actual page with information on it. In the old docs you have a big ToC on the front page and one click usually gets you to useful information.
Most pages feel too short. Web browsers can scroll (infinitely). It's preferable to make longer pages with hyperlink targets and table of contents menus. (experience level: 3)
7. White background, black text. Could use more links or tables of contents. (experience level: 3)
8. Desktop: In left navbar, not super-obvious which item this page is (highlighting not that strong). Distinction between h2 and h3 not that obvious, especially when h2 text is lowercase and h3 text is uppercase. Phone: Initial view of many pages (home, search) wastes a lot of vertical space. When tap a link in tree at top of page, there's no visible indication that the page content changed because link tree takes up entire screen height. (experience level: 3)
9. I don't like the right-hand TOC that only expands the section you're currently in. (experience level: 3)
10. Nothing in particular but if possible there should be an option for dark mode. (experience level: 5)
11. Doesn't remind me of sympy (experience level: 4)
12. content is too narrow (experience level: 4)
--- What do you LIKE about the Book theme? ---
1. Simple and minimalistic (experience level: 4)
2. Clean, intuitive (experience level: 5)
3. Good clean styling. Accordion navigation at left. Easy to find way to collapse left hand navigation pane. (experience level: 4)
4. Collapsible sidebar and full screen mode would help to view docs better once users find what they are looking for. (experience level: 3)
5. clean view (experience level: 2)
6. I like the font (experience level: 3)
7. Desktop: Clear differentiation of headings from regular text, and h2 from h3; and current section in left navbar; due to blue color. Clear differentiation of function syntax and parameters due to background color. Permalinks nicely handled: ¶ appears when mouse over link text, then gets darker when mouse over ¶. Phone: Pretty good use of vertical space. Page content is front and center, and nicely presented when go to a new page. Previous and Next links at bottom of page give visitor suggestions of where to go next. (experience level: 3)
8. Again, very clean and easy to navigate. Works well on my phone, too. (experience level: 3)
9. it's appealing to read (experience level: 4)
--- What do you DISLIKE about the Book theme? ---
1. I don't like the expandable index as much as I like the top selection of doc + left index; I also find the larger logo overpowering of the important element (like search and index). (experience level: 5)
2. top bar, that menu button, full screen toggle and download button will hardly ever be used, yet they consume precious vertical space. (experience level: 5)
3. SymPy logo takes up a lot of space on every page. Code indents seem off in some pages. (experience level: 3)
4. top controls distract me ) (experience level: 2)
5. I can't really tell the difference in this one and the first one I was shown. So all the same comments apply. (experience level: 3)
6. White background (experience level: 3)
7. Phone: Icons at top right (fullscreen and download) unlikely to be used often, and draw visitor's attention. No index (link tree) for within current page. No constant reminder of which site I'm on--could we add "SymPy" or the logo to the header, between hamburger menu and two icons at top right? (experience level: 3)
8. Same as pydata: I don't like the right-hand TOC that only expands the section you're currently in. (experience level: 3)
9. the contest has got to be really organized in this format (experience level: 4)
--- What do you LIKE about the Furo theme? ---
1. The sidebar for contents is pretty cool (experience level: 4)
2. Clean, intuitive and puts all vertical space to use. (experience level: 5)
3. Almost as good as book. Clean theme, easy to navigate. (experience level: 4)
4. The inbuilt dark mode could be useful. (experience level: 3)
5. I like that right and left menus are collapsable. Many times I checked Sympy documentation using my tablet and I like to have a bigger are dedicated to the document I’m reading. (experience level: 1)
6. nothing (experience level: 2)
7. The dark theme button is right here, I find this layout very easy on the eye (experience level: N/A)
8. Black background, clear font, THIS IS THE ONE (experience level: 3)
9. Desktop: Clear differentiation between headings and regular text, and h2 from h3. Pretty clear which is current section on left navbar due to bolding. Permalink setup pretty good: ¶ appears when mouse over link text, cursor turns to hand when mouse over ¶. Phone: Having two hamburger menus, one for site tree and one for sections-on-this-page tree. Always displays site title at top. (experience level: 3)
10. * I like the right-hand TOC where you always see all sections. For me, this is much better than the ones (book and pydata) where it only expands the section you're currently in.
* I think this one just "looks" the best overall. (experience level: 3)
11. As before, clean and easy to navigate. Also, I like the light and dark theme options. Easy on the eyes. (experience level: 3)
12. In my opinion it is the perfect theme. (experience level: 5)
13. It's dark. I like dark (experience level: 4)
14. looks nice (experience level: 4)
--- What do you DISLIKE about the Furo theme? ---
1. bold/highlighting seems a little cartoonish; I prefer a cleaner style (experience level: 5)
2. The lack of a secondary color makes it look unfinished (experience level: 4)
3. Nothing in particular (experience level: 5)
4. Does not collapse in-page navigation as well as 'book'. Also not as much easy changing of the view. (experience level: 4)
5. SymPy logo takes up a lot of space on every page. (experience level: 3)
6. too dark without sufficient contrast (experience level: 2)
7. That it’s not more widely used in data science docs (experience level: N/A)
8. Same comment as last. Looks practically the same as the prior two. Same comments apply. (experience level: 3)
9. Nothing (experience level: 3)
10. Desktop: Not a big fan of background color in code blocks; could be improved by making the color lighter (less saturated). Phone: Bit of an "interference effect" when scroll vertically as top title bar hides (?) page content. (experience level: 3)
11. colors are distracting (experience level: 4)
--- What do you LIKE about the Read the Docs (RTD) theme? ---
1. I like the "stickies" like notes on the page and that IDE examples seem less intrusive (experience level: 5)
2. The colors are nice (experience level: 4)
3. Conventional, i.e. familiar to many users (experience level: 5)
4. Very familiar interface as this is used by lots of standard documentation. Easy to navigate. All the quicklinks on one side. (experience level: 4)
5. User friendly interface. Familiar theme, therefore could attract many users. (experience level: 3)
6. bright and colorfull (experience level: 2)
7. Simple, effective (experience level: N/A)
8. I like this one the best! The menu on the left works as expected. You click and it takes you to a new page of info. And then the menu expands letting you quickly navigate to new relevant content. I think if that big blue square in the top left was sympy green (and probably other blues need to be swapped to greens) then we have a winner! (experience level: 3)
9. Ehh (experience level: 3)
10. Desktop: Headings are nice and bold. Clear differentiation of h2 from h3. Using increasingly opaque colors in left navbar demonstrates where in site tree visitor is. Permalinks pretty good: ¶ appears when mouse over link text, cursor changes when mouse over ¶. Phone: . (experience level: 3)
11. Nice and familiar. (experience level: 3)
--- What do you DISLIKE about the Read the Docs (RTD) theme? ---
1. I kind of like to know where the Next and Previous buttons are taking me. (experience level: 5)
2. I'd like the side bar to be collapsible (experience level: 4)
3. One cannot expand subsections of the Reference documentation directly to the left, but is forced to click the links in the body. Easier to get lost on big pages since it lacks the "on-page-navigation" bar to the right. (experience level: 5)
4. Does not have the easy view customization of 'book' (experience level: 4)
5. Lack of right sidebar to filter topics on a particular page. (experience level: 3)
6. I find the other three themes clearer. (experience level: 1)
7. colours distract from text (experience level: 2)
8. Seems devoid of personality, this theme is far too popular for its own good (experience level: N/A)
9. Needs green in colors. Needs more engaging front page. There is a lot of prose style docs (often module docstring) in the API. That really needs to be move into one of the other big 4 groups of the documentation. The api should strictly be the docstrings of classes, functions, etc. (experience level: 3)
10. It’s not the fur theme (experience level: 3)
11. Previous and Next links don't display what those page titles are (until visitor mouses over). Desktop: Not a big fan of color backgrounds of left navbar and especially to the right of page content; could be improved by changing colors. Determining which page vistior is on, in left navbar, could be easier: highlighting of current page is link a little weak. Phone: Very much dislike how left navbar pushes page content to the right; prefer overlay style of other themes. Very much dislike that there's no way to navigate to another page, or jump to another section of this page, after visitor scrolls down, because the hamburger menu isn't sticky. (experience level: 3)
12. left-hand TOC has issues
e.g. Try going to "Reference Documentation" > "Topics". Now under "Topics" you see the same sections as "Reference Documentation" repeated again. Now click "Topics" in here, and you actually get into "Topics". (experience level: 3)
13. Again it looks like any other python project website. It doesn't stand out in particular. (experience level: 5)
14. too old fashioned (experience level: 4)
--- Are there any other comments you'd like to make? ---
1. Thanks for organizing this! (experience level: 5)
2. None in particular, looks great! (experience level: 5)
3. Book is cleaner looking and allows the viewer more adaptations of the display. However, it is likely to be less familiar and a little harder to navigate than rtd for most people. I think either would be acceptable. Book seems a little more 'modern'. The 'modernity' will probably only last for a couple of years as styles change so rapidly. (experience level: 4)
4. All four options are good. (experience level: 1)
5. thank you for your work on SymPy. (experience level: 2)
6. Whichever theme you go with it’ll be an improvement. Thank you for taking the time to implement this and to ask for feedback ! (experience level: N/A)
7. The first 3 all look the same and have poorly designed menus that don't function as expected. It also seems that there are two menus on some, which is confusing. The last one RTD is really the only one that looks functional to me. (experience level: 3)
8. Please use that nice dark mode (experience level: 3)
9. PyData, Book, and Furo would all be good choices.
Category names (SymPy Tutorial, SymPy Guides, Explanation, Reference Documentation, Miscellaneous) should be shortened, for example remove "SymPy", change "Reference Documentation" to API(?). (experience level: 3)
###Markdown
Author: Abhinav Nadh Thirupathi. Run this notebook top to bottom to reproduce the results.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv("data/study/study_data.csv",low_memory=False)
###Output
_____no_output_____
###Markdown
Data Normalization
###Code
from scipy import stats
cols = data.columns.values
# Groups the companies by 'Years Since Founded' and standardizes non-binary features in each group
for col in cols[:-2]:
if col.startswith('Details.Description') or col.startswith('Website.') or col.startswith('Overview') or col.startswith('Education') or col.startswith('Major'):
if col not in ["Overview.Gender.Agender", "Overview.Gender.Non-Binary"]:
data[col] = data.groupby('Details.Years Since Founded')[col].transform(lambda x : stats.zscore(x,ddof=1,nan_policy='omit'))
###Output
_____no_output_____
###Markdown
LOOCV
###Code
# Splits the data into features and target
Y = data[data.columns[-2:]].copy()
X = data.drop(columns=['Target', 'Details.Years Since Founded'])
import xgboost as xgb
xg = xgb.XGBClassifier(random_state=1)
xg.fit(X,Y['Target'])
###Output
_____no_output_____
###Markdown
Permutation Importance
###Code
from sklearn import inspection
r = inspection.permutation_importance(xg, X, Y['Target'], n_repeats=3160, random_state=1, n_jobs=-1)
for i in r.importances_mean.argsort()[::-1]:
if r.importances_mean[i] - 2 * r.importances_std[i] > 0:
print("{:<8}: {:.3f} +/- {:.3f}".format(X.columns.values[i],r.importances_mean[i],r.importances_std[i]))
###Output
_____no_output_____
###Markdown
SHAP Feature Importance
###Code
import shap
shap_values = shap.TreeExplainer(xg).shap_values(X)
pd.DataFrame((zip(X.columns[np.argsort(np.abs(shap_values).mean(0))],
np.abs(shap_values).mean(0)[np.argsort(np.abs(shap_values).mean(0))])),
columns=["Feature", "Importance" ]).sort_values(by=['Importance'], ascending=False)
shap.summary_plot(shap_values, X, plot_type="bar")
###Output
_____no_output_____
###Markdown
Performance Metrics
###Code
import xgboost as xgb
from sklearn import model_selection
from sklearn import metrics
xg1 = xgb.XGBClassifier(random_state=1)
Y_proba = model_selection.cross_val_predict(xg1, X, Y['Target'], cv=model_selection.LeaveOneOut(), n_jobs=-1, method='predict_proba')
Y_hat = np.argsort(Y_proba,axis=1)[:,1]
Y_proba1 = Y_proba[:,1]
print("AUC : ", metrics.roc_auc_score(Y['Target'], Y_proba1))
print("Accuracy : ", metrics.accuracy_score(Y['Target'], Y_hat))
print("Precision : ", metrics.precision_score(Y['Target'], Y_hat))
print("Recall : ", metrics.recall_score(Y['Target'], Y_hat))
print("F-score : ", metrics.f1_score(Y['Target'], Y_hat))
print("Brier Score: ", metrics.brier_score_loss(Y['Target'], Y_hat))
###Output
_____no_output_____
###Markdown
Prediction Thresholds
###Code
fpr, tpr, thresholds = metrics.roc_curve(Y['Target'], Y_proba1)
print('{:<30}{:<30}{:<30}'.format('FPR', 'TPR', 'Threshold'))
for x, y, z in zip(fpr,tpr,thresholds):
print('{:<30}{:<30}{:<30}'.format(x, y, z))
###Output
_____no_output_____
###Markdown
Reliability Diagram
###Code
from sklearn import calibration
probs = Y_proba1
fraction_of_positives, mean_predicted_value = calibration.calibration_curve(Y['Target'], probs, n_bins = 10)
ax1 = plt.figure()
plt.plot(mean_predicted_value, fraction_of_positives, marker = '.', label = 'XGBoost')
plt.xlabel('Mean Predicted Value')
plt.ylabel('Fraction of Positives')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Preparation
###Code
import pandas as pd
import numpy as np
import msgpack
with open('reviewers.msgpack', 'rb') as reviewers_file:
reviewers_data = msgpack.load(reviewers_file)
with open('reviews.msgpack', 'rb') as reviews_file:
reviews_data = msgpack.load(reviews_file)
reviewers_data.append({
b'is_publication': False,
b'key': b'swarmer',
b'name': b'Anton Barkovsky',
b'publication_link': None,
b'publication_title': None,
})
my_reviews = {
'blade-runner-2049': 100,
'baby-driver': 85,
'dunkirk': 80,
'loveless-2017': 95,
'kiss-kiss-bang-bang': 80,
'zero-dark-thirty': 85,
'sicario': 100,
'rogue-one': 90,
'the-prestige': 90,
'the-martian': 90,
'the-big-lebowski': 90,
'gran-torino': 90,
'citizenfour': 90,
'snowden': 80,
'arrival': 80,
'mulholland-dr': 80,
'the-danish-girl': 70,
'the-theory-of-everything': 80,
'the-big-short': 90,
'edge-of-tomorrow': 80,
'carol': 90,
'drive': 85,
'warcraft': 80,
'a-clockwork-orange': 80,
'the-hateful-eight': 80,
'apocalypse-now': 90,
'the-descendants': 80,
'the-social-network': 85,
'star-wars-episode-vii---the-force-awakens': 80,
'the-best-offer': 70,
'in-the-loop': 80,
'fight-club': 80,
'batman-begins': 80,
'the-fault-in-our-stars': 80,
'the-spectacular-now': 70,
'children-of-men': 90,
'ex-machina': 90,
'the-kings-speech': 90,
'the-imitation-game': 80,
'what-we-do-in-the-shadows': 80,
'up-in-the-air': 70,
'argo': 90,
'interstellar': 85,
'guardians-of-the-galaxy': 70,
'inglourious-basterds': 80,
'the-avengers-2012': 70,
'serenity': 80,
'5050': 70,
'hot-fuzz': 90,
'her': 90,
'moon': 90,
'about-time': 80,
'the-hurt-locker': 100,
'silver-linings-playbook': 80,
'the-hunger-games-catching-fire': 80,
'american-hustle': 70,
'the-wolf-of-wall-street': 80,
'dr-strangelove-or-how-i-learned-to-stop-worrying-and-love-the-bomb': 100,
'blade-runner': 85,
'the-perks-of-being-a-wallflower': 80,
'the-lives-of-others': 100,
'its-a-wonderful-life': 90,
'the-dark-knight': 90,
'pulp-fiction': 90,
'star-wars-episode-iv---a-new-hope': 80,
'the-godfather': 90,
'inception': 100,
'forrest-gump': 90,
'star-wars-episode-vi---return-of-the-jedi': 80,
'the-lord-of-the-rings-the-fellowship-of-the-ring': 70,
'pirates-of-the-caribbean-the-curse-of-the-black-pearl': 80,
'the-matrix': 90,
'star-wars-episode-v---the-empire-strikes-back': 80,
'gladiator': 100,
'the-godfather-part-ii': 90,
'black-swan': 80,
'the-lord-of-the-rings-the-return-of-the-king': 70,
'eternal-sunshine-of-the-spotless-mind': 80,
'the-good-the-bad-and-the-ugly-re-release': 90,
'the-lord-of-the-rings-the-two-towers': 70,
'amelie': 90,
}
reviews_data.extend([
{
b'date': None,
b'film': key.encode('utf-8'),
b'movie_link': None,
b'movie_title': None,
b'pub_title': None,
b'review_link': None,
b'reviewer': b'swarmer',
b'score': str(score).encode('utf-8'),
}
for key, score in my_reviews.items()
])
reviewers = sorted(set(
reviewer[b'key'].decode('utf-8')
for reviewer in reviewers_data
if not reviewer[b'is_publication']
))
reviewers_index = {key: i for i, key in enumerate(reviewers)}
swarmer_index = reviewers_index['swarmer']
films = sorted(set(review[b'film'].decode('utf-8') for review in reviews_data))
films_index = {key: i for i, key in enumerate(films)}
matrix = np.empty((len(films), len(reviewers)))
matrix[:] = np.nan
vals, counts = np.unique(matrix, return_counts=True, axis=None)
for review in reviews_data:
reviewer_key = review[b'reviewer'].decode('utf-8')
if reviewer_key not in reviewers_index:
continue
film_row = films_index[review[b'film'].decode('utf-8')]
reviewer_col = reviewers_index[reviewer_key]
matrix[film_row, reviewer_col] = float(review[b'score'].decode('utf-8'))
matrix_df = pd.DataFrame(matrix)
###Output
_____no_output_____
###Markdown
Similar reviewers
###Code
reviewer_correlation_matrix = matrix_df.corr(min_periods=10)
top_reviewer_corrs = reviewer_correlation_matrix[swarmer_index].nlargest(10)
top_reviewer_corrs.index = top_reviewer_corrs.index.map(lambda i: reviewers[i])
top_reviewer_corrs
def common_films(rkey1, rkey2):
rid1, rid2 = reviewers_index[rkey1], reviewers_index[rkey2]
col1 = matrix[:, rid1]
col2 = matrix[:, rid2]
for i, (score1, score2) in enumerate(zip(col1, col2)):
if np.isnan(score1) or np.isnan(score2):
continue
print(f'{films[i]}: {reviewers[rid1]}={score1}, {reviewers[rid2]}={score2}')
common_films('swarmer', 'carrie-rickey')
###Output
batman-begins: swarmer=80.0, carrie-rickey=63.0
drive: swarmer=85.0, carrie-rickey=75.0
gran-torino: swarmer=90.0, carrie-rickey=75.0
in-the-loop: swarmer=80.0, carrie-rickey=75.0
pirates-of-the-caribbean-the-curse-of-the-black-pearl: swarmer=80.0, carrie-rickey=75.0
the-dark-knight: swarmer=90.0, carrie-rickey=75.0
the-kings-speech: swarmer=90.0, carrie-rickey=100.0
the-lives-of-others: swarmer=100.0, carrie-rickey=100.0
the-lord-of-the-rings-the-two-towers: swarmer=70.0, carrie-rickey=75.0
the-perks-of-being-a-wallflower: swarmer=80.0, carrie-rickey=75.0
the-social-network: swarmer=85.0, carrie-rickey=100.0
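###Markdown
A note added for clarity (not part of the original analysis): `corr(min_periods=10)` above computes pairwise Pearson correlations over the films both reviewers have rated, and returns `NaN` for pairs with fewer than ten overlapping films. A minimal toy sketch of that behaviour:
###Code
# Toy illustration (hypothetical reviewers, not real data): columns with too few
# overlapping ratings come out as NaN when min_periods is not met.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'rev_a': [80, 90, np.nan, 70, 60],
    'rev_b': [75, 95, 50, np.nan, 65],
    'rev_c': [np.nan, np.nan, 55, np.nan, np.nan],  # almost no overlap with the others
})
# rev_a/rev_b share 3 rated films, so they get a value; pairs involving rev_c are NaN
print(toy.corr(min_periods=3))
###Output
_____no_output_____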
###Markdown
Similar films
###Code
matrix_df_t = matrix_df.transpose()
film_correlation_matrix = matrix_df_t.corr(min_periods=20)
stacked = film_correlation_matrix.stack()
stacked = stacked[stacked.index]
stacked = stacked[stacked.index.get_level_values(0) < stacked.index.get_level_values(1)]
top_film_corrs = stacked[stacked != 1.0].nlargest(20)
top_film_corrs.index = top_film_corrs.index.map(lambda i: (films[i[0]], films[i[1]]))
top_film_corrs
###Output
_____no_output_____
###Markdown
Dissimilar films
###Code
bottom_film_corrs = stacked[stacked != 1.0].nsmallest(20)
bottom_film_corrs.index = bottom_film_corrs.index.map(lambda i: (films[i[0]], films[i[1]]))
bottom_film_corrs
###Output
_____no_output_____
###Markdown
Maximizing the Sharpe Ratio
###Code
import scipy.optimize as sco
opts = max_sharpe_point(rets)
plt.scatter(pvols, prets, c=prets/pvols, marker='o', cmap=mpl.cm.jet)
plt.grid(True)
plt.xlabel('expected volatility')
plt.ylabel('expected return')
plt.colorbar(label='Sharpe ratio')
pt_opts = statistics(opts, rets).round(3)
plt.scatter(pt_opts[1], pt_opts[0], marker="*", s=500, alpha=0.5)
plt.show()
###Output
_____no_output_____
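###Markdown
The cells in this section rely on the helpers `statistics`, `max_sharpe_point`, and `min_variance_point`, which are defined earlier in the notebook. Below is a minimal sketch of what such helpers typically look like (an assumption for illustration, not the original implementation; it assumes 252 trading days per year and long-only, fully invested portfolios).
###Code
# Hedged sketch (assumption, not the original code): typical mean-variance helpers.
import numpy as np
import scipy.optimize as sco

def statistics_sketch(weights, ret_df):
    """Return [annualized return, annualized volatility, Sharpe ratio] for the weights."""
    weights = np.array(weights)
    pret = np.sum(ret_df.mean() * weights) * 252
    pvol = np.sqrt(np.dot(weights.T, np.dot(ret_df.cov() * 252, weights)))
    return np.array([pret, pvol, pret / pvol])

def max_sharpe_point_sketch(ret_df):
    """Weights that maximize the Sharpe ratio."""
    noa = ret_df.shape[1]
    cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1},)
    bnds = tuple((0, 1) for _ in range(noa))
    res = sco.minimize(lambda w: -statistics_sketch(w, ret_df)[2], noa * [1. / noa],
                       method='SLSQP', bounds=bnds, constraints=cons)
    return res['x']

def min_variance_point_sketch(ret_df):
    """Weights of the minimum-variance portfolio."""
    noa = ret_df.shape[1]
    cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1},)
    bnds = tuple((0, 1) for _ in range(noa))
    res = sco.minimize(lambda w: statistics_sketch(w, ret_df)[1], noa * [1. / noa],
                       method='SLSQP', bounds=bnds, constraints=cons)
    return res['x']
###Output
_____no_output_____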
###Markdown
Minimizing Portfolio Variance
###Code
optv = min_variance_point(rets)
plt.scatter(pvols, prets, c=prets/pvols, marker='o', cmap=mpl.cm.jet)
plt.grid(True)
plt.xlabel('expected volatility')
plt.ylabel('expected return')
plt.colorbar(label='Sharpe ratio')
pt_optv = statistics(optv, rets).round(3)
plt.scatter(pt_optv[1], pt_optv[0], marker="*", s=500, alpha=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
The Efficient Frontier
###Code
bnds = tuple((0, 1) for x in range(noa))
statistics_rets = partial(statistics, ret_df=rets)
def min_func_port(weights):
return statistics_rets(weights)[1]
%%time
trets = np.linspace(0.0, 0.1, 20)
tvols = []
for tret in trets:
cons = ({'type': 'eq', 'fun': lambda x: statistics(x, rets)[0] - tret},
{'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
res = sco.minimize(min_func_port, noa * [1. / noa,], method='SLSQP',
bounds=bnds, constraints=cons)
tvols.append(res['fun'])
tvols = np.array(tvols)
plt.scatter(pvols, prets,
c=prets / pvols, marker='o', cmap=mpl.cm.jet)
# random portfolios
plt.scatter(tvols, trets,
c=trets / tvols, marker='x', s=70, linewidth=2, cmap=mpl.cm.jet)
# efficient frontier
plt.plot(statistics(opts, rets)[1], statistics(opts, rets)[0],
'r*', markersize=30)
# portfolio with the maximum Sharpe ratio
plt.plot(statistics(optv, rets)[1], statistics(optv, rets)[0],
'y*', markersize=30)
# minimum-variance portfolio
plt.grid(True)
plt.xlabel('expected volatility')
plt.ylabel('expected return')
plt.colorbar(label='Sharpe ratio')
plt.show()
###Output
_____no_output_____
###Markdown
Data Cleaning & Translation
###Code
# Filtering year of birth
df[['day', 'month','birthyear']] = df['birthday'].str.split(" ", 2, expand=True)
df.drop(['birthday','day', 'month'], axis=1, inplace=True)
# Filtering year of status decision
df[['day', 'month','date']] = df['decision-date'].str.split(" ", 2, expand=True)
df.drop(['decision-date','day', 'month'], axis=1, inplace=True)
# Age
df['age'] = df['date'].astype(int) - df['birthyear'].astype(int)
# Translating status
df.replace({"status": status_translation}, inplace=True)
df.replace({"military_category": rank_translation}, inplace=True)
# dtype
df['birthyear'] = df['birthyear'].astype(int)
df['date'] = df['date'].astype(int)
# Invalid Date filters
df = df[df['date']>2019]
df = df[df['birthyear']!=2021]
df = df[df['birthyear']!=2020]
df.head(5)
def hist_by_category(df: object, column: str, title: str, x: str, y: str, kind: str) -> None:
"""
This function plots barchart for a given dataframe column.
---
Args:
df (object): pandas DataFrame
column(str): column name
title (str): figure title
x (str): x axis title
y (str): y axis title
kind (str): plot type (bar, barh, hist..)
Returns:
None
"""
# Creating Figure & Axes
fig, ax = plt.subplots(figsize=(16,9))
ax = df[column].value_counts(sort=True).plot(kind=kind)
# Setting Labels
ax.set_title( title, fontsize=20, pad=20)
ax.set_xlabel(x, fontsize=15)
ax.set_ylabel(y, fontsize=15)
# Legend & Grid
ax.grid(linestyle=":", color='#696969')
# Watermark
ax.text(0.99,
0.01,
'© Github/Geometrein',
verticalalignment='bottom',
horizontalalignment='right',
transform=ax.transAxes,
color='#606060',
fontsize=15,
alpha = 0.9)
column = 'military_category'
title = "Servicemen by Category"
x = 'Category of Servicemen'
y = 'Number of Servicemen'
hist_by_category(df, column, title, x, y, 'bar')
column = 'status'
title = "Servicemen Status"
x = 'Status of Servicemen'
y = 'Number of Servicemen'
hist_by_category(df, column, title, x, y,'bar')
###Output
_____no_output_____
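###Markdown
The cleaning cell above relies on `status_translation` and `rank_translation` dictionaries defined elsewhere in the notebook. A minimal sketch of the expected shape is given below; the keys are placeholders, only the English values match the categories used later in this analysis.
###Code
# Hypothetical sketch: the cleaning step expects plain {original_label: english_label}
# mappings. Keys are placeholders, not the original labels.
status_translation_example = {
    '<original status label>': 'deceased',
}
rank_translation_example = {
    '<original category label 1>': 'conscript',
    '<original category label 2>': 'contractor',
    '<original category label 3>': 'reserve',
}
# Used in the cleaning cell as: df.replace({"status": status_translation}, inplace=True)
###Output
_____no_output_____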
###Markdown
Casualties---
###Code
deaths_df = df[df['status'] == 'deceased']
deaths_df['age'].describe()
column = 'military_category'
title = "Deaths by Category"
x = 'Category of Servicemen'
y = 'Number of Servicemen'
hist_by_category(deaths_df, column, title, x, y,'bar')
###Output
_____no_output_____
###Markdown
Deaths by Age
###Code
column = 'age'
title = "Number of Deaths by Age during the Second Nagorno-Karabakh war."
x = 'Age of Servicemen'
y = 'Number of Deaths'
# Creating Figure & Axes
fig, ax = plt.subplots(figsize=(16,9))
ax.hist(deaths_df[column], bins = range(18,75), rwidth=0.5, align='left')
# Setting Labels
ax.set_title( title, fontsize=15, pad=10)
ax.set_xlabel(x, fontsize=15)
ax.set_ylabel(y, fontsize=15)
ax.set_xticks(range(18, 75, 1))
ax.set_yticks(range(0, 800, 100))
# Legend & Grid
ax.grid(linestyle=":", color='#696969')
# Watermark
ax.text(0.99,
0.01,
'© Github/Geometrein',
verticalalignment='bottom',
horizontalalignment='right',
transform=ax.transAxes,
color='#606060',
fontsize=15,
alpha = 0.9
)
conscrips = deaths_df[deaths_df['military_category'] == 'conscript']
contractors = deaths_df[deaths_df['military_category'] == 'contractor']
reserve = deaths_df[deaths_df['military_category'] == 'reserve']
data = [conscrips['age'], contractors['age'], reserve['age']]
title = "Age by Military Category."
x = 'Age of Servicemen'
# Creating Figure & Axes
fig, ax = plt.subplots(figsize=(16,9))
medianprops = dict(linestyle='-', linewidth=2.5, color='white')
box = ax.boxplot(data, vert=False, patch_artist=True, widths=0.7, whis = 1, medianprops=medianprops)
# Setting Labels
ax.set_title( title, fontsize=15, pad=10)
ax.set_xlabel(x, fontsize=15)
ax.set_yticklabels(['Conscript', 'Contractor', 'Reserve'])
# Legend & Grid
ax.grid(linestyle=":", color='#696969')
# Watermark
ax.text(0.99,
0.01,
'© Github/Geometrein',
verticalalignment='bottom',
horizontalalignment='right',
transform=ax.transAxes,
color='#606060',
fontsize=15,
alpha = 0.9
)
###Output
_____no_output_____
###Markdown
Analysis
###Code
import datetime
import random
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load the Data
###Code
from faces import FaceShard
from emotions import EmotionShard
from behavior import WindowShard, ScreenShard
from mirror import Mirror
from config import EMOTIONLOG, WINDOWLOG, SCREENSHOT_DIR, MIRRORLOG, FACE_DIR
shards = []
shards.append(EmotionShard(logfile=EMOTIONLOG))
shards.append(FaceShard(FACE_DIR))
shards.append(WindowShard(logfile=WINDOWLOG))
shards.append(ScreenShard(logdir=SCREENSHOT_DIR))
mirror = Mirror(shards=shards, lens=None, logfile=MIRRORLOG)
states = mirror.remember(from_date=datetime.datetime(year=2020, month=11, day=1))
###Output
_____no_output_____
###Markdown
Have a Look at States with Specific Emotions
###Code
# Let's consider the more interesting ones by filtering
emotions = set([state['emotions'][0]['emotion'] for state in states if len(state['emotions'])>0])
# Filter by detected emotion
ids_by_emotion = {}
for emotion in emotions:
ids_by_emotion[emotion] = [state['ID'] for state in states
if len(state['emotions'])>0 and state['emotions'][0]['emotion']==emotion]
ids_by_emotion.keys()
emotion = 'anger'
ids = sorted(ids_by_emotion[emotion])
print("%d relevant logs" % len(ids))
state_by_id = {state['ID']: state for state in states}
# Find for which IDs we have a capture available
ids = [i for i in ids if 'faces' in state_by_id[i] and len(state_by_id[i]['faces'])]
print("%d relevant logs with captures" % len(ids))
# Find for which IDs we also have a screenshot
ids = [i for i in ids if 'screenshot' in state_by_id[i] and state_by_id[i]['screenshot'] is not None]
print("%d relevant logs with screenshots" % len(ids))
id_ = random.choice(ids)
print(id_)
state = state_by_id[id_]
print("Detected emotion:", state['emotions'][0]['emotion'])
print("Behavior at the time:", state['active_window'])
#plt.figure(figsize=(15,15))
#plt.imshow(state['screenshot'][:, :, ::-1])
###Output
_____no_output_____
###Markdown
Display Emotions over Time
###Code
x = []
y = []
for state in states:
#x.append(id_)
if len(state['emotions'])>0:
x.append(datetime.datetime.fromisoformat(state['timestamp']))
y.append(state['emotions'][0]['emotion'])
plt.plot(x, y, 'b.')
###Output
_____no_output_____
###Markdown
Correlations: Let's have a look at the behavior information and see if any terms correlate with any emotions.
###Code
emotion = 'neutral'
vocab = []
vocab_set = set(vocab)
X = []
Y = []
for state in states:
if len(state['emotions'])<1 or 'title' not in state['active_window']:
continue
info = state['active_window']['title']
X.append([0]*len(vocab))
# Simple tokenization
tokens = [t.lower() for t in info.split()]
# Create a bag of words vector
# (This implementation is not efficient at all, but we are dealing with small datasets for now)
for token in tokens:
if token not in vocab_set:
vocab.append(token)
vocab_set.update([token])
X[-1].append(0)
X[-1][vocab.index(token)] += 1
if state['emotions'][0]['emotion']==emotion:
Y.append(1)
else:
Y.append(0)
for i in range(len(X)):
if len(X[i])<len(vocab):
X[i].extend([0]*(len(vocab)-len(X[i])))
import numpy as np
X = np.array(X)
Y = np.array(Y)
correlations = []
for ix,token in enumerate(vocab):
correlations.append(np.corrcoef(X[:,ix], Y)[0,1])
args = np.argsort(correlations)
print("Most negatively correlating:")
for pos in args[:10]:
print("-", vocab[pos], correlations[pos])
print("\nMost positively correlating:")
for pos in args[::-1][:10]:
print("-", vocab[pos], correlations[pos])
###Output
_____no_output_____
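###Markdown
A sketch added here (not part of the original analysis): the hand-rolled vocabulary loop above can be replaced with scikit-learn's `CountVectorizer`, which builds the bag-of-words matrix in one call. Its default tokenization differs slightly from a plain `split()`, e.g. it drops punctuation and single-character tokens.
###Code
# Sketch: equivalent bag-of-words construction with scikit-learn.
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer

titles, labels = [], []
for state in states:
    if len(state['emotions']) < 1 or 'title' not in state['active_window']:
        continue
    titles.append(state['active_window']['title'])
    labels.append(1 if state['emotions'][0]['emotion'] == emotion else 0)

vectorizer = CountVectorizer(lowercase=True)
X_bow = vectorizer.fit_transform(titles).toarray()
Y_bow = np.array(labels)
print(X_bow.shape, Y_bow.shape)
###Output
_____no_output_____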
###Markdown
FASTGenomics Scanpy Analysis You might want to describe your analysis briefly here, if you are planning to share it.
###Code
# Place all your Python imports here.
import fgread
import scanpy as sc
# Place all your parameter values here.
sc.settings.verbosity = 1 # scanpy verbosity: errors (0), warnings (1), info (2), hints (3)
###Output
_____no_output_____
###Markdown
Raw Data: First, the raw dataset(s) will be read into AnnData object(s). You can describe your data here using markdown or delete this text.
###Code
# Print metadata of all attached datasets
ds_info = fgread.ds_info()
ds_info
# Load the attached dataset into an AnnData object
adata = fgread.load_data() # If multiple datasets are attached, you have to select one by its id or title
# Alternatively, if you started the analysis without datasets, load your data from elsewhere
# (see our tutorial "How to Load Data in FASTGenomics (Python)")
###Output
_____no_output_____
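###Markdown
An alternative sketch (an assumption, with a placeholder path): if the expression data is available as a local `.h5ad` file instead of an attached FASTGenomics dataset, it can be read directly with scanpy.
###Code
# Sketch: load a local AnnData file with scanpy; the path below is a placeholder.
adata_local = sc.read_h5ad("path/to/your_dataset.h5ad")
adata_local
###Output
_____no_output_____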
###Markdown
1. Import Data
###Code
# path to data
RAW_DATA_FOLDER = '/Users/yogisharosarumaha/Documents/GitHub/predict_future_sales_kaggle/data/'
#Load Data
import pandas as pd
items =pd.read_csv(os.path.join(RAW_DATA_FOLDER, 'items.csv'))
item_categories =pd.read_csv(os.path.join(RAW_DATA_FOLDER,'item_categories.csv'))
shops =pd.read_csv(os.path.join(RAW_DATA_FOLDER,'shops.csv'))
train_df =pd.read_csv(os.path.join(RAW_DATA_FOLDER,'sales_train.csv'))
test_df =pd.read_csv(os.path.join(RAW_DATA_FOLDER,'test.csv'))
#Dataset informations
print('items: ' + str(items.shape))
print()
items.info(null_counts=True)
print()
print('-'*50)
print('item_categories :' + str(item_categories.shape))
print()
item_categories.info(null_counts=True)
print()
print('-'*50)
print('shops :' + str(shops.shape))
print()
shops.info(null_counts=True)
print('train_df :' + str(train_df.shape) )
print()
train_df.info(null_counts=True)
print('-'*50)
print()
print('test_df :' + str(test_df.shape))
print()
test_df.info(null_counts=True)
print('-'*50)
print('Proportion of unique item in train set : ' + str(train_df.item_id.nunique()) + ' / ' + str(items.item_id.nunique()))
print('Proportion of unique item in test set : ' + str(test_df.item_id.nunique()) + ' / ' + str(items.item_id.nunique()))
print()
print('Proportion of unique shops in train set : ' + str(train_df.shop_id.nunique()) + ' / ' + str(shops.shop_id.nunique()))
print('Proportion of unique shops in test set : ' + str(test_df.shop_id.nunique()) + ' / ' + str(shops.shop_id.nunique()))
###Output
train_df :(2935849, 6)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2935849 entries, 0 to 2935848
Data columns (total 6 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 date 2935849 non-null object
1 date_block_num 2935849 non-null int64
2 shop_id 2935849 non-null int64
3 item_id 2935849 non-null int64
4 item_price 2935849 non-null float64
5 item_cnt_day 2935849 non-null float64
dtypes: float64(2), int64(3), object(1)
memory usage: 134.4+ MB
--------------------------------------------------
test_df :(214200, 3)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 214200 entries, 0 to 214199
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ID 214200 non-null int64
1 shop_id 214200 non-null int64
2 item_id 214200 non-null int64
dtypes: int64(3)
memory usage: 4.9 MB
--------------------------------------------------
Proportion of unique item in train set : 21807 / 22170
Proportion of unique item in test set : 5100 / 22170
Proportion of unique shops in train set : 60 / 60
Proportion of unique shops in test set : 42 / 60
###Markdown
Data Overview
###Code
print('Train_data Minimum Date: ' + train_df['date'].min())
print('Train_data Maximum Date: ' + train_df['date'].max())
###Output
Train_data Minimum Date: 01.01.2013
Train_data Maximum Date: 31.12.2014
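###Markdown
A note added here (not in the original notebook): at this point `date` is still a string in dd.mm.yyyy format, so the minimum and maximum above are lexicographic rather than chronological. Parsing the column first gives the true date range.
###Code
# Sketch: parse the string dates before taking min/max.
true_dates = pd.to_datetime(train_df['date'], format='%d.%m.%Y', errors='coerce')
print('True minimum date:', true_dates.min())
print('True maximum date:', true_dates.max())
###Output
_____no_output_____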
###Markdown
2. Data Cleaning
###Code
#Display outliers for all variables
#Visible outliers in item_price
train_df.boxplot( rot=45)
#Display outliers for item count in a day
train_df.boxplot(column=['item_cnt_day'])
#Remove outliers based on boxplot
print('Data set size before removing outliers:', train_df.shape)
train_df = train_df[(train_df.item_price < 300000) & (train_df.item_cnt_day < 1000)]
print('Data set size after removing outliers:', train_df.shape)
#Display distribution of item price
plt.rcParams['figure.figsize'] = (13,7)
sns.distplot(train_df['item_price'], color = 'red')
plt.title('Distribution of Item Price',fontsize=20)
plt.xlabel('Item price',fontsize=15)
plt.ylabel('Density')
plt.show()
###Output
_____no_output_____
###Markdown
There's a chunk of items with 0 item prices; these are considered outliers.
###Code
#Display rows where the daily item count is negative
train_df[train_df['item_cnt_day'] < 0].head()
#Item price should be at least 1, not 0
print('Data size before remove 0 item price:', train_df.shape)
train_df = train_df.query('item_price > 0')
print('Data size after remove 0 item price:', train_df.shape)
###Output
Data size before remove 0 item price: (2935846, 6)
Data size after remove 0 item price: (2935845, 6)
###Markdown
3. Data Transformation
###Code
# Parse the date column (dd.mm.yyyy format)
train_df['date'] = pd.to_datetime(train_df['date'], format='%d.%m.%Y', errors='coerce')
# Create column for month
train_df['month'] = train_df['date'].dt.month
# Create column for year
train_df['year'] = train_df['date'].dt.year
# Create column for week
train_df['week'] = train_df['date'].dt.isocalendar().week
# View columns
train_df.columns
train_df.head()
# Computing days with high demand
plt.rcParams['figure.figsize'] = (15, 7)
sns.countplot(train_df['date'])
plt.title('Shops with busy days', fontsize = 20)
plt.xlabel('Days', fontsize = 15)
plt.ylabel('Frequency', fontsize = 15)
plt.show()
# Computing Months with high demands
plt.rcParams['figure.figsize'] = (15, 7)
sns.countplot(train_df['month'], palette = 'dark')
plt.title('Shops with busy month', fontsize = 30)
plt.xlabel('Months', fontsize = 15)
plt.ylabel('Frequency', fontsize = 15)
plt.show()
# Convert data to Monthly
# Dataset only for monthly data
data = train_df.groupby([train_df['date'].apply(lambda x: x.strftime('%Y-%m')),'item_id','shop_id']).sum().reset_index()
# Get important attributes to add for the data
data = data[['date','item_id','shop_id','item_cnt_day']]
# Select attributes to observe in the dataset
data = data.pivot_table(index=['item_id','shop_id'], columns = 'date', values = 'item_cnt_day', fill_value = 0).reset_index()
# looking at the newly prepared dataset
data.shape
# Merge monthly sales data prepared to the test data set
test_df=pd.merge(test_df, data, on = ['item_id', 'shop_id'], how = 'left')
# filling the empty values
test_df.fillna(0, inplace = True)
# dataset check
test_df.head()
# Create Training data
x_train = test_df.drop(['2015-10', 'item_id', 'shop_id'], axis = 1)
y_train = test_df['2015-10']
# Omit first columns to predict sales data
x_test = test_df.drop(['2013-01', 'item_id', 'shop_id'], axis = 1)
# Dataset shape check
print("Shape of x_train :", x_train.shape)
print("Shape of x_test :", x_test.shape)
print("Shape of y_test :", y_train.shape)
# Splits data into training/testing
from sklearn.model_selection import train_test_split
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size = 0.2, random_state = 0)
# checking the shapes
print("Shape of x_train :", x_train.shape)
print("Shape of x_valid :", x_valid.shape)
print("Shape of y_train :", y_train.shape)
print("Shape of y_valid :", y_valid.shape)
###Output
Shape of x_train : (171360, 36)
Shape of x_valid : (42840, 36)
Shape of y_train : (171360,)
Shape of y_valid : (42840,)
###Markdown
Modeling
###Code
#Get time to run model
ts = time.time()
from lightgbm import LGBMRegressor
model_lgb = LGBMRegressor( n_estimators=500,
learning_rate=0.009,
num_leaves=100,
colsample_bytree=0.95,
subsample=0.90,
max_depth=10,
reg_alpha=0.4,
reg_lambda=0.1,
min_split_gain=0.1,
min_child_weight=40)
model_lgb.fit(x_train, y_train)
y_pred_lgb = model_lgb.predict(x_test)
print("It took : " + str(time.time() - ts) + " to run")
###Output
It took : 7.88634991645813 to run
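###Markdown
A sketch added here (not part of the original notebook): the validation split created earlier (`x_valid`, `y_valid`) is not otherwise used, but it can be scored to get an offline estimate of model quality.
###Code
# Sketch: evaluate the fitted LightGBM model on the held-out validation split.
import numpy as np
from sklearn.metrics import mean_squared_error

y_valid_pred = model_lgb.predict(x_valid).clip(0., 20.)
valid_rmse = np.sqrt(mean_squared_error(y_valid, y_valid_pred))
print('Validation RMSE:', valid_rmse)
###Output
_____no_output_____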
###Markdown
Generate Prediction
###Code
# Test set and clip certain range
y_pred_lgb = model_lgb.predict(x_test).clip(0., 20.)
# File for submission
preds = pd.DataFrame(y_pred_lgb, columns=['item_cnt_month'])
preds.to_csv('submission.csv',index_label='ID')
###Output
_____no_output_____
###Markdown
Most Important Variables
###Code
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
feature_imp = pd.DataFrame(sorted(zip(model_lgb.feature_importances_,x_train.columns)), columns=['Value','Feature'])
plt.figure(figsize=(20, 10))
sns.barplot(x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False))
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
plt.show()
plt.savefig('lgbm_importances-01.png')
###Output
_____no_output_____
###Markdown
**Gather Data for Seattle and Boston**
###Code
# imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# read calendar seattle
df_cal_s = pd.read_csv('./calendar_seattle.csv')
# read calendar boston
df_cal_b = pd.read_csv('./calendar_boston.csv')
###Output
_____no_output_____
###Markdown
**Assess Data**
###Code
df_cal_s.head() # seattle
df_cal_b.head() # boston
###Output
_____no_output_____
###Markdown
Business Question 1: How is the distribution of the home prices, and are there differences between both cities? Data Preparation: dropping NA price values because they don't provide information for this analysis; formatting the datetime and using the date as index; categorizing the price values into ranges to make the plot more expressive. **Clean Data and analyze**
###Code
# clean price values
df_cal_s['price'] = df_cal_s['price'].replace({'\$':''}, regex = True).dropna().squeeze()
df_cal_s['price'] = pd.to_numeric(df_cal_s['price'], errors='coerce')
# format datetime
df_cal_s['date'] = pd.to_datetime(df_cal_s[['date']].squeeze())
df_cal_s = df_cal_s.set_index('date')
# calc avg price and occupancy rate
print('Seattle')
print("average price: ",df_cal_s['price'].mean())
print("occupancy rate (False: sold): ", df_cal_s['price'].isna().value_counts() / len(df_cal_s['price']) * 100)
# clean price values
df_cal_b['price'] = df_cal_b['price'].replace({'\$':''}, regex = True).dropna().squeeze()
df_cal_b['price'] = pd.to_numeric(df_cal_b['price'], errors='coerce')
# format datetime
df_cal_b['date'] = pd.to_datetime(df_cal_b[['date']].squeeze())
df_cal_b = df_cal_b.set_index('date')
# calc avg price and occupancy rate
print('Boston')
print("average price: ",df_cal_b['price'].mean())
print("occupancy rate (False: sold): ", df_cal_b['price'].isna().value_counts() / len(df_cal_b['price']) * 100)
###Output
Seattle
average price: 137.19222676140043
occupancy rate (False: sold): False 67.010986
True 32.989014
Name: price, dtype: float64
Boston
average price: 192.45390955690283
occupancy rate (False: sold): True 51.067775
False 48.932225
Name: price, dtype: float64
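###Markdown
A sketch added here (not in the original, and assuming both calendar files share the same schema): the occupancy can also be computed from the `available` column ('f' meaning the night is not available, i.e. booked), which avoids relying on missing prices.
###Code
# Sketch: booked share per city based on the 'available' flag.
for name, df_cal in [('Seattle', df_cal_s), ('Boston', df_cal_b)]:
    booked_share = (df_cal['available'] == 'f').mean() * 100
    print(f"{name}: {booked_share:.1f}% of listing-nights are booked")
###Output
_____no_output_____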
###Markdown
**Data understanding:** The average price differs considerably between the two cities; whether that changes over time should be analysed. The utilization of the homes also seems to differ: the percentage of _False_ means the homes are sold, so in Boston many more homes are free and the supply seems to be much higher than the demand. **Visualise data**
###Code
# making a figure
fig = plt.figure()
# getting categories
cat_s = pd.cut(df_cal_s['price'], bins=[0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 300, 500, 1000, 3000], include_lowest=True)
cat_b = pd.cut(df_cal_b['price'], bins=[0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 300, 500, 1000, 3000], include_lowest=True)
# count categories
cat_counts_s = cat_s.value_counts(sort=False)/len(df_cal_s)
cat_counts_b = cat_b.value_counts(sort=False)/len(df_cal_b)
# plot categories
cat_counts_s.plot(kind='bar', color='c', width = 0.4, position=1, label='Seattle', legend=True)
cat_counts_b.plot(kind='bar', color='b', width = 0.4, position=0, label='Boston', legend=True)
# plot layout
plt.ylabel("price distribution")
plt.tight_layout()
plt.show()
# save fig
fig.savefig('./occupany.png')
###Output
_____no_output_____
###Markdown
Are there price changes during the year, and are they caused by any events? Approach: group by date and use the mean value of all listings at each point in time; rotate the date labels to make the axis ticks readable. **Visualise data**
###Code
# Start and end of the date range to extract
start, end = '2014-01', '2023-01'
# Plot the daily average price for both cities
fig, ax = plt.subplots()
ax.plot(df_cal_s.loc[start:end, 'price'].groupby('date').mean(), marker='.', linestyle='-', color='c', markersize=1, label='Seattle')
ax.plot(df_cal_b.loc[start:end, 'price'].groupby('date').mean(), marker='.', linestyle='-', color='b', markersize=1, label='Boston')
ax.set_ylabel('avg price [$]')
ax.legend()
plt.xticks(rotation=90)
plt.tight_layout()
fig = ax.get_figure()
fig.savefig('./avg_price.png')
###Output
_____no_output_____
###Markdown
**Data understanding:** In Jan/Feb/March the prices are low, rise until midsummer, and go down again towards the winter months. There is a sharp price drop in Boston, perhaps because the prices were too high and demand for the homes was too low. In April there must have been a local event in Boston causing this price peak. Unused analysis: I wanted to make a plot about the utilization of the homes.
###Code
#print(df_cal_s)
df_s = df_cal_s[df_cal_s.available != 'f']
#df_b = df_cal_b[df_cal_s.available != 'f']
auslastung_s = df_s.groupby('date')['available'].value_counts()
#auslastung_b = df_b.groupby('date')['available'].value_counts()
print(auslastung_s)
auslastung_s.plot()
#auslastung_b.plot()
###Output
_____no_output_____
###Markdown
Are there differently weighted influences on the total review score value? Making a linear model to predict the **review_scores_rating** to see which coefficients are most significant, and comparing the output for Seattle and Boston. Includes some functions from the lesson.
###Code
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

def create_dummy_df(df, cat_cols, dummy_na):
'''
INPUT:
df - pandas dataframe with categorical variables you want to dummy
cat_cols - list of strings that are associated with names of the categorical columns
dummy_na - Bool holding whether you want to dummy NA vals of categorical columns or not
OUTPUT:
df - a new dataframe that has the following characteristics:
1. contains all columns that were not specified as categorical
2. removes all the original columns in cat_cols
3. dummy columns for each of the categorical columns in cat_cols
4. if dummy_na is True - it also contains dummy columns for the NaN values
5. Use a prefix of the column name with an underscore (_) for separating
'''
for col in cat_cols:
try:
# for each cat add dummy var, drop original column
df = pd.concat([df.drop(col, axis=1), pd.get_dummies(df[col], prefix=col, prefix_sep='_', drop_first=True, dummy_na=dummy_na)], axis=1)
except:
continue
return df
def clean_fit_linear_mod(df, response_col, cat_cols, dummy_na, test_size=.3, rand_state=42):
'''
INPUT:
df - a dataframe holding all the variables of interest
response_col - a string holding the name of the column
cat_cols - list of strings that are associated with names of the categorical columns
dummy_na - Bool holding whether you want to dummy NA vals of categorical columns or not
test_size - a float between [0,1] about what proportion of data should be in the test dataset
rand_state - an int that is provided as the random state for splitting the data into training and test
OUTPUT:
test_score - float - r2 score on the test data
train_score - float - r2 score on the test data
lm_model - model object from sklearn
X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
Your function should:
1. Drop the rows with missing response values
2. Drop columns with NaN for all the values
3. Use create_dummy_df to dummy categorical columns
4. Fill the mean of the column for any missing values
5. Split your data into an X matrix and a response vector y
6. Create training and test sets of data
7. Instantiate a LinearRegression model with normalized data
8. Fit your model to the training data
9. Predict the response for the training data and the test data
10. Obtain an rsquared value for both the training and test data
'''
# Drop the rows with missing response values
df = df.dropna(subset=[response_col], axis=0)
# Drop columns with all NaN values
df = df.dropna(how='all', axis=1)
# Dummy categorical variables
df = create_dummy_df(df, cat_cols, dummy_na)
# Mean function
fill_mean = lambda col: col.fillna(col.mean())
# Fill the mean
df = df.apply(fill_mean, axis=0)
# Split into explanatory and response variables
X = df.drop(columns=[response_col], axis=1)
y = df[response_col]
# Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=rand_state)
lm_model = LinearRegression(normalize=True) # Instantiate
lm_model.fit(X_train, y_train) # Fit
# Predict using your model
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
# Score using your model
test_score = r2_score(y_test, y_test_preds)
train_score = r2_score(y_train, y_train_preds)
return test_score, train_score, lm_model, X_train, X_test, y_train, y_test
def coef_weights(coefficients, X_train):
'''
INPUT:
coefficients - the coefficients of the linear model
X_train - the training data, so the column names can be used
OUTPUT:
coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate)
Provides a dataframe that can be used to understand the most influential coefficients
in a linear model by providing the coefficient estimates along with the name of the
variable attached to the coefficient.
'''
coefs_df = pd.DataFrame()
coefs_df['est_int'] = X_train.columns
    coefs_df['coefs'] = coefficients
    coefs_df['abs_coefs'] = np.abs(coefficients)
coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
return coefs_df
###Output
_____no_output_____
###Markdown
**Gather & Assess Data**
###Code
# Read in listings data and store in a list
df_lis_s = pd.read_csv('./listings_seattle.csv')
df_lis_b = pd.read_csv('./listings_boston.csv')
df_lists = [df_lis_s, df_lis_b]
###Output
_____no_output_____
###Markdown
**Clean data - analyze and model**
###Code
# Loop for seattle and boston
for df_list in df_lists:
# Filter for categorical variables
df_cat = df_list.select_dtypes(include=[np.number])
cat_cols_lst = df_cat.select_dtypes(include=['object']).columns
# Value of interest:
response_col = 'review_scores_rating'
# Clean and fit linear model
test_score, train_score, lm_model, X_train, X_test, y_train, y_test = clean_fit_linear_mod(df_cat, 'review_scores_rating', cat_cols_lst, dummy_na=False)
print("test_score, train_score: ", test_score, train_score)
# Calc the coef weights
coef_df = coef_weights(lm_model.coef_, X_train)
###Output
test_score, train_score: -190.06134734358062 -212.10421669598512
test_score, train_score: 0.7075743811567 0.7986075716094313
###Markdown
**Visualize coefs with weights**
###Code
# relevant for my analysis are just the review scores
review_scores = ['review_scores_location','review_scores_value','review_scores_cleanliness','review_scores_checkin', 'review_scores_accuracy','review_scores_communication']
# Show the 20 most significant influencing variables
print(coef_df.head(20))
###Output
est_int coefs abs_coefs
25 review_scores_value 2.929826 2.929826
21 review_scores_cleanliness 2.703228 2.703228
22 review_scores_checkin 1.688343 1.688343
23 review_scores_communication 1.650156 1.650156
6 longitude 1.495893 1.495893
20 review_scores_accuracy 1.473371 1.473371
5 latitude 1.330772 1.330772
24 review_scores_location 0.612614 0.612614
9 bedrooms 0.330221 0.330221
27 reviews_per_month -0.249368 0.249368
8 bathrooms 0.199281 0.199281
7 accommodates -0.189503 0.189503
10 beds 0.175778 0.175778
12 guests_included 0.088851 0.088851
15 availability_30 -0.034356 0.034356
13 minimum_nights -0.024564 0.024564
26 calculated_host_listings_count -0.017771 0.017771
16 availability_60 0.010719 0.010719
19 number_of_reviews 0.006389 0.006389
3 host_listings_count 0.002396 0.002396
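###Markdown
A small addition (not in the original): the `review_scores` list defined above is not used anywhere, but it can be applied to restrict the coefficient table to the review sub-scores, which are the features of interest for this question.
###Code
# Sketch: keep only the review sub-score coefficients from the last fitted model.
print(coef_df[coef_df['est_int'].isin(review_scores)])
###Output
_____no_output_____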
###Markdown
Statistical analyses for the Percolation Theory Simulator: The purpose of this notebook is to analyze the phenomenon of percolation. 1. We want to see whether the percolation threshold depends on the size. 2. We want to analyze the distribution as the sample size gets increasingly larger. For this purpose we import the `api_utils` library, which implements the `APIConnector` class.
###Code
from api_utils import APIConnector
###Output
_____no_output_____
###Markdown
Now we want to define the server address, port and API path.
###Code
SERVER_ADDRESS = "0.0.0.0"
SERVER_PORT = "5000"
SERVER_PATH = "simulation/simulate"
ac = APIConnector(SERVER_ADDRESS, SERVER_PORT, SERVER_PATH)
print(ac.base_path)
###Output
http://0.0.0.0:5000/simulation/simulate/
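###Markdown
A hedged sketch (an assumption, not the original `api_utils` implementation): `APIConnector` behaves like a thin wrapper around HTTP GET requests to the simulator, and `get_data(n, sample_size)` is expected to return a JSON payload containing at least the keys `"threshold"` and `"results"`, as used below.
###Code
# Illustrative sketch only; endpoint and parameter names are assumptions.
import requests

class APIConnectorSketch:
    def __init__(self, address, port, path):
        self.base_path = f"http://{address}:{port}/{path}/"

    def get_data(self, n, sample_size):
        # One GET request per simulation batch; the server is assumed to return JSON.
        response = requests.get(self.base_path, params={"n": n, "sample_size": sample_size})
        response.raise_for_status()
        return response.json()
###Output
_____no_output_____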
###Markdown
1. How does size affect the Percolation Threshold? The purpose of this section is to analyze whether size affects the percolation threshold (which is defined as the probability of a site being open). Data generation: First we need to generate some simulation data. The simulation uses a minimum size of $n = 1$ and a maximum size of $n = 20$ (thus avoiding expensive simulations as the lattice grows). The sample size will be $sample\_size = 100$, to reduce the standard error due to the sampling procedure.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
min_size = 1
max_size = 20
sample_size = 100
# Run only once. It might take up to 10 minutes
sample_thresholds = [ac.get_data(n, sample_size).get("threshold") for n in range(min_size, max_size + 1)]
###Output
_____no_output_____
###Markdown
Data Visualization: Let's now visualize the results.
###Code
plt.title("Percolation Threshold by size")
plt.xlabel("Size")
plt.ylabel("Threshold")
plt.bar(range(min_size, max_size + 1), sample_thresholds)
plt.xticks(range(min_size, max_size + 1))
plt.show()
###Output
_____no_output_____
###Markdown
Hypothesis Testing using the _Chi-Squared Test_: At first sight we see that, except for size one, all the thresholds are roughly equal. If we want to be rigorous about this, we can use the statistical framework, in particular the _Chi-Squared Test_, to see whether these differences are truly relevant (_Alternative Hypothesis_) or just due to random chance (_Null Hypothesis_). It's easy to see why for size one the threshold is 1: in order for the system to percolate, the only site has to be open. Having clarified this, and realizing that the percolation threshold is just the proportion of open sites, we can apply the _Chi-Squared Test_ using the relevant `scipy.stats` module.
###Code
from scipy.stats import chisquare
chi2, pval = chisquare(sample_thresholds)
print(f"Calculated chi2 statistic is {chi2} and p-value {pval}")
###Output
Calculated chi2 statistic is 0.9768659203176914 and p-value 0.9999999993722517
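###Markdown
A clarification added here (not in the original): when no expected frequencies are passed, `scipy.stats.chisquare` tests the observed values against their own mean, i.e. against the hypothesis that all thresholds are equal. The statistic can be reproduced by hand:
###Code
# Sketch: reproduce the chi-squared statistic reported above.
observed = np.array(sample_thresholds)
expected = np.full(len(observed), observed.mean())
chi2_manual = np.sum((observed - expected) ** 2 / expected)
print(chi2_manual)
###Output
_____no_output_____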
###Markdown
Conclusion: With such a huge p-value (0.99) it is impossible to reject the null hypothesis, so we can affirm that __the percolation threshold is not affected by the size__. 2. Distribution Analysis as the sample size varies: Let's now see what varying the sample size does to the distribution. Even though this is a purely statistical matter, we also want to look at the shape of the distribution and compare it with the Normal Distribution. Data: Since we realized that the threshold does not vary with size (for $n \gt 1$), we can choose $n = 10$ and let the sample size vary in the range $[20,\ 200]$ with a step of $20$. For each of these samples, we perform a normality test to see whether the distribution is normal or not.
###Code
from scipy.stats import normaltest
n_sample_size = 10
min_sample_size = 20
max_sample_size = 200
step = 20
resulting_pvals = []
for sample_size in range(min_sample_size, max_sample_size + 1, step):
sample = ac.get_data(n_sample_size, sample_size).get("results")
resulting_pvals += [normaltest(sample)[1]]
resulting_pvals = np.array(resulting_pvals)
###Output
_____no_output_____
###Markdown
Analysis: Let's check for which sample sizes the normality condition holds. To do so, we choose a significance level of $\alpha = 0.05$; when the resulting p-value is greater than this level, we cannot reject the hypothesis that the sample comes from a normal distribution. Otherwise, we reject that hypothesis.
###Code
alpha = 0.05
np.where(resulting_pvals > alpha)[0].tolist()
###Output
_____no_output_____
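###Markdown
A small addition (not in the original): the indices above can be mapped back to the actual sample sizes for which normality could not be rejected.
###Code
# Sketch: translate the indices returned by np.where into sample sizes.
normal_sizes = [min_sample_size + i * step for i in np.where(resulting_pvals > alpha)[0]]
print(normal_sizes)
###Output
_____no_output_____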
###Markdown
The results look interesting, but it would be nice to repeat the process several times (say 20) and see whether the results hold again. What we are going to do is: for each size, keep track of how many times the sample is normal, and see if there is consistency among the results.
###Code
repeat = 20
results = [0 for _ in range(min_sample_size, max_sample_size + 1, step)]
for _ in range(repeat):
resulting_pvals = []
for sample_size in range(min_sample_size, max_sample_size + 1, step):
sample = ac.get_data(n_sample_size, sample_size).get("results")
resulting_pvals += [normaltest(sample)[1]]
resulting_pvals = np.array(resulting_pvals)
for index in np.where(resulting_pvals > alpha)[0].tolist():
results[index] += 1
frequencies = [result / repeat for result in results]
for i, frequency in enumerate(frequencies):
print(f"For size {min_sample_size + i * step}, the sample resulted normal {frequency * 100}% times.")
###Output
For size 20, the sample resulted normal 75.0% times.
For size 40, the sample resulted normal 95.0% times.
For size 60, the sample resulted normal 75.0% times.
For size 80, the sample resulted normal 40.0% times.
For size 100, the sample resulted normal 20.0% times.
For size 120, the sample resulted normal 5.0% times.
For size 140, the sample resulted normal 0.0% times.
For size 160, the sample resulted normal 0.0% times.
For size 180, the sample resulted normal 0.0% times.
For size 200, the sample resulted normal 0.0% times.
###Markdown
Atlantic Hurricanes from 2005-2015
###Code
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
data = pd.read_csv("https://people.sc.fsu.edu/~jburkardt/data/csv/hurricanes.csv")
data
df = data.iloc[:,np.r_[2:len(data.columns)]]
df.columns = ['2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']
yearly_totals = df.sum(axis=0)
yearly_totals
plt.title('# Hurricanes and Tropical Storms in the Atlantic')
yearly_totals.plot.bar()
###Output
_____no_output_____
###Markdown
Manuscript Figures: 3.1 Optimization of A21 Complexes---
###Code
# ==> Fig. 2 <==
data = stdbas
x = [x * 3 for x in range(1, len(data)+1)]
titles = {'df-b3lyp-d3': 'DF-B3LYP-D3', 'df-wpbe-d3': 'DF-$\omega$PBE-D3', 'df-b97-d3': 'DF-B97-D3',
'df-wb97x-d': 'DF-$\omega$B97X-D', 'df-m05-2x': 'DF-M05-2X'}
boxcolors = ['pink', 'lightblue']
dcom_patch = mpatches.Patch(color='pink', label='A21 $\Delta$COM Signed Error')
lrmsd_patch = mpatches.Patch(color='lightblue', label='A21 LRMSD')
# Boxplot & legend options
meanprops = {'marker': 's', 'markeredgecolor': 'k', 'markerfacecolor': 'k', 'label': 'blargh', #'markersize': 5
}
boxprops = {'linewidth': 1.5}
medianprops = dict(linestyle='-', linewidth=1.5, color='k')
medianprops_dcom = dict(linestyle='-', linewidth=1.5, color='cyan')
medianprops_lrmsd = dict(linestyle='-', linewidth=1.5, color='m')
whiskerprops = dict(linestyle='-', linewidth=1.5, color='k')
k_square = mlines.Line2D([], [], color='k', marker='s', markersize=7, label='A21 Mean dCOM', linewidth=0)
plt.rcParams['figure.figsize'] = [10,15]
f, axarr = plt.subplots(3, 1, figsize=(10,15), sharex=True)
k = 0
for m in mt.columns.levels[0][1:]:
# Plot
i = 0
for b in stdbas:
dCOM_data = a21_serr.loc[idx['a24'], idx[m,b,'dCOM']].values.reshape(-1,1)
lrmsd_data = a21_serr.loc[idx['a24'], idx[m,b,'LRMSD']].values.reshape(-1,1)
bp = axarr[k].boxplot(dCOM_data, positions=[x[i]-1],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
showfliers=False,
widths=0.5,
patch_artist=True)
bx = axarr[k].boxplot(lrmsd_data, positions=[x[i]],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
for patch in bp['boxes']:
patch.set_facecolor(boxcolors[0])
for patch in bx['boxes']:
patch.set_facecolor(boxcolors[1])
i += 1
# Plot Options
plt.xticks([i-0.5 for i in x], data)
axarr[k].set_xlim(x[0] - 1.5, x[-1] + 0.5)
axarr[k].set_ylim(-0.3, 0.3)
axarr[k].set_ylabel('A21 $\Delta$COM Signed Error & LRMSD ($\AA$)')
axarr[k].hlines(0, 0, x[-1] + 1, linestyle='--', linewidth=1, zorder=1)
plt.legend(handles=[dcom_patch, lrmsd_patch], loc='lower right', fontsize='x-large')
axarr[k].fill_between(np.arange(x[0]-4,x[-1]+2), -0.1, 0.1, facecolor='grey', alpha=0.1)
axarr[k].fill_between(np.arange(x[0]-4,x[-1]+2), -0.05, 0.05, facecolor='grey', alpha=0.2)
axarr[k].fill_between(np.arange(x[0]-4,x[-1]+2), -0.01, 0.01, facecolor='grey', alpha=0.3)
k += 1
# ==> Fig. 3a <==
plt.rcParams["figure.figsize"] = [10,5]
x = [x * 3 for x in range(1, len(mt.columns.levels[0][1:])+1)]
ticklabels = {'df-b3lyp-d3': 'DF-B3LYP-D3', 'df-b97-d3': 'DF-B97-D3',
'df-m05-2x': 'DF-M05-2X'}
titles = {'DZ': 'cc-pVDZ', 'TZ': 'cc-pVTZ', 'aDZ': 'aug-cc-pVDZ', 'aTZ': 'aug-cc-pVTZ'}
# Boxplot & legend options
meanprops = {'marker': 's', 'markeredgecolor': 'k', 'markerfacecolor': 'k', 'label': 'blargh', #'markersize': 5
}
k_square = mlines.Line2D([], [], color='k', marker='s', markersize=7, label='A21 Mean LRMSD', linewidth=0)
for b in stdbas:
plt.figure(figsize=(10,5))
boxes = mt.loc[idx['a24'], idx[list(ticklabels.keys())[:],b,'LRMSD']].plot.box(whis='range', showmeans=True, positions=x, meanprops=meanprops)
i = 0
hb_dots = mx_dots = dd_dots = []
for m in mt.columns.levels[0][1:]:
hb = plt.scatter([x[i] - 1.5]*len(a24_hb), mt.loc[idx['a24', a24_hb], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='r', label='HB Subset Members')
mx = plt.scatter([x[i] - 1]*len(a24_mx), mt.loc[idx['a24', a24_mx], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='g', label='MX Subset Members')
dd = plt.scatter([x[i] - 0.5]*len(a24_dd), mt.loc[idx['a24', a24_dd], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='b', label='DD Subset Members')
i+=1
plt.xlim(0.5, x[-1] + 0.5)
plt.ylim(0, 0.55) if b == 'aTZ' else None
plt.xticks([i-0.75 for i in x], [ticklabels[i] for i in mt.columns.levels[0][1:-2]])
plt.ylabel('LRMSD of Optimized Geometry ($\AA$)')
plt.title(titles[b])
plt.legend(handles=[hb, mx, dd, k_square])
ax = plt.gca()
ax.fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.1, facecolor='grey', alpha=0.1)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.05, facecolor='grey', alpha=0.2)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.01, facecolor='grey', alpha=0.3)
# ==> Fig. 3b <==
plt.rcParams["figure.figsize"] = [10,5]
x = [x * 3 for x in range(1, len(stdbas))]
ticklabels = {'df-b3lyp-d3': 'DF-B3LYP-D3', 'df-b97-d3': 'DF-B97-D3',
'df-m05-2x': 'DF-M05-2X'}
titles = {'DZ': 'cc-pVDZ', 'TZ': 'cc-pVTZ', 'aDZ': 'aug-cc-pVDZ', 'aTZ': 'aug-cc-pVTZ'}
# Boxplot & legend options
meanprops = {'marker': 's', 'markeredgecolor': 'k', 'markerfacecolor': 'k', 'label': 'blargh', #'markersize': 5
}
k_square = mlines.Line2D([], [], color='k', marker='s', markersize=7, label='A21 Mean LRMSD', linewidth=0)
for b in mt.columns.levels[1][1:]:
# Plot
plt.rcParams["figure.figsize"] = [10,5]
fig = plt.figure(figsize=(10,5))
bp = a21_serr.loc[idx['a24'], idx[:,b,'dCOM']].plot.box(whis='range', showmeans=True, positions=x,
meanprops=meanprops)
i = 0
for m in mt.columns.levels[0][1:]:
hb = plt.scatter([x[i] - 1.5]*len(a24_hb), a21_serr.loc[idx['a24', a24_hb], idx[m,b,'dCOM']],
facecolors='none', edgecolors='r', label='HB Subset Members')
mx = plt.scatter([x[i] - 1]*len(a24_mx), a21_serr.loc[idx['a24', a24_mx], idx[m,b,'dCOM']],
facecolors='none', edgecolors='g', label='MX Subset Members')
dd = plt.scatter([x[i] - 0.5]*len(a24_dd), a21_serr.loc[idx['a24', a24_dd], idx[m,b,'dCOM']],
facecolors='none', edgecolors='b', label='DD Subset Members')
i+=1
# Plot Options
plt.xlim(0.5, x[-1] + 0.5)
plt.ylim(-0.15, 0.3) if b == 'aTZ' else None
plt.xticks([i-0.75 for i in x], [ticklabels[i] for i in mt.columns.levels[0][1:]])
plt.ylabel('A21 dCOM Signed Error ($\AA$)')
plt.title(titles[b])
plt.legend(handles=[hb, mx, dd, k_square])
ax = plt.gca()
ax.fill_between(np.arange(x[0]-4,x[-1]+2), -0.1, 0.1, facecolor='grey', alpha=0.1)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), -0.05, 0.05, facecolor='grey', alpha=0.2)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), -0.01, 0.01, facecolor='grey', alpha=0.3)
###Output
_____no_output_____
###Markdown
3.2 Prediction of Optimal Intermolecular Separation in NBC7x and HBC6 Interaction Energy Scans----
###Code
# ==> Fig. 4 <==
plt.rcParams['figure.figsize'] = [8,6]
mtdlabel = {'df-b3lyp-d3': 'B3LYP-D3', 'df-b97-d3': 'B97-D3', 'df-m05-2x': 'M05-2X', 'REF': 'CCSD(T)/CBS'}
dbse_label = {'hbc6': 'HBC6', 'nbc10ext': 'NBC7x'}
system_id = {'faoofaoo': '1', 'faonfaon': '2', 'fannfann': '3',
'faoofaon': '4', 'faonfann': '5', 'faoofann': '6',
'BzBz-S': '1', 'BzBz-T': '2', 'BzH2S': '4', 'BzMe': '5',
'MeMe': '6', 'PyPy-S2': '7', 'PyPy-T3': '8'
}
bas_label = {'DZ': 'cc-pVDZ', 'aDZ': 'aug-cc-pVDZ', 'TZ': 'cc-pVTZ', 'aTZ': 'aug-cc-pVTZ'}
colors = ['r','b','g']
markers = ['s','>','*']
d = 'hbc6'
b = 'DZ'
cp = 'unCP'
s = 'fannfann'
if s != 'faoofann':
minie = []
maxie = []
curvemins = []
j = 0
for m in scans.columns.levels[0][1:]:
curve = scans.loc[idx[d,s], idx[m,b,cp]]
plt.plot(curve[0], curve[1], color=colors[j], marker=markers[j], label=mtdlabel[m])
pesmin = pes.loc[idx[d,s], idx[m,b,cp]]
plt.vlines(pesmin, -30, curve.min(), linestyle='--', color=colors[j], linewidth=2)
minie.append(curve[1].min())
maxie.append(curve[1].max())
curvemins.append(pesmin)
j+=1
# Plot reference curve
ref = scans.loc[idx[d,s], idx['REF']].values[0]
minie.append(ref[1].min())
maxie.append(ref[1].max())
plt.plot(ref[0],ref[1],'ko-', label=mtdlabel['REF'],zorder=5)
refmin = pes.loc[idx[d,s], idx['REF']].values[0]
plt.vlines(refmin, -30, ref.min(), linestyle='--', color='k', linewidth=2)
curvemins.append(refmin)
print(max(maxie))
# Plot Options
plt.xlim(ref[0].min()-0.05, refmin+.9)
plt.ylim(min(minie) - 2,1)
plt.minorticks_on()
ax = plt.gca()
ax.tick_params(axis='y',which='minor',bottom='off')
plt.xlabel('Intermolecular Separation, $R$ ($\AA$)',fontsize='xx-large')
plt.ylabel('Interaction Energy (kcal/mol)',fontsize='xx-large')
plt.legend(loc='upper left',fontsize='x-large')#, ncol=2)
# Create inset
axins = zoomed_inset_axes(ax, 2, loc=1)
j=0
for m in scans.columns.levels[0][1:]:
curve = scans.loc[idx[d,s], idx[m,b,cp]]
axins.plot(curve[0], curve[1], color=colors[j], marker=markers[j], label=mtdlabel[m])
pesmin = pes.loc[idx[d,s], idx[m,b,cp]]
axins.vlines(pesmin, -30, curve.min(), linestyle='--', color=colors[j], linewidth=2)
j+=1
axins.plot(ref[0],ref[1],'ko-', label=mtdlabel['REF'])
axins.vlines(refmin, -30, ref.min(), linestyle='--', color='k', linewidth=2)
mins = np.array(curvemins)
axins.set_xlim(mins.min()-0.05, mins.max()+0.05) # apply the x-limits
axins.set_ylim(min(minie) - 1,max(minie) + 1) # apply the y-limits
axins.xaxis.set_tick_params(labelsize=12)
axins.yaxis.set_tick_params(labelsize=12)
axins.minorticks_on()
axins.tick_params(axis='y',which='minor',bottom='on')
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="silver", ls='--', zorder=0)
else:
minie = []
maxie = []
curvemins = []
j = 0
for m in scans.columns.levels[0][1:]:
curve = scans.loc[idx[d,s], idx[m,b,cp]]
plt.plot(curve[0], curve[1], color=colors[j], marker=markers[j], label=mtdlabel[m])
j+=1
# Plot reference curve
ref = scans.loc[idx[d,s], idx['REF']].values[0]
plt.plot(ref[0],ref[1],'ko-', label=mtdlabel['REF'],zorder=5)
# Plot Options
plt.title('%s-%s: %s Curves with the %s basis' % (dbse_label[d], system_id[s], cp,
bas_label[b]))
plt.hlines(0,0,12,linestyle='-',linewidth=1)
plt.xlim(ref[0].min()-0.1, ref[0].max()+.1)
#plt.ylim(min(minie) - 2,12)
plt.ylim(-28, 8)
plt.minorticks_on()
ax = plt.gca()
ax.tick_params(axis='y',which='minor',bottom='off')
plt.xlabel('Intermolecular Separation, $R$ ($\AA$)',fontsize='xx-large')
plt.ylabel('Interaction Energy (kcal/mol)',fontsize='xx-large')
plt.legend(loc='upper left',fontsize='x-large',ncol=2)
# ==> Fig. 5 <==
m = mt.columns.levels[0][1:]
x = [x * 3 for x in range(1, len(stdbas)+1)]
titles = {'df-b3lyp-d3': 'DF-B3LYP-D3', 'df-wpbe-d3': 'DF-$\omega$PBE-D3', 'df-b97-d3': 'DF-B97-D3',
'df-wb97x-d': 'DF-$\omega$B97X-D', 'df-m05-2x': 'DF-M05-2X'}
boxcolors = ['xkcd:light red','lightblue','lightgreen']
b3lyp_patch = mpatches.Patch(color='xkcd:light red', label='DF-B3LYP-D3')
b97_patch = mpatches.Patch(color='lightblue', label='DF-B97-D3')
m05_patch = mpatches.Patch(color='lightgreen', label='DF-M05-2X')
# Boxplot & legend options
meanprops = {'marker': 's', 'markeredgecolor': 'k', 'markerfacecolor': 'k', 'label': 'blargh'}
boxprops = {'linewidth': 1.5}
medianprops = dict(linestyle='-', linewidth=1.5, color='k')
medianprops_dcom = dict(linestyle='-', linewidth=1.5, color='cyan')
medianprops_lrmsd = dict(linestyle='-', linewidth=1.5, color='m')
whiskerprops = dict(linestyle='-', linewidth=1.5, color='k')
k_square = mlines.Line2D([], [], color='k', marker='s', markersize=7, linewidth=0)
plt.rcParams['figure.figsize'] = [20,10]
f, axarr = plt.subplots(2, 2, figsize=(20,10), sharex=True)
# ==> Plot <==
# Plot NBC7x/CP on axarr[0,0]
for i in range(len(stdbas)):
b1 = axarr[0,0].boxplot(pes_err.loc[idx['nbc10ext', nbc7], idx[m[0],stdbas[i],'CP']].values.reshape(-1,1),
positions=[x[i]-0.65],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
showfliers=False,
widths=0.5,
patch_artist=True)
b2 = axarr[0,0].boxplot(pes_err.loc[idx['nbc10ext', nbc7], idx[m[1],stdbas[i],'CP']].values.reshape(-1,1),
positions=[x[i]],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
b3 = axarr[0,0].boxplot(pes_err.loc[idx['nbc10ext', nbc7], idx[m[2],stdbas[i],'CP']].values.reshape(-1,1),
positions=[x[i]+.65],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
for patch in b1['boxes']:
patch.set_facecolor(boxcolors[0])
for patch in b2['boxes']:
patch.set_facecolor(boxcolors[1])
for patch in b3['boxes']:
patch.set_facecolor(boxcolors[2])
# Plot NBC7x/unCP on axarr[0,1]
for i in range(len(stdbas)):
b1 = axarr[0,1].boxplot(pes_err.loc[idx['nbc10ext', nbc7], idx[m[0],stdbas[i],'unCP']].values.reshape(-1,1),
positions=[x[i]-0.65],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
showfliers=False,
widths=0.5,
patch_artist=True)
b2 = axarr[0,1].boxplot(pes_err.loc[idx['nbc10ext', nbc7], idx[m[1],stdbas[i],'unCP']].values.reshape(-1,1),
positions=[x[i]],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
b3 = axarr[0,1].boxplot(pes_err.loc[idx['nbc10ext', nbc7], idx[m[2],stdbas[i],'unCP']].values.reshape(-1,1),
positions=[x[i]+.65],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
for patch in b1['boxes']:
patch.set_facecolor(boxcolors[0])
for patch in b2['boxes']:
patch.set_facecolor(boxcolors[1])
for patch in b3['boxes']:
patch.set_facecolor(boxcolors[2])
# Plot HBC6/CP on axarr[1,0]
for i in range(len(stdbas)):
b1 = axarr[1,0].boxplot(pes_err.loc[idx['hbc6', hbc6], idx[m[0],stdbas[i],'CP']].dropna().values.reshape(-1,1),
positions=[x[i]-0.65],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
showfliers=False,
widths=0.5,
patch_artist=True)
b2 = axarr[1,0].boxplot(pes_err.loc[idx['hbc6', hbc6], idx[m[1],stdbas[i],'CP']].dropna().values.reshape(-1,1),
positions=[x[i]],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
b3 = axarr[1,0].boxplot(pes_err.loc[idx['hbc6', hbc6], idx[m[2],stdbas[i],'CP']].dropna().values.reshape(-1,1),
positions=[x[i]+.65],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
for patch in b1['boxes']:
patch.set_facecolor(boxcolors[0])
for patch in b2['boxes']:
patch.set_facecolor(boxcolors[1])
for patch in b3['boxes']:
patch.set_facecolor(boxcolors[2])
# Plot NBC7x/unCP on axarr[1,1]
for i in range(len(stdbas)):
b1 = axarr[1,1].boxplot(pes_err.loc[idx['hbc6', hbc6], idx[m[0],stdbas[i],'unCP']].dropna().values.reshape(-1,1),
positions=[x[i]-0.65],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
showfliers=False,
widths=0.5,
patch_artist=True)
b2 = axarr[1,1].boxplot(pes_err.loc[idx['hbc6', hbc6], idx[m[1],stdbas[i],'unCP']].dropna().values.reshape(-1,1),
positions=[x[i]],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
b3 = axarr[1,1].boxplot(pes_err.loc[idx['hbc6', hbc6], idx[m[2],stdbas[i],'unCP']].dropna().values.reshape(-1,1),
positions=[x[i]+.65],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
for patch in b1['boxes']:
patch.set_facecolor(boxcolors[0])
for patch in b2['boxes']:
patch.set_facecolor(boxcolors[1])
for patch in b3['boxes']:
patch.set_facecolor(boxcolors[2])
# ==> Plot Options <==
plt.xticks([i for i in x], stdbas)
axarr[0,0].set_xlim(x[0] - 1.5, x[-1] + 1.5)
# NBC7x: CP
axarr[0,0].set_title('(a) NBC7x: CP Curves')
axarr[0,0].set_ylabel('$\Delta$COM Signed Error ($\AA$)',fontsize='x-large')
axarr[0,0].hlines(0, 0, x[-1] + 5, linestyle='--', linewidth=1, zorder=1)
axarr[0,0].set_ylim(-0.16, 0.25)
axarr[0,0].fill_between(np.arange(x[0]-4,x[-1]+5), -0.1, 0.1, facecolor='grey', alpha=0.1)
axarr[0,0].fill_between(np.arange(x[0]-4,x[-1]+5), -0.05, 0.05, facecolor='grey', alpha=0.2)
axarr[0,0].fill_between(np.arange(x[0]-4,x[-1]+5), -0.01, 0.01, facecolor='grey', alpha=0.3)
# NBC7x: unCP
axarr[0,1].set_title('(b) NBC7x: unCP Curves')
axarr[0,1].set_ylabel('$\Delta$COM Signed Error ($\AA$)',fontsize='x-large')
axarr[0,1].set_ylim(-0.16, 0.25)
axarr[0,1].hlines(0, 0, x[-1] + 5, linestyle='--', linewidth=1, zorder=1)
axarr[0,1].fill_between(np.arange(x[0]-4,x[-1]+5), -0.1, 0.1, facecolor='grey', alpha=0.1)
axarr[0,1].fill_between(np.arange(x[0]-4,x[-1]+5), -0.05, 0.05, facecolor='grey', alpha=0.2)
axarr[0,1].fill_between(np.arange(x[0]-4,x[-1]+5), -0.01, 0.01, facecolor='grey', alpha=0.3)
# HBC6: CP
axarr[1,0].set_title('(c) HBC6: CP Curves')
axarr[1,0].set_ylabel('$\Delta$COM Signed Error ($\AA$)',fontsize='x-large')
axarr[1,0].set_ylim(-0.13, 0.15)
axarr[1,0].hlines(0, 0, x[-1] + 5, linestyle='--', linewidth=1, zorder=1)
axarr[1,0].fill_between(np.arange(x[0]-4,x[-1]+5), -0.1, 0.1, facecolor='grey', alpha=0.1)
axarr[1,0].fill_between(np.arange(x[0]-4,x[-1]+5), -0.05, 0.05, facecolor='grey', alpha=0.2)
axarr[1,0].fill_between(np.arange(x[0]-4,x[-1]+5), -0.01, 0.01, facecolor='grey', alpha=0.3)
# HBC6: unCP
axarr[1,1].set_title('(d) HBC6: unCP Curves')
axarr[1,1].set_ylabel('$\Delta$COM Signed Error ($\AA$)',fontsize='x-large')
axarr[1,1].set_ylim(-0.13, 0.15)
axarr[1,1].hlines(0, 0, x[-1] + 5, linestyle='--', linewidth=1, zorder=1)
axarr[1,1].fill_between(np.arange(x[0]-4,x[-1]+5), -0.1, 0.1, facecolor='grey', alpha=0.1)
axarr[1,1].fill_between(np.arange(x[0]-4,x[-1]+5), -0.05, 0.05, facecolor='grey', alpha=0.2)
axarr[1,1].fill_between(np.arange(x[0]-4,x[-1]+5), -0.01, 0.01, facecolor='grey', alpha=0.3)
plt.legend(handles=[b3lyp_patch, b97_patch, m05_patch], loc='upper center', fontsize='x-large', ncol=3)
###Output
_____no_output_____
###Markdown
Supporting Information---
###Code
# ==> Figs. S-3 -- S-5 <==
x = [x * 3 for x in range(1, len(stdbas)+1)]
titles = {'df-b3lyp-d3': 'DF-B3LYP-D3', 'df-wpbe-d3': 'DF-$\omega$PBE-D3', 'df-b97-d3': 'DF-B97-D3',
'df-wb97x-d': 'DF-$\omega$B97X-D', 'df-m05-2x': 'DF-M05-2X'}
# Boxplot & legend options
meanprops = {'marker': 's', 'markeredgecolor': 'k', 'markerfacecolor': 'k', 'label': 'blargh', #'markersize': 5
}
#k_square = mpatches.Patch(color='k', label='The red data')
k_square = mlines.Line2D([], [], color='k', marker='s', markersize=7, label='A21 Mean LRMSD', linewidth=0)
for j in range(len(mt.columns.levels[0][1:])):
m = mt.columns.levels[0][1:][j]
plt.figure(figsize=(10,5))
i = 0
for b in mt.reindex(columns=stdbas, level=1).columns.levels[1]:
bx = plt.boxplot(mt.loc[idx['a24'], idx[m,b,'LRMSD']].values.reshape(-1,1),
positions=[x[i]], whis='range', showmeans=True, meanprops=meanprops, widths=0.5)
hb = plt.scatter([x[i] - 1.5]*len(a24_hb), mt.loc[idx['a24', a24_hb], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='r', label='HB Subset Members')
mx = plt.scatter([x[i] - 1]*len(a24_mx), mt.loc[idx['a24', a24_mx], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='g', label='MX Subset Members')
dd = plt.scatter([x[i] - 0.5]*len(a24_dd), mt.loc[idx['a24', a24_dd], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='b', label='DD Subset Members')
i+=1
plt.xlim(0.5, x[-1] + 0.5)
plt.xticks([i-0.75 for i in x], stdbas)
plt.ylabel('LRMSD of Optimized Geometry ($\AA$)')
plt.title(titles[m])
plt.legend(handles=[hb, mx, dd, k_square])
ax = plt.gca()
ax.fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.1, facecolor='grey', alpha=0.1)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.05, facecolor='grey', alpha=0.2)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.01, facecolor='grey', alpha=0.3)
# ==> Figs. S-6 -- S-8 <==
x = [x * 3 for x in range(1, len(stdbas)+1)]
titles = {'df-b3lyp-d3': 'DF-B3LYP-D3', 'df-wpbe-d3': 'DF-$\omega$PBE-D3', 'df-b97-d3': 'DF-B97-D3',
'df-wb97x-d': 'DF-$\omega$B97X-D', 'df-m05-2x': 'DF-M05-2X'}
# Boxplot & legend options
meanprops = {'marker': 's', 'markeredgecolor': 'k', 'markerfacecolor': 'k', 'label': 'blargh', #'markersize': 5
}
boxprops = {'linewidth': 1.5}
medianprops = dict(linestyle='-', linewidth=1.5, color='cyan')
whiskerprops = dict(linestyle='-', linewidth=1.5, color='k')
k_square = mlines.Line2D([], [], color='k', marker='s', markersize=7, label='A21 Mean dCOM', linewidth=0)
for m in mt.columns.levels[0][1:]:
# Plot
plt.rcParams["figure.figsize"] = [10,5]
fig = plt.figure(figsize=(10,5))
bp = a21_serr.loc[idx['a24'], idx[m,:,'dCOM']].plot.box(whis='range', showmeans=True, positions=x,
meanprops=meanprops)
i = 0
for b in mt.columns.levels[1][1:]:
hb = plt.scatter([x[i] - 1.5]*len(a24_hb), a21_serr.loc[idx['a24', a24_hb], idx[m,b,'dCOM']],
facecolors='none', edgecolors='r', label='HB Subset Members')
mx = plt.scatter([x[i] - 1]*len(a24_mx), a21_serr.loc[idx['a24', a24_mx], idx[m,b,'dCOM']],
facecolors='none', edgecolors='g', label='MX Subset Members')
dd = plt.scatter([x[i] - 0.5]*len(a24_dd), a21_serr.loc[idx['a24', a24_dd], idx[m,b,'dCOM']],
facecolors='none', edgecolors='b', label='DD Subset Members')
i+=1
# Plot options
plt.xlim(1, x[-1] + 0.5)
plt.xticks([i-0.75 for i in x], stdbas)
plt.ylabel('A21 dCOM Signed Error ($\AA$)')
plt.hlines(0, 0, x[-1] + 1, linestyle='--', linewidth=1, zorder=1)
plt.title(titles[m])
plt.legend(handles=[hb, mx, dd, k_square], loc='best', ncol=2)
ax = plt.gca()
ax.fill_between(np.arange(x[0]-4,x[-1]+2), -0.1, 0.1, facecolor='grey', alpha=0.1)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), -0.05, 0.05, facecolor='grey', alpha=0.2)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), -0.01, 0.01, facecolor='grey', alpha=0.3)
# ==> Figs. S-15 -- S-118 <==
plt.rcParams['figure.figsize'] = [8,6]
mtdlabel = {'df-b3lyp-d3': 'B3LYP-D3', 'df-b97-d3': 'B97-D3', 'df-m05-2x': 'M05-2X', 'REF': 'CCSD(T)/CBS'}
dbse_label = {'hbc6': 'HBC6', 'nbc10ext': 'NBC7x'}
system_id = {'hbc6': {'faoofaoo': '1', 'faonfaon': '2', 'fannfann': '3',
'faoofaon': '4', 'faonfann': '5', 'faoofann': '6'},
'nbc10ext': {'BzBz-S': '1', 'BzBz-T': '2', 'BzH2S': '4', 'BzMe': '5',
'MeMe': '6', 'PyPy-S2': '7', 'PyPy-T3': '8'}
}
bas_label = {'DZ': 'cc-pVDZ', 'aDZ': 'aug-cc-pVDZ', 'TZ': 'cc-pVTZ', 'aTZ': 'aug-cc-pVTZ'}
colors = ['r','b','g']
markers = ['s','>','*']
for d in system_id.keys():
for s in system_id[d].keys():
for b in stdbas:
for cp in ['CP', 'unCP']:
if s != 'faoofann':
# Plot DFT
fig = plt.figure()
minie = []
maxie = []
curvemins = []
j = 0
for m in scans.columns.levels[0][1:]:
curve = scans.loc[idx[d,s], idx[m,b,cp]]
plt.plot(curve[0], curve[1], color=colors[j], marker=markers[j], label=mtdlabel[m])
pesmin = pes.loc[idx[d,s], idx[m,b,cp]]
plt.vlines(pesmin, -30, curve.min(), linestyle='--', color=colors[j], linewidth=2)
minie.append(curve[1].min())
maxie.append(curve[1].max())
if pesmin > 0:
curvemins.append(pesmin)
j+=1
# Plot reference curve
ref = scans.loc[idx[d,s], idx['REF']].values[0]
minie.append(ref[1].min())
maxie.append(ref[1].max())
plt.plot(ref[0],ref[1],'ko-', label=mtdlabel['REF'],zorder=5)
refmin = pes.loc[idx[d,s], idx['REF']].values[0]
plt.vlines(refmin, -50, ref.min(), linestyle='--', color='k', linewidth=2)
curvemins.append(refmin)
# Plot Options
plt.title('%s-%s: %s Curves with the %s basis' % (dbse_label[d], system_id[d][s], cp,
bas_label[b]))
plt.hlines(0,0,12,linestyle='-',linewidth=1)
plt.xlim(ref[0].min()-0.05, refmin+1)
plt.ylim(min(minie) - 2,max(maxie) + 5)
plt.minorticks_on()
ax = plt.gca()
ax.tick_params(axis='y',which='minor',bottom='off')
plt.xlabel('Intermolecular Separation, $R$ ($\AA$)',fontsize='xx-large')
plt.ylabel('Interaction Energy (kcal/mol)',fontsize='xx-large')
plt.legend(loc='upper left',fontsize='x-large')#,ncol=2)
# Create inset
axins = zoomed_inset_axes(ax, 2, loc=1)
j=0
for m in scans.columns.levels[0][1:]:
curve = scans.loc[idx[d,s], idx[m,b,cp]]
axins.plot(curve[0], curve[1], color=colors[j], marker=markers[j], label=mtdlabel[m])
pesmin = pes.loc[idx[d,s], idx[m,b,cp]]
axins.vlines(pesmin, -30, curve.min(), linestyle='--', color=colors[j], linewidth=2)
j+=1
axins.plot(ref[0],ref[1],'ko-', label=mtdlabel['REF'])
axins.vlines(refmin, -50, ref.min(), linestyle='--', color='k', linewidth=2)
axins.hlines(0,0,12,linestyle='-',color='k',linewidth=1)
mins = np.array(curvemins)
axins.set_xlim(mins.min()-0.05, mins.max()+0.05) # apply the x-limits
if d=='nbc10ext':
axins.set_ylim(min(minie) - 0.5,max(minie) + 0.5) # apply the y-limits
else:
axins.set_ylim(min(minie) - 1,max(minie) + 1) # apply the y-limits
axins.xaxis.set_tick_params(labelsize=12)
axins.yaxis.set_tick_params(labelsize=12)
mark_inset(ax, axins, loc1=2, loc2=4, fc="white", ec="silver", ls='--', zorder=0)
else:
fig = plt.figure()
j = 0
for m in scans.columns.levels[0][1:]:
curve = scans.loc[idx['hbc6','faoofann'], idx[m,b,cp]]
plt.plot(curve[0], curve[1], color=colors[j], marker=markers[j], label=mtdlabel[m])
j+=1
# Plot reference curve
ref = scans.loc[idx['hbc6','faoofann'], idx['REF']].values[0]
plt.plot(ref[0],ref[1],'ko-', label=mtdlabel['REF'],zorder=5)
# Plot Options
plt.title('%s-%s: %s Curves with the %s basis' % (dbse_label['hbc6'],
system_id['hbc6']['faoofann'],
cp,
bas_label[b]))
plt.hlines(0,0,12,linestyle='-',linewidth=1)
plt.xlim(ref[0].min()-0.1, ref[0].max()+.1)
plt.ylim(-32, 8)
plt.minorticks_on()
ax = plt.gca()
ax.tick_params(axis='y',which='minor',bottom='off')
plt.xlabel('Intermolecular Separation, $R$ ($\AA$)',fontsize='xx-large')
plt.ylabel('Interaction Energy (kcal/mol)',fontsize='xx-large')
plt.legend(loc='upper left',fontsize='x-large',ncol=2)
###Output
_____no_output_____
###Markdown
Additional Figures
###Code
# ==> LRMSD Boxplots: Grouped by Basis Set (subplots) <==
x = [x * 3 for x in range(1, len(mt.columns.levels[0][1:])+1)]
xticklabels = {'df-b3lyp-d3': 'DF-B3LYP-D3', 'df-wpbe-d3': 'DF-$\omega$PBE-D3', 'df-b97-d3': 'DF-B97-D3',
'df-wb97x-d': 'DF-$\omega$B97X-D', 'df-m05-2x': 'DF-M05-2X'}
titles = {'DZ': 'cc-pVDZ', 'TZ': 'cc-pVTZ', 'aDZ': 'aug-cc-pVDZ', 'aTZ': 'aug-cc-pVTZ'}
# Boxplot & legend options
meanprops = {'marker': 's', 'markeredgecolor': 'k', 'markerfacecolor': 'k', 'label': 'blargh', #'markersize': 5
}
k_square = mlines.Line2D([], [], color='k', marker='s', markersize=7, label='A21 Mean LRMSD', linewidth=0)
plt.rcParams["figure.figsize"] = [10,20]
f, axarr = plt.subplots(4, 1, sharex=True, figsize=(10,20))
j = 0
for b in stdbas:
i = 0
hb_dots = mx_dots = dd_dots = []
for m in mt.columns.levels[0][1:]:
bx = axarr[j].boxplot(mt.loc[idx['a24'], idx[m,b,'LRMSD']].values.reshape(-1,1),
positions=[x[i]], whis='range', showmeans=True, meanprops=meanprops, widths=0.5)
hb = axarr[j].scatter([x[i] - 1.5]*len(a24_hb), mt.loc[idx['a24', a24_hb], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='r', label='HB Subset Members')
mx = axarr[j].scatter([x[i] - 1]*len(a24_mx), mt.loc[idx['a24', a24_mx], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='g', label='MX Subset Members')
dd = axarr[j].scatter([x[i] - 0.5]*len(a24_dd), mt.loc[idx['a24', a24_dd], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='b', label='DD Subset Members')
i+=1
axarr[j].set_xlim(x[0] - 2, x[-1] + 0.5)
axarr[j].set_ylim(0, 0.6) if b == 'aTZ' else None
plt.xticks([i-0.75 for i in x], [xticklabels[m] for m in mt.columns.levels[0][1:-2]])#, rotation=45)
axarr[j].set_ylabel('LRMSD of Optimized Geometry ($\AA$)')
axarr[j].set_title(titles[b])
plt.legend(handles=[hb, mx, dd, k_square], loc='best')
axarr[j].fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.1, facecolor='grey', alpha=0.1)
axarr[j].fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.05, facecolor='grey', alpha=0.2)
axarr[j].fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.01, facecolor='grey', alpha=0.3)
j += 1
# ==> LRMSD Boxplots: Grouped by Method (subplots) <==
x = [x * 3 for x in range(1, len(stdbas)+1)]
titles = {'df-b3lyp-d3': 'DF-B3LYP-D3', 'df-wpbe-d3': 'DF-$\omega$PBE-D3', 'df-b97-d3': 'DF-B97-D3',
'df-wb97x-d': 'DF-$\omega$B97X-D', 'df-m05-2x': 'DF-M05-2X'}
# Boxplot & legend options
meanprops = {'marker': 's', 'markeredgecolor': 'k', 'markerfacecolor': 'k', 'label': 'blargh', #'markersize': 5
}
k_square = mlines.Line2D([], [], color='k', marker='s', markersize=7, label='A21 Mean LRMSD', linewidth=0)
plt.rcParams["figure.figsize"] = [10,15]
f, axarr = plt.subplots(3, 1, sharex=True, figsize=(10,15))
#mt = mt.reindex(columns=dtz, level=1)
for j in range(len(mt.columns.levels[0][1:])):
i = 0
m = mt.columns.levels[0][1:][j]
for b in mt.reindex(columns=stdbas, level=1).columns.levels[1]:
bx = axarr[j].boxplot(mt.loc[idx['a24'], idx[m,b,'LRMSD']].values.reshape(-1,1),
positions=[x[i]], whis='range', showmeans=True, meanprops=meanprops, widths=0.5)
hb = axarr[j].scatter([x[i] - 1.5]*len(a24_hb), mt.loc[idx['a24',a24_hb], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='r', label='HB Subset Members')
mx = axarr[j].scatter([x[i] - 1]*len(a24_mx), mt.loc[idx['a24',a24_mx], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='g', label='MX Subset Members')
dd = axarr[j].scatter([x[i] - 0.5]*len(a24_dd), mt.loc[idx['a24',a24_dd], idx[m,b,'LRMSD']].values, facecolors='none', edgecolors='b', label='DD Subset Members')
i+=1
axarr[j].set_xlim(0.5, x[-1] + 0.5)
axarr[j].set_ylim(0, 0.7) if m == 'df-m05-2x' else None
plt.xticks([i-0.75 for i in x], stdbas, rotation=45)
axarr[j].set_ylabel('LRMSD of Optimized Geometry ($\AA$)')
axarr[j].set_title(titles[m])
plt.legend(handles=[hb, mx, dd, k_square], loc='best')
#ax = plt.gca()
axarr[j].fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.1, facecolor='grey', alpha=0.1)
axarr[j].fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.05, facecolor='grey', alpha=0.2)
axarr[j].fill_between(np.arange(x[0]-4,x[-1]+2), 0, 0.01, facecolor='grey', alpha=0.3)
# ==> Plotting LRMSD & dCOM SE: Box & Whisker (indiv plots) <==
data = XZ + aXZ
x = [x * 3 for x in range(1, len(data)+1)]
titles = {'df-b3lyp-d3': 'DF-B3LYP-D3', 'df-wpbe-d3': 'DF-$\omega$PBE-D3', 'df-b97-d3': 'DF-B97-D3',
'df-wb97x-d': 'DF-$\omega$B97X-D', 'df-m05-2x': 'DF-M05-2X'}
boxcolors = ['pink', 'lightblue']
dcom_patch = mpatches.Patch(color='pink', label='A21 dCOM Signed Error')
lrmsd_patch = mpatches.Patch(color='lightblue', label='A21 LRMSD')
# Boxplot & legend options
meanprops = {'marker': 's', 'markeredgecolor': 'k', 'markerfacecolor': 'k', 'label': 'blargh', #'markersize': 5
}
boxprops = {'linewidth': 1.5}
medianprops = dict(linestyle='-', linewidth=1.5, color='k')
medianprops_dcom = dict(linestyle='-', linewidth=1.5, color='cyan')
medianprops_lrmsd = dict(linestyle='-', linewidth=1.5, color='m')
whiskerprops = dict(linestyle='-', linewidth=1.5, color='k')
k_square = mlines.Line2D([], [], color='k', marker='s', markersize=7, label='A21 Mean dCOM', linewidth=0)
for m in mt.columns.levels[0][1:]:
# Plot
plt.rcParams['figure.figsize'] = [10,5]
fig = plt.figure(figsize=(10,5))
ax = plt.gca()
for i in range(len(data)):
bp = ax.boxplot(a21_serr.loc[idx['a24'], idx[m,data[i],'dCOM']].values.reshape(-1,1),
positions=[x[i]-1],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
showfliers=False,
widths=0.5,
patch_artist=True)
bx = ax.boxplot(mt.loc[idx['a24'], idx[m,data[i],'LRMSD']].values.reshape(-1,1),
positions=[x[i]],
whis='range',
showmeans=True,
meanprops=meanprops,
medianprops=medianprops,
widths=0.5,
patch_artist=True)
for patch in bp['boxes']:
patch.set_facecolor(boxcolors[0])
for patch in bx['boxes']:
patch.set_facecolor(boxcolors[1])
# Plot Options
plt.xticks([i-0.5 for i in x], data)
ax.set_xlim(x[0] - 1.5, x[-1] + 0.5)
ax.set_ylim(-0.4, 0.7) if m == 'df-m05-2x' else None
ax.set_ylabel('A21 $\Delta$COM Signed Error & LRMSD ($\AA$)')
plt.hlines(0, 0, x[-1] + 1, linestyle='--', linewidth=1, zorder=1)
ax.set_title(titles[m])
plt.legend(handles=[dcom_patch, lrmsd_patch], loc='lower center', ncol=2)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), -0.1, 0.1, facecolor='grey', alpha=0.1)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), -0.05, 0.05, facecolor='grey', alpha=0.2)
ax.fill_between(np.arange(x[0]-4,x[-1]+2), -0.01, 0.01, facecolor='grey', alpha=0.3)
###Output
_____no_output_____
###Markdown
Things to try* [a Treemap](https://vega.github.io/vega/examples/treemap/)
###Code
from sqlalchemy import create_engine
engine = create_engine(f"sqlite:///birdnest.db")
import pandas as pd
import altair as alt
from spotclient import Client
import models
db = models.Database()
session = models.get_session()
session = models.get_session()
playlist = session.query(models.Playlist).first()
pdf = pd.DataFrame(playlist.to_json(True))
from functools import reduce
pljson = map(lambda x: x.to_json(True),session.query(models.Playlist))
cumpl = [] # seems like I should be able to do this with reduce but it's erroring
for pl in pljson:
cumpl.extend(pl)
[x for x in cumpl if x['name'] == 'Ghost Riders In The Sky']
len(cumpl)
alt.Chart(alt.Data(values=cumpl[:1000])).mark_bar().encode(
alt.X('name:N', title='', sort='y',axis=alt.Axis(labels=False)),
alt.Y('acousticness:Q', title='acousticness'),
tooltip=alt.Tooltip(['name:N','artists:N','acousticness:Q','time_signature:Q', 'track_id:N']),
color=alt.condition(alt.datum.acousticness > .9, alt.value('red'), alt.value('lightgrey'))
).properties(width=500)
c2 = list(map(lambda d: dict((k,v) for k,v in d.items() if k in ['name','artists','acousticness','time_signature', 'track_id']), cumpl))
alt.Chart(alt.Data(values=c2[:1000])).mark_bar().encode(
alt.X('name:N', title='', sort='y',axis=alt.Axis(labels=False)),
alt.Y('acousticness:Q', title='acousticness'),
tooltip=alt.Tooltip(['name:N','artists:N','acousticness:Q','time_signature:Q', 'track_id:N']),
color=alt.condition(alt.datum.acousticness > .9, alt.value('red'), alt.value('lightgrey'))
).properties(width=500)
alt.Chart(alt.Data(values=playlist.to_json(True))).mark_bar().encode(
alt.X('energy:Q',bin=True, title='Energy (histogram)'),
alt.Y(aggregate='count',type='quantitative', title='Number of tracks'))
def playlist_as_df(cols=None):
if cols is None:
af_clause = 'af.*'
elif isinstance(cols,str):
af_clause = cols
else:
clause_cols = []
for c in cols:
if not c.startswith('af.'):
c = f"af.{c}"
clause_cols.append(c)
af_clause = ','.join(clause_cols)
SQL = f"""
SELECT
p.date,
p.name playlist_name, p.playlist_id,
t.name,
GROUP_CONCAT(a.name,';') artist,
t.duration_ms,
{af_clause}
FROM
playlist p,
playlist_track pt,
track t,
track_artist ta,
artist a,
audio_features af
where t.track_id = ta.track_id
and ta.artist_id = a.artist_id
and p.playlist_id = pt.playlist_id
and pt.track_id = t.track_id
and af.track_id = t.track_id
group by t.name
"""
engine = create_engine(f"sqlite:///birdnest.db")
return pd.read_sql(SQL, engine)
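# Illustrative usage (the column names are examples of audio_features fields, not verified here):
# df_tracks = playlist_as_df(cols=['energy', 'valence', 'danceability'])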
sql = '''
select artist.name, count(*) count
from artist, track_artist, playlist_track
where artist.artist_id = track_artist.artist_id
and track_artist.track_id = playlist_track.track_id
group by artist.name
'''
artists_df = pd.read_sql(sql,engine)
alt.Chart
artists_df.sort_values('count',ascending=False).head()
alt.Chart(artists_df[artists_df['count'] >= 5].sort_values('count',ascending=False)).mark_bar().encode(
x=alt.X('count',title='times played'),
y=alt.Y('name',sort='-x', title='Artist')
)
alt.Chart(artists_df.groupby('count').count().reset_index().rename(columns={
'name': '# Artists played X times'
})).mark_bar().encode(
x=alt.X('# Artists played X times'),
y=alt.Y('count:N',sort='-y'),
tooltip=['count', '# Artists played X times']
)
###Output
_____no_output_____
###Markdown
Genre frequencyGenres attach to artists, not tracks, tracks can have multiple artists... the meaning of this is suspect but was curious
###Code
#
sql = '''
select track.name track, genre.name genre
from track, track_artist, artist_genre, genre
where track.track_id = track_artist.track_id
and track_artist.artist_id = artist_genre.artist_id
and artist_genre.genre_id = genre.genre_id
'''
df = pd.read_sql(sql,engine).groupby('genre').count().rename(columns={'track': 'tracks'}).reset_index()
alt.Chart(df.sort_values('tracks',ascending=False).head(50)).mark_bar().encode(
x='tracks',
y=alt.Y('genre:N',sort='-x')
)
def playlist_stats(df):
"""
Given a dataframe, produce a summarization in the same shape as AudioFeature,
but aggregated. For now, aggregated as the sum of (each feature * track length)
but that could become switchable.
'key', 'mode': probably the mode for each set, and another for the two as a unit?
'tempo': average
'time_signature', maybe mode or mode
'acousticness', 'danceability', 'energy',
'instrumentalness', 'liveness', 'loudness', 'speechiness', 'valence'
"""
    pass  # TODO: aggregate each audio feature as described in the docstring (left unfinished)
df = df
df['gross_valence'] = df['valence'] * df['duration_ms']
df.groupby('playlist_name').sum()[['gross_valence']].sort_values('gross_valence',ascending=False)
# print(duration)  # 'duration' is not defined here; exploratory leftover
previewless = session.query(models.Track).filter(models.Track.preview_url == None).all()
t_info = Client().tracks([t.spotify_id for t in previewless[:10]])
"{:,}".format(10000)
alt.Chart(alt.Data(values=list(artist_count))).mark_bar().encode(
x=alt.X('count:Q',title='times played'),
y=alt.Y('name:N',sort='-x', title='Artist')
).transform_filter(alt.datum.count >= 5)  # e.g. at least 5 plays, as in the chart above
q = session.query(models.Track)
q
# pd.read_sql(q.statement, session.bind)
###Output
_____no_output_____
###Markdown
![title](image/title.png) THIS REPOSITORY IS CREATED BY:Ng Crew1. Sebastian Cahyo Ardhi Iswara 11031741742. Adli Farhan Ibrahim 1103174092 ![title](image/workflow.png) PROJECT WORKFLOW:1. Data Collection2. Data Preprocessing3. Data Normalization4. Model Training5. Model Validation DEPENDENCIES TO RUN THIS PROJECT:1. Python 3.XX (I'm using 3.7.7)2. Jupyterlab (To run ipynb / notebook files)3. Pandas (For data analysis and processing)4. matplotlib (For data visualization and as a dependency for seaborn)5. seaborn (For data visualization)6. Sklearn (For data preprocessing and machine learning algorithms) I. MODULE IMPORTimport the modules that we use in this project
###Code
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import RandomizedSearchCV
###Output
_____no_output_____
###Markdown
1. DATA COLLECTIONwe need to collect our data. We use Bank Marketing Data Set from UCI Dataset, the goals of this dataset is to campaigns bank term deposit subcribe dataset source: https://archive.ics.uci.edu/ml/datasets/Bank+Marketing![title](image/dataset.png) bank client data:1 - age (numeric)2 - job : type of job (categorical: 'admin.','blue-collar','entrepreneur','housemaid','management','retired','self-employed','services','student','technician','unemployed','unknown')3 - marital : marital status (categorical: 'divorced','married','single','unknown'; note: 'divorced' means divorced or widowed)4 - education (categorical: 'basic.4y','basic.6y','basic.9y','high.school','illiterate','professional.course','university.degree','unknown')5 - default: has credit in default? (categorical: 'no','yes','unknown')6 - housing: has housing loan? (categorical: 'no','yes','unknown')7 - loan: has personal loan? (categorical: 'no','yes','unknown') related with the last contact of the current campaign:8 - contact: contact communication type (categorical: 'cellular','telephone')9 - month: last contact month of year (categorical: 'jan', 'feb', 'mar', ..., 'nov', 'dec')10 - day_of_week: last contact day of the week (categorical: 'mon','tue','wed','thu','fri')11 - duration: last contact duration, in seconds (numeric). Important note: this attribute highly affects the output target (e.g., if duration=0 then y='no'). Yet, the duration is not known before a call is performed. Also, after the end of the call y is obviously known. Thus, this input should only be included for benchmark purposes and should be discarded if the intention is to have a realistic predictive model. other attributes:12 - campaign: number of contacts performed during this campaign and for this client (numeric, includes last contact)13 - pdays: number of days that passed by after the client was last contacted from a previous campaign (numeric; 999 means client was not previously contacted)14 - previous: number of contacts performed before this campaign and for this client (numeric)15 - poutcome: outcome of the previous marketing campaign (categorical: 'failure','nonexistent','success') social and economic context attributes16 - emp.var.rate: employment variation rate - quarterly indicator (numeric)17 - cons.price.idx: consumer price index - monthly indicator (numeric)18 - cons.conf.idx: consumer confidence index - monthly indicator (numeric)19 - euribor3m: euribor 3 month rate - daily indicator (numeric)20 - nr.employed: number of employees - quarterly indicator (numeric)Output variable (desired target):21 - y - has the client subscribed a term deposit? (binary: 'yes','no')
###Code
pd.set_option('display.max_columns', None)
df = pd.read_csv('dataset/bank-additional-full.csv',sep=';')
df.head()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 41188 entries, 0 to 41187
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 41188 non-null int64
1 job 41188 non-null object
2 marital 41188 non-null object
3 education 41188 non-null object
4 default 41188 non-null object
5 housing 41188 non-null object
6 loan 41188 non-null object
7 contact 41188 non-null object
8 month 41188 non-null object
9 day_of_week 41188 non-null object
10 duration 41188 non-null int64
11 campaign 41188 non-null int64
12 pdays 41188 non-null int64
13 previous 41188 non-null int64
14 poutcome 41188 non-null object
15 emp.var.rate 41188 non-null float64
16 cons.price.idx 41188 non-null float64
17 cons.conf.idx 41188 non-null float64
18 euribor3m 41188 non-null float64
19 nr.employed 41188 non-null float64
20 y 41188 non-null object
dtypes: float64(5), int64(5), object(11)
memory usage: 6.6+ MB
###Markdown
2. DATA PREPROCESSINGAfter the data is loaded, we must check whether it contains duplicate rows or null values, because machine learning algorithms cannot handle them.
###Code
df.isna().sum() / len(df)
for col in df.columns:
print(f'{df[col].value_counts()}\n')
###Output
31 1947
32 1846
33 1833
36 1780
35 1759
...
89 2
91 2
87 1
94 1
95 1
Name: age, Length: 78, dtype: int64
admin. 10422
blue-collar 9254
technician 6743
services 3969
management 2924
retired 1720
entrepreneur 1456
self-employed 1421
housemaid 1060
unemployed 1014
student 875
unknown 330
Name: job, dtype: int64
married 24928
single 11568
divorced 4612
unknown 80
Name: marital, dtype: int64
university.degree 12168
high.school 9515
basic.9y 6045
professional.course 5243
basic.4y 4176
basic.6y 2292
unknown 1731
illiterate 18
Name: education, dtype: int64
no 32588
unknown 8597
yes 3
Name: default, dtype: int64
yes 21576
no 18622
unknown 990
Name: housing, dtype: int64
no 33950
yes 6248
unknown 990
Name: loan, dtype: int64
cellular 26144
telephone 15044
Name: contact, dtype: int64
may 13769
jul 7174
aug 6178
jun 5318
nov 4101
apr 2632
oct 718
sep 570
mar 546
dec 182
Name: month, dtype: int64
thu 8623
mon 8514
wed 8134
tue 8090
fri 7827
Name: day_of_week, dtype: int64
85 170
90 170
136 168
73 167
124 164
...
1108 1
980 1
4918 1
2453 1
2015 1
Name: duration, Length: 1544, dtype: int64
1 17642
2 10570
3 5341
4 2651
5 1599
6 979
7 629
8 400
9 283
10 225
11 177
12 125
13 92
14 69
17 58
15 51
16 51
18 33
20 30
19 26
21 24
22 17
23 16
24 15
27 11
29 10
25 8
26 8
28 8
30 7
31 7
35 5
33 4
32 4
34 3
40 2
42 2
43 2
37 1
39 1
41 1
56 1
Name: campaign, dtype: int64
999 39673
3 439
6 412
4 118
9 64
2 61
7 60
12 58
10 52
5 46
13 36
11 28
1 26
15 24
14 20
8 18
0 15
16 11
17 8
18 7
19 3
22 3
21 2
26 1
20 1
25 1
27 1
Name: pdays, dtype: int64
0 35563
1 4561
2 754
3 216
4 70
5 18
6 5
7 1
Name: previous, dtype: int64
nonexistent 35563
failure 4252
success 1373
Name: poutcome, dtype: int64
1.4 16234
-1.8 9184
1.1 7763
-0.1 3683
-2.9 1663
-3.4 1071
-1.7 773
-1.1 635
-3.0 172
-0.2 10
Name: emp.var.rate, dtype: int64
93.994 7763
93.918 6685
92.893 5794
93.444 5175
94.465 4374
93.200 3616
93.075 2458
92.201 770
92.963 715
92.431 447
92.649 357
94.215 311
94.199 303
92.843 282
92.379 267
93.369 264
94.027 233
94.055 229
93.876 212
94.601 204
92.469 178
93.749 174
92.713 172
94.767 128
93.798 67
92.756 10
Name: cons.price.idx, dtype: int64
-36.4 7763
-42.7 6685
-46.2 5794
-36.1 5175
-41.8 4374
-42.0 3616
-47.1 2458
-31.4 770
-40.8 715
-26.9 447
-30.1 357
-40.3 311
-37.5 303
-50.0 282
-29.8 267
-34.8 264
-38.3 233
-39.8 229
-40.0 212
-49.5 204
-33.6 178
-34.6 174
-33.0 172
-50.8 128
-40.4 67
-45.9 10
Name: cons.conf.idx, dtype: int64
4.857 2868
4.962 2613
4.963 2487
4.961 1902
4.856 1210
...
1.045 1
0.956 1
0.933 1
3.282 1
0.996 1
Name: euribor3m, Length: 316, dtype: int64
5228.1 16234
5099.1 8534
5191.0 7763
5195.8 3683
5076.2 1663
5017.5 1071
4991.6 773
5008.7 650
4963.6 635
5023.5 172
5176.3 10
Name: nr.employed, dtype: int64
no 36548
yes 4640
Name: y, dtype: int64
###Markdown
The columns job, marital, education, default, housing, and loan contain 'unknown' values. We can treat 'unknown' as a NaN / missing value, so we must remove those rows.
###Code
df_new = df[(df['job'] != 'unknown') & (df['marital'] != 'unknown') & (df['education'] != 'unknown') & (df['default'] != 'unknown') & (df['housing'] != 'unknown') & (df['loan'] != 'unknown')]
for col in df_new.columns:
print(f'{df_new[col].value_counts()}\n\n')
###Output
31 1643
32 1555
33 1524
30 1441
34 1431
...
91 2
89 2
94 1
87 1
95 1
Name: age, Length: 76, dtype: int64
admin. 8737
blue-collar 5675
technician 5473
services 2857
management 2311
retired 1216
self-employed 1092
entrepreneur 1089
unemployed 738
housemaid 690
student 610
Name: job, dtype: int64
married 17492
single 9443
divorced 3553
Name: marital, dtype: int64
university.degree 10412
high.school 7699
professional.course 4321
basic.9y 4276
basic.4y 2380
basic.6y 1389
illiterate 11
Name: education, dtype: int64
no 30485
yes 3
Name: default, dtype: int64
yes 16521
no 13967
Name: housing, dtype: int64
no 25720
yes 4768
Name: loan, dtype: int64
cellular 20443
telephone 10045
Name: contact, dtype: int64
may 9733
jul 5081
aug 4673
jun 3614
nov 3496
apr 2115
oct 642
sep 495
mar 482
dec 157
Name: month, dtype: int64
thu 6395
mon 6279
wed 6125
tue 5955
fri 5734
Name: day_of_week, dtype: int64
90 134
85 128
72 122
104 121
111 121
...
2680 1
745 1
2516 1
1058 1
2486 1
Name: duration, Length: 1441, dtype: int64
1 13246
2 7873
3 3905
4 1937
5 1156
6 696
7 440
8 283
9 195
10 164
11 124
12 89
13 54
14 48
17 41
15 30
16 30
18 23
20 21
19 18
23 14
21 14
24 11
22 11
29 8
27 7
30 7
25 6
28 6
31 5
26 5
35 4
32 4
33 3
40 2
34 2
42 2
37 1
41 1
43 1
39 1
Name: campaign, dtype: int64
999 29178
3 381
6 363
4 102
2 53
9 53
12 50
7 50
5 43
10 40
13 33
11 25
15 22
1 21
14 17
0 14
8 13
16 8
17 6
18 5
19 3
22 3
21 2
25 1
26 1
27 1
Name: pdays, dtype: int64
0 25836
1 3752
2 633
3 190
4 56
5 16
6 4
7 1
Name: previous, dtype: int64
nonexistent 25836
failure 3461
success 1191
Name: poutcome, dtype: int64
1.4 11220
-1.8 7392
1.1 4938
-0.1 3117
-2.9 1461
-3.4 951
-1.7 687
-1.1 565
-3.0 147
-0.2 10
Name: emp.var.rate, dtype: int64
93.994 4938
93.918 4646
92.893 4616
93.444 3798
93.200 3054
94.465 2776
93.075 1970
92.201 676
92.963 628
92.431 396
92.649 326
94.215 278
94.199 266
92.843 261
92.379 229
93.369 221
94.055 210
94.027 199
94.601 183
93.876 179
92.469 157
92.713 147
93.749 145
94.767 116
93.798 63
92.756 10
Name: cons.price.idx, dtype: int64
-36.4 4938
-42.7 4646
-46.2 4616
-36.1 3798
-42.0 3054
-41.8 2776
-47.1 1970
-31.4 676
-40.8 628
-26.9 396
-30.1 326
-40.3 278
-37.5 266
-50.0 261
-29.8 229
-34.8 221
-39.8 210
-38.3 199
-49.5 183
-40.0 179
-33.6 157
-33.0 147
-34.6 145
-50.8 116
-40.4 63
-45.9 10
Name: cons.conf.idx, dtype: int64
4.857 1836
4.963 1808
4.962 1803
4.961 1225
1.405 885
...
3.053 1
0.969 1
1.047 1
3.669 1
0.933 1
Name: euribor3m, Length: 314, dtype: int64
5228.1 11220
5099.1 6847
5191.0 4938
5195.8 3117
5076.2 1461
5017.5 951
4991.6 687
4963.6 565
5008.7 545
5023.5 147
5176.3 10
Name: nr.employed, dtype: int64
no 26629
yes 3859
Name: y, dtype: int64
###Markdown
The dataset is now free of unknown values; let's check whether it contains duplicate instances.
###Code
df_new.shape
###Output
_____no_output_____
###Markdown
Comparing shapes, about 25% of the original rows contained unknown values and were removed.
###Code
df_new[df_new.duplicated() == True].shape
###Output
_____no_output_____
###Markdown
Our dataset has 10 duplicate rows; we need to eliminate them.
###Code
df_new = df_new.drop_duplicates(keep='last')
df_new.shape
df_new.head()
sns.set()
sns.set_palette('rainbow')
plt.figure(figsize=[30,20])
plt.subplot(4,2,1)
sns.countplot(df_new['job'],hue=df_new['y'])
plt.subplot(4,2,2)
sns.countplot(df_new['marital'],hue=df_new['y'])
plt.subplot(4,2,3)
sns.countplot(df_new['education'],hue=df_new['y'])
plt.subplot(4,2,4)
sns.countplot(df_new['housing'],hue=df_new['y'])
plt.subplot(4,2,5)
sns.countplot(df_new['loan'],hue=df_new['y'])
plt.subplot(4,2,6)
sns.countplot(df_new['month'],hue=df_new['y'])
plt.subplot(4,2,7)
sns.countplot(df_new['day_of_week'],hue=df_new['y'])
###Output
_____no_output_____
###Markdown
Customers with these characteristics have the highest chance of accepting the campaign:* Admin job* University degree* No personal loan* Contacted on Thursday* Married* Has a housing loan* Contacted in May
###Code
sns.set()
corr = df_new.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
plt.figure(figsize=[15,10])
plt.title('THE CORRELATION OF FEATURE IN THIS DATASET',fontweight='bold')
sns.heatmap(corr, mask=mask, cmap='Blues', center=0, linewidths=1, annot=True, fmt=".2f")
plt.show()
###Output
_____no_output_____
###Markdown
There are 3 columns that are highly correlated with other features; we must drop them so our model can work well.
###Code
df_new = df_new.drop(['euribor3m','nr.employed','cons.price.idx'],axis=1)
sns.set()
corr = df_new.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
plt.figure(figsize=[15,10])
plt.title('THE CORRELATION OF FEATURE IN THIS DATASET',fontweight='bold')
sns.heatmap(corr, mask=mask, cmap='Blues', center=0, linewidths=1, annot=True, fmt=".2f")
plt.show()
###Output
_____no_output_____
###Markdown
Our data is clean; let's check the balance of our target variable.
###Code
sns.set_palette('coolwarm')
sns.countplot(df_new['y'])
plt.title('DIFFERENCE BETWEEN NO AND YES',fontweight='bold')
plt.show()
df_new['y'].value_counts() / len(df_new)
###Output
_____no_output_____
###Markdown
Our data is not well balanced, which is a problem: the model will not predict well on such data, so we must balance the target variable to a 1:1 ratio.
###Code
y_no = df_new[df_new['y'] == 'no']
y_yes = df_new[df_new['y'] == 'yes']
dummy = df_new
y_no = y_no.sample(len(y_yes),random_state=46)
df_new = pd.concat([y_no,y_yes])
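# Random undersampling: keep every 'yes' row plus an equally sized random sample of 'no'
# rows, so df_new is now balanced 1:1; 'dummy' keeps the full imbalanced data for the
# comparison model trained later.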
sns.set_palette('coolwarm')
sns.countplot(df_new['y'])
plt.title('DIFFERENCE BETWEEN NO AND YES',fontweight='bold')
plt.show()
###Output
_____no_output_____
###Markdown
3. DATA NORMALIZATION ![title](image/robustscaler.png)
###Code
numeric = ['age','duration','campaign','pdays','previous','emp.var.rate','cons.conf.idx']
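# RobustScaler rescales each feature as (x - median) / IQR with IQR = Q3 - Q1, so outliers
# (e.g. very long call durations) have little influence on the scaling.
# Illustrative manual check on 'age' before the loop below transforms the column in place:
q1, med, q3 = df_new['age'].quantile([0.25, 0.5, 0.75])
age_scaled_manually = (df_new['age'] - med) / (q3 - q1)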
for col in df_new[numeric]:
df_new[col] = RobustScaler().fit_transform(df_new[[col]])
dummy[col] = RobustScaler().fit_transform(dummy[[col]])
###Output
_____no_output_____
###Markdown
Our data is now well balanced and normalized with RobustScaler; let's split it into features X and target variable y.
###Code
y = df_new['y']
X = df_new.drop('y',axis=1)
y_dummy = dummy['y']
X_dummy = dummy.drop('y',axis=1)
#OneHot Encoder
X = pd.get_dummies(X)
X_dummy = pd.get_dummies(X_dummy)
X.head()
###Output
_____no_output_____
###Markdown
4. Model TrainingBefore model training, we must split the data into two parts: a training set and a test set. ![title](image/logistic.png)
###Code
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.25,stratify=y,random_state=46)
X_train_dum, X_test_dum, y_train_dum, y_test_dum = train_test_split(X_dummy,y_dummy,test_size=0.25,stratify=y_dummy,random_state=46)
###Output
_____no_output_____
###Markdown
Fit our Logistic Regression model to our dataset
###Code
lr = LogisticRegression()
lr.fit(X_train,y_train)
lr_dummy = LogisticRegression()
lr_dummy.fit(X_train_dum,y_train_dum)
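# For reference, LogisticRegression models P(y='yes' | x) = 1 / (1 + exp(-(w.x + b))).
# Illustrative check that predict_proba is the sigmoid of the decision function
# (classes_ are sorted, so column 1 of predict_proba corresponds to 'yes'):
z = lr.decision_function(X_test[:5])        # w.x + b for five test rows
proba_manual = 1.0 / (1.0 + np.exp(-z))     # ~ lr.predict_proba(X_test[:5])[:, 1]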
###Output
_____no_output_____
###Markdown
Check the accuracy
###Code
print(accuracy_score(y_test,lr.predict(X_test)))
###Output
0.8522550544323484
###Markdown
![title](image/confusionmatrix.jpg) Check the confusion matrix and F1 score of our data
###Code
print(confusion_matrix(y_test,lr.predict(X_test)))
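# Note on layout: sklearn's confusion_matrix puts true classes in rows and predicted
# classes in columns, ordered alphabetically (['no', 'yes'] here), i.e.
# [[TN, FP],
#  [FN, TP]] with 'yes' as the positive class.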
###Output
[[806 158]
[127 838]]
###Markdown
![title](image/f1.png)
###Code
print(classification_report(y_test,lr.predict(X_test)))
print(accuracy_score(y_test_dum,lr_dummy.predict(X_test_dum)))
print(classification_report(y_test_dum,lr_dummy.predict(X_test_dum)))
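# Illustrative check: F1 is the harmonic mean of precision and recall,
# F1 = 2 * precision * recall / (precision + recall), computed here manually
# for the balanced model with 'yes' as the positive class.
from sklearn.metrics import precision_score, recall_score
y_pred_bal = lr.predict(X_test)
p = precision_score(y_test, y_pred_bal, pos_label='yes')
r = recall_score(y_test, y_pred_bal, pos_label='yes')
f1_manual = 2 * p * r / (p + r)  # should match the 'yes' row of the report above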
###Output
precision recall f1-score support
no 0.92 0.97 0.94 6655
yes 0.67 0.43 0.52 965
accuracy 0.90 7620
macro avg 0.79 0.70 0.73 7620
weighted avg 0.89 0.90 0.89 7620
###Markdown
5. Hyperparameter TuningAn F1 score of roughly 0.85 is not bad, but we want to push it further, so we will now tune the hyperparameters of our model.
###Code
lr.get_params().keys()
param = {'C' : [0.0001,0.001,0.01,0.1,1], 'class_weight' : ['balanced',None], 'dual' : [True,False], 'fit_intercept' : [True,False], 'intercept_scaling' : [0.0001,0.001,0.01,0.1,1], 'l1_ratio' : [0.0001,0.001,0.01,0.1,1], 'max_iter' : [1,100,1000], 'multi_class' : ['auto','ovr','multinomial'],'penalty' : ['l1','l2','elasticnet','none'] , 'random_state' : [46,48], 'solver' : ['newton-cg','lbfgs','liblinear','sag','saga'], 'tol' : [0.0001,0.001,0.01,0.1,1], 'verbose' : [0.0001,0.001,0.01,0.1,1], 'warm_start' : [True,False]}
lr_new = RandomizedSearchCV(LogisticRegression(),param,random_state=46,n_iter=20)
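# RandomizedSearchCV samples n_iter=20 random combinations from the parameter lists above,
# scores each with cross-validation (5-fold by default in recent scikit-learn) on the
# training data, and refits on the best combination, so lr_new.predict uses the best model.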
lr_new.fit(X_train,y_train)
lr_new.best_params_
###Output
_____no_output_____
###Markdown
Now we know the best hyperparameter settings, shown above.
###Code
print(accuracy_score(y_test,lr_new.predict(X_test)))
print(classification_report(y_test,lr_new.predict(X_test)))
display(df_new.iloc[46])
display(X.iloc[46])
print(f'The predicted class for the data in row 46 is {lr_new.predict(X.iloc[[46]])}')
###Output
The predicted class for the data in row 46 is ['no']
###Markdown
Analysis for gathering evidence of inter-layer kernel similarity (via Pearson correlation)
###Code
import time
import pathlib
from os.path import isfile
import math
import torch
import numpy as np
import models
from utils import *
from data import DataLoader
class config(object):
def __init__(self):
self.dataset = 'cifar10'
self.arch = 'resnet'
self.layers = 14
self.ckpt = 'ckpt_best.pth'
self.bn = False
self.width_mult = 1.0
self.cuda = True
self.types = ['max', 'min', 'avg', 'median', 'threshold']
self.threshold = 0.7
self.gpuids = [0]
def main():
global opt, arch_name, all_dist
opt = config()
# set model name
arch_name = set_arch_name(opt)
print('\n=> creating model \'{}\''.format(arch_name))
model = models.__dict__[opt.arch](data=opt.dataset, num_layers=opt.layers,
width_mult=opt.width_mult, batch_norm=opt.bn)
if model is None:
print('==> unavailable model parameters!! exit...\n')
exit()
# checkpoint file
ckpt_dir = pathlib.Path('checkpoint')
dir_path = ckpt_dir / arch_name / opt.dataset
ckpt_file = dir_path / opt.ckpt
if isfile(ckpt_file):
print('==> Loading Checkpoint \'{}\''.format(opt.ckpt))
checkpoint = load_model(model, ckpt_file,
main_gpu=None, use_cuda=False)
print('===> Loaded Checkpoint \'{}\' (epoch {})'.format(
opt.ckpt, checkpoint['epoch']))
print(f'\n==> Get and Calculate distribution of absolute PCC')
all_dist = get_dist_abs_pcc(model)
print('\n===> done')
return
else:
print('==> no Checkpoint found at \'{}\''.format(
opt.ckpt))
return
def get_dist_abs_pcc(model):
w_kernel = get_kernel(model, opt)
num_layer = len(w_kernel)
dist_dict = {}
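    # For every layer pair (i, j > i), each kernel of layer j is compared against all
    # kernels of layer i via the Pearson correlation coefficient
    #   PCC(a, b) = (a - mean(a)) . (b - mean(b)) / (||a - mean(a)|| * ||b - mean(b)||)
    # and its |PCC| values are then summarized per 'type': max / min / avg / median, or
    # the fraction exceeding opt.threshold.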
for type in opt.types:
dist_all = []
for i in tqdm(range(num_layer), ncols=80, unit='layer'):
ref_layer = torch.Tensor(w_kernel[i])
if opt.arch in hasDiffLayersArchs:
ref_layer = ref_layer.view(-1, 9)
else:
ref_layer = ref_layer.view(len(w_kernel[i]), -1)
ref_length = ref_layer.size()[0]
ref_mean = ref_layer.mean(dim=1, keepdim=True)
ref_norm = ref_layer - ref_mean
ref_norm_sq = (ref_norm * ref_norm).sum(dim=1)
ref_norm_sq_rt = torch.sqrt(ref_norm_sq)
dist = []
for j in range(i+1, num_layer):
cur_weight = torch.Tensor(w_kernel[j])
# change kernels to dw-kernel
if opt.arch in hasDiffLayersArchs:
cur_weight = cur_weight.view(-1, 9)
else:
cur_weight = cur_weight.view(len(w_kernel[j]), -1)
cur_length = cur_weight.size()[0]
cur_mean = cur_weight.mean(dim=1, keepdim=True)
cur_norm = cur_weight - cur_mean
cur_norm_sq_rt = torch.sqrt((cur_norm * cur_norm).sum(dim=1))
cur_dist = []
for k in range(cur_length):
numer = torch.matmul(cur_norm[k], ref_norm.T)
denom = ref_norm_sq_rt * cur_norm_sq_rt[k]
pcc = numer / denom
abs_pcc = torch.abs(pcc)
if type == 'max':
cur_dist.append(torch.max(abs_pcc).item())
elif type == 'min':
cur_dist.append(torch.min(abs_pcc).item())
elif type == 'avg':
cur_dist.append(torch.mean(abs_pcc).item())
elif type == 'median':
cur_dist.append(torch.median(abs_pcc).item())
elif type == 'threshold':
num_over_thr = torch.sum(torch.ge(abs_pcc, opt.threshold)).item()
ratio_over_thr = num_over_thr / len(abs_pcc)
cur_dist.append(ratio_over_thr)
dist.append(cur_dist)
dist_all.append(dist)
dist_dict[type] = dist_all
print(dist_dict.keys())
return dist_dict
if __name__ == '__main__':
start_time = time.time()
main()
elapsed_time = time.time() - start_time
print("====> total time: {:.2f}s".format(elapsed_time))
###Output
0%| | 0/13 [00:00<?, ?layer/s]
=> creating model 'resnet14'
==> Loading Checkpoint 'ckpt_best.pth'
===> Loaded Checkpoint 'ckpt_best.pth' (epoch 189)
==> Get and Calculate distribution of absolute PCC
100%|████████████████████████████████████████| 13/13 [00:04<00:00, 2.80layer/s]
100%|████████████████████████████████████████| 13/13 [00:04<00:00, 2.79layer/s]
100%|████████████████████████████████████████| 13/13 [00:05<00:00, 2.44layer/s]
100%|████████████████████████████████████████| 13/13 [00:06<00:00, 1.90layer/s]
100%|████████████████████████████████████████| 13/13 [00:06<00:00, 1.94layer/s]dict_keys(['max', 'min', 'avg', 'median', 'threshold'])
===> done
====> total time: 28.21s
###Markdown
Draw total histogram
###Code
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
# make directory
dir_plots = pathlib.Path('Histograms') / arch_name / opt.dataset / 'total'
dir_plots.mkdir(parents=True, exist_ok=True)
print('Drawing total histograms...\n')
for i in tqdm(range(len(all_dist['max'])), ncols=80, unit='layer'):
for j in range(len(all_dist['max'][i])):
cur_num = i + j + 1
num_pcc = len(all_dist['max'][i][j])
plt.style.use('seaborn-deep')
fig, ax = plt.subplots(figsize=(8,6), dpi=150)
list_ymax = []
for type in all_dist.keys():
if type == 'threshold':
continue
cur_dist = all_dist[type][i][j]
y_vals, x_vals, e_ = ax.hist(cur_dist, label=type, alpha=0.75, bins=min(num_pcc, 256))
ymax = round((max(y_vals) / num_pcc) + 0.02, 2)
list_ymax.append(ymax)
y_max = max(list_ymax)
ax.set_yticks(ticks=np.arange(0.0, y_max * num_pcc, 0.01 * num_pcc))
ax.set_ylim(ax.get_yticks()[0], ax.get_yticks()[-1])
ax.set_xlim(-0.01, 1.01)
ax.yaxis.set_major_formatter(PercentFormatter(xmax=num_pcc))
plt.legend(loc='upper right')
plt.savefig(dir_plots / 'abs_pcc_ref{:02d}_cur{:02d}.png'.format(i, cur_num),
bbox_inches='tight', dpi=150)
plt.clf()
print('\nDone!!!')
###Output
0%| | 0/13 [00:00<?, ?layer/s]Drawing total histograms...
100%|████████████████████████████████████████| 13/13 [02:06<00:00, 9.70s/layer]
Done!!!
###Markdown
Draw all histogram
###Code
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
for type in all_dist.keys():
# make directory
type_name = type
if type == 'threshold':
type_name += str(opt.threshold)
dir_plots = pathlib.Path('Histograms') / arch_name / opt.dataset / type_name
dir_plots.mkdir(parents=True, exist_ok=True)
print(f'Drawing all histograms (type: {type_name})...\n')
for i in tqdm(range(len(all_dist[type])), ncols=80, unit='layer'):
for j in range(len(all_dist[type][i])):
cur_dist = all_dist[type][i][j]
num_pcc = len(cur_dist)
min_pcc = min(cur_dist)
max_pcc = max(cur_dist)
med_pcc = np.median(cur_dist)
avg_pcc = np.mean(cur_dist)
var_pcc = np.var(cur_dist)
std_pcc = np.std(cur_dist)
textstr = '\n'.join((
r'$\min=%.6f$' % (min_pcc, ),
r'$\max=%.6f$' % (max_pcc, ),
r'$\mathrm{median}=%.6f$' % (med_pcc, ),
r'$\mu=%.6f$' % (avg_pcc, ),
r'$\sigma^{2}=%.6f$' % (var_pcc, ),
r'$\sigma=%.6f$' % (std_pcc, )))
plt.style.use('seaborn-deep')
fig, ax = plt.subplots(figsize=(8,6), dpi=150)
cur_num = i + j + 1
y_vals, x_vals, e_ = ax.hist(cur_dist, bins=min(num_pcc, 256))
y_max = round((max(y_vals) / num_pcc) + 0.02, 2)
ax.set_yticks(ticks=np.arange(0.0, y_max * num_pcc, 0.01 * num_pcc))
ax.set_ylim(ax.get_yticks()[0], ax.get_yticks()[-1])
ax.set_xlim(-0.01, 1.01)
ax.yaxis.set_major_formatter(PercentFormatter(xmax=num_pcc))
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='lightsteelblue', alpha=0.5)
# place a text box in upper left in axes coords
ax.text(0.03, 0.96, textstr, transform=ax.transAxes, fontsize=9,
verticalalignment='top', bbox=props)
plt.savefig(dir_plots / 'abs_pcc_ref{:02d}_cur{:02d}.png'.format(i, cur_num),
bbox_inches='tight', dpi=150)
plt.clf()
print('\nDone!!!')
###Output
0%| | 0/13 [00:00<?, ?layer/s]Drawing all histograms (type: max)...
100%|████████████████████████████████████████| 13/13 [00:52<00:00, 4.06s/layer]
0%| | 0/13 [00:00<?, ?layer/s]Drawing all histograms (type: min)...
100%|████████████████████████████████████████| 13/13 [00:49<00:00, 3.83s/layer]
0%| | 0/13 [00:00<?, ?layer/s]Drawing all histograms (type: avg)...
100%|████████████████████████████████████████| 13/13 [00:57<00:00, 4.42s/layer]
0%| | 0/13 [00:00<?, ?layer/s]Drawing all histograms (type: median)...
100%|████████████████████████████████████████| 13/13 [00:50<00:00, 3.86s/layer]
0%| | 0/13 [00:00<?, ?layer/s]Drawing all histograms (type: threshold0.4)...
100%|████████████████████████████████████████| 13/13 [00:51<00:00, 3.98s/layer]
Done!!!
###Markdown
Draw total merge histogram
###Code
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
# make directory
dir_plots = pathlib.Path('Histograms') / arch_name / opt.dataset / 'merged'
dir_plots.mkdir(parents=True, exist_ok=True)
print('Drawing total histograms...\n')
plt.style.use('seaborn-deep')
fig, axs = plt.subplots(nrows=opt.layers-1, ncols=opt.layers-1, figsize=(80,60), dpi=150)
for i in tqdm(range(len(all_dist['max'])), ncols=80, unit='layer'):
for j in range(len(all_dist['max'][i])):
cur_num = i + j + 1
num_pcc = len(all_dist['max'][i][j])
list_ymax = []
for type in all_dist.keys():
if type == 'threshold':
continue
cur_dist = all_dist[type][i][j]
y_vals, x_vals, e_ = axs[i,cur_num].hist(cur_dist, label=type,
alpha=0.75, bins=min(num_pcc, 256))
ymax = round((max(y_vals) / num_pcc) + 0.02, 2)
list_ymax.append(ymax)
y_max = max(list_ymax)
axs[i,cur_num].set_yticks(ticks=np.arange(0.0, y_max * num_pcc, 0.01 * num_pcc))
axs[i,cur_num].set_ylim(axs[i,cur_num].get_yticks()[0], axs[i,cur_num].get_yticks()[-1])
axs[i,cur_num].set_xlim(-0.01, 1.01)
axs[i,cur_num].yaxis.set_major_formatter(PercentFormatter(xmax=num_pcc))
if i == 0 and j == len(all_dist['max'][i]) - 1:
axs[i,cur_num].legend(loc='center', bbox_to_anchor=(1.2, 0), ncol=1, fontsize=15)
# plt.legend(loc='upper right')
# plt.tight_layout()
plt.savefig(dir_plots / 'total.png', bbox_inches='tight', dpi=150)
# plt.show()
plt.clf()
###Output
Drawing total histograms...
100%|████████████████████████████████████████| 13/13 [00:47<00:00, 3.63s/layer]
###Markdown
Draw histograms of weights for each layer
###Code
import time
import pathlib
from os.path import isfile
import math
import torch
import numpy as np
import models
from utils import *
from data import DataLoader
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
class config(object):
def __init__(self):
self.dataset = 'cifar10'
self.arch = 'resnet'
self.layers = 14
self.ckpt = 'ckpt_best.pth'
self.bn = False
self.width_mult = 1.0
self.cuda = True
self.types = ['max', 'min', 'avg', 'median', 'threshold']
self.threshold = 0.4
self.gpuids = [0]
def main():
opt = config()
# set model name
arch_name = set_arch_name(opt)
print('\n=> creating model \'{}\''.format(arch_name))
model = models.__dict__[opt.arch](data=opt.dataset, num_layers=opt.layers,
width_mult=opt.width_mult, batch_norm=opt.bn)
if model is None:
print('==> unavailable model parameters!! exit...\n')
exit()
# checkpoint file
ckpt_dir = pathlib.Path('checkpoint')
dir_path = ckpt_dir / arch_name / opt.dataset
ckpt_file = dir_path / opt.ckpt
if isfile(ckpt_file):
print('==> Loading Checkpoint \'{}\''.format(opt.ckpt))
checkpoint = load_model(model, ckpt_file,
main_gpu=None, use_cuda=False)
print('===> Loaded Checkpoint \'{}\' (epoch {})'.format(
opt.ckpt, checkpoint['epoch']))
else:
print('==> no Checkpoint found at \'{}\''.format(
opt.ckpt))
return
# make directory
dir_plots = pathlib.Path('Histograms') / arch_name / opt.dataset / 'conv_weights'
dir_plots.mkdir(parents=True, exist_ok=True)
w_kernel = get_kernel(model, opt)
num_layer = len(w_kernel)
print('Drawing convolution weights histogram...\n')
for i in tqdm(range(num_layer), ncols=80, unit='layer'):
cur_w = np.reshape(w_kernel[i], (-1)).tolist()
num_w = len(cur_w)
min_w = min(cur_w)
max_w = max(cur_w)
med_w = np.median(cur_w)
avg_w = np.mean(cur_w)
var_w = np.var(cur_w)
std_w = np.std(cur_w)
textstr = '\n'.join((
r'$\mathrm{\# weights}=%d$' % (num_w, ),
r'$\min=%.6f$' % (min_w, ),
r'$\max=%.6f$' % (max_w, ),
r'$\mathrm{median}=%.6f$' % (med_w, ),
r'$\mu=%.6f$' % (avg_w, ),
r'$\sigma^{2}=%.6f$' % (var_w, ),
r'$\sigma=%.6f$' % (std_w, )))
plt.style.use('seaborn-deep')
fig, ax = plt.subplots(figsize=(8,6), dpi=150)
y_vals, x_vals, e_ = ax.hist(cur_w, alpha=0.75, bins=min(num_w, 256))
y_max = round((max(y_vals) / num_w) + 0.02, 2)
ax.set_yticks(ticks=np.arange(0.0, y_max * num_w, 0.01 * num_w))
ax.set_ylim(ax.get_yticks()[0], ax.get_yticks()[-1])
ax.yaxis.set_major_formatter(PercentFormatter(xmax=num_w))
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='lightsteelblue', alpha=0.5)
# place a text box in upper left in axes coords
ax.text(0.03, 0.96, textstr, transform=ax.transAxes, fontsize=9,
verticalalignment='top', bbox=props)
plt.savefig(dir_plots / 'Weights_in_Layer{0:02d}.png'.format(i),
bbox_inches='tight', dpi=150)
plt.clf()
if __name__ == '__main__':
start_time = time.time()
main()
elapsed_time = time.time() - start_time
print("====> total time: {:.2f}s".format(elapsed_time))
torch.__version__
###Output
_____no_output_____
###Markdown
zip histogram folder
###Code
import zipfile
import os
plots_zip = zipfile.ZipFile('Histograms.zip', 'w')
for folder, subfolders, files in os.walk('Histograms'):
for file in files:
if file.endswith('.png'):
plots_zip.write(os.path.join(folder, file), os.path.relpath(os.path.join(folder, file), 'Histograms'), compress_type = zipfile.ZIP_DEFLATED)
plots_zip.close()
###Output
_____no_output_____
###Markdown
torch profiler test
###Code
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.autograd.profiler as profiler
def main():
global opt, arch_name, all_dist
opt = config()
# set model name
arch_name = set_arch_name(opt)
print('\n=> creating model \'{}\''.format(arch_name))
model = models.__dict__[opt.arch](data=opt.dataset, num_layers=opt.layers,
width_mult=opt.width_mult, batch_norm=opt.bn)
if model is None:
print('==> unavailable model parameters!! exit...\n')
exit()
if opt.cuda:
torch.cuda.set_device(opt.gpuids[0])
with torch.cuda.device(opt.gpuids[0]):
model = model.cuda()
model = nn.DataParallel(model, device_ids=opt.gpuids,
output_device=opt.gpuids[0])
cudnn.benchmark = True
# checkpoint file
ckpt_dir = pathlib.Path('checkpoint')
dir_path = ckpt_dir / arch_name / opt.dataset
ckpt_file = dir_path / opt.ckpt
if isfile(ckpt_file):
print('==> Loading Checkpoint \'{}\''.format(opt.ckpt))
checkpoint = load_model(model, ckpt_file,
main_gpu=opt.gpuids[0], use_cuda=opt.cuda)
print('===> Loaded Checkpoint \'{}\' (epoch {})'.format(
opt.ckpt, checkpoint['epoch']))
inputs = torch.randn(256, 3, 32, 32).cuda()
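        # Profile a single forward pass on a random CIFAR-10-sized batch (256x3x32x32);
        # profile_memory records allocator usage, record_shapes groups ops by input
        # shape, and the exported JSON trace can be viewed in chrome://tracing.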
with profiler.profile(use_cuda=True, profile_memory=True, record_shapes=True) as prof:
model(inputs)
print(prof.key_averages().table(sort_by="cuda_memory_usage"))
prof.export_chrome_trace("trace.json")
return
else:
print('==> no Checkpoint found at \'{}\''.format(
opt.ckpt))
return
if __name__ == '__main__':
start_time = time.time()
main()
elapsed_time = time.time() - start_time
print("====> total time: {:.2f}s".format(elapsed_time))
###Output
=> creating model 'resnet14'
==> Loading Checkpoint 'ckpt_best.pth'
===> Loaded Checkpoint 'ckpt_best.pth' (epoch 189)
-------------------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg CUDA total % CUDA total CUDA time avg CPU Mem Self CPU Mem CUDA Mem Self CUDA Mem Number of Calls
-------------------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- ---------------
empty 6.73% 660.294us 6.73% 660.294us 5.412us 1.19% 433.409us 3.553us 0 b 0 b 337.24 Mb 337.24 Mb 122
batch_norm 1.12% 110.248us 37.81% 3.707ms 247.149us 10.86% 3.961ms 264.053us 0 b 0 b 141.01 Mb 0 b 15
_batch_norm_impl_index 3.90% 382.729us 36.68% 3.597ms 239.799us 10.69% 3.900ms 260.028us 0 b 0 b 141.01 Mb 0 b 15
cudnn_batch_norm 24.87% 2.439ms 30.65% 3.005ms 200.337us 9.57% 3.490ms 232.653us 0 b 0 b 141.01 Mb 0 b 15
empty_like 0.88% 86.724us 1.90% 186.238us 12.416us 0.34% 124.929us 8.329us 0 b 0 b 141.00 Mb 0 b 15
conv2d 1.00% 97.848us 42.61% 4.178ms 278.515us 14.41% 5.256ms 350.432us 0 b 0 b 140.00 Mb 0 b 15
convolution 1.04% 102.310us 41.61% 4.080ms 271.992us 14.24% 5.196ms 346.377us 0 b 0 b 140.00 Mb 0 b 15
_convolution 4.58% 448.866us 40.57% 3.978ms 265.171us 14.06% 5.130ms 341.969us 0 b 0 b 140.00 Mb 0 b 15
cudnn_convolution 24.41% 2.393ms 33.35% 3.270ms 217.994us 12.69% 4.630ms 308.691us 0 b 0 b 140.00 Mb -56.15 Mb 15
adaptive_avg_pool2d 0.28% 27.292us 1.38% 135.294us 135.294us 0.29% 106.496us 106.496us 0 b 0 b 64.00 Kb 0 b 1
mean 0.47% 46.321us 0.57% 56.184us 56.184us 0.14% 52.225us 52.225us 0 b 0 b 64.00 Kb 0 b 1
resize_ 0.84% 82.585us 0.84% 82.585us 2.664us 0.17% 63.202us 2.039us 0 b 0 b 10.00 Kb 10.00 Kb 31
addmm 1.26% 124.013us 1.68% 164.586us 164.586us 0.29% 105.473us 105.473us 0 b 0 b 10.00 Kb 0 b 1
add 3.40% 333.778us 4.38% 429.530us 28.635us 0.58% 209.918us 13.995us 0 b 0 b 7.50 Kb 0 b 15
Scatter 0.50% 49.092us 1.18% 116.032us 116.032us 0.32% 115.936us 115.936us 0 b 0 b 0 b 0 b 1
chunk 0.13% 12.851us 0.65% 63.498us 63.498us 0.17% 63.168us 63.168us 0 b 0 b 0 b 0 b 1
size 7.52% 737.481us 7.52% 737.481us 2.269us 1.76% 642.136us 1.976us 0 b 0 b 0 b 0 b 325
split 0.18% 17.900us 0.48% 47.283us 47.283us 0.13% 47.648us 47.648us 0 b 0 b 0 b 0 b 1
narrow 0.11% 10.792us 0.27% 26.601us 26.601us 0.07% 26.528us 26.528us 0 b 0 b 0 b 0 b 1
slice 0.09% 8.758us 0.14% 13.529us 13.529us 0.04% 13.472us 13.472us 0 b 0 b 0 b 0 b 1
as_strided 0.14% 13.486us 0.14% 13.486us 3.372us 0.03% 10.078us 2.520us 0 b 0 b 0 b 0 b 4
to 0.04% 3.442us 0.04% 3.442us 3.442us 0.01% 3.296us 3.296us 0 b 0 b 0 b 0 b 1
contiguous 2.91% 285.138us 2.91% 285.138us 2.357us 0.66% 242.463us 2.004us 0 b 0 b 0 b 0 b 121
stride 1.48% 145.112us 1.48% 145.112us 2.303us 0.34% 125.348us 1.990us 0 b 0 b 0 b 0 b 63
is_complex 0.44% 42.684us 0.44% 42.684us 2.846us 0.08% 28.671us 1.911us 0 b 0 b 0 b 0 b 15
view 1.36% 133.020us 1.36% 133.020us 7.390us 0.21% 77.826us 4.324us 0 b 0 b 0 b 0 b 18
relu_ 5.98% 586.799us 7.61% 746.437us 57.418us 2.93% 1.069ms 82.235us 0 b 0 b 0 b 0 b 13
threshold_ 1.63% 159.638us 1.63% 159.638us 12.280us 1.96% 714.755us 54.981us 0 b 0 b 0 b 0 b 13
add_ 2.19% 215.185us 2.19% 215.185us 35.864us 1.54% 560.128us 93.355us 0 b 0 b 0 b 0 b 6
flatten 0.10% 10.197us 0.43% 42.042us 42.042us 0.08% 29.696us 29.696us 0 b 0 b 0 b 0 b 1
reshape 0.07% 6.650us 0.30% 29.779us 29.779us 0.06% 21.503us 21.503us 0 b 0 b 0 b 0 b 1
t 0.20% 20.056us 0.29% 28.476us 28.476us 0.05% 17.408us 17.408us 0 b 0 b 0 b 0 b 1
transpose 0.05% 5.389us 0.09% 8.420us 8.420us 0.02% 6.145us 6.145us 0 b 0 b 0 b 0 b 1
expand 0.06% 6.131us 0.09% 8.373us 8.373us 0.02% 6.144us 6.144us 0 b 0 b 0 b 0 b 1
-------------------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 9.805ms
CUDA time total: 36.480ms
====> total time: 0.16s
###Markdown
Heatmap of which earlier layer contains each kernel's most similar kernel
###Code
import time
import pathlib
from os.path import isfile
import math
import torch
import numpy as np
import models
from utils import *
from data import DataLoader
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
class config(object):
def __init__(self):
self.dataset = 'cifar10'
self.arch = 'resnet'
self.layers = 14
self.ckpt = 'ckpt_best.pth'
self.bn = False
self.width_mult = 1.0
self.cuda = True
self.threshold = 0.4
self.gpuids = [0]
def main():
global opt, arch_name, all_dist
opt = config()
# set model name
arch_name = set_arch_name(opt)
print('\n=> creating model \'{}\''.format(arch_name))
model = models.__dict__[opt.arch](data=opt.dataset, num_layers=opt.layers,
width_mult=opt.width_mult, batch_norm=opt.bn)
if model is None:
print('==> unavailable model parameters!! exit...\n')
exit()
# checkpoint file
ckpt_dir = pathlib.Path('checkpoint')
dir_path = ckpt_dir / arch_name / opt.dataset
ckpt_file = dir_path / opt.ckpt
if isfile(ckpt_file):
print('==> Loading Checkpoint \'{}\'..'.format(opt.ckpt))
checkpoint = load_model(model, ckpt_file,
main_gpu=None, use_cuda=False)
print('===> Loaded Checkpoint \'{}\' (epoch {})'.format(
opt.ckpt, checkpoint['epoch']))
print(f'\n==> Get and Calculate distribution of absolute PCC..')
all_dist = get_dist_abs_pcc(model)
print('\n===> done')
print('\n==> Draw histogram..')
histograms(all_dist)
return
else:
print('==> no Checkpoint found at \'{}\''.format(
opt.ckpt))
return
def get_dist_abs_pcc(model):
w_kernel = get_kernel(model, opt)
num_layer = len(w_kernel)
dist_all = []
for i in tqdm(range(num_layer), ncols=80, unit='layer'):
ref_layer = torch.Tensor(w_kernel[i])
if opt.arch in hasDiffLayersArchs:
ref_layer = ref_layer.view(-1, 9)
else:
ref_layer = ref_layer.view(len(w_kernel[i]), -1)
ref_length = ref_layer.size()[0]
ref_mean = ref_layer.mean(dim=1, keepdim=True)
ref_norm = ref_layer - ref_mean
ref_norm_sq_rt = torch.sqrt((ref_norm * ref_norm).sum(dim=1))
dist = []
for j in range(i+1, num_layer):
cur_weight = torch.Tensor(w_kernel[j])
# change kernels to dw-kernel
if opt.arch in hasDiffLayersArchs:
cur_weight = cur_weight.view(-1, 9)
else:
cur_weight = cur_weight.view(len(w_kernel[j]), -1)
cur_length = cur_weight.size()[0]
cur_mean = cur_weight.mean(dim=1, keepdim=True)
cur_norm = cur_weight - cur_mean
cur_norm_sq_rt = torch.sqrt((cur_norm * cur_norm).sum(dim=1))
cur_dist = []
for k in range(cur_length):
numer = torch.matmul(cur_norm[k], ref_norm.T)
denom = ref_norm_sq_rt * cur_norm_sq_rt[k]
pcc = numer / denom
abs_pcc = torch.abs(pcc)
cur_dist.append(torch.max(abs_pcc).item())
dist.append(cur_dist)
dist_all.append(dist)
return dist_all
def histograms(all_dist):
# make directory
dir_plots = pathlib.Path('Histograms') / arch_name / opt.dataset / 'heatmap_N_maxpcc'
dir_plots.mkdir(parents=True, exist_ok=True)
# calculate
histogram_dist = []
heatmap_dist = []
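    # For each target layer cur_num, scan every earlier layer i < cur_num and record,
    # per kernel, (a) the largest |PCC| found (histogram_dist) and (b) the index of the
    # earlier layer that contains that most-similar kernel (heatmap_dist).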
for j in range(len(all_dist[0])):
cur_num = j+1
max_nums = []
max_layer_nums = []
for k in range(len(all_dist[0][j])):
cur_max = 0.0
max_ref_layer_num = 0
for i in range(cur_num):
if cur_max < all_dist[i][j-i][k]:
cur_max = all_dist[i][j-i][k]
max_ref_layer_num = i
max_nums.append(cur_max)
max_layer_nums.append(max_ref_layer_num)
histogram_dist.append(max_nums)
heatmap_dist.append(max_layer_nums)
# draw heatmap
print('===> Draw heatmap...')
plt.clf()
num_layer = len(all_dist)
heatmap_cnt = np.zeros((num_layer,num_layer))
for i in range(1, num_layer):
for j in range(len(heatmap_dist[i-1])):
similar_layer_num = heatmap_dist[i-1][j]
heatmap_cnt[i][similar_layer_num] += 100
heatmap_cnt[i] = heatmap_cnt[i] / len(heatmap_dist[i-1])
heatmap_cnt = heatmap_cnt.transpose()
fig = plt.pcolor(heatmap_cnt, cmap='hot')
plt.xticks(np.arange(0.5, num_layer, 1), ["{}".format(x) for x in range(num_layer)])
plt.yticks(np.arange(0.5, num_layer, 1), ["{}".format(x) for x in range(num_layer)])
plt.xlabel('Source layer', fontsize=12)
plt.ylabel('Target layer', fontsize=12)
plt.colorbar()
plt.savefig(dir_plots / 'heatmap.png', figsize=(8,6), dpi=150, bbox_inches='tight')
plt.clf()
print('====> done')
# draw histograms
print('===> Draw histograms...')
for i in tqdm(range(len(histogram_dist)), ncols=80, unit='layer'):
cur_pcc = histogram_dist[i]
num_pcc = len(cur_pcc)
min_pcc = min(cur_pcc)
max_pcc = max(cur_pcc)
med_pcc = np.median(cur_pcc)
avg_pcc = np.mean(cur_pcc)
var_pcc = np.var(cur_pcc)
std_pcc = np.std(cur_pcc)
textstr = '\n'.join((
r'$\mathrm{\# weights}=%d$' % (num_pcc, ),
r'$\min=%.6f$' % (min_pcc, ),
r'$\max=%.6f$' % (max_pcc, ),
r'$\mathrm{median}=%.6f$' % (med_pcc, ),
r'$\mu=%.6f$' % (avg_pcc, ),
r'$\sigma^{2}=%.6f$' % (var_pcc, ),
r'$\sigma=%.6f$' % (std_pcc, )))
plt.style.use('seaborn-deep')
fig, ax = plt.subplots(figsize=(8,6), dpi=150)
y_vals, x_vals, e_ = ax.hist(cur_pcc, alpha=0.75, bins=min(num_pcc, 256))
y_max = round((max(y_vals) / num_pcc) + 0.02, 2)
ax.set_yticks(ticks=np.arange(0.0, y_max * num_pcc, 0.01 * num_pcc))
ax.set_ylim(ax.get_yticks()[0], ax.get_yticks()[-1])
ax.set_xlim(-0.01, 1.01)
ax.yaxis.set_major_formatter(PercentFormatter(xmax=num_pcc))
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='lightsteelblue', alpha=0.5)
# place a text box in upper left in axes coords
ax.text(0.03, 0.96, textstr, transform=ax.transAxes, fontsize=9,
verticalalignment='top', bbox=props)
plt.savefig(dir_plots / 'Max_PCCs_in_cur{:02d}.png'.format(i+1),
bbox_inches='tight', dpi=150)
plt.clf()
print('====> done')
if __name__ == '__main__':
start_time = time.time()
main()
elapsed_time = time.time() - start_time
print("====> total time: {:.2f}s".format(elapsed_time))
###Output
_____no_output_____
###Markdown
Load data
###Code
# Default toy model
paths = {
'01' : 'data/stream_01.csv'
}
streams = {s : pd.read_csv(p, dtype=int) for s, p in paths.items()}
# # Prophesee
# paths = {
# '01' : '../data_prophesee/moorea_2019-02-18_000_td_2928500000_2988500000_td.csv'
# }
# streams = {s : pd.read_csv(p, dtype=int) for s, p in paths.items()}
# for s, df in streams.items():
# df['y'] = 719 - df['y']
# # Stereo iniviation
# paths = {
# '01' : '../data_inivation/stream_1.csv',
# '02' : '../data_inivation/stream_2.csv'
# }
# streams = {s : pd.read_csv(p, dtype=int) for s, p in paths.items()}
streams['01'].info()
streams['01'][:10]
timestamp_min = 1e99
for s, df in streams.items():
print('\nStream: ', s)
print('x_min:', df['x'].min())
print('x_max:', df['x'].max())
print('y_min:', df['y'].min())
print('y_max:', df['y'].max())
print('Timestamp_min:', df['timestamp'].min())
print('Timestamp_max:', df['timestamp'].max())
print('Timestamp diff:', df['timestamp'].max() - df['timestamp'].min())
print('Average number of events per timestamp:',len(df.index)/(df['timestamp'].max() - df['timestamp'].min()))
timestamp_min = min(df['timestamp'].min(), timestamp_min)
###Output
Stream: 01
x_min: 40
x_max: 160
y_min: 40
y_max: 160
Timestamp_min: 0
Timestamp_max: 99999
Timestamp diff: 99999
Average number of events per timestamp: 1.000010000100001
###Markdown
Set starting timestamp to zero. The code below assumes that the starting timestamp is 0.
###Code
for s, df in streams.items():
df['timestamp'] -= timestamp_min
print(df['timestamp'].min())
###Output
0
###Markdown
Load event annotations if they already exist. Put them in the same folder as the original csv file, with ".csv" replaced by "_anno.csv" in the filename.
###Code
for s, df in streams.items():
path_anno = paths[s].replace('.csv', '_anno.csv')
if os.path.exists(path_anno):
print('Loading: {}'.format(path_anno))
df['anno'] = pd.read_csv(path_anno, dtype=int)
###Output
_____no_output_____
###Markdown
Initial studies. Take a sample of each stream.
###Code
num_sample = 100000
samples = {s:df.sample(n=num_sample) for s, df in streams.items()}
###Output
_____no_output_____
###Markdown
Let's see how event polarity is distributed as a function of timestamp.
###Code
df_hist = pd.DataFrame({
'Stream #1 (p=0)' : samples['01']['timestamp'][samples['01']['p']==0],
'Stream #1 (p=1)' : samples['01']['timestamp'][samples['01']['p']==1],
});
df_hist.plot_bokeh(kind='hist', bins=100, title='Sample of events as function of timestamp.', xlabel='Timestamp', xticks=[]);
###Output
_____no_output_____
###Markdown
Event video
###Code
event_tool = utils.EventTool(streams)
# bokeh part
app = Application(FunctionHandler(event_tool.app_function))
show(app)
###Output
_____no_output_____
###Markdown
travelTime Analysis. Different analyses of data collected using https://github.com/amadeuspzs/travelTime/blob/master/travelTime.py
###Code
%matplotlib inline
import pandas as pd, matplotlib.pyplot as plt, matplotlib.dates as dates, math
from datetime import datetime
from utils import find_weeks, find_days # custom
from pytz import timezone
from detect_peaks import detect_peaks
from ipywidgets import interact, interactive, fixed, interact_manual
###Output
_____no_output_____
###Markdown
Load data
###Code
filename = 'data/home-montauk.csv'
tz = timezone('US/Eastern')
data = pd.read_csv(filename)
data.head(5)
###Output
_____no_output_____
###Markdown
Convert the unix timestamp to a datetime object:
###Code
data.Timestamp=data.apply(lambda row: datetime.fromtimestamp(int(row['Timestamp']),tz),axis=1)
data.head(5)
###Output
_____no_output_____
###Markdown
Add a new column with the duration in hours
###Code
data['Duration(h)']=data.apply(lambda row: float(row['Duration(s)'])/(60*60),axis=1)
data.head(5)
###Output
_____no_output_____
###Markdown
Let's have a quick visualization:
###Code
ax = data.plot(x='Timestamp',y='Duration(h)')
###Output
_____no_output_____
###Markdown
Week by Week plots. Identify weeks in the dataset and plot them:
###Code
weeks = find_weeks(data)
num_cols = 2
num_rows = int(math.ceil(len(weeks) / float(num_cols)))
ylim = [min([min(data[week[0]:week[1]+1]['Duration(h)']) for week in weeks]),
max([max(data[week[0]:week[1]+1]['Duration(h)']) for week in weeks])]
plt.figure(1,figsize=(14, 7))
for e, week in enumerate(weeks):
ax = plt.subplot(num_rows,num_cols,e+1)
data[week[0]:week[1]].plot(x='Timestamp',y='Duration(h)',ax=ax)
ax.grid()
ax.set_ylim(ylim)
plt.tight_layout()
###Output
_____no_output_____
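###Markdown
The week boundaries come from `find_weeks` in the repo's custom `utils` module. Judging from the slicing above (`data[week[0]:week[1]+1]`), it appears to return (start_index, end_index) row-index pairs, one per calendar week. A rough, hypothetical sketch of such a helper (not the actual implementation):
###Code
# hypothetical sketch only -- the real find_weeks/find_days live in utils.py
def find_weeks_sketch(df):
    week_key = df['Timestamp'].apply(lambda ts: ts.isocalendar()[:2])  # (ISO year, ISO week)
    return [(idx.min(), idx.max()) for _, idx in sorted(df.groupby(week_key).groups.items())]
###Output
_____no_output_____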
###Markdown
Day plots. Pick a day to compare across weeks:
###Code
days = find_days(data,5) #Friday
num_cols = 3
num_rows = int(math.ceil(len(days) / float(num_cols)))
ylim = [min([min(data[day[0]:day[1]+1]['Duration(h)']) for day in days]),
max([max(data[day[0]:day[1]+1]['Duration(h)']) for day in days])]
plt.figure(1,figsize=(14, 7))
for e, day in enumerate(days):
ax = plt.subplot(num_rows,num_cols,e+1)
data[day[0]:day[1]].plot(x='Timestamp',y='Duration(h)',ax=ax)
ax.xaxis.set_major_formatter(dates.DateFormatter('%H',tz))
ax.xaxis.set_major_locator(dates.HourLocator(interval=1))
ax.grid()
ax.set_ylim(ylim)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Peak/valley detection. Detect highs and lows.
###Code
week = find_weeks(data)[2] # choose one week
week_data = data[week[0]:week[1]+1]
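# in detect_peaks, mpd is the minimum number of samples between detected peaks
# and mph is the minimum peak height (here, travel duration in hours)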
@interact(mpd=50,mph=1.0)
def peaks(mpd, mph):
indexes = detect_peaks(week_data['Duration(h)'],mpd=mpd,mph=mph,show=True)
for index in indexes:
        print(week_data.iloc[[index]].Timestamp.dt.strftime("%a %H:%M").values[0])
@interact(mpd=130)
def peaks(mpd):
indexes = detect_peaks(week_data['Duration(h)'],valley=True,mpd=mpd,show=True)
for index in indexes:
        print(week_data.iloc[[index]].Timestamp.dt.strftime("%a %H:%M").values[0])
###Output
_____no_output_____
###Markdown
Walmart sales data analysis. Aim: to predict aggregate monthly sales using regression models over the Walmart dataset.
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Loading Data into dataframes
###Code
train = pd.read_csv("./data/train.csv")
stores = pd.read_csv("./data/stores.csv")
features = pd.read_csv("./data/features.csv")
###Output
_____no_output_____
###Markdown
Exploring data. **Total rows are 8190.** **There are twelve columns.**
###Code
features.info() #can be shown on the web page
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 8190 entries, 0 to 8189
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Store 8190 non-null int64
1 Date 8190 non-null object
2 Temperature 8190 non-null float64
3 Fuel_Price 8190 non-null float64
4 MarkDown1 4032 non-null float64
5 MarkDown2 2921 non-null float64
6 MarkDown3 3613 non-null float64
7 MarkDown4 3464 non-null float64
8 MarkDown5 4050 non-null float64
9 CPI 7605 non-null float64
10 Unemployment 7605 non-null float64
11 IsHoliday 8190 non-null bool
dtypes: bool(1), float64(9), int64(1), object(1)
memory usage: 712.0+ KB
###Markdown
- Date is recognised as an "Object" by pandas.
- It means that it is not recognised as any pre-defined Python type.

Getting an overview of data
###Code
features.describe()
#can also be shown on the web page
# Analysis and calculations regarding quantitative columns
# Including object
# Date column
features.describe(include=object)
# Including object
# Date column
features.describe(include=bool)
features.count()
# Counting Null values
features.isna().sum()
print(len(stores))
stores.isna().sum()
print(len(train))
train.isna().sum()
###Output
421570
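###Markdown
Date was read in as a plain object (string) above. If later steps need real datetimes, a conversion along these lines would do it (illustrative only, not part of the original notebook):
###Code
# illustrative only -- parse the Date strings into proper datetimes
features['Date'] = pd.to_datetime(features['Date'])
features['Date'].dtype
###Output
_____no_output_____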
###Markdown
Import and quality check: wiki json data
###Code
file_dir = 'C://git repos/movies_ETL/data/'
file_name = f'{file_dir}wikipedia-movies.json'
with open(file_name, mode='r') as file:
wiki_movies_raw = json.load(file)
len(wiki_movies_raw)
# check quality of first records
wiki_movies_raw[:5]
# check quality of last records
wiki_movies_raw[-5:]
# check some records in the middle
wiki_movies_raw[3600:3605]
###Output
_____no_output_____
###Markdown
Import and quality check: kaggle
###Code
kaggle_metadata = pd.read_csv(f'{file_dir}movies_metadata.csv', low_memory=False)
ratings = pd.read_csv(f'{file_dir}ratings.csv')
kaggle_metadata.count()
kaggle_metadata.head()
kaggle_metadata.tail()
kaggle_metadata.sample(n=5)
ratings.count()
ratings.head()
ratings.tail()
ratings.sample(n=5)
wiki_movies_raw_df = pd.DataFrame(wiki_movies_raw)
wiki_movies_raw_df
# 193 cols!
wiki_movies_raw_df.count()
wiki_movies_raw_df.columns.tolist()
###Output
_____no_output_____
###Markdown
Wiki column analysis
###Code
# List comprehension to narrow down wiki set
wiki_movies = [movie for movie in wiki_movies_raw
if ('Director' in movie or 'Directed by' in movie)
and 'imdb_link' in movie]
len(wiki_movies)
wiki_movies_df = pd.DataFrame(wiki_movies)
wiki_movies_df
wiki_movies_df.columns.tolist()
#only four records? is that accurate?
wiki_movies_df.loc[wiki_movies_df['No. of episodes'].notnull()]
wiki_movies = [movie for movie in wiki_movies_raw
if ('Director' in movie or 'Directed by' in movie)
and 'imdb_link' in movie
and 'No. of episodes' not in movie]
len(wiki_movies)
def clean_movie(movie):
    movie = dict(movie) # dict() makes a shallow copy, so the edits below don't mutate the dict that was passed in
# Clean alternate titles
alt_titles = dict()
languages = ['Arabic',
'Cantonese',
'Chinese',
'French',
'Hangul',
'Hebrew',
'Hepburn',
'Japanese',
'Literally',
'Mandarin',
'McCune–Reischauer',
'Polish',
'Revised Romanization',
'Romanized',
'Russian',
'Simplified',
'Traditional',
'Yiddish']
for language in languages:
if language in movie:
alt_titles[language] = movie[language]
movie.pop(language)
if len(alt_titles) > 0:
movie['alt_titles'] = alt_titles
def change_column_name(old_name, new_name):
if old_name in movie:
movie[new_name] = movie.pop(old_name)
change_column_name('Country of origin', 'Country')
change_column_name('Directed by', 'Director(s)')
change_column_name('Director', 'Director(s)')
change_column_name('Distributed by', 'Distributor')
change_column_name('Edited by', 'Editor(s)')
change_column_name('Length', 'Running time')
change_column_name('Produced by', 'Producer(s)')
change_column_name('Producer', 'Producer(s)')
change_column_name('Written by', 'Writer(s)')
return movie
###Output
_____no_output_____
###Markdown
Clean up language data
###Code
# wiki_movies_df[wiki_movies_df['Arabic'].notnull()]
# skill drill: go through each column and determine which ones hold alternate titles
sorted(wiki_movies_df.columns.tolist())
# Full list (I think):
# Arabic
# Cantonese
# Chinese
# French
# Hangul
# Hebrew
# Hepburn
# Japanese
# Literally
# Mandarin
# McCune–Reischauer
# Polish
# Revised Romanization
# Romanized
# Russian
# Simplified
# Traditional
# Yiddish
# wiki_movies_df[wiki_movies_df['Arabic'].notnull()]
# wiki_movies_df[wiki_movies_df['Arabic'].notnull()]
# movie = clean_movie(wiki_movies_df.iloc[6838])
# movie
# Run clean movies function to get columns lined up etc
clean_movies = [clean_movie(movie) for movie in wiki_movies]
wiki_movies_df = pd.DataFrame(clean_movies)
sorted(wiki_movies_df.columns.tolist())
###Output
_____no_output_____
###Markdown
IMDB ID parsing
###Code
wiki_movies_df['imdb_link']
# it's regex time!! Remove dupes in accordance with standard IMDB ID
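# an IMDb ID in these links is the literal "tt" followed by seven digits, which is what the pattern below captures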
wiki_movies_df['imdb_id'] = wiki_movies_df['imdb_link'].str.extract(r'(tt\d{7})')
print(len(wiki_movies_df))
wiki_movies_df.drop_duplicates(subset='imdb_id', inplace=True)
print(len(wiki_movies_df))
wiki_movies_df.head()
###Output
7076
7033
###Markdown
Remove cols with limited usage (populated in fewer than 10% of rows)
###Code
# how many of these values are null in each col?
# This stuff *clearly* needs to be consolidated with some titles
# wiki_movies_df[[column for column in wiki_movies_df.columns]].count()
# [[column,wiki_movies_df[column].isnull().sum()] for column in wiki_movies_df.columns]
# [[column,wiki_movies_df[column].isnull().sum()] for column in wiki_movies_df.columns if wiki_movies_df[column].isnull().sum() > len(wiki_movies_df) * .9]
wiki_columns_to_keep = [column
for column in wiki_movies_df.columns
if wiki_movies_df[column].isnull().sum() < len(wiki_movies_df) * .9]
wiki_movies_df = wiki_movies_df[wiki_columns_to_keep]
wiki_movies_df
###Output
_____no_output_____
###Markdown
Get field types to see which need to be converted to int/date/etc
###Code
wiki_movies_df.dtypes
###Output
_____no_output_____
###Markdown
clean box office data
###Code
box_office = wiki_movies_df['Box office'].dropna()
box_office.count()
# not using this: a lambda function is used in the next cell instead
def is_not_a_string(x):
return type(x) != str
# 135 non-string
box_office[box_office.map(is_not_a_string)]
box_office = box_office.apply(lambda x: ' '.join(x) if type(x) == list else x)
box_office[box_office.map(lambda x: type(x) != str)]
import re # regex module
box_office[box_office.map(lambda x: type(x) != str)]
# capture refined million/billion results
form_one = r"\$\s*\d+\.?\d*\s*[mb]illi?on"
box_office.str.contains(form_one, flags=re.IGNORECASE, na=False).sum()
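# form two: amounts written out with thousands separators, e.g. "$123,456,789" (and not followed by million/billion)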
form_two = r"\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illi?on)"
box_office.str.contains(form_two, flags=re.IGNORECASE, na=False).sum()
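# collapse ranges like "$1.2–1.5 million" down to the upper figure so the extraction keeps a single value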
box_office = box_office.str.replace(r'\$.*[-—–](?![a-z])', '$', regex=True)
matches_form_one = box_office.str.contains(form_one, flags=re.IGNORECASE, na=False)
matches_form_two = box_office.str.contains(form_two, flags=re.IGNORECASE, na=False)
box_office[~matches_form_one & ~matches_form_two]
box_office.str.extract(f'({form_one}|{form_two})')
###Output
_____no_output_____
###Markdown
Parse dollars func: move me to the namespace / function declaration area somewhere else
###Code
def parse_dollars(s):
# if s is not a string, return NaN
if type(s) != str:
return np.nan
# if input is of form $###.# million
if re.match(r"\$\s*\d+\.?\d*\s*milli?on", s, flags=re.IGNORECASE):
# remove dollar sign and " million"
s = re.sub('\$|\s|[a-zA-Z]','',s)
# convert to float and multiply by a million
value = float(s) * 10**6
# return value
return value
# if input is of the form $###.# billion
elif re.match(r"\$\s*\d+\.?\d*\s*billi?on", s, flags=re.IGNORECASE):
# remove dollar sign and ' billion'
s = re.sub('\$|\s|[a-zA-Z]','',s)
# convert to float and multiply by a billion
value = float(s) * 10**9
# return value
return value
# if input is of the form $###,###,###
elif re.match(r"\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illi?on)", s, flags=re.IGNORECASE):
# remove dollar sign and commas
s = re.sub('\$|,','',s)
# convert to float
value = float(s)
# return value
return value
# otherwise, return NaN
else:
return np.nan
wiki_movies_df['box_office'] = box_office.str.extract(pat=f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)
wiki_movies_df[['box_office', 'Box office']]
wiki_movies_df.drop('Box office', axis=1, inplace=True)
wiki_movies_df.drop('box office', axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Clean the budget data
###Code
budget = wiki_movies_df['Budget'].dropna()
budget = budget.map(lambda x: ' '.join(x) if type(x) == list else x)
#remove specified ranges
budget = budget.str.replace(r'\$.*[-—–](?![a-z])' , '$', regex=True)
# for value in budget:
# print(value)
matches_form_one = budget.str.contains(form_one, flags=re.IGNORECASE, na=False)
matches_form_two = budget.str.contains(form_two, flags=re.IGNORECASE, na=False)
budget[~matches_form_one & ~matches_form_two]
# get rid of strings
budget = budget.str.replace(r'\[\d+\]\s*', '')
budget[~matches_form_one & ~matches_form_two]
wiki_movies_df['budget'] = budget.str.extract(f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)
wiki_movies_df.drop('Budget', axis=1, inplace=True)
wiki_movies_df['budget']
###Output
_____no_output_____
###Markdown
parse release date
###Code
# PARSE THE DATE
release_date = wiki_movies_df['Release date'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x)
release_date.head(50)
# match string one: Month Name, 1-2 digits, 4 digit year
date_pat_1 = r"\w*\s\d{1,2},\s\d{4}"
matches_pat_1 = release_date.str.contains(date_pat_1, flags=re.IGNORECASE, na=False)
# matches_pat_1.head(50)
# pattern 2: yyyy-dd-mm
date_pat_2 = r"\d{4}[-—–]\d{2}[-—–]\d{2}"
matches_pat_2 = release_date.str.contains(date_pat_2, flags=re.IGNORECASE, na=False)
# matches_pat_2.head(50)
# pattern 3: month name, year
date_pat_3 = r"\w{3,10}\s\d{4}"
matches_pat_3 = release_date.str.contains(date_pat_3, flags=re.IGNORECASE, na=False)
# release_date[matches_pat_3].sample(50)
# pattern 4: four letter year
date_pat_4 = r"\d{4}"
matches_pat_4 = release_date.str.contains(date_pat_4, flags=re.IGNORECASE, na=False)
# release_date[~matches_pat_1 & ~matches_pat_2 & ~matches_pat_3 & matches_pat_4].sample(50)
wiki_movies_df['release_date'] = pd.to_datetime(
release_date.str.extract(f'({date_pat_1}|{date_pat_2}|{date_pat_3}|{date_pat_4})')[0],
infer_datetime_format=True,
errors='coerce')
wiki_movies_df
###Output
_____no_output_____
###Markdown
Parse running time
###Code
# wiki_movies_df['Running time'].dropna().sample(50)
running_time = wiki_movies_df['Running time'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x)
# running_time
runtime_pat_0 = "\d+\s*ho?u?r?s?\s*\d*"
matches_pat_0 = running_time.str.contains(runtime_pat_0, flags=re.IGNORECASE, na=False)
runtime_pat_1 = r'\d{1,3}\s*min'
matches_pat_1 = running_time.str.contains(runtime_pat_1, flags=re.IGNORECASE, na=False)
# running_time[~matches_pat_0 & ~matches_pat_1]
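# capture groups: (hours, minutes) for "X hour(s) Y"-style strings, or a third group (minutes) for plain "Z min" strings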
running_time_extract = running_time.str.extract(r"(\d+)\s*ho?u?r?s?\s*(\d*)|(\d{1,3})\s*m")
running_time_extract
running_time_extract = running_time_extract.apply(lambda col: pd.to_numeric(col, errors='coerce')).fillna(0)
wiki_movies_df['running_time'] = running_time_extract.apply(lambda row: row[0]*60 + row[1] if row[2] == 0 else row[2], axis=1)
wiki_movies_df.drop('Running time', axis=1, inplace=True)
# wiki_movies_df[['Running time', 'running_time']].sample(50)
###Output
_____no_output_____
###Markdown
Clean the Kaggle data
###Code
kaggle_metadata
kaggle_metadata[["popularity", "poster_path", "production_companies", "status", "tagline", "vote_average"]]
# bad data here!! a few rows have summary text shifted into the wrong columns
kaggle_metadata["adult"].value_counts()
kaggle_metadata[~kaggle_metadata['adult'].isin(['True','False'])]
kaggle_metadata = kaggle_metadata[kaggle_metadata['adult'] == 'False'].drop('adult', axis='columns')
kaggle_metadata['video'].value_counts()
# convert to bool
kaggle_metadata['video'] = kaggle_metadata['video'] == 'True'
kaggle_metadata.dtypes
# no non-numeric budget cols. very nice!
kaggle_metadata[kaggle_metadata['budget'].str.isnumeric() == False]
kaggle_metadata['budget'] = kaggle_metadata['budget'].astype(int)
# no non-numeric ids either.
kaggle_metadata[kaggle_metadata['id'].str.isnumeric() == False]
kaggle_metadata['id'] = pd.to_numeric(kaggle_metadata['id'], errors='raise')
# why does popularity show up as non-numeric?
kaggle_metadata[kaggle_metadata['popularity'].str.isnumeric() == False]
kaggle_metadata.dtypes
kaggle_metadata['popularity'] = pd.to_numeric(kaggle_metadata['popularity'], errors='raise')
ratings.info(null_counts=True)
# looks reasonable, let's assign to timestamp value
pd.to_datetime(ratings['timestamp'], unit='s')
ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
ratings.head()
###Output
_____no_output_____
###Markdown
Poke around ratings value and run statistical analysis
###Code
pd.options.display.float_format = '{:20,.2f}'.format
ratings['rating'].plot(kind='hist')
ratings['rating'].describe()
###Output
_____no_output_____
###Markdown
Merge data sets
###Code
movies_df = pd.merge(wiki_movies_df, kaggle_metadata, on='imdb_id', suffixes=['_wiki','_kaggle'])
movies_df
# i thought i dropped the box office col?
# matching criteria on the above seems to be disregarding capitalization
# why is release date not in here?
movies_df.columns
# movies_df[['box office', 'box_office']]
###Output
_____no_output_____
###Markdown
Analysis of duplicative column merge

| wikipedia | kaggle | result |
| --- | --- | --- |
| title_wiki | title_kaggle | drop wiki |
| running_time | runtime | keep kaggle, fill in with wiki data if missing |
| budget_wiki | budget_kaggle | keep kaggle; fill in zeroes with wiki data |
| box_office | revenue | keep kaggle; fill in zeroes with wiki data |
| release_date_wiki | release_date_kaggle | drop wikipedia |
| Language | original_language | drop wikipedia |
| Production company(s) | production_companies | drop wikipedia |
###Code
movies_df[['title_wiki', 'title_kaggle']]
movies_df[movies_df['title_wiki'] != movies_df['title_kaggle']][['title_wiki','title_kaggle']]
# going with kaggle data based on clarity above. vetting quality
movies_df[(movies_df['title_kaggle'] == '') | (movies_df['title_kaggle'].isnull())]
# eventually: build a scatterplot to determine runtime.
movies_df.fillna(0).plot(x='running_time', y='runtime', kind='scatter')
movies_df.fillna(0).plot(x='budget_wiki', y='budget_kaggle', kind='scatter')
# movies_df[['budget_wiki', 'budget_kaggle']].dtypes
movies_df.fillna(0).plot(x='box_office', y='revenue', kind='scatter')
movies_df.fillna(0)[movies_df['box_office'] < 10**9].plot(x='box_office', y='revenue', kind='scatter')
# why is this not working?
movies_df[['release_date_wiki', 'release_date_kaggle']].fillna(0).plot(x='release_date_wiki', y='release_date_kaggle', style='.')
movies_df[(movies_df['release_date_wiki'] > '1996-01-01') & (movies_df['release_date_kaggle'] < '1965-01-01')]
movies_df = movies_df.drop(movies_df[(movies_df['release_date_wiki'] > '1996-01-01') & (movies_df['release_date_kaggle'] < '1965-01-01')].index)
# too many nulls in wiki here! release date patterns are not great
# movies_df[movies_df['release_date_wiki'].isnull()]
movies_df[movies_df['release_date_kaggle'].isnull()]
# lists in language col unsupported by value counts. mutable objects not hashable
# movies_df['Language'].value_counts()
movies_df['Language'].apply(lambda x: tuple(x) if type(x) == list else x).value_counts()
movies_df['original_language'].value_counts(dropna=False)
# movies_df[['Production company ', 'productioncompanies ', 'production_companies']].sample(50)
movies_df[['production_companies']].notnull().value_counts()
movies_df.columns
###Output
_____no_output_____
###Markdown
Init column merge based on analysis
###Code
movies_df.drop(columns=['title_wiki', 'release_date_wiki', 'Language', 'Productioncompany ', 'Productioncompanies '], inplace=True)
movies_df.columns
def fill_missing_kaggle_data(df, kaggle_column, wiki_column):
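    # where the Kaggle value is 0 (missing), fall back to the Wikipedia value, then drop the now-redundant Wikipedia column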
df[kaggle_column] = df.apply(
lambda row: row[wiki_column] if row[kaggle_column] == 0 else row[kaggle_column]
, axis = 1)
df.drop(columns=wiki_column, inplace=True)
fill_missing_kaggle_data(movies_df, kaggle_column='runtime', wiki_column='running_time')
fill_missing_kaggle_data(movies_df, kaggle_column='budget_kaggle', wiki_column='budget_wiki')
fill_missing_kaggle_data(movies_df, kaggle_column='revenue', wiki_column='box_office')
# next value_count check
for col in movies_df.columns:
lists_to_tuples = lambda x: tuple(x) if type(x) == list else x
value_counts = movies_df[col].apply(lists_to_tuples).value_counts(dropna=False)
num_values = len(value_counts)
if num_values == 1:
print(col)
# skill drill: replace above with list comprehension
lists_to_tuples = lambda x: tuple(x) if type(x) == list else x
print([col for col in movies_df.columns if len(movies_df[col].apply(lists_to_tuples).value_counts(dropna=False)) == 1])
movies_df['video'].value_counts(dropna=False)
# movies_df.loc[:, ['imdb_id','id','title_kaggle','original_title','tagline','belongs_to_collection','url','imdb_link',
# 'runtime','budget_kaggle','revenue','release_date_kaggle','popularity','vote_average','vote_count',
# 'genres','original_language','overview','spoken_languages','Country',
# 'production_companies','production_countries','Distributor',
# 'Producer(s)','Director','Starring','Cinematography','Editor(s)','Writer(s)','Composer(s)','Based on'
# ]]\
movies_df.loc[:, ['imdb_id','id','title_kaggle','original_title','tagline','belongs_to_collection','url','imdb_link',
'runtime','budget_kaggle','revenue','release_date_kaggle','popularity','vote_average','vote_count',
'genres','original_language','overview','spoken_languages','Country',
'production_companies','production_countries','Distributor',
'Producer(s)','Starring','Cinematography','Editor(s)','Writer(s)','Based on'
]]
movies_df.rename({'id':'kaggle_id',
'title_kaggle':'title',
'url':'wikipedia_url',
'budget_kaggle':'budget',
'release_date_kaggle':'release_date',
'Country':'country',
'Distributor':'distributor',
'Producer(s)':'producers',
'Director':'director',
'Starring':'starring',
'Cinematography':'cinematography',
'Editor(s)':'editors',
'Writer(s)':'writers',
'Composer(s)':'composers',
'Based on':'based_on'
}, axis='columns', inplace=True)
movies_df
###Output
_____no_output_____
###Markdown
Merge rating data
###Code
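# count how many users gave each (movieId, rating) pair, then pivot so every rating value becomes its own column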
rating_counts = ratings.groupby(['movieId','rating'], as_index=False).count() \
.rename({'userId':'count'}, axis=1) \
.pivot(index='movieId', columns='rating', values='count')
rating_counts.columns = ['rating_' + str(col) for col in rating_counts.columns]
rating_counts
movies_with_ratings_df = pd.merge(movies_df, rating_counts, left_on='kaggle_id', right_index=True, how='left')
movies_with_ratings_df[rating_counts.columns] = movies_with_ratings_df[rating_counts.columns].fillna(0)
movies_with_ratings_df.sample(50)
###Output
_____no_output_____
###Markdown
Import data to Postgres
###Code
# CONNECTION STRING TEMPLATE: "postgresql://[user]:[password]@[location]:[port]/[database]"
db_string = f"postgresql://postgres:{db_password}@127.0.0.1:5432/movie_data"
engine = create_engine(db_string)
movies_df.to_sql(name='movies', con=engine, if_exists='replace')
ratings.count()
### Export data to Postgres db.
# do not run me yet. 26m+ records, so 27 chunks
rows_imported = 0
for data in pd.read_csv(f'{file_dir}ratings.csv', chunksize=1000000):
start_time = time.time()
print(f"Importing rows {rows_imported} through {rows_imported + len(data)}...", end='')
data.to_sql(name='ratings', con=engine, if_exists='append')
rows_imported += len(data)
print(f'Complete. {time.time() - start_time} seconds elapsed.')
###Output
Importing rows 0 through 1000000...Complete. 24.349302768707275 seconds elapsed.
Importing rows 1000000 through 2000000...Complete. 24.126830339431763 seconds elapsed.
Importing rows 2000000 through 3000000...Complete. 23.85095453262329 seconds elapsed.
Importing rows 3000000 through 4000000...Complete. 23.537501335144043 seconds elapsed.
Importing rows 4000000 through 5000000...Complete. 29.568344831466675 seconds elapsed.
Importing rows 5000000 through 6000000...Complete. 25.218552112579346 seconds elapsed.
Importing rows 6000000 through 7000000...Complete. 26.988013982772827 seconds elapsed.
Importing rows 7000000 through 8000000...Complete. 29.5069739818573 seconds elapsed.
Importing rows 8000000 through 9000000...Complete. 28.95828080177307 seconds elapsed.
Importing rows 9000000 through 10000000...Complete. 26.633777141571045 seconds elapsed.
Importing rows 10000000 through 11000000...Complete. 25.27502179145813 seconds elapsed.
Importing rows 11000000 through 12000000...Complete. 25.12024974822998 seconds elapsed.
Importing rows 12000000 through 13000000...Complete. 29.118918657302856 seconds elapsed.
Importing rows 13000000 through 14000000...Complete. 29.758862495422363 seconds elapsed.
Importing rows 14000000 through 15000000...Complete. 27.570640325546265 seconds elapsed.
Importing rows 15000000 through 16000000...Complete. 27.411759614944458 seconds elapsed.
Importing rows 16000000 through 17000000...Complete. 25.803762674331665 seconds elapsed.
Importing rows 17000000 through 18000000...Complete. 24.299041509628296 seconds elapsed.
Importing rows 18000000 through 19000000...Complete. 25.393224477767944 seconds elapsed.
Importing rows 19000000 through 20000000...Complete. 26.41280436515808 seconds elapsed.
Importing rows 20000000 through 21000000...Complete. 25.334113836288452 seconds elapsed.
Importing rows 21000000 through 22000000...Complete. 22.99605417251587 seconds elapsed.
Importing rows 22000000 through 23000000...Complete. 26.940511465072632 seconds elapsed.
Importing rows 23000000 through 24000000...Complete. 26.68484115600586 seconds elapsed.
Importing rows 24000000 through 25000000...Complete. 25.545124530792236 seconds elapsed.
Importing rows 25000000 through 26000000...Complete. 26.5877742767334 seconds elapsed.
Importing rows 26000000 through 26024289...Complete. 0.6482179164886475 seconds elapsed.
###Markdown
How many questions (including dimensions) and how many participants?
###Code
df.shape # 55 participants, 90 dimensions
###Output
_____no_output_____
###Markdown
Demographics
###Code
# Age (Q3)
print(df['Q3'].value_counts(normalize=True))
# Gender (Q9)
print(df['Q1'].value_counts(normalize=True))
# Income (Q5)
print(df['Q5'].value_counts(normalize=True))
# Race (Q9)
print(df['Q9'].value_counts(normalize=True))
# Education (Q4)
print(df['Q4'].value_counts(normalize=True))
# Marital Status (Q7)
print(df['Q7'].value_counts(normalize=True))
# Work related use (need to find the correlation between
# this answer and the people who said they were students)
print(df['Q38'].value_counts(normalize=True))
# How much do you use your phone
print(df['Q16'].value_counts(normalize=True))
# How long have you had a smartphone?
print(df['Q24'].value_counts(normalize=True))
# How many smartphones do you have?
print(df['Q25'].value_counts(normalize=True))
# How many other devices do you have?
print(df['Q26'].value_counts(normalize=True))
# Cell plan on your phone
# Phone Time
print(df['Q27_1'].value_counts(normalize=True))
# Text
print(df['Q27_2'].value_counts(normalize=True))
# Data Access
print(df['Q27_3'].value_counts(normalize=True))
# Security demographics
# Do you apply security measures to ensure privacy/security?
print(df['Q28'].value_counts(normalize=True))
print(df['Q29'])
# hide notifications
print(df['Q30'].value_counts(normalize=True))
# camera limit
print(df['Q33'].value_counts(normalize=True))
# biometrics security
print(df['Q36'].value_counts(normalize=True))
# smartlock features
print(df['Q57'].value_counts(normalize=True))
# get a permissiveness based on group
import math
groups = [["Q21_0_"+str(i)+"_RANK" for i in range(1,11)],
["Q21_1_"+str(i)+"_RANK" for i in range(1,11)],
["Q21_2_"+str(i)+"_RANK" for i in range(1,11)]]
# a single person's permissiveness score in group0
# perm_score = [j for j in (df[i][0] for i in group0) if not math.isnan(j)]
# all of them now
# g0list = [[j for j in (df[i][k] for i in group0) if not math.isnan(j)] for k in range(len(df.index))]
# average for each person
# person_avg_perm = [(sum(k)/len(k)) for k in [[j for j in (df[i][k] for i in group1) if not math.isnan(j)] for k in range(len(df.index))] if len(k) != 0]
# average for an entire group
# sum([(sum(k)/len(k)) for k in [[j for j in (df[i][k] for i in group1) if not math.isnan(j)] for k in range(len(df.index))] if len(k) != 0])/len(df.index)
# average for each group
# [(sum([(sum(k)/len(k)) for k in [[j for j in (df[i][k] for i in l) if not math.isnan(j)] for k in range(len(df.index))] if len(k) != 0])/len(df.index)) for l in groups]
# normalize it
avg_norm_perms = [m/5 for m in [(sum([(sum(k)/len(k)) for k in [[j for j in (df[i][k] for i in l) if not math.isnan(j)] for k in range(len(df.index))] if len(k) != 0])/len(df.index)) for l in groups]]
print(avg_norm_perms)
# so like, cool, there is a directional decrease in permissiveness for these groups.
# But I notice that this question could be interpreted in multiple ways:
# I am more concerned with the security of a stranger because they are unknown and could really mess things up;
# I am more concerned with the security of my parents because I am more permissive with them, meaning they have
# access to more sensitive data.
# run anova
from scipy.stats import f_oneway
from scipy.stats import kruskal
values = [[(sum(k)/len(k)) for k in [[j for j in (df[i][k] for i in l) if not math.isnan(j)] for k in range(len(df.index))] if len(k) != 0] for l in groups]
g0 = values[0]
g1 = values[1]
g2 = values[2]
f_oneway(g0,g1,g2)
# get a permissiveness score based on subgroup
# sum of permissiveness for a single subgroup for a single person
# sum(i for i in[df[j[0]][0] for j in groups] if not math.isnan(i))
# sum of permissiveness for a single subgroup for all people
# sum(i for i in[df[j[0]][k] for j in groups for k in range(len(df.index))]if not math.isnan(i))
# now for all subgroups
#[sum(i for i in[df[j[l]][k] for j in groups for k in range(len(df.index))]if not math.isnan(i)) for l in range(len(groups[0]))]
all_subs = [[i for i in[df[j[l]][k] for j in groups for k in range(len(df.index))]if not math.isnan(i)] for l in range(len(groups[0]))]
# ([sum(i)/len(i) for i in all_subs])
# average them
# [m/len(df.index) for m in [sum(i for i in[df[j[l]][k] for j in groups for k in range(len(df.index))]if not math.isnan(i)) for l in range(len(groups[0]))]]
#find any floor or ceilings
print("number of floors and ceilings: " + str(len([j for j in [sum(i)/len(i) for i in all_subs] if j < 2 or j > 4]))+"\n")
# normalize it
avg_sub_perms = [j/5 for j in [sum(i)/len(i) for i in all_subs]]
group_names = ["Parent/Guardian\t","Sibling\t\t","Child\t\t","Other Family\t","Friend\t\t","Roommate\t","Significant Other","Work Associate\t","Acquaintance\t","Stranger\t"]
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(avg_sub_perms[i]))
# After child, work associate is more permissive than any other? Interesting. I could have done this calculation
# wrong, or it could be because 50% of our respondents are ages 18-24. Child I understand because it's a parent-child
# relationship, but after that it's work associate, then roommate. I'm surprised that significant other is so much lower
# and that friend is the lowest. But this could be explained in a few ways. The relationship you have with your co-workers
# is different than with your friends. If I give my phone to a friend, they will probably look up porn or prank call/text
# someone, so I'll be less lenient with them.
# In terms of SO, at 18-24 very few peers have a stable long-term SO, which could be why it's lower than I expected.
# But I could be biased, as I would give my SO free rein of my phone.
# Two main reasons why acquaintance is so high come to mind. First, you don't know them well and vice versa, so
# it's not like you're going to go snooping on an acquaintance's phone the way you might with a friend.
# Second, acquaintances don't know our passwords or phone layout like we do, so people trust the built-in phone security.
# Take for example an acquaintance asking to use our phone: I know they can't access any personal data because
# 1) I will probably be right there watching and 2) they can't access my bank or anything like that. My friends, on the
# other hand, could easily know my password or have their fingerprint in my phone, so they could. And I'm more likely to
# leave my phone out of sight with a friend than with an acquaintance. This is self-reported, so there is some bias.
# The same goes for a roommate, though my roommates have always been my friends, so I'm biased.
# Also, now looking at it, I don't think we defined permissiveness on the survey, and these values could be skewed
# because they are put in different groups. So maybe add a weight to each of these depending on which group you put
# them in?
# If we wanted to expand this for future work, we could look at which factors relate to the self-reported permissiveness.
# calculate anova between subgroups
g0 = all_subs[0]
g1 = all_subs[1]
g2 = all_subs[2]
g3 = all_subs[3]
g4 = all_subs[4]
g5 = all_subs[5]
g6 = all_subs[6]
g7 = all_subs[7]
g8 = all_subs[8]
g9 = all_subs[9]
f_oneway(g0,g1,g2,g3,g4,g5,g6,g7,g8,g9)
#F
datafrom2009=[0.5416666667,0.325,0.2685185185,0.1825396825,0.9166666667,0.2803030303,0.1086956522,0.64,0.3066666667,0.1666666667,0.1666666667,0.08666666667,0.9761904762,0.9285714286,0.4841269841,0.2619047619,0.5227272727,0.4772727273,0.4318181818,0.3257575758,0.6066666667,0.5666666667,0.4347826087,0.14,0.5733333333,0.5733333333,0.5362318841,0.34,0.34,0.34,0.8684210526,0.798245614,0.298245614,0.1052631579,0.5972222222,0.5972222222,0.3680555556,0.475308642,0.3888888889,0.2307692308,0.2037037037,0.2037037037,0.6866666667,0.25,0.1730769231,0.696969697,0.5151515152,0.5]
our_data=all_subs[0]+all_subs[1]+all_subs[2]+all_subs[3]+all_subs[4]+all_subs[5]+all_subs[6]+all_subs[7]+all_subs[8]+all_subs[9]
od = [i/5 for i in our_data]
new_groups = ["family\t","friends\t","aquaintance","stranger","work\t"]
ourfam=[i/5 for i in all_subs[0]+all_subs[1]+all_subs[2]+all_subs[3]+all_subs[6]]
theirfam=[0.5416666667,0.325,0.2685185185,0.64,0.9285714286,0.4772727273,0.9761904762,0.6066666667,0.5666666667,0.4347826087
,0.34,0.8684210526,0.5972222222,0.475308642,0.2307692308,0.6866666667,0.696969697]
of=[i/5 for i in all_subs[4]+all_subs[5]]
tf=[0.2803030303,0.3066666667,0.4841269841,0.4318181818,0.5733333333,0.798245614,0.3888888889,0.25,0.5151515152]
oa=[i/5 for i in all_subs[8]]
ta=[0.2619047619,0.34,0.1052631579]
os=[i/5 for i in all_subs[9]]
ts=[0.14]
ow=[i/5 for i in all_subs[7]]
tw=[0.1825396825,0.1666666667,0.5227272727,0.34,0.298245614,0.2037037037,0.1730769231,0.5]
all_stuff=[(ourfam,theirfam),(of,tf),(oa,ta),(os,ts),(ow,tw)]
print("Group\t\tanova\t\tkruskal")
for i in range(len(new_groups)):
print(new_groups[i] +"\t" + str(f_oneway(all_stuff[i][0], all_stuff[i][1])[1])+"\t"+str(kruskal(all_stuff[i][0], all_stuff[i][1])[1]))
print("as a whole")
print("Anova, Kruskal:" +"\t" + str(f_oneway(datafrom2009,od)[1]) + "\t" + str(kruskal(datafrom2009,od)[1]))
#high or low usage
dic = {"Hardly ever":0, "Rarely":1, "Sometimes":2, "Often":3, "Very often":4}
questions = ["Q13_1", "Q13_2","Q13_3","Q13_4","Q13_5","Q13_6","Q13_7","Q13_8","Q13_9","Q13_10","Q13_11","Q13_12","Q13_13","Q13_14","Q13_15","Q13_16"]
g0 = [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) < 7]
g1 = [i for i in range(len(df.index)) if 7 <= sum([dic[df[j][i]] for j in questions]) < 13]
g2 = [i for i in range(len(df.index)) if 13 <= sum([dic[df[j][i]] for j in questions]) < 20]
g3 = [i for i in range(len(df.index)) if 20 <= sum([dic[df[j][i]] for j in questions]) < 26]
g4 = [i for i in range(len(df.index)) if 26 <= sum([dic[df[j][i]] for j in questions]) < 33]
g5 = [i for i in range(len(df.index)) if 33 <= sum([dic[df[j][i]] for j in questions]) < 39]
g6 = [i for i in range(len(df.index)) if 39 <= sum([dic[df[j][i]] for j in questions]) < 45]
g7 = [i for i in range(len(df.index)) if 45 <= sum([dic[df[j][i]] for j in questions]) < 52]
g8 = [i for i in range(len(df.index)) if 52 <= sum([dic[df[j][i]] for j in questions]) < 58]
g9 = [i for i in range(len(df.index)) if 58 <= sum([dic[df[j][i]] for j in questions]) < 65]
gs = [g0, g1, g2, g3, g4, g5, g6, g7, g8, g9]
for i in range(len(gs)):
    print("group"+str(i)+": "+str(len(gs[i])))
hu = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) >= 42]]if not math.isnan(i)] for l in range(len(groups[0]))]
lu = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) < 42]]if not math.isnan(i)] for l in range(len(groups[0]))]
avg_hu_perms = [j/5 for j in [sum(i)/len(i) for i in hu]]
avg_lu_perms = [j/5 for j in [sum(i)/len(i) for i in lu]]
print("Group\t\t\tHigh Usage\t\tLow Usage")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(avg_hu_perms[i]) + "\t" + str(avg_lu_perms[i]))
# anova between high-usage and low-usage groups
print("Group\t\t\tanova\t\t\tkruskal")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(f_oneway(hu[i], lu[i])[1])+"\t"+str(kruskal(hu[i], lu[i])[1]))
# anova as a whole
# idk how to weight it, so I just took the average of all permissiveness values for how permissive someone was as a whole
print("anova: "+str(f_oneway([sum(i)/len(i) for i in hu], [sum(i)/len(i) for i in lu])[1]))
print("kruskal: "+str(kruskal([sum(i)/len(i) for i in hu], [sum(i)/len(i) for i in lu])[1]))
# getting permissiveness based on security answers
dic = {"Yes, I use it":1, "Yes, I don't use it":0, "No":0}
questions = ["Q30", "Q33","Q34","Q36","Q57"]
ms = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) >= 3]]if not math.isnan(i)] for l in range(len(groups[0]))]
ls = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) < 3]]if not math.isnan(i)] for l in range(len(groups[0]))]
avg_ms_perms = [j/5 for j in [sum(i)/len(i) for i in ms]]
avg_ls_perms = [j/5 for j in [sum(i)/len(i) for i in ls]]
print("Group\t\t\tMore secure\t\tLess Secure")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(avg_ms_perms[i]) + "\t" + str(avg_ls_perms[i]))
# anova between more secure and less secure groups
print("Group\t\t\tanova\t\t\tkruskal")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(f_oneway(ms[i], ls[i])[1])+"\t"+str(kruskal(ms[i], ls[i])[1]))
# anova as a whole
# idk how to weight it, so I just took the average of all permissiveness values for how permissive someone was as a whole
print("anova: "+str(f_oneway([sum(i)/len(i) for i in ms], [sum(i)/len(i) for i in ls])[1]))
print("kruskal: "+str(kruskal([sum(i)/len(i) for i in ms], [sum(i)/len(i) for i in ls])[1]))
# getting permissiveness based on data plan answers
dic = {"Unlimited":0, "Limited":1}
questions = ["Q27_3"]
ld = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) >= 1]]if not math.isnan(i)] for l in range(len(groups[0]))]
ud = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) < 1]]if not math.isnan(i)] for l in range(len(groups[0]))]
avg_ld_perms = [j/5 for j in [sum(i)/len(i) for i in ld]]
avg_ud_perms = [j/5 for j in [sum(i)/len(i) for i in ud]]
print("Group\t\t\tlimited data\t\tunlimited data")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(avg_ld_perms[i]) + "\t" + str(avg_ud_perms[i]))
# anova between unlimited data and limited data groups
print("Group\t\t\tanova\t\t\tkruskal")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(f_oneway(ud[i], ld[i])[1])+"\t"+str(kruskal(ud[i], ld[i])[1]))
# anova as a whole
# idk how to weight it, so I just took the average of all permissiveness values for how permissive someone was as a whole
print("anova: "+str(f_oneway([sum(i)/len(i) for i in ud], [sum(i)/len(i) for i in ld])[1]))
print("kruskal: "+str(kruskal([sum(i)/len(i) for i in ud], [sum(i)/len(i) for i in ld])[1]))
# getting permissiveness based on type of phone
dic = {"Apple device (eg: any iPhone)":1, "Android device (eg: any Samsung, LG, Motorola, OnePlus, Pixel, etc)":0, "Nokia":2}
questions = ["Q11"]
ap = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) == 1]]if not math.isnan(i)] for l in range(len(groups[0]))]
an = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) == 0]]if not math.isnan(i)] for l in range(len(groups[0]))]
nk = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) == 2]]if not math.isnan(i)] for l in range(len(groups[0]))]
avg_ap_perms = [j/5 for j in [sum(i)/len(i) for i in ap]]
avg_an_perms = [j/5 for j in [sum(i)/len(i) for i in an]]
print("Group\t\t\tapple\t\t\tandroid")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(avg_ap_perms[i]) + "\t" + str(avg_an_perms[i]))
# anova between Apple and Android users
print("Group\t\t\tanova\t\t\tkruskal")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(f_oneway(an[i], ap[i])[1])+"\t"+str(kruskal(an[i], ap[i])[1]))
# anova as a whole
# idk how to weight it, so I just took the average of all permissiveness values for how permissive someone was as a whole
print("anova: "+str(f_oneway([sum(i)/len(i) for i in ap], [sum(i)/len(i) for i in an])[1]))
print("kruskal: "+str(kruskal([sum(i)/len(i) for i in ap], [sum(i)/len(i) for i in an])[1]))
# getting permissiveness based on work-related phone use or not
dic = {"A lot":4, "A great deal":5, "A moderate amount":3, "A little":1, "None at all":0}
questions = ["Q38"]
#index of people who are more or less secure
#more_secure = [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) >= 3]
#less_secure = [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) < 3]
wp = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) >= 3]]if not math.isnan(i)] for l in range(len(groups[0]))]
nw = [[i for i in[df[j[l]][k] for j in groups for k in [i for i in range(len(df.index)) if sum([dic[df[j][i]] for j in questions]) < 3]]if not math.isnan(i)] for l in range(len(groups[0]))]
avg_wp_perms = [j/5 for j in [sum(i)/len(i) for i in wp]]
avg_nw_perms = [j/5 for j in [sum(i)/len(i) for i in nw]]
print("Group\t\t\tPhone for Work\t\tPhone for not work")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(avg_wp_perms[i]) + "\t" + str(avg_nw_perms[i]))
# anova between work-phone and non-work-phone groups
print("Group\t\t\tanova\t\t\tkruskal")
for i in range(len(group_names)):
print(group_names[i] +"\t" + str(f_oneway(wp[i], nw[i])[1])+"\t"+str(kruskal(wp[i], nw[i])[1]))
# anova as a whole
# idk how to weight it, so I just took the average of all permissiveness values for how permissive someone was as a whole
print("anova: "+str(f_oneway([sum(i)/len(i) for i in wp], [sum(i)/len(i) for i in nw])[1]))
print("kruskal: "+str(kruskal([sum(i)/len(i) for i in wp], [sum(i)/len(i) for i in nw])[1]))
# plot frequency bins for each group
# ie, how often each guest was put into each bin
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
guests = [
"Parent/Guardian",
"Sibling",
"Child",
"Other Family",
"Friend",
"Roommate",
"Significant Other",
"Work Associate",
"Acquaintance",
"Stranger"
]
g = {x:[0,0,0,0] for x in guests}
for guest in guests:
for idx,row in enumerate(df[['Q21_'+str(i)+'_GROUP' for i in range(3)]].replace(np.nan, '', regex=True).values):
if guest in row[0]:
g[guest][0] += 1
elif guest in row[1]:
g[guest][1] += 1
elif guest in row[2]:
g[guest][2] += 1
else:
g[guest][3] += 1
#
#
# todo
# https://stackoverflow.com/questions/14270391/python-matplotlib-multiple-bars
x = ['Q21_0_GROUP', 'Q21_1_GROUP', 'Q21_2_GROUP', 'Q21_3_GROUP']
fig, ax = plt.subplots(1, len(guests), figsize=(24,6))
for i in range(len(guests)):
ax[i].bar(x, g[guests[i]])
ax[i].title.set_text(guests[i])
ax[i].set_ylim([0, 35])
#
# test if there's any significant difference in people who classify
# specific guests into the three different groups
# with respect to security awareness
# "security awareness" generated as follows
# Q30, Q33, Q34, Q36, Q57 -> 2,1,0
# take average, divide by 2
sec_awareness = np.zeros(len(df)) # index i == participant
for idx,row in enumerate(df[['Q30', 'Q33', 'Q34', 'Q36', 'Q57']].replace({"Yes, I use it":2,"Yes, I don't use it":1,"No":0}).values):
sec_awareness[idx] = sum(row)/5
# print(sec_awareness)
# do kruskal-wallis
# for each guest type X
# generate 3 populations -- X in gropu1, X in group2, X in group3
# k-w versus "security awareness" score
guests = [
"Parent/Guardian",
"Sibling",
"Child",
"Other Family",
"Friend",
"Roommate",
"Significant Other",
"Work Associate",
"Acquaintance",
"Stranger"
]
for guest in guests:
print(guest)
g_conc = [] # "Definitely have security and privacy concerns when sharing" -> Q21_0_GROUP
g_mild = [] # "Some security and privacy concerns when sharing" -> Q21_1_GROUP
g_none = [] # "Definitely do not have security and privacy concerns when sharing" -> Q21_2_GROUP
for idx,row in enumerate(df[['Q21_'+str(i)+'_GROUP' for i in range(3)]].replace(np.nan, '', regex=True).values):
if guest in row[0]:
g_conc.append(sec_awareness[idx])
elif guest in row[1]:
g_mild.append(sec_awareness[idx])
elif guest in row[2]:
g_none.append(sec_awareness[idx])
#
#
print(len(g_conc))
print(len(g_mild))
print(len(g_none))
print(kruskal(g_conc, g_mild, g_none))
print()
#
# -> no statistically significant difference
# in population means (medians?) between those who categorized
# <guest> in <concern-level> and "security awareness" score
# possible to-dos
# -- calculate "security awareness" another way
# -- just use score to each individual question, instead of score average
# -- do random-forest regression with the input feature vector
# == the classifications of each guest, and the output
# == whatever security awareness score we care about
###Output
_____no_output_____
###Markdown
How to Pivot and Plot Data With Pandas

A big challenge of working with data is manipulating its format for the analysis at hand. To make things a bit more difficult, the "proper format" can depend on what you are trying to analyze, meaning we have to know how to melt, pivot, and transpose our data.

In this article, we will discuss how to create a pivot table of aggregated data in order to make a stacked bar visualization of 2019 airline market share for the top 10 destination cities. All the code for this analysis is available on GitHub [here](https://github.com/stefmolin/airline-market-share-analysis) and can also be run using [this](https://mybinder.org/v2/gh/stefmolin/airline-market-share-analysis/master) Binder environment.

We will be using 2019 flight statistics from the United States Department of Transportation’s Bureau of Transportation Statistics (available [here](https://www.transtats.bts.gov/DL_SelectFields.asp?gnoyr_VQ=FMF&QO_fu146_anzr=Nv4%20Pn44vr45)). It contains 321,409 rows and 41 columns:
###Code
import pandas as pd
df = pd.read_csv('865214564_T_T100_MARKET_ALL_CARRIER.zip')
df.shape
###Output
_____no_output_____
###Markdown
Each row contains monthly-aggregated information on flights operated by a variety of airline carriers, including both passenger and cargo service. Note that the columns are all in uppercase at the moment:
###Code
df.columns
###Output
_____no_output_____
###Markdown
To make the data easier to work with, we will transform the column names into lowercase using the `rename()` method:
###Code
df = df.rename(lambda x: x.lower(), axis=1)
df.head()
###Output
_____no_output_____
###Markdown
For our analysis, we want to look at passenger airlines to find the 2019 market share of the top 5 carriers (based on total number of passengers in 2019). To do so, we first need to figure out which carriers were in the top 5. Remember, the data contains information on all types of flights, but we only want passenger flights, so we first query `df` for flights marked `F` in the `class` column (note that we need backticks to reference this column because `class` is a reserved keyword). Then, we group by the carrier name and sum each carrier's passenger counts. Finally, we call the `nlargest()` method to return only the top 5:
###Code
# download flight class meanings at
# https://www.transtats.bts.gov/Download_Lookup.asp?Y11x72=Y_fReiVPR_PYNff
top_airlines = df.query('`class` == "F"')\
.groupby('unique_carrier_name').passengers.sum()\
.nlargest(5)
top_airlines
###Output
_____no_output_____
###Markdown
Note that the top 5 airlines also run flights of a different class, so we can't remove this filter for the rest of our analysis:
###Code
df.loc[
df.unique_carrier_name.isin(top_airlines.index), 'class'
].value_counts()
###Output
_____no_output_____
###Markdown
Now, we can create the pivot table; however, we cannot filter down to the top 5 airlines just yet, because, in order to get market share, we need to know the numbers for the other airlines as well. Therefore, we will build a pivot table that calculates the total number of passengers each airline flew to each destination city. To do so, we specify that we want the following in our call to the `pivot_table()` method:

- Unique values in the `dest_city_name` column should be used as our row labels (the `index` argument)
- Unique values in the `unique_carrier_name` column should be used as our column labels (the `columns` argument)
- The values used for the aggregation should come from the `passengers` column (the `values` argument), and they should be summed (the `aggfunc` argument)
- Row/column subtotals should be calculated (the `margins` argument)

Finally, since we want to look at the top 10 destinations, we will sort the data in descending order using the `All` column, which contains the total passengers flown to each city in 2019 for all carriers combined (this was created by passing in `margins=True` in the call to the `pivot_table()` method):
###Code
pivot = df.query('`class` == "F"').pivot_table(
index='dest_city_name',
columns='unique_carrier_name',
values='passengers',
aggfunc='sum',
margins=True
).sort_values('All', ascending=False)
pivot.head(10)
###Output
_____no_output_____
###Markdown
Notice that the first row in the previous result is not a city, but rather, the subtotal by airline, so we will drop that row before selecting the first 10 rows of the sorted data:
###Code
pivot = pivot.drop('All').head(10)
###Output
_____no_output_____
###Markdown
Selecting the columns for the top 5 airlines now gives us the number of passengers that each airline flew to the top 10 cities. Note that we use `sort_index()` so that the resulting columns are displayed in alphabetical order:
###Code
pivot[top_airlines.sort_index().index]
###Output
_____no_output_____
###Markdown
Our data is now in the right format for a stacked bar plot showing passenger counts. To make this visualization, we call the `plot()` method on the previous result and specify that we want horizontal bars (`kind='barh'`) and that the different airlines should be stacked (`stacked=True`). Note that since we have the destinations sorted in descending order, Atlanta will be plotted on the bottom, so we call `invert_yaxis()` on the `Axes` object returned by `plot()` to flip the order:
###Code
from matplotlib import ticker
ax = pivot[top_airlines.sort_index().index].plot(
kind='barh', stacked=True,
title='2019 Passenger Totals\n(source: BTS)'
)
ax.invert_yaxis() # put destinations with more passengers on top
# formatting
ax.set(xlabel='number of passengers', ylabel='destination')
ax.legend(title='carrier')
# shows x-axis in millions instead of scientific notation
ax.xaxis.set_major_formatter(ticker.EngFormatter())
# removes the top and right lines from the figure to make it less boxy
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
###Output
_____no_output_____
###Markdown
One interesting thing to notice from the previous result is that Seattle is a top 10 destination, yet the top 5 carriers don't appear to be contributing as much to it as the rest of the destination cities, which are pretty much in the same order with the exception of Los Angeles. This could cause some confusion, so let's add in another stacked bar called `Other` that contains the passenger totals for all airlines not in the top 5. Since we calculated the `All` column when we created the pivot table, all we have to do here is add a column to our filtered data that contains the `All` column minus the top 5 airlines' passenger totals summed together. The plotting code only needs to be modified to shift the legend further out:
###Code
ax = pivot[top_airlines.sort_index().index].assign(
Other=lambda x: pivot.All - x.sum(axis=1)
).plot(
kind='barh', stacked=True,
title='2019 Passenger Totals\n(source: BTS)'
)
ax.invert_yaxis()
# formatting
ax.set(xlabel='number of passengers', ylabel='destination')
ax.xaxis.set_major_formatter(ticker.EngFormatter())
# shift legend to not cover the bars
ax.legend(title='carrier', bbox_to_anchor=(0.7, 0), loc='lower left')
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
###Output
_____no_output_____
###Markdown
We can now clearly see that Atlanta had the most passengers arriving in 2019 and that flights from Delta Air Lines were the biggest contributor. But, we can do better by representing market share as the percentage of all passengers arriving in each destination city. In order to do that, we need to modify our pivot table by dividing each airline's passenger counts by the `All` column:
###Code
normalized_pivot = \
pivot[top_airlines.sort_index().index].apply(lambda x: x / pivot.All)
normalized_pivot
###Output
_____no_output_____
###Markdown
Before plotting, we will also sort the bars by the total market share of the top 5 carriers. Viewing this information as percentages gives us a better idea of which carriers dominate which markets: Delta has by far the largest share of Atlanta and American Airlines has over 60% of Dallas/Fort Worth, while United has strong footholds in several markets:
###Code
# determine sort order
market_share_sorted = normalized_pivot.sum(axis=1).sort_values()
ax = normalized_pivot.loc[market_share_sorted.index,:].plot(
kind='barh', stacked=True, xlim=(0, 1),
title='2019 Market Share\n(source: BTS)'
)
# formatting
ax.set(xlabel='percentage of all passengers', ylabel='destination')
ax.legend(title='carrier', bbox_to_anchor=(0.7, 0), loc='lower left')
# show x-axis as percentages
ax.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=1))
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
###Output
_____no_output_____
###Markdown
As we noticed earlier, Seattle sticks out. The top 5 carriers have more than 50% combined market share for 9 out of the top 10 destinations, but not for Seattle. Using our pivot table, we can see that Alaska Airlines is the top carrier for Seattle:
###Code
pivot.loc['Seattle, WA', :].nlargest(6)
###Output
_____no_output_____
###Markdown
Initialize
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
plt.rcParams['figure.figsize'] = [14, 10]
import seaborn as sns
sns.set()
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, average_precision_score, f1_score, precision_score, recall_score, plot_confusion_matrix
from sklearn.utils import compute_class_weight
###Output
_____no_output_____
###Markdown
Load Data
###Code
# Load the Data
wine_df = pd.read_csv('data/winequality-red.csv', delimiter=";")
wine_df.head()
###Output
_____no_output_____
###Markdown
Descriptive Analysis
###Code
# Check to see the datatypes of the columns and null values
wine_df.info()
# Statistical Definition of each feature
wine_df.describe()
###Output
_____no_output_____
###Markdown
Distribution Analysis
###Code
# Distribution analysis for each feature
wine_df.hist(bins=50)
plt.show()
###Output
_____no_output_____
###Markdown
Correlation Analysis
###Code
# Correlation Analysis
pearson_correlation = wine_df.corr(method="pearson")
spearman_correlation = wine_df.corr(method="spearman")
kendall_correlation = wine_df.corr(method="kendall")
# Plot
fig, axes = plt.subplots(1, 3, constrained_layout=True, figsize=(22, 8), sharex=True, sharey=True)
sns.heatmap(pearson_correlation, annot=True, ax=axes[0], cbar=True)
axes[0].title.set_text("Pearson")
sns.heatmap(spearman_correlation, annot=True, ax=axes[1], cbar=True)
axes[1].title.set_text("Spearman")
sns.heatmap(kendall_correlation, annot=True, ax=axes[2], cbar=True)
axes[2].title.set_text("Kendall")
fig.suptitle("Correlations Analysis")
plt.show()
# Top ranked features
pearson_selected_names = set(pearson_correlation.quality.sort_values(ascending=False)[:8].index.values.tolist())
spearman_selected_names = set(spearman_correlation.quality.sort_values(ascending=False)[:8].index.values.tolist())
kendall_selected_names = set(kendall_correlation.quality.sort_values(ascending=False)[:8].index.values.tolist())
# Unique features
unique_features = set.intersection(pearson_selected_names, spearman_selected_names, kendall_selected_names)
print(f"Unique Features: {unique_features}")
# Get features and targets
features_df = wine_df[unique_features].drop("quality", axis=1)
targets_df = wine_df[unique_features].quality.to_frame()
###Output
/var/folders/n6/n9jt2skd19lgk9k6kh7bnzhr0000gn/T/ipykernel_88278/984932227.py:2: FutureWarning: Passing a set as an indexer is deprecated and will raise in a future version. Use a list instead.
features_df = wine_df[unique_features].drop("quality", axis=1)
/var/folders/n6/n9jt2skd19lgk9k6kh7bnzhr0000gn/T/ipykernel_88278/984932227.py:3: FutureWarning: Passing a set as an indexer is deprecated and will raise in a future version. Use a list instead.
targets_df = wine_df[unique_features].quality.to_frame()
###Markdown
Dimensionality Reduction
###Code
# Dimensionality Reduction
pca2D = PCA(n_components=2).fit_transform(features_df.values)
pca3D = PCA(n_components=3).fit_transform(features_df.values)
fig = plt.figure(figsize=(22, 12), constrained_layout=True)
fig.suptitle("PCA")
# PCA with 2 Components
ax = fig.add_subplot(2, 2, 1)
ax.title.set_text("PCA with 2 Components")
sns.scatterplot(
x=pca2D[:, 0], y=pca2D[:, 1],
hue=np.squeeze(targets_df.values),
palette=sns.color_palette("hls", 6),
legend="full",
alpha=0.9,
ax=ax
)
# PCA with 3 Components
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.title.set_text("PCA with 3 Components")
ax.scatter(
xs=pca3D[:, 0],
ys=pca3D[:, 1],
zs=pca3D[:, 2],
c=np.squeeze(targets_df.values),
cmap='tab10',
)
plt.show()
###Output
_____no_output_____
###Markdown
Normalization and Splits
###Code
# Train Test Split
X_train, X_test, y_train, y_test = train_test_split(features_df.values, np.squeeze(targets_df.values), test_size=0.2, shuffle=True)
# Encode the Labels
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
y_train = label_encoder.transform(y_train)
y_test = label_encoder.transform(y_test)
# Scale the data
features_scaler = MinMaxScaler()
features_scaler.fit(X_train)
X_train = features_scaler.transform(X_train)
X_test = features_scaler.transform(X_test)
###Output
_____no_output_____
###Markdown
Random Forest
###Code
classifier = RandomForestClassifier(n_estimators=500, class_weight="balanced", n_jobs=-1)
print()
print("----------Training Random Forest----------")
print()
classifier.fit(X_train, y_train)
# Prediction of Testset
y_pred = classifier.predict(X_test)
# Confusion Matrix
acc_score = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred, average="micro")
ps = precision_score(y_test, y_pred, average="micro")
rs = recall_score(y_test, y_pred, average="micro")
cm = confusion_matrix(y_test, y_pred)
print('{:<23}: {:>10.2f}'.format('Accuracy Score', acc_score), sep='')
print('{:<23}: {:>10.2f}'.format('f1 Score', f1), sep='')
print('{:<23}: {:>10.2f}'.format('Precision Score', ps), sep='')
print('{:<23}: {:>10.2f}'.format('Recall Score', rs), sep='')
print()
plot_confusion_matrix(classifier, X_test, y_test)
plt.show()
###Output
----------Training Random Forest----------
Accuracy Score : 0.70
f1 Score : 0.70
Precision Score : 0.70
Recall Score : 0.70
###Markdown
Historical Market DevelopmentLet's first calculate the historical annual appreciation for real estate in Norway and the S&P500.
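Concretely, the annualized appreciation is the compound annual growth rate over the 15-year window, $$r = \left(\frac{P_{15}}{P_0}\right)^{1/15} - 1$$ where $P_0$ is the price 15 years ago and $P_{15}$ is today's price; this is exactly what the `math.pow(..., 1/15) - 1` expressions in the next cell compute.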
###Code
real_estate_price_15_y = 73339
real_estate_price_0_y = 27308
snp500_price_15_y = 3235
snp500_price_0_y = 1186
hist_growth_rate_real_estate = math.pow(real_estate_price_15_y/real_estate_price_0_y, 1/15) - 1
hist_growth_rate_snp500 = math.pow(snp500_price_15_y/snp500_price_0_y, 1/15) - 1
print(f'Annual growth rate real estate for the last 15 years: {hist_growth_rate_real_estate:.2%}, '
f'annual growth rate S&P500 for the last 15 years: {hist_growth_rate_snp500:.2%}.')
###Output
Annual growth rate real estate for the last 15 years: 6.81%, annual growth rate S&P500 for the last 15 years: 6.92%.
###Markdown
Real Estate Profit At Various Overpayment LevelsHere, we fix the annual real estate growth rate to its historic levels and compare the real estate profit for various overpayment amounts.
###Code
to_plot = history[(history.growth_rate_real_estate==round(hist_growth_rate_real_estate, 2))
& (history.growth_rate_stocks==history.growth_rate_stocks.min())
& (history.investment_amount==0)]
for scenario in to_plot.scenario_name.unique():
subplot = history[history.scenario_name==scenario]
plt.plot(subplot.month, subplot.current_profit_real_estate, label=scenario)
plt.legend()
plt.xlabel('month')
plt.ylabel('profit_nok')
plt.title('Real Estate Profit At Various Overpayment Levels')
plt.show()
###Output
_____no_output_____
###Markdown
As we can see, the overpayment amount influences primarily the number of payment terms. However, the profit is primarily influenced by time. This means that the compounding effect of the annual growth in the real estate market overpowers the reduction of expenses by paying less interest. Real Estate Profit At Various Growth Levels With No OverpaymentWe will now fix the overpayment to 0 and explore the real estate profit at various annual real estate growth levels.
###Code
to_plot = history[(history.mortgage_overpayment_amount==0)
& (history.growth_rate_stocks==history.growth_rate_stocks.min())
& (history.investment_amount==0)]
for scenario in to_plot.scenario_name.unique():
subplot = history[history.scenario_name==scenario]
plt.plot(subplot.month, subplot.current_profit_real_estate, label=scenario)
plt.legend()
plt.xlabel('month')
plt.ylabel('profit_nok')
plt.title('Real Estate Profit At Various Annual Growth Levels')
plt.show()
###Output
_____no_output_____
###Markdown
As expected, the real estate market growth rate has a significant influence on the real estate profit. If we extrapolate using the historic annual growth rates for the last 15 years, the expected profit is 2.8 MLN NOK in 30 years (without any mortgage overpayment). Real Estate And Stock Profit At Various Growth RatesHere we look at scenarios where the mortgage overpayment amount equals the investment amount in stocks. However, the annual growth rates for stocks and the real estate market may differ. In addition, we are looking at the profit after 12 months.
###Code
n_months = 12
fig = plt.figure(figsize=(14, 10))
for i, amount in enumerate(list(range(5000, 20001, 5000))):
to_plot = history[(history.mortgage_overpayment_amount==amount)
& (history.investment_amount==amount)
& (history.month==n_months)].copy()
ax = plt.subplot(2, 2, i+1)
to_plot_pivot = pd.pivot_table(data=to_plot,
index='growth_rate_real_estate',
columns='growth_rate_stocks',
values='profit_ratio')
sns.heatmap(to_plot_pivot, annot=True)
plt.title(f'Investment Amount: {amount}')
plt.suptitle(f'Real Estate Profit As Percentage Of Stock Profit After {n_months} Months \nOver Various Annual Growth Rates For Real Estate And Stocks')
plt.subplots_adjust(hspace=0.3)
plt.show()
###Output
_____no_output_____
###Markdown
As expected, higher real estate market growth leads to higher real estate profit and vice versa. However, there are certain cases where even lower stock market growth outperforms a slightly higher one. Profit Ratio Over Equal Investment Levels And Growth RatesHere we look at scenarios where the real estate growth rate equals the stock market growth rate and the mortgage overpayment equals the stock investment amount. We will plot the profitability ratio over time.
###Code
to_plot = history[(history.growth_rate_real_estate==history.growth_rate_stocks)
& (history.mortgage_overpayment_amount==history.investment_amount)
& (history.investment_amount>0)
& (history.growth_rate_stocks==round(hist_growth_rate_real_estate, 2))
& (history.month<=60)].copy()
for amount in to_plot.investment_amount.unique():
subplot = to_plot[to_plot.investment_amount==amount]
plt.plot(subplot.month, subplot.profit_ratio, label=amount)
plt.legend()
plt.xlabel('month')
plt.ylabel('profit_nok')
plt.title('Profit Ratio Over Time')
plt.show()
###Output
_____no_output_____
###Markdown
At equal investment / overpayment levels and equal annual growth rates, stocks outperform real estate in the first year. However, after a year and a half, in the scenario with the smallest investment, real estate outperforms stocks. For all other scenarios (where the investment amount is greater than or equal to 10,000 NOK), real estate starts outperforming stocks after 3 years. Equal Growth Rate, No Overpayment, Maximum InvestmentHere we look at scenarios where the growth rates are equal to the historic ones, there is no mortgage overpayment and the investment is set to its maximum of 20000 NOK.
###Code
to_plot = history[(history.growth_rate_real_estate==round(hist_growth_rate_real_estate, 2))
& (history.mortgage_overpayment_amount==0)
& (history.investment_amount==2e4)
& (history.growth_rate_stocks==round(hist_growth_rate_snp500, 2))].copy()
plt.plot(to_plot.month, to_plot.cumulative_profit_stocks, label='stocks')
plt.plot(to_plot.month, to_plot.current_profit_real_estate, label='real_estate')
plt.legend()
plt.xlabel('month')
plt.ylabel('profit_nok')
plt.title('Profit Over Time')
plt.show()
###Output
_____no_output_____
###Markdown
As we can see, with a maximum investment level and no mortgage overpayment, the stock investment strongly outperforms the real estate investment in the first 15 years. After that, the investment amount's value diminishes compared to the increased price index. In the second half of the mortgage annuity, the real estate keeps compounding and eventually outperforms the stocks at year 23. There are two factors that influence this development, namely:* The real estate investment is made in bulk at the beginning;* The interest payments diminish as the principal gets repaid;Because of these, the real estate keeps increasing in value as the principal gets repaid and as the real estate market keeps compounding over the years. Equal Growth Rate Various Overpayment And Investment AmountsHere we look at equal growth rates and various overpayment and investment amounts.
###Code
fig = plt.figure(figsize=(14, 12))
to_plot = history[(history.growth_rate_real_estate==history.growth_rate_stocks)
& (history.growth_rate_stocks==round(hist_growth_rate_real_estate, 2))].copy()
for amount in list(range(5000, 20001, 5000)):
subplot_1 = to_plot[(to_plot.investment_amount==amount) & (to_plot.mortgage_overpayment_amount==0)]
subplot_2 = to_plot[(to_plot.investment_amount==0) & (to_plot.mortgage_overpayment_amount==amount)]
plt.plot(subplot_1.month, subplot_1.cumulative_profit_stocks, color='b', alpha=0.5)
plt.plot(subplot_2.month, subplot_2.current_profit_real_estate, color='r', alpha=0.5)
subplot_0 = to_plot[(to_plot.investment_amount==0) & (to_plot.mortgage_overpayment_amount==0)]
plt.plot(subplot_0.month, subplot_0.current_profit_real_estate, color='r', alpha=0.5)
cust_lines = [Line2D([0], [0], color='b', alpha=0.5, linewidth=4),
Line2D([0], [0], color='r', alpha=0.5, linewidth=4)]
plt.title(f'Profit Over Time For Growth Rate Of {hist_growth_rate_real_estate:.0%}', fontsize=20)
plt.xlabel('month', fontsize=14, fontweight="bold")
plt.ylabel('profit_nok', fontsize=14, fontweight="bold")
plt.legend(cust_lines, ['stocks', 'real_estate'])
plt.show()
###Output
_____no_output_____
###Markdown
global
###Code
data['time'].plot();
data['_speed'].plot();
data['_moveCount'].plot();
# c f o p over time
# normalized c f o p over time
# I vs E over time
# I vs E normalized over time
#ph.add_rolling(data, 20)
#ph.plot_rolling(data, 20)
#ph.plot_rollingN(data, 20)
###Output
_____no_output_____
###Markdown
Cross
###Code
## time
## number of moves
## record
data['_crossTime'].plot()
data['_crossTime'].sort_values().head(5)
###Output
_____no_output_____
###Markdown
F2L
###Code
## number of moves
## records
data['f2lI'].plot()
data['f2lE'].plot()
data['f2lMoves'].plot()
###Output
_____no_output_____
###Markdown
OLL
###Code
# records
d_oll = data.groupby('_ollCase')
d_oll['_ollITime'].mean().sort_values().plot(kind='bar')
d_oll['_ollETime'].mean().sort_values().plot(kind='bar')
###Output
_____no_output_____
###Markdown
PLL
###Code
# records
pll = data.groupby('_pllCase')
pll['_pllITime'].mean().sort_values().plot(kind='bar')
pll['_pllETime'].mean().sort_values().plot(kind='bar')
###Output
_____no_output_____
###Markdown
Add some columns that denote year and month
###Code
df['year'] = pd.DatetimeIndex(df['ACQ_DATE']).year
df['month'] = pd.DatetimeIndex(df['ACQ_DATE']).month
###Output
_____no_output_____
###Markdown
Check we have a full years data for each year
###Code
num_months = np.zeros(len(df['year'].unique()),)
for i, year in enumerate(df['year'].unique()):
num_months[i] = len(df.loc[df['year'] == year,'month'].unique())
fig, ax = plt.subplots(figsize = (10,5))
sns.barplot(x = df['year'].unique(),
y = num_months, ax = ax)
ax.set_xlabel('Year')
ax.set_ylabel('Counts')
###Output
_____no_output_____
###Markdown
Nope, drop year 2000
###Code
# drop year 2000
df = df.loc[df['year'] > 2000,:]
# What's going on for 2019? - Missing October.
df.loc[df['year'] == 2019,'month'].unique()
###Output
_____no_output_____
###Markdown
Exploratory data analysis
###Code
fig, ax = plt.subplots(figsize = (10,5))
sns.barplot(x = df['year'].value_counts().index,
y = df['year'].value_counts().values, ax = ax)
ax.set_title('Fires per year')
ax.set_xlabel('Year')
ax.set_ylabel('Counts')
years = (2016, 2017, 2018, 2019)
fig, ax = plt.subplots(1,4, figsize = (25,5))
for i, year in enumerate(years):
sns.barplot(x = df.loc[df['year'] == year,'month'].value_counts().index,
y =df.loc[df['year'] == year,'month'].value_counts().values, ax = ax[i])
ax[i].set_title('Fires per month: ' + str(year))
ax[i].set_xlabel('Month')
ax[i].set_ylabel('Counts')
###Output
_____no_output_____
###Markdown
Spatial plots Plot the fires on Dec 31st 2019, during the big surge of bushfires
###Code
my_date = df['ACQ_DATE'].unique()[-1]
fig, ax = plt.subplots(figsize = (5,5))
ax.set_title(my_date)
my_map.plot(ax = ax)
df[df['ACQ_DATE'] == my_date].plot(ax = ax, column='BRIGHT_T31', cmap='hot')
###Output
_____no_output_____
###Markdown
Let's look over a period of years, normalized by number of fires across the same period
###Code
years = (2016, 2017, 2018, 2019)
num_fires = np.zeros(1,)
for year in years:
num_fires = num_fires + df.loc[df['year'] == year,:].shape[0]
fig, ax = plt.subplots(1,4, figsize = (25,5))
for i, year in enumerate(years):
counts = df.loc[df['year'] == year,'postcode'].value_counts()
my_map['counts'] = np.zeros(my_map.shape[0],)
for postcode in counts.index:
my_map.loc[my_map['code'] == postcode, 'counts'] = counts[postcode] / num_fires * 100
ax[i].set_title(year)
my_map.plot(ax = ax[i], column = 'counts', cmap='OrRd', vmax = 4, legend=True)
###Output
_____no_output_____
###Markdown
Fit a simple time series forecasting model
###Code
# assemble dataframe of counts of fires per date
df_proph = pd.DataFrame()
df_proph['ds'] = df.groupby('ACQ_DATE').count().index
df_proph['y'] = df.groupby('ACQ_DATE').count().iloc[:,0].values
df_proph.head()
# Make the prophet model and fit on the data
my_prophet = Prophet(changepoint_prior_scale=0.15)
my_prophet.fit(df_proph)
# Make a future dataframe for 2 years
my_forecast = my_prophet.make_future_dataframe(periods=365, freq='D')
# Make predictions
my_forecast = my_prophet.predict(my_forecast)
my_prophet.plot(my_forecast, xlabel = 'Date', ylabel = 'Fire Counts')
plt.title('Fire Counts');
###Output
_____no_output_____
###Markdown
Well, last year certainly was a big ol' outlier
###Code
# Plot the trends and patterns
my_prophet.plot_components(my_forecast);
###Output
_____no_output_____
###Markdown
Regional plots Let's pull out a region in NSW that regularly has fires
###Code
postcodes = (2877, 2875, 2873, 2825, 2835)
my_region = gpd.GeoDataFrame()
df_region = pd.DataFrame()
for postcode in postcodes:
region_tmp = my_map[my_map['code'] == postcode]
df_tmp = df.loc[df['postcode'] == postcode,:]
my_region = pd.concat((my_region, region_tmp), axis = 0)
df_region = pd.concat((df_region, df_tmp), axis = 0)
fig, ax = plt.subplots(figsize = (5,5))
ax.set_title('postcode: ' + str(postcode))
my_map.plot(ax = ax)
my_region.plot(ax = ax, color = 'g')
df_region.plot(ax = ax, column='BRIGHT_T31', cmap='hot')
fig, ax = plt.subplots(figsize = (10,5))
sns.barplot(x = df_region[['year','month']].groupby('year').count().index,
y = df_region[['year','month']].groupby('year').count().values.reshape(-1), ax = ax)
ax.set_xlabel('Year')
ax.set_ylabel('Counts')
###Output
_____no_output_____
###Markdown
Fit time series forecasting model
###Code
# assemble dataframe of counts of fires per date
df_proph = pd.DataFrame()
df_proph['ds'] = df_region.groupby('ACQ_DATE').count().index
df_proph['y'] = df_region.groupby('ACQ_DATE').count().iloc[:,0].values
df_proph.head()
# Make the prophet model and fit on the data
my_prophet = Prophet(changepoint_prior_scale=0.15)
my_prophet.fit(df_proph)
# Make a future dataframe for 2 years
my_forecast = my_prophet.make_future_dataframe(periods=365, freq='D')
# Make predictions
my_forecast = my_prophet.predict(my_forecast)
my_prophet.plot(my_forecast, xlabel = 'Date', ylabel = 'Fire Counts')
plt.title('Fire Counts');
# Plot the trends and patterns
my_prophet.plot_components(my_forecast);
###Output
_____no_output_____
###Markdown
Starbucks Challenge IntroductionThis data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. Not all users receive the same offer, and that is the challenge to solve with this data set.Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer. ExampleTo give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer. 
Data SetsThe data is contained in three files:* portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)* profile.json - demographic data for each customer* transcript.json - records for transactions, offers received, offers viewed, and offers completedHere is the schema and explanation of each variable in the files:**portfolio.json*** id (string) - offer id* offer_type (string) - type of offer ie BOGO, discount, informational* difficulty (int) - minimum required spend to complete an offer* reward (int) - reward given for completing an offer* duration (int) - time for offer to be open, in days* channels (list of strings)**profile.json*** age (int) - age of the customer * became_member_on (int) - date when customer created an app account* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)* id (str) - customer id* income (float) - customer's income**transcript.json*** event (str) - record description (ie transaction, offer received, offer viewed, etc.)* person (str) - customer id* time (int) - time in hours since start of test. The data begins at time t=0* value - (dict of strings) - either an offer id or transaction amount depending on the record
###Code
from matplotlib.colors import ListedColormap
from tqdm import tqdm
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pandasql as pdsql
import seaborn as sns
import pandas as pd
import numpy as np
import math
import json
from itertools import chain
plt.rcParams['patch.force_edgecolor']=True
# read in the json files
portfolio = pd.read_json('portfolio.json', orient='records', lines=True)
profile = pd.read_json('profile.json', orient='records', lines=True)
transcript = pd.read_json('transcript.json', orient='records', lines=True)
###Output
_____no_output_____
###Markdown
---- Questions If we can map customer behaviour and find specific patterns in how customers use offers, the marketing team can hopefully target the right customers and increase sales.Here are a few questions we are going to answer in this notebook.1. How much do we lose because of "unnecessary" offers? 2. What kind of customers often complete the offer without viewing it? 3. How does income differ between customer types?Before we answer the questions, we are going to do the data wrangling process first to clean the data so it can be analyzed. --- Data Wrangling Portfolio
###Code
portfolio
###Output
_____no_output_____
###Markdown
For the portfolio dataframe, we want to convert the channels column into one-hot-encoded columns so we can analyze it better.
###Code
## PORTFOLIO PREPROCESSING
channels_list = list(np.unique(list(chain(*portfolio['channels']))))
for i in channels_list:
portfolio[i] = portfolio['channels'].apply(lambda x: 1 if i in x else 0)
portfolio.drop(columns=['channels'], inplace=True)
portfolio
###Output
_____no_output_____
###Markdown
Transcript
###Code
transcript.head()
###Output
_____no_output_____
###Markdown
For the transcript dataframe, we want to extract the values from the `value` column. There are three kinds of values in that column: `amount`, `offer_id`, and `reward`. This preprocessing extracts those values and converts them into separate columns.
###Code
# Convert the dictionary value into columns and concatenate with the current dataframe
value = pd.io.json.json_normalize(transcript['value'])
transcript = pd.concat([transcript, value], axis=1).drop(columns=['value'])
# Merge the offer_id column and offer id collumn so that it only has one column
transcript['offer_id'] = np.where(pd.isnull(transcript['offer_id']), transcript['offer id'], transcript['offer_id'])
transcript.drop(columns=['offer id'], inplace=True)
# Fill the null values with 0
transcript.fillna(0, inplace=True)
transcript.head()
###Output
_____no_output_____
###Markdown
Profile
###Code
profile.head()
profile['income'].plot.hist()
plt.title('The Distribution of Customers Income')
plt.show()
###Output
_____no_output_____
###Markdown
Looking at the distribution of income, we can see that it is positively skewed.
###Code
profile['age'].plot.hist()
plt.title('The Distribution of Customers Age')
plt.show()
###Output
_____no_output_____
###Markdown
*Note the unusually high number of customers with age 118.*
###Code
profile[profile['age']>100]['age'].unique()
###Output
_____no_output_____
###Markdown
For this dataframe, there is quite a high number of null values in the gender and income columns. If we look at the data, we can also see that all of the rows with null values in gender and income have age 118 (for this case, we assume customers with age > 100 are outliers, so we treat them the same way).To handle this, I am going to fill the null values in each column (and the rows with age > 100) with the median or mode of that column (mode for gender, median for age and income) for the specific day (from the `became_member_on` column).So here is the plan to fix the data.- For the `gender` column, fill the null values with the mode of that day (`became_member_on` column)- For the `income` column, fill the null values with the median of that day (`became_member_on` column)- For the `age` column, change all values of 118 to the median of that day (`became_member_on` column)
###Code
# Convert the became_member_on into datetime type
profile['became_member_on'] = pd.to_datetime(profile['became_member_on'], format='%Y%m%d')
# Create a new column with the value of the difference days between the column became_member_on and the max days
profile['difference_days'] = (profile['became_member_on'].max() - profile['became_member_on']).dt.days
# Find median of age
median_age_per_day = profile.groupby('became_member_on', as_index=False)['age'].median()
# Find median of income
median_income_per_day = profile.groupby('became_member_on', as_index=False)['income'].median()
# Find mode of gender
mode_gender_per_day = profile.groupby('became_member_on')['gender'].agg(lambda x: pd.Series.mode(x))
mode_gender_per_day_value = [i if isinstance(i, str) else 'M' for i in mode_gender_per_day]
# Convert age 118 to the median of that day
age_reference = dict(zip(median_age_per_day['became_member_on'], median_age_per_day['age']))
profile['age'] = profile['age'].replace({118: None, 101: None}).fillna(profile['became_member_on'].map(age_reference))
profile.loc[profile['age'] > 100, 'age'] = profile['age'].median()
# Fill the null values in gender column with the mode
gender_reference = dict(zip(mode_gender_per_day.index,mode_gender_per_day_value))
profile['gender'] = profile['gender'].fillna(profile['became_member_on'].map(gender_reference))
# Fill the null values in income column with the median
income_reference = dict(zip(median_income_per_day['became_member_on'], median_income_per_day['income']))
profile['income'] = profile['income'].fillna(profile['became_member_on'].map(income_reference))
profile['income'].fillna(profile['income'].median(), inplace=True)
profile['age'] = profile['age'].astype(int)
profile.head()
###Output
_____no_output_____
###Markdown
Combining the data In order to make it easier to analyze, we are going to compile the users activity history in one dataframe (this is gonna take a while, about 9-10 minutes).Each row in the dataframe represents each offer that was being sent to each user. So we can track each offer performance for each user or for specific segment of user.The `compiled_data` dataframe will have these columns:- `person`: each customer unique id- `offer_id`:offer id- `viewed`: did customer see the offer? `0` -> no and `1` -> yes- `completed`: did customer completed the offer? `0` -> no and `1` -> yes- `view_information`: shows whether the customer saw the informational offer or not (before completed the offer)- `time_completed`: how long the customer need for completing the offer (in hour, starts after they viewed the offer)- `reward`: reward of that specific offer- `offer_type`: type of offerThen we are going to merge the `compiled_data` with `profile` and `portfolio` dataframe.
###Code
offer_reference = {}
for i,j in zip(portfolio['id'], portfolio['offer_type']):
offer_reference[i] = j
offer_duration_reference = {}
for i,j in zip(portfolio['id'], portfolio['duration']):
offer_duration_reference[i] = j*24
full_data = []
# Iterate through each person
for person in tqdm(list(transcript['person'].unique())):
not_completed = {}
received = []
active = []
total_data = {}
information = []
for index, row in transcript[transcript['person'] == person].iterrows():
if row['event'] == 'offer received':
# Everytime there is an offer received, do this
received.append(row['offer_id'])
key = row['offer_id'] + '-' + str(received.count(row['offer_id']))
not_completed[key] = row['time']
total_data[key] = [row['person'], row['offer_id'], 0, 0, 0, 0]
if row['event'] == 'offer viewed':
# If the customers have seen the informational offer
if offer_reference[row['offer_id']] == 'informational':
information.append(row['offer_id'])
# Everytime the offer is viewed, do this
active = list(filter(lambda x: x.split('-')[0] == row['offer_id'], list(not_completed.keys())))
# If there is only one offer_id active
if len(active) == 1:
# Only change the value if the offer is not completed yet
if active[0] in not_completed:
total_data[active[0]][2] = 1
# If there are more than one offer_id active
else:
for offer_id in active:
if (row['time'] - not_completed[offer_id]) < offer_duration_reference[row['offer_id']]:
if total_data[offer_id][2] == 1:
continue
total_data[offer_id][2] = 1
break
if row['event'] == 'offer completed':
# If the users completed the offer and have seen the informational offer
info = False
if len(information) > 0:
info = True
# Everytime the offer is completed, do this
active = list(filter(lambda x: x.split('-')[0] == row['offer_id'], list(not_completed.keys())))
# If there is only one offer_id active
if len(active) == 1:
total_data[active[0]][3] = 1
total_data[active[0]][5] = row['time'] - not_completed[active[0]]
not_completed.pop(active[0])
if info:
total_data[active[0]][4] = 1
continue
# If there is more that one offer_id active
else:
for offer_id in active:
if (row['time'] - not_completed[offer_id]) < offer_duration_reference[row['offer_id']]:
total_data[offer_id][3] = 1
total_data[offer_id][5] = row['time'] - not_completed[offer_id]
not_completed.pop(offer_id)
if info:
total_data[offer_id][4] = 1
break
for index, value in total_data.items():
full_data += [value]
# Create a dataframe based on the compile result
compiled_data = pd.DataFrame(full_data, columns=['person', 'offer_id', 'viewed', 'completed', 'view_information', 'time_completed'])
compiled_data.head()
# Merge with the portfolio and profile dataframe
compiled_data_merged = compiled_data.merge(portfolio, left_on='offer_id', right_on='id').drop(columns=['id'])
complete_data = compiled_data_merged.merge(profile, left_on='person', right_on='id').drop(columns=['id'])
complete_data.head()
###Output
_____no_output_____
###Markdown
--- Data Exploration In this section, we are going to explore the data to answer the questions from the previous section. 1. How much do we actually lose We already have the compiled dataframe, where each row represents each offer sent to each user. The `viewed` column indicates whether the offer has been viewed by the customer or not.Now let's calculate how much `reward` we actually gave to those customers who never viewed the offer.
###Code
complete_data[(complete_data['viewed'] == 0) & (complete_data['completed'] == 1)]['reward'].sum()
###Output
_____no_output_____
###Markdown
Based on the data in the experiment, **we actually "lost" USD 49,032 in revenue**. If we take a look at the `time` column in the `transcript` dataframe, the maximum value is 714, which means the experiment ran for about 714 hours ≈ 30 days, so we can treat the experiment window as roughly one month. With that in mind, on average **we have a potential loss of 49,032 x 12 = USD 588,384 of revenue in a year.**
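A minimal sketch of this extrapolation (treating the roughly 30-day experiment window as one month):
###Code
# Rough extrapolation: reward paid to customers who completed an offer without
# ever viewing it, scaled from the ~30-day experiment window to a full year.
experiment_days = transcript['time'].max() / 24   # 714 hours ~= 30 days
monthly_loss = complete_data[(complete_data['viewed'] == 0)
                             & (complete_data['completed'] == 1)]['reward'].sum()
print(f'Experiment window: {experiment_days:.1f} days, '
      f'potential yearly loss: USD {monthly_loss * 12:,.0f}')
###Output
_____no_output_____
###Markdown
Let's take a deeper look at the case and break it down per offer.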
###Code
query = """
SELECT
complete.offer_id,
complete.offer_type,
total_completed,
total_completed_without_view,
ROUND(((1.0*total_completed_without_view) / (1.0*total_completed))*100, 2) as total_completed_without_view_ratio,
100 - ROUND(((1.0*total_completed_without_view) / (1.0*total_completed))*100, 2) as total_completed_with_view_ratio,
`loss ($)`
FROM
(SELECT
offer_id,
offer_type,
COUNT(*) AS total_completed
FROM
complete_data
WHERE
completed = 1
GROUP BY
offer_id) complete
JOIN
(SELECT
offer_id,
offer_type,
COUNT(*) AS total_completed_without_view,
SUM(reward) AS `loss ($)`
FROM
complete_data
WHERE
viewed = 0
AND completed = 1
GROUP BY
offer_id) complete_not_view ON complete.offer_id = complete_not_view.offer_id
ORDER BY
total_completed_without_view_ratio DESC
"""
completed_without_view = pdsql.sqldf(query)
completed_without_view
table = completed_without_view.groupby('offer_type', as_index=False) \
.agg({'loss ($)': ['sum'],
'total_completed_without_view': ['sum']})
table.columns = [' '.join(col).strip() for col in table.columns.values]
table
viz = completed_without_view[['offer_id', 'loss ($)', 'offer_type']].set_index('offer_id') \
.sort_values('loss ($)')
colors = tuple(np.where(viz['offer_type'] == 'discount', '#C6E5CC', '#6fb08e'))
viz['loss ($)'].plot(kind='barh',
color=colors,
figsize=(10,5))
discount = mpatches.Patch(color='#C6E5CC', label='Discount')
bogo = mpatches.Patch(color='#6fb08e', label='BOGO')
plt.legend(handles=[bogo, discount])
plt.title('Total Loss for Each Offer Id')
plt.xlabel('Loss ($)')
plt.show()
###Output
_____no_output_____
###Markdown
**Discount**Total: 5,391Loss: USD 17,802 **Buy One Get One (BOGO)**Total: 4,616Loss: USD 31,230 There are 8 offers, and **most of the loss comes from the BOGO offers**.Even though the total number of completed discount offers is higher than BOGO, **the total loss from the BOGO offers is almost double the total loss from the discount offers.** 2. Customers that complete the offer without viewing it first Let's look at gender specifically. Is there any difference in behaviour between Male, Female, and Others?
###Code
query = """
SELECT
complete.offer_type,
complete.gender,
complete.complete_without_view,
complete_view.complete_with_view,
(complete.complete_without_view + complete_view.complete_with_view) total_complete
FROM
(SELECT
offer_type,
gender,
COUNT(*) complete_without_view
FROM
complete_data
WHERE
viewed = 0
AND completed = 1
GROUP BY
offer_type, gender) complete
JOIN
(SELECT
offer_type,
gender,
COUNT(*) complete_with_view
FROM
complete_data
WHERE
viewed = 1
AND completed = 1
GROUP BY
offer_type, gender) complete_view ON complete.offer_type = complete_view.offer_type
AND complete.gender = complete_view.gender
"""
user_demographic_summary = pdsql.sqldf(query)
user_demographic_summary['complete_without_view_ratio'] = round((user_demographic_summary['complete_without_view'] / user_demographic_summary['total_complete']) * 100, 2)
user_demographic_summary['complete_with_view_ratio'] = round((user_demographic_summary['complete_with_view'] / user_demographic_summary['total_complete']) * 100, 2)
user_demographic_summary['gender'] = user_demographic_summary['gender'].map({'F': 'Female', 'M': 'Male', 'O': 'Others'})
user_demographic_summary
fig, (ax, ax2) = plt.subplots(ncols=2, sharey=True)
ax.title.set_text('BOGO Offer')
ax.set_xlabel('Completeness Percentage')
user_demographic_summary[user_demographic_summary['offer_type'] == 'bogo'] \
[['gender', 'complete_without_view_ratio', 'complete_with_view_ratio']] \
.set_index('gender') \
.plot(kind='barh',
legend=False,
stacked=True,
colormap=ListedColormap(sns.color_palette("ch:2.5,-.2,dark=.6")),
figsize=(13,5),
ax=ax)
ax2.title.set_text('Discount Offer')
ax2.set_xlabel('Completeness Percentage')
user_demographic_summary[user_demographic_summary['offer_type'] == 'discount'] \
[['gender', 'complete_without_view_ratio', 'complete_with_view_ratio']] \
.set_index('gender') \
.plot(kind='barh',
stacked=True,
colormap=ListedColormap(sns.color_palette("ch:2.5,-.2,dark=.6")),
figsize=(13,5),
ax=ax2)
plt.legend(loc="upper left", bbox_to_anchor=(1,1.02))
plt.show()
###Output
_____no_output_____
###Markdown
This visualization shows the percentages of customers who complete the offer with and without viewing it.As we can see, in the BOGO offer there is not much difference between Male and Female, but **in the Discount offer we can see that Female customers have a slightly higher ratio than Male and Others, with 33.4% compared to 27.5% and 21.9% respectively**.This indicates that Female customers tend to be less "discount-driven" than Male and Others.
###Code
avg_spending = transcript[transcript['event'] == 'transaction'].merge(profile, left_on='person', right_on='id') \
.groupby('gender', as_index=False)['amount'] \
.mean() \
.rename(columns={'amount': 'average_spending_per_transaction'}) \
.sort_values('average_spending_per_transaction')
avg_spending['gender'] = avg_spending['gender'].map({'F': 'Female', 'M': 'Male', 'O': 'Others'})
avg_spending
avg_spending.set_index('gender').plot(kind='barh', color='#6fb08e', legend=False)
plt.title('Average Spending per Transaction')
plt.xlabel('Amount ($)')
plt.show()
###Output
_____no_output_____
###Markdown
The visualization also confirms our previous assumption about Female customers: it shows that **the average spending per transaction for Female customers is higher than for Male and Others, with an average of USD 16.3 per transaction**.
###Code
spending_distribution = transcript[transcript['event'] == 'transaction'].merge(profile, left_on='person', right_on='id')
plt.hist(spending_distribution[spending_distribution['gender'] == 'M']['amount'], range=(0, 40), alpha=0.5, bins=40, label='Male')
plt.hist(spending_distribution[spending_distribution['gender'] == 'F']['amount'], range=(0, 40), alpha=0.5, bins=40, label='Female')
plt.legend(loc='upper right')
plt.title('Spending per Transaction Distribution')
plt.xlabel('Amount ($)')
plt.show()
###Output
_____no_output_____
###Markdown
The spending distribution for each gender also shows that most Male customers tend to spend less money per transaction, whereas Female customers' spending is more evenly spread across the population. 3. Differences in average income between customer types
###Code
query = """
SELECT
complete.offer_type,
complete.complete_without_view_income,
complete_view.complete_with_view_income
FROM
(SELECT
offer_type,
AVG(income) complete_without_view_income
FROM
complete_data
WHERE
viewed = 0
AND completed = 1
GROUP BY
offer_type) complete
JOIN
(SELECT
offer_type,
AVG(income) complete_with_view_income
FROM
complete_data
WHERE
viewed = 1
AND completed = 1
GROUP BY
offer_type) complete_view ON complete.offer_type = complete_view.offer_type
"""
income_differences = pdsql.sqldf(query)
income_differences
plt.figure(figsize=(12,5))
sns.barplot(data=income_differences.melt(id_vars='offer_type'),
y='offer_type',
x='value',
hue='variable',
palette=sns.color_palette("ch:2.5,-.2,dark=.05"))
plt.legend(loc="upper left", bbox_to_anchor=(1,1.02))
plt.title('Average Income per Completeness and Offer Type')
plt.xlabel('average income ($)')
plt.show()
###Output
_____no_output_____
###Markdown
Georgia LDU Closure Analysis (2012–2016)
###Code
from helper import *
show_dfs = False
###Output
_____no_output_____
###Markdown
Datasets Patients- Data from the *Emory MCH Linked Vital Records Data Repository* (private data source) is used to identify per-patient birth data for births in 2011 by birthing LDU, payor status, race, ethnicity, and county of residence.
###Code
# Load patient data.
patients = pd.read_csv('data/patients.csv')
show_df(patients, show_dfs)
###Output
_____no_output_____
###Markdown
Labor & Delivery Units- Data from *Georgia Maternal and Infant Health Research Group (GMIHRG)* (private data source) is used to identify the LDUs of interest and their birth counts in 2008, 2011, and 2012; numbers of OBs, FPs, and CNMs in 2011 and 2016; and average ages of OBs in 2011 and 2016.- Data from the *Emory MCH Linked Vital Records Data Repository* (private data source) is used to obtain 2001 and 2011 number of births per-LDU to residents and non-residents of the county the LDU is in. It is also the source of LDU names that we consider standard.- Data from the [*U.S. Census Bureau*](https://www.census.gov/programs-surveys/geography/guidance/geo-areas/urban-rural/2010-urban-rural.html) is used to identify urban areas in 2010.- Data from [*Google Maps*](https://www.google.com/maps/d/u/0/edit?mid=1_xMZrJgPbcInCcq8CgdmwuncWMWSOoJj&usp=sharing) is used to identify, for each LDU, the closest (other) LDU (within Georgia), the number of driving miles to the closest LDU, the closest urban area (in any state), and the number of driving miles to the closest urban area in 2011.
###Code
# Load LDU data.
ldus = pd.read_csv('data/ldus.csv')
show_df(ldus, show_dfs)
###Output
_____no_output_____
###Markdown
Regional Data (Counties and PCSAs)- Data from [*OASIS*](https://oasis.state.ga.us) is used to obtain birth and population counts in 2001 and 2011 by county.- Data from the [*Office of Management and Budget (OMB)*](https://obamawhitehouse.archives.gov/sites/default/files/omb/bulletins/2013/b-13-01.pdf) is used to identify counties contained in the Atlanta-Sandy Springs-Roswell Metropolitan Statistical Area (MSA) based on the 2010 Census (see page 23).- Data from the [*U.S. Census Bureau*](https://data.census.gov/cedsci/table?q=&t=Income%20and%20Poverty&g=0400000US13%240500000&y=2011&tid=ACSST5Y2011.S1903) is used to obtain 2011 median household income by county.- Data from the [*Georgia Board of Health Care Workforce*](https://healthcareworkforce.georgia.gov/basic-physician-needs-reports-pcsa-primary-care-service-area) in the year 2008 is used to map counties to PCSAs.
###Code
# Load county data.
counties = pd.read_csv('data/counties.csv')
show_df(counties, show_dfs)
###Output
_____no_output_____
###Markdown
Determining the Sample Inclusion Criteria for Rural PCSAsPCSAs included in the sample are *rural*, meaning that in 2011:1. They did not contain any counties that were within the Atlanta MSA.2. They did not contain any counties with population at least 50,000.3. They contained exactly one LDU.
###Code
# Construct a DataFrame of 96 PCSAs.
pcsas = pd.DataFrame({'PCSA' : [x+1 for x in range(96)]})
# Identify PCSAs that have no counties in the Atlanta MSA.
df = (counties.groupby('PCSA')['In MSA (2010)'].sum() == 0).to_frame('Inc. MSA')
df1 = pcsas.join(df, on='PCSA')
# Identify PCSAs whose counties all have population strictly less than 50K.
df = (counties.groupby('PCSA')['Population'].max() < 50000).to_frame('Inc. Pop')
df2 = pcsas.join(df, on='PCSA')
# Identify PCSAs containing exactly one LDU.
df = ldus.groupby('County').size().to_frame('# LDUs')
df = counties.join(df, on='County')
df = (df.groupby('PCSA')['# LDUs'].sum() == 1).to_frame('Inc. 1 LDU')
df3 = pcsas.join(df, on='PCSA')
# Determine which PCSAs are in sample.
pcsas['In Sample'] = df1['Inc. MSA'] & df2['Inc. Pop'] & df3['Inc. 1 LDU']
show_df(pcsas, show_dfs)
###Output
_____no_output_____
###Markdown
Narrowing Patients, LDUs, and Counties to the SampleWith the 30 PCSAs in sample identified, the three other datasets can be winnowed down. All calculations from here on out can safely assume, due to Inclusion Criteria 3, that there is a 1:1 correspondence between LDUs and PCSAs.
###Code
# Collect the counties that are in sample.
df1 = counties.join(pcsas.set_index('PCSA'), on='PCSA')
s_counties = df1.drop(df1[df1['In Sample'].map(lambda x: not x)].index)
del s_counties['In Sample']
# Collect the LDUs that are in sample.
df2 = ldus.join(df1[['County', 'In Sample']].set_index('County'), on='County')
s_ldus = df2.drop(df2[df2['In Sample'].map(lambda x: not x)].index)
del s_ldus['In Sample']
# Collect the patients that are in sample.
df3 = patients.join(df2[['LDU', 'In Sample']].set_index('LDU'), on='LDU')
s_patients = df3.drop(df3[df3['In Sample'].map(lambda x: not x)].index)
del s_patients['In Sample']
# Finally, collect the PCSAs that are in sample.
s_pcsas = pcsas.drop(pcsas[pcsas['In Sample'].map(lambda x: not x)].index)
del s_pcsas['In Sample']
###Output
_____no_output_____
###Markdown
Derived ColumnsBased on the raw data above, we derive a series of new columns at the patient, LDU, and PCSA levels. Patient Payor Types/Groups and ResidenceWe aggregate different payor statuses into types and groups according to the dictionaries below. We also identify which patients gave birth to an LDU within their county and PCSA of residence.
###Code
# Assign patients their payor types.
ptypes = {'Unknown': 'Other/Unknown',
'Champus': 'Commercial/Employer-Based',
'Medicaid': 'Medicaid',
'Commercial Insurance': 'Commercial/Employer-Based',
'Other Government Assistance': 'Other Govt.',
'Other': 'Other/Unknown',
'Self Pay': 'Self Pay'}
s_patients['Payor Type'] = s_patients['Payor'].map(lambda x: ptypes[x])
# Assign patients their payor groups.
pgroups = {'Commercial/Employer-Based': 'Commercial/Employer-Based',
'Medicaid': 'Assistance/Self Pay',
'Other Govt.': 'Assistance/Self Pay',
'Self Pay': 'Assistance/Self Pay',
'Other/Unknown': 'Other/Unknown'}
s_patients['Payor Group'] = s_patients['Payor Type'].map(lambda x: pgroups[x])
# Use the counties of each LDU to identify whether patients gave birth in their
# county of residence.
df1 = s_patients.join(ldus[['LDU', 'County']].set_index('LDU'), on='LDU')
s_patients['In Res. County'] = s_patients['Res. County'] == df1['County']
# Use the counties of each LDU and the mappings of counties to PCSAs to identify
# whether patients gave birth in their PCSA of residence.
df2 = s_patients.join(counties[['County', 'PCSA']].set_index('County'), \
on='Res. County')
df3 = df1.join(counties[['County', 'PCSA']].set_index('County'), on='County')
s_patients['In Res. PCSA'] = df2['PCSA'] == df3['PCSA']
show_df(s_patients, show_dfs)
###Output
_____no_output_____
###Markdown
Patient Demographics, Payor Types/Groups, and Resident Births by LDUUsing the 2011 patient-level data in the above table, we aggregate the following measures by LDU:- Number of patients by race (Black, white, and other)- Number of patients by payor type (commercial/employer-based, Medicaid, self pay, other government, and other/unknown)- Number of patients by payor group (commercial/employer-based and assistance/self pay)- Number of patients in each pairwise intersection of Black vs. white and commercial/employer-based vs. assistance/self pay- Number of births to residents and non-residents of the LDU's county and PCSA
###Code
# Count total number of patients per LDU.
df1 = s_patients.groupby('LDU').size().to_frame('# Patients')
s_ldus = s_ldus.join(df1, on='LDU')
# Count patients by race per LDU.
df1 = s_patients.groupby(['LDU', 'Race']).size().to_frame('#').reset_index()
for race in ['Black or African-American', 'White']:
df2 = df1[df1['Race'] == race]
df2 = s_ldus.join(df2[['LDU', '#']].set_index('LDU'), on='LDU')
s_ldus[race.split()[0] + ' Patients'] = df2['#']
s_ldus['Other/Unknown Race'] = s_ldus['# Patients'] - s_ldus['Black Patients'] \
- s_ldus['White Patients']
# Count patients by payor type per LDU.
df1 = s_patients.groupby(['LDU', 'Payor Type']).size().to_frame('#').reset_index()
for ptype in ['Commercial/Employer-Based', 'Medicaid', 'Self Pay', \
'Other Govt.', 'Other/Unknown']:
df2 = df1[df1['Payor Type'] == ptype]
df2 = s_ldus.join(df2[['LDU', '#']].set_index('LDU'), on='LDU')
s_ldus[ptype + ' Payors'] = df2['#'].fillna(0)
# Count patients by payor group per LDU.
df1 = s_patients.groupby(['LDU', 'Payor Group']).size().to_frame('#').reset_index()
for pgroup in ['Commercial/Employer-Based', 'Assistance/Self Pay']:
df2 = df1[df1['Payor Group'] == pgroup]
df2 = s_ldus.join(df2[['LDU', '#']].set_index('LDU'), on='LDU')
s_ldus[pgroup + ' Payors'] = df2['#'].fillna(0)
# Count patients by intersection of race and payor groups.
df1 = s_patients.groupby(['LDU', 'Race', 'Payor Group']).size()\
.to_frame('#').reset_index()
for race, pgroup in product(['Black or African-American', 'White'], \
['Commercial/Employer-Based', 'Assistance/Self Pay']):
df2 = df1[(df1['Race'] == race) & (df1['Payor Group'] == pgroup)]
df2 = s_ldus.join(df2[['LDU', '#']].set_index('LDU'), on='LDU')
s_ldus[race.split()[0] + ' ' + pgroup] = df2['#'].fillna(0)
# Count births to county and PCSA residents.
for area in ['County', 'PCSA']:
res_str = area + ' Res. Patients'
nonres_str = area + ' Non-Res. Patients'
df1 = s_patients.groupby(['LDU', 'In Res. ' + area]).size()\
.to_frame(res_str).reset_index()
df2 = df1[df1['In Res. ' + area]]
s_ldus = s_ldus.join(df2[['LDU', res_str]].set_index('LDU'), on='LDU')
s_ldus[nonres_str] = s_ldus['# Patients'] - s_ldus[res_str]
show_df(s_ldus, show_dfs)
###Output
_____no_output_____
###Markdown
Provider Count and Load by LDUWe count providers by the number of OB equivalents per LDU in 2011 and use it to calculate the number of births per provider. An OB equivalent is calculated as:$$(\#\text{OBs}) + \frac{1}{1.55} \cdot (\#\text{CNMs}) + \frac{0.7}{1.55} \cdot (\#\text{FPs})$$We also identify the birth volume of the closest LDU.
###Code
# Calculate OB equivalents and births per provider.
s_ldus['OB Equiv.'] = s_ldus['# OBs'] + s_ldus['# CNMs'] / 1.55 \
+ (0.7/1.55) * s_ldus['# FPs']
s_ldus['Births per Provider'] = s_ldus['# Births'] / s_ldus['OB Equiv.']
# Get the 2011 birth volume at the closest LDU.
s_ldus = s_ldus.join(ldus[['LDU', '# Births']].set_index('LDU'), \
on='Closest GA LDU', rsuffix=' at Closest GA LDU')
show_df(s_ldus, show_dfs)
###Output
_____no_output_____
###Markdown
County Birth Volume and Population Demographics by PCSAWe additionally calculate, per-PCSA: the birth volume (2011), population (2011), female population (2011), Black female population (2011), white female population (2011), other female population (2011), and household income (2011). Median household income is available on a per-county basis; to calculate a PCSA's household income, we take a weighted average of its counties' median household incomes weighted by each county's proportion of the PCSA population. Mathematically, for a PCSA $p$ containing counties $c_1, \ldots, c_k$ we have:$$\text{income}(p) = \sum_{i=1}^k \left(\frac{\text{population}(c_i)}{\text{population}(p)}\right) \cdot \text{income}(c_i) = \frac{1}{\text{population}(p)} \cdot \sum_{i=1}^k \text{population}(c_i) \cdot \text{income}(c_i)$$
###Code
# Calculate the birth volume per-PCSA in 2011, the total population per-PCSA in
# 2011, and the female populations per-PCSA in 2011.
for m in ['# Births', 'Population', 'Females 15-44']:
s_pcsas = s_pcsas.join(s_counties.groupby('PCSA')[m].sum().to_frame(m), \
on='PCSA')
# Calculate populations of Black, white, and other race females by PCSA.
for m in ['Black Females 15-44', 'White Females 15-44']:
df = s_counties.groupby('PCSA')[m].sum().to_frame('#')
df = s_pcsas.join(df, on='PCSA')
s_pcsas[m] = df['#']
s_pcsas['Other Females 15-44'] = s_pcsas['Females 15-44'] \
- s_pcsas['Black Females 15-44'] \
- s_pcsas['White Females 15-44']
# Calculate the median household income per-PCSA using population-weighted
# proportions by county.
df = s_counties.groupby('PCSA')\
.apply(lambda x: (x['Population']*x['Median Household Income'])\
.sum()).to_frame('incprod')
df = s_pcsas.join(df, on='PCSA')
s_pcsas['Household Income'] = df['incprod'] / df['Population']
###Output
_____no_output_____
###Markdown
Finally, we identify which PCSAs had LDUs that closed (recall the assumption of 1:1 LDU:PCSA correspondence by Inclusion Criterium 3).
###Code
close_str = 'Closed 2012-2016'
df = s_ldus.groupby('County')[close_str].sum().to_frame(close_str)
df = s_counties[['County', 'PCSA']].join(df, on='County').fillna(0)
df = df.groupby('PCSA')[close_str].sum().to_frame(close_str)
s_pcsas = s_pcsas.join(df, on='PCSA')
show_df(s_pcsas, show_dfs)
###Output
_____no_output_____
###Markdown
Dumping the Processed DataWe write the processed patient, LDU, county, and PCSA data to file for easier inspection.
###Code
s_pcsas.to_csv('data/processed_pcsas.csv')
s_counties.to_csv('data/processed_counties.csv')
s_ldus.to_csv('data/processed_ldus.csv')
s_patients.to_csv('data/processed_patients.csv')
###Output
_____no_output_____
###Markdown
AnalysisThere are two classes of measures $m$ that we report statistics on.1. *Counts* (e.g., the number of births in a given year, household incomes, or populations), for which we report the total $\sum_{p \in P} m(p)$ and, for each subset of $P^{open}, P^{closed} \subseteq P$, the: - Median: $median(\{m(p) : p \in P^{open/closed}\})$ - Min: $min(\{m(p) : p \in P^{open/closed}\})$ - Max: $max(\{m(p) : p \in P^{open/closed}\})$ - p-value of a [Mann-Whitney U rank test](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mannwhitneyu.html#scipy.stats.mannwhitneyu) on $\{m(p) : p \in P^{open}\}$ vs. $\{m(p) : p \in P^{closed}\}$2. *Proportions* (e.g., the percentage of the female population in a given PCSA that is Black), for which we report the same statistics as for Counts (but the total remains the raw total, not the sum of the proportions).
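As an illustration only (this is not the notebook's own helper code), here is a minimal sketch of how these statistics could be computed for one Count measure, the 2011 births per PCSA:
###Code
# Illustrative sketch: total, median/min/max by closure status, and the
# Mann-Whitney U p-value for 2011 births per PCSA.
from scipy.stats import mannwhitneyu

closed = s_pcsas.loc[s_pcsas['Closed 2012-2016'] > 0, '# Births']
still_open = s_pcsas.loc[s_pcsas['Closed 2012-2016'] == 0, '# Births']
print(f"Total births (2011): {s_pcsas['# Births'].sum():,.0f}")
for label, grp in [('Open', still_open), ('Closed', closed)]:
    print(f"{label}: median={grp.median():,.1f}, min={grp.min():,.0f}, max={grp.max():,.0f}")
print(f"Mann-Whitney U p-value: {mannwhitneyu(still_open, closed, alternative='two-sided').pvalue:.3f}")
###Output
_____no_output_____
###Markdown
Odds Ratios An odds ratio for Black and white populations by PCSA is investigated against LDU closure status.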
###Code
two_by_two(s_pcsas, 'Black Females 15-44', 'White Females 15-44')
###Output
Black Females 15-44, Closed: 8,494.00
White Females 15-44, Closed: 17,795.00
Black Females 15-44, Open: 45,326.00
White Females 15-44, Open: 101,442.00
###Markdown
Using the [OpenEpi](https://openepi.com/TwobyTwo/TwobyTwo.htm) calculator, we obtain an odds ratio of $1.068 \in [1.039, 1.099]$. An odds ratio for Black and white patients with known payor groups is investigated against LDU closure status.
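As a cross-check of the figure just quoted (a sketch, not part of the original workflow, which used OpenEpi), the same point estimate and Wald-type 95% confidence interval can be reproduced directly in Python from the counts printed above; the identical recipe applies to the other two-by-two tables below.

```python
# Sketch: reproduce the odds ratio and Wald 95% CI from the 2x2 counts above.
import numpy as np

a, b = 8494, 17795      # closed LDUs: Black, white females 15-44
c, d = 45326, 101442    # open LDUs:   Black, white females 15-44
odds_ratio = (a * d) / (b * c)
se_log_or = np.sqrt(1/a + 1/b + 1/c + 1/d)
ci_low, ci_high = np.exp(np.log(odds_ratio) + np.array([-1.96, 1.96]) * se_log_or)
print(f"OR = {odds_ratio:.3f}, 95% CI = [{ci_low:.3f}, {ci_high:.3f}]")  # ~1.068 [1.039, 1.099]
```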
###Code
# Create a temporary DataFrame combining known payor groups.
df = s_ldus['Closed 2012-2016'].to_frame()
df['Black Known Payor Group'] = s_ldus['Black Commercial/Employer-Based'] \
+ s_ldus['Black Assistance/Self Pay']
df['White Known Payor Group'] = s_ldus['White Commercial/Employer-Based'] \
+ s_ldus['White Assistance/Self Pay']
two_by_two(df, 'Black Known Payor Group', 'White Known Payor Group')
###Output
Black Known Payor Group, Closed: 399.00
White Known Payor Group, Closed: 444.00
Black Known Payor Group, Open: 3,214.00
White Known Payor Group, Open: 5,212.00
###Markdown
Using the [OpenEpi](https://openepi.com/TwobyTwo/TwobyTwo.htm) calculator, we obtain an odds ratio of $1.457 \in [1.264, 1.680]$.To check if payor group is confounding, we further partition these black and white patients counts by payor groups "Assistance/Self Pay" and "Commercial/Employer-Based".
###Code
for pgroup in ['Assistance/Self Pay', 'Commercial/Employer-Based']:
two_by_two(s_ldus, 'Black '+pgroup, 'White '+pgroup)
print()
###Output
Black Assistance/Self Pay, Closed: 378.00
White Assistance/Self Pay, Closed: 372.00
Black Assistance/Self Pay, Open: 2,982.00
White Assistance/Self Pay, Open: 3,876.00
Black Commercial/Employer-Based, Closed: 21.00
White Commercial/Employer-Based, Closed: 72.00
Black Commercial/Employer-Based, Open: 232.00
White Commercial/Employer-Based, Open: 1,336.00
###Markdown
Using the [OpenEpi](https://openepi.com/TwobyTwo/TwobyTwo.htm) calculator on these strata, we obtain a Cochran-Mantel-Haenszel adjusted odds ratio of $1.344 \in [1.163, 1.553]$. An odds ratio for Assistance/Self Pay and Commercial/Employer-Based patient payor groups is investigated against LDU closure status.
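As a cross-check of the adjusted estimate just quoted (a sketch, not part of the original workflow), the Cochran-Mantel-Haenszel point estimate can be reproduced from the stratified counts printed above; the confidence interval requires a separate variance formula (e.g., Robins-Breslow-Greenland) and is omitted here.

```python
# Sketch: Cochran-Mantel-Haenszel common odds ratio across the two payor-group
# strata. Each stratum is (Black closed, white closed, Black open, white open).
strata = [
    (378, 372, 2982, 3876),   # Assistance/Self Pay
    (21, 72, 232, 1336),      # Commercial/Employer-Based
]
num = sum(a * d / (a + b + c + d) for a, b, c, d in strata)
den = sum(b * c / (a + b + c + d) for a, b, c, d in strata)
print(f"CMH OR = {num / den:.3f}")  # ~1.344
```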
###Code
two_by_two(s_ldus, 'Assistance/Self Pay Payors', 'Commercial/Employer-Based Payors')
###Output
Assistance/Self Pay Payors, Closed: 827.00
Commercial/Employer-Based Payors, Closed: 97.00
Assistance/Self Pay Payors, Open: 8,062.00
Commercial/Employer-Based Payors, Open: 1,654.00
###Markdown
Using the [OpenEpi](https://openepi.com/TwobyTwo/TwobyTwo.htm) calculator, we obtain an odds ratio of $1.749 \in [1.408, 2.173]$. An odds ratio for PCSA resident patients vs. PCSA non-resident patients is investigated against LDU closure status.
###Code
two_by_two(s_ldus, 'PCSA Res. Patients', 'PCSA Non-Res. Patients')
###Output
PCSA Res. Patients, Closed: 753.00
PCSA Non-Res. Patients, Closed: 370.00
PCSA Res. Patients, Open: 6,511.00
PCSA Non-Res. Patients, Open: 4,658.00
###Markdown
Using the [OpenEpi](https://openepi.com/TwobyTwo/TwobyTwo.htm) calculator, we obtain an odds ratio of $1.456 \in [1.278, 1.658]$. Table 1: Race and Payor Group by LDU Closure Status (2011) PCSA DataEach PCSA's 2011 total population ("Population"), 2011 female population ("Females 15-44"), and 2011 household income ("Household Income") are investigated as Counts.Each PCSA's 2011 Black female population ("Black Females 15-44"), white female population ("White Females 15-44"), and other race female population ("Other Females 15-44") are investigated as Proportions.
###Code
# Count measure statistics.
for m in ['Population', 'Females 15-44', 'Household Income']:
count_stats(s_pcsas, m)
print()
# Proportion measure statistics.
for m in ['Black Females 15-44', 'White Females 15-44', 'Other Females 15-44']:
proportion_stats(s_pcsas, m, 'Females 15-44')
print()
###Output
Population
----------
Total: 935,890.000
Open: median=26,827.000 (9,679.000 - 61,530.000)
Closed: median=23,035.500 (17,125.000 - 31,086.000)
Mann-Whit: pval=0.250633658
Females 15-44
-------------
Total: 178,044.000
Open: median=5,053.500 (1,646.000 - 12,226.000)
Closed: median=4,397.500 (3,281.000 - 6,405.000)
Mann-Whit: pval=0.296230053
Household Income
----------------
Total: 1,049,522.682
Open: median=34,248.881 (31,123.253 - 43,146.000)
Closed: median=33,588.500 (30,427.000 - 43,704.000)
Mann-Whit: pval=0.630949434
% Black Females 15-44
---------------------
Total: 53,820.000
Open: median=30.163% (0.884% - 53.831%)
Closed: median=34.450% (1.530% - 54.653%)
Mann-Whit: pval=0.595269252
% White Females 15-44
---------------------
Total: 119,237.000
Open: median=67.419% (42.783% - 97.054%)
Closed: median=63.439% (43.491% - 95.706%)
Mann-Whit: pval=0.667427898
% Other Females 15-44
---------------------
Total: 4,987.000
Open: median=2.582% (1.707% - 4.466%)
Closed: median=2.102% (1.652% - 2.763%)
Mann-Whit: pval=0.028637110
###Markdown
Patient Race by LDUEach LDU's Black patients ("Black Patients"), white patients ("White Patients"), and other race patients ("Other/Unknown Race") are investigated as Proportions.
###Code
for m in ['Black Patients', 'White Patients', 'Other/Unknown Race']:
proportion_stats(s_ldus, m, '# Patients')
print()
###Output
% Black Patients
----------------
Total: 4,101.000
Open: median=31.851% (0.392% - 66.667%)
Closed: median=41.918% (1.463% - 72.487%)
Mann-Whit: pval=0.402128752
% White Patients
----------------
Total: 6,713.000
Open: median=56.038% (22.180% - 94.828%)
Closed: median=40.285% (25.397% - 95.122%)
Mann-Whit: pval=0.493941308
% Other/Unknown Race
--------------------
Total: 1,478.000
Open: median=9.444% (3.072% - 45.113%)
Closed: median=3.844% (2.116% - 27.193%)
Mann-Whit: pval=0.173998568
###Markdown
Patient Insurance by LDUEach LDU's breakdown of patients by payor type ("Commercial/Employer-Based", "Medicaid", "Self Pay", "Other Govt.", and "Other/Unknown") are investigated as Proportions.
###Code
for ptype in ['Commercial/Employer-Based', 'Medicaid', 'Self Pay', \
'Other Govt.', 'Other/Unknown']:
proportion_stats(s_ldus, ptype+' Payors', '# Patients')
print()
###Output
% Commercial/Employer-Based Payors
----------------------------------
Total: 1,751.000
Open: median=10.798% (0.196% - 30.508%)
Closed: median=5.264% (3.285% - 25.366%)
Mann-Whit: pval=0.157773567
% Medicaid Payors
-----------------
Total: 7,390.000
Open: median=71.435% (5.501% - 91.892%)
Closed: median=62.939% (29.268% - 94.545%)
Mann-Whit: pval=0.781194897
% Self Pay Payors
-----------------
Total: 921.000
Open: median=4.447% (0.000% - 39.850%)
Closed: median=4.343% (0.000% - 26.496%)
Mann-Whit: pval=0.979852638
% Other Govt. Payors
--------------------
Total: 578.000
Open: median=0.343% (0.000% - 62.069%)
Closed: median=0.427% (0.000% - 2.555%)
Mann-Whit: pval=0.781194897
% Other/Unknown Payors
----------------------
Total: 1,652.000
Open: median=1.375% (0.000% - 86.531%)
Closed: median=11.419% (0.000% - 43.415%)
Mann-Whit: pval=0.526704560
###Markdown
Table 2: Birth Volume and Location by LDU Closure Status (2011)The number of births by PCSA ("# Births") is investigated as a Count measure.
###Code
count_stats(s_pcsas, '# Births')
###Output
# Births
--------
Total: 11,976.000
Open: median=365.000 (101.000 - 776.000)
Closed: median=313.000 (213.000 - 361.000)
Mann-Whit: pval=0.493941308
###Markdown
Each LDU's 2011 birth volume ("# Births") is investigated as a Count measure.
###Code
count_stats(s_ldus, '# Births')
###Output
# Births
--------
Total: 12,452.000
Open: median=435.500 (111.000 - 1,105.000)
Closed: median=197.000 (110.000 - 274.000)
Mann-Whit: pval=0.002684519
###Markdown
Each LDU's 2011 birth volume at the nearest Georgia LDU ("# Births at Closest GA LDU"), distance to nearest Georgia LDU ("Miles to Closest GA LDU"), and distance to nearest urban area ("Miles to Closest Urban Area") are investigated as Counts.
###Code
for m in ['# Births at Closest GA LDU', 'Miles to Closest GA LDU', \
'Miles to Closest Urban Area']:
count_stats(s_ldus, m)
print()
###Output
# Births at Closest GA LDU
--------------------------
Total: 18,519.000
Open: median=327.000 (110.000 - 2,569.000)
Closed: median=773.500 (118.000 - 3,454.000)
Mann-Whit: pval=0.064681066
Miles to Closest GA LDU
-----------------------
Total: 700.000
Open: median=24.500 (7.000 - 34.000)
Closed: median=25.000 (19.000 - 32.000)
Mann-Whit: pval=0.595269252
Miles to Closest Urban Area
---------------------------
Total: 1,314.000
Open: median=41.500 (26.000 - 64.000)
Closed: median=41.500 (21.000 - 69.000)
Mann-Whit: pval=0.742624732
###Markdown
Each LDU's 2011 number of births to county residents ("County Res. Patients") is investigated as a Proportion.
###Code
proportion_stats(s_ldus, 'County Res. Patients', '# Patients')
###Output
% County Res. Patients
----------------------
Total: 6,710.000
Open: median=55.198% (6.518% - 89.723%)
Closed: median=66.140% (48.718% - 88.182%)
Mann-Whit: pval=0.092526630
###Markdown
Table 3: Obstetric Providers by LDU Closure StatusEach LDU's providers expressed as the number of OBs ("# OBs"), CNMs ("# CNMs"), FPs ("# FPs"), and OB equivalents ("OB Equiv.") as well as the average annual birth volume per provider ("Births per Provider") and average OB age ("Ave. OB Age") are investigated as Counts.
###Code
for m in ['# OBs', '# CNMs', '# FPs', 'OB Equiv.', 'Births per Provider', \
'Ave. OB Age']:
count_stats(s_ldus, m)
print()
###Output
# OBs
-----
Total: 79.000
Open: median=3.000 (0.000 - 6.000)
Closed: median=1.500 (1.000 - 2.000)
Mann-Whit: pval=0.043787630
# CNMs
------
Total: 15.000
Open: median=0.000 (0.000 - 4.000)
Closed: median=0.000 (0.000 - 1.000)
Mann-Whit: pval=0.939655593
# FPs
-----
Total: 11.000
Open: median=0.000 (0.000 - 4.000)
Closed: median=0.000 (0.000 - 2.000)
Mann-Whit: pval=0.526704560
OB Equiv.
---------
Total: 93.645
Open: median=3.000 (1.000 - 7.935)
Closed: median=2.000 (1.645 - 2.097)
Mann-Whit: pval=0.043787630
Births per Provider
-------------------
Total: 3,974.626
Open: median=133.638 (55.500 - 233.000)
Closed: median=108.250 (55.000 - 143.966)
Mann-Whit: pval=0.033120290
Ave. OB Age
-----------
Total: 1,252.800
Open: median=46.000 (40.000 - 59.500)
Closed: median=48.000 (45.000 - 60.000)
Mann-Whit: pval=0.308300395
###Markdown
Figure 1: Median Proportions of Black Women per PCSA and LDU
###Code
labels = ['% Black Women 15-44 Years Old\nper PCSA (2011)', \
'% Black Birthing Patients\nper LDU (2011)']
openvals = [30.163, 31.851]
closedvals = [34.450, 41.918]
pvals = [0.595269252, 0.402128752]
plot_medians(labels, openvals, closedvals, pvals, fmt='{:.2f}%', anno='-fig1')
###Output
_____no_output_____
###Markdown
Figure 2: Median Birth Volumes and Provider Loads
###Code
labels = ['PCSA Birth Volume', 'LDU Birth Volume', 'Births at Nearest LDU', \
'Average Annual Births\nper Provider']
openvals = [365, 435.5, 327, 133.638]
closedvals = [313, 197, 773.5, 108.250]
pvals = [0.493941308, 0.002684519, 0.064681066, 0.033120290]
plot_medians(labels, openvals, closedvals, pvals, fmt='{:.1f}', anno='-fig2')
###Output
_____no_output_____
###Markdown
![title](file/title.png) THIS REPOSITORY IS CREATED TO ACCOMPLISH DICODING: MACHINE LEARNING FOR BEGINNER CLASS ASSIGNMENTS
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly
Enter your authorization code:
··········
Mounted at /content/drive
###Markdown
1. RENAME IMAGE WITH ITS FOLDER NAME
###Code
import os
#get dataset path
path = r'/content/drive/My Drive/dataset'
#loop over dataset folder
for folder in os.listdir(path):
#if value inside folder are real folder
if os.path.isdir(os.path.join(path,folder)):
#create new path
ipath = os.path.join(path,folder)
#then iterate it again over new path
for i, img in enumerate(os.listdir(ipath)):
#rename image inside it with it's folder name
os.rename(os.path.join(ipath,img),os.path.join(ipath,folder+str(i)+'.png'))
###Output
_____no_output_____
###Markdown
2. STORE IMAGE AND ITS LABEL TO VARIABLE
###Code
import os
import re
from skimage.io import imread, imsave
from skimage.transform import resize
from skimage.filters import median
import numpy as np
from sklearn.preprocessing import LabelEncoder
imagedata = []
imagelabel = []
#get dataset path
path = r'/content/drive/My Drive/dataset'
#loop over dataset folder
for folder in os.listdir(path):
#if value inside folder are real folder
if os.path.isdir(os.path.join(path,folder)):
#create new path
ipath = os.path.join(path,folder)
#then iterate it again over new path
for i, img in enumerate(os.listdir(ipath)):
#search image name & create it as label
label = re.match('[a-zA-Z]+',img)
label = label.group()
#read image, resize it then apply median filter
image = imread(os.path.join(ipath,img))
image = resize(image,(150,150))
image = median(image)
#save image and label
imagelabel.append(label)
imagedata.append(image)
from matplotlib import pyplot as plt
from skimage.filters import median
from skimage.color import rgb2gray
from skimage.feature import canny
median_img = median(image)
gray = rgb2gray(image)
graym = rgb2gray(median_img)
cannyy = canny(gray,sigma=2)
#visualize normal image
plt.figure(figsize=[10,5])
plt.subplot(1,5,1)
plt.imshow(image)
plt.title('normal')
#visualize median filter image
plt.subplot(1,5,2)
plt.imshow(median_img)
plt.title('median')
#visualize grayscale filter image
plt.subplot(1,5,3)
plt.imshow(gray,cmap='gray')
plt.title('grayscale')
#visualize grayscale + median filter image
plt.subplot(1,5,4)
plt.imshow(graym,cmap='gray')
plt.title('grayscale + median')
#visualize canny filter image
plt.subplot(1,5,5)
plt.imshow(cannyy,cmap='gray')
plt.title('canny')
plt.show()
###Output
_____no_output_____
###Markdown
BECAUSE KERAS IMAGEDATAGENERATOR CAN'T TAKE GRAYSCALE IMAGES, WE USE THE MEDIAN FILTER, BECAUSE THE CONTRAST FROM THIS FILTER IS GOOD FOR FEATURE EXTRACTION
###Code
from keras.utils import to_categorical
#input image into imagedata
imagedata = np.array(imagedata)
#transform label into labelencoder one
imagelabell = LabelEncoder().fit_transform(imagelabel)
#after labelencoder, transform our label into onehot (because our label is nominal not ordinal, we must remember this)
imagelabell = to_categorical(imagelabell,num_classes=3,dtype='float')
from sklearn.model_selection import train_test_split
#split our dataset into 80% training data and 20 % test data
X_train, X_test, y_train, y_test = train_test_split(imagedata, imagelabell, test_size=0.2)
from keras.preprocessing.image import ImageDataGenerator
#create training data generator
train_datagen = ImageDataGenerator(
rotation_range= 45,
horizontal_flip= True,
vertical_flip= True,
brightness_range=[0.5,1]
)
#create test data generator
test_datagen = ImageDataGenerator(
rotation_range= 45,
horizontal_flip= True,
vertical_flip= True,
brightness_range=[0.5,1]
)
#flow our traning data
train_generator = train_datagen.flow(
X_train,
y_train,
batch_size = 32,
shuffle = True,
)
#flow our test data
test_generator = test_datagen.flow(
X_test,
y_test,
batch_size = 1,
shuffle= True,
)
from matplotlib import pyplot as plt
#input our train data generator into variable
imgs, labels = next(train_generator)
#change the format of our image
img = np.array(imgs).astype(np.uint8)
#setting our matplotlib figure size
fig = plt.figure(figsize=[25,100])
#create loop for output our generated train dataset and the label
for i in range(len(img)):
sp = fig.add_subplot(len(img), len(img)//2, i+1)
sp.axis('off')
sp.set_title(labels[i], fontsize=16)
plt.imshow(img[i])
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout
from keras.callbacks.callbacks import EarlyStopping, ModelCheckpoint
#create sequential neural network
model = Sequential()
#add convolutional layer with 32 filters, 3x3 kernel size and input shape of 150x150 pixels with 3 color channels, activation function = 'relu'
model.add(Conv2D(32, (3,3), input_shape=(150,150,3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
#add second convolutional layer with 32 filters and 3x3 kernel size, activation function = 'relu'
model.add(Conv2D(32, (3,3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
#add third convolutional layer with 64 filters and 3x3 kernel size, activation function = 'relu'
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
#flatten the feature maps and add a fully connected layer with 64 nodes
model.add(Flatten())
model.add(Dense(64, activation='relu'))
#drop out half of the neurons to reduce overfitting
model.add(Dropout(0.5))
#output layer: our task is multiclass classification, so we use the softmax activation function; there are 3 labels, so create 3 output nodes
model.add(Dense(3, activation='softmax'))
#compile for model with these parameter
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'],
)
#create early stopping that stop our training when val_loss not decreasing about 3 epoch
stopping = EarlyStopping(monitor = 'val_loss',
patience = 3,
mode = 'min',
restore_best_weights = True,
)
#fit our model with these parameter
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=100,
validation_data= (X_test, y_test),
shuffle=True,
use_multiprocessing=True,
callbacks = [stopping]
)
#Our convolutional neural network summary
model.summary()
import seaborn as sns
#create list of epoch indices; because our early stopping was activated at epoch 12, set np.arange to (0, 12)
n_train = np.arange(0,12)
sns.set()
#set our matplotlib figure
plt.figure(figsize=[20,10])
plt.subplot(1,2,1)
plt.title('SUMMARY OF MODEL LOSS',fontweight='bold')
sns.lineplot(n_train,history.history['loss'], label='training_loss')
sns.lineplot(n_train,history.history['val_loss'], label='test_loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.subplot(1,2,2)
plt.title('SUMMARY OF MODEL ACCURACY',fontweight='bold')
sns.lineplot(n_train,history.history['accuracy'], label='training_accuracy')
sns.lineplot(n_train,history.history['val_accuracy'], label='testing_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
#output our training summary
plt.savefig('/content/drive/My Drive/summary.png')
from sklearn.metrics import classification_report, accuracy_score
#check our accuracy and f1 score (mandatory)
predict = model.predict(X_test)
print(f'accuracy of our model is\t:\t{accuracy_score(y_test.argmax(axis=1), predict.argmax(axis=1)) * 100:.2f}%\n\n')
print(classification_report(y_test.argmax(axis=1), predict.argmax(axis=1)))
#save our model so when we need our model again we can just load it
model.save_weights('/content/drive/My Drive/dataset/mymodel.h5')
import numpy as np
from google.colab import files
from keras.preprocessing import image
import matplotlib.pyplot as plt
%matplotlib inline
uploaded = files.upload()
for fn in uploaded.keys():
# predicting images
path = fn
img = image.load_img(path, target_size=(150,150))
imgplot = plt.imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
label = str(classes)
if label == '[[1. 0. 0.]]':
print('PHOTO IS PAPER')
elif label == '[[0. 1. 0.]]':
print('PHOTO IS ROCK')
elif label == '[[0. 0. 1.]]':
print('PHOTO IS SCISSORS')
import numpy as np
from google.colab import files
from keras.preprocessing import image
import matplotlib.pyplot as plt
%matplotlib inline
uploaded = files.upload()
for fn in uploaded.keys():
# predicting images
path = fn
img = image.load_img(path, target_size=(150,150))
imgplot = plt.imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
label = str(classes)
if label == '[[1. 0. 0.]]':
print('PHOTO IS PAPER')
elif label == '[[0. 1. 0.]]':
print('PHOTO IS ROCK')
elif label == '[[0. 0. 1.]]':
print('PHOTO IS SCISSORS')
import numpy as np
from google.colab import files
from keras.preprocessing import image
import matplotlib.pyplot as plt
%matplotlib inline
uploaded = files.upload()
for fn in uploaded.keys():
# predicting images
path = fn
img = image.load_img(path, target_size=(150,150))
imgplot = plt.imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
label = str(classes)
if label == '[[1. 0. 0.]]':
print('PHOTO IS PAPER')
elif label == '[[0. 1. 0.]]':
print('PHOTO IS ROCK')
elif label == '[[0. 0. 1.]]':
print('PHOTO IS SCISSORS')
###Output
_____no_output_____
###Markdown
The Tidy Data
###Code
doc1.sample(20)
###Output
_____no_output_____
###Markdown
Show me all highlights
###Code
doc1[doc1['hl-id'].notnull()].groupby("hl-id")['word'].apply(lambda x: " ".join(x))
###Output
_____no_output_____
###Markdown
Show me all my annotations
###Code
doc1[doc1['is-annotation']].groupby("hl-id")['word'].apply(lambda x: " ".join(x))
###Output
_____no_output_____
###Markdown
Show me tagged sections
###Code
sids = set(doc1[doc1.tag]['section-id'])
doc1[doc1['section-id'].isin(sids)].groupby("section-id")['word'].apply(lambda x: " ".join(x))
###Output
_____no_output_____
###Markdown
Plot of the most frequent words for each section
###Code
doc1.groupby("section")['word'].apply(lambda x: nltk.FreqDist(x))
###Output
_____no_output_____
###Markdown
Pokemon GO shiny rates: a Bayesian perspective[The Silph Road](https://thesilphroad.com/) is the largest online and in-person network of Pokemon GO players and researchers. We investigate the question of how accurate their proposed shiny rates are by putting on our Bayesian hat, setting the "consensus" shiny rate as our prior, and using Silph field studies as observed data. Background: Silph, shinies, and statisticsThe Silph Road organizes regional groups of Pokemon GO players, sets up in-person tournaments, and conducts field studies to learn about game mechanics of Pokemon GO. Of particular interest to us here is the *shiny rate*, which is the probability that a Pokemon found in the wild will be shiny (for non-Pokemon players, this just means it's rare and specially coloured; it's like a trophy). Though not publicized by the game developer Niantic, this rate has been of great interest to Pokemon GO players (after all, shinies are not too far off from loot boxes).Silph publishes [field studies](https://thesilphroad.com/science/oddish-shiny-rates/) to determine shiny rates, and these studies have resulted in two consensus rates: one "standard" rate of 1/450 (used for the vast majority of Pokemon), and one "boosted" rate of 1/150 (used during certain events). Recently, however, those rates have been [called into question](https://old.reddit.com/r/TheSilphRoad/comments/dd79zk/its_time_to_rethink_the_assumed_shiny_rates_from/) on the Silph subreddit, saying that they are not consistent with the collected data. I am going to re-examine these findings from a Bayesian perspective. MethodologyI went through the Silph archives looking for their shiny rate publications posted this year, and gathered them into a file `rates.csv`. The null rows in this file were the result of Silph not reporting their exact numbers (e.g., see [Spoink](https://thesilphroad.com/science/lunar-new-year-boosted-lucky-rates/) ("over 16,500 Spoink") and [Adventure Week](https://thesilphroad.com/science/quick-discovery/adventure-week-shiny-rates/) ("over 30,000 encounters each")). I chose to keep these in the dataset in case someone asks "what happened?" Additionally, the presence of two rows from the Gligar event were the result of an apparent change in the shiny rate after ~24 hours, which I am taking to be fact.
###Code
import pandas as pd
rates = pd.read_csv("rates.csv")
rates.sample(5)
###Output
_____no_output_____
###Markdown
Let's compute the "rarity", defined as `n_encounters / n_shinies`. A rarity R means that we saw shinies with a rate of 1 in R.
###Code
rates["rarity"] = rates["n_encounters"] / rates["n_shiny"]
rates = rates.dropna()
rates.sample(5)
###Output
_____no_output_____
###Markdown
Domain knowledge tells us that there are three classes of shiny rates here: a highly boosted one (around 1 in 60, for Alolan Exeggutor and Meltan), one boosted one (which Silph claims to be 1 in 150), and one normal one (which Silph claims to be 1 in 450). We can use this to partition the dataset manually, discarding the highly boosted samples because they're not relevant to this debate.
###Code
boosted = rates[rates["rarity"].between(70, 200)].sort_values("date").reset_index(drop=True)
unboosted = rates[rates["rarity"] > 200].sort_values("date").reset_index(drop=True)
boosted
unboosted
###Output
_____no_output_____
###Markdown
Let's start with the proposed boosted shiny rate of 1 in 150. We'll come back to the standard one later. The boosted shiny rate: the Bayesian wayFrequentist statistics would construct a confidence interval on these rates--it's a simple proportions test--and call it a day. Indeed, that's what both Silph (see every publication they put out) and [critics of Silph](https://old.reddit.com/r/TheSilphRoad/comments/dd6ln1/world_wide_oddish_shiny_rates/f2egcsx/) have done. After constructing this confidence interval, we simply check if 1/150 lies within it.But we can do better than this yes/no response. Given that we believe that the boosted shiny rate is 1 in 150, the Bayesian way of thinking provides us with a natural way of incorporating this into our analysis: as a prior.
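To make that concrete, here is a sketch of what the frequentist check might look like for a single species, using a simple normal-approximation (Wald) interval; the numbers are Bronzor's, discussed below, and the interval method is just an illustrative choice.

```python
# Sketch: Wald 95% interval on one species' observed shiny proportion, then
# check whether the claimed 1-in-150 rate falls inside it.
import numpy as np

def shiny_ci(n_shiny, n_encounters, z=1.96):
    p_hat = n_shiny / n_encounters
    half_width = z * np.sqrt(p_hat * (1 - p_hat) / n_encounters)
    return p_hat - half_width, p_hat + half_width

low, high = shiny_ci(n_shiny=15, n_encounters=2479)   # Bronzor
print(low <= 1/150 <= high)
```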
###Code
import arviz as az
import pymc3 as pm
az.style.use("fivethirtyeight")
###Output
_____no_output_____
###Markdown
Setting priorsLet's use a [Beta](https://en.m.wikipedia.org/wiki/Beta_distribution) prior over p, since a Beta can be used as a distribution over probabilities. Using the [success rate interpretation](https://stats.stackexchange.com/a/47782) of the Beta, our prior will be fairly weak: equivalent to having seen 10 shinies in 1500 encounters. Put otherwise, our prior is that anything between 1 in 100 and 1 in 300 is plausible.We'll add a second variable, rarity, which is 1 / p as defined before. This makes it easier to use phrases like "1 in 150" or "1 in N," and is more intuitive when talking about extremely small probabilities. Through the rest of this document, we'll mostly focus on the plots of the rarity.
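As a quick sanity check on that interpretation (a side calculation, not part of the model below), the prior's mean and central interval can be read off directly with scipy:

```python
# Beta(10, 1490) prior: its mean is 10/1500 = 1/150; the central 95% interval
# shows which nearby rarities the prior still treats as plausible.
from scipy import stats

prior = stats.beta(a=10, b=1490)
q_low, q_high = prior.ppf([0.025, 0.975])
print(f"prior mean: 1 in {1 / prior.mean():.0f}")
print(f"central 95%: 1 in {1 / q_high:.0f} to 1 in {1 / q_low:.0f}")
```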
###Code
with pm.Model() as model:
p = pm.Beta("p", alpha=10, beta=1490)
rarity = pm.Deterministic("rarity", 1. / p)
prior_samples = pm.sample_prior_predictive(samples=10000, model=model)
axes = az.plot_density(
prior_samples,
var_names=["p", "rarity"],
point_estimate=None,
credible_interval=0.99,
shade=0.5,
figsize=(12, 4),
)
###Output
_____no_output_____
###Markdown
From this, we can see that while 1/150 is at the center of our prior beliefs, we wouldn't be surprised with a rarity of 1 in 100 or 1 in 200 either. This is without having collected any data--if *all* we had heard was "the shiny rate is 1 in 150," but we weren't sure about that 150 number, this plot represents a plausible range of values. Adding dataOne advantage of the Bayesian approach is that it lets us add as much or as little data as we have. We will demonstrate how our beliefs in the shiny rate change over time as we show our model more data (i.e., as we progress through time and have more shinies released).
###Code
from typing import Tuple
def encounters_and_shiny(df: pd.DataFrame, species_name: str) -> Tuple[float, float]:
"""Given a species name, retrieve the number of encounters and number of shinies"""
row = df[df.name == species_name].iloc[0]
return (row["n_encounters"], row["n_shiny"])
assert encounters_and_shiny(boosted, "sneasel") == (1588, 13)
assert encounters_and_shiny(unboosted, "sentret") == (19297, 54)
###Output
_____no_output_____
###Markdown
Because each encounter is independently shiny with probability p, a binomial distribution is appropriate for modeling the number of shinies we see. We will use Markov Chain Monte Carlo to learn the likely distributions over our parameters (shiny rate and rarity). In lay terms, we will try to infer a distribution of most probable values for those parameters, little by little as we see more data. We'll start with just Bronzor.
###Code
with model:
n_encounters, n_shiny = encounters_and_shiny(boosted, "bronzor")
bronzor = pm.Binomial("bronzor", n=n_encounters, p=p, observed=n_shiny)
trace = pm.sample(1000, chains=4)
_ = az.plot_trace(trace)
###Output
_____no_output_____
###Markdown
This plot represents what we might have believed in February 2019, after seeing 15 out of 2479 shinies for Bronzor. The left curves represent the likely ranges for the shiny rate p and the rarity 1-in-N. For those unfamiliar with MCMC, ignore the fuzzy-caterpillar-like plots on the right; for those familiar with it, this model exhibits excellent sampling behavior.Notice how we're already seeing that these distributions are a little bit tighter. We see virtually no likelihood of the rate being 1 in 300 now, but earlier we did. Meanwhile, 1 in 150 remains a highly likely shiny rate given our limited data.Let's add the next Pokemon we had an event for, Horsea.
###Code
with model:
n_encounters, n_shiny = encounters_and_shiny(boosted, "horsea")
horsea = pm.Binomial("horsea", n=n_encounters, p=p, observed=n_shiny)
trace = pm.sample(1000, chains=4)
_ = az.plot_trace(trace)
###Output
_____no_output_____
###Markdown
Because we observed a rate of 1 in 114 for Horsea, the likelihood for the rarity has now shifted much further left. It is now almost entirely implausible for the shiny rate to be any lower than 1 in 200, and even 1 in 150 is starting to look unlikely. The next shiny released was Nidoran M.
###Code
with model:
n_encounters, n_shiny = encounters_and_shiny(boosted, "nidoran_m")
nidoran_m = pm.Binomial("nidoran_m", n=n_encounters, p=p, observed=n_shiny)
trace = pm.sample(1000, chains=4)
_ = az.plot_trace(trace)
###Output
_____no_output_____
###Markdown
Nidoran's observed rarity was 1 in 107 over 5700 encounters, shifting our rarity curve even further left, and now it's becoming more clear that 1 in 150 is a pretty unlikely shiny rate. Let's do this one more time for Sneasel.
###Code
with model:
n_encounters, n_shiny = encounters_and_shiny(boosted, "sneasel")
sneasel = pm.Binomial("sneasel", n=n_encounters, p=p, observed=n_shiny)
trace = pm.sample(1000, chains=4)
_ = az.plot_trace(trace)
###Output
_____no_output_____
###Markdown
At this point (perhaps earlier) I would feel confident saying that the shiny rate, whatever it is, is not 1 in 150. The Sneasel event happened in July 2019, and I'm writing this in October, so clearly that wasn't enough for the Pokemon GO community. Fortunately, four more events happened between then and now, and we can pass them all at once.
###Code
with model:
n_encounters, n_shiny = encounters_and_shiny(boosted, "poliwag")
poliwag = pm.Binomial("poliwag", n=n_encounters, p=p, observed=n_shiny)
n_encounters, n_shiny = encounters_and_shiny(boosted, "gligar_later")
gligar = pm.Binomial("gligar", n=n_encounters, p=p, observed=n_shiny)
n_encounters, n_shiny = encounters_and_shiny(boosted, "yanma")
yanma = pm.Binomial("yanma", n=n_encounters, p=p, observed=n_shiny)
n_encounters, n_shiny = encounters_and_shiny(boosted, "oddish")
oddish = pm.Binomial("oddish", n=n_encounters, p=p, observed=n_shiny)
trace = pm.sample(1000, chains=4)
_ = az.plot_trace(trace)
###Output
_____no_output_____
###Markdown
We can confidently say that **it is extremely unlikely that the boosted shiny rate is 1 in 150.** It is much more plausible that the rate is in the neighborhood of 1 in 120, as 150 hasn't even registered on our posterior plot of the rarity.Notice how natural a fit the Bayesian way of thinking was: we have some prior beliefs (that the rate is 1 in 150), and some data (the Silph studies), and we can marry the two together to get a posterior (the plot we see above). It's clear that the data do not support our prior beliefs, but that's okay; we're researchers, and that's how this is supposed to work. The normal shiny rate (supposedly 1 in 450)Let's look next at the normal shiny rate, which is supposedly 1 in 450. For brevity's sake, I won't take us through the step-by-step process again, but rather pass all the data at once.
###Code
with pm.Model() as model:
p = pm.Beta("p", alpha=10, beta=4490)
rarity = pm.Deterministic("rarity", 1. / p)
prior_samples = pm.sample_prior_predictive(samples=10000, model=model)
axes = az.plot_density(
prior_samples,
var_names=["p", "rarity"],
point_estimate=None,
credible_interval=0.99,
shade=0.5,
figsize=(12, 4),
)
###Output
_____no_output_____
###Markdown
Our prior is again relatively uninformative because we're not very confident in the particular value of 1 in 450. Let's add the data.
###Code
with model:
for name in unboosted.name.values:
n_encounters, n_shiny = encounters_and_shiny(unboosted, name)
_ = pm.Binomial(name, n=n_encounters, p=p, observed=n_shiny)
trace = pm.sample(2000, chains=4)
_ = az.plot_trace(trace)
###Output
_____no_output_____
###Markdown
Second Head
###Code
model_dir = '/home/lizhaochen/fyp/fyp-long-tail-recognition/logs/ImageNet_LT/stage1/e90_0.2/e90_0.2.pth'
checkpoint = torch.load(model_dir, map_location=torch.device('cpu'))
model_state = checkpoint['state_dict_best']
print(model_state.keys())
first_dot_product = torch.norm(model_state['classifier']['module.fc.weight'], 2, 1, keepdim=True).squeeze(1).tolist()
second_dot_product = torch.norm(model_state['second_dot_product']['module.fc.weight'], 2, 1, keepdim=True).squeeze(1).tolist()
###Output
_____no_output_____
###Markdown
Learnable Logits Weight
###Code
model_dir = '/home/lizhaochen/fyp/fyp-long-tail-recognition/logs/ImageNet_LT/stage1/ImageNet_LT_90_coslrres50/ImageNet_LT_90_coslrres50_with_weight.pth'
checkpoint = torch.load(model_dir, map_location=torch.device('cpu'))
model_state = checkpoint['state_dict_best']
print(model_state.keys())
w1 = model_state['w1']['module.logitsweight'].tolist()
w2 = model_state['w2']['module.logitsweight'].tolist()
###Output
_____no_output_____
###Markdown
Number of Effective Samples
###Code
df = pd.read_csv("./analysis/label.csv")
df.head()
df.sort_values(by='label_count', ascending=False).reset_index().label_count.plot(xlabel='class index', ylabel='number of training samples')
df['w1'] = w1
df['w2'] = w2
df['cls_norm_ce'] = first_dot_product
df['cls_norm_cekl'] = second_dot_product
df_sorted = df.sort_values(by="label_count", ascending=False)
df_sorted['w1'].reset_index()['w1'].plot(xlabel='class index sorted from low to high', ylabel='w1')
df_sorted['cls_norm_ce'].reset_index()['cls_norm_ce'].plot(legend=True)
df_sorted['cls_norm_cekl'].reset_index()['cls_norm_cekl'].plot(legend=True)
df_sorted['w2'].reset_index()['w2'].plot(xlabel='class index sorted from low to high', ylabel='w2')
###Output
_____no_output_____
###Markdown
Temperature Softmax
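The cell below evaluates the temperature-scaled softmax $$\mathrm{softmax}_T(z)_i = \frac{\exp(z_i / T)}{\sum_j \exp(z_j / T)},$$ so a small $T$ sharpens the distribution toward the largest logit while a large $T$ flattens it toward uniform.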
###Code
a = torch.tensor([7., 5., 2., 5., 10., 3.])
result = []
x = [0,1,2,3,4,5]
for t in [0.1, 1, 2, 10]:
result.append(torch.nn.functional.softmax(a/t).tolist())
fig, axs = plt.subplots(2, 2)
axs[0, 0].bar(x, result[0])
axs[0, 0].set_title('T=0.1')
axs[0, 0].set_xticks(x)
axs[0, 1].bar(x, result[1])
axs[0, 1].set_title('T=1')
axs[0, 1].set_xticks(x)
axs[1, 0].bar(x, result[2])
axs[1, 0].set_title('T=2')
axs[1, 0].set_xticks(x)
axs[1, 1].bar(x, result[3])
axs[1, 1].set_title('T=10')
axs[1, 1].set_xticks(x)
for ax in axs.flat:
ax.set(xlabel='class', ylabel='probability')
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
###Output
_____no_output_____
###Markdown
sigmoid attention weight
###Code
df = pd.read_csv("sigmoid_weight.csv")
df_sorted = df.sort_values(by="freq")
df_sorted.reset_index().plot(x='freq', y='w')
with open("test_loss_dict.pkl", 'rb') as f:
data = pickle.load(f)
###Output
_____no_output_____
###Markdown
SQLAlchemy Homework - Surfs Up!
###Code
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import scipy.stats as stats
import pandas as pd
import datetime as dt
###Output
_____no_output_____
###Markdown
Reflect Tables into SQLAlchemy ORM
###Code
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy import inspect
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Base.metadata.tables
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# print columns of the measurement table
inspector = inspect(engine)
cols = inspector.get_columns('Measurement')
for col in cols:
print(col['name'], col['type'])
# print columns of the station table
cols = inspector.get_columns('Station')
for col in cols:
print(col['name'], col['type'])
# Create our session (link) from Python to the DB
session = Session(engine)
###Output
_____no_output_____
###Markdown
Exploratory Climate Analysis Precipitation Analysis
###Code
# Design a query to retrieve the last 12 months of precipitation data and plot the results
# acquiring the last date of the data point
last_date_sel = ["Select date From Measurement Group By date Order By date Desc"]
last_date = engine.execute(*last_date_sel).fetchone()
# design a query to retrieve the last 12 months of precipitation data and plot the results
# determining date of last 12 month date from data point
last_12m_date = dt.datetime.strptime(last_date[0],"%Y-%m-%d") - dt.timedelta(days = 365)
# query to retrieve precipitation data in the last 12 months (grouped by month)
precip_by_month = session.query(func.strftime("%m",Measurement.date), func.sum(Measurement.prcp)).\
group_by(func.strftime("%m",Measurement.date)).\
filter(Measurement.date >= last_12m_date)
# converting to dataframe
precip_by_month_df = pd.DataFrame(precip_by_month, columns=['month', 'prcp'])
precip_by_month_df.head()
# Use Pandas Plotting with Matplotlib to plot the data
# plotting precipitation data in the last 12 months
# setting figure size
plt.figure(figsize=(18,8));
# defining horizontal bar chart
plt.barh(precip_by_month_df.month, precip_by_month_df.prcp)
# setting title & labels
plt.xlabel('Precipitation [inches]')
plt.ylabel('Month')
plt.title('Precipitation in 12 months [08-2016 till 08-2017]')
plt.grid(True);
#plt.savefig('Images/precipitation_by_month.png')
# displaying the chart
plt.show()
# Calculate the date 1 year ago from the last data point in the database
# retrieving last date data point from given dataset
last_date_sel = ["Select date From Measurement Group By date Order By date Desc"]
last_date = engine.execute(*last_date_sel).fetchone()
one_year_ago = dt.datetime.strptime(last_date[0], "%Y-%m-%d") - dt.timedelta(days = 365)
print('--------------------------------------------------------')
print(f' The date 1 year ago from the data point is {one_year_ago.strftime("%Y-%m-%d")}.')
print('---------------------------------------------------------')
# Perform a query to retrieve the data and precipitation scores
# retrieving precipitation data in the last 12 month (by date), ordering by date
data = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= last_12m_date).\
group_by(Measurement.date)
# verifying data returning from query
for row in data.limit(5):
print(row)
# save the query results as a Pandas DataFrame and set the index to the date column
# sort the dataframe by date
measure_df = pd.DataFrame(data, columns=['Date', 'Precipitation']).sort_values('Date').set_index('Date')
# removing NaN values in the dataset
measure_df = measure_df.dropna()
# reseting index
measure_df.reset_index(inplace=True)
# Use Pandas to calcualte the summary statistics for the precipitation data
measure_df.describe()
# plotting chart for precipitation
measure_df.plot('Date','Precipitation', color='b',figsize=(12,8), legend='Precipitation');
# setting labels
plt.xticks(rotation=90,horizontalalignment='right', fontweight='light', fontsize='small');
plt.ylabel('Inches')
plt.xlabel('Date')
#plt.savefig('Images/precipitation.png')
plt.show()
# max, min precipitation
max_prcp = measure_df.Precipitation.max()
min_prcp = measure_df.Precipitation.min()
print('------------------------------------------------------------------')
print(f' The maximum and minimum precipitation recorded is: {max_prcp} inches, and {min_prcp} inches')
print('------------------------------------------------------------------')
# average precipitation by month
by_month_measure_df = measure_df.copy()
by_month_measure_df['Date'] = by_month_measure_df['Date'].apply(lambda x: x[5:7])
avg_by_month_df = by_month_measure_df.groupby('Date').mean()
avg_by_month_df.reset_index(inplace=True)
avg_by_month_df.head()
print('----------------------------------------------------------------------------------')
print(f'The average precipitation [Inches] by month recorded in the dataset is as follows:')
print('----------------------------------------------------------------------------------')
for row in avg_by_month_df.iterrows():
print(f' {row[1][0]} : {round(row[0:2][1][1],2)}')
###Output
----------------------------------------------------------------------------------
The average precipitation [Inches] by month recorded in the dataset is as follows:
----------------------------------------------------------------------------------
01 : 0.01
02 : 0.13
03 : 0.09
04 : 0.07
05 : 0.09
06 : 0.01
07 : 0.01
08 : 0.01
09 : 0.08
10 : 0.02
11 : 0.02
12 : 0.06
###Markdown
Station Analysis
###Code
# Design a query to show how many stations are available in this dataset?
station_counts = session.query(Station.station).count()
stations_name = session.query(Measurement.station).distinct().all()
print('-------------------------------------------------')
print(f' There are {station_counts} stations available in the dataset.')
print('-------------------------------------------------')
stations_name
# What are the most active stations? (i.e. what stations have the most rows)?
query = ["Select station, count(station) From Measurement Group By station Order By count(station) Desc"]
most_active = engine.execute(*query).fetchone()
print('---------------------------------------------------------------------------------')
print(f'The most active station is {most_active[0]} with total number of measurement is at {most_active[1]}.')
print('---------------------------------------------------------------------------------')
# List the stations and the counts in descending order.
query = ["Select station, count(station) From Measurement Group By station Order By count(station) Desc"]
station_list = engine.execute(*query).fetchall()
print('---------------------------------------------------------')
print('List the stations and the counts in descending order:')
print('---------------------------------------------------------')
i = 1
for station in station_list:
print(' ',i,')', station[0],': ', station[1])
i +=1
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
sel_station = 'USC00519281'
lowest_temp = session.query(func.min(Measurement.tobs)).\
filter(Measurement.station == sel_station).\
group_by(Measurement.station)
# highest temperature
highest_temp = session.query(func.max(Measurement.tobs)).\
filter(Measurement.station == sel_station).\
group_by(Measurement.station)
# average temperature
avg_temp = session.query(func.avg(Measurement.tobs)).\
filter(Measurement.station == sel_station).\
group_by(Measurement.station)
print('--------------------------------------------------------------------------------')
print(f' Station {sel_station} recorded lowest: {round(lowest_temp[0][0],2)}F,\
average: {round(avg_temp[0][0],2)}F, and highest: {round(highest_temp[0][0],2)}F')
print('--------------------------------------------------------------------------------')
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
sel_station = 'USC00519281'
Sel = [Measurement.date, Measurement.tobs]
sel_station_data = session.query(*Sel).filter(Measurement.date >= one_year_ago).\
filter(Measurement.station == sel_station)
# convert to dataframe
sel_station_df = pd.DataFrame(sel_station_data, columns=['Date', 'Temperature']).sort_values('Date').set_index('Date')
sel_station_df #.reset_index(inplace=True)
sel_station_df.head()
# plotting the results as a histogram
plt.figure(figsize=(10,8));
ax = sel_station_df.plot.hist(bins=12, alpha=0.75, figsize=(10,8), color='b');
# setting labels and title
plt.ylabel('Frequency')
plt.xlabel('Temperature [F]')
plt.grid(True)
plt.title(f'Temperature in last 12 months at the station {sel_station}')
#plt.savefig('Images/station-histogram.png')
plt.show()
###Output
_____no_output_____
###Markdown
Bonus: Challenge Assignment Temperature Analysis I * Hawaii is reputed to enjoy mild weather all year. Is there a meaningful difference between the temperature in, for example, June and December?
###Code
# retrieving average temperature by month across all years
avg_tmp_by_month = session.query(func.strftime("%Y-%m",Measurement.date), func.avg(Measurement.tobs)).\
group_by(func.strftime("%Y-%m",Measurement.date))
# converting to dataframe
avg_tmp_df = pd.DataFrame(avg_tmp_by_month, columns=['Month', 'Temp'])
avg_tmp_df.head()
# re-setting to month format (mm)
avg_tmp_df.Month = avg_tmp_df.Month.apply(lambda x: x[5:7])
# filtering average temperature for the month of June & December
jun_dec_avg_tmp = avg_tmp_df.loc[(avg_tmp_df["Month"] =='06') | (avg_tmp_df["Month"] =='12'),]
jun_dec_avg_tmp.head()
###Output
_____no_output_____
###Markdown
• Use the t-test to determine whether the difference in the means, if any, is statistically significant. Will you use a paired t-test, or an unpaired t-test? Why?
###Code
jun_dec_avg_tmp.boxplot("Temp", by="Month", figsize=(8, 8));
plt.show()
#jun_avg_tmp.boxplot("Temp", by="Month", figsize=(10, 8))
jun_avg_tmp = avg_tmp_df.loc[avg_tmp_df["Month"] =='06',]
dec_avg_tmp = avg_tmp_df.loc[avg_tmp_df["Month"] =='12',]
stats.ttest_ind(jun_avg_tmp.Temp, dec_avg_tmp.Temp, equal_var=False)
###Output
_____no_output_____
###Markdown
* Use the t-test to determine whether the difference in the means, if any, is statistically significant. Will you use a paired t-test, or an unpaired t-test? Why? Hypothesis test: The null hypothesis is that there is no difference in average temperature between June and December, and the alternative hypothesis is that the average temperatures do differ. First, looking at the above box chart, the average temperature in June and December in Hawaii is vastly different. t-test: Based on the calculation, the t-test is statistically significant, which gives us evidence to reject the null hypothesis of no difference. We use an unpaired (independent-samples) t-test, `stats.ttest_ind`, because the June and December monthly averages are two separate samples with no one-to-one pairing between observations; a paired t-test (`stats.ttest_rel`) would only apply to matched pairs of equal-length samples. Temperature Analysis II
###Code
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVE, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.round(func.avg(Measurement.tobs),1), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
# assigning dates for planning trip
arrival_date = "2017-01-12"
departure_date = "2017-01-20"
temp_during_trip = calc_temps(arrival_date, departure_date)
print('---------------------------------------------------------------------------------------------')
print(f'The lowest, average and highest temperature during the period {arrival_date} till {departure_date}:')
print(f' {temp_during_trip[0][0]:.1f}F, {temp_during_trip[0][1]:.1f}F and {temp_during_trip[0][2]:.1f}F')
print('---------------------------------------------------------------------------------------------')
# Plot the results from your previous query as a bar chart.
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
fig, ax = plt.subplots(1, 1, figsize=(6,9))
ax.bar([1], temp_during_trip[0][1], yerr=temp_during_trip[0][2]-temp_during_trip[0][0], width=0.8, color='pink');
# setting range values for x & y axis
ax.set_xlim(0.08, 1.8)
ax.set_ylim(0, 100)
# Use "Trip Avg Temp" as your Title and set the axis labels
plt.title('Trip Avg Temp')
plt.ylabel('Temp(F)')
# turning off x ticks labels
plt.xticks([1],(''))
# saving image
#plt.savefig('Images/temperature.png')
# displaying the chart
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Daily Rainfall Average
###Code
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
Sel = [Measurement.station, Station.name, Station.latitude, \
Station.longitude, Station.elevation, func.sum(Measurement.prcp)]
# joining Measurement and Station tables by attribute 'station'
total_rainfall = session.query(*Sel).filter(Measurement.date >= arrival_date).\
filter(Measurement.date <= departure_date).\
filter(Measurement.station == Station.station).\
group_by(Measurement.station, Station.name,\
Station.latitude, Station.longitude, Station.elevation).\
order_by(func.sum(Measurement.prcp).desc()).all()
# verifying the results
total_rainfall
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
"""Daily Normals.
Args:
date (str): A date string in the format '%m-%d'
Returns:
A list of tuples containing the daily normals, tmin, tavg, and tmax
"""
sel = [func.min(Measurement.tobs), func.round(func.avg(Measurement.tobs),1), func.max(Measurement.tobs)]
return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()
daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
# assigning dates for planning trip
arrival_date = "2017-01-12"
departure_date = "2017-01-20"
# Set the start and end date of the trip
m_arrival_date = dt.datetime.strptime(arrival_date, '%Y-%m-%d')
m_departure_date = dt.datetime.strptime(departure_date, '%Y-%m-%d')
# Use the start and end date to create a range of dates
range_dates = (m_departure_date - m_arrival_date).days + 1
# placing the range of dates into the list in text format
dates_lst = pd.date_range(start=arrival_date, periods=range_dates, freq='D')
dates_lst = dates_lst.astype(str)
# Strip off the year and save a list of %m-%d strings
stped_dates_lst = [x[5:] for x in dates_lst]
# Loop through the list of %m-%d strings and calculate the normals for each date
normals_lst = [daily_normals(day)[0] for day in stped_dates_lst]
#normals_lst
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
normals_df = pd.DataFrame(normals_lst, columns=['tmin', 'tavg', 'tmax'])
# add the `trip_dates` range as the `date` index
normals_df['Date'] = dates_lst
# setting 'date' as index
normals_df = normals_df.set_index('Date')
#normals_df.head()
# Plot the daily normals as an area plot with `stacked=False`
ax = normals_df.plot.area(stacked=False, rot=90, figsize=(12,8));
# setting x ticks, y axis labels
plt.xticks(rotation=45,horizontalalignment='right', fontweight='light', fontsize='small');
plt.ylabel('Temperature (F)')
plt.legend(loc='best')
#plt.xlable('Date')
# setting range for x, y axis
plt.xlim(0, len(normals_df))
plt.ylim(0, max(normals_df['tmax']))
plt.tight_layout()
# saving image to file
#plt.savefig('Images/daily-normals.png')
# displaying the chart
plt.show()
###Output
_____no_output_____
###Markdown
Final assignmentCalculate in Python basic statistics like min/average/max number of: * students per teacher broken down by the type of school, * students per school broken down by their year of birth, in each district (Polish ‘gmina’) and in total for cities and rural districts.
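Conceptually, each of these statistics is a pandas group-and-aggregate; here is a minimal sketch on made-up column names (the real work is done by the `schoolstat` helpers imported below, whose tables and columns may differ):

```python
import pandas as pd

# Hypothetical illustration: min/mean/max students per teacher by district and
# school type. Column names and values here are invented for the sketch.
toy = pd.DataFrame({
    'district': ['A', 'A', 'B', 'B'],
    'school_type': ['primary', 'primary', 'primary', 'high school'],
    'students_per_teacher': [11.2, 14.8, 9.5, 12.0],
})
stats_per_district = (toy.groupby(['district', 'school_type'])['students_per_teacher']
                         .agg(['min', 'mean', 'max']))
print(stats_per_district)
```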
###Code
from schoolstat.prepare_tables import tables
from time import time
t1 = time()
schools, dist_popul, popul = tables(file1, file2, file3, 2018, 2020, 2020, differences_file)
print(time()-t1)
print(schools.shape)
schools.head()
print(dist_popul.shape)
dist_popul.head()
print(popul.shape)
popul
from schoolstat.stats import group_students_per_teacher, students_per_school, group_students_per_school, filter_stats
###Output
_____no_output_____
###Markdown
students per teacher broken down by the type of school, in each district
###Code
d1 = group_students_per_teacher(schools, 'Gmina')
d1
filter_stats(d1, 'Świdnik')
filter_stats(d1, i2='Liceum ogólnokształcące')
###Output
_____no_output_____
###Markdown
students per teacher broken down by the type of school, in total for cities and rural districts
###Code
d2 = group_students_per_teacher(schools, 'Miasto czy wieś')
d2
filter_stats(d2, i2='Przedszkole')
###Output
_____no_output_____
###Markdown
students per school broken down by their year of birth, in each district
###Code
d3 = students_per_school(dist_popul)
d3
filter_stats(d3, 'Warszawa')
group_students_per_school(dist_popul, by='Rok urodzenia')
###Output
_____no_output_____
###Markdown
students per school broken down by their year of birth, in total for cities and rural districts
###Code
d4 = students_per_school(popul)
d4
filter_stats(d4, i2=2004)
group_students_per_school(popul, by='Miasto czy wieś')
###Output
_____no_output_____
###Markdown
Plots
###Code
import matplotlib.pyplot as plt
d = filter_stats(d1, 'Świdnik').droplevel(1)
d.plot(kind='bar', y='avg', figsize=(8,6), legend=False)
plt.title('Świdnik: średnia liczba uczniów na nauczyciela')
plt.show()
fig, axs = plt.subplots(2, 2)
types = ['Przedszkole', 'Szkoła podstawowa', 'Liceum ogólnokształcące', 'Technikum']
d00 = filter_stats(d2, i2=types[0]).droplevel(1)
d01 = filter_stats(d2, i2=types[1]).droplevel(1)
d10 = filter_stats(d2, i2=types[2]).droplevel(1)
d11 = filter_stats(d2, i2=types[3]).droplevel(1)
lab = 'Liczba uczniów na nauczyciela'
plt.suptitle('Średnia liczba uczniów na nauczyciela w mieście i na wsi', fontsize='x-large')
d00.plot(ax=axs[0, 0], kind='bar', y=['avg'], title=types[0], ylabel=lab, rot=0, figsize=(15, 8), xlabel='', legend=False)
d01.plot(ax=axs[0, 1], kind='bar', y=['avg'], title=types[1], ylabel=lab, rot=0, figsize=(15, 8), xlabel='', legend=False)
d10.plot(ax=axs[1, 0], kind='bar', y=['avg'], title=types[2], ylabel=lab, rot=0, figsize=(15, 8), xlabel='', legend=False)
d11.plot(ax=axs[1, 1], kind='bar', y=['avg'], title=types[3], ylabel=lab, rot=0, figsize=(15, 8), xlabel='', legend=False)
plt.show()
d = filter_stats(d3, 'Warszawa').droplevel(1)
d.plot(title='Warszawa: średnia liczba uczniów na szkołę', figsize=(8,5))
plt.show()
d = filter_stats(d4, 'W')
d.plot(title='Gminy wiejskie: średnia liczba uczniów na szkołę', figsize=(8,5))
plt.show()
###Output
_____no_output_____
###Markdown
Basic data exploration
###Code
board = Board()
df = pd.read_csv('stored_runs/1000_hands_000.csv')
df.head()
sum(df['result']=='EUCHRE')/len(df)
df.value_counts('caller')
df.value_counts('result')
def read_all_hands(folder='stored_runs/', use_tqdm=True):
    iterable = notebook.tqdm(os.listdir(folder)) if use_tqdm else os.listdir(folder)
    folder = folder + '/' if folder[-1] != '/' else folder
    frames = []
    for file in iterable:
        if '.csv' not in file:
            continue
        frames.append(pd.read_csv(folder + file))
    # stack every per-epoch CSV into one hand-level DataFrame
    final_df = pd.concat(frames, ignore_index=True)
    return final_df
df = read_all_hands()
for caller in range(4):
sub = df[df['caller']==caller]
print('PLAYER %i:' %caller)
print('Called it %.1f%% of the time' %(len(sub)/len(df)*100))
print('First round %.1f%% of their calls' %(sum(sub['round']==1)/len(sub)*100))
print('Swept it %.1f%% of their calls' %(sum(sub['result']=='Sweep')/len(sub)*100))
print('Singled %.1f%% of their calls' %(sum(sub['result']=='Single')/len(sub)*100))
print('Was euchred %.1f%% of their calls' %(sum(sub['result']=='EUCHRE')/len(sub)*100))
p, op = ['points02', 'points13'] if caller%2==0 else ['points13', 'points02']
print('Avg points per call: %.2f' %( (sum(sub[p])-sum(sub[op]))/len(sub) ))
print()
###Output
PLAYER 0:
Called it 31.9% of the time
First round 69.5% of their calls
Swept it 8.3% of their calls
Singled 58.2% of their calls
Was euchred 33.5% of their calls
Avg points per call: 0.08
PLAYER 1:
Called it 27.5% of the time
First round 83.2% of their calls
Swept it 24.4% of their calls
Singled 65.0% of their calls
Was euchred 10.6% of their calls
Avg points per call: 0.93
PLAYER 2:
Called it 18.4% of the time
First round 89.6% of their calls
Swept it 4.5% of their calls
Singled 58.5% of their calls
Was euchred 37.1% of their calls
Avg points per call: -0.07
PLAYER 3:
Called it 22.3% of the time
First round 96.7% of their calls
Swept it 26.6% of their calls
Singled 69.3% of their calls
Was euchred 4.1% of their calls
Avg points per call: 1.14
###Markdown
Consider conservative players
###Code
p0 = make_conservative_player(0)
p2 = make_conservative_player(2)
board = Board(p0=p0, p2=p2)
if 'stored_runs' in os.listdir():
os.system('rm -r stored_runs')
for epoch in notebook.tqdm(range(100)):
for hand in range(1000):
board.play_hand()
board.writeout()
print('Done!')
df = read_all_hands()
df['caller_trueid'] = df.progress_apply(caller_trueid, axis=1)
df['caller_points'] = df.progress_apply(lambda x: 4*(x['result']=='Loner') + 2*(x['result']=='Sweep') + 1*(x['result']=='Single') - 2*(x['result']=='EUCHRE'), axis=1)
calls = [df[df['caller_trueid']==i] for i in range(4)]
for i in range(4):
sub = calls[i]
print('PLAYER %i:' %i)
print('Called it %.1f%% of the time' %(len(sub)/len(df)*100))
print('First round %.1f%% of their calls' %(sum(sub['round']==1)/len(sub)*100))
print('Swept it %.1f%% of their calls' %(sum(sub['result']=='Sweep')/len(sub)*100))
print('Singled %.1f%% of their calls' %(sum(sub['result']=='Single')/len(sub)*100))
print('Was euchred %.1f%% of their calls' %(sum(sub['result']=='EUCHRE')/len(sub)*100))
#p, op = ['points02', 'points13'] if i%2==0 else ['points13', 'points02']
#print('Avg points per call: %.2f' %( (sum(sub[p])-sum(sub[op]))/len(sub) ))
print('Avg points per call: %.2f' %(sum(sub['caller_points'])/len(sub)))
print()
points = [calls[i]['caller_points'].sum() for i in range(4)]
print('AGGRESSIVE POINTS: %i (avg %.2f points per hand)' %(points[1]+points[3], (points[1]+points[3])/len(df)))
print('CONSERVATIVE POINTS: %i (avg %.2f points per hand)' %(points[0]+points[2], (points[0]+points[2])/len(df)))
###Output
AGGRESSIVE POINTS: 32694 (avg 0.33 points per hand)
CONSERVATIVE POINTS: 25451 (avg 0.25 points per hand)
###Markdown
Make the plot of aggressiveness vs performance
###Code
thresholds = range(55, 101, 5)
performance, error = search_performance(thresholds)
import matplotlib.pyplot as plt
plt.plot(thresholds, performance)
plt.fill_between(thresholds, performance+np.sqrt(1e4)*error, performance-np.sqrt(1e4)*error, alpha=0.2, facecolor='gray')
plt.xlabel('Calling threshold')
plt.ylabel('Points in 10,000 hands')
plt.show()
###Output
_____no_output_____
###Markdown
Preliminarily, it looks like 70ish is best. Let's run the wide search again, but with finer granularity in the thresholds
###Code
thresholds2 = range(55, 101, 1)
performance2, error2 = search_performance(thresholds2, prnt=False) # last time took about 3-4 minutes, should make this take about 15-20
plt.plot(thresholds2, performance2)
plt.fill_between(thresholds2, performance2+np.sqrt(1e4)*error2, performance2-np.sqrt(1e4)*error2, alpha=0.2, facecolor='gray')
plt.xlabel('Calling threshold')
plt.ylabel('Points in 10,000 hands')
plt.title('Threshold vs performance (opp = 70)')
if 'figs' not in os.listdir():
os.mkdir('figs')
plt.savefig('figs/threshold_vs_performance_shallow.png')
plt.show()
###Output
_____no_output_____
###Markdown
Make this plot for three different opponent sets
###Code
thresholds = range(60, 101, 1)
opp_thresholds = [65, 80, 100]
performance3 = {op_thresh : search_performance(thresholds, opp_thresh=op_thresh, prnt=False) for op_thresh in opp_thresholds}
# should take like 45 minutes
for op_thresh in opp_thresholds:
plt.plot(thresholds, performance3[op_thresh][0], label='opp = ' + str(op_thresh))
plt.fill_between(thresholds, performance3[op_thresh][0]+np.sqrt(1e4)*performance3[op_thresh][1],
performance3[op_thresh][0]-np.sqrt(1e4)*performance3[op_thresh][1],
alpha=0.2, facecolor='gray')
plt.xlabel('Calling threshold')
plt.ylabel('Points per 10,000 games')
plt.legend()
plt.title('Varying opponent thresholds')
plt.savefig('figs/threshold_vs_performance_varyopp.png')
plt.show()
###Output
_____no_output_____
###Markdown
Make the plot, but with more trials
###Code
multiprocessing.cpu_count()
#search_performance_parallel(args=(thresholds[i], i, n_epochs, n_hands, None, folder, None, os.getcwd())))
search_performance_parallel(70, 0, 1, 100, None, 'thresh70', None, 'thresholds')
%%time
# goal is to do a million each, so 10^2 epochs x 10^4 hands per epoch seems fine
thresholds=range(70,92)
parallel_search_wrapper(thresholds=thresholds, n_epochs=1e2, n_hands=1e4, ROOT_DIR='thresholds/')
###Output
###Markdown
Metric should be $\Delta$ points, not total points
If the opponents always call it, you get 0 called points, but you might euchre them a lot, meaning you still end up with more points than they do. Making the original graph (threshold vs performance, 55-100, shallow), but with this new understanding.
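As a hedged illustration of the net-points metric (the column names 'points_us' and 'points_them' are invented for this toy frame, not the stored CSV schema):
###Code
import pandas as pd

# four toy hands: points scored by our team vs. the opponents
toy = pd.DataFrame({'points_us':   [1, 2, 0, 1],
                    'points_them': [0, 0, 2, 0]})
net_points = toy['points_us'].sum() - toy['points_them'].sum()
net_points  # 4 - 2 = 2: delta points, not total points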
###Code
%%time
parallel_search_wrapper(thresholds=range(55,101), n_epochs=10, n_hands=1000, ROOT_DIR='thresholds')
%%time
get_performance(ROOT_DIR=os.path.join(os.getcwd(), 'thresholds'), use_mp=True, amount_tqdm=2)
df = pd.read_csv('thresholds/performance.csv').sort_values('Threshold')
points, error = df['TotalSum']-df['OppSum'], np.sqrt(df['TotalStd']**2 + df['OppStd']**2)
plt.plot(df['Threshold'], points)
plt.fill_between(df['Threshold'], points+error, points-error, alpha=0.2, facecolor='gray')
plt.xlabel('Calling threshold')
plt.ylabel('Net points in 10,000 hands')
mx_thresh = df[(df['TotalSum']-df['OppSum'])==(df['TotalSum']-df['OppSum']).max()]['Threshold']
plt.title('Threshold vs performance (opp = 70, max at %i)' %mx_thresh)
if 'figs' not in os.listdir():
os.mkdir('figs')
plt.savefig('figs/threshold_vs_performance_shallow.png')
plt.show()
###Output
_____no_output_____
###Markdown
Making the second graph, thresh vs performance for varying opp thresholds
###Code
for opp_thresh in [60, 80, 100]:
print()
print('OPP THRESHOLD:', opp_thresh)
parallel_search_wrapper(thresholds=range(65,106), n_epochs=100, n_hands=1000, ROOT_DIR='thresholds', opp_thresh=opp_thresh)
print('Getting performance...')
get_performance(ROOT_DIR=os.path.join(os.getcwd(), 'thresholds'), use_mp=True, amount_tqdm=0,\
outfile='performance_100k_oppthresh'+str(opp_thresh)+'.csv')
maxes = []
for opp_thresh in [60, 80, 100]:
df = pd.read_csv('thresholds/performance_100k_oppthresh' + str(opp_thresh)+'.csv').sort_values('Threshold')
df = df[df['TotalHands'] > 10000]
points, error = df['TotalSum']-df['OppSum'], np.sqrt(df['TotalStd']**2 + df['OppStd']**2)
plt.plot(df['Threshold'], points, label='Opp thresh = ' + str(opp_thresh))
plt.fill_between(df['Threshold'], points+error, points-error, alpha=0.2, facecolor='gray')
maxes.append(df[(df['TotalSum']-df['OppSum'])==(df['TotalSum']-df['OppSum']).max()]['Threshold'])
plt.xlabel('Calling threshold')
plt.ylabel('Net points in 100,000 hands')
plt.title('Threshold vs performance (maxes at %i, %i, %i)' %(maxes[0], maxes[1], maxes[2]))
plt.legend()
if 'figs' not in os.listdir():
os.mkdir('figs')
plt.savefig('figs/threshold_vs_performance_varyopp.png')
plt.show()
board = Board()
for i in range(100):
board.play_hand()
board.writeout(folder='testing', keep_results=False, ROOT_DIR='thresholds')
###Output
_____no_output_____
###Markdown
Making the 3rd plot, thresh vs performance deep
###Code
%%time
parallel_search_wrapper(thresholds=range(70,94), n_epochs=100, n_hands=10000, ROOT_DIR='thresholds', opp_thresh=70)
%%time
get_performance(ROOT_DIR=os.path.join(os.getcwd(), 'thresholds'), use_mp=True, amount_tqdm=1)
df = pd.read_csv('thresholds/performance_1M.csv').sort_values('Threshold')
df = df[df['TotalHands']==int(1e6)]
points, error = df['TotalSum']-df['OppSum'], np.sqrt(df['TotalStd']**2 + df['OppStd']**2)
plt.plot(df['Threshold'], points)
plt.fill_between(df['Threshold'], points+error, points-error, alpha=0.2, facecolor='gray')
plt.xlabel('Calling threshold')
plt.ylabel('Points in 1,000,000 hands')
mx_thresh = df[(df['TotalSum']-df['OppSum'])==(df['TotalSum']-df['OppSum']).max()]['Threshold']
plt.title('Threshold vs performance (opp = 70, max at %i)' %mx_thresh)
if 'figs' not in os.listdir():
os.mkdir('figs')
plt.savefig('figs/threshold_vs_performance_deep.png')
plt.show()
###Output
_____no_output_____
###Markdown
Using data made on HPC
###Code
df = pd.read_csv('thresholds/performance_10M.csv').sort_values('Threshold')
df = df[df['TotalHands']==int(1e7)]
points, error = df['TotalSum']-df['OppSum'], np.sqrt(df['TotalStd']**2 + df['OppStd']**2)
plt.plot(df['Threshold'], points)
plt.fill_between(df['Threshold'], points+error, points-error, alpha=0.2, facecolor='gray')
plt.xlabel('Calling threshold')
plt.ylabel('Points in 10 million hands')
mx_thresh = df[(df['TotalSum']-df['OppSum'])==(df['TotalSum']-df['OppSum']).max()]['Threshold']
plt.title('Threshold vs performance (opp = 70, max at %i)' %mx_thresh)
if 'figs' not in os.listdir():
os.mkdir('figs')
plt.savefig('figs/threshold_vs_performance_superdeep.png')
plt.show()
###Output
_____no_output_____
###Markdown
Standard Imports Just matplotlib and seaborn for viz right now, interaction via bokeh might be added at a later date
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from os import listdir
import seaborn as sns
###Output
_____no_output_____
###Markdown
Log Data Imports Find all the log files in the log directory
###Code
files = listdir('logs')
log_files = []
for file in files:
if file.endswith('.log') and ("Event" in file):
log_files.append(file)
else:
print('FYI - Non event log file detected and ignored in data folder - ', file)
print('Successfully identified', len(log_files), 'log file(s)')
###Output
FYI - Non event log file detected and ignored in data folder - csv_user_log_db.csv
FYI - Non event log file detected and ignored in data folder - WAM_Conditions_1582392450.631387.log
Successfully identified 1 log file(s)
###Markdown
Import the log files into a list of lists
###Code
raw_log_import_list = []
ignored_lines = []
for log_file_name in log_files:
raw_file = open('logs\\' + str(log_file_name))
raw_log_import_list.append(raw_file)
file_data_table = []
for raw_file in raw_log_import_list:
file_line = raw_file.readline()
while file_line:
try:
date = file_line[0:10]
time = file_line[11:23]
event = file_line.split('Event(')[1][0:5].split('-')[0]
log_event_detail = file_line[73:].split('{')[1][:-4].split(', \'')
file_data_table.append((raw_file,date,time,event,log_event_detail))
except:
ignored_lines.append((raw_file,file_line))
file_line = raw_file.readline()
raw_file.close()
len(file_data_table)
file_data_table
###Output
_____no_output_____
###Markdown
For the hit events, take the most recent ratings and append them (append the value held in history, as that is the most current; not exactly sure how I should do that right now). Try the next line, and if it is a rating then refresh it; otherwise assume the old ratings are still current (but include a timestamp on them).
###Code
cols = ['file', 'date', 'time', 'step', 'actual_hit', 'margin_hit', 'comm_hit', 'pos_x', 'pos_y', 'distance', 'relative_loc',
'skill_vs_luck_rating', 'hit_confidence', 'score', 'score_inc', 'mole_loc_x', 'mole_loc_y']
df = pd.DataFrame(columns = cols)
tmp_table = []
score = 0
step = 0
for line in file_data_table:
file = line[0]
date = line[1]
time = line[2]
event = line[3]
step += 1
if event == '10':
mole_loc = line[4][0].split(': ')[1]
mole_loc = mole_loc.split(',')
mole_loc_x = mole_loc[0]
mole_loc_y = mole_loc[1]
if len(tmp_table) < 14:
# tmp_table += t
# reset the per-hit fields until a complete record has been assembled
actual_hit = None
margin_hit = None
comm_hit = None
pos_x = None
pos_y = None
distance = None
relative_loc = None
skill_vs_luck_rating = None
hit_confidence = None
score = None
score_inc = None
mole_loc_x = None
mole_loc_y = None
elif event == '9':
hits = line[4][0][11:]
hits = hits[:-1]
hits = hits.split(', ')
actual_hit = bool(hits[0])
margin_hit = bool(hits[1])
comm_hit = bool(hits[2])
pos = ((line[4][1])[7:])[:-1]
pos = pos.split(', ')
pos_x = pos[0]
pos_y = pos[1]
distance = (line[4][0])[11:]
relative_loc = ((line[4][3])[16:])[:-1]
relative_loc = relative_loc.split(', ')
relative_loc_x = relative_loc[0]
relative_loc_y = relative_loc[1]
hit_found = True
elif event == '7':
skill_vs_luck_rating = line[4][0].split(': ')[1]
hit_confidence = line[4][1].split(': ')[1]
rating_found = True
elif event == '11':
score = line[4][1].split(': ')[1]
tmp_table
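# Hedged sketch (not the notebook's final logic) of the carry-forward idea
# described above: remember the most recent ratings (event 7) and score
# (event 11), and attach them to each hit record (event 9) as it is parsed.
# The dictionary keys below are illustrative, not the exact log schema.
latest_rating, latest_score = None, None
carried_rows = []
for _file, _date, _time, event, detail in file_data_table:
    if event == '7':
        latest_rating = detail        # refresh the self-ratings
    elif event == '11':
        latest_score = detail         # refresh the running score
    elif event == '9':
        carried_rows.append({'hit_detail': detail,
                             'rating_at_hit': latest_rating,
                             'score_at_hit': latest_score})
len(carried_rows)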
#Cast the columns to numbers
df['pos_x'] = df['pos_x'].astype('float')
df['pos_y'] = df['pos_y'].astype('float')
df['relative_loc_x'] = df['relative_loc_x'].astype('float')
df['relative_loc_y'] = df['relative_loc_y'].astype('float')
df['hit_conf'] = df['hit_conf'].astype('float')
df['reward_conf'] = df['reward_conf'].astype('float')
df['player_skill'] = df['player_skill'].astype('float')
df['distance'] = df['relative_loc_x']**2 + df['relative_loc_y']**2
df['distance'] = df['distance']**0.5
event_list = []
count = 1
file_prev = False
for file in df['file']:
if file == file_prev:
event_list.append(count)
count += 1
else:
count = 1
event_list.append(count)
file_prev = file
df['event_seq'] = event_list
df = df.assign(id=df['file'].astype('category').cat.codes)
df_lookup = df[['file', 'id']]
df = df.drop(columns = ['file'])
%matplotlib inline
df = df[['id', 'event_seq', 'event', 'pos_x', 'pos_y', 'relative_loc_x', 'relative_loc_y', 'distance',
'hit_conf', 'reward_conf', 'player_skill']]
###Output
_____no_output_____
###Markdown
Data Analysis Imported Data, Basic Stats
###Code
print('Number of unique files, and hence participant data sets -', len(df['id'].unique()))
df.describe().round(2)
###Output
Number of unique files, and hence participant data sets - 0
###Markdown
Heatmap Correlations Between Variables
###Code
plt.figure(figsize=(10,10))
sns.heatmap(df.corr())
###Output
_____no_output_____
###Markdown
Time Series Plot Between Self Ratings & Absolute Distance From Mole
###Code
df_norm = df
df_norm['distance'] = df_norm['distance']/df['distance'].max()
df_norm['hit_conf'] = df_norm['hit_conf']/7
df_norm['reward_conf'] = df_norm['reward_conf']/7
df_norm['player_skill'] = df_norm['player_skill']/7
df_sns = pd.melt(df_norm[['event_seq','distance',
'hit_conf', 'reward_conf', 'player_skill']], 'event_seq', var_name='cols', value_name='vals')
sns.lineplot(x="event_seq", y="vals",
hue="cols",
data=df_sns)
###Output
_____no_output_____
###Markdown
XY Scatterplot, Detailing Location of Hits vs Mole
###Code
x = df['relative_loc_x']
y = df['relative_loc_y']
plt.scatter(x, y, alpha=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Violin Plots, Describing Distributions of Variables
###Code
# plot
sns.set_style('ticks')
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(11.7, 8.27)
sns.violinplot(data=df[cols[4:6]], inner="points", ax=ax, alpha=0.5)
sns.despine()
# plot
sns.set_style('ticks')
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(11.7, 8.27)
sns.violinplot(data=df[cols[6:8]], inner="points", ax=ax, alpha=0.5)
sns.despine()
sns.set_style('ticks')
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(11.7, 8.27)
sns.violinplot(data=df[cols[8:]], inner="points", ax=ax, alpha=0.5)
sns.despine()
%matplotlib inline
df['relative_loc_x'].plot.hist(bins=12, alpha=0.5)
df['relative_loc_y'].plot.hist(bins=12, alpha=0.5)
df['hit_conf'].plot.hist(alpha=0.5)
df['reward_conf'].plot.hist(alpha=0.5)
df['player_skill'].plot.hist(alpha=0.5)
###Output
_____no_output_____
###Markdown
Demo
This is a demo to showcase the analysis process. For each task's analysis, we have provided a corresponding class.
###Code
# import analysis tools
from analysis import SUMStat, D2TStat, WMTStat
def truncate_print(l, n=10):
""" Print the first n items of a list"""
for i, x in enumerate(l):
if i == n:
print('...')
break
print(x)
###Output
_____no_output_____
###Markdown
Summarization For all summarization datasets, including **REALSumm**, **SummEval** and **Newsroom**, the analysis tools are the same.
###Code
summ_stat = SUMStat('SUM/REALSumm/final_p.pkl') # The path to the scored file, _p means we have prompted metrics
###Output
_____no_output_____
###Markdown
See what metrics are out there.Since there are a lot, including P, R, F variants for some metrics as well as prompted metrics, we only print a truncated version of metrics
###Code
print('[All metrics]')
truncate_print(summ_stat.metrics) # change to print if you want to see all metrics
print('[Automatic metrics]')
truncate_print(summ_stat.auto_metrics)
print('[Human metrics]')
truncate_print(summ_stat.human_metrics)
###Output
[All metrics]
litepyramid_recall
bert_score_p
bert_score_r
bert_score_f
mover_score
bart_score_src_hypo
bart_score_hypo_ref
bart_score_ref_hypo
bart_score_avg_f
bart_score_harm_f
...
[Automatic metrics]
bert_score_p
bert_score_r
bert_score_f
mover_score
bart_score_src_hypo
bart_score_hypo_ref
bart_score_ref_hypo
bart_score_avg_f
bart_score_harm_f
bart_score_cnn_src_hypo
...
[Human metrics]
litepyramid_recall
###Markdown
We can choose some metrics that we are interested in to conduct analysis. For example, in **REALSumm**, we use recall-based metrics (e.g. bert_score_r, rouge1_r, bart_score_cnn_hypo_ref, ...)For others, we use F-based metrics (for metrics that only consider hypo and ref) and src->hypo (for generation based metrics like bart_score and prism)
###Code
valid_metrics = [
'rouge1_r',
'rouge2_r',
'rougel_r',
'bert_score_r',
'mover_score',
'prism_hypo_ref',
'bart_score_cnn_hypo_ref'
]
# The first argument is the human metric considered.
# The second argument is a list of considered automatic metrics, can omit it if all automatic metrics are considered
summ_stat.evaluate_summary('litepyramid_recall', valid_metrics)
###Output
Human metric: litepyramid_recall
metric spearman kendalltau
----------------------- ---------- ------------
rouge1_r 0.497526 0.407974
rougel_r 0.488254 0.402523
bart_score_cnn_hypo_ref 0.474608 0.374497
bert_score_r 0.440398 0.346489
rouge2_r 0.4233 0.353119
prism_hypo_ref 0.411005 0.323994
mover_score 0.372353 0.290156
###Markdown
We can also see the performance of some prompt-based metrics.
###Code
valid_metrics = [
'bart_score_cnn_hypo_ref_de_id est',
'bart_score_cnn_hypo_ref_de_Videlicet',
'bart_score_cnn_hypo_ref_de_To give an instance',
'bart_score_cnn_hypo_ref_de_To give an example',
'bart_score_cnn_hypo_ref_de_As an illustration'
]
summ_stat.evaluate_summary('litepyramid_recall', valid_metrics)
###Output
Human metric: litepyramid_recall
metric spearman kendalltau
---------------------------------------------- ---------- ------------
bart_score_cnn_hypo_ref_de_id est 0.49539 0.392728
bart_score_cnn_hypo_ref_de_Videlicet 0.491011 0.388237
bart_score_cnn_hypo_ref_de_To give an instance 0.49081 0.387054
bart_score_cnn_hypo_ref_de_To give an example 0.489033 0.38625
bart_score_cnn_hypo_ref_de_As an illustration 0.488977 0.385511
###Markdown
To combine prompt-based metrics, run the following
###Code
summ_stat.combine_prompt()
summ_stat.evaluate_summary('litepyramid_recall', ['bart_score_cnn_hypo_ref_de'])
###Output
Human metric: litepyramid_recall
metric spearman kendalltau
-------------------------- ---------- ------------
bart_score_cnn_hypo_ref_de 0.48784 0.386398
###Markdown
To conduct a bootstrapping significance test, we provide the *sig_test_two ( )* and *sig_test ( )* methods.
###Code
# The first two arguments are metrics that should be compared, the third argument is the human metric.
m1 = 'bart_score_cnn_hypo_ref'
m2 = 'bert_score_r'
result = summ_stat.sig_test_two(m1, m2, 'litepyramid_recall')
if result == 1:
print(f'{m1} is significantly better than {m2}')
elif result == -1:
print(f'{m2} is significantly better than {m1}')
else:
print('cannot decide')
# The first arguments are a list of metrics considered
# The second argument is the human metric
summ_stat.sig_test(['rouge1_r', 'bart_score_cnn_hypo_ref', 'bert_score_r'], 'litepyramid_recall')
###Output
100%|██████████| 1000/1000 [01:28<00:00, 11.32it/s]
100%|██████████| 1000/1000 [01:24<00:00, 11.81it/s]
100%|██████████| 1000/1000 [01:26<00:00, 11.55it/s]
###Markdown
Factuality We use **Rank19** dataset and **QAGS_CNN** dataset to showcase some basic usages. The former uses accuracy as its evaluation metric while the latter uses pearson correlation. Rank19 We first print out the factuality accuracy obtained using different metrics for the **Rank19** dataset.
###Code
fact_stat = SUMStat('SUM/Rank19/final_p.pkl')
fact_stat.combine_prompt()
# Set valid metrics
valid_metrics = [
'rouge1_f',
'rouge2_f',
'rougel_f',
'bert_score_f',
'mover_score',
'prism_src_hypo',
'bart_score_cnn_src_hypo',
'bart_score_cnn_src_hypo_de'
]
# Print accuracy, take a list of metrics
fact_stat.get_fact_acc(valid_metrics)
###Output
metric acc
-------------------------- --------
bart_score_cnn_src_hypo 0.836461
bart_score_cnn_src_hypo_de 0.796247
prism_src_hypo 0.780161
bert_score_f 0.713137
mover_score 0.713137
rouge2_f 0.630027
rougel_f 0.587131
rouge1_f 0.568365
###Markdown
Below are some methods that help facilitate the significance test.
###Code
m1 = 'bart_score_cnn_src_hypo'
m2 = 'bert_score_f'
result = fact_stat.fact_acc_sig_test_two(m1, m2)
if result == 1:
print(f'{m1} is significantly better than {m2}')
elif result == -1:
print(f'{m2} is significantly better than {m1}')
else:
print('cannot decide')
# Take a list of metrics, print the best metrics
fact_stat.fact_acc_sig_test(['bart_score_cnn_src_hypo', 'prism_src_hypo', 'bert_score_f'])
###Output
100%|██████████| 1000/1000 [00:00<00:00, 2082.68it/s]
100%|██████████| 1000/1000 [00:01<00:00, 666.78it/s]
100%|██████████| 1000/1000 [00:01<00:00, 614.94it/s]
###Markdown
QAGS_CNN
###Code
fact_stat = SUMStat('SUM/QAGS_CNN/final_p.pkl')
fact_stat.combine_prompt()
# Set valid metrics
valid_metrics = [
'rouge1_f',
'rouge2_f',
'rougel_f',
'bert_score_f',
'mover_score',
'prism_src_hypo',
'bart_score_cnn_src_hypo',
'bart_score_cnn_src_hypo_de'
]
# Print accuracy, take a list of metrics
fact_stat.get_fact_pearson(valid_metrics)
m1 = 'bart_score_cnn_src_hypo'
m2 = 'bert_score_f'
result = fact_stat.fact_pearson_sig_test_two(m1, m2)
if result == 1:
print(f'{m1} is significantly better than {m2}')
elif result == -1:
print(f'{m2} is significantly better than {m1}')
else:
print('cannot decide')
# Take a list of metrics, print the best metrics
fact_stat.fact_pearson_sig_test(['bart_score_cnn_src_hypo', 'prism_src_hypo', 'bert_score_f'])
###Output
100%|██████████| 1000/1000 [00:00<00:00, 1986.75it/s]
100%|██████████| 1000/1000 [00:00<00:00, 1156.13it/s]
100%|██████████| 1000/1000 [00:00<00:00, 1173.93it/s]
###Markdown
Data-to-Text For all data-to-text datasets, including **BAGEL**, **SFHOT** and **SFRES**, the analysis tools are the same.
###Code
d2t_stat = D2TStat('D2T/BAGEL/final_p.pkl')
d2t_stat.combine_prompt() # combine the prompt-based results
###Output
_____no_output_____
###Markdown
See what metrics are out there. For data-to-text datasets, the human metrics are *informativeness*, *naturalness* and *quality*.
###Code
print('[All metrics]')
truncate_print(d2t_stat.metrics) # change to print if you want to see all metrics
print('[Automatic metrics]')
truncate_print(d2t_stat.auto_metrics)
print('[Human metrics]')
truncate_print(d2t_stat.human_metrics)
###Output
[All metrics]
informativeness
naturalness
quality
bert_score_p
bert_score_r
bert_score_f
mover_score
bart_score_ref_hypo
bart_score_hypo_ref
bart_score_avg_f
...
[Automatic metrics]
bert_score_p
bert_score_r
bert_score_f
mover_score
bart_score_ref_hypo
bart_score_hypo_ref
bart_score_avg_f
bart_score_harm_f
bart_score_cnn_ref_hypo
bart_score_cnn_hypo_ref
...
[Human metrics]
informativeness
naturalness
quality
###Markdown
We can print out the correlation w.r.t. human judgement as below.
###Code
# Set valid metrics
valid_metrics = [
'rouge1_f',
'rouge2_f',
'rougel_f',
'bert_score_f',
'mover_score',
'prism_avg',
'bart_score_para_avg_f',
'bart_score_para_avg_f_de'
]
# The first argument is human metric while the latter is a list of metrics considered.
d2t_stat.evaluate_text('informativeness', valid_metrics)
###Output
Human metric: informativeness
metric spearman kendalltau
------------------------ ---------- ------------
bart_score_para_avg_f_de 0.335997 0.248525
bart_score_para_avg_f 0.329896 0.246686
prism_avg 0.304946 0.224797
bert_score_f 0.289118 0.217179
mover_score 0.283694 0.20884
rouge1_f 0.234338 0.177972
rouge2_f 0.198585 0.151011
rougel_f 0.188592 0.145508
###Markdown
To perform a significance test, use the *sig_test_two ( )* method
###Code
m1 = 'bart_score_para_avg_f'
m2 = 'prism_avg'
# The first two arguments are metrics that should be compared, the third argument is the human metric.
result = d2t_stat.sig_test_two(m1, m2, 'informativeness')
if result == 1:
print(f'{m1} is significantly better than {m2}')
elif result == -1:
print(f'{m2} is significantly better than {m1}')
else:
print('cannot decide')
###Output
100%|██████████| 1000/1000 [01:19<00:00, 12.54it/s]
###Markdown
Machine Translation For all language pairs, the analysis tools are the same. We begin by looking at reference length statistics.
###Code
wmt_stat = WMTStat('WMT/kk-en/final_p.pkl')
wmt_stat.print_ref_len()
###Output
Mean reference length: 17.75
Max reference length: 180
Min reference length: 1
20% percentile: 10.0
80% percentile: 25.0
90% percentile: 31.0
###Markdown
Next, we print out k-tau for all automatic metrics.
###Code
print('All metrics')
print(wmt_stat.metrics) # Print out all metrics
print('\n')
print('All k-tau')
wmt_stat.print_ktau()
print('\n')
print('k-tau for some metrics')
wmt_stat.print_ktau(['prism', 'bart_score_para'])
###Output
64%|██████▎ | 7/11 [00:00<00:00, 69.64it/s]
###Markdown
To print out the k-tau over certain reference length, run the following.
###Code
print('All k-tau')
wmt_stat.print_len_ktau(15, 25)
print('\n')
print('k-tau for some metrics')
wmt_stat.print_len_ktau(15, 25, ['prism', 'bart_score_para'])
###Output
100%|██████████| 9728/9728 [00:00<00:00, 648147.63it/s]
100%|██████████| 11/11 [00:00<00:00, 179.12it/s]
100%|██████████| 9728/9728 [00:00<00:00, 625838.84it/s]
100%|██████████| 2/2 [00:00<00:00, 194.46it/s]
###Markdown
To perform a significance test, use *sig_test_two ( )*
###Code
m1 = 'bart_score_para'
m2 = 'bleurt'
# The first two arguments are metrics that should be compared, the third argument is the human metric.
result = wmt_stat.sig_test_two(m1, m2)
if result == 1:
print(f'{m1} is significantly better than {m2}')
elif result == -1:
print(f'{m2} is significantly better than {m1}')
else:
print('cannot decide')
###Output
100%|██████████| 1000/1000 [00:33<00:00, 29.77it/s]
###Markdown
Attribute Description:

Attribute | Description
----------|-------------
`Invoice No` | Invoice ID, encoded as Label
`StockCode` | Unique code per stock, encoded as Label
`Description` | The Description, encoded as Label
`Quantity` | Quantity purchased
`InvoiceDate` | Date of purchase
`UnitPrice` | The target value, price of every product
`CustomerID` | Unique identifier for every customer
`Country` | Country of sales, encoded as Label

Target
`UnitPrice` is the target.

Performance Measure
RMSE (Root Mean Square Error)
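For reference, with $n$ predictions $\hat{y}_i$ and true prices $y_i$, the performance measure is
$$\mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\left(y_i - \hat{y}_i\right)^2}$$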
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from sklearn import preprocessing
import utility_functions as uf
###Output
_____no_output_____
###Markdown
Initial Data Analysis
###Code
data = pd.read_csv("data/Train.csv")
test = pd.read_csv("data/Test.csv")
data.head()
print("Shape of:")
print("-"*10)
print("\t Training data:", data.shape)
print("\t Test data:", test.shape)
###Output
Shape of:
----------
Training data: (284780, 8)
Test data: (122049, 7)
###Markdown
Drop duplicates
###Code
print(f"There are {data[data.duplicated(keep=False)].shape[0]} duplicates in training data")
data = data.drop_duplicates(ignore_index=True)
###Output
_____no_output_____
###Markdown
Missing Value Check and Type casting
###Code
print("Number of Nulls in Training data:",
data.isna().sum()\
.sum())
print("Number of Nulls in Test data:",
test.isna().sum()\
.sum())
data.info()
###### change dtypes to appropriate data types as applicable #######
categorical_cols = ['InvoiceNo', 'StockCode', 'Description', 'CustomerID', 'Country']
# convert to string
data[categorical_cols] = data.loc[:,categorical_cols].astype('object')
# convert to datetime
data['InvoiceDate'] = pd.to_datetime(data.loc[:,'InvoiceDate'])
data.describe()
# drop(columns=['year','month','day_of_week','hour','minutes','day_of_month']).describe()
# corr = data[['Quantity', 'UnitPrice']].corr(method='spearman')
corr = data.corr(method='spearman')
mask = np.triu(np.ones_like(corr, dtype=bool))
sns.heatmap(corr, annot=True, mask=mask, cbar=False)
plt.title("Spearman Correlation plot")
plt.tight_layout()
plt.xticks(ticks=[0,1], labels=['Quantity', ''], )
plt.yticks(ticks=[0,1], labels=['','UnitPrice'])
plt.show()
###Output
_____no_output_____
###Markdown
Observations
- There were $5093$ duplicate records in the training data that have been dropped.
- No missing values have been found.
- Both `Quantity` and `UnitPrice` have outliers; the severity needs to be further analysed, although we will be limited in our ability to deal with outliers in `UnitPrice` since it is the target.
- The minimum for `Quantity` is -80995, which seems improbable; also observe that the maximum is 80995. This will be further analysed.

Engineer Temporal Features from `InvoiceDate`
###Code
data['year'] = data.InvoiceDate.dt.year
data['month'] = data.InvoiceDate.dt.month
data['day'] = data.InvoiceDate.dt.dayofweek # Monday=0, Sunday=6
data['hour'] = data.InvoiceDate.dt.hour
data['minutes'] = data.InvoiceDate.dt.minute
data['day_of_month'] = data.InvoiceDate.dt.day
data.head()
###Output
_____no_output_____
###Markdown
General Hypothesis $Q$: Does every Invoice has only one Associated customer?$A:$ Yes every Invoice has only one unique customer associated with it. As shown below:
###Code
# No of unique InvoiceID
print("Number of unique invoices: ", data.InvoiceNo.nunique())
print('-'*70)
print("Number of Invoices with Number of unique customers !=1 :",
(data.groupby('InvoiceNo')['CustomerID'].nunique() != 1).sum()
)
###Output
Number of unique invoices: 20971
----------------------------------------------------------------------
Number of Invoices with Number of unique customers !=1 : 0
###Markdown
Univariate Analysis of Variables
`UnitPrice` $\Longrightarrow$ Target variable
###Code
data.UnitPrice.describe()
# iqr
# UNITPrice
q1, q3 = np.percentile(data.UnitPrice, [25, 75])
UP_iqr = q3 - q1
UP_max_threshold = q3 + 1.5 * UP_iqr
## LOG of UnitPrice
q1, q3 = np.percentile(np.log(data.UnitPrice + np.full_like(data.UnitPrice, fill_value=0.0001)),
[25, 75])
iqr = q3 - q1
log_max_threshold = q3 + 1.5 * iqr
log_median = np.percentile(np.log(data.UnitPrice + np.full_like(data.UnitPrice, fill_value=0.0001)),
50)
fig, ax = plt.subplots(1, 4, figsize=(20,5))
plt.subplot(ax[0])
# sns.boxplot(x=data.UnitPrice)
sns.kdeplot(x=data.UnitPrice)
plt.yticks([])
plt.ylabel('')
plt.title("Many Outliers present.\n Median = 1.95, Q3 = 3.75")
plt.subplot(ax[1])
sns.boxplot(x=np.log(data.UnitPrice + np.full_like(data.UnitPrice, fill_value=0.0001)))
plt.xlabel('')
plt.title("Box plot for Log of UnitPrice")
plt.subplot(ax[2])
plt.plot(data.UnitPrice, np.ones_like(data.UnitPrice),
'o', alpha=0.5)
plt.vlines(UP_max_threshold, 0.96, 1.04, linestyles='dashed', colors='g', label='q3+1.5*IQR = 7.5')
plt.yticks([])
plt.legend()
# plt.xlim(right=5000)
plt.title("UnitPrice distribution")
plt.subplot(ax[3])
plt.plot(np.log(data.UnitPrice + np.full_like(data.UnitPrice, fill_value=0.0001)),
np.ones_like(data.UnitPrice),
'o', alpha=0.4, linewidth=0.5)
plt.vlines(log_max_threshold, 0.96, 1.04, linestyles='dashed', colors='g', label=f'q3+1.5*IQR = {round(log_max_threshold,2)}')
plt.vlines(log_median, 0.96, 1.04, linestyles='dashed', colors='lime', label=f'median = {round(log_median,2)}')
plt.yticks([]) # remove y-ticks
plt.legend(loc='upper left')
# plt.xlim(right=5000)
plt.title(f"UnitPrice (Log transformed) distribution")
plt.show()
a = np.log(data.UnitPrice + np.full_like(data.UnitPrice, fill_value=0.0001))
print(f"{round(len(data.UnitPrice[data.UnitPrice>UP_max_threshold])/len(data.UnitPrice) * 100,0)}\
% of observations are above Max threshold in UnitPrice")
print(f"{round(len(a[a>log_max_threshold])/len(a) * 100,2)}\
% of observations are above Max threshold in Log of UnitPrice")
###Output
9.0% of observations are above Max threshold in UnitPrice
0.37% of observations are above Max threshold in Log of UnitPrice
###Markdown
Observation from analysis of `UnitPrice`
- Taking the Log reduced the outliers from $\approx 9\%$ to $0.37\%$.
- Log transformation will be used on UnitPrice, $\because$ it reduces the number of outliers significantly.
###Code
# jitter added
data['log_UnitPrice'] = np.log(data.UnitPrice + np.full_like(data.UnitPrice, fill_value=0.01))
###Output
_____no_output_____
###Markdown
`Quantity`
From the above initial analysis we know that there are some discrepancies in the Quantity values. We will explore them in depth.
###Code
print(f"Number records where quantity is zero: {data[data.Quantity==0].shape[0]}")
print(f"Number of records where quantity < zero: {data[data.Quantity<0].shape[0]}")
###Output
Number records where quantity is zero: 0
Number of records where quantity < zero: 6153
###Markdown
Analysing `-ve` order Quantities
Possibilities are that the negative quantities are either
- mistakenly recorded as negative quantities, or
- the result of negative proration, such as return orders [(refer)](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/How_Do_I_._._./How_do_I_handle_a_negative_invoice%3F).

It has been noticed that negative quantities are present even in the test set, which reduces the chance of them being data-entry errors.\
This also takes away the possibility of dropping such records from the analysis, although the negative quantities are a very small fraction of both the training and test datasets (as shown below).
###Code
print("Percentage of observations with negative quantity in Training data:",
round(data[data.Quantity < 0].shape[0] / data.shape[0] * 100
,2)
)
print("-"*100)
print("Percentage of observations with negative quantity in Test data:",
round(test[test.Quantity < 0].shape[0] / test.shape[0] * 100
,2)
)
data['Quantity_pos'] = data.Quantity.apply(lambda x: 'POS' if x>=0 else 'NEG')
fig, ax = plt.subplots(1,2, figsize=(10, 4), sharey=True)
plt.subplot(ax[0])
sns.boxplot(y='Quantity_pos', x='UnitPrice', data=data, hue='Quantity_pos')
plt.xlim(right=5000)
plt.title("Box plot of UnitPrice")
plt.subplot(ax[1])
sns.boxplot(y='Quantity_pos', x='log_UnitPrice', data=data, hue='Quantity_pos')
plt.ylabel('')
plt.yticks([])
plt.title("Box plot of UnitPrice(log applied)")
plt.suptitle("Difficult to visually differenciate between the Groups (Positive and negative Quantity)")
plt.tight_layout()
plt.show()
data.groupby('Quantity_pos')['UnitPrice'].describe().round(3)
###Output
_____no_output_____
###Markdown
Statistical test (Mann-Whitney U Test)
- Although the relationship is not exactly linear (that would indicate a Normal distribution), it is linear enough given the large sample size.
- Levene's test of homogeneity of variance is violated. If the ratio of the largest to smallest group were reasonable ($\approx$ 1.5 times), violation of this assumption would not cause any major issue. However, the ratio here is too high ($\approx$ 45 times), so the violation is serious.

Keeping the above points in view, the non-parametric variant of the t-test, the **Mann-Whitney U Test**, is used.

Normality test
###Code
stats.probplot(data.log_UnitPrice, plot=plt)
plt.title(f"Normal Probability Plot\nNo of Observations: {data.log_UnitPrice.shape[0]}")
plt.ylabel("Log (UnitPrice)")
plt.show()
###Output
_____no_output_____
###Markdown
Levene's test of Homogeneity
###Code
stat, p = stats.levene(data.UnitPrice[data.Quantity_pos=='POS'],
data.UnitPrice[data.Quantity_pos=='NEG'])
is_significant = lambda p_value: 'Significant' if p_value<=0.05 else 'NOT Significant'
print(f"Levenes test is {is_significant(p)}, with p = {round(p, 4)}.")
if is_significant(p).strip().lower() == 'significant':
print("Thus, assumption of Homogeneity of Variance is Violated.")
else:
print("Thus, assumption of Homogeneity of Variance is Not violated. ")
print(f"Ratio of Largest: Smallest = \
{round(data[data.Quantity_pos=='POS'].shape[0]/data[data.Quantity_pos=='NEG'].shape[0],0)}"
)
###Output
Ratio of Largest: Smallest = 45.0
###Markdown
Mann-Whitney U Test
$H_0$: The distribution of UnitPrice is **the same across categories**, i.e. POS and NEG quantities.\
$H_a$: The distribution of UnitPrice is **not the same across categories**, i.e. POS and NEG quantities.
###Code
stat, p = stats.mannwhitneyu(x=data.UnitPrice[data.Quantity_pos=='POS'],
y=data.UnitPrice[data.Quantity_pos=='NEG'],
alternative='two-sided')
if p < 0.05:
print(f"Mann-Whitney test is Significant (p={round(p, 4)}), meaning the distribution of UnitPrice is sepearate across groups.")
else:
print(f"The test is Not significant (p={round(p, 4)}), meaning the distribution of UnitPrice is same across groups. ")
###Output
Mann-Whitney test is Significant (p=0.0), meaning the distribution of UnitPrice differs across groups.
###Markdown
Observation from Quantities divided into POS and NEG categories
- Negative quantities are the result of **negative proration**.
- There is a **statistically significant difference** in the distribution of UnitPrice for **positive and negative quantities**.
- Thus `Quantity_pos` **may** prove to be a **useful feature** for any future predictive model.
###Code
plt.scatter(data.Quantity, data.log_UnitPrice, alpha=0.3, color='r')
plt.xlabel('Quantity')
plt.ylabel("Log (UnitPrice)")
plt.xlim(-1000, 1500)
rho = r"$\rho$"
plt.title(f"Very weak Linear correlation with Target variable\n{rho} = {round(stats.pearsonr(data.Quantity, data.log_UnitPrice)[0], 2)}")
plt.show()
###Output
_____no_output_____
###Markdown
Treating `Quantity` distribution
$\rightarrow$ Check natural distribution \
$\rightarrow$ Scale by Mean/Median (depending on outliers in distribution) \
$\rightarrow$ Power Transformation \
$\rightarrow$ Thresholding \
$\rightarrow$ Which observations are above the Threshold \
$\rightarrow$ **Log Transformation** is **not an option** due to negative values in the quantity data.
###Code
quant_YJ = preprocessing.PowerTransformer().fit_transform(data.Quantity.values.reshape(-1,1))
quant_scaled = preprocessing.StandardScaler().fit_transform(data.Quantity.values.reshape(-1,1))
def median_transformation(variable):
'''
Robust median/MAD scaling: z = (x - median) / MAD
'''
median = np.median(variable)
mad = stats.median_abs_deviation(variable, nan_policy='omit')
return pd.Series(variable).apply(lambda x: (x-median)/mad)
quant_med_scaled = median_transformation(data.Quantity)
### Box plots #########
fig, ax = plt.subplots(1,3, figsize=(15, 5))
plt.subplot(ax[0])
sns.boxplot(x=data.Quantity)
x_lim = (-100, 100)
plt.xlim(x_lim)
extreme_outliers = r"$\bf{Extreme \ Outliers}$"
plt.title(f"Quantitiy Distribution - zoomed in {x_lim}.\n \
Presence of {extreme_outliers}.\n \
Min = {data.Quantity.min().round(2)}, Median = {data.Quantity.median()}, Max = {data.Quantity.max()}")
plt.xlabel('')
plt.subplot(ax[1])
sns.boxplot(x=quant_YJ)
x_lim = (-10, 20)
plt.xlim(x_lim)
plt.title(f"Quantity Yeo Johnson transformed. - zoomed in {x_lim}.\n \
Min = {quant_YJ.min().round(2)}, Median = {np.median(quant_YJ).round(2)}, Max = {quant_YJ.max().round(2)}"
)
plt.xlabel('')
plt.subplot(ax[2])
sns.boxplot(x=quant_med_scaled)
x_lim = (-100, 100)
plt.xlim(x_lim)
plt.title(f"Quantity Median scaled - zoomed in {x_lim}.\n \
Min = {quant_med_scaled.min().round(2)}, Median = {quant_med_scaled.median().round(2)}, Max = {quant_med_scaled.max().round(2)}"
)
plt.xlabel('')
plt.tight_layout()
# plt.show()
### Distribution Plots ####
fig, ax = plt.subplots(1,4, figsize=(20,5))
plt.subplot(ax[0])
# x_lim = (-100, 100)
sns.kdeplot(data.Quantity)
plt.xlabel('')
# plt.xlim(x_lim)
plt.yticks([])
plt.ylabel('')
plt.title("Quantity")
plt.subplot(ax[1])
x_lim = (-10, 20)
sns.kdeplot(pd.Series(quant_YJ.reshape(len(quant_YJ))))
plt.xlim(x_lim)
plt.yticks([])
plt.ylabel('')
plt.title("Yeo Johnson Transformed")
plt.subplot(ax[2])
x_lim = (-10, 20)
sns.kdeplot(pd.Series(quant_scaled.reshape(len(quant_scaled))))
plt.xlim(x_lim)
plt.yticks([])
plt.ylabel('')
plt.title("Standard scaled")
plt.subplot(ax[3])
# x_lim = (-100, 100)
sns.kdeplot(quant_med_scaled)
plt.xlabel('')
# plt.xlim(x_lim)
plt.yticks([])
plt.ylabel('')
plt.title("Median Scaled")
# sns.kdeplot(pd.Series(quant_med_scaled.reshape(len(quant_med_scaled))))
plt.suptitle("Most of the observations are concentrated around a small range. Extreme Outliers need to be treated")
plt.show()
print('Range for Quantity: ', data.Quantity.max() - data.Quantity.min())
print('Range for Quantity - Yeo Johnson Transformed: ', (quant_YJ.max() - quant_YJ.min()).round(2))
print('Range for Quantity - Median Scaled: ', quant_med_scaled.max() - quant_med_scaled.min())
print('Range for Quantity - Standard Scaled: ', (quant_scaled.max() - quant_scaled.min()).round(2))
###Output
Range for Quantity: 161990
Range for Quantity - Yeo Johnson Transformed: 543.86
Range for Quantity - Median Scaled: 40497.5
Range for Quantity - Standard Scaled: 546.41
###Markdown
Thresholded data
- Thresholded data
- Yeo Johnson Transformed thresholded data
- Median transformed thresholded data
###Code
# Thresholded Quantity data
quant_thresholded = uf.threshold_data(data.Quantity)['th_data']
# Yeo Johnson Transformed thresholded data
quant_th_YJ = preprocessing.PowerTransformer().fit_transform(quant_thresholded.values.reshape(-1,1))\
.reshape(len(quant_thresholded))
# median scaled quantity thresholded
quant_med_th = uf.threshold_data(quant_med_scaled)['th_data'].values.reshape(len(quant_med_scaled))
fig, ax = plt.subplots(1,3, figsize=(15,5))
plt.subplot(ax[0])
sns.kdeplot(quant_thresholded)
plt.title(f"Thresholded Quantity data.\n\
Minimum = {quant_thresholded.min().round(2)}, Median = {quant_thresholded.median().round(2)}, Maximum = {quant_thresholded.max().round(2)}"
)
plt.yticks([])
plt.ylabel('')
plt.subplot(ax[1])
sns.kdeplot(quant_th_YJ)
plt.title(f"Yeo Johnson Transformed Thresholded data.\n\
Minimum = {quant_th_YJ.min().round(2)}, Median = {np.median(quant_th_YJ).round(2)}, Maximum = {quant_th_YJ.max().round(2)}"
)
plt.yticks([])
plt.ylabel('')
plt.subplot(ax[2])
sns.kdeplot(quant_med_th, color='lime')
plt.title(f"Median Transformed Thresholded data.\n\
Minimum = {quant_med_th.min().round(2)}, Median = {np.median(quant_med_th).round(2)}, Maximum = {quant_med_th.max().round(2)}"
)
plt.yticks([])
plt.ylabel('')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Takeaway from `Quantity`
- The Quantity NEG/POS category can be a good feature.
- Standard scaled and Median Transformed thresholded Quantity data will be experimented with while modelling.
###Code
data['Quantity_pos'] = data.Quantity.apply(lambda x: 'POS' if x>=0 else 'NEG')
###Output
_____no_output_____
###Markdown
`Country`
###Code
print(f"There are data from {data.Country.nunique()} countries.")
data.InvoiceNo.nunique()
len(set(data.InvoiceNo.unique())\
.intersection(set(test.InvoiceNo.unique()))
)
data.InvoiceDate = pd.to_datetime(data.InvoiceDate)
# data.rename(columns={'day':'day_of_week'}, inplace=True)
data.Description.nunique()
data[data.InvoiceNo==6141].shape
data[data.InvoiceNo==6141]
###Output
_____no_output_____
###Markdown
Basic Mean modelling
###Code
data.UnitPrice.mean()
# sketch: the simplest baseline predicts the overall mean UnitPrice for every observation
predicted = np.full(len(data), data.UnitPrice.mean())
###Output
_____no_output_____
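###Markdown
As a hedged follow-up sketch (not part of the original notebook), the constant-mean baseline above can be scored with the stated RMSE measure on the training data:
###Code
# RMSE of the naive mean baseline on the training set
from sklearn.metrics import mean_squared_error

baseline_rmse = np.sqrt(mean_squared_error(data.UnitPrice, predicted))
baseline_rmse
###Output
_____no_output_____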
###Markdown
EDA
###Code
stats.probplot(response, dist="norm", plot=pylab)
pylab.show()
stats.probplot(control_runs, dist="norm", plot=pylab)
pylab.show()
###Output
_____no_output_____
###Markdown
We see that both the trial run and control run response values are very nearly normally-distributed.
###Code
n = run_order.shape[0]
x_axis = [[i] for i in range(1, int(n / 2) + 1)]
plt.scatter(x_axis, control_runs, color='g' * int(n/2), label='Control Runs')
plt.scatter(x_axis, response, color='r' * int(n/2), label='Trial Runs')
plt.title('Run Order Plot')
plt.xlabel('Run Order')
plt.ylabel('Average Heart Rate')
plt.legend()
plt.show()
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:3: MatplotlibDeprecationWarning: Using a string of single character colors as a color sequence is deprecated. Use an explicit list instead.
This is separate from the ipykernel package so we can avoid doing imports until
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: MatplotlibDeprecationWarning: Using a string of single character colors as a color sequence is deprecated. Use an explicit list instead.
after removing the cwd from sys.path.
###Markdown
We see that the control run values are roughly stable throughout the experiment, with no obvious increase over time, suggesting that the chosen wash-out period of one day between trials is in fact a sufficient rest period. We also note that four of the trial runs are associated with markedly elevated response values even as the control runs associated with them are not noticeably elevated above the other control runs. This suggests that the factor levels associated with runs 4, 5, 7, and 8 lead to an increased average heart rate over those associated with the other four trial runs.
###Code
low_factor = response[design_mat[:,i] == -1]
high_factor = response[design_mat[:,i] == 1]
n_factors = design_mat.shape[1]
for i in range(n_factors):
low_factor = response[design_mat[:,i] == -1]
high_factor= response[design_mat[:,i] == 1]
factors = np.hstack((low_factor.reshape(-1,1), high_factor.reshape(-1,1)))
plt.boxplot(factors, positions=(-1,1), )
plt.title('Box Plot for Factor %s' % int(i+1))
plt.show()
###Output
_____no_output_____
###Markdown
We see a marked difference in average response between the low and high levels of factor two, and a slightly smaller though still noticeable separation for factors four and three. Factor one appears to have no clear separation between the levels, algthough the variance in the low level runs is greater than in the high level runs. Fit Linear Models
###Code
colnames = list(factor_labels.keys()) + ['heart_rate']
data = pd.DataFrame(np.hstack((design_mat, response.reshape(-1,1))), columns=colnames)
data
from statsmodels.formula.api import ols
mod = ols('heart_rate ~ t_awake + fasting + coffee + t_run', data=data)
res = mod.fit()
table = sm.stats.anova_lm(res, typ=2)
print(res.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: heart_rate R-squared: 0.971
Model: OLS Adj. R-squared: 0.932
Method: Least Squares F-statistic: 24.94
Date: Fri, 03 Dec 2021 Prob (F-statistic): 0.0123
Time: 21:08:39 Log-Likelihood: -13.969
No. Observations: 8 AIC: 37.94
Df Residuals: 3 BIC: 38.33
Df Model: 4
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept 165.1000 0.801 206.174 0.000 162.552 167.648
t_awake -1.1750 0.801 -1.467 0.239 -3.723 1.373
fasting -7.3000 0.801 -9.116 0.003 -9.848 -4.752
coffee 1.1500 0.801 1.436 0.246 -1.398 3.698
t_run -2.8250 0.801 -3.528 0.039 -5.373 -0.277
==============================================================================
Omnibus: 7.249 Durbin-Watson: 2.971
Prob(Omnibus): 0.027 Jarque-Bera (JB): 1.245
Skew: -0.027 Prob(JB): 0.536
Kurtosis: 1.068 Cond. No. 1.00
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
We see that factors two and four (`fasting` and `t_run`, respectively) are associated with effects which are signficant at the $5\%$ level, whereas factors one and three are not.Thus it appears that `fasting` has a large negative effect on heart rate. Runs 4, 5, 7, and 8, in which I was not fasting, had noticeable spikes in heart rate. To test this, we re-fit with just the fasting indicator and `t_run`:
###Code
from statsmodels.formula.api import ols
mod = ols('heart_rate ~ fasting + t_run', data=data)
res = mod.fit()
table = sm.stats.anova_lm(res, typ=2)
print(res.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: heart_rate R-squared: 0.930
Model: OLS Adj. R-squared: 0.902
Method: Least Squares F-statistic: 33.11
Date: Fri, 03 Dec 2021 Prob (F-statistic): 0.00131
Time: 21:27:13 Log-Likelihood: -17.479
No. Observations: 8 AIC: 40.96
Df Residuals: 5 BIC: 41.20
Df Model: 2
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept 165.1000 0.962 171.628 0.000 162.627 167.573
fasting -7.3000 0.962 -7.589 0.001 -9.773 -4.827
t_run -2.8250 0.962 -2.937 0.032 -5.298 -0.352
==============================================================================
Omnibus: 0.052 Durbin-Watson: 2.205
Prob(Omnibus): 0.974 Jarque-Bera (JB): 0.227
Skew: 0.135 Prob(JB): 0.893
Kurtosis: 2.220 Cond. No. 1.00
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
Looking at the R-squared value and noting that all of the effects are significant, it appears that this simpler model has a satisfactory fit.
###Code
plt.scatter(data['fasting'], res.resid)
plt.title('Residuals for Factor Two (`fasting`)')
plt.xlabel('Level')
plt.ylabel('Residual')
plt.show()
plt.scatter(data['t_run'], res.resid)
plt.title('Residuals for Factor Four (`t_run`)')
plt.xlabel('Level')
plt.ylabel('Residual')
plt.show()
plt.scatter(res.fittedvalues, res.resid)
plt.title('Residuals Versus Predicted Heart Rate')
plt.xlabel('Predicted Heart Rate')
plt.ylabel('Residual')
plt.show()
###Output
_____no_output_____
###Markdown
We see that the residuals associated with the model fit are evenly distributed about the origin. Note that the pairing present in the residual plots is a product of the design itself. Conclusions Based on the above analysis, factors two and four appear to have a significant effect on average heart rate during exercise, whereas factors one and three do not. We see that factor two is a associated with a large negative effect on average heart rate, which suggests that running while fasting leads to a reduced average heart rate versus running on a full stomach. We also see that factor four is associated with a negative effect on the response, suggesting that taking a day off between runs may lead to a lower average heart rate during the exercise.Anecdotally, running with a full stomach certainly feels much more difficult than running on an empty stomach, as does running on back to back days. This suggests that the results obtained are in accord with my own experience.
###Code
paces = np.loadtxt('/content/paces.txt')
paces_c, paces_t = paces.reshape(-1,2).T
n = run_order.shape[0]
x_axis = [[i] for i in range(1, int(n / 2) + 1)]
plt.scatter(x_axis, paces_c, color='g' * int(n/2), label='Control Runs')
plt.scatter(x_axis, paces_t, color='r' * int(n/2), label='Trial Runs')
plt.title('Average Pace Per Run (Both Control and Trial)')
plt.xlabel('Run Order')
plt.ylabel('Average Pace (in seconds)')
plt.hlines(510, 1, 8, label='Target pace (in seconds)')
plt.legend()
plt.show()
plt.scatter(data['heart_rate'], data['pace'])
plt.title('Average Heart Rate Versus Pace')
plt.xlabel('Average Heart Rate')
plt.ylabel('Average Pace (in seconds)')
min_hr = data['heart_rate'].min()
max_hr = data['heart_rate'].max()
plt.hlines(510, min_hr, max_hr, label='Target pace (in seconds)')
plt.legend()
plt.show()
data['pace'] = paces_t
from statsmodels.formula.api import ols
mod = ols('heart_rate ~ pace', data=data)
res = mod.fit()
table = sm.stats.anova_lm(res, typ=2)
print(res.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: heart_rate R-squared: 0.070
Model: OLS Adj. R-squared: -0.085
Method: Least Squares F-statistic: 0.4518
Date: Fri, 03 Dec 2021 Prob (F-statistic): 0.526
Time: 22:53:25 Log-Likelihood: -27.813
No. Observations: 8 AIC: 59.63
Df Residuals: 6 BIC: 59.79
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept 377.7200 316.331 1.194 0.278 -396.314 1151.754
pace -0.4184 0.622 -0.672 0.526 -1.941 1.105
==============================================================================
Omnibus: 0.212 Durbin-Watson: 1.461
Prob(Omnibus): 0.900 Jarque-Bera (JB): 0.354
Skew: -0.243 Prob(JB): 0.838
Kurtosis: 2.090 Cond. No. 5.03e+04
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 5.03e+04. This might indicate that there are
strong multicollinearity or other numerical problems.
###Markdown
Unsurprisingly, pace appears to be (slightly) negatively correlated with average heart rate (i.e. as one runs faster, one's heart rate increases). The effect, however, is very small, likely resulting from the small variation in pace between runs.
###Code
np.std(data['pace'])
###Output
_____no_output_____
###Markdown
With the standard deviation above, roughly 5 seconds per mile or less than one percent of the average pace, it is likely that the differences in heart rate attributable to differences in pace would be small, which is borne out by the results of the regression above. If I had not attempted to match pace between runs, then we would expect to identify a much stronger relationship between pace and heart rate. But because there is little variation in pace, the effect of other factors on average heart rate appears to outweigh that of pace in this sample. To verify that the results of the analysis above are not significantly affected by `pace`, we add `pace` to the full regression model as a covariate.
###Code
from statsmodels.formula.api import ols
mod = ols('heart_rate ~ t_awake + fasting + coffee + t_run + pace', data=data)
res = mod.fit()
table = sm.stats.anova_lm(res, typ=2)
print(res.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: heart_rate R-squared: 0.992
Model: OLS Adj. R-squared: 0.971
Method: Least Squares F-statistic: 47.22
Date: Fri, 03 Dec 2021 Prob (F-statistic): 0.0209
Time: 22:53:25 Log-Likelihood: -8.9857
No. Observations: 8 AIC: 29.97
Df Residuals: 2 BIC: 30.45
Df Model: 5
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept 3.0227 72.844 0.041 0.971 -310.399 316.444
t_awake -1.6447 0.567 -2.901 0.101 -4.084 0.794
fasting -7.8502 0.581 -13.505 0.005 -10.351 -5.349
coffee 0.3731 0.631 0.591 0.615 -2.344 3.090
t_run -3.2544 0.560 -5.808 0.028 -5.665 -0.843
pace 0.3189 0.143 2.225 0.156 -0.298 0.936
==============================================================================
Omnibus: 0.345 Durbin-Watson: 2.180
Prob(Omnibus): 0.842 Jarque-Bera (JB): 0.432
Skew: 0.254 Prob(JB): 0.806
Kurtosis: 1.981 Cond. No. 7.04e+04
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 7.04e+04. This might indicate that there are
strong multicollinearity or other numerical problems.
###Markdown
There are 577 rows and 101 columns. We got rid of the null values and filtered to coins with more than 0 coins mined. The columns come from the different values of the algorithm and prooftype elements, which amount to around 100 distinct values.
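For reference, a minimal sketch of that cleaning and encoding step, assuming hypothetical file and column names (`crypto_data.csv`, `TotalCoinsMined`, `Algorithm`, `ProofType`) that may differ from the actual source:

```python
import pandas as pd

# Hypothetical file and column names -- adjust to the actual crypto dataset
crypto_df = pd.read_csv("crypto_data.csv")
crypto_df = crypto_df.dropna()                             # drop rows with null values
crypto_df = crypto_df[crypto_df["TotalCoinsMined"] > 0]    # keep coins that have actually been mined
# One-hot encode the categorical columns; this is what produces ~100 dummy columns
X = pd.get_dummies(crypto_df[["Algorithm", "ProofType", "TotalCoinsMined"]],
                   columns=["Algorithm", "ProofType"])
```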
###Code
# Standardize your dataset
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
crypto_scaled = scaler.fit_transform(X)
# Perform dimensionality reduction with PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=0.9)
crypto_pca = pca.fit_transform(crypto_scaled)
crypto_pca_df = pd.DataFrame(crypto_pca)
crypto_pca_df
###Output
_____no_output_____
###Markdown
The number of columns was reduced to 77.
###Code
# Further reduce the dataset dimensions with t-SNE and visually inspect the results.
from sklearn.manifold import TSNE
tsne = TSNE(learning_rate=35)
tsne_features = tsne.fit_transform(crypto_pca_df)
tsne_features.shape
###Output
_____no_output_____
###Markdown
Only 2 columns are left
###Code
# Plot
import matplotlib.pyplot as plt
plt.scatter(tsne_features[:,0], tsne_features[:,1])
plt.show()
# Use a for-loop to determine the inertia for each k between 1 through 10
from sklearn.cluster import KMeans
inertia = []
# Same as k = list(range(1, 11))
k = [1,2,3,4,5,6,7,8,9,10]
# Looking for the best k
for i in k:
km = KMeans(n_clusters=i, random_state=0)
km.fit(crypto_pca_df)  # fit on the PCA-reduced crypto data
inertia.append(km.inertia_)
###Output
_____no_output_____
###Markdown
1) Imports, load and prepare data
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Data extracted from: https://www.realclearpolitics.com/epolls/2020/president/2020_elections_electoral_college_map.html

Methodology: States with a clear runaway favorite are fully attributed to that candidate and excluded from the analysis.
###Code
# load data
df = pd.read_excel('ElectionPolls.xlsx',delimiter=',')
df
# add column that indicates whether state is battleground state
df['Battleground'] = (df['Poll Trump'] != 0) * (df['Poll Trump'] != 100)
# compute electoral collage votes from non-battleground states
trump_solid = (df.loc[~df['Battleground']]['Electoral College Votes'].to_numpy() * (df.loc[~df['Battleground']]['Poll Trump'].to_numpy() > df.loc[~df['Battleground']]['Poll Biden'].to_numpy())).sum()
biden_solid = (df.loc[~df['Battleground']]['Electoral College Votes'].to_numpy() * (df.loc[~df['Battleground']]['Poll Trump'].to_numpy() < df.loc[~df['Battleground']]['Poll Biden'].to_numpy())).sum()
print('Votes from non-battleground states for Trump: {}\nVotes from non-battleground states for Biden: {}'.format(trump_solid, biden_solid))
# create arrays for votes from battleground states and poll results per candidate
votes = df.loc[df['Battleground']]['Electoral College Votes'].to_numpy()
trump = df.loc[df['Battleground']]['Poll Trump'].to_numpy()
biden = df.loc[df['Battleground']]['Poll Biden'].to_numpy()
size = len(votes)
###Output
_____no_output_____
###Markdown
2) Election simulation

**Methodology for election simulation:**

**1) Nation-level noise:** One candidate gets a fixed bonus (and the other the same fixed deduction) that is the same for all states. This bonus/deduction is drawn from a normal distribution with mean 0 and standard deviation as provided by the 'nation_sigma' parameter. Nation-level noise is intended to model a nation-wide bias of the polls toward one candidate. (Base case: 0.5 p.p.)

**2) State-level noise:** One candidate gets a fixed bonus (and the other the same fixed deduction) that is different in each state and independent across states. This bonus/deduction is drawn from a normal distribution with mean 0 and standard deviation as provided by the 'state_sigma' parameter. State-level noise is intended to model a poll's inherent uncertainty. (Base case: 3.5 p.p.)

**3) Trump bonus/deduction:** Explicit bonus for Trump and deduction for Biden as provided by the parameter 'trump_bonus'. This parameter can be used to manually change the poll prediction by the same percentage points across all states. It can be used to model a polling bias towards one candidate. (Base case: 0)

**4) Number of iterations:** Indicates the number of simulation iterations and is provided by the parameter 'n_runs' (Default: 1 million)
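In formulas, one simulation draw for battleground state $s$ works as follows (matching the `simulate_election` implementation below):

$$
\text{trump\_pred}_s = \text{poll}^{T}_s + \varepsilon_{\text{nation}} + \varepsilon_s + b,
\qquad
\text{biden\_pred}_s = \text{poll}^{B}_s - \varepsilon_{\text{nation}} - \varepsilon_s - b,
$$

with $\varepsilon_{\text{nation}} \sim \mathcal{N}(0, \sigma_{\text{nation}}^2)$ drawn once per iteration, $\varepsilon_s \sim \mathcal{N}(0, \sigma_{\text{state}}^2)$ drawn independently per state, and $b$ the Trump bonus; state $s$ is counted for Trump whenever $\text{trump\_pred}_s > \text{biden\_pred}_s$.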
###Code
def simulate_election(sigma_state, sigma_nation, trump_bonus=0, n_runs=1000000):
'''
Simulates election outcomes per state for n_runs simulation iterations.
Parameters:
sigma_state: standard deviation of normal distribution for noise on state-level (independent across states)
sigma_nation: standard deviation of normal distribution for noise on nation-level (same for all states)
trump_bonus: bonus for trump (default: 0)
n_runs: number of simulation iterations (default: 1 million)
'''
res = np.zeros([n_runs,size])
for i in range(n_runs):
nation_noise = np.random.normal() * sigma_nation
state_noise = np.random.normal(size=size) * sigma_state
trump_pred = trump + nation_noise + state_noise + trump_bonus
biden_pred = biden - nation_noise - state_noise - trump_bonus
res[i,] = trump_pred > biden_pred
return res
def get_stats(sim_result):
'''
Computes summary statistics for simulation result.
'''
mean = (sim_result * votes).sum(axis=1).mean() + trump_solid
std = (sim_result * votes).sum(axis=1).std()
median = np.median((sim_result * votes).sum(axis=1)) + trump_solid
mode = np.argmax(np.bincount(np.array((sim_result * votes).sum(axis=1),dtype='int'))) + trump_solid
trump_prob = (((sim_result * votes).sum(axis=1) + trump_solid) >= 270).sum()/sim_result.shape[0]
state_probs = sim_result.mean(axis=0)
res = {'mean':mean,
'std': std,
'median': median,
'mode': mode,
'trump_prob': trump_prob,
'state_probs': [state_probs]}
return res
# Get base case simulation results
base_pred = simulate_election(3.5,0.5)
base_stats = get_stats(base_pred)
# set simulation parameters for scenario analysis
state_sigma = [1,2,3,4,5]
nation_sigma = [.25,.5,1,1.5,2]
trump_bonus = [0,.5,1,2]
# run simulations for scenario analysis
res = pd.DataFrame()
count = 0
n_iterations = len(state_sigma) * len(nation_sigma) * len(trump_bonus)
print('Total iterations: ', n_iterations)
for ss in state_sigma:
for ns in nation_sigma:
for tb in trump_bonus:
stats = get_stats(simulate_election(ss,ns,tb))
stats['state_sigma'] = ss
stats['nation_sigma'] = ns
stats['trump_bonus'] = tb
res = res.append(pd.DataFrame(stats))
count += 1
print('Completed iteration: ', count)
res
res.to_pickle('./results/results.pkl')
###Output
_____no_output_____
###Markdown
3) Visualize simulation results
###Code
colors = {'grey': '#666666',
'dark_blue': '#000045',
'light_blue': '#009dff',
'orange': '#ff9100'}
plt.figure(figsize=(15,10))
plt.hist(trump_solid + (base_pred * votes).sum(axis=1),bins=300, density=True, alpha=.5, color='blue')
plt.plot([base_stats['mode'],base_stats['mode']],[0,.016], linestyle='--', linewidth=1, color='black', label='most frequent value')
plt.plot([base_stats['mean'],base_stats['mean']],[0,.016], linestyle='-', linewidth=1, color='black', label='mean')
plt.plot([base_stats['median'],base_stats['median']],[0,.016], linestyle='dashdot', linewidth=1, color='black', label='median')
plt.plot([270,270],[0,.016], linestyle='dashdot', linewidth=1, color='red', label='Votes required for victory')
plt.xlim(120,320)
plt.title('Base case: Frequency distribution of predicted electoral votes for Trump', fontweight='bold', fontsize=14)
plt.xlabel('Electoral votes', fontsize=14)
plt.ylabel('Frequency', fontsize=14)
plt.legend()
plt.savefig('./plots/base_case_histogram.png', dpi=600)
# Base case statistics
base_stats
labels = df.loc[df['Battleground']].apply(lambda x: "{} ({})".format(x['State '],x['Electoral College Votes']), axis=1)
x = np.arange(len(labels))
width = .7
fig,ax = plt.subplots(figsize=(15,10))
ax.barh(x, base_pred.mean(axis=0),width)
ax.plot([.5,.5],[-1,len(labels)], linestyle='-', color='black', label='50% threshold')
ax.plot([.3,.3],[-1,len(labels)], linestyle='--', color='red', label='required threshold for trump victory')
ax.set_yticks(x)
ax.set_yticklabels(labels)
ax.set_ylabel('State (votes)',fontsize=14)
ax.set_xlabel('Probability of Trump victory',fontsize=14)
ax.set_title('Base case: Probability of Trump victory in battleground states',fontsize=16,fontweight='bold')
fig.legend(loc='upper right', bbox_to_anchor=(0.99, 0.96))
fig.tight_layout()
plt.savefig('./plots/base_case_stateProbs.png', dpi=600)
n_scenarios = len(state_sigma) * len(nation_sigma)
x = np.arange(n_scenarios)
label = res.loc[res['trump_bonus'] == 0].apply(lambda x: (x['state_sigma'],x['nation_sigma']), axis=1).to_numpy()
i = 0
fig,ax = plt.subplots(figsize=(15,10))
ax.plot(x,res.loc[res['trump_bonus'] == 0, 'trump_prob'], color='black', label='No Trump bonus')
ax.plot(x,res.loc[res['trump_bonus'] == 0.5, 'trump_prob'], color=colors['orange'], label='0.5% Trump bonus')
ax.plot(x,res.loc[res['trump_bonus'] == 1, 'trump_prob'], color=colors['light_blue'], label='1% Trump bonus')
ax.plot(x,res.loc[res['trump_bonus'] == 2, 'trump_prob'], color='red', label='2% Trump bonus')
ax.set_xticks([],[])
ax.set_ylabel('Probability of Trump victory',fontsize=14)
ax.set_title('Scenario analysis: Probability of Trump victory',fontsize=16,fontweight='bold')
ax.set_xticks(x)
ax.set_xlabel('Parameters (state_sigma, nation_sigma)',fontsize=14)
ax.set_xticklabels(label, rotation=90)
fig.legend(loc='upper right', bbox_to_anchor=(0.82, 0.86))
plt.savefig('./plots/scenario_analysis_victory_probability.png', dpi=600)
n_scenarios = len(state_sigma) * len(nation_sigma)
x = np.arange(n_scenarios)
label = res.loc[res['trump_bonus'] == 0].apply(lambda x: (x['state_sigma'],x['nation_sigma']), axis=1).to_numpy()
fig,axs = plt.subplots(3,1,figsize=(15,15))
i = 0
axs[i].errorbar(x,res.loc[res['trump_bonus'] == 0, 'mean'], yerr=res.loc[res['trump_bonus'] == 0, 'std'], color='black', alpha=.5)
axs[i].plot([0,n_scenarios],[270,270], linestyle='--', color='red')
axs[i].scatter(x,res.loc[res['trump_bonus'] == 0, 'mean'], color='black')
axs[i].scatter(x,res.loc[res['trump_bonus'] == 0, 'mode'], color=colors['orange'])
axs[i].scatter(x,res.loc[res['trump_bonus'] == 0, 'median'], color='green')
axs[i].set_xticks([],[])
axs[i].set_ylabel('Trump votes',fontsize=14)
axs[i].set_title('Scenario analysis: No Trump bonus',fontsize=14,fontweight='bold')
i = 1
axs[i].errorbar(x,res.loc[res['trump_bonus'] == i, 'mean'], yerr=res.loc[res['trump_bonus'] == i, 'std'], color='black', alpha=.5)
axs[i].plot([0,n_scenarios],[270,270], linestyle='--', color='red')
axs[i].scatter(x,res.loc[res['trump_bonus'] == i, 'mean'], color='black')
axs[i].scatter(x,res.loc[res['trump_bonus'] == i, 'mode'], color=colors['orange'])
axs[i].scatter(x,res.loc[res['trump_bonus'] == i, 'median'], color='green')
axs[i].set_xticks([],[])
axs[i].set_ylabel('Trump votes',fontsize=14)
axs[i].set_title('Scenario analysis: 1% Trump bonus',fontsize=14,fontweight='bold')
i = 2
axs[i].errorbar(x,res.loc[res['trump_bonus'] == i, 'mean'], yerr=res.loc[res['trump_bonus'] == i, 'std'], color='black', alpha=.5)
axs[i].plot([0,n_scenarios],[270,270], linestyle='--', color='red', label='Required votes for victory')
axs[i].scatter(x,res.loc[res['trump_bonus'] == i, 'mean'], color='black', label='prediction mean')
axs[i].scatter(x,res.loc[res['trump_bonus'] == i, 'mode'], color=colors['orange'], label='prediction most frequent outcome')
axs[i].scatter(x,res.loc[res['trump_bonus'] == i, 'median'], color='green', label='prediction median')
axs[i].set_xticks(x)
axs[i].set_xlabel('Parameters (state_sigma, nation_sigma)',fontsize=14)
axs[i].set_ylabel('Trump votes',fontsize=14)
axs[i].set_title('Scenario analysis: 2% Trump bonus',fontsize=14,fontweight='bold')
axs[i].set_xticklabels(label, rotation=90)
fig.legend(loc='lower right')
plt.savefig('./plots/scenario_analysis_details.png', dpi=600)
###Output
_____no_output_____
###Markdown
[Dask Dashboard](localhost:8787)

Load / Clean Data

xkcd dataset

First we load in the xkcd dataset from https://www.explainxkcd.com. This dataset has 2388 xkcd comics (collected on 22 November 2020). Each row has the following features:

* **xkcd**: The link to the official xkcd comic URL
* **xkcd_num**: The extracted comic number from the URL
* **Title**: The link to the Explain XKCD wiki page for that comic
* **Image**: Link to a backup hosted image of the XKCD comic
* **Date**: The original date of publication of the comic
* **TitleText**: Title of the comic
* **Explanation**: A community explanation of the comic deciphering the sometimes pithy or cryptic humor
* **Transcript**: If the comic has characters speaking, this section has the text of the comic
###Code
# Process explain xkcd data
links_df = dd.read_parquet("./data/xkcd/links_df.parquet") # .set_index("Title")
# There is a bug in the data collection which is caused by this surprise:
# https://www.explainxkcd.com/wiki/index.php/Disappearing_Sunday_Update
# its a comic with the same id which he speculates will break automated system. Sure
# broke mine!
links_df = links_df[links_df["TitleText"] != "Disappearing Sunday Update"].set_index("Title")
pages_df = dd.read_parquet("./data/xkcd/pages_df.parquet", blocksize=None) # .set_index("Title")
pages_df = pages_df.drop_duplicates()
xkcd_df = dd.merge(links_df, pages_df, how='left', on="Title")
xkcd_df["xkcd_num"] = xkcd_df["xkcd"].apply(
lambda url: int(URL(url).path.replace("/", "")), meta='str'
)
print(xkcd_df.columns)
CURR_MAX_COMIC = xkcd_df["xkcd_num"].max().compute()
xkcd_df.head()
###Output
_____no_output_____
###Markdown
reddit dataset

Next we load in the reddit dataset, which is a collection of every reference to an xkcd URL on Reddit. This dataset has 313485 samples and 9 features. The comments are collected from 2007 to 2019, inclusive. Each sample has the following features:

* **body**: The text in the comment body (should have an xkcd url)
* **author**: The reddit user's name
* **score**: The comment's score (should be >= 1)
* **permalink**: The permalink to the comment
* **parent_***: The previous four attributes for the child comment's parent
* **xkcd**: The xkcd comic url extracted from the child comment
* **xkcd_num**: The comic number extracted from the URL
###Code
%%time
# Process reddit data
file_names = [
*list(map(str, range(2007, 2015))),
*[f"{year}_{month:02d}" for year in range(2015, 2020) for month in range(1, 13)]
]
reddit_dfs = [
dd.read_parquet(f"./data/reddit/{file_name}.parquet")
for file_name in file_names
]
reddit_df = dd.concat(reddit_dfs, ignore_index=True)
print(reddit_df.columns)
reddit_df.tail()
%%time
# Clean up reddit_df
# remove null rows in important columns
reddit_df = reddit_df[~(
reddit_df["xkcd"].isnull()
| reddit_df["parent_body"].isnull()
| reddit_df["body"].isnull()
)]
# # Cannot remove individual rows in dask
# # remove malformed row
# reddit_df = reddit_df.drop(labels=[52737], axis=1)
# Clean up multiple versions of URL to singular version
# (i.e. m.xkcd, ending with slash, without slash, etc...)
reddit_df["xkcd"] = reddit_df["xkcd"].apply(
lambda url: "https://xkcd.com/" + URL(url).path.replace("/", ""), meta=str
)
# Drop invalid comic numbers
# the convert_dtype=False is required here because some annoying people used invalid URLs
# with really large numbers
reddit_df["xkcd_url_type"] = reddit_df["xkcd"].apply(lambda url: URL(url), meta=URL)
def convert_to_num(url):
url_num = int(url.path[1:])
if url_num < 1 or url_num > CURR_MAX_COMIC:
return -1
else:
return url_num
# Add URL --> number column
reddit_df["xkcd_num"] = reddit_df["xkcd_url_type"].apply(convert_to_num, meta=int)
reddit_df = reddit_df[
(reddit_df["xkcd_num"] > 0)
& ~reddit_df["xkcd_num"].isnull()
]
# naive remove samples with xkcd in parent
# likely over fit signal (e.g. reminds of this specific xkcd 33)
# or low signal... (e.g. does anyone have the xkcd link)
reddit_df = reddit_df[~reddit_df["parent_body"].str.contains("xkcd")]
def strip_markdown(sample):
html = markdown(sample)
return ''.join(BeautifulSoup(html).findAll(text=True))
# strip markdown from text
# technically we don't use the child body comment so we don't have to do this
# reddit_df["body"] = reddit_df["body"].apply(unmark, meta=str)
reddit_df["parent_body"] = reddit_df["parent_body"].apply(strip_markdown, meta=str)
reddit_df.compute()
%%time
# what are the most common referenced xkcds on Reddit?
# For some reason value_counts does not work with modin dataframes
print(reddit_df["xkcd"].value_counts().nlargest(15).compute())
%%time
# how many xkcds have never been referenced on Reddit?
xkcds = dd.from_pandas(pd.Series(range(1, CURR_MAX_COMIC+1), name="xkcds"), npartitions=1)
# reddit_set = set(reddit_df["xkcd_num"].tolist())
num = (~xkcds.isin(reddit_df["xkcd_num"].unique().compute().tolist())).sum().compute()
print(f"Number of unreferenced xkcds: {num}")
print(f"Percentage of total: {num / len(xkcds) * 100:.2f}%")
%%time
# simple tfidf model that uses the explanations from explain xkcd
tfidf = TfidfVectorizer(strip_accents='ascii', stop_words='english', ngram_range=(1, 6), min_df=0.03)
exp_vec = tfidf.fit_transform(xkcd_df['Explanation'].compute())
reddit_vec = tfidf.transform(reddit_df['parent_body'].compute())
%%time
y = reddit_df["xkcd_num"].values.compute().reshape((-1, 1))
# subtract 1 from y so that the xkcd numbers are 0 indexed
y -= 1
cos_y_hat = cosine_similarity(reddit_vec, exp_vec)
def accuracy_n(y, y_hat, n=1):
"""Calculate the top-n accuracy given predicted class probabilities"""
# arg sort along the rows
top_n = np.argsort(y_hat, 1)[:, -n:]
return np.mean(np.fromiter((
1 if y[k] in top_n[k]
else 0
for k in range(len(top_n))
), dtype=np.int8))
%%time
top_1 = accuracy_n(y, cos_y_hat)
top_5 = accuracy_n(y, cos_y_hat, n=5)
print(f"Top-1 Acc: {top_1*100:.3f}%")
print(f"Top-5 Acc: {top_5*100:.3f}%")
# BM25
class BM25Transformer(BaseEstimator, TransformerMixin):
"""
Parameters
----------
use_idf : boolean, optional (default=True)
k1 : float, optional (default=2.0)
b : float, optional (default=0.75)
References
----------
Okapi BM25: a non-binary model - Introduction to Information Retrieval
http://nlp.stanford.edu/IR-book/html/htmledition/okapi-bm25-a-non-binary-model-1.html
"""
def __init__(self, use_idf=True, k1=2.0, b=0.75):
self.use_idf = use_idf
self.k1 = k1
self.b = b
def fit(self, X):
"""
Parameters
----------
X : sparse matrix, [n_samples, n_features]
document-term matrix
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
idf = np.log((n_samples - df + 0.5) / (df + 0.5))
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""
Parameters
----------
X : sparse matrix, [n_samples, n_features]
document-term matrix
copy : boolean, optional (default=True)
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
# Document length (number of terms) in each row
# Shape is (n_samples, 1)
dl = X.sum(axis=1)
# Number of non-zero elements in each row
# Shape is (n_samples, )
sz = X.indptr[1:] - X.indptr[0:-1]
# In each row, repeat `dl` for `sz` times
# Shape is (sum(sz), )
# Example
# -------
# dl = [4, 5, 6]
# sz = [1, 2, 3]
# rep = [4, 5, 5, 6, 6, 6]
rep = np.repeat(np.asarray(dl), sz)
# Average document length
# Scalar value
avgdl = np.average(dl)
# Compute BM25 score only for non-zero elements
data = X.data * (self.k1 + 1) / (X.data + self.k1 * (1 - self.b + self.b * rep / avgdl))
X = sp.csr_matrix((data, X.indices, X.indptr), shape=X.shape)
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
return X
re_stopwords = re.compile(r'\b(' + r'|'.join(stopwords.words('english')) + r')\b\s*')
# remove stop words and punctuation
replace_vec = np.vectorize(
lambda item: re_stopwords.sub('', item).translate(str.maketrans('', '', string.punctuation))
)
class StopWordRemover(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
return replace_vec(X)
StopWordRemover().fit_transform(np.array([
["This is a test", "hello %world this is a test."],
["another one", "of how well"],
["hello world, today is a good day.", "this works."]
]))
%%time
# TODO: Look into dask_ml to replace these custom transformers so
# they can be a lot faster
p = Pipeline([
('stop', StopWordRemover()),
('count_vec', CountVectorizer(ngram_range=(1, 6))),
('bm25', BM25Transformer()),
])
exp_vec2 = p.fit_transform(xkcd_df['Explanation'])
reddit_vec2 = p.transform(reddit_df['parent_body'])
cos_y_hat2 = cosine_similarity(reddit_vec2, exp_vec2)
top_1 = accuracy_n(y, cos_y_hat2)
top_5 = accuracy_n(y, cos_y_hat2, n=5)
print(f"Top-1 Acc: {top_1*100:.3f}%")
print(f"Top-5 Acc: {top_5*100:.3f}%")
%%time
# This takes about 10 minutes right now
X_train_raw, X_test_raw, y_train, y_test = train_test_split(reddit_df['parent_body'], reddit_df["xkcd_num"] - 1, test_size=0.25)
xgb_pipe = clone(p)
X_train = xgb_pipe.fit_transform(X_train_raw)
X_test = xgb_pipe.transform(X_test_raw)
eval_set = [(X_train, y_train), (X_test, y_test)]
# TODO: Fix bug attribute to_delayed not found (basically everything works up until this point)
# clf = XGBClassifier()
# clf.fit(X_train, y_train, eval_set=eval_set)
# clf.score(X_test_raw, y_test)
###Output
_____no_output_____
###Markdown
Analyzing Real vs. Fake News Article Headlines 📰

Author: [Navraj Narula](http://navierula.github.io)

Data Source: [Randomly-Collected Fake News Dataset](https://github.com/BenjaminDHorne/fakenewsdata1)

Resources Consulted: [Text Mining with R](http://tidytextmining.com), [R: Text Classification using a K Nearest Neighbour Model](http://garonfolo.dk/herbert/2015/05/r-text-classification-using-a-k-nearest-neighbour-model/)
###Code
# turn off warnings
options(warn=-1)
# import libraries
library(dplyr)
library(e1071)
library(ggplot2)
library(tidytext)
library(stringr)
library(RColorBrewer)
library(tm)
library(class)
library(SnowballC)
# load in dataset
mydata = read.csv("cleaned_data/headlines.txt",sep="\t",stringsAsFactors = FALSE,col.names=c("text", "status"),fill=TRUE)
# remove rows with empty values
mydata = mydata[!apply(mydata, 1, function(x) any(x=="")),]
# preview the first five rows
# (mostly fake articles at the top)
head(mydata)
# preview the last five rows
# (mostly real articles at the bottom)
tail(mydata)
# calculate term/word frequency for words present in articles
news_words <- mydata %>%
unnest_tokens(word, text) %>%
count(status, word, sort = TRUE) %>%
ungroup()
total_words <- news_words %>%
group_by(status) %>%
summarize(total = sum(n))
news_words <- left_join(news_words, total_words)
news_words
###Output
Joining, by = "status"
###Markdown
From the table above, we can see that the word "trump" is not only the most commonly used word in real news article headlines, but also the most commonly used word overall. This makes sense given the past election cycle. Out of 633 total words that appeared in real news article headlines, the word "trump" appeared 28 times, or roughly 4.4% overall.

In fake news article headlines, the most commonly used word was "obama," followed by "trump" once again. These words appeared 20 out of 842 times and 19 out of 842 times, or roughly 2.4% and 2.3%, respectively.
###Code
# visualize word counts in buckets
ggplot(news_words, aes(n/total, fill = status)) +
geom_histogram(show.legend = TRUE,binwidth = 30,color="black") +
facet_wrap(~status, ncol = 4)
###Output
_____no_output_____
###Markdown
The visualization above simply counts the number of words present in each type of headline. For fake news headlines, the total number of words is 842. The total number of words for real news headlines is 633. Considering that the dataset I am using contains fewer real news headlines than fake news headlines, these counts make sense.
###Code
sprintf("The number of real news headlines in my dataset is: %d", str_count(mydata, "real")[2])
sprintf("The number of fake news headlines in my dataset is: %d", str_count(mydata, "fake")[2])
# calculate frequency by rank, using Zipf's law
freq_by_rank <- news_words %>%
group_by(status) %>%
mutate(rank = row_number(),
`term frequency` = n/total)
freq_by_rank
###Output
_____no_output_____
###Markdown
The rank column gives each word's position in the frequency table. Term frequency is plotted against rank below and shows a roughly constant negative slope.
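This is the behaviour described by Zipf's law: a word's frequency is approximately inversely proportional to a power of its rank,

$$
f(r) \propto \frac{1}{r^{\alpha}}
\quad\Longleftrightarrow\quad
\log f(r) = c - \alpha \log r,
$$

so on a log-log plot of term frequency against rank the points fall close to a straight line with slope $-\alpha$ (classically $\alpha \approx 1$).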
###Code
myColors <- c("gold4", "mediumorchid4")
# plot Zipf's law
freq_by_rank %>%
ggplot(aes(rank, `term frequency`, col=status)) +
geom_line(size = 3, alpha = 0.8) +
scale_x_log10() +
scale_y_log10() +
scale_color_manual(values=myColors)
###Output
_____no_output_____
###Markdown
From the above graph, we can see that words associated with real news headlines have a higher rank - which is not surprising. I will now use TF-IDF (Term Frequency–Inverse Document Frequency) to find the most relevant word for each article headline. According to [tidytextmining](http://tidytextmining.com/tfidf.html#term-frequency-in-jane-austens-novels), "the idea of tf-idf is to find the important words for the content of each document by decreasing the weight for commonly used words and increasing the weight for words that are not used very much in a collection or corpus of documents."

TF-IDF may be a good method for understanding the contents of a document (or headline, in our case) because it finds words that are common, but not too common. This perhaps gets rid of words that are unnecessary or irrelevant.
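For reference, the standard tf-idf score computed by `bind_tf_idf` for a term $t$ in a document $d$ (here, a headline class) is

$$
\text{tf-idf}(t, d) = \text{tf}(t, d) \cdot \ln\frac{N}{n_t},
$$

where $\text{tf}(t,d)$ is the relative frequency of $t$ in $d$, $N$ is the number of documents, and $n_t$ is the number of documents containing $t$. A term that appears in every document gets $\ln(N/n_t) = \ln 1 = 0$, which is why the very common words below have a tf-idf of zero.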
###Code
news_words <- news_words %>%
bind_tf_idf(word, status, n)
news_words
###Output
_____no_output_____
###Markdown
We can see that tf-idf scores are ZERO for words that are very common. They appear in both types of news headlines. The idf will be low for such words and higher for words that appear in fewer headlines.
###Code
# order terms by highest tf-idf score
news_words %>%
select(-total) %>%
arrange(desc(tf_idf))
myColors <- c("rosybrown3", "darkseagreen4")
# plot top 30 words by tf-idf
plot_ <- news_words %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word))))
plot_ %>%
top_n(30) %>%
ggplot(aes(word, tf_idf, fill = status)) +
geom_bar(stat="identity") +
scale_fill_manual(values=myColors) + #scale_fill instead of scale_col to fill color manually
labs(x = "words", y = "tf-idf") +
coord_flip()
myColors <- c("lightpink1", "cornflowerblue")
# plot by grouping for top 25 words
plot_ %>%
group_by(status) %>%
top_n(25) %>%
ungroup %>%
ggplot(aes(word, tf_idf, fill = status)) +
geom_col(show.legend = FALSE) +
labs(x = "word", y = "tf-idf") +
facet_wrap(~status, ncol = 2, scales = "free") +
scale_fill_manual(values=myColors) +
coord_flip()
###Output
Selecting by tf_idf
###Markdown
News Classifier Using K-Nearest Neighbors Algorithm
###Code
# turn off warnings
options(warn=-1)
#install.packages("RTextTools") #try installing this as a package
# set seed value
set.seed(100)
# generate headlines corpus
headlines <- Corpus(VectorSource(mydata$text))
# clean headlines
headlines <- tm_map(headlines, content_transformer(tolower))
headlines <- tm_map(headlines, removeNumbers)
headlines <- tm_map(headlines, removeWords, stopwords("english"))
headlines <- tm_map(headlines, removePunctuation)
headlines <- tm_map(headlines, stripWhitespace)
headlines <- tm_map(headlines, stemDocument, language = "english")
# create document-term matrix
dtm <- DocumentTermMatrix(headlines)
# transforms document-term matrix to dataframe
mat.mydata <- as.data.frame(data.matrix(dtm), stringsAsfactors = FALSE)
# column bind on status
mat.mydata <- cbind(mat.mydata, mydata$status)
# Change name of new column to "status"
colnames(mat.mydata)[ncol(mat.mydata)] <- "status"
all <- 0
max = -Inf
for (i in 1:1000)
{
# split data into train and test sets
train <- sample(nrow(mat.mydata), ceiling(nrow(mat.mydata) * .50))
test <- (1:nrow(mat.mydata))[- train]
# assign classifier
classifier <- mat.mydata[, "status"]
modeldata <- mat.mydata[,!colnames(mat.mydata) %in% "status"]
# make predictions using knn algo
knn_predictions <- knn(modeldata[train, ], modeldata[test, ], classifier[train])
# create confusion matrix
confusion_matrix <- table("Predictions" = knn_predictions, Actual = classifier[test])
accuracy <- sum(diag(confusion_matrix))/length(test) * 100
all = all + accuracy
if (accuracy > max) {
max <- accuracy # find max accuracy
print(max)
print(confusion_matrix)
}
}
all/1000
###Output
_____no_output_____
###Markdown
Dataset Dataset creation
###Code
# Load words dataset table
words = load_words('data/database/words.csv')
words.head()
# Retrieve dictionaries mapping lemma tuples to numeric value
w2i, i2w = map_words(words)
# Map lemmas to node numbers
words['node'] = words.apply(lambda w: w2i[(w.text, w.pos)], axis=1)
words.head()
# Load tweets dataset table
tweets = load_tweets('data/database/tweets.csv')
tweets.head()
###Output
_____no_output_____
###Markdown
Dataset statistics Number of tweets
###Code
# Get the ids of tweets from 2017 and the ones from 2018
tweets_2017 = tweets.id_str[tweets.created_at.dt.year == 2017].values
tweets_2018 = tweets.id_str[tweets.created_at.dt.year == 2018].values
# Show tweets distribution
fig, ax = plt.subplots(figsize=(7.5, 5))
_ = ax.set_title('Tweet count for 2017 and 2018 analyzed period', fontsize=18)
_ = ax.bar(['2017'], [len(tweets_2017)])
_ = ax.bar(['2018'], [len(tweets_2018)])
_ = plt.savefig('images/analysis/tweet_counts.png')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Words count
###Code
# Show word counts in tweets of 2017 and 2018 respectively
fig, ax = plt.subplots(figsize=(7.5, 5))
_ = ax.set_title('Word count for 2017 and 2018 analyzed period', fontsize=18)
_ = ax.bar(['2017'], sum(words.tweet.isin(tweets_2017)))
_ = ax.bar(['2018'], sum(words.tweet.isin(tweets_2018)))
_ = plt.savefig('images/analysis/words_counts.png')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Unique words count
###Code
# Show unique word counts in tweets of 2017 and 2018 respectively
unique_words_2017 = words.text[words.tweet.isin(tweets_2017)].unique()
unique_words_2018 = words.text[words.tweet.isin(tweets_2018)].unique()
fig, ax = plt.subplots(figsize=(7.5, 5))
_ = ax.set_title('Unique word count for 2017 and 2018 analyzed period')
_ = ax.bar(['2017'], unique_words_2017.shape[0])
_ = ax.bar(['2018'], unique_words_2018.shape[0])
_ = plt.savefig('images/analysis/nodes_counts.png')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Tweets lengths distributions

The histogram shows the distribution of tweet lengths in both the 2017 and the 2018 network. The difference between the two distributions is due to the fact that in November 2017 Twitter doubled the maximum tweet length in terms of characters.
###Code
# Compute length of each tweet, for either words and characters
tweets_ = tweets.loc[:, ['id_str']]
tweets_['len_words'] = tweets.apply(lambda t: len(t.text.split(' ')), axis=1)
tweets_['len_chars'] = tweets.apply(lambda t: len(t.text), axis=1)
# Get 2017 and 2018 tweets
tweets_2017_ = tweets_[tweets_['id_str'].isin(tweets_2017)]
tweets_2018_ = tweets_[tweets_['id_str'].isin(tweets_2018)]
# Show distribution of words number per tweet in 2017 and 2018
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
# Word lengths
_ = axs[0].set_title('Number of words per tweet',fontsize=18)
_ = axs[0].hist(tweets_2017_['len_words'], bins=25, density=True, alpha=.7)
_ = axs[0].hist(tweets_2018_['len_words'], bins=50, density=True, alpha=.7)
_ = axs[0].legend(['Tweet length in 2017', 'Tweet length in 2018'])
# Charactes lengths
_ = axs[1].set_title('Number of characters per tweet', fontsize=18)
_ = axs[1].hist(tweets_2017_['len_chars'], bins=25, density=True, alpha=.7)
_ = axs[1].hist(tweets_2018_['len_chars'], bins=50, density=True, alpha=.7)
_ = axs[1].legend(['Tweet length in 2017', 'Tweet length in 2018'])
# Make plot
_ = plt.savefig('images/analysis/tweet_len_distr.png')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Network creation Edges creation
###Code
# Define years under examination
years = [2017, 2018]
# Define edges for 2017 and 2018 (as Pandas DataFrames)
edges = dict()
# Define edges for each network
for y in years:
# Get id of tweets for current year
tweet_ids = tweets.id_str[tweets.created_at.dt.year == y]
# Compute edges for current year
edges[y] = get_edges(words[words.tweet.isin(tweet_ids)])
# Save vocabularies to disk
np.save('data/edges_w2i.npy', w2i) # Save tuple to index vocabulary
np.save('data/edges_i2w.npy', i2w) # Save index to tuple vocabulary
# Save edges to disk
edges_ = [*years]
# Loop through each edges table
for i, y in enumerate(years):
# Add year column
edges_[i] = edges[y].copy()
edges_[i]['year'] = y
# Concatenate DataFrames
edges_ = pd.concat(edges_, axis=0)
# Save dataframe to disk
edges_.to_csv('data/database/edges.csv', index=False)
print('Edges for 2017\'s network')
edges[2017].head()
print('Edges for 2018\'s network')
edges[2018].head()
###Output
Edges for 2018's network
###Markdown
Adjacency matrices

Compute upper triangular adjacency matrices for both the 2017 and the 2018 network. Note: adjacency matrices are saved by default to avoid recomputing.
###Code
# Define networks container
network = dict()
# Create newtorks
for y in years:
network[y] = nx.from_pandas_edgelist(edges[y], source='node_x', target='node_y',
edge_attr=True, create_using=nx.Graph)
# Get numpy adjacency matrices
adj_matrix = dict()
for y in years:
adj_matrix[y] = nx.to_numpy_matrix(network[y])
# Show adjacency matrices
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
# Print adjacency matrix for each network
for i, y in enumerate(years):
_ = axs[i].set_title('{:d}\'s network adjacency matrix'.format(y))
_ = axs[i].imshow(np.minimum(adj_matrix[y], np.ones(adj_matrix[y].shape)))
_ = plt.show()
adj_matrix[2017].shape
###Output
_____no_output_____
###Markdown
Summary statistics

Compute mean, density, and other summary statistics for both the 2017 and the 2018 network.
###Code
# Initialize summary statistics
mean = {}
density = {}
std = {}
# Compute mean and density
for y in years:
x = adj_matrix[y] # Get adjacency matrix for current network
n = x.shape[0] # Get dimension of the adjacency matrix
mean[y] = x.sum() / n**2
std[y] = ( ((x - mean[y])**2).sum() / (n**2 - 1) )**.5
density[y] = np.minimum(x, np.ones((n, n))).sum() / n**2
# Print out results
for y in years:
print('{:d}\'s network has mean={:.04f}, standard deviation={:.04f} and density={:.04f}'.format(y, mean[y], std[y], density[y]))
# Show summary statistics graphically
fig, axs = plt.subplots(1, 3, figsize=(12, 5))
_ = axs[0].set_title('Mean',fontsize=15)
_ = axs[1].set_title('Standard deviation',fontsize=15)
_ = axs[2].set_title('Density',fontsize=15)
# Print scores for either 2017 and 2018
for y in years:
_ = axs[0].bar(str(y), mean[y])
_ = axs[1].bar(str(y), std[y])
_ = axs[2].bar(str(y), density[y])
# Make plot
_ = plt.savefig('images/analysis/net_stats.png')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Degrees analysis
###Code
# Compare degrees graphically
fig, ax = plt.subplots(figsize=(30, 5))
_ = fig.suptitle('Distribution of the networks degrees')
_ = ax.hist(get_degree(network[2017]), bins=500, alpha=0.7)
_ = ax.hist(get_degree(network[2018]), bins=500, alpha=0.7)
_ = ax.set_xlim(0, 200)
_ = ax.legend(['Degree of the network in 2017',
'Degree of the network in 2018'])
_ = plt.savefig('images/analysis/degree_hist.png')
_ = plt.show()
# Define function for computing degree analysis (compute pdf, cdf, ...)
def make_degree_analysis(network):
"""
Input:
- network: networkx graph whose degree distribution is analyzed (degrees are extracted with get_degree)
Output:
- degree: list of degrees
- counts: list containing count for each degree
- pdf (probability distribution function): list
- cdf (cumulative distribution function): list
"""
# Get number of times a degree appeared in the network
degree = get_degree(network)
degree, count = np.unique(degree.values, return_counts=True)
pdf = count / np.sum(count) # Compute pdf
cdf = list(1 - np.cumsum(pdf))[:-1] + [0] # Compute cdf
# Return computed statistics
return degree, count, pdf, cdf
# Define function for plotting degree analysis
def plot_degree_analysis(network):
# Initialize plot
fig, axs = plt.subplots(1, 3, figsize=(12, 5))
_ = axs[0].set_title('Probability Distribution',fontsize=14)
_ = axs[1].set_title('Log-log Probability Distribution',fontsize=14)
_ = axs[2].set_title('Log-log Cumulative Distribution',fontsize=14)
# Create plot fore each network
for i, y in enumerate(network.keys()):
# Compute degree statistics
k, count, pdf, cdf = make_degree_analysis(network[y])
# Make plots
_ = axs[0].plot(k, pdf, 'o', alpha=.7)
_ = axs[1].loglog(k, pdf, 'o', alpha=.7)
_ = axs[2].loglog(k, cdf, 'o', alpha=.7)
# Show plots
_ = [axs[i].legend([str(y) for y in network.keys()], loc='upper right') for i in range(3)]
_ = plt.savefig('images/analysis/degree_distr.png')
_ = plt.show()
# Plot pdf, cdf, log-log, ... of each network
plot_degree_analysis(network)
###Output
_____no_output_____
###Markdown
Scale-free property Power law estimation
###Code
# Estimate power law parameters for each network
# Initialize power law parameters
power_law = {
2017: {'k_sat': 4},
2018: {'k_sat': 7}
}
# Define parameters for each network
for i, y in enumerate(years):
# Get the unique values of degree and their counts
degree = get_degree(network[y])
k, count = np.unique(degree, return_counts=True)
k_sat = power_law[y]['k_sat']
# Define minumum and maximum k (degree)
power_law[y]['k_min'] = k_min = np.min(k)
power_law[y]['k_max'] = k_max = np.max(k)
# Estimate parameters
n = degree[k_sat:].shape[0]
gamma = 1 + n / np.sum(np.log(degree[k_sat:] / k_sat))
c = (gamma - 1) * k_sat ** (gamma - 1)
# Compute cutoff
cutoff = k_sat * n ** (1 / (gamma - 1))
# Store parameters
power_law[y]['gamma'] = gamma
power_law[y]['c'] = c
power_law[y]['cutoff'] = cutoff
# Print out coefficients
for y in power_law.keys():
# Retrieve parameters
gamma, c, cutoff = power_law[y]['gamma'], power_law[y]['c'], power_law[y]['cutoff']
k_min, k_max = power_law[y]['k_min'], power_law[y]['k_max']
# Print results
out = 'Power law estimated parameters for {:d}\'s network:\n'
out += ' gamma={:.03f}, c={:.03f}, cutoff={:.03f}, min.degree={:d}, max.degree={:d}'
print(out.format(y, gamma, c, cutoff, k_min, k_max))
# Define regression lines values for either 2017 and 2018 distributions
# Define regression lines container
regression_line = {}
# Define maximum degree, for both years together
k_max = np.max([power_law[y]['k_max'] for y in power_law.keys()])
# Compute regression lines
for y in power_law.keys():
# Retrieve parameters gamma and c
gamma = power_law[y]['gamma']
c = power_law[y]['c']
# Compute regression line
regression_line[y] = c * np.arange(1, k_max) ** (1 - gamma) / (gamma - 1)
# Plot results
fig, ax = plt.subplots(figsize=(12, 5))
_ = ax.set_title('Log-log Cumulative Distribution Function',fontsize=15)
# Print every network
for i, y in enumerate(power_law.keys()):
# Retrieve degree analysis values
k, count, pdf, cdf = make_degree_analysis(network[y])
# Print dots
_ = ax.loglog(k, cdf, 'o', alpha=.7, color=colors[i])
# Print regression line
_ = ax.loglog(np.arange(1, k_max), regression_line[y], color=colors[i])
# Make plot
_ = ax.legend(['2017']*2 + ['2018']*2, loc='lower left')
_ = plt.savefig('images/analysis/power_law.png')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Small-world property Connected components
###Code
# Extract cardinality of connected components and diameter of the giant component for both nets
"""# Initialize components container
connected_components = {}
# Compute giant component for every network
for i, y in enumerate(network.keys()):
# Compute connected component
cc = sorted(nx.connected_components(network[y]), key=len, reverse=True)
# Compute diameter of the giant component
d = nx.diameter(network[y].subgraph(cc[0]))
# Store the tuple (giant component, cardinality, diameter)
connected_components[y] = []
connected_components[y].append({
'component': cc[0],
'size': len(cc[0]),
'diameter': d
})
# Store each component
for component in cc[1:]:
# Add component, without diameter
connected_components[y].append({
'component': component,
'size': len(component)
})
# Save connected components to disk
np.save('data/connected_components.npy', connected_components)"""
# Load connected components from file
connected_components = np.load('data/connected_components.npy', allow_pickle=True).item()
# Show connected components info for each year
for y in years:
# Retrieve connected component
cc = connected_components[y]
# Show giant component info
print('Network {:d}'.format(y))
print('Giant component has cardinality={:d} and diameter={:d}'.format(cc[0]['size'], cc[0]['diameter']))
# Store each component
for j, component in enumerate(cc):
if j == 0: continue
# Show other components
print('Connected component nr {:d} has cardinality={:d}'.format(j + 1, component['size']))
print()
###Output
Network 2017
Giant component has cardinality=1665 and diameter=6
Connected component nr 2 has cardinality=4
Connected component nr 3 has cardinality=2
Connected component nr 4 has cardinality=2
Network 2018
Giant component has cardinality=1861 and diameter=5
Connected component nr 2 has cardinality=2
Connected component nr 3 has cardinality=2
###Markdown
Clustering coefficient
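For reference, with a `weight` argument networkx computes the weighted local clustering coefficient as the geometric average of subgraph edge weights (Onnela et al.):

$$
c_u = \frac{1}{k_u (k_u - 1)} \sum_{v,w} \left( \hat{w}_{uv}\, \hat{w}_{uw}\, \hat{w}_{vw} \right)^{1/3},
$$

where $k_u$ is the degree of node $u$ and $\hat{w}$ are the edge weights normalized by the maximum weight in the network.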
###Code
# Compute and show clustering coefficients
# Compute clustering coefficients
clust_coef = {y: pd.Series(nx.clustering(network[y], weight='weight')) for y in years}
# Make plot
fig, axs = plt.subplots(1, 2, figsize=(15, 8), sharey=True)
# Loop through each network
for i, y in enumerate(years):
cc = clust_coef[y]
_ = axs[i].set_title('{:d}\'s network'.format(y))
_ = axs[i].plot(cc.index.values, cc.values, 'x', mec=colors[i])
_ = axs[i].grid()
# Show plot
_ = plt.savefig('images/analysis/clust_coeff.png')
_ = plt.show()
giant = {y: connected_components[y][0]['component'] for y in years}
# Compute the average shortest path length for both nets
L = {y: nx.average_shortest_path_length(network[y].subgraph(giant[y]), weight='counts', method='floyd-warshall-numpy') for y in years}
for y in years:
print('Network {:d}'.format(y))
N = len(network[y].nodes)
print('log N: {:.4f}'.format( np.log(N) ))
print('log log N: {:.4f}'.format( np.log( np.log(N) ) ))
print('Average shortest path length: {:.4f}'.format(L[y]))
print('Average clustering coefficient: {:.4f}'.format(np.mean(clust_coef[y])))
print()
###Output
Network 2017
log N: 7.4224
log log N: 2.0045
Average shortest path length: 2.7247
Average clustering coefficient: 0.0201
Network 2018
log N: 7.5310
log log N: 2.0190
Average shortest path length: 2.4799
Average clustering coefficient: 0.0079
###Markdown
Ranking Ranking of words Ranking by degree
###Code
# Define subset (first n)
best = 20
# Make plot
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
# Plot each network
for i, y in enumerate(years):
degree = get_degree(network[y]).sort_values(ascending=False)
_ = axs[i].set_title('Best nodes in {:d}\'s network'.format(y))
_ = axs[i].bar(degree.index[:best].map(lambda x: str(i2w[x])), degree.values[:best], color=colors[i])
_ = axs[i].tick_params(axis='x', labelrotation=60)
# Show plot
_ = plt.savefig('images/analysis/words_rank_degree.png', bbox_inches='tight')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Ranking by betweenness
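Betweenness centrality measures how often a node lies on shortest paths between other nodes:

$$
g(v) = \sum_{s \neq v \neq t} \frac{\sigma_{st}(v)}{\sigma_{st}},
$$

where $\sigma_{st}$ is the number of shortest paths between $s$ and $t$ and $\sigma_{st}(v)$ is the number of those paths that pass through $v$; `nx.betweenness_centrality` additionally normalizes by the number of node pairs and, with `weight='weight'`, computes shortest paths on the weighted graph.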
###Code
"""# Compute betweenness centrality measure for nodes (on giant components)
betweenness = {}
for y in years:
# Define giant component subgraph
# giant_component = connected_components[y][0]['component']
# subgraph = nx.induced_subgraph(network[y], giant_component)
# Compute betweenness
betweenness[y] = nx.betweenness_centrality(network[y], weight='weight')
# Save betweenness as numpy array
np.save('data/betweenness.npy', betweenness)"""
# Load betweenness
betweenness = np.load('data/betweenness.npy', allow_pickle=True).item()
# Define subset (first n)
best = 20
# Make plot
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
for i, y in enumerate(years):
btw = pd.Series(betweenness[y]).sort_values(ascending=False)
_ = axs[i].set_title('Best nodes in {:d}\'s network'.format(y))
_ = axs[i].bar(btw.index[:best].map(lambda x: str(i2w[x])), btw.values[:best], color=colors[i])
_ = axs[i].tick_params(axis='x', labelrotation=60)
_ = plt.savefig('images/analysis/words_rank_btw.png', bbox_inches='tight')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Ranking of verbs
###Code
# Define verbs dictionary
verbs2i = {w: w2i[w] for w in w2i.keys() if w[1] == 'V'}
###Output
_____no_output_____
###Markdown
Ranking by degree
###Code
# Define subset (first n)
best = 20
# Make plot
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
# Plot each network
for i, y in enumerate(years):
degree = get_degree( network[y].subgraph(list(set(network[y].nodes()) & set(verbs2i.values()))) ).sort_values(ascending=False)
_ = axs[i].set_title('Best verbs in {:d}\'s network'.format(y))
_ = axs[i].bar(degree.index[:best].map(lambda x: str(i2w[x])), degree.values[:best], color=colors[i])
_ = axs[i].tick_params(axis='x', labelrotation=60)
# Show plot
_ = plt.savefig('images/analysis/verbs_rank_degree.png', bbox_inches='tight')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Ranking by betweenness
###Code
# Compute betweenness centrality measure for nodes (on giant components)
"""betweenness_verbs = {}
for y in years:
# Define giant component subgraph
giant_component = connected_components[y][0]['component']
subgraph = nx.induced_subgraph(network[y], giant_component)
# Compute betweenness
betweenness_verbs[y] = nx.betweenness_centrality(subgraph.subgraph(list(set(network[y].nodes()) & set(verbs2i.values())))
,weight='weight')
# Save betweenness as numpy array
np.save('data/betweenness_verbs.npy', betweenness_verbs)"""
# Load betweenness
betweenness_verbs = np.load('data/betweenness_verbs.npy', allow_pickle=True).item()
# Define subset (first n)
best = 20
# Make plot
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
for i, y in enumerate(years):
btw = pd.Series(betweenness_verbs[y]).sort_values(ascending=False)
_ = axs[i].set_title('Best verbs in {:d}\'s network'.format(y))
_ = axs[i].bar(btw.index[:best].map(lambda x: str(i2w[x])), btw.values[:best], color=colors[i])
_ = axs[i].tick_params(axis='x', labelrotation=60)
_ = plt.savefig('images/analysis/verbs_rank_btw.png', bbox_inches='tight')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Analysis of ranking changes All words
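The change rate used below for a quantity $q$ (betweenness or degree) of a node present in both networks is

$$
\text{rate} = \frac{q_{2017} - q_{2018}}{q_{2017} + q_{2018}} \in [-1, 1],
$$

so rate $= 1$ means the quantity dropped to zero in 2018, rate $= -1$ means it was zero in 2017, and rate $= 0$ means it is unchanged between the two years.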
###Code
# Define subset (first n)
best = 100
nodes = set(network[2017].subgraph(connected_components[2017][0]['component']).nodes) & set(network[2018].subgraph(
connected_components[2018][0]['component']).nodes)
# Define percentage of change for btw
rate_btw = { node : (betweenness[2017][node] - betweenness[2018][node]) / (betweenness[2017][node] + betweenness[2018][node])
for node in nodes if betweenness[2017][node] + betweenness[2018][node] != 0 }
# Define percentage of change for degree
degree17 = get_degree(network[2017]).sort_values(ascending=False)
degree18 = get_degree(network[2018]).sort_values(ascending=False)
rate_degree = { node : (degree17[node] - degree18[node]) / (degree17[node] + degree18[node]) for node in nodes }
# Make plot
fig, axs = plt.subplots(2, 1, figsize=(15, 10))
rate_btw = pd.Series(rate_btw).sort_values(ascending=False)
_ = axs[0].set_title('Words with highest percentage of change in betweenness'.format(y))
_ = axs[0].bar(rate_btw.index[:best].map(lambda x: str(i2w[x])), rate_btw.values[:best])
_ = axs[0].tick_params(axis='x', labelrotation=90)
rate_degree = pd.Series(rate_degree).sort_values(ascending=False)
_ = axs[1].set_title('Words with highest percentage of change in degree'.format(y))
_ = axs[1].bar(rate_degree.index[:best].map(lambda x: str(i2w[x])), rate_degree.values[:best])
_ = axs[1].tick_params(axis='x', labelrotation=90)
_ = plt.tight_layout()
_ = plt.show()
btw1 = sum(rate_btw == 1)/len(rate_btw)
btw2 = sum(rate_btw == -1)/len(rate_btw)
btw3 = sum(rate_btw == 0)/len(rate_btw)
deg1 = sum(rate_degree == 1)/len(rate_degree)
deg2 = sum(rate_degree == -1)/len(rate_degree)
deg3 = sum(rate_degree == 0)/len(rate_degree)
# Show node differences
fig, ax = plt.subplots(1,2,figsize=(15, 5), sharey=True)
_ = ax[0].set_title('Significant values for the betweenness change rate')
_ = ax[0].bar(['% words with rate = 1'], [btw1])
_ = ax[0].bar(['% words with rate = -1'], [btw2])
_ = ax[0].bar(['% words with rate = 0'], [btw3])
_ = ax[1].set_title('Significant values for the degree change rate')
_ = ax[1].bar(['% words with rate = 1'], [deg1])
_ = ax[1].bar(['% words with rate = -1'], [deg2])
_ = ax[1].bar(['% words with rate = 0'], [deg3])
_ = plt.tight_layout()
_ = plt.savefig('images/analysis/words_change_rank.png')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Verbs
###Code
# Define subset (first n)
best = 100
nodes_verbs = set(network[2017].subgraph( set(connected_components[2017][0]['component']) & set(verbs2i.values()) ).nodes) & set(network[2018].subgraph( set(connected_components[2018][0]['component']) & set(verbs2i.values()) ).nodes)
# Define percentage of change for btw
rate_btw_verbs = { node : (betweenness_verbs[2017][node] - betweenness_verbs[2018][node]) / (betweenness_verbs[2017][node] + betweenness_verbs[2018][node])
for node in nodes_verbs if betweenness_verbs[2017][node] + betweenness_verbs[2018][node] != 0 }
# Define percentage of change for degree
degree17 = get_degree(network[2017]).sort_values(ascending=False)
degree18 = get_degree(network[2018]).sort_values(ascending=False)
rate_degree_verbs = { node : (degree17[node] - degree18[node]) / (degree17[node] + degree18[node]) for node in nodes_verbs }
# Make plot
fig, axs = plt.subplots(2, 1, figsize=(15, 10))
rate_btw_verbs = pd.Series(rate_btw_verbs).sort_values(ascending=False)
_ = axs[0].set_title('Words with highest percentage of change in betweenness'.format(y))
_ = axs[0].bar(rate_btw_verbs.index[:best].map(lambda x: str(i2w[x])), rate_btw_verbs.values[:best])
_ = axs[0].tick_params(axis='x', labelrotation=90)
rate_degree_verbs = pd.Series(rate_degree_verbs).sort_values(ascending=False)
_ = axs[1].set_title('Words with highest percentage of change in degree'.format(y))
_ = axs[1].bar(rate_degree_verbs.index[:best].map(lambda x: str(i2w[x])), rate_degree_verbs.values[:best])
_ = axs[1].tick_params(axis='x', labelrotation=90)
_ = plt.tight_layout()
_ = plt.show()
btw1 = sum(rate_btw_verbs == 1)/len(rate_btw_verbs)
btw2 = sum(rate_btw_verbs == -1)/len(rate_btw_verbs)
btw3 = sum(rate_btw_verbs == 0)/len(rate_btw_verbs)
deg1 = sum(rate_degree_verbs == 1)/len(rate_degree_verbs)
deg2 = sum(rate_degree_verbs == -1)/len(rate_degree_verbs)
deg3 = sum(rate_degree_verbs == 0)/len(rate_degree_verbs)
# Show node differences
fig, ax = plt.subplots(1,2,figsize=(15, 5), sharey=True)
_ = ax[0].set_title('Significant values for the betweenness change rate')
_ = ax[0].bar(['% verbs with rate = 1'], [btw1])
_ = ax[0].bar(['% verbs with rate = -1'], [btw2])
_ = ax[0].bar(['% verbs with rate = 0'], [btw3])
_ = ax[1].set_title('Significant values for the degree change rate')
_ = ax[1].bar(['% verbs with rate = 1'], [deg1])
_ = ax[1].bar(['% verbs with rate = -1'], [deg2])
_ = ax[1].bar(['% verbs with rate = 0'], [deg3])
_ = plt.tight_layout()
_ = plt.savefig('images/analysis/verbs_change_rank.png')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Selected words
###Code
sel_words = [('young', 'A'), ('harassment', 'N'), # big words that change size
('empower','V'), ('initiative', 'N'), ('discuss','V'), ('education', 'N'), ('dream','N'), ('dignity','N'), #positive 1
('include','V'), ('safe','A'), ('prevent', 'V'), ('security','N'), #positive 2
('work','V'), ('assault','N'), ('flee','V'), ('abuse','N')] #specific
mask = []
for w in sel_words:
if not w2i[w] in nodes:
print(' "{}" word not in both networks'.format(w))
print()
else:
#print(' "{}" word degree change rate: {}'.format(w, rate_degree[w2i[w]]))
print(' "{}" word btw change rate: {}'.format(w, rate_btw[w2i[w]]))
print()
###Output
"('young', 'A')" word btw change rate: 0.5296871007339359
"('harassment', 'N')" word btw change rate: -0.05028998860520895
"('empower', 'V')" word btw change rate: 0.07216030830213684
"('initiative', 'N')" word btw change rate: 0.7715624714437412
"('discuss', 'V')" word btw change rate: -0.12924016901000587
"('education', 'N')" word btw change rate: -0.2700762154848485
"('dream', 'N')" word btw change rate: 0.21523354362535313
"('dignity', 'N')" word btw change rate: 0.046040219761660006
"('include', 'V')" word btw change rate: 0.5071152819268543
"('safe', 'A')" word btw change rate: -0.2819714413634178
"('prevent', 'V')" word btw change rate: -0.21733776733256185
"('security', 'N')" word btw change rate: 0.10384015636067427
"('work', 'V')" word btw change rate: -0.5051204882438888
"('assault', 'N')" word not in both networks
"('flee', 'V')" word btw change rate: 0.22556250492923832
"('abuse', 'N')" word btw change rate: -0.010238881034532703
###Markdown
Difference between sets of nodes
###Code
x17 = len(set(network[2017].nodes) - set(network[2018].nodes))/len(set(network[2017].nodes)) * 100
print('Percentage of words in 2017 but not in 2018: {:d} %'.format(int(x17)))
x18 = len(set(network[2018].nodes) - set(network[2017].nodes)) / len(set(network[2018])) * 100
print('Percentage of words in 2018 but not in 2017: {:d} %'.format(int(x18)))
# Show node differences
fig, ax = plt.subplots(figsize=(7.5, 5))
_ = ax.set_title('Difference between sets of nodes')
_ = ax.bar(['2017 without 2018'], [x17])
_ = ax.bar(['2018 without 2017'], [x18])
_ = plt.savefig('images/analysis/node_sets_difference.png')
_ = plt.show()
###Output
_____no_output_____
###Markdown
Assortativity Degree assortativity
###Code
print('Assortativity coefficient 2017:',nx.degree_assortativity_coefficient( network[2017], weight = 'counts' ))
print('Assortativity coefficient 2018:',nx.degree_assortativity_coefficient( network[2018], weight = 'counts' ))
###Output
Assortativity coefficient 2017: -0.10180140871856291
Assortativity coefficient 2018: -0.1421223782964965
###Markdown
Node assortativity by attribute
###Code
print('Assortativity coefficient 2017:',nx.degree_assortativity_coefficient( network[2017].subgraph(
list(set(network[y].nodes) & set(verbs2i.values()))), weight = 'counts' ))
print('Assortativity coefficient 2018:',nx.degree_assortativity_coefficient( network[2018].subgraph(
list(set(network[y].nodes) & set(verbs2i.values()))), weight = 'counts' ))
###Output
Assortativity coefficient 2017: -0.10998348703149166
Assortativity coefficient 2018: -0.15877767026488898
###Markdown
Segmentation Method Comparison

Metrics: IoU (Intersection over Union), F1 score, and false positive rate, averaged over video frames.
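As a reference, a minimal sketch (not the evaluation code that produced `seg_comparison.csv`; the exact false-positive-rate definition is an assumption) of how these per-frame metrics can be computed from boolean masks:

```python
import numpy as np

def frame_metrics(pred, gt):
    """Per-frame IoU, F1 and false-positive rate for boolean segmentation masks."""
    pred, gt = pred.astype(bool), gt.astype(bool)
    tp = np.logical_and(pred, gt).sum()
    fp = np.logical_and(pred, ~gt).sum()
    fn = np.logical_and(~pred, gt).sum()
    tn = np.logical_and(~pred, ~gt).sum()
    iou = tp / (tp + fp + fn)
    f1 = 2 * tp / (2 * tp + fp + fn)
    fp_rate = fp / (fp + tn)  # assumed definition: fraction of background pixels labelled as object
    return iou, f1, fp_rate
```

Averaging these values over all frames of a video gives the per-object numbers tabulated below.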
###Code
seg_results = pd.read_csv(os.path.join(os.getcwd(), "seg_comparison.csv"))
seg_results.head()
seg_results.tail()
seg_method = seg_results['seg method'].unique()
table = []
for s in seg_method:
table.append([str(s), np.mean(seg_results.loc[seg_results['seg method'] == s]['IoU']),
np.mean(seg_results.loc[seg_results['seg method'] == s]['F1']),
np.mean(seg_results.loc[seg_results['seg method'] == s]['FP rate'])])
print(tabulate(table, headers=['seg method', 'mIoU','F1','FP rate']))
###Output
seg method mIoU F1 FP rate
------------ -------- -------- ---------
BackFlow 0.896087 0.939715 0.0879889
OSVOS 0.909781 0.950612 0.0719721
###Markdown
OSVOS performs better than BackFlow on all three metrics. Note that the FP rate is relatively more important for reconstruction, because it is worse if background pixels end up in the reconstructed model.
###Code
sns.set_context({"figure.figsize":(12,10)})
sns.stripplot(x="seg method",y="IoU",data=seg_results,jitter=True,hue="object name", dodge=True)
###Output
_____no_output_____
###Markdown
BackFlow does not work as well on 'YcbBanana'.

Reconstruction Algorithm Comparison

Metrics: mean usdf (unsigned distance fields) averaged over the points of the point cloud, and mean and RMS Hausdorff distance (calculated bi-directionally).
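As a reference, a minimal sketch (assuming the reconstructed and ground-truth surfaces are sampled as `(N, 3)` NumPy point clouds; not the exact evaluation code) of bi-directional mean and RMS nearest-neighbour distances, which is how mean/RMS Hausdorff metrics are commonly computed:

```python
import numpy as np
from scipy.spatial import cKDTree

def hausdorff_stats(a, b):
    """Mean and RMS of bi-directional nearest-neighbour distances between point clouds a and b."""
    d_ab, _ = cKDTree(b).query(a)  # distance of each point of a to its closest point in b
    d_ba, _ = cKDTree(a).query(b)  # and vice versa
    d = np.concatenate([d_ab, d_ba])
    return d.mean(), np.sqrt(np.mean(d ** 2))
```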
###Code
recon_results = pd.read_csv(os.path.join(os.getcwd(), "recon_comparison.csv"))
recon_results.head()
recon_results.tail()
###Output
_____no_output_____
###Markdown
'Data Path': 'Data' is generated by grasping the object; 'Data_stuck' is generated by resetting the object within the gripper so that it cannot slide. 'Reconstruction Method': 'point-to-plane' is reconstructed with point-to-plane ICP; 'robot-joints' is reconstructed from the robot end-effector positions and orientations (e.g. the center of the two prismatic fingers for the Franka). General performance of point-to-plane ICP and robot-joints
###Code
def evaluate(data, type= 'Reconstruction Method'):
method = recon_results[type].unique()
table = []
for m in method:
table.append([str(m), np.mean(data.loc[data[type] == m]['mean usdf']),
np.mean(data.loc[data[type] == m]['mean haus dist']),
np.mean(data.loc[data[type] == m]['RMS haus dist'])])
print(tabulate(table, headers=[type, 'mean usdf','mean haus dist','RMS haus dist']))
print("general performance")
evaluate(recon_results)
###Output
general performance
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.327332 0.00190181 0.00286709
robot-joints 0.00247028 0.000988605 0.00151556
###Markdown
Performance on 'Data' or 'Data_stuck'
###Code
print("Performance on 'Data'")
evaluate(recon_results.loc[recon_results['Data Path'] == 'Data'])
print("Performance on 'Data_stuck'")
evaluate(recon_results.loc[recon_results['Data Path'] == 'Data_stuck'])
sns.set_context({"figure.figsize":(12,10)})
sns.stripplot(x="Reconstruction Method",y="mean usdf",data=recon_results,jitter=True,hue="Object Name", dodge=True)
###Output
_____no_output_____
###Markdown
Point-to-plane works extremely poorly on YcbTennisBall, so the general performance of point-to-plane looks much worse than robot-joints.\If YcbTennisBall is excluded:
###Code
recon_results_drop = recon_results[recon_results['Object Name']!='YcbTennisBall']
sns.set_context({"figure.figsize":(12,10)})
sns.stripplot(x="Reconstruction Method",y="mean usdf",data=recon_results_drop,jitter=True,hue="Object Name", dodge=True)
print("general")
evaluate(recon_results_drop)
print("\n'Data'")
evaluate(recon_results_drop.loc[recon_results_drop['Data Path'] == 'Data'])
print("\n'Data_stuck'")
evaluate(recon_results_drop.loc[recon_results_drop['Data Path'] == 'Data_stuck'])
###Output
general
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.0109608 0.00183893 0.00267426
robot-joints 0.00253711 0.00103155 0.00160034
'Data'
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.0192207 0.00281954 0.0039368
robot-joints 0.00380814 0.0015232 0.00226799
'Data_stuck'
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.00270094 0.00085832 0.00141172
robot-joints 0.00126607 0.000539897 0.00093268
###Markdown
drop YcbBanana
###Code
recon_results_drop2 = recon_results[~recon_results['Object Name'].isin(['YcbTennisBall', 'YcbBanana'])]
sns.set_context({"figure.figsize":(12,10)})
sns.stripplot(x="Reconstruction Method",y="mean usdf",data=recon_results_drop2,jitter=True,hue="Object Name", dodge=True)
print("general")
evaluate(recon_results_drop2)
print("\n'Data'")
evaluate(recon_results_drop2.loc[recon_results_drop2['Data Path'] == 'Data'])
print("\n'Data_stuck'")
evaluate(recon_results_drop2.loc[recon_results_drop2['Data Path'] == 'Data_stuck'])
###Output
general
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.00258523 0.000907076 0.00145407
robot-joints 0.00229762 0.00100571 0.00158859
'Data'
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.00243453 0.000909623 0.0014017
robot-joints 0.00336379 0.00145612 0.0021993
'Data_stuck'
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.00273594 0.000904529 0.00150643
robot-joints 0.00123146 0.000555293 0.000977875
###Markdown
Brief conclusion:\When the reconstruction methods are run on an object with distinctive geometric features, point-to-plane ICP and the robot end-effector method give similar reconstruction performance overall.\In detail, point-to-plane works better than the robot end-effector method on the dataset generated by directly grasping ('Data'), and the opposite holds for the dataset generated by resetting the object within the gripper ('Data_stuck'). Segmented by BackFlow or by OSVOS
###Code
print("all objects")
evaluate(recon_results, type= 'Segmentation Method')
print("\ndrop tennis ball")
evaluate(recon_results_drop, type= 'Segmentation Method')
print("\ndrop banana")
evaluate(recon_results_drop2, type= 'Segmentation Method')
###Output
all objects
Segmentation Method mean usdf mean haus dist RMS haus dist
--------------------- ----------- ---------------- ---------------
BackFlow 0.326978 0.00175713 0.00256824
OSVOS 0.0028245 0.00113329 0.00181441
drop tennis ball
Segmentation Method mean usdf mean haus dist RMS haus dist
--------------------- ----------- ---------------- ---------------
BackFlow 0.0112672 0.00181425 0.00261059
OSVOS 0.00223075 0.00105622 0.00166401
drop banana
Segmentation Method mean usdf mean haus dist RMS haus dist
--------------------- ----------- ---------------- ---------------
BackFlow 0.0026617 0.000823804 0.00130877
OSVOS 0.00222116 0.00108898 0.00173389
###Markdown
Brief conclusion:\The segmentation method influences the reconstruction performance. Overall, reconstruction following BackFlow performs considerably worse than reconstruction following OSVOS. BackFlow
###Code
recon_results_backflow = recon_results_drop2[recon_results_drop2['Segmentation Method'] == 'BackFlow']
sns.set_context({"figure.figsize":(12,8)})
sns.stripplot(x="Reconstruction Method",y="mean usdf",data=recon_results_backflow,jitter=True,hue="Object Name", dodge=True)
print("general")
evaluate(recon_results_backflow)
print("\n'Data'")
evaluate(recon_results_backflow.loc[recon_results_backflow['Data Path'] == 'Data'])
print("\n'Data_stuck'")
evaluate(recon_results_backflow.loc[recon_results_backflow['Data Path'] == 'Data_stuck'])
###Output
general
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.00253778 0.000649172 0.00103541
robot-joints 0.00278561 0.000998437 0.00158212
'Data'
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.0028079 0.000701502 0.00108705
robot-joints 0.0039568 0.00144841 0.0021999
'Data_stuck'
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.00226766 0.000596841 0.000983779
robot-joints 0.00161442 0.000548466 0.000964341
###Markdown
OSVOS
###Code
recon_results_osvos = recon_results_drop2[recon_results_drop2['Segmentation Method'] == 'OSVOS']
sns.set_context({"figure.figsize":(12,8)})
sns.stripplot(x="Reconstruction Method",y="mean usdf",data=recon_results_osvos,jitter=True,hue="Object Name", dodge=True)
print("general")
evaluate(recon_results_osvos)
print("\n'Data'")
evaluate(recon_results_osvos.loc[recon_results_osvos['Data Path'] == 'Data'])
print("\n'Data_stuck'")
evaluate(recon_results_osvos.loc[recon_results_osvos['Data Path'] == 'Data_stuck'])
###Output
general
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.00263268 0.00116498 0.00187272
robot-joints 0.00180964 0.00101298 0.00159506
'Data'
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.00206115 0.00111774 0.00171636
robot-joints 0.00277078 0.00146383 0.0021987
'Data_stuck'
Reconstruction Method mean usdf mean haus dist RMS haus dist
----------------------- ----------- ---------------- ---------------
point-to-plane 0.00320421 0.00121222 0.00202908
robot-joints 0.000848496 0.00056212 0.000991409
###Markdown
Let's analyze the Japanese National Bibliography data!! The National Diet Library (NDL) produces bibliographic data known as the Japanese National Bibliography (JNB, 全国書誌データ) and provides it in a form that anyone can use. The JNB has the following characteristics: >The JNB is standard bibliographic information for domestic publications collected comprehensively by the National Diet Library. >The deposit rate for books that can generally be bought in bookstores is over 95%. >It also includes many items that are hard to obtain through ordinary distribution channels, such as central government and local government publications. >New bibliographic records are made available roughly four days after a publication arrives at the NDL, and completed records within about one month. [全国書誌データ提供](https://www.ndl.go.jp/jp/data/data_service/jnb/index.html) Use of the JNB data is **free of charge** and **no application is required**. As of June 2019, the following four access routes are provided (see also the [service list](https://www.ndl.go.jp/jp/data/data_service/jnb/faq.html)). * Search API: bibliographic data in NDL Search can be queried from a library system's search screen and the results retrieved and displayed. [検索用API](https://www.ndl.go.jp/jp/data/data_service/jnb/ndl_search.htmliss01) [APIのご利用について](https://iss.ndl.go.jp/information/api/) * Harvesting API: bibliographic data can be harvested from NDL Search via OAI-PMH; large volumes, such as a full harvest, can be retrieved in bulk. [ハーベスト用API](https://www.ndl.go.jp/jp/data/data_service/jnb/ndl_search.htmliss02) [国立国会図書館サーチが提供するOAI-PMH](https://iss.ndl.go.jp/information/api/api-lists/oai-pmh_info/) * RSS: new bibliographic records, the JNB, and the JNB (e-books and e-journals edition) are provided in RSS 2.0 format through NDL Search. [国立国会図書館サーチが提供するRSS](https://iss.ndl.go.jp/information/api/api-lists/rss_info/2) * TSV files: the JNB (e-books and e-journals edition) is provided as TSV (tab-separated text) files. [全国書誌(電子書籍・電子雑誌編)TSVファイル一覧](https://www.ndl.go.jp/jp/data/data_service/jnb/ebej_tsv.html) In this notebook we use the JNB to carry out: 1. a full harvest via the harvesting API, 2. shaping and cleansing of the harvested data, 3. visualization of the results, and 4. an advanced extra (turning the chart into an animation). Basically everything can be run by pressing Ctrl+Enter on the cells from top to bottom.
###Code
#Install the required libraries
!pip install pycurl tqdm datetime pandas matplotlib seaborn
###Output
_____no_output_____
###Markdown
1. Full harvest via the harvesting API. Let's set up a suitable harvester and collect the full snapshot we need. A simple harvester written in Python is included below; please use it at your own risk.
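At its core the harvester below just follows the OAI-PMH `ListRecords` / `resumptionToken` loop against the same endpoint; a stripped-down sketch of that loop (identifiers only, without the record parsing and file writing the real class does) looks like this:

```python
import urllib.request
import xml.etree.ElementTree as ET

OAI = '{http://www.openarchives.org/OAI/2.0/}'
BASE = "http://iss.ndl.go.jp/api/oaipmh?verb=ListRecords"

def harvest_identifiers(setname, fromdate):
    """Yield OAI-PMH record identifiers, following resumptionToken paging."""
    url = BASE + "&metadataPrefix=dcndl&set=" + setname + "&from=" + fromdate
    while True:
        root = ET.fromstring(urllib.request.urlopen(url).read())
        records = root.find(OAI + 'ListRecords')
        if records is None:
            return
        for rec in records.findall(OAI + 'record'):
            yield rec.find(OAI + 'header').find(OAI + 'identifier').text
        token = records.find(OAI + 'resumptionToken')
        if token is None or token.text is None:
            return
        url = BASE + "&resumptionToken=" + token.text
```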
###Code
#(Harvester) preliminaries for handling RDF/XML
import os
import xml.etree.ElementTree as ET
import pycurl
import time
import codecs
from io import StringIO,BytesIO
#XML namespaces
OAI='{http://www.openarchives.org/OAI/2.0/}'
dc ='{http://purl.org/dc/elements/1.1/}'
dcndl='{http://ndl.go.jp/dcndl/terms/}'
dcterms='{http://purl.org/dc/terms/}'
rdf='{http://www.w3.org/1999/02/22-rdf-syntax-ns#}'
rdfs='{http://www.w3.org/2000/01/rdf-schema#}'
foaf='{http://xmlns.com/foaf/0.1/}'
#Register the namespaces with ElementTree (the XML parsing library) as well
ET.register_namespace('',"http://www.openarchives.org/OAI/2.0/")
ET.register_namespace('rdf', "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
ET.register_namespace('rdfs', "http://www.w3.org/2000/01/rdf-schema#")
ET.register_namespace('dc',"http://purl.org/dc/elements/1.1/")
ET.register_namespace('dcterms',"http://purl.org/dc/terms/")
ET.register_namespace('dcndl',"http://ndl.go.jp/dcndl/terms/")
ET.register_namespace('xsi',"http://www.w3.org/2001/XMLSchema-instance")
ET.register_namespace('schemaLocation',"http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd")
ET.register_namespace('oai_dc',"http://www.openarchives.org/OAI/2.0/oai_dc/")
ET.register_namespace('foaf',"http://xmlns.com/foaf/0.1/")
ET.register_namespace('owl',"http://www.w3.org/2002/07/owl#")
#The harvester itself
from datetime import datetime, date, timedelta
from tqdm import tqdm
import time
import os
import xml.etree.ElementTree as ET
import pycurl
import time
import codecs
from io import StringIO,BytesIO
import urllib.request
class OAI_harvester:
def __init__(self, outputxmlpath="xml_all.xml", prefixname="dcndl"):
self.outputxml = outputxmlpath
self.prefixname = prefixname
self.resumptiontoken = None
self.datasize = None
with open(self.outputxml, 'wb') as f:
print("initialize file")
def _parse_xml_ndl(self):
tree = ET.parse('oaitmp.xml')
root = tree.getroot()
with codecs.open(self.outputxml, 'a', "utf-8") as f:
es_item = root.find(OAI + 'ListRecords').findall(OAI + 'record')
            for item in es_item: # OAI-PMH wraps each record as "id <xml>", so strip off the unneeded id part
if item.find(OAI + 'metadata') is None:
continue
item2 = item.find(OAI + 'metadata').find(rdf + 'RDF')
item_id = item.find(OAI + 'metadata').find(rdf + 'RDF').find(dcndl + "BibAdminResource").attrib[
rdf + "about"].split("/")[-1]
item_str = ET.tostring(item2, encoding='utf8', method='xml').decode()
item_str = item_str.replace("\n", "")
f.write(item_str + "\n")
if root.find(OAI + 'ListRecords') is None:
self.resumptiontoken = None
return
token = root.find(OAI + 'ListRecords').find(OAI + "resumptionToken")
if token is None or token.text is None:
self.resumptiontoken = None
return
self.datasize = token.attrib["completeListSize"]
self.resumptiontoken = token.text
def _download_xml(self, fromdate):
# b = io.BytesIO()
with open("oaitmp.xml", 'wb') as f:
url = "http://iss.ndl.go.jp/api/oaipmh?verb=ListRecords"
if self.resumptiontoken is not None:
url += "&resumptionToken=" + self.resumptiontoken
else:
url += "&from=" + fromdate + "&metadataPrefix=" + self.prefixname + "&set=" + self.setname
print(url)
# print(url)
try:
data = urllib.request.urlopen(url)
f.write(data.read())
http_code = data.getcode()
if http_code == 200:
retval = True
else:
retval = False
except Exception as e:
print(str(e))
retval = False
return retval
def getxml(self, setname, fromdate=None):
self.setname = setname
self.resumptiontoken = None
if fromdate is None:
            # Fetch the first 200 records with explicit query parameters. from/until can bound the period, up to one year at most
today = datetime.today()
fromdate = datetime.strftime(today - timedelta(days=364), '%Y-%m-%d')
self._download_xml(fromdate)
self._parse_xml_ndl()
        print(self.datasize + " records found. Fetching them 200 at a time")
if self.resumptiontoken is not None:
self._parse_xml_ndl()
for index in tqdm(range(int(self.datasize) // 200 + 1)):
while not self._download_xml(fromdate):
print("retry")
time.sleep(1)
# print("downloading file_count:",index)
self._parse_xml_ndl()
if self.resumptiontoken is None:
break
else:
print("エラーです。set名を確認してください")
###Output
_____no_output_____
###Markdown
For example, all bibliographic records in the national bibliography set (iss-ndl-opac-national) classified as "Fiction / Narrative" (913 in the Nippon Decimal Classification) can be harvested as follows. **Note**: for "Fiction / Narrative" the run takes roughly two hours. A pre-harvested snapshot has been made available for download at http://lab.ndl.go.jp/dataset/xml_913.zip.
###Code
oai=OAI_harvester(outputxmlpath="xml_913.xml")
#Uncomment the line below to actually run the harvest (kept commented to prevent accidental execution)
#oai.getxml(setname="iss-ndl-opac-national:913",fromdate="2018-06-19")
###Output
_____no_output_____
###Markdown
2. Shaping and cleansing the harvested data. The data obtained in step 1 is in a format where each line holds the XML of one bibliographic record. Let's first peek at the data structure of a record.
###Code
from xml.dom import minidom
with codecs.open("xml_novel.xml", "r","utf-8") as f:
xmlsample=f.readline()
xmlstr = minidom.parseString(xmlsample).toprettyxml(indent=" ")
print(xmlstr)
###Output
_____no_output_____
###Markdown
Looking inside a record we can see, besides the title and author, information such as the "publisher" and the "publication year". This time we focus on the publisher name and investigate **which publishers publish the most books in the fiction/narrative field, and whether this changes across publication years**. Judging from the record shown above, ``` ``` looks like the right element for the publisher, and let's use `````` for the publication year. Also, since XML is awkward to handle directly, we will manage the extracted data in a pandas DataFrame.
###Code
#Extract just the publisher name and publication year from each record and build a DataFrame
import pandas as pd
with codecs.open("xml_novel.xml", "r","utf-8") as f:
xmlsample=f.readline()
publisherList=[]
dateList=[]
cnt=0
while xmlsample:
cnt+=1
#if cnt%10000==0:
# print(cnt)
tree = ET.fromstring(xmlsample)
#print(tree)
#root = tree.getroot()
publisher=tree.find(dcndl+'BibResource').find(dcterms+'publisher')
publishername=publisher.find(foaf+'Agent').find(foaf+'name')
publishdate=tree.find(dcndl+'BibResource').find(dcterms+'date')
#cleandate=publishdate.text.replace(".*([0-9\.]+).*",r"\1",regex=True)
#print(publishername.text,cleandate)
#tmp_se = pd.Series( [ publishername.text, publishdate.text], index=analysis_df.columns )
#analysis_df = analysis_df.append( tmp_se, ignore_index=True )
publisherList.append(publishername.text)
dateList.append(publishdate.text)
xmlsample=f.readline()
analysis_df = pd.DataFrame({'publisher':publisherList,'date':dateList})
print(analysis_df)
###Output
_____no_output_____
###Markdown
As it stands, the publication year field contains irregular values such as 「制作」, "19--" or \[2011\], so the values cannot be compared as numbers. Let's clean the data so that only four-digit Western calendar years are kept (this kind of processing is called "cleansing").
###Code
#Clean the data
cleandf=analysis_df.copy()
#Extract the four-digit Western year if present; otherwise treat the value as missing
cleandf['date']=cleandf['date'].str.extract('([0-9]{4})')
#Drop records with missing values
cleandf=cleandf.dropna(how='any')
#Convert the publication year of the remaining records to a number
cleandf['date']=cleandf['date'].astype("int")
print("Distribution of publication years in the bibliographic data")
print(cleandf.describe())
print("\nCleaned bibliographic data")
print(cleandf.head())
#Write out as CSV
#cleandf.to_csv("clean_novel.csv")
###Output
_____no_output_____
###Markdown
3. Visualizing the results. Now that the data is clean, let's finally visualize it. We plot how the number of publications changes over the publication years as a line chart.
###Code
#If you want to start from here
#cleandf=pd.read_csv("clean_novel.csv")
#Aggregate by publication year and plot the result
df = pd.DataFrame(cleandf.groupby('date').count())
df.columns=["count"]
print(df["count"].sum())
#Top 10 in descending order
print(df.nlargest(10, columns='count'))
df.plot.line(title=u'小説・物語の出版数年次推移')
#Work around garbled Japanese characters in the plot
plt.rcParams['font.family'] = 'Yu Mincho'
#Aggregate by publisher and tabulate in descending order
grp_df=cleandf.groupby('publisher').count()
grp_df.columns=["count"]
print(grp_df.nlargest(20, columns='count'))
grp_df.nlargest(20, columns='count').plot.bar(alpha=0.6, figsize=(15,8))
plt.title(u'小説・物語の出版数ランキング', size=16)
###Output
_____no_output_____
###Markdown
We can also output a ranking restricted to a specific publication year. Let's look at the ranking for 2000 (set by the `year` variable below).
###Code
year=2000
#Aggregate the books published in the chosen year by publisher and tabulate in descending order
grp_df=cleandf[cleandf['date']==year].groupby('publisher').count()
grp_df.columns=["count"]
#Top 20
print(grp_df.nlargest(20, columns='count'))
grp_df.nlargest(20, columns='count').plot.bar(alpha=0.6, figsize=(8,8))
plt.title(u'小説・物語の出版数ランキング(%d)'% year, size=16)
###Output
_____no_output_____
###Markdown
4. Extra: turning the chart into an animation. Finally, let's animate the chart. We animate the publication counts of the publishers that have appeared at the top of the yearly ranking (the top 20 in the code below) at some point since 2000.
###Code
%matplotlib nbagg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
publisherlist=[]
for year in range(2000,2018):
grp_df=cleandf[cleandf['date']==year].groupby('publisher').count()
grp_df.columns=["count"]
x=grp_df.nlargest(20, columns='count')
publisherlist.extend(list(x.index))
#Remove duplicates
publisherlist=list(set(publisherlist))
print(publisherlist)
#Prepare the figure
fig,ax= plt.subplots(figsize=(12, 10))
ims = []
for year in range(2000,2019):
#ttl = plt.text(0.5, 1.01, year, horizontalalignment='center', verticalalignment='bottom', transform=ax.transAxes)
#txt = plt.text(year,year,year)
grp_df=cleandf[cleandf["date"]==year].groupby("publisher").count().reset_index()
grp_df.columns=["publisher","count"]
grp_df=grp_df[grp_df["publisher"].isin(publisherlist)]
#x=grp_df.nlargest(20, columns='count')
#print(x["count"])
im = plt.barh(list(grp_df["publisher"]),grp_df["count"].values)
#ax.text(.8,.8, "{}".format(year), transform=ax.transAxes)
plt.title(u'小説・物語の出版数推移(2000年から2018年)', size=16)
ims.append(im)
ani = animation.ArtistAnimation(fig, ims, blit=False,interval=500)
ani.save('出版推移.gif',writer='pillow')
plt.show()
###Output
_____no_output_____
###Markdown
Analysis of questions asked by school students Overview This is a report on the analysis of science questions asked by students of Telangana Social Welfare Residential schools, of classes VII to IX, to the outreach volunteers of TIFR. The dataset contains a sample of 100 questions picked for this analysis. Analysis A good way to discover patterns in textual data is to classify the data and analyse the classes to find hidden trends. In the given dataset, given that the data contains questions asked by students, my basic intuition was to classify the data according to the subject or field of science from which the question was asked. For example, the question 'How many stars are in the sky?' can be classified as an Astronomy question, 'Which is the biggest animal in Ocean?' is most certainly a Biology question, so on and so forth. After careful analysis and reading through the entire dataset multiple times to get a general feel of the distribution of questions, I discovered another underlying criterion for classification - some questions were being asked to fill in gaps in the knowledge of students, for example 'Where do petrol and diesel come from?' or 'Which is the coldest place?'. I categorized these questions as 'Comprehension' type questions, since they have a single factual answer from the science curriculum being taught to these students. But a good chunk of questions in the dataset were not Comprehension type, but were more exploratory in nature, questions that were clearly rooted in curiosity and application of existing knowledge. I categorized these questions as 'Curiosity' type. This classification criterion could provide a high level view of the scientific temperament and understanding among students. It could also help gauge the effectiveness of the current curriculum and teaching methodology being used at these schools. For example, if the majority of questions asked by students after a session or class are 'Comprehension' type questions, it would be safe to say that we need to improve or even rethink the teaching strategy or the course content. It could also help point out specific areas of the course that might need improvement - for example, if a lot of Comprehension type questions are from the Biology section, it could indicate that the course material might need tweaking or even that the instructor for that particular session could improve his/her method. An attached pdf document titled 'categorized_questions.pdf' contains the entire dataset classified into Comprehension and Curiosity type questions, as well as into the fields of science they belong to. Another file, titled data.tsv, contains the same labelled data in a format that's easily readable by machine learning libraries such as pandas, which makes it easy to work with datasets. We can load up this file to get some insight into our newly classified data.
###Code
# importing libraries to handle the dataset
import pandas as pd
import numpy as np
# importing the dataset
dataset = pd.read_csv('data.tsv', delimiter='\t')
###Output
_____no_output_____
###Markdown
We can now look at a preview of this classified data:
###Code
dataset.head(10)
###Output
_____no_output_____
###Markdown
We can calculate the distribution of labels we assigned to the questions using our classification criteria:
###Code
category_labels = ['Comprehension', 'Curiosity']
category_label_count = []
for label in category_labels:
category_label_count.append(dataset['Category'].tolist().count(label))
category_label_count
###Output
_____no_output_____
###Markdown
We can see that the **majority of questions (63%) are 'Curiosity' type** questions while the remaining (37%) are 'Comprehension' type. This indicates towards a general scietific temperament and curiosity among the students. Training a classifierSince the given data is a subset of an extensive dataset containing close to 40K questions asked by students, it is a good idea to train a classfier on a labelled sample data to learn the trends in data and use it to classify the entire dataset, instead of categorizing such a massive dataset by hand, which is inefficent and prone to errors. A common Natural Language Processing algorithm used in classification problems such as these is the 'Bag of Words' method, which breaks down each data point into a set of words that represents it. We then train a classifier to understand the corelations between a set of words and their label, which will enable the classfier to classify any unlabelled data. This strategy of NLP is known as Sentiment Analysis, and is frequently used to classfiy texts such as restaurant or movie reviews into positive or negative reviews. For example, reviews containing the keywords 'bad', 'terrible', 'poor' etc would indicate a negative review, while reviews containing the keywords 'great', 'excellent' etc indicates a positive review. Although it might seem like it, but this classification problem cannot be solved using the sentiment analysis method. It is possible for a human with an acceptable level of scientific knowledge and understanding to identify 'Curiosity' type questions among school students, because of the context he/she has. For example, to classify the question 'Do aliens exist?' as a 'Curiosity' type question, the classifier, human or machine, requires the context about the findings and limitaions of the human knowledge of Astronomy. We know intuitively that this is a curiosity driven question since we have not found any evidence of alien life so far and it is a question that has been asked through the centuries by many brilliant scientists. It is not practical to train a classfier that has such context about all branches of science. Also, it is not useful to try and use historical data to identify the Curiosity type questions, as in the example of the question on extraterrestrial life since science keeps evolving and moving forward, and the very nature of scientific curiosity makes it impossible to predict the direction it will take. Therefore, as a demonstration of how a classifier might be used to process the sample data, I will create and train a Naive-Bayes Classifier to categorise the questions in the sample into topics or branches of scientific study they belong to.
###Code
# since the dataset is already imported we will proceed to clean the text
# import the libraries to clean the text
import re
import nltk
# two common and powerful methods to clean the data are
# removing stopwords like 'the', 'a' etc
# and stemming, which converts words like 'rained', 'raining' etc
# to their root 'rain'
# import the packages that will do this efficiently
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# save the text into a corpus of cleaned data
corpus = []
# iterate through all questions and apply text
# cleaning operations on each one of them
print('An example of how the cleaning process works!\n')
for i in range(0, 100):
# replace all symbols and special characters with spaces
# since we only want to process words/text
question = re.sub('[^a-zA-Z]', ' ', dataset['Question'][i])
if i == 0:
print('Replace all symbols and special characters with spaces since we only want to process words/text:')
print("-"*50)
print(question)
# convert all text to lowercase
question = question.lower()
if i == 0:
print('\nConvert all text to lowercase:')
print("-"*50)
print(question)
# split the text into words to apply stemming to each word
question = question.split()
# import the stemming package
ps = PorterStemmer()
cleaned_question = []
# iterate through all the words in the question
for word in question:
# if word is not an english stopword, save the stemmed
# version of word to cleaned_question
if not word in set(stopwords.words('english')):
cleaned_question.append(ps.stem(word))
if i == 0:
print('\nRemove stop words and apply stemming:')
print("-"*50)
[print(i, end=" ") for i in cleaned_question]
# append the cleaned text for the question to corpus
corpus.append(cleaned_question)
# create the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=1500, tokenizer=lambda doc: doc, lowercase=False)
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 2].values
# splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
# fitting classifier to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
# predicting the Test set results
y_pred = classifier.predict(X_test)
# calculating the accuracy score of the classifier
from sklearn.metrics import accuracy_score
score = accuracy_score(y_test, y_pred)
score
###Output
_____no_output_____
###Markdown
I picked the Naive-Bayes classification algorithm to build the classifier because it is the most widely used classification algorithm for sentiment analysis and is better suited to work with when the sample size is small, as opposed to the Decision-Tree classifier, which tends to overfit the data and performs poorly with small samples. The classifier did not do a good job of classifying our test data, as indicated by an accuracy score of 50%. There could be a number of reasons for the poor performance of our algorithm, the most relevant of which is the tiny size of our training set. A sample size of 100 is very small when working with data such as science questions, which can have a very high order of variation. With a bigger sample of labelled data, we might be able to achieve a better accuracy score. A bigger sample size will enable the classifier to better understand the correlations between the keywords in a question and its label. For example, our sample did not have many instances of questions labelled 'Geology', therefore the classifier will have lower accuracy when trying to classify 'Geology' type questions. A larger dataset could correct this to some degree. To test the performance of our classifier on completely new data, I've created a test dataset from the student questions repository and labelled it by hand, just to have a value to measure the accuracy of the classifier against. The classifier is already trained on the previous data and has no knowledge of the labels on the new data. It will read the questions in the new test dataset and try to predict the field of science each question belongs to.
###Code
# importing the test dataset
test_dataset = pd.read_csv('test_data.tsv', delimiter='\t')
# perform text pre-processing
test_corpus = []
for i in range(0, 130):
question = re.sub('[^a-zA-Z]', ' ', test_dataset['Question'][i])
question = question.lower()
question = question.split()
ps = PorterStemmer()
cleaned_question = []
for word in question:
if not word in set(stopwords.words('english')):
cleaned_question.append(ps.stem(word))
test_corpus.append(cleaned_question)
# create a Bag of Words model for the test questions
test_questions = cv.transform(test_corpus).toarray()
# making the predictions using the classifier
predicted_labels = classifier.predict(test_questions)
# store the predicted labels for easier analysis
with open('predictions.tsv', 'w') as file:
file.write("Questions\tLabel (Manual)\tLabel (Classifier)\n")
for i in range(0, 130):
file.write(test_dataset['Question'][i] + "\t" + test_dataset['Field'][i] + "\t" + predicted_labels[i] + "\n")
# display the predictions for analysis and verification
predictions = pd.read_csv('predictions.tsv', delimiter='\t')
predictions.head(10)
###Output
_____no_output_____
###Markdown
wow that is a hideous, useless plot. Looks like women finish a predictable amount worse than men every year? I wonder how age groups do across all years. (That one might benefit from a gender split, more than the above.) Also TODO: a map of the states & countries of Boston Marathon participants. Scratch code kept for reference (see the sketch below): alltimes agegroups = range(15,90,5) agebins = pd.cut(alltimes['age'], agegroups, labels=['{}-{}'.format(age,age+5) for age in agegroups][:-1]) f, ax1 = plt.subplots(1) ax1.set_title("Boston Marathon times 2001-2014 by age group") seaborn.violinplot(pd.Series(alltimes.loc[:, "official"], name="time in minutes"), groupby=[alltimes.gender, agebins], ax=ax1) g = alltimes.groupby([agebins, alltimes.gender]) g.head()
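A sketch of that age-group idea using the same per-year CSVs as the cell below and the current seaborn API (assuming the results files also carry `age`, `gender` and `official` finish-time columns in minutes, as the scratch code above implies):

```python
import pandas as pd
import seaborn
import matplotlib.pyplot as plt

# rebuild the pooled results, keeping only the columns needed for the age-group plot
years = []
for year in range(2001, 2015):
    y = pd.read_csv("results/{}/results.csv".format(year), na_values="-")[["age", "gender", "official"]]
    years.append(y)
alltimes = pd.concat(years, ignore_index=True).dropna()

# bin ages into 5-year groups
agegroups = range(15, 90, 5)
labels = ['{}-{}'.format(age, age + 5) for age in agegroups][:-1]
alltimes['agebin'] = pd.cut(alltimes['age'], list(agegroups), labels=labels)

f, ax1 = plt.subplots(figsize=(16, 6))
ax1.set_title("Boston Marathon times 2001-2014 by age group")
seaborn.violinplot(x="agebin", y="official", hue="gender", data=alltimes, ax=ax1)
```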
###Code
years = []
for year in range(2001, 2015):
y = pd.read_csv("results/{}/results.csv".format(year), na_values="-")[["state"]]
years.append(y)
states = pd.concat(years, ignore_index=True).dropna()
g = states.groupby("state") #.aggregate(len)
h = g.count()
import json
json.dumps(h.to_dict()['state'])
dict(sorted(h.to_dict().iteritems()))
years = []
for year in range(2001, 2015):
y = pd.read_csv("results/{}/results.csv".format(year), na_values="-")[["country"]]
years.append(y)
states = pd.concat(years, ignore_index=True).dropna()
g = states.groupby("country") #.aggregate(len)
h = g.count()
json.dumps(h.to_dict()['country'])
###Output
_____no_output_____
###Markdown
Textual Analysis of Modern British Philosophers- This is my course project for the Digital Humanities course (Fall 17) at the University of Georgia with Dr. William Kretzschmar.- Should be compatible with both Python 2 and 3- Parts of the code in this notebook are adapted from this notebook https://github.com/brandomr/document_cluster/blob/master/cluster_analysis_web.ipynb- Tasks include: * keyword plotting and analysis; * Comparison of similarities of books based on TF-IDF (using scikit-learn); * Unsupervised classification of books; * Prediction of the category of a book based on the aforementioned unsupervised classification results; * LDA analysis; * Sentiment analysis (using TextBlob)
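For the similarity step, each book $d$ is turned into a weighted term vector and books are compared by cosine similarity; with scikit-learn's default smoothed IDF (as used by `TfidfVectorizer` below, followed by L2 normalisation) the weights and the similarity are

$$ \mathrm{tfidf}(t,d) = \mathrm{tf}(t,d)\left(1 + \ln\frac{1+N}{1+\mathrm{df}(t)}\right), \qquad \cos(d_i,d_j) = \frac{\mathbf{v}_i\cdot\mathbf{v}_j}{\lVert\mathbf{v}_i\rVert\,\lVert\mathbf{v}_j\rVert}, $$

where $N$ is the number of books and $\mathrm{df}(t)$ the number of books containing term $t$; the distance used for plotting and clustering later on is $1-\cos$.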
###Code
from __future__ import print_function, unicode_literals
try:
    import builtins
except ImportError:
    # Python 2 fallback
    import __builtin__ as builtins
    from __builtin__ import str
from sklearn import feature_extraction #For extracting features
import numpy as np #For basic scientific and numerical calculations
import nltk #Natural Language ToolKit
import pandas as pd #For dataframe processing
import re #For regular expression
import matplotlib.pyplot as plt #For plotting
# %matplotlib inline
from sklearn.metrics.pairwise import cosine_similarity
from collections import defaultdict
import os # for os.path.basename
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.feature_extraction import DictVectorizer
from gensim import corpora, models, similarities
from nltk.tag import pos_tag
from sklearn.cluster import KMeans
from sklearn.manifold import MDS
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from scipy.cluster.hierarchy import ward, dendrogram
from sklearn.feature_extraction.text import TfidfTransformer
from textblob import TextBlob
from nltk.corpus import stopwords
# Some Global Variables
AUTHOR_FILES = ['Bentham.txt', 'Berkeley.txt','Hobbes.txt','Hume.txt','Locke.txt', 'Mill.txt', 'Sidgwick.txt']
# each txt file contains all searchable works from a single philosopher
NUM_WORDS = 80 # show how many highly frequent words in the plots
MORE_SW = False # whether we want more stop words
BOOK_LIST = ['hobbes-leviathan', 'hobbes-liberty', 'hobbes-elements', 'hobbes-law', 'mill-liberty', 'mill-util','locke-understanding',
'locke-treatise', 'hume-treatise', 'hume-morals', 'hume-enquiry', 'berkeley-TOK','berkeley-TD',
'bentham-POM', 'bentham-FOG', 'mill-representative', #'burke-reflections','conway-nature','mill-comte','more-utopia',
'reid-mind', 'hume-religion'] # this is the booklist we will analyse. Must be in the same folder
TEST_FILES = ['sidgwick.txt','machiavelli.txt','more-utopia','burke-reflections','smith-sentiments','smith-wealth',
'fedPapers', 'mill-logic', 'kant-CPR', 'russell-AOM', 'russell-external', 'russell-ideals',
'russell-mysticism', 'russell-POP', 'spinoza-ethica', 'spinoza-understanding','Shi-PC', 'Shi-equality',
'Shi-AM', 'Shi-MP']
NUM_CLUSTERS = 6 # how many clusters we want to categorize when we process different individual books.
SENTIMENT_LIST = []
#Adding more stopwords. Providing the option of an aggressive word list.
# nltk.download('stopwords') #Not necessary if you have done it once
stop_words = list(set(stopwords.words('english')))
stop_words.append('\'s')#manually add 's into the stop word list (because it's annoying!) We may add more similar ones.
if MORE_SW: #if we want to add more stop words and render a more aggressive stopword list
with open('stopwords', 'r') as myfile:
sw = [i.strip().split(' ') for i in myfile]
sw1 = [val.lower() for sublist in sw for val in sublist]
stop_words.extend(sw1)
stop_words = set(stop_words)
def tokenize(text):
'''
    Tokenize the words in a text. If we need tokenizing and stemming, we can
    comment out this function and uncomment the one below.
'''
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
for token in tokens:
if re.search('[a-zA-Z]', token): #only search English words and put them into tokens
filtered_tokens.append(token.lower())
return (filtered_tokens)
# from nltk.stem.snowball import SnowballStemmer
# nltk.download('punkt')
# stemmer = SnowballStemmer("english")
# def tokenize(text):
# tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
# filtered_tokens = []
# for token in tokens:
# if re.search('[a-zA-Z]', token): #only search English words and put them into tokens
# if token.lower() not in stop_words:
# filtered_tokens.append(token.lower())
# stems = [stemmer.stem(t) for t in filtered_tokens]
# return stems # it turns out that stemming may not be a good choice...
def word_count(text):
'''
    Count the occurrences of each word in an author's work
'''
chunk_dict = {}
for i in text:
i = i.encode('utf-8', 'ignore').lower()
# i = str(i).lower() # we only need lower-case of an item in the word_chunk list.
if i.decode('utf-8', 'ignore') not in stop_words:
if chunk_dict.get(i.decode('utf-8', 'ignore')) is None: # we don't need the stopwords
chunk_dict[i.decode('utf-8', 'ignore')] = 1
else:
chunk_dict[i.decode('utf-8', 'ignore')] += 1
chunk_dict = sorted(chunk_dict.items(), key=lambda k_v: k_v[1], reverse=True)
return chunk_dict
# TD_count = word_count(h_tokens)
def plot_wc(wc_list, author_name):
'''
Plot the first NUM of words word count list, with the author name
'''
wc_plot = dict(wc_list[0:NUM_WORDS])
plt.figure(num=None, figsize=(96,64), dpi=80, facecolor='w', edgecolor='k')
plt.bar(range(len(wc_plot)), sorted(wc_plot.values(), reverse=True), align='center')
plt.xticks(range(len(wc_plot)), wc_plot.keys(), fontsize = 64, rotation=85)
plt.yticks(fontsize= 72)
plt.xlabel('Words', fontsize=78)
plt.ylabel('Occurances', fontsize=78)
# plt.rcParams["figure.figsize"] = (32,24)
plt.figtext(.5,.8,'Top ' + str(NUM_WORDS) + ' Words of ' + author_name, fontsize=96,ha='center')
# plt.show() # if we want to show the plot
# from here https://stackoverflow.com/questions/11373610/save-matplotlib-file-to-a-directory
script_dir = os.path.dirname(os.path.abspath('analysis.ipynb'))
results_dir = os.path.join(script_dir, 'keywordResults/')
if not os.path.isdir(results_dir):
os.makedirs(results_dir)
plt.savefig(results_dir + author_name , dpi=100)
plt.close() # Close the plot save memory
import codecs
def kw_plot(text):
'''
Wrapper to process texts for all philosophers
'''
with codecs.open('./authorCorpus/' + text, 'r', encoding='utf-8', errors='ignore') as file:
t = file.read()
author = (str(text)[:-4])
t_tokens = tokenize(t)
t_count = word_count(t_tokens)
t_plot = plot_wc(t_count, author)
return t_plot
for f in AUTHOR_FILES:
print ('Processing ' + str(f) + '...') #
%time kw_plot(f)
print ("Done!")
def read_book(booklist):
read = [] # array to store processed individual books
for b in booklist:
with codecs.open('./authorBooks/' + b, 'r', encoding='utf-8', errors='ignore') as file:
# with open('./authorBooks/' + b, 'r') as myfile:
book_file = file.read()
read.append(book_file)
return read
book_str_list = []
# BOOK_LIST.extend(TEST_FILES) #Optional! Just for fun!
book_str_list = read_book(BOOK_LIST)
print ('We are analyzing '+ str(len(book_str_list)) + ' books!') #Check whether if it's good
def process_books(str_list):
total_words = []
# Put all the tokenized words in a list
for i in book_str_list:
allwords_tokenized = tokenize(i)
total_words.extend(allwords_tokenized)
#define vectorizer parameters
tfidf_vectorizer = TfidfVectorizer(max_df=0.9, max_features=1000000, min_df=0.1, stop_words=stop_words,
use_idf=True, tokenizer=tokenize, ngram_range=(1,2))
print ('Processing Time:')
tfidf_matrix = tfidf_vectorizer.fit_transform(str_list)
print ('Now we have a matrix with the shape of' + str(tfidf_matrix.shape))
feature_terms = tfidf_vectorizer.get_feature_names()
tokenized_v_frame = pd.DataFrame({'words': total_words}, index = total_words)
return total_words, tfidf_matrix, feature_terms, tokenized_v_frame
%time totalvocab_tokenized, tfidf_matrix, key_terms, vocab_frame = process_books(book_str_list)
def kmcluster(matrix):
'''
Unsupervised learning by using KMeans from sklearn; return a list of cluster indexes
'''
km = KMeans(n_clusters=NUM_CLUSTERS, n_init = 60, max_iter=700, verbose = 0)
%time km.fit(matrix)
cluster_idx = km.labels_.tolist()
centroids = km.cluster_centers_.argsort()[:, ::-1] #Finding the centroids
return cluster_idx, centroids
clusters, order_centroids = kmcluster(tfidf_matrix)
print (clusters)
def gen_frame(blist, c):
'''
Generate a pandas data frame for the categorized results.
Two arguments are book list containing only names, and assigned cluster categories.
'''
cat = {'book_title': blist, "cluster":c} # Dictionary for categories
frame = pd.DataFrame(cat, columns = ['book_title', 'cluster']) # put the dict above into a dataframe
return frame
frame = gen_frame(BOOK_LIST, clusters)
frame.sort_values('cluster')
def top_term(v_f, terms, centroids):
print("Top terms per cluster: \n")
tmp_dict = defaultdict(list) #temporary dictionary that appends top terms per cluster
for i in range(NUM_CLUSTERS):
print("Cluster %d words:" % i, end = '')
for ind in centroids[i, :20]: #replace 60 with n words per cluster
            if str(v_f.loc[terms[ind].split(' ')].values.tolist()[0][0]) != 'nan': #get rid of extra 'nan' words
print (' %s' % v_f.loc[terms[ind].split(' ')].values.tolist()[0][0], end =',')
# yield v_f.ix[terms[ind].split(' ')].values.tolist()[0][0]
tmp_dict[i].append(v_f.loc[terms[ind].split(' ')].values.tolist()[0][0])
print('\n') #add whitespace
return tmp_dict
cluster_dict = top_term(vocab_frame, key_terms, order_centroids)
def cos_dist(matrix):
return 1 - cosine_similarity(matrix)
dist = cos_dist(tfidf_matrix)
%matplotlib inline
# MDS()
def plot_similarity(clusters, plotlist, word_matrix):
# convert two components as we're plotting points in a two-dimensional plane
# "precomputed" because we provide a distance matrix
# we will also specify `random_state` so the plot is reproducible.
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
pos = mds.fit_transform(dist) # shape (n_components, n_samples)
xs, ys = pos[:, 0], pos[:, 1]
#create data frame that has the result of the MDS plus the cluster numbers and titles
df = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=plotlist))
#group by cluster
groups = df.groupby('label')
# print (df)
# set up plot
fig, ax = plt.subplots(figsize=(17, 9)) # set size
cluster_colors = {0: '#1b9e77', 1: '#ffff00', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e', 5:'#000000'} # for plotting
# ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
#iterate through groups to layer the plot
#note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label
for name, group in groups:
ax.plot(group.x, group.y, marker='o', linestyle='', ms=16, label=cluster_dict[name], color=cluster_colors[name],
mec='none')
ax.set_aspect('auto')
ax.legend(numpoints=1, loc = 8, bbox_to_anchor=(0.005, -0.25), borderaxespad=0., mode = 'expand') #show legend with only 1 point
ax.set_title('Similarities of Documents Based on Top Terms', fontdict={'fontsize': 20})
#add label in x,y position with the label as the book title
for i in range(len(df)):
ax.text(df.loc[i]['x'], df.loc[i]['y'], df.loc[i]['title'], size=16)
return plt.show() #show the plot
plot_similarity(clusters, BOOK_LIST, tfidf_matrix)
import matplotlib
%matplotlib inline
linkage_matrix = ward(dist) #define the linkage_matrix using ward clustering pre-computed distances
print (linkage_matrix.shape)
fig, ax = plt.subplots(figsize=(15, 20)) # set size
ax = dendrogram(linkage_matrix, orientation="right", labels=BOOK_LIST, leaf_font_size = 24);
matplotlib.rcParams['lines.linewidth'] = 6
#uncomment below to save figure
plt.savefig('ward_clusters_1208.png', dpi=200, figsize=(15, 20)) #save figure as ward_clusters
plt.show()
# plt.close()
def train_test_clf(train_str_list, test_list):
'''
Train a Naive Bayes Classifier based on word counts in the training string set
'''
count_vect = CountVectorizer(tokenizer=tokenize, lowercase=False, stop_words = stop_words)
train_counts = count_vect.fit_transform(train_str_list)
tfidf_transformer = TfidfTransformer()
train_matrix = tfidf_transformer.fit_transform(train_counts)
clf = MultinomialNB().fit(train_counts, clusters)
docs_new = []
for b in test_list:
with codecs.open('./authorBooks/' + b, 'r', encoding='utf-8', errors='ignore') as file:
docs_new.append(file.read())
doc_new_counts = count_vect.transform(docs_new)
doc_new_tfidf = tfidf_transformer.transform(doc_new_counts)
clf_result = clf.predict(doc_new_tfidf)
# print (clf_result)
frame_1 = gen_frame(BOOK_LIST, clusters)
frame_2 = gen_frame(test_list, clf_result)
# print (clusters, "\n", frame_1, '\n', frame_2)
res_frame = pd.concat([frame_1, frame_2])
return clf_result, res_frame
predicted, new_frame = train_test_clf(book_str_list, TEST_FILES)
new_frame.sort_values('cluster') # Sorting Values
nltk.download('averaged_perceptron_tagger')
### Using LDA for Topic Modeling
def strip_propper(text):
'''
POS Tagging
'''
tagged = pos_tag(text.split())
non_pnouns = [word for word,pos in tagged if pos != 'NNP' and pos != 'NNPS']
return non_pnouns
preprocess = [strip_propper(doc) for doc in book_str_list]
def tag_tokenize(text):
'''
Another Tokenizer (but used after POS tagging)
'''
# tokens = [nltk.word_tokenize(word) for word in text]
filtered_tokens = []
for token in text:
if re.search('[a-zA-Z]', token): #only search English words and put them into tokens
token = re.sub("[^a-zA-Z]", "", token)
filtered_tokens.append(token.lower())
return (filtered_tokens)
%time tokenized_text = [tag_tokenize(text) for text in preprocess]
%time texts = [[word for word in text if word not in stop_words] for text in tokenized_text]
def lda_model(text):
dictionary = corpora.Dictionary(text)
dictionary.filter_extremes(no_below = 1, no_above = 0.9)
corpus = [dictionary.doc2bow(t) for t in text]
lda = models.LdaModel(corpus, num_topics=6,id2word=dictionary,update_every=6,chunksize=10000,passes=100)
return lda
%time lda = lda_model(texts) #build lda model
lda.show_topics()
lda.num_topics
topics_matrix = lda.show_topics(formatted=False, num_words=20)
print (topics_matrix[1])
# topics_matrix = np.array(topics_matrix)
topic_words = []
# topic_words = topics_matrix[:,:,1]
for i,j in topics_matrix:
topic_words.append([])
for k,l in j:
topic_words[i].append(str(k))
for i in topic_words:
print (i)
print ()
from matplotlib import cm
def pol_sub(title_list):
'''
Polarization and Subjectivity Analysis. Just for fun!
'''
book_s_list = read_book(title_list)
df_dict = {'book_title': title_list, 'Polarity':[TextBlob(book).sentiment.polarity for book in book_s_list],
'Subjectivity': [TextBlob(book).sentiment.subjectivity for book in book_s_list]}
res_df = pd.DataFrame(df_dict, columns = ['book_title', 'Polarity', 'Subjectivity'])
ax = res_df.plot.scatter('Polarity', 'Subjectivity', figsize = (36,24), style=['o', 'rx'], fontsize = 28,
s=300)
matplotlib.rcParams.update({'font.size': 28, 'axes.labelsize' : 46})
res_df[['Polarity', 'Subjectivity','book_title']].apply(lambda x: ax.text(*x, fontsize=28),axis=1);
return res_df
if SENTIMENT_LIST == []:
# Testing the files we have already
SENTIMENT_LIST = list(BOOK_LIST)
SENTIMENT_LIST.extend(TEST_FILES)
SENTIMENT_LIST.extend(['rousseau-contract', 'engels-condition','marx-CM'])
pol_sub(SENTIMENT_LIST)
###Output
_____no_output_____
###Markdown
Appendix. Here is the appendix: the stop word list used above, plus a small helper.
###Code
print (stop_words)
#Helper Function to get rid of all the copyright info.
import os, fnmatch
def findReplace(directory, find, replace, filePattern):
for path, dirs, files in os.walk(os.path.abspath(directory)):
for filename in fnmatch.filter(files, filePattern):
filepath = os.path.join(path, filename)
with open(filepath) as f:
s = f.read()
s = s.replace(find, replace)
with open(filepath, "w") as f:
f.write(s)
###Output
_____no_output_____
###Markdown
XAUUSD
###Code
import quandl
import pandas as pd
import numpy as np
# plotting modules
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import os
import warnings
import logging
# scikit-learn modules
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_array
# User defined modules
from column_transformers.technical_indicators import MacdSignal, StochRsiSignal, StochasticRsi
from column_transformers.base import BaseStrategy
from column_transformers.dates import DateDummy
from itertools import product
from operator import itemgetter
xau_ratios = [
"WGC/GOLD_DAILY_USD"
#"WGC/GOLD_DAILY_EUR",
# "WGC/GOLD_DAILY_TRY",
# "WGC/GOLD_DAILY_JPY",
#"WGC/GOLD_DAILY_GBP",
# "WGC/GOLD_DAILY_CAD",
# "WGC/GOLD_DAILY_CHF",
# "WGC/GOLD_DAILY_VND",
# "WGC/GOLD_DAILY_KRW",
# "WGC/GOLD_DAILY_RUB",
# "WGC/GOLD_DAILY_AUD",
]
economic_indc = []
###Output
_____no_output_____
###Markdown
Quandl data termsAnyone seeking to use this code must first apply for an account with [Quandl](https://www.quandl.com) in order to receive an valid authetitciation key.
###Code
DIR_NAME = os.path.abspath(os.path.join(os.getcwd(), '..'))
FILEPATH = os.path.join(DIR_NAME, "auth.txt")
with open(FILEPATH, "r") as f:
authtoken = f.read();
###Output
_____no_output_____
###Markdown
Retrieve Data
###Code
xau_df_dict = {}
for ratio in tqdm(xau_ratios):
name = ratio.lower().replace("/", "_")
# get the ratio dataframe
df = quandl.get(ratio, authtoken=authtoken, start_date = "1979-01-01")
df.columns = ["price"]
# check for missing business days
if pd.infer_freq(df.index) != "B":
        logging.warning("Datetime frequency is not Business Days")
xau_df_dict[name] = df
###Output
100%|██████████| 1/1 [00:03<00:00, 3.44s/it]
###Markdown
Volatility
###Code
annualization_factor = 252.
window_size = [5, 10, 20, 60, 120]
for ratio, df in tqdm(xau_df_dict.items()):
start_date, end_date = df.index[0], df.index[-1]
full_range = pd.date_range(start_date, end_date, freq = "B")
if not np.array_equal(df.index, pd.date_range(start_date, end_date, freq="B")):
logging.warning("\n{} is missing business days".format(ratio))
for window in window_size:
df['{}d_market_vol'.format(window)] = np.sqrt(
(annualization_factor/window) * df['price'].rolling(window).var(ddof=0))
###Output
100%|██████████| 1/1 [00:00<00:00, 2.02it/s]
###Markdown
Quandl Features
###Code
features = [
"FRED/T10Y2Y",
"RATEINF/INFLATION_USA",
]
for ratio, df in xau_df_dict.items():
for feature in features:
col_name = feature.lower().replace('/', '_')
# get quandl features. `end_date` is set to df.index[-1] to match the price data
data = quandl.get(feature, authtoken=authtoken, start_date = "1979-01-01", end_date = df.index[-1])
start_date, end_date = data.index[0], data.index[-1]
        # Some features contain missing data. To best simulate how the data would be
        # ingested in real time, the latest available value is forward-filled below.
if not np.array_equal(data.index, pd.date_range(start_date, end_date, freq="B")):
logging.warning("\n\t{} is missing business days".format(feature))
df[col_name] = data
df[:] = df.ffill()
###Output
WARNING:root:
FRED/T10Y2Y is missing business days
WARNING:root:
RATEINF/INFLATION_USA is missing business days
###Markdown
Technical indicator features
###Code
import talib
technical_indicators= {
# "MACD" : ("macd", "macdsignal", "macdhist"),
# "STOCHRSI" : ("fastk", "fastd"),
"MOM" : ("real"),
"APO" : ('real'),
"RSI" : ('real')
}
for ratio, df in xau_df_dict.items():
    # talib operates on the raw price series; absolute price levels are not important
    # here because the features are scaled later during preprocessing
price = df['price'].values
for indicator, indicator_type in technical_indicators.items():
# Return the result for each indicator
if indicator == 'STOCHRSI':
result = getattr(talib, indicator)(price, fastd_matype = 8)
else:
result = getattr(talib, indicator)(price)
if isinstance(result, np.ndarray):
df[indicator.lower()] = result
else:
for f, r in zip(indicator_type, result):
if f == indicator.lower():
df["{}".format(indicator.lower())] = r
else:
df["{}_{}".format(indicator.lower(), f)] = r
###Output
_____no_output_____
###Markdown
Strategies
###Code
class StochasticRsiStrategy(BaseStrategy):
# ======================================================================
# Constants
# ======================================================================
"""
    Define the indices of the price series and the indicators to be inserted
See Documentation for more information.
"""
PRICE = 0
FASTK, FASTD = 12, 13
def __init__(self, **kwargs):
super().__init__(**kwargs)
def price_indicator(self, X, timeperiod, fastk, fastd):
ind = StochasticRsi(self.PRICE, timeperiod, fastk, fastd)
return ind.fit_transform(X)
def _long_signal(self, price_indicator, long_entry, long_exit):
# Use np.insert if shift is greater than 1
signal_entry = price_indicator[:, self.FASTK] > long_entry
signal_hold = price_indicator[:, self.FASTK] > long_exit
# Define the long signal
long = signal_entry | signal_hold
return long[:-1] * 1
def _short_signal(self, price_indicator, short_entry, short_exit):
#Use np.insert if shift is greater than 1
signal_entry = price_indicator[:, self.FASTK] < short_entry
signal_hold = price_indicator[:, self.FASTK] <= short_exit
# Define the long signal
short = signal_entry | signal_hold
return short[:-1] * -1
def x(self, X):
X = check_array(X)
return X[:, self.PRICE]
class MacdStrategy(BaseStrategy):
# ======================================================================
# Constants
# ======================================================================
"""
    Define the indices of the price series and the indicators to be inserted
See Documentation for more information.
"""
PRICE = 0
MACD, MACD_SIGNAL, MACD_HIST = 1, 2, 3
# ======================================================================
# Constructors
# ======================================================================
def __init__(self, **kwargs):
super().__init__(**kwargs)
def price_indicator(self, X, fast_period, slow_period, signal_period):
real = X[:, self.PRICE]
macd_statistics = talib.MACD(
real,
fastperiod = fast_period,
slowperiod = slow_period,
signalperiod = signal_period
)
return np.c_[X, np.array(macd_statistics).T]
def _long_signal(self, price_indicator):
long = price_indicator[:,self.MACD] > price_indicator[:,self.MACD_SIGNAL]
return long[:-1] * 1
def _short_signal(self, price_indicator):
short = price_indicator[:, self.MACD] < price_indicator[:, self.MACD_SIGNAL]
return short[:-1] * -1
usd = data[['price']]
macd_params = dict(fast_period = range(5, 10),slow_period = range(20, 40),signal_period = range(5, 25, 5))
ind = MacdStrategy(**macd_params)
arr = ind.fit_transform(usd)
df = pd.DataFrame(arr, index = usd.index[1:])
df['long'] = (df[1] > df[2]).astype(int).shift(1)
df['short'] = (df[1] < df[2]).astype(int).shift(1)
df.dropna(inplace=True)
x_long = df['long'].copy()
x_short = df['short'].copy()
def signal_transform(s, n=50):
    # count how long the current signal state has persisted and decay its weight exponentially with that age
    transforms = s.groupby([s, s.ne(1).cumsum()]).cumcount()
    return np.exp(-transforms / n) * s
df['rets'] = df[0].pct_change()
df['new_long'] = signal_transform(x_long)
df['new_short'] = signal_transform(x_short)
df['port'] = (df['new_long'] - df['new_short']) * df['rets']
(1 + df['port']).cumprod().plot()
np.sqrt(252) * df['port'].mean() / df['port'].std(), np.sqrt(252) * df['rets'].mean() / df['rets'].std()
df
###Output
_____no_output_____
###Markdown
Plotting function to complete
###Code
# longs = X_rsi.index[X_rsi['long'] == 1]
# shorts = X_rsi.index[X_rsi['short'] == -1]
# # start date positions of new long/short positions
# long_indices_or_sections = np.arange(longs.size)[longs.to_series().diff() > pd.Timedelta('3D')]
# short_indices_or_sections = np.arange(shorts.size)[shorts.to_series().diff() > pd.Timedelta('3D')]
# long_date_regions = np.split(longs, long_indices_or_sections)
# short_date_regions = np.split(shorts, short_indices_or_sections)
# sns.set(rc={'figure.figsize':(16, 10)})
# fig, axes = plt.subplots(nrows=3, ncols=1)
# df.loc['1990', 'alpha_perf'].plot(ax = axes[0])
# df.loc['1990', ['macd', 'macd_macdsignal']].plot(ax=axes[1])
# df.loc['1990', 'gold_perf'].plot(ax = axes[2])
# for l_period, s_period in zip(long_date_regions, short_date_regions):
# for ax in axes:
# ax.axvline(l_period[0], color='green', linewidth=1)
# ax.axvline(s_period[0], color='green', linewidth=1)
# ax.axvline(l_period[-1], color='red', linewidth=1)
# ax.axvline(s_period[-1], color='red', linewidth=1)
# ax.axvspan(l_period[0], l_period[-1], alpha = 0.1, color = 'green')
# ax.axvspan(s_period[0], s_period[-1], alpha = 0.1, color = 'red')
###Output
_____no_output_____
###Markdown
Data preprocessing
###Code
from split._split import TrainValidateTest
data = xau_df_dict['wgc_gold_daily_usd'].copy()
# forward 5-day return
data['target'] = data['price'].shift(-5).pct_change(5)
# define training, validation and test data. The X and y data is split
# after the column transformation pipeline has been executed. This is to
# ensure that the X and y observations are aligned.
tvt = TrainValidateTest(0.7, 0.15, 0.15)
train_data, valid_data, test_data = tvt.transform(data)
train_data['price'].values
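# Hedged sketch (assumption): TrainValidateTest is taken to split the rows
# chronologically into 70/15/15 blocks; the manual equivalent below is for
# illustration only, and the *_chk names are hypothetical.
n_rows = len(data)
i_train, i_valid = int(n_rows * 0.7), int(n_rows * 0.85)
train_chk, valid_chk, test_chk = data.iloc[:i_train], data.iloc[i_train:i_valid], data.iloc[i_valid:]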
stoch_params = dict(timeperiod = range(10,20, 2), fastk = range(2, 5), fastd = range(2, 5), ob_region = range(45, 60, 5), os_region = range(0, 15, 5))
macd_params = dict(fast_period = range(5, 10),slow_period = range(20, 40),signal_period = range(5, 25, 5))
preprocess_pipeline = Pipeline([
('stoch_ud_signal', StochasticRsiStrategy(**stoch_params)),
('macd_ud_signal', MacdStrategy(**macd_params)),
#('date', DateDummy('weekday_name', 'month_name')),
#('vol_diff', VolatilityDiff()),
#('scalar', StandardScaler())
])
train_prepared, valid_prepared, test_prepared = (
preprocess_pipeline.fit_transform(train_data),
preprocess_pipeline.transform(valid_data),
preprocess_pipeline.transform(test_data)
)
train_prepared = train_prepared[~np.isnan(train_prepared).any(1), :]
valid_prepared = valid_prepared[~np.isnan(valid_prepared).any(1), :]
X_train = np.delete(train_prepared, [0, 11], axis=1)
X_valid = np.delete(valid_prepared, [0, 11], axis=1)
y_train = (train_prepared[:, 11] > 0).astype(int)
y_valid = (valid_prepared[:, 11] > 0).astype(int)
train_prepared[:, [0,15,16, 17]]
###Output
_____no_output_____
###Markdown
Model Selection
###Code
classifiers = [
SVC(gamma=2, C=1),
LogisticRegression(),
RandomForestClassifier(criterion='entropy', oob_score=True, n_jobs=-1, random_state= 0),
MLPClassifier(alpha=1, max_iter=1000),
AdaBoostClassifier(),
GradientBoostingClassifier(n_estimators=100),
]
results = {}
for clf in tqdm(classifiers, unit='Model') :
clf.fit(X_train, y_train)
name = str(clf).split('(')[0]
results[name] = {
"train_score" : clf.score(X_train, y_train),
"valid_score" : clf.score(X_valid, y_valid)
}
results
sns.set(rc={'figure.figsize':(16, 10)})
prob_array=[-1,1]
# map the two class probabilities to a signed score in [-1, 1]
alpha_score = clf.predict_proba(X_train).dot(np.array(prob_array))
# scale the forward returns by the signed score to get a naive alpha return
alpha_return = alpha_score * train_data.iloc[-alpha_score.size:, 11]
plt.plot((1 + alpha_return).cumprod())
plt.plot(train_data['price'] / train_data['price'][0])
clf.get_params
###Output
/home/joepy/anaconda3/lib/python3.7/site-packages/pandas/plotting/_converter.py:129: FutureWarning: Using an implicitly registered datetime converter for a matplotlib plotting method. The converter was registered by pandas on import. Future versions of pandas will require you to explicitly register matplotlib converters.
To register the converters:
>>> from pandas.plotting import register_matplotlib_converters
>>> register_matplotlib_converters()
warnings.warn(msg, FutureWarning)
###Markdown
Model Evaluation
###Code
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV, learning_curve
tscv = TimeSeriesSplit(n_splits = 10)
clf = RandomForestClassifier(criterion='entropy', oob_score=True, n_jobs=-1, random_state= 0)
rf_param_grid = {
'max_depth': [25, 35],
'min_samples_leaf': [5, 10],
'min_samples_split': [2, 5],
'n_estimators': [350, 400]
}
search = GridSearchCV(estimator=clf, cv=tscv, param_grid=rf_param_grid)
search.fit(X_train, y_train)
search.score(X_train, y_train), search.score(X_valid, y_valid), search.best_params_
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.metrics import precision_score, recall_score, precision_recall_curve, confusion_matrix
clf_parameters = {
'n_estimators': 900 ,
'criterion': 'entropy',
'min_samples_leaf': 10,
'max_depth' : 25,
'min_samples_split': 2,
'oob_score': True,
'n_jobs': -1,
'random_state': 0}
clf = RandomForestClassifier(**clf_parameters)
cross_val_score(clf, X_train, y_train, cv=tscv, scoring='accuracy')
# out-of-fold predictions are needed before building the confusion matrix
y_train_pred = cross_val_predict(clf, X_train, y_train, cv=tscv)
confusion_matrix(y_train, y_train_pred)
n_days = X_train.shape[0]
n_features = X_train.shape[1]
clf_parameters = {
'criterion': 'entropy',
'min_samples_leaf': 15,
'max_depth' : 25,
'min_samples_split': 8,
'oob_score': True,
'n_jobs': -1,
'random_state': 0}
n_trees_l = [5, 1000, 1500]
train_score = []
valid_score = []
oob_score = []
feature_importances = []
for n_trees in tqdm(n_trees_l, desc='Training Models', unit='Model'):
clf = RandomForestClassifier(n_estimators=n_trees, **clf_parameters)
clf.fit(X_train, y_train)
train_score.append(clf.score(X_train, y_train))
valid_score.append(clf.score(X_valid, y_valid))
oob_score.append(clf.oob_score_)
# feature_importances.append(clf.feature_importances_)
def plot(xs, ys, labels, title='', x_label='', y_label=''):
for x, y, label in zip(xs, ys, labels):
plt.ylim((0.3, 0.9))
plt.plot(x, y, label=label)
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(bbox_to_anchor=(1.04, 1), borderaxespad=0)
plt.show()
plot([n_trees_l]*3,
[train_score, valid_score, oob_score],
['train', 'validation', 'oob'],
'Random Forrest Accuracy',
'Number of Trees')
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
plot_learning_curve(clf, "Random Forest learning curve", X_train, y_train, n_jobs=4, cv=tscv)
#learning_curve(
#estimator, X, y, cv=cv, n_jobs=4, train_sizes=train_sizes)
tscv
###Output
_____no_output_____
###Markdown
Result Analysis
###Code
import numpy as np
import pandas as pd
df = pd.read_csv("result.csv")
df["Memory"] = df["Memory"] / 1024
df.head()
def get_time(df, size, cycle):
return np.mean(df[
(df["Size"] == size) & (df["Cycle"] == cycle)
]["Time"])
def get_memory(df, size, cycle):
return np.mean(df[
(df["Size"] == size) & (df["Cycle"] == cycle)
]["Memory"])
def merge(df):
# average Time and Memory over repeated runs for each (Size, Cycle) combination
df2 = []
sizes = [1000000, 2000000, 4000000, 6000000, 8000000,
10000000, 20000000, 30000000, 40000000]
for s in sizes:
for i in range(0, 10):
c = s / 10 * i
x = { "Size": s, "Cycle": c }
x["Time"] = get_time(df, s, c)
x["Memory"] = get_memory(df, s, c)
df2.append(x)
df2 = pd.DataFrame(df2)
return df2
df_running = merge(df[df["Algorithm"] == "RUNNING"])
df_reverse = merge(df[df["Algorithm"] == "REVERSE"])
df_pointself = merge(df[df["Algorithm"] == "POINTSELF"])
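# Sanity check (sketch): each merged frame should hold 9 list sizes x 10 cycle
# fractions = 90 rows of averaged Time/Memory measurements.
print(df_running.shape, df_reverse.shape, df_pointself.shape)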
import matplotlib.pyplot as plt
def plot(ax, t, size):
ax.plot('Cycle', t, data=df_pointself[df_pointself["Size"]==size],
marker='^', markerfacecolor='darkgreen', markersize=10, color='lightgreen',
alpha=0.7, linewidth=2, label="POINTSELF")
ax.plot('Cycle', t, data=df_reverse[df_reverse["Size"]==size],
marker='o', markerfacecolor='red', markersize=10, color='pink',
alpha=0.7, linewidth=2, label="REVERSE")
ax.plot('Cycle', t, data=df_running[df_running["Size"]==size],
marker='X', markerfacecolor='blue', markersize=10, color='skyblue',
alpha=0.7, linewidth=2, label="RUNNING")
ax.set_xlabel("Cycle Size")
if t == "Time":
ax.set_ylabel("Time (Seconds)")
ax.set_title("Linked List Length: {}".format(size))
elif t == "Memory":
ax.set_ylabel("Memory (MB)")
ax.legend()
fig, axes = plt.subplots(2, 3, figsize=(20, 10))
for i, t in enumerate(["Time", "Memory"]):
for j, size in enumerate([4000000, 10000000, 20000000]):
plot(axes[i][j], t, size)
fig.savefig("report/fig.pdf".format(t, size), bbox_inches="tight")
import matplotlib.pyplot as plt
def plot2(ax, t, cycle):
ax.plot('Size', t, data=df_pointself[df_pointself["Cycle"] / df_pointself["Size"] == cycle],
marker='^', markerfacecolor='darkgreen', markersize=10, color='lightgreen',
alpha=0.7, linewidth=2, label="POINTSELF")
ax.plot('Size', t, data=df_reverse[df_reverse["Cycle"] / df_reverse["Size"] == cycle],
marker='o', markerfacecolor='red', markersize=10, color='pink',
alpha=0.7, linewidth=2, label="REVERSE")
ax.plot('Size', t, data=df_running[df_running["Cycle"] / df_running["Size"] == cycle],
marker='X', markerfacecolor='blue', markersize=10, color='skyblue',
alpha=0.7, linewidth=2, label="RUNNING")
ax.set_xlabel("Linked List Length")
if t == "Time":
ax.set_ylabel("Time (Seconds)")
ax.set_title("Cycle Length / Total Length: {}".format(cycle))
elif t == "Memory":
ax.set_ylabel("Memory (MB)")
ax.legend()
fig, axes = plt.subplots(2, 3, figsize=(20, 10))
for i, t in enumerate(["Time", "Memory"]):
for j, cycle in enumerate([0.2, 0.5, 0.8]):
plot2(axes[i][j], t, cycle)
fig.savefig("report/fig2.pdf".format(t, size), bbox_inches="tight")
###Output
_____no_output_____
###Markdown
hpc-montecarloGoogle Cloud Datalab notebook for analysis of montecarlo stock portfolio tutorial. See URL for detailed tutorial. In this notebook, we will load the simulation data from bigquery, then do some simple analysis. The first step is to load the bigquery python package into the notebook, and then connect to bigquery to extract the aggregate portfolio data.
###Code
%load_ext google.cloud.bigquery
%%bigquery df
SELECT *
FROM `montecarlo_outputs.portfolio`
###Output
_____no_output_____
###Markdown
Now df contains portfolio as a pandas dataframe. Each row n represents a different simulation, and each column m represents the value of the portfolio m days into the simulation. Column 0 is the value of the portfolio before simulation, column 1 is the value after 1 day of simulation, and so on. We can plot the divergence of the simulations over time and see the spread.
###Code
df.T.plot(legend=False)
import matplotlib.pyplot as plt
mean = df.mean().reset_index().iloc[:,-1]
std = df.std().reset_index().iloc[:,-1]
plt.errorbar(mean.index, mean, xerr=0.5, yerr=2*std, linestyle='-')
plt.show()
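# Alternative spread view (sketch): empirical 5th and 95th percentile bands per day.
low, high = df.quantile(0.05), df.quantile(0.95)
plt.figure()
plt.fill_between(range(df.shape[1]), low.values, high.values, alpha=0.3)
plt.plot(df.mean().values)
plt.show()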
###Output
_____no_output_____
###Markdown
While this shows the general progression, we might be interested in the spread of values at the end of the simulations, day 252. Looking at this day in particular, we plot a histogram of the values.
###Code
df.iloc[:,-1].hist(bins=100)
df.iloc[:,-1].describe()
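# Sketch: a simple 5% quantile ("VaR-style") cut of the day-252 values, to
# complement the summary statistics above.
print("5% quantile of final portfolio value:", df.iloc[:, -1].quantile(0.05))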
###Output
_____no_output_____
###Markdown
We can also load the individual stock simulations as opposed to the aggregate portfolio valuation. Loading the data into a second data frame, we can then take a look at each of the FANG stocks to see the progression over the 1000 simulations.
###Code
%%bigquery df2
SELECT *
FROM `montecarlo_outputs.vartable`
df2[df2.string_field_0 == 'FB'][1:10].T[3:].plot(legend=False)
###Output
_____no_output_____
###Markdown
In case you get a lot of font warnings for matplotlib, use this to ignore them.
###Code
import warnings
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
Load helper functions
###Code
from common import read
from common import plot_df, plot_district, plot
import pandas as pd
def plotCity(df):
gp = df.groupby(['成交时间'])['成交价(元/平)']
res=pd.DataFrame({"volume":gp.size(),"median_price":gp.median(), "mean_price":gp.mean()})
res = res.iloc[:len(res),:]
title = city
res = plot(res, city, title, MA, ma_length, start_date)
return res
def plotAllDistrict(df):
districts = list(df['下辖区'].unique())
res = {}
for district in districts:
if str(district) != 'nan':
res[district] = plot_district(df, city, district, ma_length, start_date)
return res
###Output
_____no_output_____
###Markdown
Plot price trend charts for each city
###Code
import os
MA = True
ma_length = 30
start_date = '2015-01-01'
cityList = ['北京', '上海', '深圳', '杭州', '广州', '长沙', '厦门', '宁波', '合肥', '成都','重庆','武汉',
'西安','石家庄','苏州','南京', '大连', '青岛', '无锡', '保定', '温州', '廊坊', '天津']
#cityList = ['北京', '上海','深圳']
cityList = ['北京']
data = {}
res = {}
districtRes = {}
for city in cityList:
print(city)
df = read(city)
data[city] = df
res[city] = plotCity(df)
districtRes[city]=plotAllDistrict(df)
df_new = df.loc[df['成交时间']>'2020-01-30']
df_new = df_new.loc[df_new['成交价(元/平)']>5000]
len(df_new)
def drawDown(res):
# percentage decline of the latest smoothed mean price from its historical peak, per district
dd = {}
for district in res:
try:
dd[district] = (res[district]['mean_price'][-1]/res[district]['mean_price'].max()-1)
except:
pass
dd = pd.DataFrame({'跌幅':dd}).sort_values('跌幅')
dd['跌幅'] = ["%.2f%%"%(a*100) for a in dd['跌幅']]
display(dd)
#display(districtRes['北京']['海淀'])
drawDown(districtRes['北京'])
# Compute city rankings
if not os.path.exists('fig/allcity'):
os.makedirs('fig/allcity')
os.system('rm fig/allcity/*')
median = {}
mean = {}
yearChange = {}
change = {}
monthChange = {}
for city in cityList:
median[city] = int(res[city]['median_price'][-1])
mean[city] = int(res[city]['mean_price'][-1])
try:
yearChange[city] = "%.2f%%"%(100 * (res[city]['median_price'][-1]/res[city]['median_price'][-365] - 1))
except:
yearChange[city] = '数据不足'
change[city] = "%.2f%%"%(100 * (res[city]['median_price'][-1]/res[city]['median_price'][-180] - 1))
monthChange[city] = "%.2f%%"%(100 * (res[city]['median_price'][-1]/res[city]['median_price'][-30] - 1))
cityRank = pd.DataFrame({'中位数':median, '均值':mean,
'近一年':yearChange,
'近半年':change, '近一个月':monthChange}).sort_values('中位数', ascending = False)
cityRank['城市'] = cityRank.index
cityRank.index = range(1, len(cityRank) + 1)
cityRank = cityRank.loc[:,['城市', '中位数', '均值', '近一年', '近半年','近一个月']]
display(cityRank)
for index, row in cityRank.iterrows():
city = row['城市']
index = int(index)
cmd = 'cp fig/%s/%s.png fig/allcity/%.2d.%s.png'%(city, city, index, city)
os.system(cmd)
###Output
_____no_output_____
###Markdown
Merge historical data
###Code
from common import read
cityList = ['北京', '上海', '深圳', '杭州', '广州', '长沙', '厦门', '宁波', '合肥', '成都','重庆','武汉',
'西安','石家庄','苏州','南京', '大连', '温州']
for city in cityList:
df = read(city)
df.to_csv('data/all/%s.csv'%city, index=False)
if not os.path.exists('data/res'):
os.makedirs('data/res')
for city in cityList:
res[city].to_csv('data/res/'+city+'.csv')
from common import read
city = 'alltj'
df = read(city)
xiaoqu= open('xiaoqu/tjxiaoqu.txt', 'w')
xiaoquList = df['小区'].unique()
print(len(xiaoquList))
for xq in xiaoquList:
if str(xq) != 'nan':
xiaoqu.write(xq+'\n')
xiaoqu.close()
from common import read
city = 'alltj'
df = read(city)
#df.drop_duplicates(subset=['链家编号'])
print(df.columns)
print(len(df))
df.to_csv(city+'.csv')
df['土地年限']
from common import plot_dfs
import pandas as pd
def plotDianti(df):
pd.options.display.max_columns = None
df_dt= df.loc[df['电梯'] == '有']
df_ndt= df.loc[df['电梯'] != '有']
print('有电梯', len(df_dt))
print('无电梯',len(df_ndt))
plot_dfs([df_ndt,df_dt], '%s有无电梯'%city, ['无电梯', '电梯'], 30, '2015-01-01')
for city in cityList:
df = read(city)
plotDianti(df)
from common import plot_df
df_sjs = df.loc[df['下辖区']=='石景山']
xiaoquList = df_sjs['小区'].unique()
for xiaoqu in xiaoquList:
plot_df(df_sjs.loc[df_sjs["小区"]== xiaoqu], "石景山", xiaoqu, True, 10)
import pandas as pd
x=df_sjs.groupby('小区')
x_mean = x.mean()
x_size = x.size()
x_mean = x_mean.merge(pd.DataFrame({'size':x_size}), left_index = True, right_index = True)
x_mean=x_mean.loc[x_size>=5]
x_mean=x_mean.sort_values(by='成交价(元/平)', ascending=False).loc[:,["建筑面积","成交价(元/平)","售价(万)", 'size']]
x_mean
from common import plot, plot_dfs, plot_df
MA = True
ma_length = 10
def plot_xiaoqu(xiaoqu, df):
df_xiaoqu = df.dropna(subset=['小区'])
df_xiaoqu = df_xiaoqu.loc[df_xiaoqu['小区'].str.contains(xiaoqu)]
#plot_dfs([df, df_xiaoqu], xiaoqu, ['全市', xiaoqu], ma_length, '2015-01-01')
plot_df(df_xiaoqu, city, xiaoqu, MA, ma_length)
ma_length = 30
#plot_xiaoqu('八角', data['北京'])
plot_xiaoqu('观林园', df)
pd.options.display.max_columns = None
#df.loc[df['小区'].str.contains('团结湖南里')]
df.loc[df['小区'].str.contains('爱乐')].sort_values(by='成交时间', ascending=False)
#df.loc[df['小区'].str.contains('平乐园')].sort_values(by='成交时间', ascending=False)
x=df.groupby('小区')
x_mean = x.mean()
x_size = x.size()
#x_size
x_mean=x_mean.loc[x_size>=1]
x_mean=x_mean.sort_values(by='成交价(元/平)', ascending=False).loc[:,["建筑面积","成交价(元/平)","售价(万)"]]
x_mean
x_mean.index[:10]
df.sort_values('售价(万)', ascending=False).loc[:,["小区", "建筑面积","成交价(元/平)", "售价(万)"]]
df.sort_values('成交价(元/平)', ascending=False).loc[:,["小区", "建筑面积","成交价(元/平)", "售价(万)","成交时间"]]
ma_length = 10
mean_price = df['成交价(元/平)'].mean()
price_std = df['成交价(元/平)'].std()
print('mean:', mean_price, 'std:', price_std)
threshold = 1.3
#high_df = df.loc[df['成交价(元/平)']>= mean_price + threshold * price_std]
#low_df = df.loc[df['成交价(元/平)']< mean_price - threshold* price_std]
#medium_df = df.loc[df['成交价(元/平)']< mean_price + threshold * price_std]
#medium_df = medium_df.loc[medium_df['成交价(元/平)']>= mean_price - threshold * price_std]
sort_key = '成交价(元/平)'#
#sort_key = '售价(万)'
df = df.sort_values(sort_key, ascending = False)
count = len(df)//3
high_df = df.iloc[:count]
low_df = df.iloc[-count:]
medium_df = df.iloc[count:-count]
print(len(high_df), len(low_df), len(medium_df))
print(high_df[sort_key].mean(), medium_df[sort_key].mean(), low_df[sort_key].mean() )
ma_length = 30
def getPriceSeries(df):
gp = df.groupby(['成交时间'])['成交价(元/平)']
res=pd.DataFrame({"volume":gp.size(),"median_price":gp.median(), "mean_price":gp.mean()})
res = res.sort_index()
res = res.iloc[:len(res)-1]
res = get_moving_average(res, ma_length)
return res
highSeries=getPriceSeries(high_df)
mediumSeries=getPriceSeries(medium_df)
lowSeries=getPriceSeries(low_df)
fig, ax = plt.subplots()
ax.plot(highSeries['mean_price']/highSeries['mean_price'][0])
ax.plot(mediumSeries['mean_price']/mediumSeries['mean_price'][0])
ax.plot(lowSeries['mean_price']/lowSeries['mean_price'][0])
plt.xticks(rotation=45)
ax.legend(['high=%.f yuan'%(high_df[sort_key].mean()),
'medium=%.f yuan'%medium_df[sort_key].mean(),
'low=%.f yuan'%low_df[sort_key].mean()])
xticks = ax.xaxis.get_major_ticks()
interval = len(xticks)// 10
ax.set_xticks(ax.get_xticks()[::interval])
'done'
plt.axis
plt.plot(highSeries['median_price']/highSeries['median_price'][0])
plt.plot(mediumSeries['median_price']/mediumSeries['median_price'][0])
plt.plot(lowSeries['median_price']/lowSeries['median_price'][0])
plt.xticks(rotation=90)
plt.legend(['high','medium', 'low'])
def plotAllDistrict(df, ma_length = 10):
districts = list(set(df['下辖区']))
legend = ['beijing']
data = []
gp = df.groupby(['成交时间'])['成交价(元/平)']
res=pd.DataFrame({"volume":gp.size(),"median_price":gp.median(), "mean_price":gp.mean()})
res = res.iloc[:len(res)-1,:]
res = get_moving_average(res, ma_length)
data.append(res)
for district in districts:
gp = df.loc[df['下辖区']==district].groupby(['成交时间'])
res = pd.DataFrame({'volume':gp.size(),'mean_price':gp['成交价(元/平)'].mean(), 'median_price':gp['成交价(元/平)'].median()})
res = res.iloc[:len(res) -1,:]
res = get_moving_average(res, ma_length)
if len(res) < 1:
continue
data.append(res)
title = pinyin(district)
if district == '朝阳':
title = 'chao yang'
elif district == '长宁':
title = 'chang ning'
elif district == '闵行':
title = 'min hang'
else:
title = " ".join([x[0] for x in title])
legend.append(title)
for i in range(len(data)):
plt.plot(data[i]['mean_price']/data[i]['mean_price'].iloc[0])
plt.xticks(rotation=90)
plt.legend(legend)
plotAllDistrict(df, 30)
city = 'alltj'
MA = True
ma_length = 30
start_date = '2015-01-01'
df = read(city)
res = plotCity(df)
plotAllDistrict(df)
from common import read
city = '天津'
df = read(city)
df.groupby('下辖区').size().sort_values()
sum(df['下辖区']=='天津')
[ a.split()[0] for a in '''和平 4312
北辰 4634
东丽 5512
红桥 5937
河北 6700
西青 10668
河东 12797
河西 16081
南开 25265'''.split('\n')]
###Output
_____no_output_____
###Markdown
Facebook Report Domain List VerificationThe purpose of this notebook is to compare the list of top domains provided by Facebook in their "transparency report Q3" with the corresponding top domain list from Citizen Browser during the same period. The hope is that we can use these two lists to show that our results are indeed correlated with the general trends seen on Facebook, in order to give us confidence in results we see from other parts of the data.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import dataframe_image as dfi
from matplotlib.ticker import FormatStrFormatter
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
import rbo
from analysis import FBCBData, load_cb_unsponsored
from utils import save_tabular
data = FBCBData(load_cb=load_cb_unsponsored)
###Output
Found query cache: data/query_cache/5482d4efb1ef9376d2a905594cb9fbec.csv
###Markdown
First let's make the dataframe human readable to have a nice view for the methodology.
###Code
fbcb = data.joined_domains()
print(fbcb.info())
fbcb.index.names = ['Domain']
fbcb_clean = (fbcb
.drop(columns=['Unnamed: 0'], errors='ignore')
.rename(columns={
"unique_users_cb": "Unique Citizen Browser Users",
"rank_cb": "Ranking Markup",
"rank_fb": "Ranking Facebook",
"unique_users_fb": "Unique Facebook Users",
})
.head(20))
dfi.export(fbcb_clean, 'images/fig4.png')
fbcb_clean
fbcb_clean.to_clipboard()
data.joined_domains(how='outer').sort_values('rank_cb').head(20)
###Output
_____no_output_____
###Markdown
Domain CorrelationLet's look at the raw correlation between the domain rankings and the view counts. We assume that the p-values for the domain correlation are biased because the null hypothesis doesn't properly consider our full ranking and only sees the partial, intersected ranking with respect to the Facebook report.The domain correlation is done by taking the "Ranking Facebook" and "Ranking Markup" columns from the above dataframe and feeding them into scipy.stats.kendalltau.The views correlation is done by taking the "Unique Citizen Browser Users" and "Unique Facebook Users" columns and feeding them into scipy.stats.spearmanr.
###Code
print("Domain Correlation:", data.correlation_domains())
print("Views Correlation:", data.correlation_views())
###Output
Domain Correlation: KendalltauResult(correlation=0.4105263157894737, pvalue=0.011101359934968412)
Views Correlation: SpearmanrResult(correlation=0.5678827028149745, pvalue=0.00900226017644768)
###Markdown
P-Value SimulationIn order to calculate a more reasonable p-value, we sample from randomly generated full rankings of our domains and perform the same Kendall Tau correlation as above. The Markup's full ranking is shuffled, intersected with the Facebook ranking, and the correlation is computed (and returned by the `random_sampler`). We are then able to calculate the one-sided p-value by seeing how many samples had a correlation at least as high as the correlation we calculate for our list.
###Code
corr_random = []
corr, p = data.correlation_domains()
random_sampler = data.correlation_domains_random()
for _ in tqdm(range(500_000)):
c, _ = next(random_sampler)
corr_random.append(c)
print("domains corr:", corr)
print("approx p:", p)
print("exact one-sided p:", sum(1 for c in corr_random if c >= corr) / len(corr_random))
plt.figure()
sns.histplot(corr_random, stat='probability')
plt.axvline(corr)
plt.xlabel('Kendall Tau Correlation')
plt.title('Correlation of full ranking vs randomly generated lists')
plt.tight_layout()
plt.savefig("images/fig2.png")
plt.savefig("images/fig2.svg")
plt.show()
###Output
_____no_output_____
###Markdown
For posterity, we also calculate the RBO coefficient to see how much the intersection of the lists affects the results
###Code
data.correlation_domains(method='rbo')
###Output
_____no_output_____
###Markdown
Data VisualizationWe now dive a bit into the full dataset. Here, `df` is the full, non-intersected dataset. Note that all the `*_fb` fields are None except for those 20 domains from the facebook report.
###Code
df = data.joined_domains(how='outer')
df.describe()
df.head()
plt.figure()
ax = sns.barplot(data=df.head(87), x='rank_cb', y='unique_users_cb')
save_tabular("cb_top_87", df.head(87)[['rank_cb', 'unique_users_cb']])
ax.set_yscale('log')
ax.set_ylabel("Number of unique users")
ax.set_xlabel("Domain")
ax.set_title("Unique user counts for top 87 domains")
ax.set_xticks([])
ax.yaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
ax.yaxis.set_major_formatter(FormatStrFormatter("%.0f"))
plt.tick_params(axis='y', which='minor')
plt.tight_layout()
plt.savefig("images/fig1a.png")
plt.savefig("images/fig1a.svg")
plt.show()
plt.figure()
ax = sns.barplot(data=fbcb.sort_values('rank_fb'), x='rank_fb', y='unique_users_fb')
save_tabular("fb_top_20",
fbcb.sort_values('rank_fb')[['rank_fb', 'unique_users_fb']])
# ax.set_yscale('log')
ax.set_ylabel("Number of unique users")
ax.set_xlabel("Domain")
ax.set_title("Facebook user counts for top 20 domains")
ax.set_xticks([])
ax.yaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
ax.yaxis.set_major_formatter(FormatStrFormatter("%.0f"))
plt.tick_params(axis='y', which='minor')
plt.tight_layout()
plt.savefig("images/fig1b.png")
plt.savefig("images/fig1b.svg")
plt.show()
###Output
_____no_output_____
###Markdown
RBO VerificationFor verification that the intersection of the two ranked lists isn't an overly biasing effect, we quickly calculate the [RBO](https://dl.acm.org/doi/abs/10.1145/1852102.1852106) of the two sets to make sure it is consistent with our results above
###Code
cb = df.sort_values('rank_cb').index.to_list()
fb = df.query('rank_fb > 0').sort_values('rank_fb').index.to_list()
corr, _ = data.correlation_domains()
r = rbo.RankingSimilarity(cb, fb)
print("RBO Extrapolated (Eq. (32) from paper):", r.rbo_ext())
print("RBO Default:", r.rbo())
P = np.arange(0.05, 1, 0.025)
Y = [r.rbo(p=p) for p in P]
Y_ext = [r.rbo_ext(p=p) for p in P]
f = plt.figure()
plt.plot(P, Y, label='RBO')
plt.plot(P, Y_ext, label='RBO Ext')
# note: rbo and kendall aren't directly comparable, but it's a good smell test
plt.axhline(y=corr, label='Kendall Tau')
plt.legend()
plt.xlabel("p (top-weightness)")
plt.ylabel("RBO Coef")
plt.show()
###Output
RBO Extrapolated (Eq. (32) from paper): 0.6979801453080263
RBO Default: 0.6267565872809681
###Markdown
Views CorrelationNow just a quick dive into the correlation between the viewership numbers from the facebook report.
###Code
data.correlation_views()
plt.figure()
g = sns.regplot(data=fbcb,
x='unique_users_cb',
y='unique_users_fb',
n_boot=10_000)
g.set_ylabel('Facebook Unique Users')
g.set_xlabel('Citizen Browser Unique Users')
g.yaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
g.yaxis.set_major_formatter(FormatStrFormatter("%.0f"))
plt.tick_params(axis='y', which='minor')
plt.tight_layout()
plt.savefig("images/fig3.png")
plt.savefig("images/fig3.svg")
plt.show()
a = np.vstack([
fbcb.unique_users_cb.to_numpy(),
np.ones(20)
]).T
b = fbcb.unique_users_fb.to_numpy()[..., np.newaxis]
m, b = np.linalg.lstsq(a, b, rcond=-1)[0]
print("slope:", m)
print("int:", b)
###Output
slope: [96742.13084853]
int: [48969243.83136585]
###Markdown
Domains with high viewership users
###Code
df_hfu = data.high_frequency_users()
df_hfu.sample(n=10)
df_hfu.describe()
###Output
_____no_output_____
###Markdown
We group by url_domain and do some aggregate statistics. We define a "High View User" as someone who saw a domain more than 90 times in our sample period. This represents seeing the domain at least once per day.
###Code
dg = df_hfu.groupby('url_domain')
domains = (
dg
.agg({
"n_views": lambda d: (d > 90).sum(),
})
.sort_values("n_views", ascending=False)
.head(1000)
.rename(columns={"n_views": "n_high_viewers"})
.merge(
dg
.agg({"n_views": "count"})
.sort_values("n_views", ascending=False)
.head(1000)
.rename(columns={'n_views': 'n_users'}),
right_index=True,
left_index=True,
how='outer',
)
)
domains['frac_high_viewers'] = domains.n_high_viewers / domains.n_users
domains.describe()
def get_domain_samples_raw(df, domains, field, N=25):
return (
domains
.sort_values(field, ascending=False)
.head(N)
.reset_index()
.merge(df, on='url_domain')
)
def get_domain_samples(domains, field, N=25):
return domains.sort_values(field, ascending=False).head(N).reset_index()
d = get_domain_samples_raw(df_hfu, domains, 'n_high_viewers')
plt.figure()
ax = sns.boxplot(data=d, x='url_domain', y='n_views')
plt.xticks(rotation='vertical')
ax.set_yscale('log')
plt.xlabel('')
plt.ylabel('Distribution of High View Users')
plt.tight_layout()
plt.show()
d = get_domain_samples(domains, 'n_high_viewers')
plt.figure()
sns.barplot(data=d, x='url_domain', y='n_high_viewers')
save_tabular('n_high_viewers', d[['url_domain', 'n_high_viewers']])
plt.xlabel('')
plt.ylabel('Number of High View Users')
plt.xticks(rotation='vertical')
plt.tight_layout()
plt.show()
d = get_domain_samples(domains, 'n_high_viewers')
plt.figure()
sns.barplot(data=d, x='url_domain', y='frac_high_viewers')
save_tabular('n_high_viewers_by_frac', d[['url_domain', 'frac_high_viewers']])
plt.xlabel('')
plt.ylabel('Fraction of High View Users')
plt.xticks(rotation='vertical')
plt.tight_layout()
plt.show()
Q = df_hfu.groupby('url_domain').sum().reset_index().n_views.quantile(0.99)
d = get_domain_samples(
domains.query('n_users > @Q'),
'frac_high_viewers'
)
plt.figure()
sns.barplot(data=d, x='url_domain', y='frac_high_viewers')
save_tabular('frac_high_viewers_99pct', d[['url_domain', 'frac_high_viewers']])
plt.xlabel('')
plt.ylabel('Fraction of High View Users')
plt.xticks(rotation='vertical')
plt.tight_layout()
plt.show()
plt.figure()
sns.histplot(data=domains, x='n_users')
plt.show()
###Output
_____no_output_____
###Markdown
Now let's look at just news domains
###Code
domains_news = data.filter_news_sources(domains)
d = get_domain_samples(
domains_news,
'frac_high_viewers',
N=50
)
plt.figure()
sns.barplot(data=d,
x='url_domain',
y='frac_high_viewers',
)
save_tabular('news_frac_high_viewers', d[['url_domain', 'frac_high_viewers']])
plt.xlabel('')
plt.ylabel('Percentage of high viewership users')
plt.xticks(rotation='vertical')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Report on US-Healthcare Database with Statistical Analysis The health searches data contains statistics of Google searches made in the US. To start our analysis, let's read the data into a pandas dataframe and look at the first 3 rows to understand the columns/data.
###Code
import numpy as np
import pandas as pd
from IPython.display import display
import matplotlib.pyplot as plt
%matplotlib inline
healthSearchData=pd.read_csv("RegionalInterestByConditionOverTime.csv")
healthSearchData.head(3)
###Output
_____no_output_____
###Markdown
For our study, we do not consider the "geoCode" column, so let's drop it. This is because we already have the city name in a separate column and I would like to keep the data simple.
###Code
healthSearchData = healthSearchData.drop(['geoCode'],axis=1)
###Output
_____no_output_____
###Markdown
In the dataset, we have 9 medical conditions and the search data runs from 2004 to 2017. It's so refreshing to see data for more than 10 years. Anyway, now we plot the year-wise search change for the diseases available.
###Code
#2004-2017
#cancer cardiovascular stroke depression rehab vaccine diarrhea obesity diabetes
yearWiseMeam = {}
for col in healthSearchData.columns:
if '+' in col:
year = col.split('+')[0]
disease = col.split('+')[-1]
if not disease in yearWiseMeam:
yearWiseMeam[disease] = {}
if not year in yearWiseMeam[disease]:
yearWiseMeam[disease][year] = np.mean(list(healthSearchData[col]))
plt.figure(figsize=(18, 6))
ax = plt.subplot(111)
plt.title("Year wise google medical search", fontsize=20)
ax.set_xticks([0,1,2,3,4,5,6,7,8,9,10,11,12,13])
ax.set_xticklabels(list(yearWiseMeam['cancer'].keys()))
lh = {}
for disease in yearWiseMeam:
lh[disease] = plt.plot(yearWiseMeam[disease].values())
plt.legend(lh, loc='best')
###Output
_____no_output_____
###Markdown
It can be observed that the line plot has many uneven jumps. Let's smooth the plot and visualise what the search trends look like. This is just for observational benefit and need not be performed every time.
###Code
plt.figure(figsize=(18, 6))
ax = plt.subplot(111)
plt.title("Year wise google medical search [smoothened]", fontsize=20)
ax.set_xticks([0,1,2,3,4,5,6,7,8,9,10,11,12,13])
ax.set_xticklabels(list(yearWiseMeam['cancer'].keys()))
lh = {}
myLambda = 0.7
for disease in yearWiseMeam:
tempList = list(yearWiseMeam[disease].values())
localMean = np.mean(tempList)
smoothList = []
for x in tempList:
# pull each yearly value toward the series mean by a factor of myLambda
smoothList.append(x + myLambda * (localMean - x))
lh[disease] = plt.plot(smoothList)
plt.legend(lh, loc='best')
###Output
_____no_output_____
###Markdown
Table of contents (following the CRISP-DM process)
[SECTION 1](section1) | Business understanding : brief description, Question 1, Question 2, Question 3
[SECTION 2](section2) | Data understanding
[SECTION 3](section3) | Data preparation
[SECTION 4](section4) | Evaluation of the results : for each of Question 1, 2 and 3 - analyze, visualize, brief explanation for visualization
Remark
- Data modeling was not conducted for this analysis.
- Please note that another round of data preparation will be performed to fine-tune the data whenever necessary before the analysis of each question.
--- SECTION 1 | Business understanding
This analysis is to expand understanding of the fast-growing Airbnb business. For this purpose, Airbnb data were retrieved from Kaggle for two U.S. cities, Seattle and Boston, which include information about Airbnb activities in 2016.
[Question 1](question1) How well did the Airbnb business perform in 2016? Applying some straightforward hospitality performance evaluation metrics
[Question 2](question2) How much growth potential did Airbnb have? Looking at the number of new listings by year
[Question 3](question3) Which neighborhood has more expensive listings? Using Python's GeoPandas libraries for geographical mapping and visualization
--- SECTION 2 | Data understanding Importing packages
###Code
import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import os
from zipfile import ZipFile
from datetime import datetime
# Matplotlib axis formatter
from matplotlib.axis import Axis
import matplotlib.ticker as ticker
# Geopandas package
import geopandas as gpd
from shapely.geometry import Point, Polygon
import descartes
###Output
_____no_output_____
###Markdown
Reading Airbnb dataset
###Code
DATA_PATH = os.path.join(os.getcwd(), 'data')
FILE_PATH_SEATTLE = os.path.join(DATA_PATH, 'seattle.zip')
FILE_PATH_BOSTON = os.path.join(DATA_PATH, 'boston.zip')
def extract_df_from_airbnb_zipfile(PATH_ZIPFILE) :
'''
Extract csv files from a zipfile and return a list of dataframes
INPUT : file path to a zipfile to open
OUTPUT : a dictionary that contains dataframes of files
extracted from the zip file
'''
zf = ZipFile(PATH_ZIPFILE)
dfs = {
text_file.filename : pd.read_csv(zf.open(text_file.filename ))
for text_file in zf.infolist()
if text_file.filename.endswith('.csv')
}
print('Printing a dictionary with filenames as keys')
for filename in dfs.keys() :
print(f'Filename (keys): {filename}')
return dfs
###Output
_____no_output_____
###Markdown
Seattle
###Code
dfs_seattle = extract_df_from_airbnb_zipfile(FILE_PATH_SEATTLE)
listings_seattle = dfs_seattle['listings.csv']
reviews_seattle = dfs_seattle['reviews.csv']
calendar_seattle = dfs_seattle['calendar.csv']
###Output
_____no_output_____
###Markdown
Boston
###Code
dfs_boston = extract_df_from_airbnb_zipfile(FILE_PATH_BOSTON)
calendar_boston = dfs_boston['calendar.csv']
listings_boston = dfs_boston['listings.csv']
reviews_boston = dfs_boston['reviews.csv']
###Output
_____no_output_____
###Markdown
Exploring the dataThe respective Airbnb datasets are downloaded from : - Seattle : https://www.kaggle.com/airbnb/seattle- Boston : https://www.kaggle.com/airbnb/boston Content- Listings, including full descriptions and average review score- Review, including unique id for each reviewer and detailed comments- Calendar, including listing id and the price and availability for that day Inspiration- Can you describe the vibe of each Seattle neighborhood using listing descriptions?- What are the busiest times of the year to visit Seattle? By how much do prices spike?- Is there a general upward trend of both new Airbnb listings and total Airbnb visitors to Seattle?Reference to the real use of the data: http://insideairbnb.com/seattle/ --- SECTION 3 | Data preparation Defining helper functionsCollect all helper functions to use in this notebook.
###Code
# Helper function
def convert_str_to_datetime(df, date_feature) :
'''
Convert a series of date string to datetime object
INPUT : a dataframe and a column that contains date data
OUTPUT : a series in datetime dtype
'''
date_conversion = lambda x : datetime.strptime(x, "%Y-%m-%d")
return df[date_feature].apply(date_conversion)
def break_date(df, date_feature) :
'''
Break down a datetime object into year, month, day
and save the information in new columns of input dataframe
INPUT : a dataframe and a column that contains date data
OUTPUT : a dataframe that adds 3 newly created date columns
to the original dataframe
'''
df_new = df.copy()
df_new[date_feature] = convert_str_to_datetime(df_new, date_feature)
df_new['year'] = df_new[date_feature].apply(lambda x : x.year)
df_new['month'] = df_new[date_feature].apply(lambda x : x.month)
df_new['day'] = df_new[date_feature].apply(lambda x : x.day)
return df_new
def convert_price_float(series) :
'''
Wrangle the price column in object dtype
by removing $, comma(,) sign and converting into
float dtype
INPUT : a pandas Series that contain Airbnb price (dtype: object)
OUTPUT : an updated series with price data in float
'''
# Remove $ & , sign from price
rep = {'$':'', ',': ''}
for old, new in rep.items() :
series = series.str.replace(old, new, regex=False)
# convert date type to float
series = series.astype(float)
return series
def convert_binary_num(series) :
'''
Convert boolean notation t, f to numeric terms
INPUT : series that contains t, f
OUTPUT : a series that converted boolean notation
into numeric values (1, 0)
'''
series = series.map({
't': 1, 'f':0
})
return series
def plot_line_chart(x, height, layout_obj=False, rotation=False) :
'''
Plot line chart with labels
INPUT : values that need to appear on the x-axis(x)
and y-axis(height), and a layout_obj that contains customizable label data;
in case that x label needs 45 degree rotation, set rotation = True
OUTPUT : line chart that contains the custom set labels
'''
if ( layout_obj ) and ( not len(layout_obj) == 3 ):
print('Length of layout_obj must be 3')
raise
title, xlabel, ylabel = layout_obj.values()
plt.figure(figsize=(10,4))
plt.plot(x, height, marker='o')
plt.title(title)
plt.xlabel(xlabel);
plt.ylabel(ylabel)
plt.axhline(height.mean(), c='orange', ls='--')
if rotation :
plt.xticks(rotation=45);
plt.show()
def plot_bar_chart(x, height, layout_obj=False, rotation=False) :
'''
Plot bar chart with labels
INPUT : values that need to appear on the x-axis(x)
and y-axis(height), and a layout_obj that contains customizable label data;
in case that x label needs 45 degree rotation, set rotation = True
OUTPUT : bar chart that contains the custom set labels
'''
if ( layout_obj ) and ( not len(layout_obj) == 3 ):
print('Length of layout_obj must be 3')
raise
title, xlabel, ylabel = layout_obj.values()
plt.figure(figsize=(10,4))
plt.bar(x, height)
plt.title(title)
plt.xlabel(xlabel);
plt.ylabel(ylabel)
if rotation :
plt.xticks(rotation=45);
plt.show()
def map_calendar_month(series) :
'''
Map calendar month in numercial notation into
more readable month name in string (mmm format)
INPUT : a series that contains numeric month
OUTPUT : an updated series that mapped month in string
'''
try :
series = series.map({
1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun',
7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'
})
return series
except :
print('Failed mapping')
return
def create_points_geometry(series1, series2) :
""" Create points from longitude, latitude data
using GeoPandas module
INPUT :
series1 : longitude data series
series2 : latitude data series
OUTPUT :
transformed geometry list which will be used
as a parameter of GeoDataFrame
"""
return [Point(xy) for xy in zip(series1, series2)]
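# Example usage (sketch, assuming the listings table keeps its 'longitude' and
# 'latitude' columns):
# geometry = create_points_geometry(listings_seattle['longitude'], listings_seattle['latitude'])
# gdf_seattle = gpd.GeoDataFrame(listings_seattle, geometry=geometry)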
###Output
_____no_output_____
###Markdown
Data cleaning `calendar` dataframeFor exploring Airbnb performances (related to **Question 1**) Cleaning Seattle calendar data
###Code
# Copy a dataframe for data cleaning
calendar_sea_copy = calendar_seattle.copy()
calendar_sea_copy.info()
# Check the data period
# should be 365 days from 2016-01-04 to 2017-01-02
assert len(calendar_sea_copy.date.value_counts().index) == 365
# Convert t, f to binary int : available
calendar_sea_copy.available = convert_binary_num(calendar_sea_copy.available)
# Remove $ & , sign from price
calendar_sea_copy.price = convert_price_float(calendar_sea_copy.price)
# Saving the cleaned data
calendar_sea_copy.to_csv('data/calendar_seattle_cleaned.csv', index=False)
###Output
_____no_output_____
###Markdown
Cleaning Boston calendar data
###Code
# Copy a dataframe for data cleaning
calendar_bos_copy = calendar_boston.copy()
calendar_bos_copy.info()
# Check the data period
assert len(calendar_bos_copy.date.value_counts().index) == 365
# Convert t, f to binary int : available
calendar_bos_copy.available = convert_binary_num(calendar_bos_copy.available)
# Remove $ & , sign from price
calendar_bos_copy.price = convert_price_float(calendar_bos_copy.price)
# Saving the cleaned data
calendar_bos_copy.to_csv('data/calendar_boston_cleaned.csv', index=False)
###Output
_____no_output_____
###Markdown
`listings` dataframeFor exploring supplier side and growth of Airbnb (related to **Question 2 & 3** ) Cleaning Seattle listings data
###Code
# Copy a dataframe for data cleaning
listings_sea_copy = listings_seattle.copy()
listings_sea_copy.head(3)
# Trim the dataset with features that are host related
cols_host = listings_sea_copy.loc[:, listings_sea_copy.columns.str.contains('host')]
extra_info = listings_sea_copy[['property_type', 'room_type', 'price']]
host_original = cols_host.join(extra_info)
print(host_original.shape)
host_original.head(3)
###Output
(3818, 22)
###Markdown
Drop unnecessary / repetitive featuresreturn host_clean_v0 dataframe
###Code
# Drop unnecessary columns
# calculated_host_listings_count are more accurate info
drop_cols = ['host_url', 'host_thumbnail_url', 'host_picture_url', 'host_verifications',
'host_has_profile_pic', 'host_listings_count', 'host_total_listings_count']
host_clean_v0 = host_original.drop(columns = drop_cols).copy()
#pd.options.display.max_row = None
#host[host.duplicated(subset=['host_id'], keep=False)].sort_values(by='host_id')
###Output
_____no_output_____
###Markdown
Drop duplicates
###Code
# Check for duplicates
( host_clean_v0.drop_duplicates(subset=['host_id'], keep='last').shape[0]
/ host_clean_v0.shape[0] )
###Output
_____no_output_____
###Markdown
It looks like host_id is duplicated when a host has more than one listing. Drop duplicates by host_id (not host_name!). 72% of rows remain after removing duplicates, and it is rational to drop them and keep the last (latest) row.return host_clean_v1 dataframe
###Code
# Drop duplicates
host_clean_v1 = host_clean_v0.drop_duplicates(subset=['host_id'], keep='last')
###Output
_____no_output_____
###Markdown
Drop missing valuesIn this analysis, Airbnb's growth potential will be evaluated by measuring the number of new hosts. The logic is that the more attractive the Airbnb business is, the more likely new hosts are to join and provide listings, which will then lead to market growth, especially on the supply side.`host_since` then is a feature to aggregate for the number of new listings by year.Therefore, the column shouldn't have any missing data.return host_clean_v2 dataframe
###Code
# Check for null data in 'host_since' column
host_clean_v1[host_clean_v1['host_since'].isnull()]
# Drop 2 missing values by 'host_since'
host_clean_v2 = host_clean_v1[host_clean_v1['host_since'].notnull()]
###Output
_____no_output_____
###Markdown
Change to relevant data type`host_since` feature needs conversion to date time object. return host_clean_v3 dataframe
###Code
# Convert into date time object\
host_clean_v3 = break_date(host_clean_v2, 'host_since')
print(host_clean_v3.shape)
host = host_clean_v3
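# Sketch (illustrative only, not the final Question 2 analysis): new hosts per
# year can be counted from the 'year' column that break_date added above.
new_hosts_per_year = host.groupby('year')['host_id'].count()
print(new_hosts_per_year)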
###Output
_____no_output_____
###Markdown
Create wrangling function for listings dataframeChecked if the Seattle and Boston dataframes share all features in common : there are three features that the Seattle listings dataset does not have, which however are not relevant for this analysis.
###Code
#listings_bos.columns.isin(listings_sea.columns)
listings_boston.columns[12:15]
###Output
_____no_output_____
###Markdown
###Code
def wrangle_airbnb_host_data(df) :
'''
Wrangle 'listings' dataframe to extract features that
are relevant for the analysis, drop duplicates and null values,
and convert to the correct datatype
INPUT : Airbnb listings dataframe (to be validated in this function)
OUTPUT : cleaned dataframe ready for analysis
'''
# Check if input dataframe is 'listings' dataset
# These three key columns must be inside the dataframe
key_cols= ['host_id', 'host_since', 'calculated_host_listings_count']
if df.columns.isin(key_cols).sum() !=3 :
print('Check if input dataframe is correct or data format has been changed')
return
print(f'Original dataframe has {df.shape[0]} x {df.shape[1]} dataset')
df_copy = df.copy()
# Drop unnecessary columns
host_related = df.loc[:, df.columns.str.contains('host')]
extra_info = df[['property_type', 'room_type', 'price']]
host_df = host_related.join(extra_info)
drop_cols = ['host_url', 'host_thumbnail_url', 'host_picture_url', 'host_verifications',
'host_has_profile_pic', 'host_listings_count', 'host_total_listings_count']
host_df = host_df.drop(columns = drop_cols)
# Drop duplicates
host_df = host_df.drop_duplicates(subset=['host_id'], keep='last')
# Drop null values for 'host_since' columns
# 'host_since' is a key feature for new listings analysis
# therefore dropping missing values that are not providing any information
host_df = host_df[host_df['host_since'].notnull()]
# Convert into date time object
host_df = break_date(host_df, 'host_since')
print(f'After wrangling : returning {host_df.shape[0]} x {host_df.shape[1]} dataset')
return host_df
# To load cleaned dataframe ready for analysis
listings_sea_cleaned = wrangle_airbnb_host_data(listings_seattle)
listings_bos_cleaned = wrangle_airbnb_host_data(listings_boston)
# Store the cleaned dataframe
listings_sea_cleaned.to_csv('data/listings_seattle_cleaned.csv', index=False)
listings_bos_cleaned.to_csv('data/listings_boston_cleaned.csv', index=False)
###Output
_____no_output_____
###Markdown
--- SECTION 4 | Evaluation of the resultsIn this section, the following work will be performed- Data preparation- Analysis- Visualization- Brief explanation for visualization `QUESTION1` How well did the Airbnb business perform in 2016?Working with the **`calendar`** dataframe for both the Seattle and Boston Airbnb datasets. I will work on the Seattle data first and subsequently apply a function for wrangling and visualizing the Boston data.
###Code
# Load cleaned dataframes
calendar_sea = pd.read_csv('data/calendar_seattle_cleaned.csv')
calendar_bos = pd.read_csv('data/calendar_boston_cleaned.csv')
###Output
_____no_output_____
###Markdown
Occupancy rate & price through the yearExplore Seattle calendar data first
###Code
# Occupancy rate and price per day
occ_price_seattle = calendar_sea.groupby('date').mean().drop(columns='listing_id')
occ_price_seattle.columns = ['occ_rate', 'avg_rate']
occ_price_seattle.describe()
ticks = np.arange(0, len(occ_price_seattle.index)+1, 30)
labels = [occ_price_seattle.index[idx] for idx in ticks]
date = occ_price_seattle.index
rate_dict = [{'data': occ_price_seattle.occ_rate, 'desc': 'Occupancy Rate'},
{'data': occ_price_seattle.avg_rate, 'desc': 'Average Room Rate'}]
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(12,4))
for i in [0, 1] :
axes[i].plot(date, rate_dict[i]['data'])
axes[i].set_title('Seattle Airbnb ' + rate_dict[i]['desc'] + ' in 2016')
axes[i].set_xlabel('Date')
axes[i].set_ylabel(rate_dict[i]['desc'])
axes[i].set_xticks(labels)
axes[i].axhline(rate_dict[i]['data'].mean(), ls='--', color='orange', lw=1.5)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
In Seattle, the occupancy rate starts to increase from the beginning of 2016 until its first peak around early April. It suddenly dips right after the April peak and stays idle until its second dip in early July. After that the trend starts to rise until the end of the year. When it comes to average room rate, it starts to increase from the beginning of the year until its peak near July. The room rate stays at its highest level for nearly 2 months, then slowly decreases and remains around the average level. Next, I will break the dates down into year, month and day for more detailed analysis. Monthly trend for occupancy rate and average room rate in 2016Airbnb Seattle The below code is to break the dates into year, month and day
###Code
# Copy a dataframe
monthly_analysis_seattle = calendar_sea.copy()
# Convert into datetime object : date
date_conversion = lambda x : datetime.strptime(x, "%Y-%m-%d")
monthly_analysis_seattle.date = monthly_analysis_seattle .date.apply(date_conversion)
# Insert year, month, day series into calendar dataframe
monthly_analysis_seattle.insert(2, 'year', monthly_analysis_seattle.date.apply(lambda x : x.year))
monthly_analysis_seattle.insert(3, 'month', monthly_analysis_seattle.date.apply(lambda x : x.month))
monthly_analysis_seattle.insert(4, 'day', monthly_analysis_seattle.date.apply(lambda x : x.day))
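# A faster equivalent using the break_date helper defined earlier (sketch):
# monthly_analysis_seattle = break_date(calendar_sea.copy(), 'date')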
###Output
_____no_output_____
###Markdown
The above code takes too much time to run; any later use will be improved with the `break_date` function.
###Code
# Create a table that aggregates monthly averages
monthly_analysis_seattle = monthly_analysis_seattle.groupby('month').mean()[['available', 'price']]
monthly_analysis_seattle.head()
# Mapping integers to month name
monthly_analysis_seattle.index = map_calendar_month(monthly_analysis_seattle.index)
# Change column names
monthly_analysis_seattle.columns = ['occ_rate', 'room_rate']
# Confirm the change
monthly_analysis_seattle.head()
date = monthly_analysis_seattle.index
rate_dict = [{'data': monthly_analysis_seattle.occ_rate, 'desc': 'occupancy rate (%)'},
{'data': monthly_analysis_seattle.room_rate, 'desc': 'average room rate ($)'}]
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(10, 5))
for i in [0, 1] :
axes[i].plot(date, rate_dict[i]['data'], marker='o', lw=3)
axes[i].set_title('Seattle Airbnb ' + rate_dict[i]['desc'] + ' in 2016')
axes[i].set_ylabel(rate_dict[i]['desc'])
axes[i].axhline(rate_dict[i]['data'].mean(), ls='--', color='orange', lw=1)
axes[1].set_xlabel('month')
fig.tight_layout()
axes[0].set_yticks(np.arange(0.4, 0.8 + 0.2, 0.1))
axes[0].set_yticklabels([str(occ) for occ in range(40, 80 + 20, 10)])
axes[1].set_yticks(np.arange(100, 160 + 20, 20))
axes[1].set_yticklabels([str(price) for price in range(100, 160 + 20, 20)])
plt.show()
###Output
_____no_output_____
###Markdown
The occupancy rate starts at its lowest level, below 60%, in the beginning of the year, and follows an increasing trend until March. As seen previously, there was a sudden dip in April, which took effect in the occupancy rate for that month. The occupancy rate in July stays below the year's average level and it continues on in August. However, it recovers and reaches its peak in December. Average room rates peak in the summer period from June to August, and the higher price level may explain the lower occupancy level during the same period. However, is it good or bad? It is hard to judge the performance by separating the occupancy rate and average room rate, and there is a metric that the hotel industry uses to measure business performance, called RevPAR. How about RevPAR ? RevPAR, or revenue per available room, is a performance metric in the hotel industry that is calculated by dividing a hotel's total guestroom revenue by the room count and the number of days in the period being measured. https://en.wikipedia.org/wiki/RevPARIt can alternatively be calculated as $\text{RevPAR} = \text{occupancy rate (rooms occupied / rooms available)} \times \text{average room rate}$.
###Code
monthly_analysis_seattle['revpar'] = ( monthly_analysis_seattle.occ_rate
* monthly_analysis_seattle.room_rate )
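# Quick sanity check of the formula (sketch): roughly 67% occupancy at a ~$138
# average room rate gives a RevPAR near $92, in line with the Seattle yearly
# averages reported further below.
print(round(0.67 * 138, 2))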
date = monthly_analysis_seattle.index
revpar = monthly_analysis_seattle['revpar']
layout_obj = {
'title': 'RevPar Performance of AirBnb in Seattle in 2016',
'xlabel': 'month',
'ylabel': 'revenue per available room ($)'
}
plot_bar_chart(date, revpar, layout_obj, rotation=False)
plt.savefig('assets/revparSeattle.png', format='png')
plt.show()
monthly_analysis_seattle['revpar'].sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
The RevPar is \$99.45 in June, which is the second highest level through the year. It looks like the decrease in occupancy rate in August affected the RevPar, but the performance in August is not too bad at \$97.18. RevPar performance is quite steady from Q3 onwards, but a further study seems necessary to figure out why it started low in the beginning of the year. Supposedly there was a series of concerns around Airbnb at the time that may have affected the confidence of consumers and hosts, such as the following:- Concerns over the company's effect on local housing market affordability, and political deliberation over whether to regulate the company's activity: [source1](https://www.seattletimes.com/business/airbnb-says-its-rentals-arent-affecting-housing-affordability/) [source2](https://www.geekwire.com/2016/seattle-regulates-airbnb-company-releases-study-showing-178m-annual-impact-local-economy/)- Airbnb would start collecting taxes in Washington state: [source](https://www.geekwire.com/2015/airbnb-will-start-collecting-taxes-in-washington-state-on-behalf-of-hosts) --- Extend the analysis to Boston dataUse functions (DRY principle) to perform the above CRISP-DM process Data explorationThe date period is not consistent across the Seattle and Boston datasets!
###Code
calendar_bos.date.sort_values() # Boston calendar data starts from 2016-09-06
calendar_sea.date.sort_values()
###Output
_____no_output_____
###Markdown
There is a date consistency issue between the Seattle and Boston calendar data. Boston calendar data covers 2016-09-06 to 2017-09-05, whereas Seattle data covers 2016-01-04 to 2017-01-02. Therefore it is not a good idea to compare the two cities' performances month by month. --- Comparison between Seattle and BostonThe transformed data shows daily averages.Terminology used: - Occupancy rate : rooms rented out / total available rooms on a given day- Average room rate : average price of available rooms on a given day- RevPAR : Occupancy rate x Average room rate, an aggregated measure of rental performance Create a new data table for comparative analysis
###Code
def get_analysis_table(df) :
''' Create a new table customized for Airbnb performance analysis
INPUT : calendar dataframe
OUTPUT : a new dataframe with date (daily) as index and
three key performance metrics as features - occupancy rate, average room rate and revpar
'''
table = df.groupby('date').mean().drop(columns='listing_id')
table.columns = ['occ_rate', 'avg_room_rate'] # daily
table['revpar'] = table['occ_rate'] * table['avg_room_rate']
return table
analysis_seattle = get_analysis_table(calendar_sea.copy())
analysis_boston = get_analysis_table(calendar_bos.copy())
###Output
_____no_output_____
###Markdown
Occupancy rate
###Code
title = 'Airbnb occupany rate comparison (year 2016)\nSeattle and Boston'
xlabel = 'occupancy rate'
ylabel = 'count'
fig, ax = plt.subplots(figsize=(12,4))
ax = sns.histplot(analysis_boston['occ_rate'], label='Boston')
sns.histplot(analysis_seattle['occ_rate'], ax=ax, color='orange', label='Seattle');
ax.set(xlabel=xlabel, ylabel=ylabel, title=title);
ax.set_xticks(np.arange(0, 1, 0.1))
ax.set_xticklabels(f'{i}%' for i in np.arange(0, 100, 10))
plt.legend()
plt.savefig(fname='assets/occ.png', format='png')
plt.show()
###Output
_____no_output_____
###Markdown
Room rate
###Code
title = 'Airbnb average room rate comparison (year 2016)\nSeattle and Boston'
xlabel = 'room rate ($)'
ylabel = 'count'
fig, ax = plt.subplots(figsize=(12,4))
ax = sns.histplot(analysis_boston['avg_room_rate'], label='Boston')
sns.histplot(analysis_seattle['avg_room_rate'], ax=ax, color='orange', label='Seattle');
ax.set(xlabel=xlabel, ylabel=ylabel, title=title);
plt.legend()
plt.savefig('assets/adr.png', format='png')
plt.show()
###Output
_____no_output_____
###Markdown
RevPar
###Code
title = 'Airbnb revpar comparison (year 2016)\nSeattle and Boston'
xlabel = 'revpar ($)'
ylabel = 'count'
fig, ax = plt.subplots(figsize=(10,4))
ax = sns.histplot(analysis_boston['revpar'], label='Boston')
sns.histplot(analysis_seattle['revpar'], ax=ax, color='orange', label='Seattle');
ax.set(xlabel=xlabel, ylabel=ylabel, title=title);
plt.legend()
plt.savefig('assets/revpar.png', format='png')
plt.show()
print(f'SEATTLE:\n{analysis_seattle.describe()}')
print()
print(f'BOSTON:\n{analysis_boston.describe()}')
###Output
SEATTLE:
occ_rate avg_room_rate revpar
count 365.000000 365.000000 365.000000
mean 0.670610 137.901783 92.507204
std 0.047899 9.860142 9.165813
min 0.454426 117.685413 55.479047
25% 0.647197 132.446443 90.289419
50% 0.674961 136.731206 94.582504
75% 0.702462 146.930502 97.844421
max 0.765322 157.480000 109.101886
BOSTON:
occ_rate avg_room_rate revpar
count 365.000000 365.000000 365.000000
mean 0.491284 201.165200 97.489904
std 0.076196 20.989130 10.679226
min 0.158951 177.023002 38.314278
25% 0.484663 186.764936 92.868935
50% 0.493865 196.100469 99.741495
75% 0.542666 205.207474 103.198550
max 0.615449 286.921977 119.380926
###Markdown
Overall, RevPAR performance is better in Boston than in Seattle. Although the occupancy rate in Seattle is more stable (by standard deviation) and higher on average (about 67%), the gap in average room rate between the two cities is larger. --- `QUESTION2` How much growth potential did Airbnb have?Earlier we saw that first-quarter RevPAR performance in Seattle was low, with both the occupancy rate and the average room rate below their average levels. To gain better insight, I would like to explore the supply side during the same period, particularly in the following areas:- \# of new hostings : `host_since`, `calculated_host_listings_count`- number of superhosts : `host_is_superhost`- hosting type: [`property_type`, `room_type`, `price`, ...]
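The cleaned listings files loaded below already include a `year` column used for the growth analysis; presumably it was derived from `host_since` during the cleaning step, which is not shown here. A minimal, hypothetical sketch of that derivation (assuming `host_since` holds date strings such as '2014-06-12'):
###Code
# Hypothetical sketch: derive a 'year' column from 'host_since'
# (assumes a 'listings' dataframe with a 'host_since' date-string column)
import pandas as pd

def add_host_year(listings):
    listings = listings.copy()
    listings['host_since'] = pd.to_datetime(listings['host_since'], errors='coerce')
    listings['year'] = listings['host_since'].dt.year
    return listings
###Output
_____no_output_____
###Markdown
With the `year` column in place, load the cleaned listings data: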
###Code
# Load cleaned dataframes
host_seattle = pd.read_csv('data/listings_seattle_cleaned.csv')
host_boston = pd.read_csv('data/listings_boston_cleaned.csv')
###Output
_____no_output_____
###Markdown
Yearly growth of new listingsAggregated to the number of unique host ids per year
###Code
new_hosting_seattle = host_seattle.groupby('year').count()['host_id']
new_hosting_boston = host_boston.groupby('year').count()['host_id']
analysis_new_hostings = pd.concat([new_hosting_seattle.rename('new_hosting_seattle'),
new_hosting_boston.rename('new_hosting_boston')], axis=1)
analysis_new_hostings.plot.bar(figsize=(10,5), width=0.8, color=['#f39c12', '#1f77b4'])
plt.title('New Airbnb Hostings in 2016\nSeattle and Boston area')
plt.xlabel('')
plt.ylabel('count')
plt.xticks(rotation=0)
plt.legend(labels=['Seattle', 'Boston'], loc='upper center', ncol=6)
plt.savefig('assets/newListings.png', format='png')
plt.show()
###Output
_____no_output_____
###Markdown
The number of new hosts joining has been growing rapidly (arguably exponentially) since Airbnb's founding in 2008, in both the Seattle and Boston markets. The number of new hostings is larger in Seattle than in Boston, which may be due to many factors, e.g. more favourable regulation, demographics, market activity, etc. However, new hostings slowed down significantly in 2016 in both the Seattle and Boston markets. This may be the result of an error in data collection, but assuming the data is reliable, new regulatory moves and tax policy may have made potential hosts more cautious about renting out their properties in Seattle, as considered earlier in the RevPAR analysis (resources below).- Concerns over the company's effect on local housing affordability, and the expectation of political moves to regulate its activity: [source1](https://www.seattletimes.com/business/airbnb-says-its-rentals-arent-affecting-housing-affordability/) [source2](https://www.geekwire.com/2016/seattle-regulates-airbnb-company-releases-study-showing-178m-annual-impact-local-economy/)- Airbnb announcing it would start collecting taxes in Washington state: [source](https://www.geekwire.com/2015/airbnb-will-start-collecting-taxes-in-washington-state-on-behalf-of-hosts) EXTRA analysis of general hosting statistics To gain insights into hostings : `superhost`, `property_type`, `room_type`, `price` What percentage of hostings do superhosts account for?
###Code
is_superhost_sea = host_seattle.host_is_superhost.value_counts()
is_superhost_bos = host_boston.host_is_superhost.value_counts()
fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True, figsize=(12,4))
ax1.pie(is_superhost_sea, labels=['Not superhost', 'Superhost'], autopct='%1.1f%%', explode=(0, 0.1),
colors=['#d3d3d3', '#f39c12'])
ax2.pie(is_superhost_bos, labels=['Not superhost', 'Superhost'], autopct='%1.1f%%', explode=(0, 0.1),
colors=['#d3d3d3', '#1f77b4'])
ax1.set_title(f'Proportion of Superhost in Seattle')
ax2.set_title(f'Proportion of Superhost in Boston')
plt.savefig('assets/superhost.png', format='png')
plt.show()
###Output
_____no_output_____
###Markdown
**Terminology:**According to Airbnb, "Superhosts are experienced hosts who provide a shining example for other hosts, and extraordinary experiences for their guests." [Reference](https://www.airbnb.com/help/article/828/what-is-a-superhost)To retain Superhost status, hosts must satisfy the performance standards and other qualifications over the most recent 12 months from the review date. This suggests that superhosts represent dedicated property suppliers in a fairly consistent manner. Source: [here](https://www.airbnb.com/superhost/terms) **Findings:**Nearly 20% of the total hostings are made by Superhosts in Seattle, as opposed to 12% in Boston. This suggests that the overall rental room supply is more consistent and stable in Seattle, with more dedicated property owners. On the other hand, it may also be a barrier to entry for potential hosts, who face stronger competition from existing supply.Whether this affected the sudden decrease in new listings in 2016 is not obvious and leaves room for further analysis, which will not be covered in this notebook. Property type
###Code
property_type_seattle = host_seattle.property_type.value_counts()
property_type_boston = host_boston.property_type.value_counts()
fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True, figsize=(12,4))
ax1.bar(property_type_seattle.index, property_type_seattle.values, color='orange')
ax2.bar(property_type_boston.index, property_type_boston.values)
ax1.set_title(f'Property type in Seattle')
ax2.set_title(f'Property type in Boston')
ax1.set_ylabel('count')
# Two different ways to set xticks in subplot
ax1.set_xticks(property_type_seattle.index)
ax1.set_xticklabels(property_type_seattle.index, rotation=90)
for tick in ax2.get_xticklabels() :
tick.set_rotation(90)
###Output
_____no_output_____
###Markdown
In 2016, House (46%) and Apartment (44%) were the most common property types in Seattle, whereas a large proportion of hosts in Boston (nearly 72% of the total) offered Apartments. Room type
###Code
room_type_seattle = host_seattle.room_type.value_counts()
room_type_boston = host_boston.room_type.value_counts()
fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True, figsize=(10,4))
ax1.pie(room_type_seattle.values, labels=room_type_seattle.index, autopct='%1.1f%%')
ax2.pie(room_type_boston.values, labels=room_type_boston.index, autopct='%1.1f%%')
ax1.set_title(f'Room type in Seattle')
ax2.set_title(f'Room type in Boston')
plt.show()
###Output
_____no_output_____
###Markdown
In both Seattle and Boston, hosts tended to rent out the entire home/apartment rather than a shared or private room. Room Price
###Code
# Convert price into numercial variable
host_seattle.price = convert_price_float(host_seattle.price)
host_boston.price = convert_price_float(host_boston.price)
plt.figure(figsize=(10,6))
plt.subplot(2,1,1)
sns.boxplot(x=host_seattle.price, color='#f39c12')
plt.title('Box Plot Statistics for Hosting Price\n(Unit: US Dollar)\n\nSeattle')
plt.xlabel('')
plt.xlim((0,400))
plt.subplot(2,1,2)
sns.boxplot(x=host_boston.price)
plt.title('Boston')
plt.xlabel('')
plt.xlim((0,400))
plt.tight_layout()
plt.savefig('assets/priceStats.png', format='png')
plt.show()
###Output
_____no_output_____
###Markdown
The room price range is wider in Boston than in Seattle, and the median room price is also higher in Boston. --- `QUESTION3` Which neighborhood has more expensive listings? Keeping only the features relevant to this analysis
###Code
cols_neighbor = [
'id', 'neighborhood_overview', 'street', 'neighbourhood', 'neighbourhood_cleansed',
'neighbourhood_group_cleansed', 'city', 'state', 'zipcode', 'market',
'smart_location', 'country_code', 'country', 'latitude', 'longitude',
'is_location_exact', 'property_type', 'room_type', 'accommodates',
'bathrooms', 'bedrooms', 'beds', 'bed_type', 'amenities', 'square_feet', 'price',
'availability_30', 'availability_60', 'availability_90',
'availability_365'
]
neighbor_seattle_temp = listings_seattle[cols_neighbor].copy()
neighbor_boston_temp = listings_boston[cols_neighbor].copy()
neighbor_seattle_temp.columns
###Output
_____no_output_____
###Markdown
Additional data cleaning
###Code
def wrangle_airbnb_neighbor_data(df) :
'''
Remove duplicates and convert to datatype relevant for the analysis
INPUT : Airbnb 'listings' dataframe trimmed for neighborhood analysis
OUTPUT : A new, cleaned dataframe ready for the analysis
'''
print(f'Original dataframe: {df.shape}')
df_clean = df.copy()
isDuplicated = df_clean.duplicated(subset=['id'], keep=False).sum()
if( isDuplicated != 0 ) :
df_clean = df_clean.drop_duplicates(subset=['id'], keep='last')
df_clean.price = convert_price_float(df_clean.price)
print(f'Cleaned dataframe: {df_clean.shape}')
return df_clean
neighbor_seattle = wrangle_airbnb_neighbor_data(neighbor_seattle_temp)
neighbor_boston = wrangle_airbnb_neighbor_data(neighbor_boston_temp)
###Output
Original dataframe: (3818, 30)
Cleaned dataframe: (3818, 30)
Original dataframe: (3585, 30)
Cleaned dataframe: (3585, 30)
###Markdown
GeoPandas mapping`geodata` folder contains shape files for Seattle and BostonReference: [GeoPandas 101: Plot any data with a latitude and longitude on a map](https://towardsdatascience.com/geopandas-101-plot-any-data-with-a-latitude-and-longitude-on-a-map-98e01944b972) Data source: - [City of Seattle](https://data-seattlecitygis.opendata.arcgis.com/datasets/city-clerk-neighborhoods)- [Boston GIS](https://bostonopendata-boston.opendata.arcgis.com/datasets/3525b0ee6e6b427f9aab5d0a1d0a1a28_0)
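The plotting cells below rely on a small helper, `create_points_geometry`, that is assumed to be defined earlier in the notebook. A minimal, hypothetical sketch of what such a helper could look like, using shapely:
###Code
# Hypothetical sketch of the create_points_geometry helper used below
from shapely.geometry import Point

def create_points_geometry(longitudes, latitudes):
    '''Build a list of shapely Points (x=longitude, y=latitude) for a GeoDataFrame'''
    return [Point(lng, lat) for lng, lat in zip(longitudes, latitudes)]
###Output
_____no_output_____
###Markdown
Load the shape files and build the GeoDataFrames: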
###Code
# Load shape files
seattle_map = gpd.read_file('geodata/seattle/City_Clerk_Neighborhoods.shp')
boston_map = gpd.read_file('geodata/boston/Boston_Neighborhoods.shp')
# Create a list of geometry points with longitude, latitude data
geometry_seattle = create_points_geometry(neighbor_seattle['longitude'], neighbor_seattle['latitude'])
geometry_boston = create_points_geometry(neighbor_boston['longitude'], neighbor_boston['latitude'])
# Create GeoDataFrame and set coordinates reference system (crs)
gdf_seattle = gpd.GeoDataFrame(neighbor_seattle, geometry = geometry_seattle)
gdf_boston = gpd.GeoDataFrame(neighbor_boston, geometry = geometry_boston)
# Additional feature 'geometry' added onto the original dataframes
gdf_seattle.geometry[:2], gdf_boston.geometry[:2]
###Output
_____no_output_____
###Markdown
Visualization and analysisCreating 4 price categories for visualization based on the five-number summary (quartiles) Terminology :- $\text{\$\$\$\$}$ : Top 25%- $\text{\$\$\$}$ : Top 25% - 50%- $\text{\$\$}$ : Bottom 25% - 50%- $\text{\$}$ : Bottom 25%
###Code
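# describe()[3:] keeps min, 25%, 50%, 75% and max, i.e. the five-number summary used as bin edges below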
seattle_price_bins = gdf_seattle.price.describe()[3:]
seattle_price_group = pd.cut(x=gdf_seattle.price, bins=seattle_price_bins.values,
labels=['\$', '\$\$', '\$\$\$', '\$\$\$\$'])
gdf_seattle['price_group'] = seattle_price_group
# To create texts index for top priced neighorhoods
neighbor_sea = gdf_seattle.groupby('neighbourhood_cleansed').mean()
lat_lng_sea = neighbor_sea[['latitude', 'longitude', 'price']].sort_values(by='price', ascending=False)
price_summary_sea = lat_lng_sea.price.describe()[3:]
price_summary_sea
seattle_map.plot(alpha=0.4, color='grey', figsize=(10, 10))
sns.scatterplot(data=gdf_seattle, x='longitude', y='latitude', hue='price_group',
palette='Oranges')
plt.title('Price map in Seattle')
plt.xticks(rotation=45)
plt.savefig('assets/seattlePriceMap.png', format='png')
plt.show()
# Simplified plot
seattle_map.plot(alpha=0.4, color='grey', figsize=(9, 9))
plt.title('Price map by neighborhood')
for i, values in enumerate(lat_lng_sea.itertuples()) :
area, lat, lng, price = values
if price > price_summary_sea[3]:
plt.text(x=lng, y=lat+0.01, s='\$\$\$\$',
backgroundcolor='#E74C3C', color='#f3f3f3')
elif price > price_summary_sea[2] :
plt.text(x=lng, y=lat+0.01, s='\$\$\$', backgroundcolor='#F39C12', color='#f3f3f3')
elif price < price_summary_sea[1] :
plt.text(x=lng, y=lat+0.01, s='\$', backgroundcolor='#FEF9E7')
else:
plt.text(x=lng, y=lat+0.01, s='\$\$', backgroundcolor='#F7DC6F')
plt.xticks(rotation=45)
plt.show()
###Output
_____no_output_____
###Markdown
Rental prices are generally higher around the central bay area and get cheaper as properties move away from central Seattle.
###Code
round(neighbor_sea['price'].nlargest(10), 2)
###Output
_____no_output_____
###Markdown
Plotting Boston price distribution
###Code
boston_price_bins = gdf_boston.price.describe()[3:]
boston_price_group = pd.cut(x=gdf_boston.price, bins=boston_price_bins.values,
labels=['\$', '\$\$', '\$\$\$', '\$\$\$\$'])
gdf_boston['price_group'] = boston_price_group
# To create texts index for top priced neighorhoods
neighbor_bos = gdf_boston.groupby('neighbourhood_cleansed').mean()
lat_lng_bos = neighbor_bos[['latitude', 'longitude', 'price']].sort_values(by='price', ascending=False)
price_summary_bos = lat_lng_bos.price.describe()[3:]
boston_map.plot(alpha=0.4, color='grey', figsize=(10,10))
sns.scatterplot(data=gdf_boston, x='longitude', y='latitude', hue='price_group',
palette='Blues')
plt.title('Price map in Boston')
plt.xticks(rotation=45)
# plt.savefig('assets/bostonPriceMap.png', format='png')
plt.show()
# Simplified plot
boston_map.plot(alpha=0.4, color='grey', figsize=(7, 7))
plt.title('Price map by neighborhood')
for i, values in enumerate(lat_lng_bos.itertuples()) :
area, lat, lng, price = values
if price > price_summary_bos[3]:
plt.text(x=lng, y=lat+0.01, s='\$\$\$\$',
backgroundcolor='#E74C3C', color='#f3f3f3')
elif price > price_summary_bos[2] :
plt.text(x=lng, y=lat+0.01, s='\$\$\$', backgroundcolor='#F39C12', color='#f3f3f3')
elif price < price_summary_bos[1] :
plt.text(x=lng, y=lat+0.01, s='\$', backgroundcolor='#FEF9E7')
else:
plt.text(x=lng, y=lat+0.01, s='\$\$', backgroundcolor='#F7DC6F')
plt.xticks(rotation=45)
plt.show()
###Output
_____no_output_____
###Markdown
Just like in Seattle, listings in the downtown area are generally the most expensive in Boston, and prices get lower as properties move away from the center.
###Code
round(neighbor_bos['price'].nlargest(10), 2)
###Output
_____no_output_____
###Markdown
Evaluation of Search AlgorithmsThis notebook analyzes data generated from the five search algorithms used in the On The Town application. They are:* Depth-First Search* Breadth-First Search* Greedy Search* Uniform Cost Search* A Star SearchThe data were generated from the file `evaluation.ipynb`.
###Code
import pandas as pd
import plotly
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
plotly.tools.set_credentials_file(username='hangulu', api_key='xzmidZGzwl63Twl6sytL')
# Read the results from the CSV file
data = pd.read_csv("./data/simulation_results.csv")
data.head()
###Output
_____no_output_____
###Markdown
900 tests were run. Each test was limited to 60 seconds of evaluation time and consisted of running all 5 algorithms sequentially and recording:* whether a solution was reached* the time each took to reach a solution* the average sadness garnered by that solution. The following checks the percentage of times each algorithm returned no solution.
###Code
# DFS
dfs_no_soln = (data.dfs_sad.isna().sum()) / 900.
print "DFS failed " + str((dfs_no_soln * 100)) + "% of the time."
# BFS
bfs_no_soln = (data.bfs_sad.isna().sum()) / 900.
print "BFS failed " + str((bfs_no_soln * 100)) + "% of the time."
# Greedy Search
greedy_no_soln = (data.greedy_sad.isna().sum()) / 900.
print "Greedy Search failed " + str((greedy_no_soln * 100)) + "% of the time."
# UCS
ucs_no_soln = (data.ucs_sad.isna().sum()) / 900.
print "UCS failed " + str((ucs_no_soln * 100)) + "% of the time."
# A* Search
astar_no_soln = (data.astar_sad.isna().sum()) / 900.
print "A* Search failed " + str((astar_no_soln * 100)) + "% of the time."
###Output
DFS failed 32.0% of the time.
BFS failed 88.88888888888889% of the time.
Greedy Search failed 14.888888888888888% of the time.
UCS failed 39.666666666666664% of the time.
A* Search failed 15.88888888888889% of the time.
###Markdown
The following are the results:* DFS failed 32.000% of the time.* BFS failed 88.889% of the time.* Greedy Search failed 14.889% of the time.* UCS failed 39.667% of the time.* A* Search failed 15.889% of the time.Next, the average time elapsed per simulation is analyzed. This analysis ignores runs in which an algorithm took more than 1 minute to complete, and thus reports the average runtime of the completed runs only.
###Code
# DFS
avg_dfs_time = (data[data.dfs_time < 60].dfs_time.sum()) / (data[data.dfs_time < 60].dfs_time.count())
print "When completed, DFS took " + str(avg_dfs_time) + " seconds on average."
# BFS
avg_bfs_time = (data[data.bfs_time < 60].bfs_time.sum()) / (data[data.bfs_time < 60].bfs_time.count())
print "When completed, BFS took " + str(avg_bfs_time) + " seconds on average."
# Greedy
avg_greedy_time = (data[data.greedy_time < 60].greedy_time.sum()) / (data[data.greedy_time < 60].greedy_time.count())
print "When completed, Greedy Search took " + str(avg_greedy_time) + " seconds on average."
# UCS
avg_ucs_time = (data[data.ucs_time < 60].ucs_time.sum()) / (data[data.ucs_time < 60].ucs_time.count())
print "When completed, UCS took " + str(avg_ucs_time) + " seconds on average."
# A* Search
avg_astar_time = (data[data.astar_time < 60].astar_time.sum()) / (data[data.astar_time < 60].astar_time.count())
print "When completed, A* Search took " + str(avg_astar_time) + " seconds on average."
###Output
When completed, DFS took 0.28874309963650174 seconds on average.
When completed, BFS took 10.948256943560294 seconds on average.
When completed, Greedy Search took 1.047404620783899 seconds on average.
When completed, UCS took 15.10807837055115 seconds on average.
When completed, A* Search took 3.098107170896465 seconds on average.
###Markdown
The following are the results:* When completed, DFS took 0.289 seconds on average.* When completed, BFS took 10.948 seconds on average.* When completed, Greedy Search took 1.047 seconds on average.* When completed, UCS took 15.108 seconds on average.* When completed, A* Search took 3.0981 seconds on average.Next, the average sadness generated at each simulation is analyzed. Again, this analysis ignores the times the algorithms failed (took more than 1 minute or did not find a solution).
###Code
# DFS
avg_dfs_sad = data.dfs_sad.mean()
print "When completed, DFS produced an average sadness score of " + str(avg_dfs_sad) + "."
# BFS
avg_bfs_sad = data.bfs_sad.mean()
print "When completed, BFS produced an average sadness score of " + str(avg_bfs_sad) + "."
# Greedy
avg_greedy_sad = data.greedy_sad.mean()
print "When completed, Greedy Search produced an average sadness score of " + str(avg_greedy_sad) + "."
# UCS
avg_ucs_sad = data.ucs_sad.mean()
print "When completed, UCS produced an average sadness score of " + str(avg_ucs_sad) + "."
# A* Search
avg_astar_sad = data.astar_sad.mean()
print "When completed, A* Search produced an average sadness score of " + str(avg_astar_sad) + "."
###Output
When completed, DFS produced an average sadness score of 28.1894960099.
When completed, BFS produced an average sadness score of 26.9977561181.
When completed, Greedy Search produced an average sadness score of 22.5920907814.
When completed, UCS produced an average sadness score of 16.3321656619.
When completed, A* Search produced an average sadness score of 19.0470854323.
###Markdown
The following are the results:* When completed, DFS produced an average sadness score of 28.189.* When completed, BFS produced an average sadness score of 26.998.* When completed, Greedy Search produced an average sadness score of 22.592.* When completed, UCS produced an average sadness score of 16.332.* When completed, A* Search produced an average sadness score of 19.047.Next, the algorithms are compared graphically. The following graphs are constructed for comparison:* bar charts comparing the number of times each algorithm fails* histograms showing the distribution of time each algorithm takes* bar charts comparing the average time each algorithm takes* histograms showing the distribution of sadness scores* bar charts comparing the average sadness scores for each algorithm
###Code
# Bar chart for tests failed
trace = {
'x': ['DFS', 'BFS', 'Greedy', 'UCS', 'A*'],
'y': [32.0, 88.88888888888889, 14.888888888888888, 39.666666666666664, 15.88888888888889],
'type': 'bar',
'marker': dict(color='rgb(255, 121, 121)')
}
chart = [trace]
layout = {
'xaxis': {'title': 'Search Algorithm'},
'yaxis': {'title': 'Percentage Of Tests Failed (%)'},
'title': 'Percentage Failure Of Each Search Algorithm'
};
plotly.plotly.iplot({'data': chart, 'layout': layout}, filename='fail_bar')
# Histogram for time
dfs_time = data.dfs_time
bfs_time = data.bfs_time
greedy_time = data.greedy_time
ucs_time = data.ucs_time
astar_time = data.astar_time
trace0 = go.Histogram(x=dfs_time, name='DFS')
trace1 = go.Histogram(x=bfs_time, name='BFS')
trace2 = go.Histogram(x=greedy_time, name='Greedy')
trace3 = go.Histogram(x=ucs_time, name='UCS')
trace4 = go.Histogram(x=astar_time, name='A*')
fig = plotly.tools.make_subplots(rows=3, cols=2)
fig.append_trace(trace0, 1, 1)
fig.append_trace(trace1, 1, 2)
fig.append_trace(trace2, 2, 1)
fig.append_trace(trace3, 2, 2)
fig.append_trace(trace4, 3, 1)
fig['layout']['yaxis1'].update(title='Frequency', showgrid=False)
fig['layout']['yaxis2'].update(title='Frequency', showgrid=False)
fig['layout']['yaxis3'].update(title='Frequency', showgrid=False)
fig['layout']['yaxis4'].update(title='Frequency', showgrid=False)
fig['layout']['yaxis5'].update(title='Frequency', showgrid=False)
fig['layout']['xaxis1'].update(title='DFS Evaluation Time (seconds)', showgrid=False)
fig['layout']['xaxis2'].update(title='BFS Evaluation Time (seconds)', showgrid=False)
fig['layout']['xaxis3'].update(title='Greedy Evaluation Time (seconds)', showgrid=False)
fig['layout']['xaxis4'].update(title='UCS Evaluation Time (seconds)', showgrid=False)
fig['layout']['xaxis5'].update(title='A* Evaluation Time (seconds)', showgrid=False)
fig['layout'].update(title='Distribution Of Evaluation Time For Each Search Algorithm')
plotly.plotly.iplot(fig, filename='time_hist')
# Bar chart for time
trace = {
'x': ['DFS', 'BFS', 'Greedy', 'UCS', 'A*'],
'y': [0.28874309963650174, 10.948256943560294, 1.047404620783899, 15.10807837055115, 3.098107170896465],
'type': 'bar',
'marker': dict(color='rgb(246, 229, 141)')
}
chart = [trace]
layout = {
'xaxis': {'title': 'Search Algorithm'},
'yaxis': {'title': 'Average Evaluation Time (seconds)'},
'title': 'Average Evaluation Time Of Each Search Algorithm (When Complete)'
};
plotly.plotly.iplot({'data': chart, 'layout': layout}, filename='time_bar')
# Histogram for sadness
dfs_sad = data.dfs_sad
bfs_sad = data.bfs_sad
greedy_sad = data.greedy_sad
ucs_sad = data.ucs_sad
astar_sad = data.astar_sad
trace0 = go.Histogram(x=dfs_sad, name='DFS')
trace1 = go.Histogram(x=bfs_sad, name='BFS')
trace2 = go.Histogram(x=greedy_sad, name='Greedy')
trace3 = go.Histogram(x=ucs_sad, name='UCS')
trace4 = go.Histogram(x=astar_sad, name='A*')
fig = plotly.tools.make_subplots(rows=3, cols=2)
fig.append_trace(trace0, 1, 1)
fig.append_trace(trace1, 1, 2)
fig.append_trace(trace2, 2, 1)
fig.append_trace(trace3, 2, 2)
fig.append_trace(trace4, 3, 1)
fig['layout']['yaxis1'].update(title='Frequency', showgrid=False)
fig['layout']['yaxis2'].update(title='Frequency', showgrid=False)
fig['layout']['yaxis3'].update(title='Frequency', showgrid=False)
fig['layout']['yaxis4'].update(title='Frequency', showgrid=False)
fig['layout']['yaxis5'].update(title='Frequency', showgrid=False)
fig['layout']['xaxis1'].update(title='DFS Sadness', showgrid=False)
fig['layout']['xaxis2'].update(title='BFS Sadness', showgrid=False)
fig['layout']['xaxis3'].update(title='Greedy Sadness', showgrid=False)
fig['layout']['xaxis4'].update(title='UCS Sadness', showgrid=False)
fig['layout']['xaxis5'].update(title='A* Sadness', showgrid=False)
fig['layout'].update(title='Distribution Of Sadness For Each Search Algorithm')
plotly.plotly.iplot(fig, filename='sad_hist')
# Bar chart for sadness
trace = {
'x': ['DFS', 'BFS', 'Greedy', 'UCS', 'A*'],
'y': [28.1894960099, 26.9977561181, 22.5920907814, 16.3321656619, 19.0470854323],
'type': 'bar',
'marker': dict(color='rgb(186, 220, 88)')
}
chart = [trace]
layout = {
'xaxis': {'title': 'Search Algorithm'},
'yaxis': {'title': 'Average Sadness'},
'title': 'Average Sadness Of Each Search Algorithm (When Complete)'
};
plotly.plotly.iplot({'data': chart, 'layout': layout}, filename='sad_bar')
###Output
_____no_output_____
###Markdown
Generating possible answersI generated all possible answers using the code in `nerdle.py`. This should match the set of possible nerdle answers, as the number generated (17723) equals the number of possible solutions mentioned on the nerdle website.Some rules are covered [in the nerdle faq](https://faqs.nerdlegame.com/), but here's a summary: - The result (after =) must be a positive integer or 0. - Division is treated as normal division (not integer division, no rounding). - Lone 0's are not allowed in the LHS. - Leading 0's are not allowed anywhere. - Negative numbers cannot be used (and you cannot use + as a unary operator) anywhere. - Order of operations applies.
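To make these rules concrete, here is a rough, hypothetical sketch of a validity check for a candidate 8-character answer; the actual generator lives in `nerdle.py` and may implement the rules differently:
###Code
# Hypothetical sketch of a nerdle validity check (the real generator is in nerdle.py)
import re
from fractions import Fraction

def is_valid_nerdle(equation: str) -> bool:
    if len(equation) != 8 or equation.count("=") != 1:
        return False
    lhs, rhs = equation.split("=")
    # result must be a non-negative integer with no leading zero
    if not rhs.isdigit() or (len(rhs) > 1 and rhs[0] == "0"):
        return False
    # LHS numbers: no empty slots (e.g. '1++2'), no lone 0, no leading zeros
    numbers = re.split(r"[+\-*/]", lhs)
    if any(n == "" or n == "0" or (len(n) > 1 and n[0] == "0") for n in numbers):
        return False
    # evaluate with exact fractions so ordinary (non-integer) division is handled
    expr = re.sub(r"\d+", lambda m: f"Fraction({m.group()})", lhs)
    try:
        return eval(expr, {"Fraction": Fraction}) == int(rhs)
    except ZeroDivisionError:
        return False

# e.g. is_valid_nerdle("12+34=46") -> True, is_valid_nerdle("10/4+2=4") -> False
###Output
_____no_output_____
###Markdown
Now load the generated answers and count symbol frequencies per position: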
###Code
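# Assumed setup for this notebook (not shown in the cells above): imports and the
# symbol/position constants used throughout the plots below. The exact definitions
# in the original notebook may differ.
from collections import Counter
from matplotlib import pyplot

SYMBOLS = "0123456789+-*/="
POSITIONS = [str(i) for i in range(1, 9)]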
with open("all_answers.txt", "r") as f:
data = f.read().splitlines()
print(data[:5])
print(len(data))
frequencies: dict[int, Counter] = {}
for equation in data:
for pos, char in enumerate(equation, 1):
frequencies.setdefault(pos, Counter())[char] += 1
# frequencies[4] gives total counts for each symbol at the 4th position in the equation.
frequencies[4].most_common(5)
###Output
_____no_output_____
###Markdown
Frequency of each symbol across all answersShows the probability that, if you pick a random character from a random answer, it will be that symbol
###Code
fig, ax = pyplot.subplots()
totals = []
for symbol in SYMBOLS:
total = sum(position[symbol] for position in frequencies.values())
totals.append(total)
total_sum = sum(totals)
totals = [i / total_sum for i in totals]
ax.set_title("Frequency of occurences of each symbol over all solutions")
ax.bar(SYMBOLS, totals)
fig.savefig("plots/symbol_frequency.jpg")
###Output
_____no_output_____
###Markdown
Probability of each symbol occurringSimilar to the last one, although it only takes into account whether the symbol occurs *somewhere* in the solution, not how many times it occurs.
###Code
fig, ax = pyplot.subplots()
totals = []
for symbol in SYMBOLS:
totals.append(sum(symbol in line for line in data) / len(data))
ax.set_title("Probability of occurence of each symbol in a given solution")
ax.bar(SYMBOLS, totals)
fig.savefig("plots/symbol_probability.jpg")
###Output
_____no_output_____
###Markdown
Frequency of occurrence of each symbol in a given position
###Code
fig, ax = pyplot.subplots(nrows=4, ncols=2)
ax = ax.flatten()
fig.set_figheight(20)
fig.set_figwidth(15)
for n, position in enumerate(POSITIONS):
totals = []
for symbol in SYMBOLS:
totals.append(frequencies[int(position)][symbol])
totals = [i / len(data) for i in totals]
ax[n].set_title(f"Frequency of occurence of symbols in position {position}")
ax[n].bar(SYMBOLS, totals)
fig.savefig("plots/frequency_of_symbols_per_position.jpg")
###Output
_____no_output_____
###Markdown
Probability of occurring in a given position, per symbolThis shows, for each symbol, the probability that it will appear in a given position in the answer
###Code
fig, ax = pyplot.subplots(nrows=5, ncols=3)
ax = ax.flatten()
fig.set_figheight(30)
fig.set_figwidth(20)
for n, symbol in enumerate(SYMBOLS):
totals = []
for position in POSITIONS:
totals.append(frequencies[int(position)][symbol])
total_sum = sum(totals)
totals = [i / total_sum for i in totals]
ax[n].set_title(f"Frequency of occurences, per position, of '{symbol}'")
ax[n].bar(POSITIONS, totals)
fig.savefig("plots/frequency_of_positions_per_symbol.jpg")
###Output
_____no_output_____
###Markdown
Permutations of operators by frequencyThis shows the probability that a result will be made up of operators in a given order. For example `+` would be any answer involving just a `+`, such as `12+34=46`. There are 20 possibilities
###Code
operator_orders = Counter()
for line in data:
order = ''.join(i for i in line if i not in "0123456789").rstrip("=")
operator_orders[order] += 1 / len(data)
for pos, (form, prob) in enumerate(operator_orders.most_common(), 1):
print(f'{pos:>2}) {form}: {prob*100:5.2f}%')
###Output
1) +: 18.79%
2) -: 18.79%
3) /: 6.93%
4) *: 6.93%
5) *-: 5.86%
6) -*: 4.01%
7) --: 3.89%
8) /-: 3.83%
9) *+: 3.68%
10) +*: 3.68%
11) ++: 3.64%
12) +-: 2.96%
13) -+: 2.96%
14) */: 2.60%
15) /*: 2.60%
16) **: 2.11%
17) //: 2.11%
18) -/: 1.77%
19) +/: 1.44%
20) /+: 1.44%
###Markdown
Most Frequent Answer FormatsSimilar to the last one, although more interesting as it takes into account the number of digits between operators too. There are 44 possibilities.
###Code
operator_orders = Counter()
for line in data:
order = ''.join("x" if i in "0123456789" else i for i in line)
operator_orders[order] += 1 / len(data)
for pos, (form, prob) in enumerate(operator_orders.most_common(), 1):
print(f'{pos:>2}) {form}: {prob*100:5.2f}%')
###Output
1) xx+xx=xx: 18.28%
2) xx-xx=xx: 18.28%
3) xx-x*x=x: 4.01%
4) xx-x-x=x: 3.89%
5) xx/x-x=x: 3.83%
6) x*x+x=xx: 3.68%
7) x+x*x=xx: 3.68%
8) x+x+x=xx: 3.64%
9) xxx/xx=x: 3.46%
10) xxx/x=xx: 3.46%
11) xx*x=xxx: 3.46%
12) x*xx=xxx: 3.46%
13) x*x-xx=x: 2.84%
14) x*x-x=xx: 2.51%
15) x*x*x=xx: 2.11%
16) xx/x/x=x: 2.11%
17) x-xx/x=x: 1.38%
18) x+xx/x=x: 1.05%
19) xx/x+x=x: 1.05%
20) x+x-xx=x: 0.93%
21) x-xx+x=x: 0.93%
22) x+xx-x=x: 0.68%
23) x-x+xx=x: 0.68%
24) xx+x-x=x: 0.68%
25) xx-x+x=x: 0.68%
26) x+x-x=xx: 0.68%
27) x-x+x=xx: 0.68%
28) x*xx/x=x: 0.65%
29) x/x*xx=x: 0.65%
30) xx*x/x=x: 0.65%
31) xx/x*x=x: 0.65%
32) x*x/xx=x: 0.65%
33) x*x/x=xx: 0.65%
34) x/x*x=xx: 0.65%
35) x/xx*x=x: 0.65%
36) x+x/x=xx: 0.39%
37) x/x+x=xx: 0.39%
38) xx-x/x=x: 0.39%
39) x*xx-x=x: 0.25%
40) x+xx=xxx: 0.25%
41) xx*x-x=x: 0.25%
42) xxx-x=xx: 0.25%
43) xxx-xx=x: 0.25%
44) xx+x=xxx: 0.25%
###Markdown
Sentiment Analysis In this notebook we aim at training an out-of-core algorithm on a database of opinions (in Polish) about cars - see the db_cars folder. Data loading The opinion classes are imbalanced, i.e. the ratio of negative to positive opinions is only around 6%. Therefore, in order to obtain evenly distributed opinions we can either downsample the majority class (positive) or upsample the minority class (negative). With a logistic regression classifier, the first option gives approx. 70% accuracy, whereas the second gives approx. 90%.
###Code
import pandas as pd
import os
import re
from sklearn.utils import resample
basepath = './db_cars/data/'
labels = {'pos': 1, 'neg': 0}
# 1) Downsample majority class (positive opinions)
def fetch_data_downsample():
df = pd.DataFrame()
neg_numbers = {} # numbers of negative opinions in files
for label in ['neg', 'pos']:
path = os.path.join(basepath, label)
for file in os.listdir(path):
print(label, file)
number = 0
for line in open(os.path.join(path, file), 'r', encoding='utf-8'):
if line != '\n': # skip empty lines
number += 1
text = re.sub('\n$', '', line) # remove end line sign
df = df.append([[text, labels[label]]], ignore_index=True)
if label == 'neg':
neg_numbers[file] = number
elif neg_numbers[file] == number:
break
df.columns = ['review', 'sentiment']
return df
# 2) Upsample minority class (negative opinions)
def fetch_data_upsample():
df = pd.DataFrame()
for label in ['neg', 'pos']:
path = os.path.join(basepath, label)
for file in os.listdir(path):
print(label, file)
for line in open(os.path.join(path, file), 'r', encoding='utf-8'):
if line != '\n': # skip empty lines
text = re.sub('\n$', '', line) # remove end line sign
df = df.append([[text, labels[label]]], ignore_index=False)
df.columns = ['review', 'sentiment']
return upsample_minority(df)
def upsample_minority(df):
# Separate majority and minority classes
df_minority = df[df.sentiment==0]
df_majority = df[df.sentiment==1]
# Upsample minority class
majority_number = df['sentiment'].value_counts()[1]
df_minority_upsampled = resample(df_minority,
replace=True, # sample with replacement
n_samples = majority_number, # to match majority class
random_state=0)
# Combine majority class with upsampled minority class
return pd.concat([df_majority, df_minority_upsampled], ignore_index=True)
#df = fetch_data_downsample()
#db_path = './db_cars_downsampled.csv'
df = fetch_data_upsample()
db_path = './db_cars_upsampled.csv'
###Output
neg peugeot
neg kia
neg hyundai
neg mazda
neg opel
neg lancia
neg renault
neg citroen
neg volkswagen
neg ford
neg ssangyong
neg skoda
neg nissan
neg fiat
neg mitsubishi
pos peugeot
pos kia
pos hyundai
pos mazda
pos opel
pos lancia
pos renault
pos citroen
pos volkswagen
pos ford
pos ssangyong
pos skoda
pos nissan
pos fiat
pos mitsubishi
###Markdown
Class counts:
###Code
df['sentiment'].value_counts()
###Output
_____no_output_____
###Markdown
Shuffling the DataFrame:
###Code
import numpy as np
np.random.seed(1)
df = df.reindex(np.random.permutation(df.index))
###Output
_____no_output_____
###Markdown
Optional: saving the assembled data as CSV file:
###Code
# df.to_csv(db_path, index=False) # uncomment this !
import pandas as pd
df = pd.read_csv(db_path)
df.head(5)
###Output
_____no_output_____
###Markdown
Data processing - test Below we will process our data in order to get rid of meaningless words, endings etc.
###Code
def get_file_content(basepath, file):
path = os.path.join(basepath, file)
with open(path, 'r', encoding='utf-8') as infile:
return infile.read().split()
basepath = './processing_tools/'
stop_polish = get_file_content(basepath, 'stopwords_polish')
stop_cars = get_file_content(basepath, 'stopwords_cars')
# stop words
stop = stop_polish + stop_cars
# Polish endings
endings = get_file_content(basepath, 'endings_polish')
import re
example = 'nie na 8/30, moglibysmy, oceniam na 29%. Jestem,naprawdę zadowolony i mimo, \
że już nie chciałem kupować :p :D po45 767 raz kolejny nowego \
auta ze względu:-) na;( dużą utratę wartości, \
to Lancia bardzo sku11tecznie 100 tys km iii osładza świadomość utraty finansowej45%. :)'
polish_letters = [
('ą','a'), ('ć','c'), ('ę','e'), ('ł','l'), ('ń','n'),
('ó','o'), ('ś','s'), ('ź','z'), ('ż','z')]
def fetch_important(text):
# fetch emoticons
emoticons = re.findall('[:;=]-?[()DPp]', text)
emoticons = [e.replace('-','') for e in emoticons]
# fetch rates (e.g. 8/10 or 100%)
rates = re.findall('(\d+/\d+|\d+%)', text)
return emoticons + rates
def preprocessor(text):
# remove non-letter characters
text = re.sub('\W+', ' ', text)
# remove terms that contain digits
text = re.sub('[\w]*\d+[\w]*', '', text)
# to lower case
text = text.lower()
# remove Polish letters
for (i, j) in polish_letters:
text = re.sub(i, j, text)
# join 'nie' with subsequent word
text = re.sub('(^|\s)(nie)\s+', ' nie', text)
return text
print(preprocessor(example))
def remove_endings(word):
for ending in endings:
word = re.sub(ending+'$','', word)
return word
def tokenizer(text):
# fetch important tokens (emoticons and rates)
important = fetch_important(text)
# clean text
processed = preprocessor(text)
# remove irrelevant words (one-letter, Polish, car-specific)
words = [w for w in processed.split() if len(w) > 1 and w not in stop]
# remove Polish endings
tokens = [remove_endings(w) for w in words]
return tokens + important
example
print(tokenizer(example))
###Output
['niena', 'mogli', 'oceniam', 'naprawde', 'zadowolony', 'mimo', 'juz', 'niechcialem', 'kupowac', 'kolejny', 'nowego', 'auta', 'wzgledu', 'duza', 'utrate', 'wartosci', 'lancia', 'bardzo', 'osladza', 'swiadomosc', 'utraty', ':p', ':D', ':)', ';(', ':)', '8/30', '29%', '45%']
###Markdown
Out-of-core learning Training a logistic regression model with the SGDClassifier
###Code
def stream_docs(path):
with open(path, 'r') as csv:
next(csv) # skip header
for line in csv:
text, label = line[:-3], int(line[-2])
yield text, label
db_example = next(stream_docs(path = db_path))
print(db_example)
print(tokenizer(db_example[0]))
def get_minibatch(doc_stream, size):
docs, y = [], []
try:
for _ in range(size):
text, label = next(doc_stream)
docs.append(text)
y.append(label)
except StopIteration:
return None, None
return docs, y
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
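# HashingVectorizer is stateless (no vocabulary to fit), which makes it suitable for out-of-core / minibatch learning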
vect = HashingVectorizer(decode_error='ignore',
n_features=2**21,
preprocessor=None,
tokenizer=tokenizer)
clf = SGDClassifier(loss='log', random_state=1, max_iter=1)
doc_stream = stream_docs(path= db_path)
classes = np.array([0, 1])
for _ in range(9):
X_train, y_train = get_minibatch(doc_stream, size=2000)
if not X_train:
break
X_train = vect.transform(X_train)
clf.partial_fit(X_train, y_train, classes=classes)
X_test, y_test = get_minibatch(doc_stream, size=2312)
X_test = vect.transform(X_test)
print('Accuracy: %.3f' % clf.score(X_test, y_test))
clf = clf.partial_fit(X_test, y_test)
###Output
_____no_output_____
###Markdown
Serialization Saving objects that will be used by our vectorizer
###Code
import pickle
import os
dest = os.path.join('carclassifier', 'pkl_objects')
if not os.path.exists(dest):
os.makedirs(dest)
pickle.dump(stop, open(os.path.join(dest, 'stopwords.pkl'), 'wb'), protocol=4)
pickle.dump(endings, open(os.path.join(dest, 'endings.pkl'), 'wb'), protocol=4)
pickle.dump(clf, open(os.path.join(dest, 'classifier.pkl'), 'wb'), protocol=4)
###Output
_____no_output_____
###Markdown
Saving the vectorizer into a `.py` file
###Code
%%writefile carclassifier/vectorizer.py
from sklearn.feature_extraction.text import HashingVectorizer
import re
import os
import pickle
cur_dir = os.path.dirname(__file__)
stop = pickle.load(open(
os.path.join(cur_dir,
'pkl_objects',
'stopwords.pkl'), 'rb'))
endings = pickle.load(open(
os.path.join(cur_dir,
'pkl_objects',
'endings.pkl'), 'rb'))
polish_letters = [
('ą','a'), ('ć','c'), ('ę','e'), ('ł','l'), ('ń','n'),
('ó','o'), ('ś','s'), ('ź','z'), ('ż','z')]
def fetch_important(text):
emoticons = re.findall('[:;=]-?[()DPp]', text)
emoticons = [e.replace('-','') for e in emoticons]
rates = re.findall('(\d+/\d+|\d+%)', text)
return emoticons + rates
def preprocessor(text):
text = re.sub('\W+', ' ', text)
text = re.sub('[\w]*\d+[\w]*', '', text)
text = text.lower()
for (i, j) in polish_letters:
text = re.sub(i, j, text)
text = re.sub('(^|\s)(nie)\s+', ' nie', text)
return text
def remove_endings(word):
for ending in endings:
word = re.sub(ending+'$','', word)
return word
def tokenizer(text):
important = fetch_important(text)
processed = preprocessor(text)
words = [w for w in processed.split() if len(w) > 1 and w not in stop]
tokens = [remove_endings(w) for w in words]
return tokens + important
vect = HashingVectorizer(decode_error='ignore',
n_features=2**21,
preprocessor=None,
tokenizer=tokenizer)
###Output
Overwriting carclassifier/vectorizer.py
###Markdown
Now, we can check whether everything works properly
###Code
import os
os.chdir('carclassifier')
import pickle
import re
import os
from vectorizer import vect
clf = pickle.load(open(os.path.join('pkl_objects', 'classifier.pkl'), 'rb'))
import numpy as np
labels = {0: 'negative', 1: 'positive'}
example = ['Generalnie polecam ten samochód']
X = vect.transform(example)
print('Prediction: %s\nProbability: %.2f%%' %\
(labels[clf.predict(X)[0]], clf.predict_proba(X).max()*100))
###Output
Prediction: positive
Probability: 86.89%
###Markdown
Creating a SQLite database The SQLite database will store users' opinions
###Code
import sqlite3
import os
if os.path.exists('reviews.sqlite'):
os.remove('reviews.sqlite')
conn = sqlite3.connect('reviews.sqlite')
c = conn.cursor()
c.execute('CREATE TABLE review_db (review TEXT, sentiment INTEGER, date TEXT)')
example1 = 'Generalnie polecam ten samochód'
c.execute('INSERT INTO review_db (review, sentiment, date) VALUES (?, ?, DATETIME("now"))', (example1, 1))
example2 = 'Lepiej sobie darować'
c.execute('INSERT INTO review_db (review, sentiment, date) VALUES (?, ?, DATETIME("now"))', (example2, 0))
conn.commit()
conn.close()
conn = sqlite3.connect('reviews.sqlite')
c = conn.cursor()
c.execute('SELECT * FROM review_db')
results = c.fetchall()
conn.close()
print(results)
###Output
[('Generalnie polecam ten samochód', 1, '2018-03-14 13:08:55'), ('Lepiej sobie darować', 0, '2018-03-14 13:08:55')]
###Markdown
Analysis
###Code
import os
import gc
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
%matplotlib notebook
gc.collect()
###Output
_____no_output_____
###Markdown
Set: "Alumnos" Read dataframe
###Code
df_a = pd.read_csv('alumnos.csv')
###Output
_____no_output_____
###Markdown
Select only those columns having relevant info
###Code
aux_col = [x for x in df_a.columns if 'v' in x]
###Output
_____no_output_____
###Markdown
Matrix scatter
###Code
if False:
sns.set(style='ticks')
s_matrix = sns.pairplot(df_a[aux_col + ['curso']], hue='curso')
s_matrix.savefig('alumnos.pdf', dpi=500, format='pdf')
###Output
_____no_output_____
###Markdown
Define macro-variables Confianza (m1) / Adaptacion (m2) / Compromiso (m3) / Conciencia social (m4)Note that some variables appear in more than one macro-variable
###Code
m1 = [13, 14, 15, 16, 17, 18, 20, 21, 22, 26, 27, 28, 29]
m2 = [1, 10, 11, 22, 23, 25, 26]
m3 = [2, 3, 4, 5, 6, 11, 12, 13, 18, 19, 23, 24, 25, 26, 30]
m4 = [1, 7, 8, 9, 11, 15, 19, 22, 30]
#
m1 = ['v{0:02}'.format(n) for n in m1]
m2 = ['v{0:02}'.format(n) for n in m2]
m3 = ['v{0:02}'.format(n) for n in m3]
m4 = ['v{0:02}'.format(n) for n in m4]
equiv = {
'm1': 'Confianza',
'm2': 'Adaptacion',
'm3': 'Compromiso',
'm4': 'Conciencia Social',
}
###Output
_____no_output_____
###Markdown
Division per year
###Code
y1 = df_a.loc[df_a['curso'] == 1]
y4 = df_a.loc[df_a['curso'] == 4]
y5 = df_a.loc[df_a['curso'] == 5]
###Output
_____no_output_____
###Markdown
Define some lists to save correlation tests
###Code
aux_stat, aux_pval, aux_yr, aux_mvar, aux_test = [], [], [], [], []
###Output
_____no_output_____
###Markdown
Statistical tests for the macro-variables: Tests for all variables at once inside each macro-variable ANOVA The one-way ANOVA tests the null hypothesis that two or more groups have the same population mean.* The samples are independent.* Each sample is from a normally distributed population.* The population standard deviations of the groups are all equal. This property is known as homoscedasticity. Doing the analysis for all the years
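(Note: none of these ANOVA assumptions are explicitly verified in this notebook; as an aside, a quick per-variable normality check could look like the following hypothetical sketch, not run here.)
###Code
# Hypothetical sketch: Shapiro-Wilk normality check for each variable of a macro-variable
from scipy import stats

def check_normality(df, columns, alpha=0.05):
    for col in columns:
        stat, p = stats.shapiro(df[col].values)
        print(col, 'normal' if p > alpha else 'not normal', 'p=%.4f' % p)
###Output
_____no_output_____
###Markdown
Returning to the analysis for all the years: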
###Code
m1_anova = stats.f_oneway(*[df_a[c].values for c in m1])
m2_anova = stats.f_oneway(*[df_a[c].values for c in m2])
m3_anova = stats.f_oneway(*[df_a[c].values for c in m3])
m4_anova = stats.f_oneway(*[df_a[c].values for c in m4])
aux_stat.append(m1_anova[0])
aux_pval.append(m1_anova[1])
aux_yr.append(-1)
aux_mvar.append(equiv['m1'])
aux_test.append('anova')
aux_stat.append(m2_anova[0])
aux_pval.append(m2_anova[1])
aux_yr.append(-1)
aux_mvar.append(equiv['m2'])
aux_test.append('anova')
aux_stat.append(m3_anova[0])
aux_pval.append(m3_anova[1])
aux_yr.append(-1)
aux_mvar.append(equiv['m3'])
aux_test.append('anova')
aux_stat.append(m4_anova[0])
aux_pval.append(m4_anova[1])
aux_yr.append(-1)
aux_mvar.append(equiv['m4'])
aux_test.append('anova')
###Output
_____no_output_____
###Markdown
For all the above ANOVA tests, the p-values allow us to reject the null hypothesis that the mean is the same for all the samples. If we now repeat the analysis year by year, let's see whether that still holds
###Code
m1_y1_anova = stats.f_oneway(*[y1[c].values for c in m1])
m2_y1_anova = stats.f_oneway(*[y1[c].values for c in m2])
m3_y1_anova = stats.f_oneway(*[y1[c].values for c in m3])
m4_y1_anova = stats.f_oneway(*[y1[c].values for c in m4])
m1_y1_anova, m2_y1_anova, m3_y1_anova, m4_y1_anova
aux_stat.append(m1_y1_anova[0])
aux_pval.append(m1_y1_anova[1])
aux_yr.append(1)
aux_mvar.append(equiv['m1'])
aux_test.append('anova')
#
aux_stat.append(m2_y1_anova[0])
aux_pval.append(m2_y1_anova[1])
aux_yr.append(1)
aux_mvar.append(equiv['m2'])
aux_test.append('anova')
#
aux_stat.append(m3_y1_anova[0])
aux_pval.append(m3_y1_anova[1])
aux_yr.append(1)
aux_mvar.append(equiv['m3'])
aux_test.append('anova')
#
aux_stat.append(m4_y1_anova[0])
aux_pval.append(m4_y1_anova[1])
aux_yr.append(1)
aux_mvar.append(equiv['m4'])
aux_test.append('anova')
m1_y4_anova = stats.f_oneway(*[y4[c].values for c in m1])
m2_y4_anova = stats.f_oneway(*[y4[c].values for c in m2])
m3_y4_anova = stats.f_oneway(*[y4[c].values for c in m3])
m4_y4_anova = stats.f_oneway(*[y4[c].values for c in m4])
m1_y4_anova, m2_y4_anova, m3_y4_anova, m4_y4_anova
aux_stat.append(m1_y4_anova[0])
aux_pval.append(m1_y4_anova[1])
aux_yr.append(4)
aux_mvar.append(equiv['m1'])
aux_test.append('anova')
#
aux_stat.append(m2_y4_anova[0])
aux_pval.append(m2_y4_anova[1])
aux_yr.append(4)
aux_mvar.append(equiv['m2'])
aux_test.append('anova')
#
aux_stat.append(m3_y4_anova[0])
aux_pval.append(m3_y4_anova[1])
aux_yr.append(4)
aux_mvar.append(equiv['m3'])
aux_test.append('anova')
#
aux_stat.append(m4_y4_anova[0])
aux_pval.append(m4_y4_anova[1])
aux_yr.append(4)
aux_mvar.append(equiv['m4'])
aux_test.append('anova')
m1_y5_anova = stats.f_oneway(*[y5[c].values for c in m1])
m2_y5_anova = stats.f_oneway(*[y5[c].values for c in m2])
m3_y5_anova = stats.f_oneway(*[y5[c].values for c in m3])
m4_y5_anova = stats.f_oneway(*[y5[c].values for c in m4])
m1_y5_anova, m2_y5_anova, m3_y5_anova, m4_y5_anova
aux_stat.append(m1_y5_anova[0])
aux_pval.append(m1_y5_anova[1])
aux_yr.append(5)
aux_mvar.append(equiv['m1'])
aux_test.append('anova')
#
aux_stat.append(m2_y5_anova[0])
aux_pval.append(m2_y5_anova[1])
aux_yr.append(5)
aux_mvar.append(equiv['m2'])
aux_test.append('anova')
#
aux_stat.append(m3_y5_anova[0])
aux_pval.append(m3_y5_anova[1])
aux_yr.append(5)
aux_mvar.append(equiv['m3'])
aux_test.append('anova')
#
aux_stat.append(m4_y5_anova[0])
aux_pval.append(m4_y5_anova[1])
aux_yr.append(5)
aux_mvar.append(equiv['m4'])
aux_test.append('anova')
###Output
_____no_output_____
###Markdown
The ANOVA tests gave the following result: only for year=5 the m1 and m2 groups showed a strong agreement in their distribution. For year=4 the m1 group has p-value=0.088 and for m2 a p-value=0.033For interpreting [p-value](https://www.statsdirect.com/help/basics/p_values.htm) Kruskal-Wallis The Kruskal-Wallis H-test tests the null hypothesis that the population median of all of the groups are equal. It is a non-parametric version of ANOVA. The test works on 2 or more independent samples, which may have different sizes. Note that rejecting the null hypothesis does not indicate which of the groups differs. Post-hoc comparisons between groups are required to determine which groups are different.Needs at least 5 measurements All years together
###Code
m1_kruskal = stats.kruskal(*[df_a[c].values for c in m1])
m2_kruskal = stats.kruskal(*[df_a[c].values for c in m2])
m3_kruskal = stats.kruskal(*[df_a[c].values for c in m3])
m4_kruskal = stats.kruskal(*[df_a[c].values for c in m4])
m1_kruskal, m2_kruskal, m3_kruskal, m4_kruskal
aux_stat.append(m1_kruskal[0])
aux_pval.append(m1_kruskal[1])
aux_yr.append(-1)
aux_mvar.append(equiv['m1'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m2_kruskal[0])
aux_pval.append(m2_kruskal[1])
aux_yr.append(-1)
aux_mvar.append(equiv['m2'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m3_kruskal[0])
aux_pval.append(m3_kruskal[1])
aux_yr.append(-1)
aux_mvar.append(equiv['m3'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m4_kruskal[0])
aux_pval.append(m4_kruskal[1])
aux_yr.append(-1)
aux_mvar.append(equiv['m4'])
aux_test.append('kruskal-wallis')
m1_y1_kruskal = stats.kruskal(*[y1[c].values for c in m1])
m2_y1_kruskal = stats.kruskal(*[y1[c].values for c in m2])
m3_y1_kruskal = stats.kruskal(*[y1[c].values for c in m3])
m4_y1_kruskal = stats.kruskal(*[y1[c].values for c in m4])
m1_y1_kruskal, m2_y1_kruskal, m3_y1_kruskal, m4_y1_kruskal
aux_stat.append(m1_y1_kruskal[0])
aux_pval.append(m1_y1_kruskal[1])
aux_yr.append(1)
aux_mvar.append(equiv['m1'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m2_y1_kruskal[0])
aux_pval.append(m2_y1_kruskal[1])
aux_yr.append(1)
aux_mvar.append(equiv['m2'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m3_y1_kruskal[0])
aux_pval.append(m3_y1_kruskal[1])
aux_yr.append(1)
aux_mvar.append(equiv['m3'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m4_y1_kruskal[0])
aux_pval.append(m4_y1_kruskal[1])
aux_yr.append(1)
aux_mvar.append(equiv['m4'])
aux_test.append('kruskal-wallis')
m1_y4_kruskal = stats.kruskal(*[y4[c].values for c in m1])
m2_y4_kruskal = stats.kruskal(*[y4[c].values for c in m2])
m3_y4_kruskal = stats.kruskal(*[y4[c].values for c in m3])
m4_y4_kruskal = stats.kruskal(*[y4[c].values for c in m4])
m1_y4_kruskal, m2_y4_kruskal, m3_y4_kruskal, m4_y4_kruskal
aux_stat.append(m1_y4_kruskal[0])
aux_pval.append(m1_y4_kruskal[1])
aux_yr.append(4)
aux_mvar.append(equiv['m1'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m2_y4_kruskal[0])
aux_pval.append(m2_y4_kruskal[1])
aux_yr.append(4)
aux_mvar.append(equiv['m2'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m3_y4_kruskal[0])
aux_pval.append(m3_y4_kruskal[1])
aux_yr.append(4)
aux_mvar.append(equiv['m3'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m4_y4_kruskal[0])
aux_pval.append(m4_y4_kruskal[1])
aux_yr.append(4)
aux_mvar.append(equiv['m4'])
aux_test.append('kruskal-wallis')
m1_y5_kruskal = stats.kruskal(*[y5[c].values for c in m1])
m2_y5_kruskal = stats.kruskal(*[y5[c].values for c in m2])
m3_y5_kruskal = stats.kruskal(*[y5[c].values for c in m3])
m4_y5_kruskal = stats.kruskal(*[y5[c].values for c in m4])
m1_y5_kruskal, m2_y5_kruskal, m3_y5_kruskal, m4_y5_kruskal
aux_stat.append(m1_y5_kruskal[0])
aux_pval.append(m1_y5_kruskal[1])
aux_yr.append(5)
aux_mvar.append(equiv['m1'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m2_y5_kruskal[0])
aux_pval.append(m2_y5_kruskal[1])
aux_yr.append(5)
aux_mvar.append(equiv['m2'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m3_y5_kruskal[0])
aux_pval.append(m3_y5_kruskal[1])
aux_yr.append(5)
aux_mvar.append(equiv['m3'])
aux_test.append('kruskal-wallis')
#
aux_stat.append(m4_y5_kruskal[0])
aux_pval.append(m4_y5_kruskal[1])
aux_yr.append(5)
aux_mvar.append(equiv['m4'])
aux_test.append('kruskal-wallis')
###Output
_____no_output_____
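###Markdown
The Kruskal-Wallis test only indicates that at least one group differs; as noted above, post-hoc comparisons are needed to locate which groups differ. A hypothetical sketch of such a post-hoc step, using pairwise Mann-Whitney U tests with a Bonferroni correction (not run here):
###Code
# Hypothetical post-hoc sketch: pairwise Mann-Whitney U tests with Bonferroni correction
from itertools import combinations
from scipy import stats

def posthoc_pairwise(df, columns, alpha=0.05):
    pairs = list(combinations(columns, 2))
    corrected_alpha = alpha / len(pairs)  # Bonferroni correction
    results = []
    for a, b in pairs:
        u_stat, p = stats.mannwhitneyu(df[a].values, df[b].values, alternative='two-sided')
        results.append((a, b, p, p < corrected_alpha))
    return results
###Output
_____no_output_____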
###Markdown
For the Kruskal-Wallis (median) test, the year=5 sample shows strong support for H0 for m1 and m2. For m3 the p-value=0.0018. For year=4, m1 has p-value=0.0688 and m2 has p-value=0.0539. Save into pandas to be exported
###Code
d = {
'estadistica': aux_stat,
'valor_p': aux_pval,
'curso': aux_yr,
'macro_variable': aux_mvar,
'test': aux_test,
}
df_anova_kruskal = pd.DataFrame(d)
df_anova_kruskal.head()
df_anova_kruskal.to_csv('Test_ANOVA_KruskalWallis_Alumnos.csv', header=True, index=False)
###Output
_____no_output_____
###Markdown
Correlation inside the macro-variablesThe idea is to compare the correlation of some variables through time and, on the other hand, to see how the correlation behaves inside each macro-variable for a fixed period of time.https://www.datascience.com/blog/introduction-to-correlation-learn-data-science-tutorials
###Code
def plot_corr_matrix(df, title, outname):
fig, ax = plt.subplots(figsize=(5, 5))
im = ax.imshow(df.corr(method='spearman'),
interpolation="nearest", cmap='bwr_r', vmin=-1, vmax=1)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(im, cax=cax)
ax.set_title(title, color='dodgerblue')
ax.set_xticklabels(df.columns)
ax.set_yticklabels(df.columns)
ticks = np.arange(0,len(df.columns),1)
ax.set_xticks(ticks)
ax.xaxis.set_tick_params(rotation=90)
# plt.xticks(rotation=90)
ax.set_yticks(ticks)
plt.tight_layout()
plt.savefig(outname, dpi=300, format='png')
###Output
_____no_output_____
###Markdown
Correlation inside a macro-variable for all the years together
###Code
plot_corr_matrix(df_a[m1],
'Correlacion en {0}, curso=todos'.format(equiv['m1']),
'matriz_corr_{0}_cursoTodos.png'.format(equiv['m1']),)
plot_corr_matrix(df_a[m2],
'Correlacion en {0}, curso=todos'.format(equiv['m2']),
'matriz_corr_{0}_cursoTodos.png'.format(equiv['m2']),)
plot_corr_matrix(df_a[m3],
'Correlacion en {0}, curso=todos'.format(equiv['m3']),
'matriz_corr_{0}_cursoTodos.png'.format(equiv['m3']),)
plot_corr_matrix(df_a[m4],
'Correlacion en {0}, curso=todos'.format(equiv['m4']),
'matriz_corr_{0}_cursoTodos.png'.format(equiv['m4']),)
###Output
_____no_output_____
###Markdown
Correlation for macro-variable 1, for the different years
###Code
plot_corr_matrix(y1[m1],
'Correlacion en {0}, curso=1'.format(equiv['m1']),
'matriz_corr_{0}_curso1.png'.format(equiv['m1']),)
plot_corr_matrix(y4[m1],
'Correlacion en {0}, curso=4'.format(equiv['m1']),
'matriz_corr_{0}_curso4.png'.format(equiv['m1']),)
plot_corr_matrix(y5[m1],
'Correlacion en {0}, curso=5'.format(equiv['m1']),
'matriz_corr_{0}_curso5.png'.format(equiv['m1']),)
###Output
_____no_output_____
###Markdown
Correlation for macro-variable 2, for the different years
###Code
plot_corr_matrix(y1[m2],
'Correlacion en {0}, curso=1'.format(equiv['m2']),
'matriz_corr_{0}_curso1.png'.format(equiv['m2']),)
plot_corr_matrix(y4[m2],
'Correlacion en {0}, curso=4'.format(equiv['m2']),
'matriz_corr_{0}_curso4.png'.format(equiv['m2']),)
plot_corr_matrix(y5[m2],
'Correlacion en {0}, curso=5'.format(equiv['m2']),
'matriz_corr_{0}_curso5.png'.format(equiv['m2']),)
###Output
_____no_output_____
###Markdown
Correlation for macro-variable 3, for the different years
###Code
plot_corr_matrix(y1[m3],
'Correlacion en {0}, curso=1'.format(equiv['m3']),
'matriz_corr_{0}_curso1.png'.format(equiv['m3']),)
plot_corr_matrix(y4[m3],
'Correlacion en {0}, curso=4'.format(equiv['m3']),
'matriz_corr_{0}_curso4.png'.format(equiv['m3']),)
plot_corr_matrix(y5[m3],
'Correlacion en {0}, curso=5'.format(equiv['m3']),
'matriz_corr_{0}_curso5.png'.format(equiv['m3']),)
###Output
_____no_output_____
###Markdown
Correlation for macro-variable 4, for the different years
###Code
plot_corr_matrix(y1[m4],
'Correlacion en {0}, curso=1'.format(equiv['m4']),
'matriz_corr_{0}_curso1.png'.format(equiv['m4']),)
plot_corr_matrix(y4[m4],
'Correlacion en {0}, curso=4'.format(equiv['m4']),
'matriz_corr_{0}_curso4.png'.format(equiv['m4']),)
plot_corr_matrix(y5[m4],
'Correlacion en {0}, curso=5'.format(equiv['m4']),
'matriz_corr_{0}_curso5.png'.format(equiv['m4']),)
###Output
_____no_output_____
###Markdown
Subset of variables
Comments:
Should be related: "compromiso" and "conciencia social". Also "confianza" and "adaptacion".
Should evolve over time: "confianza", "adaptacion" and "conciencia social".
Subset:
* Confianza (m1): v14, v17
* Adaptacion (m2): v10, v23
* Compromiso (m3): v05, v12
* Conciencia (m4): v11, v19
Confianza (m1) / Adaptacion (m2) / Compromiso (m3) / Conciencia social (m4)
Anderson-Darling test for some variables: analysis of the pairs.
Output: A2, critical values (25%, 10%, 5%, 2.5%, 1%), p-value
###Code
sub_m1_all = stats.anderson_ksamp([df_a[c].values for c in ['v14', 'v17']])
sub_m1_y1 = stats.anderson_ksamp([y1[c].values for c in ['v14', 'v17']])
sub_m1_y4 = stats.anderson_ksamp([y4[c].values for c in ['v14', 'v17']])
sub_m1_y5 = stats.anderson_ksamp([y5[c].values for c in ['v14', 'v17']])
sub_m1_all, sub_m1_y1, sub_m1_y4, sub_m1_y5
sub_m2_all = stats.anderson_ksamp([df_a[c].values for c in ['v10', 'v23']])
sub_m2_y1 = stats.anderson_ksamp([y1[c].values for c in ['v10', 'v23']])
sub_m2_y4 = stats.anderson_ksamp([y4[c].values for c in ['v10', 'v23']])
sub_m2_y5 = stats.anderson_ksamp([y5[c].values for c in ['v10', 'v23']])
sub_m2_all, sub_m2_y1, sub_m2_y4, sub_m2_y5
sub_m3_all = stats.anderson_ksamp([df_a[c].values for c in ['v05', 'v12']])
sub_m3_y1 = stats.anderson_ksamp([y1[c].values for c in ['v05', 'v12']])
sub_m3_y4 = stats.anderson_ksamp([y4[c].values for c in ['v05', 'v12']])
sub_m3_y5 = stats.anderson_ksamp([y5[c].values for c in ['v05', 'v12']])
sub_m3_all, sub_m3_y1, sub_m3_y4, sub_m3_y5
sub_m4_all = stats.anderson_ksamp([df_a[c].values for c in ['v11', 'v19']])
sub_m4_y1 = stats.anderson_ksamp([y1[c].values for c in ['v11', 'v19']])
sub_m4_y4 = stats.anderson_ksamp([y4[c].values for c in ['v11', 'v19']])
sub_m4_y5 = stats.anderson_ksamp([y5[c].values for c in ['v11', 'v19']])
sub_m4_all, sub_m4_y1, sub_m4_y4, sub_m4_y5
###Output
_____no_output_____
###Markdown
2D histograms between the variables showing higher AD test
###Code
fig, ax = plt.subplots()
ax.hist2d(y5['v14'], y5['v19'])
ax.set_title('Test')
###Output
_____no_output_____
###Markdown
1. Data processing
Before we can begin analysing the data, we need to get it and "clean" it so that we can run computations on it.
###Code
%matplotlib inline
import ast
import csv
import numpy as np
from collections import Counter
import matplotlib
import matplotlib.pyplot as plt
# pretty plotting
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 5]
# First lets parse the data
reader = csv.reader(open("movie_recommendations.csv", "rb"), delimiter=",")
data = list(reader)
print data
# The first row has header info and the second row is empty, so we can ignore them.
# Note: the data is stored as strings, so we need to process it some more
text_data = np.array(data[2:])
movie_titles = [unicode(title, 'utf-8') for title in text_data[:,0]]
raw_movie_genres = text_data[:,1]
raw_omkar_ratings = text_data[:,2]
raw_imdb_ratings = text_data[:,3]
# -SOON->
# raw_meta_critic_ratings = result[:,4]
# raw_rotten_tomato_ratings = result[:,5]
# Now lets normalize these ratings so they are between 0 and 1
from __future__ import division # so that python will evaluate 3/10 as a floating pt operation instead of an integer op
def string_to_numpy(string_arr):
tmp_list = []
for string_val in string_arr:
if string_val == 'N/A':
tmp_list.append(0)
else:
tmp_list.append(eval(string_val))
return np.asarray(tmp_list).astype("float")
omkar_ratings = string_to_numpy(raw_omkar_ratings)
imdb_ratings = string_to_numpy(raw_imdb_ratings)
###Output
_____no_output_____
###Markdown
2. Analysis
Let's look at the raw data first:
###Code
assert len(imdb_ratings) == len(movie_titles)
# plt.xticks(range(len(imdb_ratings)), movie_titles, rotation=90) # <- too messy :(
# Remember, we scalled all scores to [0,1]!
plt.plot(imdb_ratings, alpha=0.5, label="IMDB rating")
plt.plot(omkar_ratings, alpha=1.0, label="Omkar's rating")
plt.legend()
plt.title('Plotting Omkar and IMDB ratings (scaled to [0,1])')
plt.show()
###Output
_____no_output_____
###Markdown
_Phew!_ That's a pretty dense chart, and on its own it's hard to surmise how closely related Omkar's ratings are to IMDB's. For a single-number statistic, let's look at the [cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation) between Omkar and IMDB:
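(For reference, a standard definition rather than part of the original write-up: `np.corrcoef` returns the Pearson coefficient

$$\rho_{X,Y}=\frac{\operatorname{cov}(X,Y)}{\sigma_X\,\sigma_Y}=\frac{\sum_i (x_i-\bar x)(y_i-\bar y)}{\sqrt{\sum_i (x_i-\bar x)^2}\,\sqrt{\sum_i (y_i-\bar y)^2}},$$

so a value near 1 means the two sets of ratings move together almost perfectly and a value near 0 means essentially no linear relationship.)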
###Code
print "Overall IMDB corellation: ",np.corrcoef(omkar_ratings, imdb_ratings)[0,1]
###Output
Overall IMDB corellation: 0.6404962063207162
###Markdown
On its own, the correlation doesn't tell us much. Let's look at where the largest difference between Omkar and IMDB come up:
###Code
def analyze_diff(diff_omkar_imdb, title):
print 'Max difference: ', diff_omkar_imdb.max()
print 'Min difference: ', diff_omkar_imdb.min()
print 'Mean: ', diff_omkar_imdb.mean()
print 'Std dev: ', diff_omkar_imdb.std()
below_1_sigma = np.array(diff_omkar_imdb < (diff_omkar_imdb.mean() - diff_omkar_imdb.std()))
above_1_sigma = np.array(diff_omkar_imdb > (diff_omkar_imdb.mean() + diff_omkar_imdb.std()))
# everything that's not 1 sigma above/below the mean
rest = np.logical_not(below_1_sigma) & np.logical_not(above_1_sigma)
_x_axis = np.arange(len(imdb_ratings))
plt.bar(_x_axis[above_1_sigma], diff_omkar_imdb[above_1_sigma], label='Above 1 $\sigma$')
plt.bar(_x_axis[below_1_sigma], diff_omkar_imdb[below_1_sigma], label='Below 1 $\sigma$')
plt.bar(_x_axis[rest], diff_omkar_imdb[rest], alpha=0.5, label='Within 1 $\sigma$')
plt.legend()
plt.title(title)
high_positive_diff = []
high_negative_diff = []
for idx in range(len(movie_titles)):
if above_1_sigma[idx]:
high_positive_diff.append((movie_titles[idx], diff_omkar_imdb[idx]))
if below_1_sigma[idx]:
high_negative_diff.append((movie_titles[idx], diff_omkar_imdb[idx]))
# Note: diff = Omkar - IMDB, so a positive score indicates Omkar rated a movie higher and vice versa
print 'Movies that are above 1 sigma from the mean difference b/w Omkar and IMDB: (total: {})'.format(len(high_positive_diff))
for movie_title, diff in high_positive_diff:
print '\tMovie: {}, diff: {}'.format(movie_title.encode('utf-8'), diff)
print 'Movies that are below 1 sigma from the mean difference b/w Omkar and IMDB: (total: {})'.format(len(high_negative_diff))
for movie_title, diff in high_negative_diff:
print '\tMovie: {}, diff: {}'.format(movie_title.encode('utf-8'), diff)
return
analyze_diff(omkar_ratings - imdb_ratings, 'Difference b/w Omkar and IMDB (both of which were first scaled to [0,1])')
###Output
Max difference: 0.30000000000000004
Min difference: -0.16000000000000003
Mean: 0.041283185840707975
Std dev: 0.0667225093735769
Movies that are above 1 sigma from the mean difference b/w Omkar and IMDB: (total: 32)
Movie: 5) Seven Psycopaths (2012), diff: 0.18
Movie: 7) Con Air (1997), diff: 0.12
Movie: 12) Silver Linings Playbook (2012), diff: 0.12
Movie: 13) Mere Dad Ki Maruti (2013), diff: 0.15
Movie: 21) Killer Joe (2011), diff: 0.13
Movie: 35) Stoker (2013), diff: 0.12
Movie: 38) Pacific Rim (2013) , diff: 0.2
Movie: 43) Only God Forgives (2013) , diff: 0.13
Movie: 71) Speed Racer (2008) , diff: 0.3
Movie: 80) Edge Of Tomorrow (2014), diff: 0.11
Movie: 84) The Guest (2014), diff: 0.13
Movie: 89) Boyhood (2014), diff: 0.11
Movie: 90) John Wick (2014), diff: 0.17
Movie: 92) Birdman (2014), diff: 0.13
Movie: 97) You're Next (2011), diff: 0.15
Movie: 118) The DUFF (2015), diff: 0.15
Movie: 126) Soul Plane (2004), diff: 0.26
Movie: 136) Wet Hot American Summer (2001), diff: 0.13
Movie: 148) Shoot 'Em Up (2007) , diff: 0.13
Movie: 161) Captain America: Civil War (2016), diff: 0.12
Movie: 162) Kapoor and Sons (2016), diff: 0.12
Movie: 165) Big Nothing (2006), diff: 0.12
Movie: 166) Dilwale (2015), diff: 0.17
Movie: 178) Ride Along 2 (2016), diff: 0.11
Movie: 184) xXx (2002), diff: 0.11
Movie: 197) Get Out (2017), diff: 0.13
Movie: 200) Kong: Skull Island (2017), diff: 0.13
Movie: 210) King Arthur: Legend of the Sword (2017), diff: 0.12
Movie: 216) Thor: Ragnarok (2017), diff: 0.11
Movie: 217) Star Wars: The Last Jedi (2017), diff: 0.17
Movie: 219) The Matrix (1999), diff: 0.13
Movie: 223) Johnny English (2003), diff: 0.18
Movies that are below 1 sigma from the mean difference b/w Omkar and IMDB: (total: 31)
Movie: 2) Monty Python and the Holy Grail (1975), diff: -0.03
Movie: 11) Flight (2012), diff: -0.03
Movie: 28) The Game (1997), diff: -0.08
Movie: 39) Grosse Point Blank (1997), diff: -0.04
Movie: 41) It's A Boy Girl Thing (2006), diff: -0.03
Movie: 42) Carrie (1976) , diff: -0.04
Movie: 47) Udaan (2010) , diff: -0.12
Movie: 49) Office Space (1999) , diff: -0.08
Movie: 51) Videodrome (1983), diff: -0.13
Movie: 56) Following (1998) , diff: -0.06
Movie: 62) Good Will Hunting (1997) , diff: -0.03
Movie: 64) Being Cyrus (2005), diff: -0.03
Movie: 67) Inside Man (2006) , diff: -0.06
Movie: 78) Blade Runner (1982), diff: -0.12
Movie: 98) Jhankaar Beats (2003), diff: -0.03
Movie: 100) Whiplash (2014), diff: -0.05
Movie: 102) Queen (2013), diff: -0.03
Movie: 104) Sunset Blvd. (1950), diff: -0.05
Movie: 111) Mad Max 2: The Road Warrior, diff: -0.06
Movie: 112) Drishyam (2013), diff: -0.08
Movie: 125) Detective Byomkesh Bakshy! (2015), diff: -0.06
Movie: 128) Otto e Mezzo (8½) (1963), diff: -0.11
Movie: 141) Ant-Man (2015), diff: -0.03
Movie: 147) The Man From U.N.C.L.E. (2015), diff: -0.03
Movie: 155) Thani Oruvan (2015), diff: -0.16
Movie: 191) Ulidavaru Kandanthe (As Seen By The Rest) (2014), diff: -0.06
Movie: 201) Fast Five (2011), diff: -0.03
Movie: 209) Baby Driver (2017), diff: -0.07
Movie: 211) Dhuruvangal Pathinaaru (2016), diff: -0.16
Movie: 215) 3 Idiots (2009), diff: -0.04
Movie: 220) Goodfellas (1990), diff: -0.07
###Markdown
This is interesting: on average, it looks like Omkar rates movies ~4% higher than IMDB. With a standard deviation of ~6%, we see that Omkar tends to generally be more generous with his ratings.
Additionally, we can also look at the **absolute** difference b/w Omkar and IMDB in order to see which movies have very strong agreement b/w both datasets:
###Code
analyze_diff(np.abs(omkar_ratings - imdb_ratings), 'Absolute difference b/w Omkar and IMDB (both of which were first scaled to [0,1])')
###Output
Max difference: 0.30000000000000004
Min difference: 0.0
Mean: 0.061637168141592924
Std dev: 0.048549502507754805
Movies that are above 1 sigma from the mean difference b/w Omkar and IMDB: (total: 32)
Movie: 5) Seven Psycopaths (2012), diff: 0.18
Movie: 7) Con Air (1997), diff: 0.12
Movie: 12) Silver Linings Playbook (2012), diff: 0.12
Movie: 13) Mere Dad Ki Maruti (2013), diff: 0.15
Movie: 21) Killer Joe (2011), diff: 0.13
Movie: 35) Stoker (2013), diff: 0.12
Movie: 38) Pacific Rim (2013) , diff: 0.2
Movie: 43) Only God Forgives (2013) , diff: 0.13
Movie: 47) Udaan (2010) , diff: 0.12
Movie: 51) Videodrome (1983), diff: 0.13
Movie: 71) Speed Racer (2008) , diff: 0.3
Movie: 78) Blade Runner (1982), diff: 0.12
Movie: 84) The Guest (2014), diff: 0.13
Movie: 90) John Wick (2014), diff: 0.17
Movie: 92) Birdman (2014), diff: 0.13
Movie: 97) You're Next (2011), diff: 0.15
Movie: 118) The DUFF (2015), diff: 0.15
Movie: 126) Soul Plane (2004), diff: 0.26
Movie: 136) Wet Hot American Summer (2001), diff: 0.13
Movie: 148) Shoot 'Em Up (2007) , diff: 0.13
Movie: 155) Thani Oruvan (2015), diff: 0.16
Movie: 161) Captain America: Civil War (2016), diff: 0.12
Movie: 162) Kapoor and Sons (2016), diff: 0.12
Movie: 165) Big Nothing (2006), diff: 0.12
Movie: 166) Dilwale (2015), diff: 0.17
Movie: 197) Get Out (2017), diff: 0.13
Movie: 200) Kong: Skull Island (2017), diff: 0.13
Movie: 210) King Arthur: Legend of the Sword (2017), diff: 0.12
Movie: 211) Dhuruvangal Pathinaaru (2016), diff: 0.16
Movie: 217) Star Wars: The Last Jedi (2017), diff: 0.17
Movie: 219) The Matrix (1999), diff: 0.13
Movie: 223) Johnny English (2003), diff: 0.18
Movies that are below 1 sigma from the mean difference b/w Omkar and IMDB: (total: 37)
Movie: 10) Invincible (2006), diff: 0.01
Movie: 17) Death of a Superhero (2011), diff: 0.01
Movie: 29) The Breakfast Club (1985), diff: 0.01
Movie: 31) Warm Bodies (2013), diff: 0.01
Movie: 36) Murder By Numbers (2002), diff: 0.01
Movie: 40) Oblivion (2013), diff: 0.0
Movie: 44) Trance (2013), diff: 0.01
Movie: 46) Horrible Bosses (2011) , diff: 0.01
Movie: 48) Pawn Shop Chronicles (2013) , diff: 0.01
Movie: 50) Coffy (1973), diff: 0.01
Movie: 58) Violet & Daisy (2011), diff: 0.01
Movie: 59) We're The Millers (2013), diff: 0.0
Movie: 63) Evolution (2001), diff: 0.01
Movie: 65) Mou Gaan Dou (Infernal Affairs) (2002), diff: 0.01
Movie: 69) Hasee Toh Phasee (2014) , diff: 0.01
Movie: 75) The Grand Budapest Hotel (2014), diff: 0.01
Movie: 79) Troll Hunter (2010), diff: 0.0
Movie: 88) Gone Girl (2014), diff: 0.01
Movie: 91) Nightcrawler (2014), diff: 0.01
Movie: 93) The Little Death (2014), diff: 0.01
Movie: 94) Le Samourai (1967), diff: 0.01
Movie: 95) Blue Ruin (2013), diff: 0.01
Movie: 96) Le Cercle Rouge (1970) , diff: 0.01
Movie: 109) The One I Love (2014) , diff: 0.01
Movie: 110) Mad Max (1979), diff: 0.0
Movie: 115) Locke (2013), diff: 0.01
Movie: 138) Relatos Salvajes (Wild Tales) (2014) , diff: 0.01
Movie: 139) The Martian (2015) , diff: 0.0
Movie: 143) Star Wars: The Force Awakens (2015), diff: 0.0
Movie: 149) Straight Outta Compton (2015), diff: 0.01
Movie: 153) Buried (2010), diff: 0.0
Movie: 159) Pizza (2012), diff: 0.01
Movie: 167) Hot Fuzz (2007), diff: 0.01
Movie: 168) Zootopia/Zootropolis (2016), diff: 0.0
Movie: 179) Shaun Of The Dead (2004), diff: 0.0
Movie: 188) Arrival (2016), diff: 0.01
Movie: 199) Power Rangers (2017), diff: 0.0
###Markdown
Genre-based analysis
###Code
# Num unique genres
all_genres = []
for raw_genres in raw_movie_genres:
genres = raw_genres.split('/')
for genre in genres:
word = genre.lower().strip()
# spelling mistakes
if word == 'crme':
word = 'crime'
elif word == 'myster':
word = 'mystery'
all_genres.append(word)
unique_genres = sorted(set(all_genres))
counts = Counter(all_genres)
print unique_genres
print counts
max_correlation = 0
max_corr_genre = 'N/A'
for genre in unique_genres:
use = []
for raw_genres in raw_movie_genres:
use.append(genre in raw_genres.lower())
if sum(use) < 3:
print '> Genre "{}" has too few examples ({})'.format(genre, counts[genre])
continue
correlation = np.corrcoef(omkar_ratings[use], imdb_ratings[use])[0, 1]
print 'Genre: {}, Num. data pts: {}, Correlation: {}'.format(genre, counts[genre], correlation)
if correlation > max_correlation:
max_correlation = correlation
max_corr_genre = genre
print "Max. correlated genre: {}, ({})".format(max_corr_genre, max_correlation)
###Output
_____no_output_____
###Markdown
Now let's run the same algorithms with the same heuristics on the 3x3 puzzles
###Code
start = time.time()
run_experiment(experiment, three_by_three)
elapsed = round(time.time()-start, 2)
print(f'\n\nTotal of 2x50x5 = {2*50*5} puzzles solved in {elapsed} seconds')
def generate_stats(experiment):
prototype = {
'GBF': {
'hamming_distance': {},
'manhattan_distance': {},
'row_col_out_of_place': {},
'euclidean_distance': {},
'permutation_inversion': {}
},
'A*': {
'hamming_distance': {},
'manhattan_distance': {},
'row_col_out_of_place': {},
'euclidean_distance': {},
'permutation_inversion': {}
}
}
global_stats = {
(2,4): copy.deepcopy(prototype),
(3,3): copy.deepcopy(prototype),
}
stats = {}
timeouts = 0
result_count = 0
for heuristic in experiment:
for algo in experiment[heuristic]['algos']:
for shape in experiment[heuristic]['algos'][algo]['shape']:
results = experiment[heuristic]['algos'][algo]['shape'][shape]['results']
runtimes = [r['runtime'] for r in results]
costs = np.array([r['current_node'].total_cost for r in results])
num_visited_nodes = np.array([r['visited_nodes'] for r in results])
mean_runtime = np.mean(runtimes)
mean_cost = np.mean(costs)
mean_vis_nodes = np.mean(num_visited_nodes)
global_stats[shape][algo][heuristic]['mean_runtime'] = mean_runtime
global_stats[shape][algo][heuristic]['mean_cost'] = mean_cost
global_stats[shape][algo][heuristic]['mean_visited_nodes'] = mean_vis_nodes
global_stats[shape][algo][heuristic]['std_runtime'] = np.std(runtimes)
global_stats[shape][algo][heuristic]['std_cost'] = np.std(costs)
global_stats[shape][algo][heuristic]['std_visited_nodes'] = np.std(num_visited_nodes)
timeouts += len([1 for r in results if not r['success']])
print(f'number of timeouts = {timeouts}.')
return global_stats
stats = generate_stats(experiment)
'''here is a small sample of what the stats object looks like'''
stats[(2,4)]['GBF']['hamming_distance']
###Output
number of timeouts = 0.
###Markdown
We definitely did not expect any timeouts for algorithms A* and GBF, as they are designed to be very fast, but having the assurance is always nice. Let us now compare these heuristics.
###Code
'''Calculating average runtime for each heuristic'''
def plot_stats(stats, shape, algo):
heuris_vs_runtime = {}
heuris_vs_cost = {}
heuris_vs_visited = {}
for heuristic in stats[shape][algo]:
alias = stats[shape][algo][heuristic]
heuris_vs_runtime[heuristic] = round(alias['mean_runtime'], 2)
heuris_vs_cost[heuristic] = round(alias['mean_cost'], 2)
heuris_vs_visited[heuristic] = round(alias['mean_visited_nodes'], 2)
print(heuris_vs_runtime)
plt.barh(tuple(heuris_vs_runtime.keys()),heuris_vs_runtime.values())
plt.ylabel('Heuristics')
plt.xlabel('Time in Seconds')
plt.title(f'Mean runtime of {algo} on {shape[0]}x{shape[1]} puzzles')
plt.show()
print()
print(heuris_vs_cost)
plt.barh(tuple(heuris_vs_cost.keys()),heuris_vs_cost.values())
plt.ylabel('Heuristics')
plt.xlabel('Cost')
plt.title(f'Mean cost of {algo} on {shape[0]}x{shape[1]} puzzles')
plt.show()
print()
print(heuris_vs_visited)
plt.barh(tuple(heuris_vs_visited.keys()),heuris_vs_visited.values())
plt.ylabel('Heuristics')
plt.xlabel('Number of Visited Nodes')
plt.title(f'Mean search path of {algo} on {shape[0]}x{shape[1]} puzzles')
plt.show()
plot_stats(stats,(2,4),'A*')
plot_stats(stats,(2,4),'GBF')
plot_stats(stats,(3,3),'A*')
plot_stats(stats,(3,3),'GBF')
###Output
{'hamming_distance': 0.05, 'manhattan_distance': 0.04, 'row_col_out_of_place': 0.06, 'euclidean_distance': 0.04, 'permutation_inversion': 0.71}
###Markdown
Bikers on the Fremont bridge
Example adapted from the [Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html).
Set up: Download (and load) data
###Code
# Download data (you can download it by uncommenting and running this line of code)
# !curl -o FremontBridge.csv https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler # scaling data
from sklearn.model_selection import train_test_split # splitting data
from sklearn.neighbors import KNeighborsRegressor # regressor
from sklearn.model_selection import GridSearchCV # for grid search
from sklearn.pipeline import make_pipeline # for making pipelines
%matplotlib inline
# Aggregate data to the daily level
counts = pd.read_csv('data/FremontBridge.csv', index_col='Date', parse_dates=True)
daily = counts.resample('d').sum()
daily['Total'] = daily.sum(axis=1)
daily = daily[['Total']] # remove other columns
plt.figure(figsize=(15,5))
daily.Total.plot()
daily[daily.Total == daily.Total.max()]
###Output
_____no_output_____
###Markdown
Data Prep: Adding Features
###Code
# Load weather data (downloaded from: https://www.ncdc.noaa.gov/cdo-web/search?datasetid=GHCND)
weather = pd.read_csv('data/weather.csv', index_col='DATE', parse_dates=True)
# Create dry_day column
weather['dry_day'] = (weather['PRCP'] == 0).astype(int)
# Join selected weather columns
daily = daily.join(weather[['PRCP', 'dry_day', 'TMIN', 'TMAX']])
# Compute hours of daylight
def hours_of_daylight(date, axis=23.44, latitude=47.61):
"""Compute the hours of daylight for the given date"""
days = (date - pd.datetime(2000, 12, 21)).days
m = (1. - np.tan(np.radians(latitude))
* np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))
return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.
daily['daylight_hrs'] = list(map(hours_of_daylight, daily.index))
daily[['daylight_hrs']].plot()
plt.ylim(8, 17)
###Output
_____no_output_____
###Markdown
Feature Generation: Categorical Variable(s)
###Code
# Get dummy variables from categorical columns (alternative: sklearn OneHotEncoding)
daily["day_of_week"] = daily.index.day_name()
# daily["day_of_week"] = daily.index.dayofweek.astype(dtype="str")
daily = pd.get_dummies(daily)
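# --- Aside (a sketch, not part of the original demo): the same one-hot expansion can be done
# with sklearn's OneHotEncoder, which is handy when the encoding should live inside a Pipeline.
# Shown on a tiny throwaway frame, since `day_of_week` was already consumed by get_dummies above.
from sklearn.preprocessing import OneHotEncoder
_demo = pd.DataFrame({"day_of_week": ["Monday", "Tuesday", "Monday"]})
_encoder = OneHotEncoder(handle_unknown="ignore")
_encoder.fit_transform(_demo[["day_of_week"]]).toarray()  # one column per observed day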
daily.head()
###Output
_____no_output_____
###Markdown
Abbreviated EDA
###Code
# What is the relationship between bikers and temperature?
plt.scatter(daily.TMAX, daily.Total, alpha = 0.2)
plt.xlabel("Max Temperature")
plt.ylabel("Total # Bikers in a Day")
plt.show()
# What is the relationship between bikers and date?
plt.figure(figsize=(15,4))
daily.Total.plot()
# What is the relationship between bikers and (min) temperature?
plt.scatter(daily.TMIN, daily.Total, alpha = 0.2)
plt.xlabel("Minimum Temperature")
plt.ylabel("Total # Bikers in a Day")
plt.show()
# What is the distribution of bikers on dry/wet days?
wet_days = daily[daily.dry_day == 0]
dry_days = daily[daily.dry_day == 1]
plt.hist(wet_days.Total, alpha = 0.3, label = "Wet")
plt.hist(dry_days.Total, alpha = 0.3, label = "Dry")
plt.legend()
plt.show()
# What is the relationship between bikers and precipitation?
plt.scatter(daily.PRCP, daily.Total, alpha = 0.2)
plt.xlabel("Precipitation")
plt.ylabel("Total # Bikers in a Day")
plt.show()
# How does the number of bikers vary by temperature and wet/dry?
plt.scatter(wet_days.TMAX, wet_days.Total, alpha = 0.3, label = "Wet")
plt.scatter(dry_days.TMAX, dry_days.Total, alpha = 0.3, label = "Dry")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Modeling: KNN Regressor
###Code
# Split data into training and testing data
# Split into test/train data
from sklearn.model_selection import train_test_split
train_features, test_features, train_outcome, test_outcome = train_test_split(
daily.drop("Total", axis = 1),
daily.Total,
test_size=0.10
)
# Create a scaler and your classifier
scaler = MinMaxScaler()
knn_clf = KNeighborsRegressor()
# Define a pipeline that uses your scaler and classifier
pipe = make_pipeline(scaler, knn_clf)
# Define a grid to search through
params = {"kneighborsregressor__n_neighbors":np.arange(1, 10)}
# Perform a grid search of your pipeline
grid_search = GridSearchCV(pipe, params, scoring="neg_mean_absolute_error")
grid_search.fit(train_features, train_outcome)
# Compare prediction to (test) data
preds = grid_search.predict(test_features)
plt.scatter(preds, test_outcome)
plt.show()
test_data = test_features.join(test_outcome)
test_data["preds"] = grid_search.predict(test_features)
plt.figure(figsize=(20, 5))
test_data.Total.plot(label = "Actual")
test_data.preds.plot(label = "Predicted")
plt.show()
grid_search.score(test_features, test_outcome)
###Output
_____no_output_____
###Markdown
Feature Generation: Polynomial Transformations
###Code
# Add a polynomial transformation to the pipeline
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures()
# Define a pipeline that includes the polynomial transformation
pipe = make_pipeline(poly, scaler, knn_clf)
# Define a grid to search through (including the degree of polynomial)
param_grid = {'polynomialfeatures__degree':range(1, 4),
'kneighborsregressor__n_neighbors':range(1, 10),
'kneighborsregressor__weights':["uniform", "distance"]}
# Perform a grid search of your pipeline
grid_search = GridSearchCV(pipe, param_grid, scoring="neg_mean_absolute_error")
grid_search.fit(train_features, train_outcome)
grid_search.score(test_features, test_outcome)
# Visualize time trends
###Output
_____no_output_____
###Markdown
Error assessment: find systematic errors
###Code
# Why are we getting this wrong?
# Assess error by day of the week
# Assess error by temperature and dry_day
# Assess error by precipitation
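# --- A rough sketch (not the original solution) of how these questions could be answered;
# it reuses the `test_data` frame built above, which already holds `Total` and `preds`.
test_data["error"] = test_data.preds - test_data.Total
# Error by day of week: average the residuals over each one-hot day column
day_cols = [c for c in test_data.columns if c.startswith("day_of_week_")]
for col in day_cols: print(col, round(test_data.loc[test_data[col] == 1, "error"].mean(), 1))
# Error by temperature, colored by dry/wet day
plt.scatter(test_data.TMAX, test_data.error, c=test_data.dry_day, alpha=0.3)
plt.xlabel("Max Temperature"); plt.ylabel("Prediction error"); plt.show()
# Error by precipitation
plt.scatter(test_data.PRCP, test_data.error, alpha=0.3)
plt.xlabel("Precipitation"); plt.ylabel("Prediction error"); plt.show()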
###Output
_____no_output_____
###Markdown
Feature Selection: Select best features
As a form of dimensionality reduction, only select the top percentile of features that have a certain threshold of variance.
###Code
# Create a percentile selector, add it to the pipeline
# (alternatives a K selectors, PCA, or others)
# Define a grid to search through (including the degree of polynomial AND percentile of best features)
# Fit the model
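# --- One possible way to do this (a sketch, not the original solution): use sklearn's
# SelectPercentile so the grid search can pick how many of the highest-scoring features to keep.
# The score function and parameter values below are illustrative choices.
from sklearn.feature_selection import SelectPercentile, f_regression
selector = SelectPercentile(f_regression)
pipe = make_pipeline(poly, scaler, selector, knn_clf)
param_grid = {'polynomialfeatures__degree': range(1, 3),
'selectpercentile__percentile': [25, 50, 75, 100],
'kneighborsregressor__n_neighbors': range(1, 10)}
grid_search = GridSearchCV(pipe, param_grid, scoring="neg_mean_absolute_error")
grid_search.fit(train_features, train_outcome)
grid_search.score(test_features, test_outcome)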
###Output
_____no_output_____
###Markdown
Analysis
###Code
import numpy as np
import scipy
import math
import random
import matplotlib.pyplot as plt
from env import *
import utils
def observed_function_f(xs, a, b, c, d, frequency):
results = []
for x in xs:
result = c * math.cos(a * x * frequency + b) + d
results.append(result)
return results
def observed_function_f1(xs, a, b, c, d):
return observed_function_f(xs, a, b, c, d, 1)
def observed_function_f3(xs, a, b, c, d):
return observed_function_f(xs, a, b, c, d, 3)
def observed_function_f5(xs, a, b, c, d):
return observed_function_f(xs, a, b, c, d, 5)
def observed_function_f7(xs, a, b, c, d):
return observed_function_f(xs, a, b, c, d, 7)
def observed_function_f9(xs, a, b, c, d):
return observed_function_f(xs, a, b, c, d, 9)
###Output
_____no_output_____
###Markdown
extraction
###Code
def plainQubitsExtraction(counts):
return counts
def separateQubitsExtraction(counts):
##### calculating number of qubits on the device and shots done
experiments_count = len(counts)
qubits_count = None
shots = 0
one_experiment = counts[0]
for key in one_experiment:
shots += one_experiment[key]
if qubits_count == None:
qubits_count = len(key)
##### separating results for each qubit
qs = [[0 for i in range(experiments_count)] for j in range(qubits_count)]
##### transforming results
for i in range(experiments_count):
counts_i = counts[i]
for key in counts_i:
j = 0
for v in key:
# |0> = 1, |1> = -1
if v == "0":
qs[j][i] += counts_i[key]
elif v == "1":
qs[j][i] -= counts_i[key]
j += 1
##### from qiskit arrangment to physical
qs = qs[::-1]
qs = np.array(qs) / shots
return qs
###Output
_____no_output_____
###Markdown
process
###Code
# NB! experiments should be sorted by step offset
# e.g. for step == 0.1 lets say
# first batch contains results for parameter values 1, 2, 3
# next - for 1.1, 2.1, 3.1
# next - for 1.2, 2.2, 3.2
# ...
def combine(experiments, parameter):
all_counts = []
all_parameter_values = []
for experiment_name in experiments:
counts, parameter_values = processParametrizedExperiment(experiment_name,
THETA,
plainQubitsExtraction)
all_counts.append(counts)
all_parameter_values.append(parameter_values)
combined_counts = []
combined_parameter_values = []
experiments_count = len(all_counts)
batch_count = len(all_counts[0])
for result_index in range(batch_count):
for batch_index in range(experiments_count):
combined_counts.append(all_counts[batch_index][result_index])
combined_parameter_values.append(all_parameter_values[batch_index][result_index])
return combined_counts, combined_parameter_values
def processCombinedExperiment(experiment_names, parameter):
counts, parameter_values = combine(experiment_names, parameter)
qs = separateQubitsExtraction(counts)
return qs, parameter_values
def rebuildCounts(counts, desired_shots):
measurements = []
for value in counts:
observations = counts[value]
for i in range(observations):
measurements.append(value)
new_measurements = random.sample(measurements, desired_shots)
new_counts = {}
for value in counts:
new_counts[value] = new_measurements.count(value)
return new_counts
def processParametrizedExperiment(experiment_name, parameter, qubitsExtraction, shots = None):
path = "../experiments/" + experiment_name + ".json"
json_object = utils.retrieve(path)
##### retrieving experiment from the file
parameters = json_object.get("parameters")
parameter_values = parameters.get(str(parameter))
counts = json_object.get("counts")
experiments_count = len(parameter_values) # == len(counts)
if counts == None:
### retrieve job from backend
account_id = json_object["account"]
device_id = json_object["backend"]["name"]
device = qutils.backend(account_id, device_id)
jobId = json_object["job"]
job = device.retrieve_job(jobId)
error_message = job.error_message()
if error_message:
print("ERROR: " + error_message, "\nEXPERIMENT: " + experiment_name)
result = job.result()
counts = []
for i in range(experiments_count):
i_counts = result.get_counts(i)
counts.append(i_counts)
### saving results locally
utils.update(json_object, counts, path)
if not shots == None:
for i in range(len(counts)):
i_counts = counts[i]
counts[i] = rebuildCounts(i_counts, shots)
return qubitsExtraction(counts), parameter_values
def processExperiment(experiment_name):
path = "experiments/" + experiment_name + ".json"
json_object = utils.retrieve(path)
##### retrieving experiment from the file
counts = json_object.get("counts")
if counts == None:
### retrieve job from backend
account_id = json_object["account"]
device_id = json_object["backend"]["name"]
device = qutils.backend(account_id, device_id)
jobId = json_object["job"]
job = device.retrieve_job(jobId)
error_message = job.error_message()
if error_message:
print("ERROR: " + error_message, "\nEXPERIMENT: " + experiment_name)
result = job.result()
counts = result.get_counts()
### saving results locally
utils.update(json_object, counts, path)
return counts
def processParametrizedExperimentWithRealMeasurement(experiment_name, parameter, account_for_):
counts, parameter_values = processParametrizedExperiment(experiment_name, parameter, plainQubitsExtraction)
return utils.real_measurement(counts, account_for_)
###Output
_____no_output_____
###Markdown
analyze
###Code
def mean_square_error(A, B):
return np.square(np.subtract(A, B)).mean()
def analyzeExperiment(experiment_name, observed_function, shots = None):
counts, parameter_values = processParametrizedExperiment(experiment_name,
THETA,
separateQubitsExtraction,
shots)
target_qubit_results = counts[0]
xdata = parameter_values
ydata = target_qubit_results
fit_result = scipy.optimize.curve_fit(observed_function, xdata, ydata)
fitted_params = fit_result[0]
sim_results = observed_function(xdata, fitted_params[0], fitted_params[1], fitted_params[2], fitted_params[3])
return parameter_values, target_qubit_results, sim_results, fitted_params
def analyzeExperiments(experiments_names, observed_functions, shots = None):
for i in range(len(experiments_names)):
experiment_name = experiments_names[i]
observed_function = observed_functions[i]
parameter_values, target_qubit_results, sim_results, fitted_params = analyzeExperiment(experiment_name,
observed_function,
shots)
fitted_function = str(fitted_params[2]) + ' * cos(' + str(fitted_params[0]) \
+ ' * x + ' + str(fitted_params[1]) + ') + ' + str(fitted_params[3])
mse = mean_square_error(target_qubit_results, sim_results)
path_to_save_fig = None#"../experiments/" + a_experiment_name + "_fitted.pdf"
utils.plot([parameter_values, parameter_values],
[target_qubit_results, sim_results],
curves_names = ['measured', fitted_function],
title = 'Mean Square Error: ' + str(mse),
x_name = 'parameter value',
y_name = 'expectation',
path_to_file = path_to_save_fig,
include_ft = False)
def resultsOfExperiments(experiments_names):
for a_experiment_name in experiments_names:
a_qs, a_parameter_values = processParametrizedExperiment(a_experiment_name,
THETA,
separateQubitsExtraction)
path_to_save_fig = "../experiments/" + a_experiment_name + "_result.pdf"
plot([a_parameter_values for i in range(len(a_qs))], a_qs)
###Output
_____no_output_____
###Markdown
maximum likelihood estimation
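For reference, the likelihood maximized below (matching the `L_k` / `ln_L_k` definitions in the next cell, where, following the code comments, $m_k$ counts the iterations, $h_k$ the good outcomes and $N_k$ all the outcomes) is

$$L_k(\theta_a)=\left[\sin^2\!\big((2m_k+1)\theta_a\big)\right]^{h_k}\left[\cos^2\!\big((2m_k+1)\theta_a\big)\right]^{N_k-h_k},\qquad L(\theta_a)=\prod_k L_k(\theta_a),$$

and the code works with $\ln L(\theta_a)=\sum_k\big[h_k\ln\sin^2((2m_k+1)\theta_a)+(N_k-h_k)\ln\cos^2((2m_k+1)\theta_a)\big]$, minimizing its negative for numerical stability.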
###Code
# ML
# theta_a - rotation
# m_k - iterations of fusion
# h_k - good outcomes
# N_k - all the outcomes
def L_k(theta_a, m_k, h_k, N_k):
angle = (2 * m_k + 1) * theta_a
sin_2 = pow(math.sin(angle), 2)
cos_2 = pow(math.cos(angle), 2)
result = pow(sin_2, h_k) * pow(cos_2, N_k - h_k)
return result
def L(theta_a, m_ks, h_ks, N_ks):
result = 1
for i in range(len(m_ks)):
m_k = m_ks[i]
h_k = h_ks[i]
N_k = N_ks[i]
result *= L_k(theta_a, m_k, h_k, N_k)
return result
def MLE_L(param):
global global_m_ks, global_h_ks, global_N_ks
value = L(param, global_m_ks, global_h_ks, global_N_ks)
return -value
def ln_L_k(theta_a, m_k, h_k, N_k):
angle = (2 * m_k + 1) * theta_a
sin_2 = pow(math.sin(angle), 2)
cos_2 = pow(math.cos(angle), 2)
# we can do the following, because log(x) ~ log(x + "a little")
a_little = 1e-323
sin_2 += a_little
cos_2 += a_little
result = h_k * math.log(sin_2) + (N_k - h_k) * math.log(cos_2)
return result
def ln_L(theta_a, m_ks, h_ks, N_ks):
result = 0
for i in range(len(m_ks)):
m_k = m_ks[i]
h_k = h_ks[i]
N_k = N_ks[i]
result += ln_L_k(theta_a, m_k, h_k, N_k)
return result
def MLE_ln_L(param):
global global_m_ks, global_h_ks, global_N_ks
value = ln_L(param, global_m_ks, global_h_ks, global_N_ks)
return -value
a_experiments = []  # array of experiment names (placeholder left unspecified in the original; fill in before running)
a_range = range(len(a_experiments))
a_target_qubit_index = 0
theta_indeces = 75
global_m_ks = [i for i in a_range]
global_N_ks = [100 for i in a_range]
MLE_results = []
a_steps = 100
a_step = math.pi * 2 / a_steps
MLE_points = []
gammas = [a_step * step for step in range(a_steps)]
plt_gammas = []
plt_thetas = []
plt_MLE_ln_Ls = []
for a_theta_index in range(theta_indeces):
global_h_ks = []
for i in a_range:
a_experiment_name = a_experiments[i]
a_shots = global_N_ks[i]
a_qs, a_parameter_values = processParametrizedExperiment(a_experiment_name,
THETA,
plainQubitsExtraction,
a_shots)
h_k = 0
a_experiment_results = a_qs[a_theta_index]
for outcome in a_experiment_results:
if outcome[a_target_qubit_index] == '1':
h_k += a_experiment_results[outcome]
global_h_ks.append(h_k)
plt_thetas.extend([a_parameter_values[a_theta_index] for i in range(a_steps)])
plt_gammas.extend(gammas)
plt_MLE_ln_Ls.extend([MLE_ln_L(a_gamma) for a_gamma in gammas])
min_f = None
min_gamma = None
for a_gamma in gammas:
current_min_gamma = scipy.optimize.fminbound(MLE_ln_L, a_gamma, a_gamma + a_step)
current_min_f = MLE_ln_L(current_min_gamma)
if min_f == None or min_f > current_min_f:
min_gamma = current_min_gamma
min_f = current_min_f
optimized_result = pow(math.cos(min_gamma), 2) - pow(math.sin(min_gamma), 2)
MLE_results.append(optimized_result)
xs = a_parameter_values
ys = MLE_results
plt.plot(xs, ys)
plt.show()
for a_theta_index in range(theta_indeces):
left_i = a_theta_index * a_steps
right_i = (a_theta_index + 1) * a_steps
xs = plt_gammas[left_i:right_i]
ys = plt_MLE_ln_Ls[left_i:right_i]
plt.plot(xs, ys)
plt.show()
###Output
_____no_output_____
###Markdown
We need to disregard id since that will have no predictive power (as an arbitrarily assigned variable). We also need to correctly handle date_recorded by converting it into time since epoch. Region_code and district_code are incorrectly designated as continuous variables but that does not matter since our dependent variable is categorical and so we will be using chi-square tests to assess statistical significance.
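For reference (a standard formula, not from the original notebook), `chi2_contingency` computes, for each feature-vs-status crosstab,

$$\chi^2=\sum_{i,j}\frac{(O_{ij}-E_{ij})^2}{E_{ij}},$$

where $O_{ij}$ are the observed cell counts and $E_{ij}$ the counts expected if the feature and the pump status were independent; a small p-value indicates the feature is associated with the outcome.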
###Code
# Removing date recorded (will handle later) and id
categorical_indices = categorical_indices[1:]
continuous_indices = continuous_indices[1:]
transformed_features = features.copy();
transformed_features
# Dropping waterpoint name
transformed_features = transformed_features.drop('wpt_name', axis=1)
for index in categorical_indices:
transformed_features[index] = features[index].replace(features[index].unique(), np.arange(len(features[index].unique()))).astype('int')
print("done with " + index)
categorical_outcome = outcome['status_group']
categorical_outcome = categorical_outcome.replace(['functional', 'functional needs repair', 'non functional'], [0, 1, 2]).astype('int')
categorical_outcome
# Converting date_recorded to time since epoch
epoch_time = []
for date in features['date_recorded']:
date = datetime.strptime(date, '%Y-%m-%d')
epoch_time.append(date.timestamp())
transformed_features['date_recorded'] = epoch_time
for index in categorical_indices:
table = pd.crosstab(transformed_features[index], categorical_outcome)
c, p, dof, expected = chi2_contingency(table.values)
print(index + ': ' + str(p))
for index in continuous_indices:
table = pd.crosstab(transformed_features[index], categorical_outcome)
c, p, dof, expected = chi2_contingency(table.values)
print(index + ': ' + str(p))
###Output
funder: 0.0
installer: 0.0
wpt_name: 3.167496602060987e-15
basin: 0.0
subvillage: 0.0
region: 0.0
lga: 0.0
ward: 0.0
recorded_by: 1.0
scheme_management: 0.0
scheme_name: 0.0
extraction_type: 0.0
extraction_type_group: 0.0
extraction_type_class: 0.0
management: 0.0
management_group: 1.7446261385259768e-57
payment: 0.0
payment_type: 0.0
water_quality: 0.0
quality_group: 0.0
quantity: 0.0
quantity_group: 0.0
source: 0.0
source_type: 0.0
source_class: 1.983538119535752e-126
waterpoint_type: 0.0
waterpoint_type_group: 0.0
amount_tsh: 0.0
gps_height: 1.935832234019064e-40
longitude: 0.9999988241305058
latitude: 0.9999988211760183
num_private: 1.3700364563899945e-12
region_code: 0.0
district_code: 0.0
population: 1.2004595650770784e-174
public_meeting: 6.695873894822635e-63
permit: 1.5416464629999488e-15
construction_year: 0.0
###Markdown
Features that have a statistically significant difference with water pump condition:
- wpt name
- public meeting
- permit
- management group
- source class
- gps height
- num private
- population

wpt_name is the name of the water pump so we will not use that.
###Code
column_selector = ['public_meeting', 'permit', 'management_group', 'source_class', 'gps_height', 'num_private', 'population']
fig, ax = plt.subplots()
fig = sns.countplot(x="status_group", data=outcome)
ax.set_title('State of Water Pumps in Tanzania')
ax.set_xlabel('Water Pump State')
ax.set_ylabel('Count of Occurrences')
fig, ax = plt.subplots()
fig = sns.countplot(x="permit", data=features)
ax.set_title('Are Water Pumps in Tanzania Permitted or Not?')
ax.set_xlabel('The Water Pump is Permitted')
ax.set_ylabel('Count of Occurrences')
groups = features['management_group'].unique()
sizes = []
for group in groups:
sizes.append(len(features.loc[features['management_group'] == group]))
fig, ax = plt.subplots()
plt.pie(sizes, labels=groups, autopct='%1.1f%%', shadow=True)
ax.set_title('Management Groups for Tanzania Water Pumps')
fig.set_size_inches(12,12)
###Output
_____no_output_____
###Markdown
Parastatal: separate from the government, but its activities serve the government.
###Code
fig, ax = plt.subplots()
fig = sns.countplot(x="public_meeting", data=features)
ax.set_title('Public Meeting before Pump Installation?')
ax.set_xlabel('There was a Public Meeting Before Installation')
ax.set_ylabel('Count of Occurrences')
fig, ax = plt.subplots()
fig = sns.countplot(x="source_class", data=features)
ax.set_title('Water Source Type Distribution')
ax.set_xlabel('Type of Water Source')
ax.set_ylabel('Count of Occurrences')
fig, ax = plt.subplots()
fig = plt.scatter(x="id", y="population", data=features)
ax.set_title('Population Distribution by Water Pump Id')
ax.set_xlabel('Water Pump Id')
ax.set_ylabel('Population around Water Pump')
fig, ax = plt.subplots()
fig = plt.scatter(x="id", y="num_private", data=features)
ax.set_title('Private Water Pump Distribution by Water Pump Id')
ax.set_xlabel('Water Pump Id')
ax.set_ylabel('Private Water Pumps around Water Pump')
train_features, test_features, train_outcome, test_outcome = train_test_split(
transformed_features, # [column_selector]
categorical_outcome,
test_size=0.30
)
param_grid = {'criterion': ['gini', 'entropy']}
grid = GridSearchCV(DecisionTreeClassifier(), param_grid, scoring="accuracy")
grid.fit(train_features, train_outcome)
grid.score(test_features, test_outcome)
grid.best_params_
tree_test_predict = grid.predict(test_features)
param_grid2 = {'n_neighbors':range(1, 11), 'weights': ['uniform', 'distance']}
grid2 = GridSearchCV(KNeighborsClassifier(), param_grid2, scoring="accuracy")
grid2.fit(train_features, train_outcome)
grid2.score(test_features, test_outcome)
grid2.best_params_
knn_test_predict = grid2.predict(test_features)
###Output
_____no_output_____
###Markdown
It looks like the decision tree classifier does a lot better than the k nearest neighbors classifier.
###Code
test_features = pd.DataFrame(test_features)
test_features['prediction'] = tree_test_predict
test_features['actual'] = test_outcome
test_features.plot('actual', 'prediction', kind='scatter')
plt.plot(test_features.actual, test_features.actual)
plt.show()
test_features['err'] = test_features['prediction'] - test_features['actual']
sns.violinplot(test_features['actual'], test_features['err'])
feature_selector = RFECV(estimator=DecisionTreeClassifier(criterion='entropy'), step=1, scoring="accuracy").fit(transformed_features, categorical_outcome)
columns = feature_selector.get_support(indices=True)
colnames = transformed_features.columns[columns]
transformed_features = transformed_features[colnames]
clf = DecisionTreeClassifier(criterion='entropy')
clf.fit(transformed_features, categorical_outcome)
test_features = pd.read_csv('./data/TestFeatures.csv')
test_features = test_features[colnames]
###Output
_____no_output_____
###Markdown
We need to handle nulls and convert categorical data and dates to integers again.
###Code
null_indices = test_features.columns[test_features.isna().any()].tolist()
for index in null_indices:
mode = test_features[index].mode().iloc[0]
test_features[index].loc[pd.isnull(test_features[index])] = mode
# Converting date_recorded to time since epoch
epoch_time = []
for date in test_features['date_recorded']:
date = datetime.strptime(date, '%Y-%m-%d')
epoch_time.append(date.timestamp())
test_features['date_recorded'] = epoch_time
categorical_indices = test_features.loc[:, test_features.dtypes == object].columns.values
for index in categorical_indices:
test_features[index] = test_features[index].replace(test_features[index].unique(), np.arange(len(test_features[index].unique()))).astype('int')
print("done with " + index)
predictions = clf.predict(test_features)
test_outcome = pd.read_csv('./data/SubmissionFormat.csv')
test_outcome['status_group'] = predictions
test_outcome['status_group'] = test_outcome['status_group'].replace([0, 1, 2], ['functional', 'functional needs repair', 'non functional'])
test_outcome.to_csv('./data/Submission.csv', index=False)
###Output
_____no_output_____
###Markdown
Analysis section
This is based on [this](https://towardsdatascience.com/end-to-end-topic-modeling-in-python-latent-dirichlet-allocation-lda-35ce4ed6b3e0) guide.
###Code
import pandas as pd
import os
import re
from wordcloud import WordCloud
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation as LDA
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from pyLDAvis import sklearn as sklearn_lda
import pickle
import pyLDAvis
import xml.etree.ElementTree as ET
#NOTE: this will need to be configured to pull from output.csv instead (work in progress)
impact_statements['processed'] = impact_statements['content'].map(lambda x: re.sub('[^a-zA-Z0-9 ]', '', x))
impact_statements['processed'] = impact_statements['processed'].map(lambda x: x.lower())
#create a wordcloud
# Join the different processed titles together.
long_string = ','.join(list(impact_statements['processed'].values))
# Create a WordCloud object
wordcloud = WordCloud(background_color="white", max_words=5000, contour_width=3, contour_color='steelblue')
# Generate a word cloud
wordcloud.generate(long_string)
# Visualize the word cloud
wordcloud.to_image()
sns.set_style('whitegrid')
%matplotlib inline
# Helper function
def plot_10_most_common_words(count_data, count_vectorizer):
words = count_vectorizer.get_feature_names()
total_counts = np.zeros(len(words))
for t in count_data:
total_counts+=t.toarray()[0]
count_dict = (zip(words, total_counts))
count_dict = sorted(count_dict, key=lambda x:x[1], reverse=True)[0:10]
words = [w[0] for w in count_dict]
counts = [w[1] for w in count_dict]
x_pos = np.arange(len(words))
plt.figure(2, figsize=(15, 15/1.6180))
plt.subplot(title='10 most common words')
sns.set_context("notebook", font_scale=1.25, rc={"lines.linewidth": 2.5})
sns.barplot(x_pos, counts, palette='husl')
plt.xticks(x_pos, words, rotation=90)
plt.xlabel('words')
plt.ylabel('counts')
plt.show()
# Initialise the count vectorizer with the English stop words
count_vectorizer = CountVectorizer(stop_words='english')
# Fit and transform the processed titles
count_data = count_vectorizer.fit_transform(impact_statements['processed'])
# Visualise the 10 most common words
plot_10_most_common_words(count_data, count_vectorizer)
warnings.simplefilter("ignore", DeprecationWarning)
# Helper function
def print_topics(model, count_vectorizer, n_top_words):
words = count_vectorizer.get_feature_names()
for topic_idx, topic in enumerate(model.components_):
print("\nTopic #%d:" % topic_idx)
print(" ".join([words[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
# Tweak the two parameters below
number_topics = 4
number_words = 10
# Create and fit the LDA model
lda = LDA(n_components=number_topics, n_jobs=-1)
lda.fit(count_data)
# Print the topics found by the LDA model
print("Topics found via LDA:")
print_topics(lda, count_vectorizer, number_words)
#%%time
LDAvis_data_filepath = os.path.join('./ldavis_prepared_'+str(number_topics))
# # this is a bit time consuming - make the if statement True
# # if you want to execute visualization prep yourself
#if 1 == 1:
LDAvis_prepared = sklearn_lda.prepare(lda, count_data, count_vectorizer)
with open(LDAvis_data_filepath, 'wb') as f:
pickle.dump(LDAvis_prepared, f)
# load the pre-prepared pyLDAvis data from disk
with open(LDAvis_data_filepath, "rb") as f:
LDAvis_prepared = pickle.load(f)
pyLDAvis.save_html(LDAvis_prepared, './ldavis_prepared_'+ str(number_topics) +'.html')
###Output
_____no_output_____
###Markdown
Data Analysis
We would analyse the data to find valuable information. We would be doing the following analysis:
- Are non STEM students more self-confident than STEM students and which one of them are more likely to become entrepreneurs?
- Does competitiveness affect the physical health and mental disorder condition of a student?
- If a student is influenced, then what matters more: confidence or projects? Is this different for STEM and non STEM students?
- What is the relation between having a mental disorder and self-confidence or self-reliance? How does it change with age? Is it different for people who do not have a mental disorder?
- Is there a difference in the mental and physical health of people who may or may not become entrepreneurs?
- Which traits are most and least helpful for being an entrepreneur and do these traits differ for people from and not from a city?
- What is the correlation between a strong need to achieve something or the desire to take initiative and the probability of becoming an entrepreneur?
- How does competitiveness change with age or degree?
- Is there a relation between not having mental disorders and being influenced by someone?

First we have to pre-process the data
###Code
# import libraraies
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def get_data() -> pd.DataFrame:
# load data
data = pd.read_csv('data.csv')
# remove unwanted columns
data = data.drop(columns=['ReasonsForLack'], axis=1)
# saving the unique values for label encoding
values = {column: list(data[column].unique())
for column in data.columns.values}
# Add a field called is_stem which tells whether a student has taken a science degree or not
stem_fields = ['Engineering Sciences',
'Medicine, Health Sciences',
'Mathematics or Natural Sciences']
data['is_stem'] = data['EducationSector'].apply(lambda val: 1 if val in stem_fields else 0)
# Label encode the following columns: IndividualProject, Gender, City, Influenced, MentalDisorder
columns_to_label = ['IndividualProject', 'Gender', 'City', 'Influenced', 'MentalDisorder']
for column in columns_to_label:
data[column] = data[column].apply(lambda val: values[column].index(val))
# Onehot encode the following columns : EducationSector, KeyTraits
dummies_ed_sec = pd.get_dummies(data['EducationSector'], prefix='degree')
dummies_traits = pd.get_dummies(data['KeyTraits'], prefix='trait')
data = data.join(dummies_ed_sec.join(dummies_traits)).drop(columns=['EducationSector', 'KeyTraits'])
return data
###Output
_____no_output_____
###Markdown
Analysis 1: Are non STEM students more self-confident than STEM students, and which of them are more likely to become entrepreneurs?
###Code
data = get_data()
# -- a) Relation to self confidence
stem = data[data['is_stem'] == 1].groupby(['SelfConfidence']).count()['Age']
not_stem = data[data['is_stem'] == 0].groupby(['SelfConfidence']).count()['Age']
X = range(1, 6)
stem = [stem[index] for index in X]
stem = [(val * 100) / sum(stem) for val in stem]
not_stem = [not_stem[index] for index in X]
not_stem = [(val * 100) / sum(not_stem) for val in not_stem]
X_axis = np.arange(len(X))
plt.bar(X_axis - 0.2, stem, 0.4, label = 'STEM')
plt.bar(X_axis + 0.2, not_stem, 0.4, label = 'Non STEM')
plt.xticks(X_axis, X)
plt.xlabel("Lowest to Highest Confidence")
plt.ylabel("percentage of Students")
plt.legend()
plt.show()
# -- b) Relation to competency of being entrepreneur
stem = data[data['is_stem'] == 1].groupby(['y']).count()['Age']
not_stem = data[data['is_stem'] == 0].groupby(['y']).count()['Age']
X = range(2)
stem = [stem[index] for index in X]
stem = [(val * 100) / sum(stem) for val in stem]
not_stem = [not_stem[index] for index in X]
not_stem = [(val * 100) / sum(not_stem) for val in not_stem]
X_axis = np.arange(len(X))
plt.bar(X_axis - 0.2, stem, 0.4, label = 'STEM')
plt.bar(X_axis + 0.2, not_stem, 0.4, label = 'Non STEM')
plt.xticks(X_axis, ['incompetent', 'competent'])
plt.xlabel("competency of being entrepreneur")
plt.ylabel("percentage of Students")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Conclusion in a Nutshell:
- non STEM students are as self-confident as STEM students
- both STEM and non STEM students have the same competency of being an entrepreneur

Analysis 2: Does competitiveness affect the physical health and mental disorder condition of a student?
###Code
data = get_data()
# -- a) Relation to Mental Disorder Condition
vals = data[data['MentalDisorder'] == 1].groupby(['Competitiveness']).count()['Age']
X = range(1, 6)
y = [vals[index] for index in X]
fig, ax = plt.subplots()
ax.plot(X, y, '--')
ax.scatter(X, y, marker='P', color='red')
ax.set_xlabel('competitiveness')
ax.set_ylabel('number of students with mental disorder')
plt.show()
# -- b) Relation to Physical health
vals = data[data['GoodPhysicalHealth'] >= 3].groupby(['Competitiveness']).count()['Age']
X = range(1, 6)
y = [vals[index] for index in X]
fig, ax = plt.subplots()
ax.plot(X, y, '--')
ax.scatter(X, y, marker='P', color='red')
ax.set_xlabel('competitiveness')
ax.set_ylabel('number of students with good physical health')
plt.show()
###Output
_____no_output_____
###Markdown
Conclusion in a Nutshell:
- as competitiveness increases, the number of students with a mental disorder increases
- as competitiveness increases, the number of students with good physical health increases

Analysis 3: If a student is influenced, then what matters more: confidence or projects? Is this different for STEM and non STEM students?
###Code
data = get_data()
# -- a) Relation of confidence and projects
# Extracting confidence of influenced students
very_influenced_confidence = data[data['Influenced'] == 1].groupby(['SelfConfidence']).count()['Age']
array_1 = [1, 2, 3, 4, 5]
array_2 = [very_influenced_confidence[x] for x in array_1]
high_conf = (array_2[3] + array_2[4]) * 100 / sum(array_2)
# Extracting projects of influenced students
very_influenced_projects = data[data['Influenced'] == 1].groupby(['IndividualProject']).count()['Age']
project_1 = [0, 1]
project_2 = [very_influenced_projects[x] for x in project_1]
ind_p = project_2[1] * 100 / sum(project_2)
lis_1 = [high_conf, ind_p]
lis_2 = ["High Self-Confidence", "Undertaken Project"]
fig = plt.figure(figsize=(10, 6))
# creating the bar plot
plt.bar(lis_2, lis_1, color='maroon',
width=0.4)
plt.ylabel("Percentage of students")
plt.show()
# -- b) Relation of confidence for STEM and Non STEM students
# Extracting confidence for STEM students
very_influenced_confidence_stem = data[(data['Influenced'] == 1) &
(data["is_stem"] == 1)].groupby(['SelfConfidence']).count()['Age']
array_1 = [1, 2, 3, 4, 5]
array_2 = [very_influenced_confidence_stem[x] for x in array_1]
answer_confidence = (array_2[3] + array_2[4]) * 100 / sum(array_2)
# Extracting confidence for non-STEM students
very_influenced_confidence_non_stem = data[(data['Influenced'] == 1) &
(data["is_stem"] == 0)].groupby(['SelfConfidence']).count()['Age']
array_2_non = [very_influenced_confidence_non_stem[x] for x in array_1]
answer_confidence_non = (array_2_non[3] + array_2_non[4]) * 100 / sum(array_2_non)
lis_1 = [answer_confidence, answer_confidence_non]
lis_2 = ["STEM", "Non-STEM"]
fig_3 = plt.figure(figsize=(10, 6))
# creating the bar plot
plt.bar(lis_2, lis_1, color='maroon',
width=0.4)
plt.ylabel("Percentage of students with high self-confidence when influenced")
plt.xlabel("Degree type of students")
plt.show()
# -- c) Relation of individual projects for STEM and Non STEM students
# Extracting individual projects for STEM students
very_influenced_project_stem = data[(data['Influenced'] == 1) &
(data["is_stem"] == 1)].groupby(['IndividualProject']).count()['Age']
array_1_p = [0, 1]
array_2_p = [very_influenced_project_stem[x] for x in array_1_p]
answer = array_2_p[1] * 100 / sum(array_2_p)
# Extracting individual projects for non-STEM students
very_influenced_projects_non_stem = data[(data['Influenced'] == 1) &
(data["is_stem"] == 0)].groupby(['IndividualProject']).count()['Age']
array_2_non_p = [very_influenced_projects_non_stem[x] for x in array_1_p]
answer2 = array_2_non_p[1] * 100 / sum(array_2_non_p)
lis = [answer, answer2]
lis2 = ["STEM", "Non-STEM"]
fig_2 = plt.figure(figsize=(10, 6))
# creating the bar plot
plt.bar(lis2, lis, color='maroon',
width=0.4)
plt.ylabel("Percentage of students undertaking individual projects when influenced")
plt.xlabel("Degree type of students")
plt.show()
###Output
_____no_output_____
###Markdown
Conclusion in a Nutshell:
- When a student is influenced, the percentage of students undertaking projects and having high self-confidence is similar.
- When a student is influenced, the percentage of STEM students and non-STEM students with high self-confidence is similar.
- When a student is influenced, the percentage of STEM students undertaking individual projects is slightly higher than that of non-STEM students.

Note: High self-confidence is defined as a rating of 4 or 5 in self-confidence.

Analysis 4: What is the relation between having a mental disorder and self-confidence or self-reliance? How does it change with age? Is it different for people who do not have a mental disorder?
###Code
data = get_data()
# a) Relation with self-confidence
mental_disorder_conf = data[data['MentalDisorder'] == 1].groupby(['SelfConfidence']).count()['Age']
array_1 = [1, 2, 3, 4, 5]
array_2 = [mental_disorder_conf[x] for x in array_1]
high_conf = (array_2[3] + array_2[4]) * 100 / sum(array_2)
# b) Relation with self-reliance
mental_disorder_rel = data[data['MentalDisorder'] == 1].groupby(['SelfReliance']).count()['Age']
array_3 = [mental_disorder_rel[x] for x in array_1]
high_rel = (array_3[3] + array_3[4]) * 100 / sum(array_3)
lis_1 = [high_conf, high_rel]
lis_2 = ["High Self-Confidence", "High Self-reliance"]
fig = plt.figure(figsize=(10, 6))
# creating the bar plot
plt.bar(lis_2, lis_1, color='maroon',
width=0.4)
plt.ylabel("Percentage of students")
plt.show()
# c) Change with age
mental_disorder_age = data[data['MentalDisorder'] == 1]
mental_disorder_full = mental_disorder_age.groupby('Age').count()['Gender']
mental_disorder_age_c = mental_disorder_age[mental_disorder_age["SelfConfidence"] >= 4].groupby('Age').count()['Gender']
lis = [x for x in range(17, 24)]
lis.extend([25])
lis_2 = [mental_disorder_age_c[x] for x in lis]
lis_3 = [mental_disorder_full[x] for x in lis]
percentage = []
for x in range(len(lis_3)):
percentage.append(lis_2[x] * 100 / lis_3[x])
res = {lis[i]: percentage[i] for i in range(len(percentage))}
plt.bar(res.keys(), res.values())
plt.xlabel("Age of students with mental disorders")
plt.ylabel("Percentage with high self-confidence")
plt.show()
mental_disorder_age_r = mental_disorder_age[mental_disorder_age["SelfReliance"] >= 4].groupby('Age').count()['Gender']
lis_r = [x for x in range(17, 23)]
lis_2_r = [mental_disorder_age_r[x] for x in lis_r]
lis_4 = [mental_disorder_full[x] for x in lis_r]
percentage_r = []
for x in range(len(lis_4)):
percentage_r.append(lis_2_r[x] * 100 / lis_4[x])
res_r = {lis_r[i]: percentage_r[i] for i in range(len(percentage_r))}
plt.bar(res_r.keys(), res_r.values())
plt.xlabel("Age of students with mental disorders")
plt.ylabel("Percentage with high self-reliance")
plt.show()
# d) Relation with self-confidence -- No mental disorder
no_mental_disorder_conf = data[data['MentalDisorder'] == 0].groupby(['SelfConfidence']).count()['Age']
array_1 = [1, 2, 3, 4, 5]
array_2 = [no_mental_disorder_conf[x] for x in array_1]
high_conf_no = (array_2[3] + array_2[4]) * 100 / sum(array_2)
# e) Relation with self-reliance -- No mental disorder
no_mental_disorder_rel = data[data['MentalDisorder'] == 0].groupby(['SelfReliance']).count()['Age']
array_3 = [no_mental_disorder_rel[x] for x in array_1]
high_rel_no = (array_3[3] + array_3[4]) * 100 / sum(array_3)
lis_1 = [high_conf_no, high_rel_no]
lis_2 = ["High Self-Confidence", "High Self-reliance"]
fig = plt.figure(figsize=(10, 6))
# creating the bar plot
plt.bar(lis_2, lis_1, color='maroon',
width=0.4)
plt.ylabel("Percentage of students")
plt.show()
# f) Change with age
no_mental_disorder_age = data[data['MentalDisorder'] == 0]
no_mental_disorder_full = no_mental_disorder_age.groupby('Age').count()['Gender']
no_mental_disorder_age_c = \
no_mental_disorder_age[no_mental_disorder_age["SelfConfidence"] >= 4].groupby('Age').count()['Gender']
lis = [x for x in range(18, 23)]
lis.extend([24])
lis_2 = [no_mental_disorder_age_c[x] for x in lis]
lis_3 = [no_mental_disorder_full[x] for x in lis]
percentage = []
for x in range(len(lis_3)):
percentage.append(lis_2[x] * 100 / lis_3[x])
res = {lis[i]: percentage[i] for i in range(len(percentage))}
plt.bar(res.keys(), res.values())
plt.xlabel("Age of students without mental disorders")
plt.ylabel("Percentage with high self-confidence")
plt.show()
no_mental_disorder_age_r = \
no_mental_disorder_age[no_mental_disorder_age["SelfReliance"] >= 4].groupby('Age').count()['Gender']
lis_r = [x for x in range(17, 23)]
lis_r.extend([24])
lis_2_r = [no_mental_disorder_age_r[x] for x in lis_r]
lis_4 = [no_mental_disorder_full[x] for x in lis_r]
percentage_r = []
for x in range(len(lis_4)):
percentage_r.append(lis_2_r[x] * 100 / lis_4[x])
res_r = {lis_r[i]: percentage_r[i] for i in range(len(percentage_r))}
plt.bar(res_r.keys(), res_r.values())
plt.xlabel("Age of students without mental disorders")
plt.ylabel("Percentage with high self-reliance")
plt.show()
###Output
_____no_output_____
###Markdown
Conclusion in a Nutshell : With mental disorder:- We find that more than half of the students with mental disorders have high self-confidence and self-reliance, and a slightly higher percentage of these students have high self-reliance than high self-confidence. - We see that there is no significant difference in the percentage of students having mental disorders and having high self-confidence from age 17 to 23. However, there is a spike in the percentage at age 25, and there are no students who have a mental disorder and high self-confidence at age 24. - We see that the percentage of students with mental disorders and having high self-reliance increases till age 20, after which it again decreases. Without mental disorder:- We find that more than half of the students without mental disorders have high self-confidence and self-reliance, and a significantly higher percentage of these students have high self-reliance than high self-confidence.- We see that the percentage of students without mental disorders and having high self-confidence decreases from age 18 to 22. However, there is a spike in the percentage at age 24, and there are no students without a mental disorder and with high self-confidence at age 23.- We see that the percentage of students without mental disorders and having high self-reliance gradually increases from age 17 to 21. However, there is a spike in the percentage at age 24, and there are no students without a mental disorder and with high self-reliance at age 23. There is also a significant decline in the percentage at age 22. Note:- High self-confidence is defined as a rating of 4 or 5 in self-confidence.- High self-reliance is defined as a rating of 4 or 5 in self-reliance.- In the analysis, zero percentages might be explained by the lack of data; however, the sudden spikes at ages 24 and 25 are worth further investigation. Analysis 5 : Is there a difference in the mental and physical health of people who may or may not become entrepreneurs?
###Code
data = get_data()
# -- a) Association with Mental health
fig, ax = plt.subplots()
fig.set_size_inches(15.5, 7.5)
counts = data[data['y'] == 1].groupby(['MentalDisorder']).count()['y']
another_counts = data[data['y'] == 0].groupby(['MentalDisorder']).count()['y']
ax.bar(['mental disorder - entrepreneur',
'no mental disorder - entrepreneur',
'mental disorder - not entrepreneurs',
'no mental disorder - not entrepreneur'],
[counts[0], counts[1], another_counts[0], another_counts[1]])
ratio_1_calc = round(counts[0] / (counts[0] + another_counts[0]) * 1000) / 10
ratio_2_calc = round(counts[1] / (counts[1] + another_counts[1]) * 1000) / 10
ratio_1 = f'percentage of people with a mental health disorder who have the potential to become an entrepreneur : {ratio_1_calc}%'
ratio_2 = f'percentage of people who are mentally fit and have the potential to become an entrepreneur : {ratio_2_calc}%'
ax.set_xlabel('has mental disorder or not \n' + ratio_1 + '\n' + ratio_2)
ax.set_ylabel('number of students who might become entrepreneurs')
ax.set_title('Mental health vs no. of students who can become entrepreneurs')
plt.show()
# -- b) Association with Physical health
fig, ax = plt.subplots()
fig.set_size_inches(15.5, 7.5)
counts = data[data['y'] == 1].groupby(['GoodPhysicalHealth']).count()['y']
another_counts = data[data['y'] == 0].groupby(['GoodPhysicalHealth']).count()['y']
ax.bar(['bad physical health - entrepreneur',
'good physical health - entrepreneur',
'bad physical health- not entrepreneur',
'good physical health - not entrepreneur'],
[counts[1] + counts[2], counts[3] + counts[4] + counts[5],
another_counts[1] + another_counts[2],
another_counts[3] + another_counts[4] + another_counts[5]])
ratio_1_calc = round((counts[1] + counts[2]) /
((counts[1] + counts[2]) +
(another_counts[1] + another_counts[2])) * 1000) / 10
ratio_2_calc = round((counts[3] + counts[4] + counts[5]) /
((counts[3] + counts[4] + counts[5]) +
(another_counts[3] + another_counts[4] + another_counts[5])) * 1000) / 10
ratio_1 = f'percentage of people with bad physical health who have the potential to become an entrepreneur : {ratio_1_calc}%'
ratio_2 = f'percentage of people who are physically fit and have the potential to become an entrepreneur : {ratio_2_calc}%'
ax.set_xlabel('rating from worst to best physical health \n' + ratio_1 + '\n' + ratio_2)
ax.set_ylabel('number of students who might become entrepreneurs')
ax.set_title('Physical health vs no. of students who can become entrepreneurs')
plt.show()
###Output
_____no_output_____
###Markdown
Conclusion in a Nutshell : - Having a mental disorder does not make a difference, but physical health does. Analysis 6 : Which traits are most and least helpful for being an entrepreneur, and do these traits differ for people from and not from a city?
###Code
data = get_data()
traits = ['trait_Passion',
'trait_Positivity',
'trait_Resilience',
'trait_Vision',
'trait_Work Ethic']
def get_vals(d, is_remote = False):
"""Return no of entrepreneurs corresponding to traits"""
if not is_remote:
return [d[1][0][0][0][0],
d[0][1][0][0][0],
d[0][0][1][0][0],
d[0][0][0][1][0],
d[0][0][0][0][1]]
return [d[1][0][0][0][0],
d[0][1][0][0][0],
0,
d[0][0][0][1][0],
0]
def get_percent_vals(d, is_remote = False):
"""Return percentage of entrepreneurs corresponding to traits"""
vals = get_vals(d, is_remote)
return [(val * 100) / sum(vals) for val in vals]
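# Side note (a sketch, assuming the trait_* columns are one-hot indicators, so exactly one
# trait is 1 per respondent): the same per-trait counts could be read off with a plain
# column sum, which also returns 0 for traits that never occur in a subgroup, e.g.
#   data[data['y'] == 1][traits].sum()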
# -- a) Simple relation in traits and competency of being entrepreneur
ploting_data = data[data['y'] == 1].groupby(traits).count()['Age']
plt.bar(traits, get_vals(ploting_data))
plt.xlabel("trait")
plt.ylabel("Number of Students who have the competency of being entrepreneur")
plt.show()
# -- b) Difference in City and Remote
query_city = 'y == 1 and City == 0'
query_remote = 'y == 1 and City == 1'
city = data.query(query_city).groupby(traits).count()['Age']
remote = data.query(query_remote).groupby(traits).count()['Age']
X_axis = np.arange(len(traits))
city_y = get_percent_vals(city, False)
remote_y = get_percent_vals(remote, True)
plt.bar(X_axis - 0.2, city_y, 0.4, label = 'City')
plt.bar(X_axis + 0.2, remote_y, 0.4, label = 'Remote')
plt.xticks(X_axis, traits)
plt.xlabel("trait")
plt.ylabel("Number of Students who have the competency of being entrepreneur")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Conclusion in a Nutshell : - The order is Positivity > Passion > Work ethic > Vision > Resilience.- The order is the same for both city and remote areas. Analysis 7 : What is the correlation between a strong need to achieve something or the desire to take initiative and the probability of becoming an entrepreneur?
###Code
data = get_data()
# a) Strong need to achieve something
strong_need = data[data['StrongNeedToAchieve'] >= 4].groupby('y').count()['Gender']
lis = [0, 1]
full_need = data.groupby('y').count()['Gender']
lis_2 = [strong_need[x] for x in lis]
lis_3 = [full_need[x] for x in lis]
percentage = []
lis_4 = ["0", "1"]
for x in range(len(lis_3)):
percentage.append(lis_2[x] * 100 / lis_3[x])
fig = plt.figure(figsize=(10, 6))
# creating the bar plot
plt.bar(lis_4, percentage, color='maroon',
width=0.4)
plt.ylabel("Percentage of students with high need to achieve something")
plt.xlabel("Probability of becoming an entrepreneur")
plt.show()
# b) Desire to take initiative
strong_desire = data[data['DesireToTakeInitiative'] >= 4].groupby('y').count()['Gender']
lis = [0, 1]
full_desire = data.groupby('y').count()['Gender']
lis_2 = [strong_desire[x] for x in lis]
lis_3 = [full_desire[x] for x in lis]
percentage = []
lis_4 = ["0", "1"]
for x in range(len(lis_3)):
percentage.append(lis_2[x] * 100 / lis_3[x])
fig = plt.figure(figsize=(10, 6))
# creating the bar plot
plt.bar(lis_4, percentage, color='maroon',
width=0.4)
plt.ylabel("Percentage of students with high desire to take initiative")
plt.xlabel("Probability of becoming an entrepreneur")
plt.show()
###Output
_____no_output_____
###Markdown
Conclusion in a Nutshell:- We see that 60% of students with no probability of becoming an entrepreneur had a high need to achieve something, and 70% of students with a probability of becoming an entrepreneur had a high need to achieve something. - We see that 55% of students with no probability of becoming an entrepreneur had a high desire to take initiative, and 70% of students with a probability of becoming an entrepreneur had a high desire to take initiative. Note: - A high desire to take initiative and a high need to achieve something are defined as a rating of 4 or 5 on their respective scales. Analysis 8 : How does competitiveness change with age or degree?
###Code
data = get_data()
# a) Change with age
change_with_age = data[data['Competitiveness'] >= 4].groupby('Age').count()['Gender']
lis = [x for x in range(17, 25)]
full_age = data.groupby('Age').count()['Gender']
lis_2 = [change_with_age[x] for x in lis]
lis_3 = [full_age[x] for x in lis]
percentage = []
for x in range(len(lis_3)):
percentage.append(lis_2[x] * 100 / lis_3[x])
res = {lis[i]: percentage[i] for i in range(len(percentage))}
plt.bar(res.keys(), res.values())
plt.xlabel("Age of students")
plt.ylabel("Percentage with high competitiveness")
plt.show()
# b) Change with degree
change_with_degree = data[data['Competitiveness'] >= 4].groupby('is_stem').count()['Gender']
lis = [0, 1]
full_degree = data.groupby('is_stem').count()['Gender']
lis_2 = [change_with_degree[x] for x in lis]
lis_3 = [full_degree[x] for x in lis]
percentage = []
for x in range(len(lis_3)):
percentage.append(lis_2[x] * 100 / lis_3[x])
res = {lis[i]: percentage[i] for i in range(len(percentage))}
lis_2 = ["Non-STEM Degree", "STEM Degree"]
fig = plt.figure(figsize=(10, 6))
# creating the bar plot
plt.bar(lis_2, percentage, color='maroon',
width=0.4)
plt.ylabel("Percentage of students")
plt.show()
###Output
_____no_output_____
###Markdown
Conclusion in a Nutshell:- With age: We find that about 60% of students have high competitiveness at age 17. This percentage decreases for two years, then increases at age 20 and remains almost the same for the following ages. Then, there is a spike in the percentage at age 24.- With degree: We find that about 60% of STEM students have high competitiveness whereas 40% of non-STEM students have high competitiveness. Note:- High competitiveness is defined as a rating of 4 or 5 on competitiveness. Analysis 9 : Is there a relation between not having mental disorders and being influenced by someone?
###Code
data = get_data()
from data_preprocessing import data
import matplotlib.pyplot as plt
no_mental_disorder = data[data['MentalDisorder'] == 0].groupby("Influenced").count()['Age']
lis = [0, 1]
lis_2 = [no_mental_disorder[x] for x in lis]
influenced = lis_2[1] / sum(lis_2)
mental_disorder = data[data['MentalDisorder'] == 1].groupby("Influenced").count()['Age']
lis_3 = [mental_disorder[x] for x in lis]
influenced_1 = lis_3[1] / sum(lis_3)
plot = [influenced, influenced_1]
plot_1 = ["No mental disorder", "Mental disorder"]
fig = plt.figure(figsize=(10, 6))
# creating the bar plot
plt.bar(plot_1, plot, color='maroon',
width=0.4)
plt.ylabel("Percentage of students influenced")
plt.show()
###Output
_____no_output_____
###Markdown
Data Analysis
###Code
from gensim.corpora import Dictionary
from gensim.models import Phrases, TfidfModel
from gensim.models.coherencemodel import CoherenceModel
from gensim.models.ldamodel import LdaModel
from gensim.models.phrases import Phraser
# import math
from matplotlib import pyplot as plt
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
import pandas as pd
import pickle
import re
# from sklearn.cluster import KMeans
# from sklearn.feature_extraction.text import TfidfVectorizer
# from sklearn.mixture import GaussianMixture
pd.options.mode.chained_assignment = None
REVIEWS = './data/cellphone_reviews.json'
###Output
_____no_output_____
###Markdown
Utils
###Code
def flatten(lol):
    """Flatten a list of lists into a single list, e.g. [[1, 2], [3]] -> [1, 2, 3]."""
    return [l for ll in lol for l in ll]
###Output
_____no_output_____
###Markdown
Pre-processing
###Code
reviews = pd.read_json(REVIEWS, lines=True)
reviews['unhelpful'] = reviews['helpful'].apply(lambda x: x[1] - x[0])
reviews['helpful'] = reviews['helpful'].apply(lambda x: x[0])
reviews['reviewText'] = reviews['reviewText'].str.lower()
reviews.drop_duplicates(inplace=True)
reviews.head(5)
###Output
_____no_output_____
###Markdown
Filtering for negative reviews
###Code
negative_reviews = reviews.loc[reviews['overall'] <= 2]
negative_reviews.head(5)
###Output
_____no_output_____
###Markdown
Feature extraction
###Code
STOP_WORDS = set(stopwords.words('english'))
STOP_WORDS -= {'not', 'no'}
def tokenizer(sentence):
    tokens = [re.sub(r'[\W_]+', '', word) \
for word in word_tokenize(sentence) \
if len(word) > 2 and word not in STOP_WORDS]
return tokens
def ngram(sent, n):
"""
Splits a sentence into n-grams.
"""
# Split sentence into words
tokens = tokenizer(sent)
# Zip n consecutive elements into tuples
ngram_toks = zip(*[tokens[i:] for i in range(n)])
# Concat
ngrams = [' '.join(tok) for tok in ngram_toks]
filtered_ngrams = [ngram for ngram in ngrams \
if len(ngram) > 2]
return filtered_ngrams
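# Example (sketch): with the stop-word list above ('the' is removed, 'not'/'no' are kept),
#   ngram('the screen cracked easily', 2) -> ['screen cracked', 'cracked easily']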
corpus = negative_reviews['reviewText'].values
all_sentences = flatten([sent_tokenize(review) for review in corpus])
all_valid_words = [tokenizer(sent) for sent in all_sentences]
###Output
_____no_output_____
###Markdown
Building n-gram models. Abandoned because `gensim` is not producing many phrases; the output is mostly unigrams.
###Code
# Check bigrams
bigrams = Phrases(all_valid_words, min_count=3)
bigram_mdl = Phraser(bigrams)
for avw in all_valid_words[:50]:
print(bigram_mdl[avw])
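# A possible tweak (a sketch, not run here): gensim's Phrases scores candidate bigrams
# against a `threshold` (default 10.0); lowering it would typically promote more word
# pairs to phrases if the defaults are too conservative for this corpus, e.g.
#   bigrams_loose = Phrases(all_valid_words, min_count=3, threshold=5)
#   bigram_mdl_loose = Phraser(bigrams_loose)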
###Output
['worked', 'first', 'week', 'charge', 'phone']
['waste_money']
['worked_great', 'first', 'couple_weeks', 'stopped', 'completely', 'basically', 'small', 'waste_money']
['disappointed', 'nt', 'work', 'ipad']
['get', 'buying', 'cheap', 'adapter']
['week', 'one', 'side', 'works']
['works', 'one', 'side', 'time']
['connect', 'two', 'cables', 'one', 'side', 'stop_working', 'also', 'overheated', 'burning', 'fuses']
['purchased', 'two', 'problem']
['cheap', 'bad', 'quality']
['nt', 'last_long']
['worked_great', 'worked', 'cheap', 'piece_plastic', 'crap', 'nt', 'expected', 'last']
['bought', 'could', 'use', 'charge', 'tab', 'time']
['tab', 'not', 'recognize', 'high', 'power', 'port', 'either', 'charge', 'use', 'power', 'not', 'charge', 'powered']
['could', 'give', 'usb', 'car_charger', 'stars', 'although', 'worked_fine', 'months', 'subsequently', 'died', 'mepros', 'has', 'usb_ports', 'charging', 'one', 'top', '21_amps']
['bottom', 'slot', 'lower', 'presumably', '15', 'ampsfits', 'well', 'charging', 'socket', 'holds', 'tight']
['ve', 'loose', 'charging', 'socketworks', 'well', 'charge', 'iphone', 'top', 'slot', 'bottom', 'slot', 'works', 'android_phones', 'except', 'high_end', 'oneshas', 'blue_led', 'light', 'tell', 'ready', 'chargethe', 'usb', 'sockets', 'seem', 'well', 'made', 'tightly', 'fit', 'cablespretty', 'solid', 'constructionit', 'cost', 'less', '2cons', 'it', 'died', 'monthsi', 'really_enjoyed', 'using', 'usb', 'charger']
['rapidly', 'charged', 'iphone', 'worked_well', 'charging', 'phones', 'family_friends']
['two', 'slot', 'design', 'makes', 'design', 'handy', 'slot', 'design', 'one', 'else', 'charge', 'device', 'unless', 'remove', 'cord']
['course', 'charger', 'died', 'back', 'slot', 'design', 'using', 'beforeafter', 'months', 'began', 'notice', 'led_light', 'flickering']
['less_week', 'later', 'quit_working', 'permanently']
['not', 'sure', 'problem', 'faulty', 'wiring', 'bad', 'design', 'guess_ll', 'never', 'know']
['thought', 'getting', 'another', 'since', 'cheap', 'thought', 'better']
['maybe', 'cheap', 'simply', 'disposable', 'short_period', 'timefor', 'whatever_reason', 'died', 'forced', 'look', 'another', 'slot', 'design', 'nt', 'found', 'one', 'price_range', 'yet']
['bought', 'tried', 'test', 'first', 'minutes', 'charging', 'felt', 'hot']
['pulled', 'product', 'smelled', 'burnt']
['tried', 'one', 'thing']
['careful', 'one', 'could', 'fire_hazard', 'could_potentially', 'destroy', 'electrical', 'system']
['loved', 'case', 'first', 'received', 'shortly', 'case', 'started_peel', 'first', 'not', 'know', 'looked', 'back', 'case', 'missing', 'spots']
['guess', 'sometimes', 'good', 'deal', 'not', 'really', 'good', 'dealwould', 'not', 'purchase']
['looked_like', 'used']
['broken', 'got', 'paint_job', 'horriblei', 'would', 'never', 'get']
['case', 'reason', 'peeling', 'nt', 'much', 'left', 'orginal', 'skin', 'loved', 'case', 'pink', 'favorite_color', 'would', 'nt', 'recommend', 'specific', 'one', 'anyone']
['charger', 'lasted_week', 'stopped_charging', 'samsung_galaxy']
['really', 'need', 'start', 'making', 'chargers', 'better', 've', 'thru', 'several']
[]
['junk']
['product', 'must', 'miss', 'labeled']
['not', 'get', 'samsung', 'phone', 'charge', 'oem', 'charger']
['lights', 'phone', 'says', 'charging', 'nothing_happens']
['not', 'sure', 'got_defective', 'product', 'upsetting', 'sit', 'car', 'hours', 'not', 'see', 'phone', 'recharge']
['need', 'reliable', 'charger', 'not', 'purchase', 'productif', 'need', 'car_charger', 'samsung', 'would', 'recommend', 'motorola', 'vehicle', 'power_adapter']
['charges_fast', 'dependable']
['samsung', 'car_charger', 'stopped_working', 'within_month', 'period']
['thought', 'fuse', 'blown', 'inside', 'unit', 'attempted', 'change', 've', 'noticed', 'nt', 'come_apart', 'like', 'vehicle', 'chargers']
['great', 'price', 'right_box', 'plug', 'upload', 'sum', 'juice', 'nt', 'know', 'dropping', 'repeatedly', 'floorboard', 'occasionally', 'stuffing', 'console', 'hide', 'caused', 'failure', 'soon', 'green_light', 'failed', 'light', 'phones', 'display', 'showedit', 'nt', 'charging']
['either', 'spend', 'several', 'dollars', 'one', 'witha', 'heavier', 'cordstronger', 'shell', 'treat', 'delicate', 'flower']
['plugged', 'car', 'worked', 'first', 'couple_days']
['nt', 'worked', 'since']
['phone', 'not', 'even_recognize', 'plugged']
['thing', 'worked_well', 'actually', 'functioned', 'today', 'stopped_working', 'past', 'day', 'return_window', 'days', 'give', 'takei', 'guessing', 'quality', 'might', 'not', 'great', 'stopped_working', 'knows', 'waste_money']
['first', 'kind', 'cheaplooking', 'try', 'buy', 'cheapest', 'oem', 'car_charger', 'could', 'phone', 'forgiveable']
['springs', 'side', 'nt', 'much_force', 'though', 'constantly', 'slipping', '12v', 'car', 'port']
['cable', 'sturdy', 'rubber', 'near', 'connector', 'kind', 'stiff', 'makes', 'charging', 'operating', 'phone', 'bit', 'hassle']
['held', 'well', 'since', 'purchased', 'thoughupdate', 'stopped_charging', 'month']
['received', 'product', 'timely_manner', 'delightful', 'however', 'gave_gift', 'embarrassed', 'find', 'defective', 'item']
['reimbursed', 'product', 'would', 'much', 'rather', 'product', 'worked', 'first_time']
['worked_fine', 'days', 'change', 'cord', 'the', 'charger', 'working_fine', 'point', 'rather', 'send_back', 'bought', 'another', 'cord', 'sprint', '1900']
['thought_would', 'samsung', 'product', 'look', 'like', 'cheap', 'counterfiet']
['came', 'crushed', 'slow', 'charge', 'phone']
['marking', 'legit', 'samsung', 'product', 'cord', 'stop_working', 'days']
['not', 'upset', 'inexpensive', 'maybe', 'spend_little', 'buy', 'better', 'product', 'amazon']
['work', 'worked_well']
['charger', 'nt', 'charge', 'battery', 'samsung_galaxy', 'i9100', 'properlywhen', 'connect', 'screen', 'gets', 'frozen', 'cell', 'behaves', 'unusual', 'abnormal', 'way']
['right', 'timewall', 'adapter', 'expected', 'works_well', 'dose_not', 'quickcharge', 'states', 'whole_reason', 'chose', 'chager', 'horribly', 'disappointing', 'inconvenient', 'micro_usb', 'useless', 'looks', 'acts', 'cheaply_made', 'dose_not', 'fit', 'well', 'ports', 'either', 'end', 'periodically', 'stop_working', 'phone', 'freeze', 'turn', 'periodically', 'well', 'know', 'usd', 'tried_several', 'devices', 'adapters', 'problems', 'never', 'order', 'amazon', 'two', 'orders', 'come', 'without', 'defect', 'wrong', 'probuct', 'ugh']
['nutshell', 'not', 'oem', 'charger']
['although', 'samsung', 'markings', 'nt', 'seat', 'well', 'samsung', 'phone']
['means', 'either', 'meant', 'another', 'market', 'counterfeit']
['said', 'would', 'nt', 'buy', 'againaugust', '2013', 'updatethe', 'seller', 'tried', 'make', 'amends', 'sending', 'three', 'replacement', 'chargers']
['using', 'still', 'garbage']
['not', 'charge', 'phone', 'fact', 'drains', 'phone']
['must', 'counterfeit', 'product']
['samsung', 'markings', 'not', 'charge', 'phone']
['not', 'buy', 'product']
[]
['weeks', 'charger', 'fell_apart', 'taking', 'plug', 'not', 'strong', 'charger', 'would', 'asked', 'money', 'nt', 'even', 'worth']
['one', 'came_broken', 'bottom', 'pice', 'snaps', 'sides', 'main', 'backing', 'inlay', 'piece']
['came', 'scuffed', 'paint', 'pieces', 'not', 'click_together']
['not', 'broken', 'would', 'like']
['also', 'nt', 'three', 'click', 'pieces']
['never', 'almost', 'bought', 'product', 'based', 'amazing', 'sounding', 'fake', 'reviews', 'provided', 'shills', 'powerbear']
['look', 'positive_reviews', 'case', 'see', 'none', 'verified_purchases', 'yet', 'every', 'negative_review', 'one', 'course', 'verified_purchase']
['means', 'everyone', 'actually', 'bought', 'used', 'product', 'thinks', 'crap']
['furthermore', 'many_reviewers', 'posted', 'twice', 'product', 'per', 'color']
['amazon', 'allows', 'beyond', 'not', 'point']
['also', 'look', 'profiles', 'positive', 'reviewers', 'see', 'posted', 'multiple', 'positive_reviews', 'powerbear', 'products', 'many_reviews', 'carbon', 'copies']
['beware', 'paid', 'shills']
['watchout', 'positive_reviews', 'fake']
['even', 'reviewed', 'twice', 'using', 'name']
['sleazy', 'way', 'sell', 'item']
['raving', 'friend', 'amazon', 'great', 'phone', 'cases', 'cheap', 'bought']
['nt', 'even', 'fit', 'phone', 'right']
['poorly_made']
['bought', 'samsung_galaxy', 'seller', 'slow', 'getting', 'item']
['also', 'notice', 'product', 'fit', 'sgs2', 'cut_outs', 'not', 'good']
['cut', 'opening', 'small', 'power_button', 'also', 'extra', 'cut', 'reason']
['fit', 'nice', 'gave_stars', 'bad', 'opening', 'phone', 'costly']
['case', 'not', 'fit', 'phone', 'tad', 'small']
['could', 'get', 'corners', 'case']
['wound_buying', 'two', 'piece', 'case', 'cost', 'local_store']
###Markdown
N-gram generation
###Code
negative_reviews.loc[:, 'reviewSents'] = negative_reviews['reviewText'] \
.apply(sent_tokenize)
negative_reviews['unigrams'] = negative_reviews['reviewSents'] \
.apply(lambda sents: flatten([ngram(sent, 1) for sent in sents]))
negative_reviews['bigrams'] = negative_reviews['reviewSents'] \
.apply(lambda sents: flatten([ngram(sent, 2) for sent in sents]))
negative_reviews['ubgrams'] = negative_reviews['unigrams'] \
+ negative_reviews['bigrams']
negative_reviews['trigrams'] = negative_reviews['reviewSents'] \
.apply(lambda sents: flatten([ngram(sent, 3) for sent in sents]))
negative_reviews['ngrams'] = negative_reviews['unigrams'] \
+ negative_reviews['bigrams'] \
+ negative_reviews['trigrams']
negative_reviews.head(5)
###Output
_____no_output_____
###Markdown
Topic Inference Estimating no. of topics (ngrams) > **Note:** `u_mass` coherence values kept decreasing for all models, so the `c_v` metric is used as the measure instead. Also considered k-means based on TF-IDF, but that's "hard" clustering.
###Code
def get_best_model(texts, max_topics, step=1):
dictionary = Dictionary(texts)
lda_corpus = [dictionary.doc2bow(text) for text in texts]
est_topics = range(2, max_topics + step, step)
lda_mdls = []
coh_vals = []
for i in est_topics:
lda_mdl = LdaModel(lda_corpus, num_topics=i, id2word=dictionary,
passes=3, alpha=[0.01] * i, eta=[0.01] * len(dictionary.keys()))
lda_mdls.append(lda_mdl)
coh_mdl = CoherenceModel(model=lda_mdl, texts=texts,
corpus=lda_corpus, dictionary=dictionary, coherence='c_v')
coh_vals.append(coh_mdl.get_coherence())
plt.plot(est_topics, coh_vals)
plt.xlabel('Number of topics')
plt.ylabel('Coherence values')
plt.title('Elbow curve')
plt.show()
best_mdl = None
best_cv = 0
for idx, cv in enumerate(coh_vals):
if cv > best_cv:
best_cv = cv
best_mdl = lda_mdls[idx]
return best_mdl
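# For reference (a sketch): the intrinsic u_mass measure mentioned above only needs the
# bag-of-words corpus, e.g.
#   CoherenceModel(model=lda_mdl, corpus=lda_corpus, dictionary=dictionary, coherence='u_mass')
# but since its values kept decreasing for every model here, c_v is used instead.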
###Output
_____no_output_____
###Markdown
The n-gram version has decreasing `c_v` coherence values, so it is not useful in determining the number of topics.
###Code
ngram_texts = list(negative_reviews['ngrams'].values)
best_ngram_model = get_best_model(ngram_texts, 20, 2)
###Output
_____no_output_____
###Markdown
Too few trigrams occur in multiple reviews, so topics cannot be inferred from the topic words (they overlap).
###Code
trigram_texts = list(negative_reviews['trigrams'].values)
best_trigram_model = get_best_model(trigram_texts, 20, 2)
###Output
_____no_output_____
###Markdown
An LDA model based on unigrams + bigrams with ~8 topics seems to be the best-performing model.
###Code
ubgram_texts = list(negative_reviews['ubgrams'].values)
best_ubgram_model = get_best_model(ubgram_texts, 20, 2)
###Output
_____no_output_____
###Markdown
The optimal number of clusters extends beyond 20, which doesn't seem realistic.
###Code
bigram_texts = list(negative_reviews['bigrams'].values)
best_bigram_model = get_best_model(bigram_texts, 20, 2)
###Output
_____no_output_____
###Markdown
Lower coherence values in general.
###Code
unigram_texts = list(negative_reviews['unigrams'].values)
best_unigram_model = get_best_model(unigram_texts, 20, 2)
###Output
_____no_output_____
###Markdown
Looking at topic words
###Code
NUM_TOPICS = 8
ubgram_texts = list(negative_reviews['ubgrams'].values)
dictionary = Dictionary(ubgram_texts)
lda_corpus = [dictionary.doc2bow(text) for text in ubgram_texts]
best_lda_mdl = LdaModel(lda_corpus, num_topics=NUM_TOPICS, id2word=dictionary,
passes=5, alpha=[0.01] * NUM_TOPICS, eta=[0.01] * len(dictionary.keys()))
best_lda_mdl.show_topics(num_topics=NUM_TOPICS, num_words=15)
best_lda_mdl.save('./reviews/model/lda_mdl.mm')
###Output
_____no_output_____
###Markdown
Determining reasons. Manual topic labelling, still problems with topic quality.
###Code
TOPIC_LABELS = {
0: 'cheap product, waste of money',
1: 'no protection',
2: 'faulty charging cable',
3: 'screen protector bubbles',
4: 'lousy sound quality',
5: 'does not work',
6: 'poor battery life',
7: 'case does not fit'
}
###Output
_____no_output_____
###Markdown
Applying LDA Model
###Code
def review_topic(lda_mdl, dictionary, review_text):
    """Return the most probable topic id for a tokenised review."""
    topic = None
    prob = 0
    for t, p in lda_mdl[dictionary.doc2bow(review_text)]:
        if p > prob:
            topic = t
            prob = p  # track the best probability seen so far, not just the last topic
    return topic
negative_reviews['topic_no'] = negative_reviews['ubgrams'] \
.apply(lambda x: review_topic(best_lda_mdl, dictionary, x))
topic_freq = pd.DataFrame(negative_reviews \
.groupby(['asin', 'topic_no'])['reviewText'] \
.agg(['count', list]))
display(topic_freq.head(5))
topic_pcts = pd.merge(topic_freq, topic_freq.groupby(level=0)['count'] \
.apply(lambda x: x / x.sum()).rename('perc'),
left_index=True, right_index=True) \
.reset_index()
topic_pcts['topic'] = topic_pcts['topic_no'] \
.apply(lambda x: TOPIC_LABELS[x])
topic_pcts.head(5)
###Output
_____no_output_____
###Markdown
Pickling output
###Code
review_dict = {}
for _, row in topic_pcts.iterrows():
pdt_asin = str(row['asin'])
pdt_dict = {'reason': row['topic'],
'frequency': row['perc'],
'reviews': row['list']}
if pdt_asin not in review_dict:
review_dict[pdt_asin] = [pdt_dict]
else:
review_dict[pdt_asin].append(pdt_dict)
with open('./reviews/saved_topics.pkl', 'wb') as out_pkl:
pickle.dump(review_dict, out_pkl)
###Output
_____no_output_____
###Markdown
Is Climate Change Real? How Do You Know?
###Code
# make imports
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# read datasets (disasters)
carbon_df = pd.read_csv('Data/carbon-emissions.csv')
disasters_df = pd.read_csv('Data/natural-disaster-data/number-of-natural-disaster-events.csv')
econ_df = pd.read_csv('Data/natural-disaster-data/economic-damage-from-natural-disasters.csv')
###Output
_____no_output_____
###Markdown
1: Getting a "Feel" For the Data[Link to natural disasters dataset.](https://www.kaggle.com/dataenergy/natural-disaster-data)
###Code
# Look at the data for natural disasters
disasters_df.head()
# drop the 'Code' column, which is not needed for this analysis
disasters_df = disasters_df.drop(columns='Code')
econ_df = econ_df.drop(columns='Code')
###Output
_____no_output_____
###Markdown
What Kinds of Values are There for "Entity"?
###Code
disasters_df['Entity'].unique()
###Output
_____no_output_____
###Markdown
Which Years Do We Have Data For?
###Code
(disasters_df['Year'].min(), disasters_df['Year'].max())
###Output
_____no_output_____
###Markdown
2: How Are the Natural Disasters Changing Over Time? Is Climate Change Real? Stats By the Decade: Measures of Central Tendency How does the mean number of natural disasters change by the decade? Functions to Compute Mean Amount of Disasters Annually, for a Given Decade
###Code
def grab_decade(start_yr, y_c_data, interval=10):
'''Return years and counts for only a specific interval length.'''
end_yr = int(start_yr) + interval - 1
years = y_c_data[(y_c_data['years'] <= end_yr) & (y_c_data['years'] >= start_yr)]
return years
def compute_decade_mean(start_yr, y_c_data):
'''Sum the number of total disasters over a given period of 10 years, returns the mean.'''
years = grab_decade(start_yr, y_c_data)
# compute and return the mean
return years['counts'].sum() / 10
###Output
_____no_output_____
###Markdown
Function to Perform This Step for all Decades 1900-2010
###Code
def compute_means(y_c_data):
'''Returns a dict of all mean number of disasters that occurred for every decade, 1900-2010.'''
# compute the amount of decades in our data
start_yr, end_yr = y_c_data['years'].min(), y_c_data['years'].max()
decades = (end_yr - start_yr) // 10
# store all the means in a dict
decade_means = dict()
for i in range(start_yr, end_yr, 10):
decade_means[f'{i}'] = compute_decade_mean(i, y_c_data)
return decade_means
# Calling the function
ALL_DIS = 'All natural disasters'
COUNT = 'Number of reported natural disasters (reported disasters)'
counts = disasters_df[(disasters_df['Entity'] == ALL_DIS)][COUNT] # just the counts of all natural disasters, all years
years = disasters_df[(disasters_df['Entity'] == ALL_DIS)]['Year'] # just the years
y_c_data = pd.DataFrame(data={
'years':years,
'counts':counts})
means_by_decade = compute_means(y_c_data)
###Output
_____no_output_____
###Markdown
Plot of Changing Mean of Disaster Counts, By Decade
###Code
plt.plot(list(means_by_decade.keys()), list(means_by_decade.values()))
plt.xlabel('Decade Start Year')
plt.ylabel('Annual Mean Disaster Count')
plt.title('Change in Decade Mean for Natural Disasters, 1900-2010')
plt.show()
###Output
_____no_output_____
###Markdown
How does the median number of natural disasters change by decade? Analogous Functions for the Medians By Decade
###Code
def compute_decade_median(start_yr, y_c_data):
'''Return the median of total disasters over a given period of 10 years.'''
years = grab_decade(start_yr, y_c_data)
# compute and return the median
return years['counts'].median()
def compute_medians(y_c_data):
    '''Returns a dict of the median number of disasters that occurred in every decade, 1900-2010.'''
# compute the amount of decades in our data
start_yr, end_yr = y_c_data['years'].min(), y_c_data['years'].max()
decades = (end_yr - start_yr) // 10
# store all the medians in a dict
decade_medians = dict()
for i in range(start_yr, end_yr, 10):
decade_medians[f'{i}'] = compute_decade_median(i, y_c_data)
return decade_medians
medians_by_decade = compute_medians(y_c_data)
###Output
_____no_output_____
###Markdown
Plot the Change in Disaster Count Median, By Decade
###Code
plt.plot(list(medians_by_decade.keys()), list(medians_by_decade.values()))
plt.xlabel('Decade Start Year')
plt.ylabel('Median Disaster Count')
plt.title('Change in Decade Median for Natural Disasters, 1900-2010')
plt.show()
###Output
_____no_output_____
###Markdown
Wait, what? Why the drop around 2000? Watch out for people who only show you the data for the last decade; there's more if we look closely (at the annual data)!
###Code
counts = disasters_df[(disasters_df['Entity'] == 'All natural disasters') & (disasters_df['Year'] >= 2000) & (disasters_df['Year'] <= 2010)]['Number of reported natural disasters (reported disasters)']
plt.plot(list(range(2000, 2011)), counts)
plt.xlabel('Year')
plt.ylabel('Annual Disaster Count')
plt.title('Change in Natural Disaster Count, 2000-2010')
plt.show()
###Output
_____no_output_____
###Markdown
Our Data is Subject to Regression to the Mean! How can we reach better conclusions*? **Without just getting more data* 3: Bayes' Theorem $ P(A|B) = \frac{P(A \text{ and } B)}{P(B)} $ Given a year is between 2000-2018, what is the chance that its number of total disasters is greater than the average number of disasters for all years 1900-2018? In our scenario: $ P(A|B) $ = P(Year Has More Natural Disasters than the Mean for All Years 1900-2018, Given Year is 2000-2018) $ P(B) $ = P(Year is Between 2000-2018) What is the mean number of total natural disasters annually, for all years 1900-2018?
###Code
# find all rows reporting "all natural disasters"
COUNT = 'Number of reported natural disasters (reported disasters)'
all_disasters = disasters_df[disasters_df['Entity'] == 'All natural disasters'][COUNT]
# sum them together, divide by their number
mean_disasters = np.sum(all_disasters) / len(all_disasters)
# print the mean
mean_disasters
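# (Equivalent, more idiomatic, assuming no missing values: all_disasters.mean())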
###Output
_____no_output_____
###Markdown
How Many Years Between 1900-2018 Have More Than This Mean?
###Code
count = 0
for num in all_disasters:
if num > mean_disasters:
count += 1
count
###Output
_____no_output_____
###Markdown
Do all years 2000-2018 have more total disasters than the mean?
###Code
all_disasters_years_and_counts = disasters_df[(disasters_df['Entity'] == 'All natural disasters')]
years_2000_2018 = all_disasters_years_and_counts.tail(19)
count = 0
for num in years_2000_2018['Number of reported natural disasters (reported disasters)']:
if num > mean_disasters:
count += 1
percent_val = round((count/19) * 100, 2)
print(f'{percent_val}%') # have all these years surpassed the mean we calculated?
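# (Equivalent one-liner, as a sketch, before rounding and reusing the COUNT column name
#  defined above: (years_2000_2018[COUNT] > mean_disasters).mean() * 100)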
###Output
100.0%
###Markdown
So, Given that a Year has an Above-Average Number of Natural Disasters (Compared to the Mean of All 118 Years), What's the Chance that it Falls Between 2000-2018?
###Code
print(f'{round((19/42) * 100, 2)}%')
###Output
45.24%
###Markdown
Bayes' Theorem, pt. 2 Do we really need to set 2000 as our checkpoint? Given a year is between 2000-2018, what is the chance that its number of total disasters is greater than the average number of disasters for all years 1900-2000?
###Code
# slice the DataFrame by century
disasters_20th = disasters_df[(disasters_df['Entity'] == 'All natural disasters') & (disasters_df['Year'] <= 1999) & (disasters_df['Year'] >= 1900)]
disasters_21st = disasters_df[(disasters_df['Entity'] == 'All natural disasters') & (disasters_df['Year'] >= 2000) & (disasters_df['Year'] <= 2018)]
# find the mean annual number of disasters in the 20th century
mean_20th = disasters_20th[COUNT].values.mean()
# compute the percent of years in the 21st century which is greater than this value
percent_over = len(disasters_21st[disasters_21st[COUNT] > mean_20th]) / len(disasters_21st) * 100
print(f'{percent_over}%')
###Output
100.0%
###Markdown
So how does the probability we're looking for differ from the one before?
###Code
# find the total number of years with counts above the mean_20th
count_above_mean = len(all_disasters[all_disasters > mean_20th])
print(f'{round((18/count_above_mean) * 100, 2)}%')
###Output
37.5%
###Markdown
4: Distribution of Disasters What is the distribution of natural disasters over the years 1900-1999? 2000-2018?
###Code
# let's take another look at that data
all_disasters_years_and_counts
###Output
_____no_output_____
###Markdown
Breaking it Down Even Further Years and Counts
###Code
y_c_data
###Output
_____no_output_____
###Markdown
Time Series Plot
###Code
plt.plot(y_c_data['years'], y_c_data['counts'])
plt.title('All Natural Disasters Globally, From 1900-2018')
plt.ylabel('Total Count')
plt.xlabel('Year')
plt.show()
###Output
_____no_output_____
###Markdown
Is the Distribution of Disasters "Balanced" Between the Centuries? What's the probability that any given natural disaster between 1900-2018, happened 1900-1999?
###Code
def probability_for_interval(start_year, end_year):
# take the sum of all natural disasters that occurred 1900-2018
sum_all = y_c_data['counts'].sum()
# take the sum that happen over the interval
yrs_in_range = y_c_data[(y_c_data['years'] < end_year) & (y_c_data['years'] > start_year)]
sum_yrs = yrs_in_range['counts'].sum()
# return the probability
percent = round((sum_yrs/sum_all) * 100, 2)
return percent
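# (Caveat: the comparisons above are strict, so the endpoint years passed in are excluded
#  from the interval; using >= start_year and <= end_year would include them.)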
prob_20th = probability_for_interval(1900, 2000)
print(f'{prob_20th}%')
###Output
48.12%
###Markdown
What About 2000-2018?
###Code
prob_21st = probability_for_interval(2000, 2018)
print(f'{prob_21st}%')
plt.pie([prob_20th, prob_21st], labels=['20th', '21st'])
plt.title('Relative Frequency of Natural Disasters in 20th & 21st Centuries')
plt.show()
###Output
_____no_output_____
###Markdown
5: What Happens if We Remove Outliers? We need* to take a look at our IQR! **because we don't have a normal distribution*
###Code
def find_remove_outlier_iqr(disaster_counts):
    '''Identify the outliers in the annual total natural disasters data using the 1.5*IQR rule.'''
    # calculate interquartile range
    q25, q75 = np.percentile(disaster_counts, 25), np.percentile(disaster_counts, 75)
    iqr = q75 - q25
    print(f'This is the IQR: {iqr}')
    # calculate the outlier cutoff (Tukey's rule: 1.5 * IQR beyond the quartiles)
    cut_off = iqr * 1.5
    lower, upper = q25 - cut_off, q75 + cut_off
    # identify outliers
    outliers = [x for x in disaster_counts if x < lower or x > upper]
    # the data with the outliers removed (kept for reference, not returned)
    outliers_removed = [x for x in disaster_counts if x > lower and x < upper]
    return outliers
# use the full 1900-2018 counts (the earlier `counts` variable only covered 2000-2010)
counts = all_disasters_years_and_counts['Number of reported natural disasters (reported disasters)']
print(f'Number of outliers identified in the data: {len(find_remove_outlier_iqr(counts))}')
# show box plot
plt.boxplot(counts)
plt.title("Box Plot of Annual Natural Disasters, 1900-2018")
plt.ylabel("Count of Natural Disasters")
plt.xlabel("Years 1900-2018")
plt.show()
###Output
_____no_output_____
###Markdown
6: How Has the Amount of Carbon Emissions Looked Over the Turn of the Century? Getting a Feel of the Carbon Emissions Data This provides the monthly carbon emissions from electricity generation, by the Energy Information Administration.[Link to the dataset](https://www.kaggle.com/txtrouble/carbon-emissions).
###Code
carbon_df.head(15)
# carbon_df['Description'].values
carbon_df.tail()
# Types of energy in the dataset
carbon_df['Description'].unique()
###Output
_____no_output_____
###Markdown
Plot the Change in Carbon Emissions Annually, from 1973-2016
###Code
# store the annual emissions count in a dict
years_emissions = dict()
# just look at emissions from total electric output
carbon_total = carbon_df[carbon_df['Description'] == 'Total Energy Electric Power Sector CO2 Emissions']
# traverse through the years
for i in range(197300, 201700, 100):
# find all the rows in the data for the year we're currently on
year = carbon_total[(carbon_total['YYYYMM'] >= i) & (carbon_total['YYYYMM'] <= i + 12)]
    # sum the emissions for that one year
sum = 0.0
for value in year['Value']:
# handle the invalid values
if value == 'Not Available':
value = 0.0
sum += float(value)
# store it in the dict
years_emissions[int(i/100)] = sum
# Voila! A dict of all years and their emissions counts, 1973-2016
print(years_emissions)
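# A vectorised alternative (a sketch, assuming 'Not Available' should count as 0 as above,
# and that YYYYMM only contains real month codes 01-12; any annual-summary rows such as a
# month code of 13 would need to be filtered out first):
#   vals = pd.to_numeric(carbon_total['Value'], errors='coerce').fillna(0.0)
#   yearly = vals.groupby(carbon_total['YYYYMM'] // 100).sum()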
# One of the things to note in this data is that NaN values were replaced with 0, but this is likely far from the
# true number of emissions made that month
plt.plot(list(years_emissions.keys()), list(years_emissions.values()))
plt.title('Annual Carbon Emissions from Electricity Generation, 1973-2016')
plt.xlabel('Year')
plt.ylabel('Million Metric Tons of Carbon Dioxide')
plt.show()
###Output
_____no_output_____
###Markdown
Wait, emissions are going down? Remember, this Data was Only for the Emissions Produced for Electricity in the U.S.![Globally, emissions are up](https://www.wri.org/blog/2018/12/new-global-co2-emissions-numbers-are-they-re-not-good). 7: The (Economic) Cost of Natural Disasters Getting a Feel for the Economic Data[Link to dataset, same as for the natural disasters data.](https://www.kaggle.com/dataenergy/natural-disaster-data)
###Code
econ_df.head()
###Output
_____no_output_____
###Markdown
Let's Combine This DataFrame with the Disasters Data!
###Code
# combining datasets
df = disasters_df.rename(columns={'Number of reported natural disasters (reported disasters)': 'Disaster Count'})
df2 = econ_df.rename(columns={'Total economic damage from natural disasters (US$)':'Cost'})
df['Cost'] = df2['Cost']
df.head()
###Output
_____no_output_____
###Markdown
Change in Economic Cost Over Time - Is it Normal?
###Code
dollars = df[df['Entity'] == 'All natural disasters']['Cost']
plt.plot(years, dollars)
plt.title('Cost of Natural Disasters Globally, 1900-2018')
plt.ylabel('Total Cost (USD)')
plt.xlabel('Year')
plt.show()
###Output
_____no_output_____
###Markdown
Warning About the Above Time Series! I **do not** currently know whether or not the costs reported in the dataset are **adjusted for inflation.**If it turns out the costs are not, we can only take the distribution with a grain of salt. **We don't really know if the disasters are costlier in terms of value, or if it's just inflation making everything more expensive over time.** Heatmap
###Code
# Credit to the Seaborn Documentation for inspiring this cell: https://seaborn.pydata.org/examples/many_pairwise_correlations.html
sns.set(style="white")
# Compute the correlation matrix
corr = df.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=np.bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(8, 8))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True)
plt.title('Correlation Between Cost and Count')
plt.show()
###Output
_____no_output_____
###Markdown
Correlations
###Code
def pearson_corr(x, y):
'''Given two lists of numbers x and y, return the value of their Pearson correlation coefficient.'''
x_mean = np.mean(x)
y_mean = np.mean(y)
num = [(i - x_mean)*(j - y_mean) for i,j in zip(x,y)]
den_1 = [(i - x_mean)**2 for i in x]
den_2 = [(j - y_mean)**2 for j in y]
correlation_x_y = np.sum(num)/np.sqrt(np.sum(den_1))/np.sqrt(np.sum(den_2))
return correlation_x_y
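# Quick sanity check (a sketch): this should agree with numpy's built-in implementation,
#   np.corrcoef(x, y)[0, 1]
# up to floating-point error.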
# get a lists of the counts and the costs
counts = df[(df['Entity'] == 'All natural disasters') & (df['Year'] <= 2018) & (df['Year'] >= 1900)]['Disaster Count']
costs = df[(df['Entity'] == 'All natural disasters') & (df['Year'] <= 2018) & (df['Year'] >= 1900)]['Cost']
corr_cost_count = pearson_corr(costs, counts)
print(f'Correlation between cost of damages and disaster count: {corr_cost_count}.')
###Output
Correlation between cost of damages and disaster count: 0.7547597509253345.
###Markdown
Null Hypothesis We know that both the count and cost of total natural disasters annually rise around the turn of the century. Someone may claim: "The higher mean count of total natural disasters globally in the 21st century will not lead to more expensive disaster costs in this century than in the one prior." Do we accept or reject this? 1 Sample T-Test **Why?** This is 1 sample because, as I'm sure you realize, Earth is the only planet like Earth for which we humans can calculate economic challenges due to natural disasters. "The 1-sample t-test is used when we want to compare a sample mean to a population mean (which we already know)." [quote from iaingallagher blog, "t-tests in python"](https://iaingallagher.tumblr.com/post/50980987285/t-tests-in-python). In our scenario we can already calculate the mean cost of natural disasters for all years 1900-2018, and then use the t-test to conclude whether the years in the 21st century are exceptionally high, given that there were higher numbers of natural disasters in those years (as shown earlier).
###Code
# 1-sample t-test
# get a list of the costs of disasters for just the 21st century
costs = df[df['Entity'] == 'All natural disasters']['Cost'].values
costs_21 = df[(df['Entity'] == 'All natural disasters') & (df['Year'] <= 2018) & (df['Year'] >= 2000)]['Cost'].values
# calculate the mean cost annually due to disasters, for whole population (1900-2018)
pop_mean = costs.mean()
# run the test
t, p = stats.ttest_1samp(costs_21, pop_mean)
# see the results
print(f"The t-statistic is {t} and the p-value is {p}.")
###Output
The t-statistic is 4.985294152328724 and the p-value is 9.584483881890286e-05.
###Markdown
This example examines output from the `blob2d` example included in the `BOUT-dev` repo. \[It was tested with BOUT++ v4.3.2 from Fedora 34\].`blob2d` is a simplified model of an isolated 'blob' or 'filament'. These are coherent, field-aligned structures that are common in the scrape-off layer of tokamaks. `blob2d` represents the evolution only in the plane perpendicular to the magnetic field, with approximate closures describing parallel currents to the sheath and loss of density due to parallel flows. The 'blob' is created by initialising the simulation with a Gaussian density perturbation on a constant background.This notebook is strongly based on [the blob2d notebook in the xBOUT-examples](https://github.com/boutproject/xBOUT-examples/blob/master/blob2d/blob2d_example.ipynb).Contents:* Setup* Running the simulation* Load* Plot* Animate* Analyse Setup
###Code
# set up matplotlib
%matplotlib notebook
from matplotlib import pyplot as plt
plt.rcParams["figure.figsize"] = (16, 8)
plt.rcParams.update({"font.size": 14})
import numpy as np
from xbout import open_boutdataset
# The physics model we are going to run
import blob2d
# The simulation requires a folder from which options are read, and output is written.
path = "blob"
# Make sure we have the folder "blob" and options file "BOUT.inp" is present
blob2d.ensure_blob(path)
# We must call init only once
# Restart the kernel if you want to use a different working directory
blob2d.bc.init(["-d", path])
###Output
_____no_output_____
###Markdown
Running the simulation=====
###Code
# Only run simulation for 10 steps
model = blob2d.Blob2D(nout=10)
print("We are now running the simulation ... that might take some time ...")
model.solve()
print("The simulation is finished!")
###Output
We are now running the simulation ... that might take some time ...
----------Parameters: ------------
Omega_i = 1.681764e+07 /s,
c_s = 1.550006e+04 m/s,
rho_s = 9.216552e-04 m
delta_* = rho_s * (dn/n) * 9.372772e+00
The simulation is finished!
###Markdown
Load==== First we need to open the Dataset.The chunks argument to `open_boutdataset()` is needed so that dask can paralleliseoperations over the time dimension (by default the chunk size is the size of thearrays in the files being loaded). Seehttp://xarray.pydata.org/en/stable/dask.htmlchunking-and-performance.For this example it doesn't matter, but for larger ones it can be very useful.Note: a warning from `open_boutdataset()` is expected. For `blob2d` the z-directionis a periodic, binormal direction with lengths normalised to the background hybridgyro-radius `rho_s=sqrt(T_e/m_i)`, rather than the usual toroidal angle. `'dz'` isused and `'ZMIN'` and `'ZMAX'` are ignored.
###Code
ds = open_boutdataset(f"{path}/BOUT.dmp.*.nc", f"{path}/BOUT.inp", chunks={"t": 4})
# Use squeeze() to get rid of the y-dimension, which has length 1 as blob2d does not
# simulate the parallel dimension.
ds = ds.squeeze(drop=True)
###Output
Read in:
<xbout.BoutDataset>
Contains:
<xarray.Dataset>
Dimensions: (t: 11, x: 260, y: 1, z: 256)
Coordinates:
* t (t) float64 0.0 50.0 100.0 150.0 200.0 ... 350.0 400.0 450.0 500.0
* x (x) int64 0 1 2 3 4 5 6 7 8 ... 252 253 254 255 256 257 258 259
* y (y) float64 0.5
* z (z) float64 0.0 0.3 0.6 0.9 1.2 1.5 ... 75.3 75.6 75.9 76.2 76.5
Data variables:
dx (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
dy (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g11 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g22 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g33 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g12 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g13 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g23 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_11 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_22 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_33 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_12 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_13 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_23 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
J (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
Bxy (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
G1 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
G2 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
G3 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
phi (t, x, y, z) float64 dask.array<chunksize=(4, 260, 1, 256), meta=np.ndarray>
ncalls (t) int32 dask.array<chunksize=(4,), meta=np.ndarray>
ncalls_e (t) int32 dask.array<chunksize=(4,), meta=np.ndarray>
ncalls_i (t) int32 dask.array<chunksize=(4,), meta=np.ndarray>
n (t, x, y, z) float64 dask.array<chunksize=(4, 260, 1, 256), meta=np.ndarray>
omega (t, x, y, z) float64 dask.array<chunksize=(4, 260, 1, 256), meta=np.ndarray>
Attributes:
BOUT_REVISION: Unknown
metadata: {'BOUT_VERSION': 4.32, 'iteration': 9, 'zperiod': 1, 'MXS...
options: # settings file for BOUT++\n#\n# Blob simulation in a 2D ...
Metadata:
{ 'BOUT_VERSION': 4.32,
'MXG': 2,
'MXSUB': 256,
'MYG': 0,
'MYSUB': 1,
'MZ': 256,
'MZG': 0,
'MZSUB': 256,
'NXPE': 1,
'NYPE': 1,
'NZPE': 1,
'ZMAX': 1.0,
'ZMIN': 0.0,
'dz': 0.3,
'fine_interpolation_factor': 8,
'iteration': 9,
'ixseps1': 260,
'ixseps2': 260,
'jyseps1_1': -1,
'jyseps1_2': 0,
'jyseps2_1': 0,
'jyseps2_2': 0,
'keep_xboundaries': 1,
'keep_yboundaries': 0,
'nx': 260,
'ny': 1,
'ny_inner': 0,
'nz': 256,
'zperiod': 1}
Options:
<boutdata.data.BoutOptionsFile object at 0x7f6ec7b22280>
###Markdown
We choose to create a 'coordinate' for the x-dimension from `dx`.This is not done generically because `dx` can have two-dimensional dependence\- as well as varying radially it can be different e.g. in core and PF regions.However, for a slab geometry like `blob2d`, `dx` is a constant so it can easilybe used to create a one-dimensional x-coordinate.This ensures we get a sensible aspect ratio in plots.A z-coordinate was already created from `dz`, because `dz` is always a scalar,so it can always be used to create a 1d 'dimension coordinate'.
###Code
dx = ds["dx"].isel(x=0).values
# Get rid of existing "x" coordinate, which is just the index values.
ds = ds.drop("x")
# Create a new coordinate, which is length in units of rho_s
ds = ds.assign_coords(x=np.arange(ds.sizes["x"])*dx)
###Output
_____no_output_____
###Markdown
Plot===Here we use xarray methods to plot simple slices. First make some plots of the initial state
###Code
ds_initial = ds.isel(t=0)
plt.figure()
ax = plt.subplot(131)
ax.set_aspect("equal")
ds_initial["n"].plot(x="x", y="z")
ax = plt.subplot(132)
ax.set_aspect("equal")
ds_initial["omega"].plot(x="x", y="z")
ax = plt.subplot(133)
ax.set_aspect("equal")
ds_initial["phi"].plot(x="x", y="z")
###Output
_____no_output_____
###Markdown
Plots at a time point during the blob evolution
###Code
tind = 10
# Uses xarray methods to plot simple slices
plt.figure()
ax = plt.subplot(131)
ax.set_aspect("equal")
ds.isel(t=tind)["n"].plot(x="x", y="z")
ax = plt.subplot(132)
ax.set_aspect("equal")
ds.isel(t=tind)["omega"].plot(x="x", y="z")
ax = plt.subplot(133)
ax.set_aspect("equal")
ds.isel(t=tind)["phi"].plot(x="x", y="z")
###Output
_____no_output_____
###Markdown
Slicing to a 1d Dataset automatically produces a 1d plot, herea radial density profile through the blob centre
###Code
plt.figure()
ds.isel(t=10, z=128)["n"].plot()
###Output
_____no_output_____
###Markdown
Animate=======Use `xbout` methods through the `.bout` accessor to create animations. For a DataArray
###Code
ds["n"].bout.animate2D(aspect="equal")
###Output
n data passed has 3 dimensions - will use animatplot.blocks.Pcolormesh()
###Markdown
Animate several fields from a Dataset with `animate_list()`
###Code
ds.bout.animate_list(["n", "omega", "phi"], ncols=3, aspect="equal")
###Output
_____no_output_____
###Markdown
DataArray objects can be passed to `animate_list()` (as long asthey all have the same time-axis length), e.g. to combine 1dand 2d plots.Keyword arguments to `animate_list()` can be passed lists (withas many entries as variables being plotted), to set a per-variablevalue.Animations can be saved by passing a 'save_as' argument giving a namefor the output file, producing a .gif file.
###Code
ds.bout.animate_list(["n", "omega", "phi", ds["n"].isel(z=128)], aspect=["equal", "equal", "equal", "auto"], save_as="blob")
###Output
_____no_output_____
###Markdown
Analyse
=======
Perform some analysis of the blob to demonstrate more `xarray` methods. Find the centre of mass of the blob using `.integrate()` ([documentation](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.integrate.html)).
###Code
background_density = 1.0
delta_n = ds["n"] - background_density
integrated_density = delta_n.integrate(dim=["x", "z"])
ds["CoM_x"] = (ds["x"]*delta_n).integrate(dim=["x", "z"]) / integrated_density
ds["CoM_z"] = (ds["z"]*delta_n).integrate(dim=["x", "z"]) / integrated_density
plt.figure()
plt.subplot(121)
ds["CoM_x"].plot()
plt.subplot(122)
ds["CoM_z"].plot()
###Output
_____no_output_____
###Markdown
Find the blob velocity using `.differentiate()` ([documentation](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.differentiate.html)). This is a somewhat crude method, using finite difference on the output timestep. It would be more accurate to calculate and integrate the ExB velocity.
###Code
v_x = ds["CoM_x"].differentiate("t")
v_z = ds["CoM_z"].differentiate("t")
plt.figure()
plt.subplot(121)
v_x.plot()
plt.ylabel("Radial CoM velocity")
plt.subplot(122)
v_z.plot()
plt.ylabel("Binormal CoM velocity")
###Output
_____no_output_____
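###Markdown
As a cross-check on the crude estimate above, the ExB velocity mentioned in the text could be computed from the potential. The cell below is only a sketch (an addition, not part of the original analysis): it assumes the slab geometry of `blob2d` with a normalised magnetic field, so that the ExB velocity components are proportional to the z- and x-derivatives of `phi` (the overall signs depend on the field-direction convention), and it introduces new helper variables.
###Code
# Sketch: density-weighted ExB velocity of the blob (sign convention assumed)
v_E_x = ds["phi"].differentiate("z")
v_E_z = -ds["phi"].differentiate("x")
v_x_ExB = (delta_n * v_E_x).integrate(dim=["x", "z"]) / integrated_density
v_z_ExB = (delta_n * v_E_z).integrate(dim=["x", "z"]) / integrated_density
###Output
_____no_output_____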
###Markdown
Import libraries
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import nltk
import json
###Output
_____no_output_____
###Markdown
Accept Rate
###Code
import pyecharts.options as opts
from pyecharts.charts import Bar, Line
x_data = ["2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019"]
total_submission = [1105, 1219, 1400, 1467, 1420, 1678, 1838, 2403, 3240, 4856, 6743]
accept = [263, 293, 308, 366, 359, 441, 403, 569, 678, 1011, 1429]
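# Acceptance rate in percent, floored to one decimal place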
rate = list(np.floor(1000 * np.array(accept) / np.array(total_submission)) / 10)
bar = (
Bar(init_opts=opts.InitOpts(width="1000px", height="500px"))
.add_xaxis(xaxis_data=x_data)
.add_yaxis(
series_name="Submission",
yaxis_data=total_submission,
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="Accepted",
yaxis_data=accept,
label_opts=opts.LabelOpts(is_show=False),
)
.extend_axis(
yaxis=opts.AxisOpts(
name="Accept Rate",
type_="value",
min_=0,
max_=100,
interval=20,
axislabel_opts=opts.LabelOpts(formatter="{value}%"),
)
)
.set_global_opts(
tooltip_opts=opts.TooltipOpts(
is_show=True, trigger="axis", axis_pointer_type="cross"
),
xaxis_opts=opts.AxisOpts(
type_="category",
axispointer_opts=opts.AxisPointerOpts(is_show=True, type_="shadow"),
),
yaxis_opts=opts.AxisOpts(
name="Submission Number",
type_="value",
min_=0,
max_=7000,
interval=1000,
axislabel_opts=opts.LabelOpts(formatter="{value}"),
axistick_opts=opts.AxisTickOpts(is_show=True),
splitline_opts=opts.SplitLineOpts(is_show=True),
),
)
)
line = (
Line()
.add_xaxis(xaxis_data=x_data)
.add_yaxis(
series_name="Accept Rate",
yaxis_index=1,
y_axis=rate,
label_opts=opts.LabelOpts(is_show=False),
)
)
#
bar.overlap(line).render_notebook()
###Output
_____no_output_____
###Markdown
Load data
###Code
with open('data/neurips2019.json') as fp:
data_set = json.load(fp)
for i, title in enumerate(data_set):
print("NO.%d" % i, "paper's title : ", title)
nltk.download('stopwords')
from nltk.corpus import stopwords
from collections import Counter
print(stopwords.words('english'))
stopwords_deep_learning = ['learning', 'network', 'neural', 'networks', 'deep', 'via', 'using', 'convolutional',
'single', 'data', 'method', 'based', 'beyond', 'model', 'algorithm', 'models', 'methods', 'evaluation', 'task', 'tasks', 'fast']
keyword_list = []
for i, title in enumerate(data_set):
word_list = title.split(" ")
word_list = list(set(word_list))
word_list_cleaned = []
for word in word_list:
if not word.strip():
continue
word = word.lower()
if word not in stopwords.words('english') and word not in stopwords_deep_learning: #remove stopwords
word_list_cleaned.append(word)
for k in range(len(word_list_cleaned)):
keyword_list.append(word_list_cleaned[k])
keyword_counter = Counter(keyword_list)
print('{} different keywords before merging'.format(len(keyword_counter)))
# Merge duplicates: CNNs and CNN
duplicates = []
for k in keyword_counter:
if k+'s' in keyword_counter:
duplicates.append(k)
for k in duplicates:
keyword_counter[k] += keyword_counter[k+'s']
del keyword_counter[k+'s']
print('{} different keywords after merging'.format(len(keyword_counter)))
print(keyword_counter)
print("")
# Show N most common keywords and their frequencies
num_keyowrd = 120
keywords_counter_vis = keyword_counter.most_common(num_keyowrd)
plt.rcdefaults()
fig, ax = plt.subplots(figsize=(8, 20))
key = [k[0] for k in keywords_counter_vis]
value = [k[1] for k in keywords_counter_vis]
y_pos = np.arange(len(key))
ax.barh(y_pos, value, align='center', color='green', ecolor='black', log=True)
ax.set_yticks(y_pos)
ax.set_yticklabels(key, rotation=0, fontsize=8)
ax.invert_yaxis()
for i, v in enumerate(value):
ax.text(v + 3, i + .25, str(v), color='black', fontsize=8)
# ax.text(y_pos, value, str(value))
ax.set_xlabel('Frequency')
ax.set_title('NeurIPS 2019 Submission Top {} Keywords'.format(num_keyowrd))
plt.show()
# Show the word cloud forming by keywords
from wordcloud import WordCloud
NIPS = WordCloud(max_font_size=800, max_words=160,
width=1280, height=640,
background_color="black").generate(' '.join(['NeurIPS'] * 500))
wordcloud = WordCloud(max_font_size=64, max_words=160,
width=1280, height=640,
background_color="black").generate(' '.join(keyword_list))
plt.figure(figsize=(16, 8))
plt.imshow(NIPS, interpolation="bilinear", alpha=1)
plt.imshow(wordcloud, interpolation="bilinear", alpha=.5)
plt.axis("off")
plt.show()
import matplotlib
###Output
_____no_output_____
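###Markdown
One caveat about the keyword cleaning above (an observation added here, not from the original notebook): words are lower-cased but punctuation attached to them is kept, so a token such as 'learning:' slips past the stopword filter. A minimal sketch of an extra stripping step that could be applied before building the Counter:
###Code
import string
# Strip leading/trailing punctuation, drop empty tokens, re-apply the custom stopword list
keyword_list = [w.strip(string.punctuation) for w in keyword_list]
keyword_list = [w for w in keyword_list if w and w not in stopwords_deep_learning]
###Output
_____no_output_____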
###Markdown
Business Understanding
In this project I would like to gain some insight into tech jobs and careers, as I am planning a mid-term career shift, and Stack Overflow's annual developer survey provides excellent material for this. In this analysis, I will try to figure out:
- What are the different tech roles that represent the market?
- What level of career satisfaction can be measured among various tech professionals?
- What are the company profiles that employ people who attended bootcamps?

Data Understanding
I chose to analyse the provided dataset from Stack Overflow, which is the outcome of an annual survey of people using the site. (I have analysed the survey results of 2017.) The survey covers all sorts of information, such as programming languages, salary and code style. The dataset I worked with includes more than 64,000 responses from 213 countries around the world. The survey collected a significant number of data points, as more than 150 questions were asked.
###Code
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
###Output
_____no_output_____
###Markdown
To get a better understanding of the data I used for this analysis, first I look at the data itself.
###Code
df = pd.read_csv("data/survey-results-public.csv")
df.head()
###Output
_____no_output_____
###Markdown
Let's get the size of the dataset: we already know from the preview above that it has 154 columns, so let's check the number of rows:
###Code
n_rows = df.shape[0]
n_rows
###Output
_____no_output_____
###Markdown
Dealing with missing data is always important in the data science process, so below are some statistics about missing values.
###Code
no_nulls = set(df.columns[df.isnull().mean()==0])
print("The following columns are completely populated, no missing values identified:")
no_nulls
###Output
The following columns are completely populated, no missing values identified:
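###Markdown
For a fuller picture of missingness (a small addition to the check above), the share of missing values per column can also be listed:
###Code
# Proportion of missing values per column, highest first
df.isnull().mean().sort_values(ascending=False).head(10)
###Output
_____no_output_____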
###Markdown
Prepare
###Code
# Filtering for interested regions only:
df = df[df.Country.isin(['United Kingdom', 'Switzerland', 'Germany', 'Austria', 'Hungary'])]
df.Country.value_counts()
# Filtering for non empty Developer Type:
df = df[df.DeveloperType.notnull()]
# Creating stat dictionary about developer types
devtyp_list_raw = str(tuple(df.DeveloperType.tolist())) \
.replace(";",",")\
.replace("'","")\
.replace("(","")\
.replace(")","")\
.split(", ")
devtyp_list_raw
devtyp_dict = dict(Counter(devtyp_list_raw))
devtyp_dict
# Getting bootcamp attendees
df_bc_attend = df[df.TimeAfterBootcamp.notnull()]
###Output
_____no_output_____
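###Markdown
As a side note, the string-concatenation trick used above to count the multi-select DeveloperType answers could also be written directly in pandas. The cell below is only an alternative sketch, assuming the answers are ';'-separated as in the survey export:
###Code
# One row per selected role, then count occurrences
devtyp_counts = df.DeveloperType.str.split(';').explode().str.strip().value_counts()
devtyp_counts
###Output
_____no_output_____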
###Markdown
Question 1: Tech Roles / Developer Types
###Code
# Analysing and sorting
df_devtyp = pd.DataFrame.from_dict(devtyp_dict, orient="index")
df_devtyp.rename(columns={0: 'cnt'}, inplace=True)
df_devtyp.sort_values("cnt", ascending=True, inplace=True)
df_devtyp
# Visualizing
(df_devtyp.cnt).plot(kind='barh', legend=None)
###Output
_____no_output_____
###Markdown
The vast majority of responders work as Web / Desktop application developer in the selected region. The third most frequent role was the Mobile developer. Question2: Career Satisfaction
###Code
# Visualization of overall career satisfaction score frequency
df.CareerSatisfaction.hist()
###Output
_____no_output_____
###Markdown
The tendency is very positive: there is a high level of satisfaction in the selected population.
###Code
# Analyse mean career satisfaction scores per developer type
dt = []
labels = []
for idx in df_devtyp.index:
labels.append(idx)
df_filt = df[df.DeveloperType.str.contains(idx)]
dt.append(df_filt.CareerSatisfaction.mean())
df_careersat = pd.DataFrame(dt, index=labels)
df_careersat.rename(columns={0:"meanCareerSatisfaction"}, inplace=True)
df_careersat
# Visualizing
plt.plot(df_careersat.meanCareerSatisfaction,
df_careersat.index,
'D')
plt.grid(axis='y')
###Output
_____no_output_____
###Markdown
In general, I found similar career satisfaction values across the observed disciplines: the means calculated among respondents residing in the geographical areas of interest vary between 7.27 and 7.63.
Question 3: Opportunities with Bootcamps
###Code
# Analysing bootcamp attendees employers by company type
df_bc_attend.CompanyType.value_counts()
# Analysing bootcamp attendees employers by company size
df_bc_attend.CompanySize.value_counts()
###Output
_____no_output_____
###Markdown
Machine Learning for Author Attribution - Analysis
Genevieve Hayes
13th November 2018

Overview
Author attribution "is the task of identifying the author of a given text from a (given) set of suspects (Mohsen et al. (2016))." This is a problem that can readily be framed as a text classification task, "where author represents a class (label) of a given text (Mohsen et al. (2016))," and as a result, recent research into author attribution analysis has focussed almost exclusively on the use of machine learning techniques.

Prior to the advent of social media, author attribution analysis was typically applied to longer texts, such as books and letters. In fact, Forsyth and Holmes (1996) concluded that a text had to be a minimum of 250 words in length for the stylometric characteristics to be apparent. However, recent research (for example, Green and Sheppard (2013), Schwartz et al. (2013) and Shrestha et al. (2017)) has demonstrated the successful application of author attribution techniques to Twitter messages ("tweets"), which "average less than 25 words" in length and are "often less than 10" words long (Green and Sheppard (2013)). Tweets are currently limited to 280 characters, and prior to November 2017, were limited to 140 characters. As a result, "tweets are relatively self-contained and have smaller sentence length variance compared to excerpts from longer text (Schwartz et al. (2013))." It is possible that these characteristics are the reason why author attribution techniques that have previously fallen apart when applied to shorter texts have succeeded when applied to tweets. It is also possible that, had Forsyth and Holmes (1996) considered more modern machine learning algorithms in their analysis, such as support vector machines (SVMs) and neural networks, which were not in common use in 1996, they would have drawn different conclusions about the minimum text length required to successfully identify the author of a text.

In this analysis we explore these hypotheses by applying techniques that have been demonstrated to succeed in determining the authorship of tweets to short, tweet-length excerpts of longer works. In performing this analysis, we make use of a dataset comprising 68,000 sentence-long excerpts from the (fiction) works of eight classic authors. The dataset was created using novel texts sourced from [Project Gutenberg](https://www.gutenberg.org/), with chapter/section headings manually removed from the files prior to processing. To allow for the creation of a balanced dataset, for authors whose novels tended to be shorter in length, text excerpts were taken from multiple works. The novels used to create the dataset are as follows:

|Author | Novels | Genre | Year of Publication|
|---------|-------|-------|--------------------|
|Louisa May Alcott | *Little Women* | Coming of Age/Romance | 1869 |
|Jane Austen | *Pride and Prejudice* and *Emma* | Romance | 1813/1815 |
|Charlotte Bronte | *Jane Eyre* | Gothic Romance | 1847 |
|Wilkie Collins | *The Woman in White* | Mystery | 1859 |
|Arthur Conan Doyle | *A Study in Scarlet*, *The Sign of the Four* and *The Hound of the Baskervilles* | Mystery | 1887/1890/1902 |
|L.M. Montgomery | *Anne of Green Gables* and *Anne of Avonlea* | Coming of Age | 1908/1909 |
|Bram Stoker | *Dracula* | Horror | 1897 |
|Mark Twain | *The Adventures of Tom Sawyer* and *The Adventures of Huckleberry Finn* | Coming of Age/Adventure | 1876/1884 |

References
Forsyth, R. and D. Holmes (1996). Feature finding for text classification. Literary and Linguistic Computing 11 (4), 163–174.
Green, R. and J. Sheppard (2013). Comparing frequency- and style-based features for Twitter author identification. Proceedings of the Twenty-Sixth International Florida Artificial Intelligence Research Society Conference, 64–69.
Mohsen, A., N. El-Makky, and N. Ghanem (2016). Author identification using deep learning. Proceedings of the 15th IEEE International Conference on Machine Learning and Applications, 898–903.
Schwartz, R., O. Tsur, A. Rappoport, and M. Koppel (2013). Authorship attribution of micro-messages. Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing. Seattle, Washington, USA, 1880–1891.
Shrestha, P., S. Sierra, F. González, P. Rosso, M. Montes-y-Gómez, and T. Solorio (2017). Convolutional neural networks for authorship attribution of short texts. Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers. Valencia, Spain, 669–674.

Import Packages and Load Data
###Code
# Import packages
import numpy as np
import pandas as pd
import chardet
from collections import Counter
import seaborn as sns
import matplotlib.pyplot as plt
import string
import time
# Display plots inline
%matplotlib inline
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, make_scorer, confusion_matrix
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import SVC
from keras.models import Model
from keras.layers import Input, Dense, Flatten, Dropout, Embedding
from keras.layers.convolutional import Conv1D, MaxPooling1D
from keras.layers.merge import concatenate
from keras.optimizers import Adam
from keras.preprocessing.text import one_hot
from keras.callbacks import ModelCheckpoint
from scipy import sparse, stats
# Download nltk - only need to run once
nltk.download('stopwords')
# Get encoding of data file
with open("/data/author_data.csv", 'rb') as file:
print(chardet.detect(file.read()))
# Load data (uncomment relevant line)
# Local version
#data = pd.read_csv("author_data.csv", encoding="Windows-1252")
# Floydhub version
data = pd.read_csv("/data/author_data.csv", encoding="Windows-1252")
print(data.head())
# Create feature (text) and label (author) lists
text = list(data['text'].values)
author = list(data['author'].values)
print("The author dataset contains {} datapoints.".format(len(text)))
###Output
The author dataset contains 68000 datapoints.
###Markdown
Data Exploration **Explore the author (labels) data**
###Code
# Check distribution of authors in the data
Counter(author)
###Output
_____no_output_____
###Markdown
As expected, our data is a balanced dataset containing 8,500 text excerpts from each of the 8 authors under consideration. There do not appear to be any corrupt or missing labels. **Explore the text data** Here are some examples of what the text data looks like:
###Code
print(text[4000])
print(text[27000])
print(text[45000])
print(text[60000])
###Output
My heart was sore for you when I heard that," and he shook hands again, with such a sympathetic face that Jo felt as if no comfort could equal the look of the kind eyes, the grasp of the big, warm hand.
###Markdown
Calculate and examine word count/length and character count statistics:
###Code
# Create word count and character count lists
word_count = []
char_count = []
for i in range(len(text)):
word_count.append(len(text[i].split()))
char_count.append(len(text[i]))
# Convert lists to numpy arrays
word_count = np.array(word_count)
char_count = np.array(char_count)
# Calculate average word lengths
ave_length = np.array(char_count)/np.array(word_count)
def get_stats(var):
"""Print summary statistics for a variable of interest.
Args:
var: array. Numpy array containing values for the variable of interest.
Returns:
None
"""
print("Min:", np.min(var))
print("Max:", np.max(var))
print("Mean:", np.mean(var))
print("Median", np.median(var))
print("1st percentile", np.percentile(var, 1))
print("95th percentile", np.percentile(var, 95))
print("99th percentile", np.percentile(var, 99))
print("99.5th Percentile", np.percentile(var, 99.5))
print("99.9th Percentile", np.percentile(var, 99.9))
print("Word count statistics")
get_stats(word_count)
# Plot word count distribution
sns.distplot(word_count, kde = False, bins = 70, color = 'blue').set_title("Word Count Distribution")
plt.xlabel('Excerpt Length (Words)')
plt.ylabel('Count')
plt.xlim(0, 100)
plt.savefig("word_count.eps")
print("\nCharacter count statistics")
get_stats(char_count)
# Plot character count distribution
sns.distplot(char_count, kde = False, bins = 100, color = 'blue').set_title("Character Count Distribution")
plt.xlabel('Excerpt Length (Characters)')
plt.ylabel('Count')
plt.xlim(0, 400)
plt.savefig("char_count.eps")
print("\nAverage length statistics")
get_stats(ave_length)
# Plot average excerpt length distribution
sns.distplot(ave_length, kde = False, bins = 70, color = 'blue').set_title("Average Word Length Distribution")
plt.xlabel('Average Excerpt Length (Characters)')
plt.ylabel('Count')
plt.xlim(0, 10)
plt.savefig("ave_length.eps")
###Output
_____no_output_____
###Markdown
The vast majority of text excerpts are under 100 words long, with an average length of around 18 words. However, there are a small number of outliers, including one excerpt containing over 250 words. At the opposite end of the spectrum, 1 percent of the text excerpts contain only 1 word each. On average, the text excerpts contain around 95 characters each, with the longest containing 1370 characters and the shortest, just 5 characters (sentences containing fewer than 5 characters were removed during the creation of the dataset). We would expect there to be a high correlation between word count and character count, so going forward we shall just focus on further examining the word count outliers, and assume these are the same as the character count outliers. With regard to word length, the majority of the excerpts have an average word length of around 5.3 characters. However, there are also outliers in this distribution, including one excerpt with an average word length of 22.0 characters, and at the other end of the spectrum, an excerpt with an average word length of 2.5 characters. We explore these outliers below.
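First, as a quick sanity check (an addition, not part of the original analysis), the assumed correlation between word count and character count can be verified directly:
###Code
# Pearson correlation between word count and character count
np.corrcoef(word_count, char_count)[0, 1]
###Output
_____no_output_____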
###Code
# Get word count outliers
word_outliers = np.where(word_count > 150)
for i in word_outliers[0]:
print("Excerpt {} - Length: {}".format(i, word_count[i]))
print(text[i], "\n")
word_outliers = np.where(word_count < 2)
for i in word_outliers[0]:
print("Excerpt {} - Length: {}".format(i, word_count[i]))
print(text[i], "\n")
###Output
Excerpt 17 - Length: 1
“Yes”.
Excerpt 68 - Length: 1
Weston.]
Excerpt 122 - Length: 1
Why?"
Excerpt 220 - Length: 1
"Hindostanee".
Excerpt 250 - Length: 1
me-yow”!
Excerpt 312 - Length: 1
“Nobody.
Excerpt 350 - Length: 1
“No”.
Excerpt 411 - Length: 1
“Sir”!
Excerpt 464 - Length: 1
“No”.
Excerpt 484 - Length: 1
Hudson?'
Excerpt 486 - Length: 1
"Yes.
Excerpt 611 - Length: 1
"What!"
Excerpt 719 - Length: 1
"Genius.
Excerpt 765 - Length: 1
solution.
Excerpt 775 - Length: 1
Politics.
Excerpt 785 - Length: 1
“Sometimes.
Excerpt 832 - Length: 1
Read".
Excerpt 847 - Length: 1
“Why”?
Excerpt 853 - Length: 1
"When?"
Excerpt 1073 - Length: 1
'Never!
Excerpt 1085 - Length: 1
“Indeed!
Excerpt 1261 - Length: 1
“Mrs.
Excerpt 1332 - Length: 1
“‘P.S.
Excerpt 1554 - Length: 1
"Followed!
Excerpt 1609 - Length: 1
my-soul-bless-my-soul!
Excerpt 1610 - Length: 1
Well!
Excerpt 1636 - Length: 1
“No”.
Excerpt 1758 - Length: 1
Hartright!"
Excerpt 1851 - Length: 1
"Abroad!"
Excerpt 1869 - Length: 1
"Yes".
Excerpt 1874 - Length: 1
“Never!
Excerpt 1894 - Length: 1
"Yes.
Excerpt 1902 - Length: 1
Lord!
Excerpt 1940 - Length: 1
“Good!
Excerpt 2208 - Length: 1
"Pooh!
Excerpt 2230 - Length: 1
"Nonsense!
Excerpt 2287 - Length: 1
Good-bye".
Excerpt 2347 - Length: 1
"What!
Excerpt 2360 - Length: 1
“Trouble!
Excerpt 2383 - Length: 1
Well!
Excerpt 2409 - Length: 1
“Dreadful!
Excerpt 2504 - Length: 1
"The...
Excerpt 2549 - Length: 1
Hark!"
Excerpt 2671 - Length: 1
Come!
Excerpt 2784 - Length: 1
"Iss!"
Excerpt 2832 - Length: 1
"Exactly".
Excerpt 2870 - Length: 1
'Bank-note!'
Excerpt 2889 - Length: 1
Backsheesh.
Excerpt 2965 - Length: 1
My-soul-bless-my-soul!
Excerpt 2967 - Length: 1
Come!
Excerpt 2989 - Length: 1
What!
Excerpt 2990 - Length: 1
"Yes".
Excerpt 3006 - Length: 1
“Mrs.
Excerpt 3036 - Length: 1
"Laura!
Excerpt 3086 - Length: 1
"Destroyed?"
Excerpt 3163 - Length: 1
“Yes'm”.
Excerpt 3207 - Length: 1
"Eleanor!"
Excerpt 3224 - Length: 1
Laurence!'"
Excerpt 3248 - Length: 1
“Boom”!
Excerpt 3259 - Length: 1
Come!"
Excerpt 3263 - Length: 1
Hartright?"
Excerpt 3308 - Length: 1
"Nothing.
Excerpt 3337 - Length: 1
"Certainly.
Excerpt 3452 - Length: 1
station!
Excerpt 3508 - Length: 1
"Yes".
Excerpt 3568 - Length: 1
help!"
Excerpt 3591 - Length: 1
Harker?"
Excerpt 3609 - Length: 1
“Oh”!
Excerpt 3778 - Length: 1
“Well”!
Excerpt 3809 - Length: 1
Order!
Excerpt 3828 - Length: 1
Good-bye.
Excerpt 3858 - Length: 1
"Hum!
Excerpt 3869 - Length: 1
Immense.
Excerpt 3879 - Length: 1
Well!
Excerpt 3993 - Length: 1
"Laura!
Excerpt 4015 - Length: 1
"Laugh?
Excerpt 4047 - Length: 1
“TOM”!
Excerpt 4084 - Length: 1
Whitby.
Excerpt 4157 - Length: 1
"Yes".
Excerpt 4185 - Length: 1
"Yes.
Excerpt 4194 - Length: 1
“Yes”.
Excerpt 4210 - Length: 1
"What!
Excerpt 4499 - Length: 1
“Well.
Excerpt 4546 - Length: 1
"No".
Excerpt 4590 - Length: 1
bang!
Excerpt 4646 - Length: 1
Well!
Excerpt 4785 - Length: 1
"Well?"
Excerpt 4799 - Length: 1
"Yes".
Excerpt 4829 - Length: 1
"Nonsense!
Excerpt 4847 - Length: 1
talk!'
Excerpt 4852 - Length: 1
“Three-and-twenty!
Excerpt 4880 - Length: 1
"Certainly".
Excerpt 5015 - Length: 1
“Yes.
Excerpt 5124 - Length: 1
toll!
Excerpt 5202 - Length: 1
“Mrs.
Excerpt 5334 - Length: 1
"Pooh!
Excerpt 5402 - Length: 1
"Indeed!
Excerpt 5444 - Length: 1
Listen!"
Excerpt 5448 - Length: 1
"Bravo!"
Excerpt 5521 - Length: 1
solitude!"
Excerpt 5675 - Length: 1
Ting-a-ling-ling!
Excerpt 5704 - Length: 1
"No!"
Excerpt 5760 - Length: 1
what?"
Excerpt 5888 - Length: 1
"What!
Excerpt 5985 - Length: 1
Horsewhipped!
Excerpt 6072 - Length: 1
“Knightley”!
Excerpt 6218 - Length: 1
"Yes".
Excerpt 6357 - Length: 1
"Hark!"
Excerpt 6406 - Length: 1
Quick!
Excerpt 6546 - Length: 1
"Here!"
Excerpt 6634 - Length: 1
"Yes!
Excerpt 6648 - Length: 1
God!"
Excerpt 6654 - Length: 1
Gilmore?"
Excerpt 6663 - Length: 1
Well!
Excerpt 6709 - Length: 1
Mary.
Excerpt 6715 - Length: 1
Observe.
Excerpt 6816 - Length: 1
Jane!
Excerpt 6864 - Length: 1
“Mrs.
Excerpt 6896 - Length: 1
raf'?
Excerpt 6942 - Length: 1
When?
Excerpt 6954 - Length: 1
"Halloa!"
Excerpt 6986 - Length: 1
There!
Excerpt 7068 - Length: 1
Come!"
Excerpt 7080 - Length: 1
"Mrs.
Excerpt 7116 - Length: 1
“She!
Excerpt 7158 - Length: 1
“No”?
Excerpt 7183 - Length: 1
“Dangerous”!
Excerpt 7229 - Length: 1
"Aha!"
Excerpt 7232 - Length: 1
"Exactly.
Excerpt 7315 - Length: 1
Stay!
Excerpt 7371 - Length: 1
Alas!
Excerpt 7527 - Length: 1
Alas!
Excerpt 7560 - Length: 1
Marry!
Excerpt 7565 - Length: 1
Poole!"
Excerpt 7634 - Length: 1
“Him?
Excerpt 7699 - Length: 1
“Yes'm”.
Excerpt 7701 - Length: 1
"No".
Excerpt 7711 - Length: 1
“Oh”!
Excerpt 7746 - Length: 1
“Mrs.
Excerpt 7762 - Length: 1
"Halloa!"
Excerpt 7836 - Length: 1
13th.
Excerpt 7847 - Length: 1
“Good!
Excerpt 7904 - Length: 1
"Sir?"
Excerpt 7916 - Length: 1
"Master!
Excerpt 7930 - Length: 1
“Indeed!
Excerpt 7943 - Length: 1
Gilmore?"
Excerpt 8025 - Length: 1
'See!
Excerpt 8031 - Length: 1
“Tools”?
Excerpt 8422 - Length: 1
"Exactly".
Excerpt 8602 - Length: 1
Bistritz.
Excerpt 8645 - Length: 1
Look!
Excerpt 8796 - Length: 1
Come!
Excerpt 8831 - Length: 1
"Speak!
Excerpt 8862 - Length: 1
"Amen!
Excerpt 8876 - Length: 1
"Yes.
Excerpt 9022 - Length: 1
"You?"
Excerpt 9126 - Length: 1
Good-night.
Excerpt 9218 - Length: 1
toll!'
Excerpt 9220 - Length: 1
SHOVE!
Excerpt 9320 - Length: 1
"Benefactress!
Excerpt 9326 - Length: 1
“Yes”.
Excerpt 9378 - Length: 1
“Smarty!
Excerpt 9444 - Length: 1
"Why?"
Excerpt 9481 - Length: 1
"Exactly.
Excerpt 9560 - Length: 1
“Why”?
Excerpt 9589 - Length: 1
"Why?"
Excerpt 9655 - Length: 1
"Entirely".
Excerpt 9656 - Length: 1
"ART".
Excerpt 9722 - Length: 1
“Yes”.
Excerpt 9846 - Length: 1
Rochester?"
Excerpt 9848 - Length: 1
John?"
Excerpt 9947 - Length: 1
"Never!"
Excerpt 10079 - Length: 1
"Quite".
Excerpt 10144 - Length: 1
"What!
Excerpt 10322 - Length: 1
“Very”.
Excerpt 10326 - Length: 1
"Helen!"
Excerpt 10391 - Length: 1
“‘Tention”!
Excerpt 10462 - Length: 1
Rochester!"
Excerpt 10521 - Length: 1
"Grandfather.
Excerpt 10593 - Length: 1
“Certainly.
Excerpt 10671 - Length: 1
"Humbug!
Excerpt 10732 - Length: 1
"Percival!
Excerpt 10735 - Length: 1
Good!
Excerpt 10769 - Length: 1
(Amen!)
Excerpt 10837 - Length: 1
Jane!"
Excerpt 10888 - Length: 1
"Why?"
Excerpt 11096 - Length: 1
"Distasteful!
Excerpt 11336 - Length: 1
"Already?"
Excerpt 11340 - Length: 1
"So!"
Excerpt 11478 - Length: 1
Well?
Excerpt 11501 - Length: 1
Rochester?"
Excerpt 11694 - Length: 1
“You!
Excerpt 11743 - Length: 1
“Nothing.
Excerpt 11798 - Length: 1
(Aside.)
Excerpt 11816 - Length: 1
"Jane!"
Excerpt 11835 - Length: 1
"Right!"
Excerpt 11883 - Length: 1
"Humbug!
Excerpt 11962 - Length: 1
"Indeed!"
Excerpt 11999 - Length: 1
Astronomy.
Excerpt 12034 - Length: 1
"Excellent!
Excerpt 12152 - Length: 1
Come!
Excerpt 12184 - Length: 1
Bang!
Excerpt 12230 - Length: 1
"What?
Excerpt 12251 - Length: 1
“Money.
Excerpt 12306 - Length: 1
"No".
Excerpt 12446 - Length: 1
“Bingley”.
Excerpt 12517 - Length: 1
“Well”!
Excerpt 12552 - Length: 1
"Impossible!
Excerpt 12630 - Length: 1
“Yes.
Excerpt 12638 - Length: 1
"Good!"
Excerpt 12643 - Length: 1
“What”!
Excerpt 12686 - Length: 1
"Why?"
Excerpt 12758 - Length: 1
"Iss!"
Excerpt 12775 - Length: 1
'Soh!'
Excerpt 12784 - Length: 1
“La”!
Excerpt 12843 - Length: 1
Alas!"
Excerpt 12873 - Length: 1
"Certainly.
Excerpt 12883 - Length: 1
"Marian!"
Excerpt 12885 - Length: 1
“Mrs.
Excerpt 12991 - Length: 1
"Look!"
Excerpt 13038 - Length: 1
"No".
Excerpt 13039 - Length: 1
Reed?"
Excerpt 13133 - Length: 1
"Illusion".
Excerpt 13189 - Length: 1
come!"
Excerpt 13195 - Length: 1
"Calm?
Excerpt 13197 - Length: 1
Good-bye".
Excerpt 13198 - Length: 1
Good-bye”!
Excerpt 13321 - Length: 1
"Highty-tighty!
Excerpt 13390 - Length: 1
Amen.
Excerpt 13483 - Length: 1
"Hush!
Excerpt 13492 - Length: 1
"Away!"
Excerpt 13516 - Length: 1
Edward!"
Excerpt 13518 - Length: 1
Fight!
Excerpt 13599 - Length: 1
"Policeman!"
Excerpt 13704 - Length: 1
“How?
Excerpt 13793 - Length: 1
Faugh!
Excerpt 13872 - Length: 1
10th.
Excerpt 13889 - Length: 1
"Exactly.
Excerpt 13902 - Length: 1
Nothing!
Excerpt 13923 - Length: 1
"What?"
Excerpt 13946 - Length: 1
Lost”!
Excerpt 14085 - Length: 1
Kyrle?"
Excerpt 14092 - Length: 1
"Yes".
Excerpt 14097 - Length: 1
“No'm.
Excerpt 14123 - Length: 1
Gilmore?"
Excerpt 14140 - Length: 1
"Yabblins!
Excerpt 14211 - Length: 1
quick!"
Excerpt 14268 - Length: 1
What!
Excerpt 14333 - Length: 1
Good-by!"
Excerpt 14374 - Length: 1
My-soul-bless-my-soul!
Excerpt 14398 - Length: 1
“Where”?
Excerpt 14496 - Length: 1
"Prut!
Excerpt 14557 - Length: 1
"Oh!"
Excerpt 14666 - Length: 1
“Fancy.
Excerpt 14796 - Length: 1
Thornfield!
Excerpt 14926 - Length: 1
“Where”?
Excerpt 14946 - Length: 1
my-soul-bless-my-soul!
Excerpt 15027 - Length: 1
"Humph!
Excerpt 15028 - Length: 1
"Unjust!
Excerpt 15053 - Length: 1
"Footprints".
Excerpt 15135 - Length: 1
Chemistry.
Excerpt 15167 - Length: 1
Hurry”!
Excerpt 15182 - Length: 1
Never!"
Excerpt 15260 - Length: 1
“Wonderful”!
Excerpt 15280 - Length: 1
"Like?
Excerpt 15312 - Length: 1
Curious.
Excerpt 15338 - Length: 1
Whitby.
Excerpt 15365 - Length: 1
"Yes.
Excerpt 15432 - Length: 1
There!
Excerpt 15542 - Length: 1
“Why”?
Excerpt 15564 - Length: 1
"'Hum!'
Excerpt 15643 - Length: 1
"Yes.
Excerpt 15789 - Length: 1
bang!
Excerpt 15880 - Length: 1
“Get”?
Excerpt 15943 - Length: 1
"Hum!"
Excerpt 15969 - Length: 1
“Dead!
Excerpt 15995 - Length: 1
"Cruel?
Excerpt 16040 - Length: 1
?MRS.
Excerpt 16239 - Length: 1
"Journey!
Excerpt 16375 - Length: 1
"Jane!"
Excerpt 16448 - Length: 1
"Consider!"
Excerpt 16522 - Length: 1
21st.
Excerpt 16527 - Length: 1
“Yes.
Excerpt 16605 - Length: 1
Well!
Excerpt 16623 - Length: 1
"Stop!"
Excerpt 16626 - Length: 1
Hopes!
Excerpt 16653 - Length: 1
See!"
Excerpt 16688 - Length: 1
Good-bye”.
Excerpt 16706 - Length: 1
pass!
Excerpt 16747 - Length: 1
land”?
Excerpt 16794 - Length: 1
“Spunk-water!
Excerpt 16833 - Length: 1
Hartright!'
Excerpt 16891 - Length: 1
Mina!
Excerpt 16893 - Length: 1
Good-bye".
Excerpt 16965 - Length: 1
“Who?
Excerpt 16966 - Length: 1
“Mrs.
Excerpt 17005 - Length: 1
Hullo!
Excerpt 17007 - Length: 1
Bhaer?"
Excerpt 17031 - Length: 1
Sisters?
Excerpt 17052 - Length: 1
“Ah”!
Excerpt 17122 - Length: 1
Jane!'"
Excerpt 17127 - Length: 1
"Exactly.
Excerpt 17144 - Length: 1
"Here!
Excerpt 17221 - Length: 1
“Why”?
Excerpt 17295 - Length: 1
Truant!
Excerpt 17325 - Length: 1
Yes?"
Excerpt 17405 - Length: 1
"No".
Excerpt 17408 - Length: 1
“Impropriety!
Excerpt 17477 - Length: 1
"Holmes!"
Excerpt 17552 - Length: 1
"Yes.
Excerpt 17568 - Length: 1
"Good!
Excerpt 17583 - Length: 1
Holmes?"
Excerpt 17607 - Length: 1
“Yes”.
Excerpt 17612 - Length: 1
“Oh”!
Excerpt 17629 - Length: 1
"Why?"
Excerpt 17726 - Length: 1
well!
Excerpt 17731 - Length: 1
"'Ware!"
Excerpt 17739 - Length: 1
"Sentimental?
Excerpt 17747 - Length: 1
"No".
Excerpt 17850 - Length: 1
"Pooh!
Excerpt 17868 - Length: 1
"Margaret".
Excerpt 17917 - Length: 1
WINDSOR-JULY.
Excerpt 18034 - Length: 1
"Yes".
Excerpt 18064 - Length: 1
“Yes.
Excerpt 18083 - Length: 1
“Nonsense”!
Excerpt 18255 - Length: 1
"Hush!
Excerpt 18278 - Length: 1
Merriman?"
Excerpt 18413 - Length: 1
Well!
Excerpt 18420 - Length: 1
look!"
Excerpt 18431 - Length: 1
"Indeed!
Excerpt 18464 - Length: 1
beautiful!
Excerpt 18465 - Length: 1
“Indeed!
Excerpt 18556 - Length: 1
Ting-a-ling-ling!
Excerpt 18706 - Length: 1
"Entirely".
Excerpt 18717 - Length: 1
Well?
Excerpt 18728 - Length: 1
"Yes.
Excerpt 18996 - Length: 1
"Good!
Excerpt 19048 - Length: 1
See”?
Excerpt 19071 - Length: 1
"Hark!"
Excerpt 19124 - Length: 1
Stamp!
Excerpt 19203 - Length: 1
Anatomy.
Excerpt 19215 - Length: 1
"Nonsense!
Excerpt 19222 - Length: 1
“Difference!
Excerpt 19427 - Length: 1
"Crisis?"
Excerpt 19580 - Length: 1
"Impossible!"
Excerpt 19672 - Length: 1
"Count!
Excerpt 19700 - Length: 1
"Yes!"
Excerpt 19769 - Length: 1
“News!
Excerpt 19819 - Length: 1
Hush!
Excerpt 19843 - Length: 1
"Yes".
Excerpt 19853 - Length: 1
'Tisn't!
Excerpt 19914 - Length: 1
“'Smallridge!'
Excerpt 19921 - Length: 1
“Humph!
Excerpt 20041 - Length: 1
"Remember!"
Excerpt 20111 - Length: 1
"Yes'm".
Excerpt 20117 - Length: 1
"Good-bye".
Excerpt 20282 - Length: 1
Alas!
Excerpt 20339 - Length: 1
"Enough!"
Excerpt 20355 - Length: 1
"Oh!"
Excerpt 20491 - Length: 1
"Exactly.
Excerpt 20584 - Length: 1
“Where”?
Excerpt 20592 - Length: 1
“Wrong!
Excerpt 20653 - Length: 1
“Rubbage!
Excerpt 20852 - Length: 1
March'.
Excerpt 20902 - Length: 1
God!"
Excerpt 21052 - Length: 1
"Brooke?
Excerpt 21073 - Length: 1
“Amen!
Excerpt 21114 - Length: 1
16th.
Excerpt 21200 - Length: 1
“Shucks”!
Excerpt 21312 - Length: 1
“Course”.
Excerpt 21344 - Length: 1
"Grateful!
Excerpt 21412 - Length: 1
“Yes”.
Excerpt 21620 - Length: 1
"Yes".
Excerpt 21625 - Length: 1
Catch!
Excerpt 21643 - Length: 1
quick!
Excerpt 21738 - Length: 1
"Never!"
Excerpt 21752 - Length: 1
“Fudge!
Excerpt 21771 - Length: 1
"Lingerer!"
Excerpt 21947 - Length: 1
"Done!
Excerpt 22025 - Length: 1
"Humph!
Excerpt 22059 - Length: 1
Rochester?"
Excerpt 22098 - Length: 1
Come!
Excerpt 22123 - Length: 1
“Lordy”!
Excerpt 22152 - Length: 1
“Aha!
Excerpt 22196 - Length: 1
"What!
Excerpt 22198 - Length: 1
"Dead!"
Excerpt 22298 - Length: 1
Quick”!
Excerpt 22399 - Length: 1
“Yes”.
Excerpt 22648 - Length: 1
"Agreed!"
Excerpt 22682 - Length: 1
“Oh”!
Excerpt 22683 - Length: 1
"Yes".
Excerpt 22687 - Length: 1
"Station!
Excerpt 22802 - Length: 1
“Dream!
Excerpt 22816 - Length: 1
"Un-Dead!
Excerpt 23116 - Length: 1
“Yes.
Excerpt 23147 - Length: 1
“Liquor!
Excerpt 23151 - Length: 1
“Yes.
Excerpt 23313 - Length: 1
Hush!
Excerpt 23377 - Length: 1
"Where?"
Excerpt 23634 - Length: 1
Fairfax?"
Excerpt 23643 - Length: 1
"Thanks.
Excerpt 23662 - Length: 1
Who”?
Excerpt 23808 - Length: 1
“Yes”.
Excerpt 24052 - Length: 1
"Hist!"
Excerpt 24054 - Length: 1
"Why?"
Excerpt 24103 - Length: 1
help!
Excerpt 24214 - Length: 1
Rat!"
Excerpt 24249 - Length: 1
"Yes.
Excerpt 24263 - Length: 1
"Exactly.
Excerpt 24300 - Length: 1
“Nonsense!
Excerpt 24303 - Length: 1
"Evening.
Excerpt 24359 - Length: 1
'Sophie!
Excerpt 24393 - Length: 1
“Yes”.
Excerpt 24455 - Length: 1
"Yes".
Excerpt 24461 - Length: 1
"Exactly.
Excerpt 24555 - Length: 1
Smith!"
Excerpt 24648 - Length: 1
What!
Excerpt 24655 - Length: 1
“Mrs.
Excerpt 24684 - Length: 1
"Doubtless".
Excerpt 24711 - Length: 1
"Brother?
Excerpt 24828 - Length: 1
“Harriet”!
Excerpt 24921 - Length: 1
Reed?"
Excerpt 24963 - Length: 1
"What!
Excerpt 24995 - Length: 1
Come”!
Excerpt 25061 - Length: 1
Humph!
Excerpt 25066 - Length: 1
"Hum!"
Excerpt 25097 - Length: 1
“How”?
Excerpt 25133 - Length: 1
Vesey?"
Excerpt 25332 - Length: 1
“Quick!
Excerpt 25340 - Length: 1
“Yes”.
Excerpt 25353 - Length: 1
"Don't!
Excerpt 25509 - Length: 1
“Good”!
Excerpt 25523 - Length: 1
Unclean!
Excerpt 25771 - Length: 1
"Boh!
Excerpt 25811 - Length: 1
rich?"
Excerpt 25858 - Length: 1
"Exactly".
Excerpt 25873 - Length: 1
“Hooray”!
Excerpt 25891 - Length: 1
Bessie!"
Excerpt 25937 - Length: 1
unjust!"
Excerpt 25990 - Length: 1
John!"
Excerpt 25997 - Length: 1
"Why?
Excerpt 26091 - Length: 1
'Oh!'
Excerpt 26098 - Length: 1
"To-morrow".
Excerpt 26102 - Length: 1
look!"
Excerpt 26133 - Length: 1
"Read!"
Excerpt 26143 - Length: 1
Jane!
Excerpt 26188 - Length: 1
"Come!"
Excerpt 26315 - Length: 1
What!
Excerpt 26388 - Length: 1
Hampshire!
Excerpt 26504 - Length: 1
“Yes”.
Excerpt 26571 - Length: 1
“Yes'm”.
Excerpt 26645 - Length: 1
23rd.
Excerpt 26683 - Length: 1
"Yes?"
Excerpt 26835 - Length: 1
"No".
Excerpt 26841 - Length: 1
"'Nonsense!'
Excerpt 26914 - Length: 1
Nonsense!
Excerpt 26920 - Length: 1
“Yes”.
Excerpt 26937 - Length: 1
Wait!
Excerpt 26992 - Length: 1
What!
Excerpt 27081 - Length: 1
"No".
Excerpt 27086 - Length: 1
"Rather".
Excerpt 27195 - Length: 1
"P.S.
Excerpt 27211 - Length: 1
"Mrs.
Excerpt 27274 - Length: 1
Where?
Excerpt 27464 - Length: 1
Luck!
Excerpt 27586 - Length: 1
"Well?"
Excerpt 27628 - Length: 1
Inspector!"
Excerpt 27689 - Length: 1
18th.
Excerpt 27940 - Length: 1
“Shucks!
Excerpt 28082 - Length: 1
“True.
Excerpt 28138 - Length: 1
Dixon!
Excerpt 28261 - Length: 1
Work!
Excerpt 28270 - Length: 1
"Hem!"
Excerpt 28369 - Length: 1
here!"
Excerpt 28422 - Length: 1
"No".
Excerpt 28450 - Length: 1
“Say!
Excerpt 28461 - Length: 1
"Yes".
Excerpt 28471 - Length: 1
Tudor?"
Excerpt 28542 - Length: 1
"No".
Excerpt 28627 - Length: 1
"Well?"
Excerpt 28668 - Length: 1
"Well?"
Excerpt 28695 - Length: 1
"Well?"
Excerpt 28705 - Length: 1
Impossible!
Excerpt 28836 - Length: 1
Fairfax?"
Excerpt 28901 - Length: 1
“Yes.
Excerpt 28919 - Length: 1
"Percival!"
Excerpt 29012 - Length: 1
“Puffs?
Excerpt 29059 - Length: 1
“No”.
Excerpt 29133 - Length: 1
[Groan.]
Excerpt 29169 - Length: 1
“Mrs.
Excerpt 29315 - Length: 1
"Baxter?"
Excerpt 29342 - Length: 1
"Father!
Excerpt 29366 - Length: 1
“How”?
Excerpt 29402 - Length: 1
"Consider!"
Excerpt 29420 - Length: 1
"Ah!"
Excerpt 29449 - Length: 1
"No".
Excerpt 29720 - Length: 1
"Believe!
Excerpt 29744 - Length: 1
"Come!
Excerpt 29853 - Length: 1
"Anywhere.
Excerpt 29992 - Length: 1
“Mrs.
Excerpt 30000 - Length: 1
“Like?
Excerpt 30125 - Length: 1
“Who”?
Excerpt 30224 - Length: 1
What!
Excerpt 30407 - Length: 1
"Well?"
Excerpt 30408 - Length: 1
"Why?"
Excerpt 30436 - Length: 1
"What!
Excerpt 30438 - Length: 1
There!
Excerpt 30444 - Length: 1
“Yes.
Excerpt 30445 - Length: 1
"Ahem!"
Excerpt 30509 - Length: 1
“Mrs.
Excerpt 30537 - Length: 1
S'H'T!
Excerpt 30538 - Length: 1
Michelson?"
Excerpt 30856 - Length: 1
“No”.
Excerpt 30978 - Length: 1
"There!
Excerpt 31118 - Length: 1
to-morrow!
Excerpt 31161 - Length: 1
"LUCY.
Excerpt 31165 - Length: 1
"Sir?"
Excerpt 31290 - Length: 1
"Who?"
Excerpt 31294 - Length: 1
"Nay!
Excerpt 31301 - Length: 1
“Mrs.
Excerpt 31380 - Length: 1
“Goody!...
Excerpt 31528 - Length: 1
Title.
Excerpt 31622 - Length: 1
“Mrs.
Excerpt 31657 - Length: 1
"Why?
Excerpt 31760 - Length: 1
Toodles'.
Excerpt 31848 - Length: 1
"Why?"
Excerpt 31850 - Length: 1
"One!"
Excerpt 31862 - Length: 1
“Fiddlesticks!
Excerpt 31941 - Length: 1
"Perfectly".
Excerpt 31953 - Length: 1
Alas!
Excerpt 32013 - Length: 1
hurry!
Excerpt 32024 - Length: 1
(A-A-Men!)
Excerpt 32049 - Length: 1
"Jane!
Excerpt 32079 - Length: 1
"Helen".
Excerpt 32080 - Length: 1
"Indeed?
Excerpt 32300 - Length: 1
"No".
Excerpt 32450 - Length: 1
Percival!"
Excerpt 32488 - Length: 1
Lyons!"
Excerpt 32540 - Length: 1
“True.
Excerpt 32615 - Length: 1
"P.S.
Excerpt 32654 - Length: 1
"Mason!
Excerpt 32672 - Length: 1
"None.
Excerpt 32687 - Length: 1
"Enormous".
Excerpt 32694 - Length: 1
"Come!"
Excerpt 32769 - Length: 1
"Very.
Excerpt 32801 - Length: 1
"Yes.
Excerpt 32882 - Length: 1
"What?"
Excerpt 32892 - Length: 1
“Dora!
Excerpt 32959 - Length: 1
"Rather!"
Excerpt 33004 - Length: 1
Good-night".
Excerpt 33011 - Length: 1
“Hop?
Excerpt 33074 - Length: 1
"Stoop!"
Excerpt 33094 - Length: 1
“‘St.
Excerpt 33181 - Length: 1
Sophie!'
Excerpt 33183 - Length: 1
“Hello”!
Excerpt 33231 - Length: 1
“Sh!...
Excerpt 33234 - Length: 1
Old?"
Excerpt 33301 - Length: 1
Jane!'
Excerpt 33335 - Length: 1
“Certainly.
Excerpt 33354 - Length: 1
"Why?"
Excerpt 33649 - Length: 1
"Thick!
Excerpt 33650 - Length: 1
"Conditionally".
Excerpt 33676 - Length: 1
(Amen!)
Excerpt 33758 - Length: 1
Philosophy.
Excerpt 33794 - Length: 1
“No'm.
Excerpt 33832 - Length: 1
"Explain!
Excerpt 33845 - Length: 1
Stop!
Excerpt 34012 - Length: 1
There!...
Excerpt 34174 - Length: 1
“Splendid!
Excerpt 34178 - Length: 1
"Yes".
Excerpt 34183 - Length: 1
"Good!
Excerpt 34213 - Length: 1
Goodbye!"
Excerpt 34215 - Length: 1
“No”!
Excerpt 34260 - Length: 1
Stop!
Excerpt 34277 - Length: 1
"Hush!"
Excerpt 34373 - Length: 1
"What!
Excerpt 34486 - Length: 1
"ARTHUR".
Excerpt 34504 - Length: 1
benefactress!"
Excerpt 34512 - Length: 1
“Mrs.
Excerpt 34643 - Length: 1
"Wake!
Excerpt 34773 - Length: 1
Going?
Excerpt 34833 - Length: 1
“Yes”.
Excerpt 35036 - Length: 1
Cramer.
Excerpt 35109 - Length: 1
Impossible!
Excerpt 35249 - Length: 1
"Here!
Excerpt 35332 - Length: 1
"What!
Excerpt 35373 - Length: 1
"Man!"
Excerpt 35420 - Length: 1
"Spirits".
Excerpt 35451 - Length: 1
“Noth'n”.
Excerpt 35541 - Length: 1
"No!"
Excerpt 35659 - Length: 1
A-a-men”!
Excerpt 35742 - Length: 1
"Indeed!
Excerpt 35861 - Length: 1
"Yes".
Excerpt 36096 - Length: 1
"Stubborn?"
Excerpt 36110 - Length: 1
“Hours.
Excerpt 36153 - Length: 1
Sheep-pens?"
Excerpt 36204 - Length: 1
Barrymore?"
Excerpt 36251 - Length: 1
SH'T”!
Excerpt 36337 - Length: 1
Aye!"
Excerpt 36413 - Length: 1
“Shucks”!
Excerpt 36553 - Length: 1
“Look!
Excerpt 36654 - Length: 1
Faugh!
Excerpt 36728 - Length: 1
"Excellent!"
Excerpt 36893 - Length: 1
"Excellent!"
Excerpt 36945 - Length: 1
"Drink!
Excerpt 36953 - Length: 1
Clements?"
Excerpt 36998 - Length: 1
Chow!
Excerpt 37084 - Length: 1
"Yes".
Excerpt 37189 - Length: 1
"'Cabbages!'"
Excerpt 37210 - Length: 1
“Positive”!
Excerpt 37257 - Length: 1
Bessie!
Excerpt 37264 - Length: 1
pass!
Excerpt 37294 - Length: 1
“Neglect!
Excerpt 37362 - Length: 1
"Jane!"
Excerpt 37401 - Length: 1
"Humph!"
Excerpt 37423 - Length: 1
"Stop!
Excerpt 37434 - Length: 1
“Ah”!
Excerpt 37664 - Length: 1
"How?
Excerpt 37840 - Length: 1
Stead-y-y-y”!
Excerpt 37896 - Length: 1
Ting-a-ling-ling!
Excerpt 37982 - Length: 1
“Mary?
Excerpt 37991 - Length: 1
Where”?
Excerpt 38059 - Length: 1
"Yes".
Excerpt 38136 - Length: 1
"Certainly.
Excerpt 38337 - Length: 1
wake!"
Excerpt 38359 - Length: 1
"Said?"
Excerpt 38450 - Length: 1
Profound.
Excerpt 38573 - Length: 1
drink!"
Excerpt 38702 - Length: 1
“Mean?
Excerpt 38711 - Length: 1
“Talk?
Excerpt 38790 - Length: 1
"Perfectly".
Excerpt 38834 - Length: 1
"Hey!
Excerpt 38935 - Length: 1
“No'm”.
Excerpt 38976 - Length: 1
“His'n?
Excerpt 39020 - Length: 1
"Cumberland!"
Excerpt 39211 - Length: 1
"Jane!"
Excerpt 39265 - Length: 1
"Very.
Excerpt 39335 - Length: 1
presto!
Excerpt 39497 - Length: 1
[Groan.]
Excerpt 39571 - Length: 1
John!"
Excerpt 39579 - Length: 1
“Say?
Excerpt 39632 - Length: 1
"No".
Excerpt 39675 - Length: 1
Good-night".
Excerpt 39719 - Length: 1
“What!
Excerpt 39729 - Length: 1
“No”.
Excerpt 39800 - Length: 1
"Dead!"
Excerpt 39820 - Length: 1
"Hum!
Excerpt 39897 - Length: 1
"There!"
Excerpt 40006 - Length: 1
"Certainly".
Excerpt 40045 - Length: 1
"Yes".
Excerpt 40098 - Length: 1
Strange!"
Excerpt 40459 - Length: 1
BASKERVILLE".
Excerpt 40495 - Length: 1
"Cold?
Excerpt 40567 - Length: 1
"Hush!
Excerpt 40577 - Length: 1
"Why?
Excerpt 40651 - Length: 1
Hartright.
Excerpt 40673 - Length: 1
"Where?"
Excerpt 40780 - Length: 1
“Cairo?
Excerpt 40843 - Length: 1
Lucy!"
Excerpt 40864 - Length: 1
“Yes.
Excerpt 40916 - Length: 1
Well!
Excerpt 41172 - Length: 1
19th.
Excerpt 41208 - Length: 1
"Yes".
Excerpt 41432 - Length: 1
Splendid!
Excerpt 41478 - Length: 1
"Quick!"
Excerpt 41609 - Length: 1
“Yas’m.
Excerpt 41740 - Length: 1
"Stop!"
Excerpt 41755 - Length: 1
Night.
Excerpt 41785 - Length: 1
"Yes".
Excerpt 41883 - Length: 1
Bhaer?"
Excerpt 41885 - Length: 1
Knightley.'
Excerpt 41895 - Length: 1
"Hush!
Excerpt 41966 - Length: 1
"Fall!
Excerpt 41987 - Length: 1
"Worse!
Excerpt 41993 - Length: 1
"Yes".
Excerpt 42046 - Length: 1
“Yes!
Excerpt 42165 - Length: 1
"Footprints?"
Excerpt 42170 - Length: 1
"Grace!"
Excerpt 42208 - Length: 1
hasten!
Excerpt 42249 - Length: 1
"Excellent!
Excerpt 42407 - Length: 1
"Suspicion?"
Excerpt 42495 - Length: 1
“Nothing!
Excerpt 42752 - Length: 1
“Yas’m.
Excerpt 42769 - Length: 1
"Speak!
Excerpt 42801 - Length: 1
“What”!
Excerpt 42844 - Length: 1
14th.
Excerpt 42914 - Length: 1
See!"
Excerpt 42987 - Length: 1
Come!
Excerpt 43061 - Length: 1
"Hum!
Excerpt 43107 - Length: 1
"Pocket".
Excerpt 43185 - Length: 1
“Undoubtedly.
Excerpt 43199 - Length: 1
Alas!
Excerpt 43209 - Length: 1
"Daily".
Excerpt 43269 - Length: 1
“Nothing.
Excerpt 43542 - Length: 1
"-shire?
Excerpt 43550 - Length: 1
"Yes".
Excerpt 43573 - Length: 1
"Well!"
Excerpt 43591 - Length: 1
Blind!
Excerpt 43639 - Length: 1
to-night!"
Excerpt 43670 - Length: 1
"Yes".
Excerpt 43736 - Length: 1
"Ouf!"
Excerpt 43761 - Length: 1
11th.
Excerpt 43804 - Length: 1
"Certainly".
Excerpt 43839 - Length: 1
Jack!
Excerpt 43995 - Length: 1
Hartright?"
Excerpt 44003 - Length: 1
“Pretty”!
Excerpt 44044 - Length: 1
presto!
Excerpt 44244 - Length: 1
Rivers!"
Excerpt 44340 - Length: 1
Alas!
Excerpt 44362 - Length: 1
25th.
Excerpt 44396 - Length: 1
"Good!"
Excerpt 44511 - Length: 1
"Pre-cise-ly!"
Excerpt 44521 - Length: 1
"Yes".
Excerpt 44623 - Length: 1
Father?
Excerpt 44638 - Length: 1
“Harem”.
Excerpt 44693 - Length: 1
“Oh”!
Excerpt 44724 - Length: 1
Good!
Excerpt 44750 - Length: 1
"No".
Excerpt 44885 - Length: 1
"Justice!"
Excerpt 44891 - Length: 1
"What!
Excerpt 44987 - Length: 1
“Money!
Excerpt 45030 - Length: 1
“What!
Excerpt 45131 - Length: 1
“Everybody”?
Excerpt 45183 - Length: 1
"Sir?"
Excerpt 45463 - Length: 1
"Exactly".
Excerpt 45523 - Length: 1
don't!
Excerpt 45806 - Length: 1
"No".
Excerpt 46004 - Length: 1
“Married”!
Excerpt 46048 - Length: 1
“Kill?
Excerpt 46072 - Length: 1
“Mrs.
Excerpt 46165 - Length: 1
"Yes.
Excerpt 46344 - Length: 1
“Simple”!
Excerpt 46386 - Length: 1
Harker!"
Excerpt 46394 - Length: 1
“Mrs.
Excerpt 46492 - Length: 1
"Exactly.
Excerpt 46616 - Length: 1
"Sing!"
Excerpt 46691 - Length: 1
M.R.C.S.
Excerpt 46696 - Length: 1
"Thornfield?
Excerpt 46737 - Length: 1
"Good!
Excerpt 46752 - Length: 1
"None".
Excerpt 46842 - Length: 1
“Ransomed?
Excerpt 46947 - Length: 1
"Jane!
Excerpt 47045 - Length: 1
"Good!"
Excerpt 47396 - Length: 1
###Markdown
Even though it is unusual to have text excerpts as long or as short as the outliers in our dataset, examination of these excerpts indicates that they do all appear to be proper sentences, so no adjustments need to be made. We now perform similar checks with the average length data.
###Code
# Get average length outliers
length_outliers = np.where(ave_length > 10)
for i in length_outliers[0]:
print("Excerpt {} - Average Length: {}".format(i, ave_length[i]))
print(text[i], "\n")
length_outliers = np.where(ave_length < 3.5)
for i in length_outliers[0]:
print("Excerpt {} - Average Length: {}".format(i, ave_length[i]))
print(text[i], "\n")
###Output
Excerpt 120 - Average Length: 3.4
I do not see it”.
Excerpt 439 - Average Length: 3.3333333333333335
Am I ill?"
Excerpt 1199 - Average Length: 3.4
Now I saw no bad.
Excerpt 1413 - Average Length: 3.25
I see it all!
Excerpt 1933 - Average Length: 2.5
But .
Excerpt 2074 - Average Length: 3.25
I rose to go.
Excerpt 2111 - Average Length: 2.5
Am I?
Excerpt 2700 - Average Length: 3.3333333333333335
I said so.
Excerpt 2794 - Average Length: 2.6666666666666665
So am I.
Excerpt 2882 - Average Length: 3.3333333333333335
let me go!
Excerpt 2888 - Average Length: 3.0
Not I!
Excerpt 2984 - Average Length: 3.2857142857142856
I see I was up a stump.
Excerpt 4159 - Average Length: 3.0
Go on.
Excerpt 4246 - Average Length: 3.0
So it is.
Excerpt 5105 - Average Length: 3.3333333333333335
Woe is me!
Excerpt 6697 - Average Length: 3.25
So I done it.
Excerpt 6972 - Average Length: 3.3333333333333335
So I quit.
Excerpt 7325 - Average Length: 3.3333333333333335
How can I?
Excerpt 7624 - Average Length: 3.3333333333333335
"So did I.
Excerpt 8622 - Average Length: 3.25
Go on go on!"
Excerpt 8684 - Average Length: 3.0
"I am.
Excerpt 8923 - Average Length: 3.4
I am in no hurry.
Excerpt 8953 - Average Length: 3.3333333333333335
I felt it!
Excerpt 9891 - Average Length: 3.3333333333333335
“So do I”.
Excerpt 10021 - Average Length: 3.4285714285714284
“I do not get on at all.
Excerpt 10647 - Average Length: 3.4
“Not a bit of it.
Excerpt 11469 - Average Length: 3.0
Oh no!
Excerpt 11946 - Average Length: 3.3333333333333335
I saw you.
Excerpt 12633 - Average Length: 3.3333333333333335
"So I see.
Excerpt 12805 - Average Length: 3.3333333333333335
“So it is.
Excerpt 13881 - Average Length: 3.1666666666666665
er is a cow a cat”?
Excerpt 14019 - Average Length: 3.25
So I done it.
Excerpt 14473 - Average Length: 3.0
I ask.
Excerpt 15472 - Average Length: 3.0
Ah me!
Excerpt 18451 - Average Length: 3.25
I took it up.
Excerpt 18719 - Average Length: 2.5
O no.
Excerpt 21507 - Average Length: 3.4
Do as I bid you”.
Excerpt 21576 - Average Length: 3.3333333333333335
Let us go.
Excerpt 23563 - Average Length: 3.25
Is it not so?
Excerpt 24441 - Average Length: 3.0
"So I do!
Excerpt 25159 - Average Length: 2.5
But .
Excerpt 26017 - Average Length: 3.2
What am I to do?
Excerpt 26326 - Average Length: 3.3333333333333335
Let me go!
Excerpt 26818 - Average Length: 3.4444444444444446
), to do as I would be done by.
Excerpt 27447 - Average Length: 3.2
What am I to do?
Excerpt 27505 - Average Length: 3.2
what am I to do?
Excerpt 28530 - Average Length: 3.3333333333333335
he is off.
Excerpt 28729 - Average Length: 3.3333333333333335
“So do I”.
Excerpt 28979 - Average Length: 3.0
Go on!
Excerpt 30475 - Average Length: 3.25
Is it not so?
Excerpt 30643 - Average Length: 3.0
“So do I!
Excerpt 31281 - Average Length: 3.4
"Not a bit of it.
Excerpt 32051 - Average Length: 3.0
Is I me, or who is I?
Excerpt 32562 - Average Length: 3.0
To me!
Excerpt 33310 - Average Length: 2.5
I do.
Excerpt 33402 - Average Length: 3.4
“To be sure I am.
Excerpt 35299 - Average Length: 3.3333333333333335
"So it is.
Excerpt 35974 - Average Length: 3.0
Oh no!
Excerpt 37256 - Average Length: 3.3333333333333335
"So do I!"
Excerpt 38565 - Average Length: 3.0
Oh no!
Excerpt 38566 - Average Length: 3.4
What was I to do?
Excerpt 39167 - Average Length: 3.3333333333333335
It may be!
Excerpt 39288 - Average Length: 3.0
So I did.
Excerpt 39380 - Average Length: 3.3333333333333335
oh my God!
Excerpt 39551 - Average Length: 3.4285714285714284
Is I heah, or whah is I?
Excerpt 42543 - Average Length: 3.3333333333333335
I done it.
Excerpt 42771 - Average Length: 3.0
"I do.
Excerpt 44398 - Average Length: 3.0
I see!
Excerpt 44503 - Average Length: 3.4285714285714284
And if you can do so !"
Excerpt 45100 - Average Length: 3.3333333333333335
Is she up?
Excerpt 46251 - Average Length: 3.0
I did so.
Excerpt 46416 - Average Length: 3.25
Can I do it?"
Excerpt 47500 - Average Length: 3.3333333333333335
"If I can.
Excerpt 47623 - Average Length: 3.0
Oh no!
Excerpt 47770 - Average Length: 3.25
I I run off”.
Excerpt 48113 - Average Length: 3.0
: Rev.
Excerpt 48735 - Average Length: 3.125
I see I was in a fix now.
Excerpt 52349 - Average Length: 3.4
“I bet I know it.
Excerpt 52901 - Average Length: 3.0
I am old.
Excerpt 53884 - Average Length: 3.0
Oh me!
Excerpt 54655 - Average Length: 3.3333333333333335
And so on.
Excerpt 55242 - Average Length: 3.0
oh no!
Excerpt 56002 - Average Length: 3.3333333333333335
I knew it!
Excerpt 56770 - Average Length: 3.3333333333333335
"But I do.
Excerpt 57344 - Average Length: 3.0
I did.
Excerpt 57834 - Average Length: 3.0
I did.
Excerpt 57845 - Average Length: 3.3333333333333335
I will go!
Excerpt 58316 - Average Length: 3.4
Do you own a dog?
Excerpt 58471 - Average Length: 3.3333333333333335
"So I did!
Excerpt 59172 - Average Length: 3.4
I went up to her.
Excerpt 59575 - Average Length: 3.3333333333333335
He has to.
Excerpt 60205 - Average Length: 3.4
Is it you or me?"
Excerpt 60264 - Average Length: 3.3333333333333335
I was now in for it.
Excerpt 60441 - Average Length: 3.4
I had to hold on.
Excerpt 60824 - Average Length: 3.25
“I lay I did!
Excerpt 61619 - Average Length: 3.3333333333333335
I set out.
Excerpt 61680 - Average Length: 3.0
“Yes .
Excerpt 62121 - Average Length: 3.25
Is it not so?
Excerpt 64209 - Average Length: 3.25
I do my best.
Excerpt 64317 - Average Length: 3.0
5 May.
Excerpt 64399 - Average Length: 3.3333333333333335
A. or C.S.
Excerpt 64969 - Average Length: 3.3333333333333335
I went on.
Excerpt 65595 - Average Length: 3.0
A boy!
Excerpt 65830 - Average Length: 3.0
“So 'd I.
Excerpt 65857 - Average Length: 3.4
What was I to do?
Excerpt 66487 - Average Length: 3.4285714285714284
But it is best as it is.
Excerpt 66670 - Average Length: 3.0
I did.
###Markdown
Large average word lengths tend to be associated with short sentences containing a small number of long words, and similarly, small average word lengths tend to be associated with short sentences containing a small number of short words. There is nothing wrong with this, so no adjustments need to be made. It should be noted that this analysis revealed several sentences with large blocks of white space within them. We shall remove these blocks of white space during preprocessing. **Explore the words and characters** Next we look at the characters that make up the text excerpts, to check for any strange characters.
###Code
# Create string containing all excerpts in lower case
text_string = ''
for i in range(len(text)):
text_string += text[i].lower()
# Get character frequencies
char_cnt = Counter(text_string)
print(char_cnt)
print(len(char_cnt))
###Output
Counter({' ': 1157092, 'e': 627136, 't': 451237, 'a': 409675, 'o': 389824, 'n': 348067, 'i': 344868, 'h': 317481, 's': 312893, 'r': 288591, 'd': 232160, 'l': 211060, 'u': 147257, 'm': 137853, 'w': 127545, 'c': 113126, 'y': 112329, 'f': 107854, 'g': 102562, ',': 89890, 'p': 78758, 'b': 76660, '.': 63034, 'v': 48234, 'k': 44214, '"': 22813, "'": 17378, ';': 9705, 'j': 8704, '“': 8045, '”': 7951, 'x': 6966, '?': 6549, '-': 6398, '’': 5311, '!': 4932, 'q': 4861, ':': 3839, 'z': 2388, '*': 677, ')': 491, '(': 490, '‘': 279, '1': 193, '2': 168, '3': 91, '0': 80, '8': 65, '5': 64, '7': 62, '4': 61, '6': 44, '&': 39, ']': 38, '[': 38, '9': 38, '\xa0': 35, '{': 17, '}': 16, 'è': 11, 'ö': 9, 'é': 8, 'æ': 7, 'ñ': 3, '£': 3, 'à': 3, 'ë': 3, 'ï': 2, 'â': 2, 'ê': 2, '$': 2, 'ô': 1, 'á': 1})
73
###Markdown
There are a number of unusual characters in the text excerpts. '\xa0' is a non-breaking space rather than meaningful text, so it should be removed. The accented characters, on the other hand, may or may not be valid, so should be explored further.
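A simple way to perform the removal just mentioned (a sketch of an assumed cleanup step, using new helper names, not the notebook's own preprocessing) is to replace non-breaking spaces and collapse runs of whitespace; the accented characters are examined first in the next cell.
###Code
import re
# Sketch: replace non-breaking spaces and collapse whitespace runs
cleaned_text = [re.sub(r'\s+', ' ', t.replace('\xa0', ' ')).strip() for t in text]
###Output
_____no_output_____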
###Code
# Get character count dictionary keys
print(list(char_cnt.keys()))
# Create list of accented characters
accented_chars = ['ï', 'é', 'ñ', 'è', 'ö', 'æ', 'ô', 'â', 'á', 'à', 'ê', 'ë']
# Find all texts containing unusual characters
accented_text = []
for i in range(len(text)):
for j in text[i]:
if j in accented_chars:
accented_text.append(i)
accented_text = list(set(accented_text))
print('There are', str(len(accented_text)), 'texts containing accented characters.')
# Print accented texts
for i in accented_text:
print("Excerpt {}".format(i))
print(text[i] + '\n')
###Output
Excerpt 5892
Carére has blurred my recollection of Baskerville Hall.
Excerpt 56197
I leaned back in the embrasure in a more comfortable position, so that I could enjoy more fully the aërial gambolling.
Excerpt 61060
To which he smiled a sad sort of smile as he replied: "He is her lover, her fiancé.
Excerpt 60169
If there be anything behind this instinct it will be valuable to trace it afterwards accurately, so I had better commence to do so, therefore R. M. Renfield, ætat 59.
Excerpt 9484
Omnia Romæ venalia sunt.
Excerpt 19984
Our correspondent naïvely says that even Ellen Terry could not be so winningly attractive as some of these grubby-faced little children pretend and even imagine themselves to be.
Excerpt 28692
But the conditions of her are in no way anæmic.
Excerpt 4629
Lucy came with me, and we went early to our old seat, whilst the cortège of boats went up the river to the Viaduct and came down again.
Excerpt 39449
He smiled on me in quite a superior sort of way such a smile as would have become the face of Malvolio as he answered me: "The fly, my dear sir, has one striking feature; its wings are typical of the aërial powers of the psychic faculties.
Excerpt 45213
"Give me the Herr's luggage," said the driver; and with exceeding alacrity my bags were handed out and put in the calèche.
Excerpt 52382
After many inquiries and almost as many refusals, and perpetually using the words "Pall Mall Gazette" as a sort of talisman, I managed to find the keeper of the section of the Zoölogical Gardens in which the wolf department is included.
Excerpt 18338
This was all done en règle; and in our work we shall be en règle too.
Excerpt 48930
Van Helsing ordered the former arrangement to be adhered to, explaining that, as Lord Godalming was coming very soon, it would be less harrowing to his feelings to see all that was left of his fiancée quite alone.
Excerpt 61732
When I could see again the driver was climbing into the calèche, and the wolves had disappeared.
Excerpt 44581
Two dark jagged peaks loomed above them through the darkness, and the defile which led between them was the Eagle Cañon in which the horses were awaiting them.
Excerpt 66469
I took my courage à deux mains and waited.
Excerpt 35886
"We shall wait," said Van Helsing, "just long enough to fix the best spot for trephining, so that we may most quickly and perfectly remove the blood clot; for it is evident that the hæmorrhage is increasing".
Excerpt 13624
This murder would have been infinitely more difficult to unravel had the body of the victim been simply found lying in the roadway without any of those outré and sensational accompaniments which have rendered it remarkable.
Excerpt 33210
Uncle rushed out and bought a pair of dogskin gloves, some ugly, thick shoes, and an umbrella, and got shaved à la mutton chop, the first thing.
Excerpt 18493
Some sailor may have brought one home, and it managed to escape; or even from the Zoölogical Gardens a young one may have got loose, or one be bred there from a vampire.
Excerpt 1470
Even while he was speaking an idea dawned upon him, and he said with unconscious simplicity, in a different voice, and with the naïveté of a child: "That's quite true, upon my honour.
Excerpt 47935
There are swift-flowing rivers which dash through jagged cañons; and there are enormous plains, which in winter are white with snow, and in summer are grey with the saline alkali dust.
Excerpt 3650
When the calèche stopped, the driver jumped down and held out his hand to assist me to alight.
Excerpt 34114
Yet I am certain that he does not wish their intimacy to ripen into love, and I have several times observed that he has taken pains to prevent them from being tête-à-tête.
Excerpt 24900
Nordau and Lombroso would so classify him, and quâ criminal he is of imperfectly formed mind.
Excerpt 40516
I shouted and beat the side of the calèche, hoping by the noise to scare the wolves from that side, so as to give him a chance of reaching the trap.
Excerpt 67013
Then, amongst a chorus of screams from the peasants and a universal crossing of themselves, a calèche, with four horses, drove up behind us, overtook us, and drew up beside the coach.
Excerpt 4168
I had no idea how long he might be, but I sat stolidly puffing at my pipe and skipping over the pages of Henri Murger’s “Vie de Bohème”.
Excerpt 62926
Then, far off in the distance, from the mountains on each side of us began a louder and a sharper howling that of wolves which affected both the horses and myself in the same way for I was minded to jump from the calèche and run, whilst they reared again and plunged madly, so that the driver had to use all his great strength to keep them from bolting.
Excerpt 1872
They are waiting for me at the cañon.
Excerpt 22096
I shall have to invent a new classification for him, and call him a zoöphagous (life-eating) maniac; what he desires is to absorb as many lives as he can, and he has laid himself out to achieve it in a cumulative way.
Excerpt 20571
You must get a new patient, doctor, if you wish to study zoöphagy!"
Excerpt 14685
It is only in accordance with general principles of human nature that the "bloofer lady" should be the popular rôle at these al fresco performances.
Excerpt 14558
How he has been making use of the zoöphagous patient to effect his entry into friend John's home; for your Vampire, though in all afterwards he can come when and how he will, must at the first make entry only when asked thereto by an inmate.
Excerpt 22496
If she were in any way anæmic I could understand it, but she is not.
Excerpt 6627
Mortimer had stayed to dinner, and he and the baronet played ecarté afterwards.
Excerpt 11620
I went over and read: "Edward Spencelagh, master mariner, murdered by pirates off the coast of Andres, April, 1854, æt.
Excerpt 12899
Even the professional detectives, blasé as they were in every detail of crime, appeared to be keenly interested in the man’s story.
Excerpt 14566
My own work, with its manifold arrears, took me all day to clear off; it was dark when I was able to inquire about my zoöphagous patient.
Excerpt 26087
Again, when, after the battle of Mohács, we threw off the Hungarian yoke, we of the Dracula blood were amongst their leaders, for our spirit would not brook that we were not free.
Excerpt 63977
It was almost as if the sound sprang up at the rising of his hand, just as the music of a great orchestra seems to leap under the bâton of the conductor.
Excerpt 37994
Stay; he is himself zoöphagous, and in his wild ravings outside the chapel door of the deserted house he always spoke of "master".
Excerpt 14704
The house is very large and of all periods back, I should say, to mediæval times, for one part is of stone immensely thick, with only a few windows high up and heavily barred with iron.
Excerpt 27633
Carére, the young lady who, as it will be remembered, was found six months later alive and married in New York.
Excerpt 29942
Interview with the Keeper in the Zoölogical Gardens.
Excerpt 22263
Then I descended from the side of the coach, as the calèche was close alongside, the driver helping me with a hand which caught my arm in a grip of steel; his strength must have been prodigious.
Excerpt 7032
Zoöphagous patient still keeps up our interest in him.
Excerpt 49146
I got a cup of tea at the Aërated Bread Company and came down to Purfleet by the next train.
###Markdown
The texts containing accented characters do appear to be legitimate foreign words and not corrupt strings. As a result, no corrections are required. **Summary** Based on the above analysis, our data appears to be in reasonably good shape. The only abnormalities that have been identified that require correction are the presence of several invalid characters and the presence of several large blocks of white space. These shall be removed during the pre-processing stage. Data Preprocessing **Remove invalid characters and large blocks of white space** As discussed in the previous section, the first step required to preprocess the data is to remove any invalid characters or large blocks of white space.
###Code
# Remove invalid character from text
text = [excerpt.replace('\xa0', '') for excerpt in text]
# Verify character has been removed
unusual_text = []
for i in range(len(text)):
for j in text[i]:
if j == '\xa0':
unusual_text.append(i)
unusual_text = list(set(unusual_text))
print('There are', str(len(unusual_text)), 'texts containing the invalid character.')
# Count texts containing white space blocks
ctr = 0
for excerpt in text:
if " " in excerpt:
ctr += 1
print('There are', ctr, 'excerpts containing blocks of white space.')
# Remove blocks of white space
new_text = []
for excerpt in text:
while " " in excerpt:
excerpt = excerpt.replace(" "," ")
new_text.append(excerpt)
text = new_text
print(len(text))
ctr = 0
for excerpt in text:
if " " in excerpt:
ctr += 1
print('There are', ctr, 'excerpts containing blocks of white space.')
###Output
There are 0 excerpts containing blocks of white space.
###Markdown
**Remove punctuation and convert to lowercase** To normalize the excerpts, we remove all punctuation and convert them to lowercase.
###Code
normed_text = []
for i in range(len(text)):
new = text[i].lower()
new = new.translate(str.maketrans('','', string.punctuation))
new = new.replace('“', '').replace('”', '')
normed_text.append(new)
print(normed_text[0:5])
print(len(normed_text))
###Output
['im afraid i couldnt like him without a spice of human naughtiness', 'yonder was the banks and the islands across the water and maybe a spark which was a candle in a cabin window and sometimes on the water you could see a spark or two on a raft or a scow you know and maybe you could hear a fiddle or a song coming over from one of them crafts', 'well as i was saying about the parlor there was beautiful curtains on the windows white with pictures painted on them of castles with vines all down the walls and cattle coming down to drink', 'here again the count had not openly committed himself here again he was to all practical purpose out of my reach', 'no assented tom they dont kill the women theyre too noble']
68000
###Markdown
**Create training and test subsets**
###Code
text_train, text_test, author_train, author_test = train_test_split(normed_text, author, test_size = 0.2, random_state = 5)
# Check shapes of created datasets
print(np.shape(text_train))
print(np.shape(text_test))
print(np.shape(author_train))
print(np.shape(author_test))
###Output
(54400,)
(13600,)
(54400,)
(13600,)
###Markdown
**Create n-gram sequences**
###Code
def create_n_grams(excerpt_list, n, vocab_size, seq_size):
"""Create a list of n-gram sequences
Args:
excerpt_list: list of strings. List of normalized text excerpts.
n: int. Length of n-grams.
vocab_size: int. Size of n-gram vocab (used in one-hot encoding)
seq_size: int. Size of n-gram sequences
Returns:
n_gram_array: array. Numpy array of one-hot encoded n-grams.
"""
n_gram_list = []
for excerpt in excerpt_list:
# Remove spaces
excerpt = excerpt.replace(" ", "")
# Extract n-grams
n_grams = [excerpt[i:i + n] for i in range(len(excerpt) - n + 1)]
# Convert to a single string with spaces between n-grams
new_string = " ".join(n_grams)
# One hot encode
hot = one_hot(new_string, round(vocab_size*1.3))
# Pad hot if necessary
hot_len = len(hot)
if hot_len >= seq_size:
hot = hot[0:seq_size]
else:
diff = seq_size - hot_len
extra = [0]*diff
hot = hot + extra
n_gram_list.append(hot)
n_gram_array = np.array(n_gram_list)
return n_gram_array
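# Illustrative sanity check (added for clarity; not part of the original pipeline):
# the raw character 2-grams extracted from a short string, before one-hot encoding.
sample = "the cat"
sample_chars = sample.replace(" ", "")
print([sample_chars[i:i + 2] for i in range(len(sample_chars) - 2 + 1)])
# -> ['th', 'he', 'ec', 'ca', 'at']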
def get_vocab_size(excerpt_list, n, seq_size):
"""Calculate size of n-gram vocab
Args:
excerpt_list: list of strings. List of normalized text excerpts.
n: int. Length of n-grams.
seq_size: int. Size of n-gram sequences
Returns:
vocab_size: int. Size of n-gram vocab.
"""
n_gram_list = []
for excerpt in excerpt_list:
# Remove spaces
excerpt = excerpt.replace(" ", "")
# Extract n-grams
n_grams = [excerpt[i:i + n] for i in range(len(excerpt) - n + 1)]
# Create list of n-grams
gram_len = len(n_grams)
if gram_len >= seq_size:
n_grams = n_grams[0:seq_size]
else:
diff = seq_size - gram_len
extra = [0]*diff
n_grams = n_grams + extra
n_gram_list.append(n_grams)
# Flatten n-gram list
n_gram_list = list(np.array(n_gram_list).flat)
# Calculate vocab size
n_gram_cnt = Counter(n_gram_list)
vocab_size = len(n_gram_cnt)
return vocab_size
# Determine vocab sizes
for i in range(1, 4):
vocab_size = get_vocab_size(text_train, i, 350)
print('Vocab size for n =', i, 'is:', vocab_size)
# Create n-gram lists
gram1_train = create_n_grams(text_train, 1, 51, 350)
gram2_train = create_n_grams(text_train, 2, 966, 350)
gram3_train = create_n_grams(text_train, 3, 9521, 350)
gram1_test = create_n_grams(text_test, 1, 51, 350)
gram2_test = create_n_grams(text_test, 2, 966, 350)
gram3_test = create_n_grams(text_test, 3, 9521, 350)
print(np.shape(gram1_train))
print(np.shape(gram2_train))
print(np.shape(gram3_train))
print(np.shape(gram1_test))
print(np.shape(gram2_test))
print(np.shape(gram3_test))
# Determine maximum value of n-gram encodings (this is used to set the CNN embedding dimension)
max_1gram = np.max(gram1_train)
max_2gram = np.max(gram2_train)
max_3gram = np.max(gram3_train)
print('Maximum encoding value for 1-grams is: ', max_1gram)
print('Maximum encoding value for 2-grams is: ', max_2gram)
print('Maximum encoding value for 3-grams is: ', max_3gram)
###Output
Maximum encoding value for 1-grams is: 65
Maximum encoding value for 2-grams is: 1255
Maximum encoding value for 3-grams is: 12376
###Markdown
**Create bag-of-words features**
###Code
def process_data(excerpt_list):
"""Stem data, remove stopwords and split into word lists
Args:
excerpt_list: list of strings. List of normalized text excerpts.
Returns:
processed: list of strings. List of lists of processed text excerpts (stemmed and stop words removed).
"""
stop_words = set(stopwords.words('english'))
porter = PorterStemmer()
processed = []
for excerpt in excerpt_list:
new = excerpt.split()
word_list = [porter.stem(w) for w in new if not w in stop_words]
word_list = " ".join(word_list)
processed.append(word_list)
return processed
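# Illustrative example (added for clarity; not part of the original pipeline):
# stop-word removal and Porter stemming applied to a short made-up sentence.
print(process_data(["the boys were running quickly towards the old house"]))
# Expected output (approximately): ['boy run quickli toward old hous']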
# Process data subsets
processed_train = process_data(text_train)
processed_test = process_data(text_test)
print(processed_train[0:5])
# Create bag of words features
## Fit Tfidf Vectorizer
vectorizer = TfidfVectorizer(strip_accents = 'ascii', stop_words = 'english', min_df = 6)
vectorizer.fit(processed_train)
# Get size of vocabulary
print('Vocabulary size: ', len(vectorizer.vocabulary_))
# Create feature vectors
words_train = vectorizer.transform(processed_train)
words_test = vectorizer.transform(processed_test)
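# Quick shape check (added for clarity): one row per excerpt, one column per vocabulary
# term, so approximately (54400, 5840) for training and (13600, 5840) for testing.
print(words_train.shape)
print(words_test.shape)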
###Output
Vocabulary size: 5840
###Markdown
**One-hot encode labels**
###Code
# One hot encode labels
author_lb = LabelBinarizer()
author_lb.fit(author_train)
author_train_hot = author_lb.transform(author_train)
author_test_hot = author_lb.transform(author_test)
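# Note (added for clarity): the binarized labels have one column per author (8 classes),
# e.g. shape (54400, 8) for the training set, matching the softmax output size of the CNNs below.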
###Output
_____no_output_____
###Markdown
Implementation **Fit the CNN**
###Code
# Define model architecture in keras
# Code reference: https://machinelearningmastery.com/develop-n-gram-multichannel-convolutional-neural-network-sentiment-analysis/
def define_model(input_len, output_size, vocab_size, embedding_dim, verbose = True,
drop_out_pct = 0.25, conv_filters = 500, activation_fn = 'relu', pool_size = 2, learning = 0.0001):
"""Define n-gram CNN
Args:
input_len: int. Length of input sequences.
output_size: int. Number of output classes.
vocab_size: int. Maximum value of n-gram encoding.
embedding_dim: int. Size of embedding layer.
verbose: bool. Whether or not to print model summary.
drop_out_pct: float. Drop-out rate.
conv_filters: int. Number of filters in the conv layer.
activation_fn: string. Activation function to use in the convolutional layer.
pool_size: int. Pool size for the max pooling layer.
learning: float. Learning rate for the model optimizer.
Returns:
model: keras model object.
"""
# Channel 1
inputs1 = Input(shape = (input_len,))
embedding1 = Embedding(vocab_size, embedding_dim)(inputs1)
drop1 = Dropout(drop_out_pct)(embedding1)
conv1 = Conv1D(filters = conv_filters, kernel_size = 3, activation = activation_fn)(drop1)
pool1 = MaxPooling1D(pool_size = pool_size)(conv1)
flat1 = Flatten()(pool1)
# Channel 2
inputs2 = Input(shape = (input_len,))
embedding2 = Embedding(vocab_size, embedding_dim)(inputs2)
drop2 = Dropout(drop_out_pct)(embedding2)
conv2 = Conv1D(filters = conv_filters, kernel_size = 4, activation = activation_fn)(drop2)
pool2 = MaxPooling1D(pool_size = pool_size)(conv2)
flat2 = Flatten()(pool2)
# Channel 3
inputs3 = Input(shape = (input_len,))
embedding3= Embedding(vocab_size, embedding_dim)(inputs3)
drop3 = Dropout(drop_out_pct)(embedding3)
conv3 = Conv1D(filters = conv_filters, kernel_size = 5, activation = activation_fn)(drop3)
pool3 = MaxPooling1D(pool_size = pool_size)(conv3)
flat3 = Flatten()(pool3)
# Merge channels
merged = concatenate([flat1, flat2, flat3])
# Create output layer
output = Dense(output_size, activation = 'softmax')(merged)
# Create model
model = Model(inputs = [inputs1, inputs2, inputs3], outputs = output)
# Compile model
model.compile(loss='categorical_crossentropy', optimizer = Adam(lr = learning), metrics=['accuracy'])
if verbose:
print(model.summary())
return model
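# Note (added for clarity): all three channels receive the same encoded sequence and differ
# only in convolution kernel size (3, 4 and 5), so the network scans the sequence with three
# window widths in parallel before the merged softmax output layer.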
# Create the 1-gram model
gram1_model = define_model(350, 8, max_1gram + 1, 26)
# Train 1-gram CNN
gram1_model.fit([gram1_train, gram1_train, gram1_train], author_train_hot, epochs=10, batch_size=32,
verbose = 1, validation_split = 0.2)
# Create the 2-gram model
gram2_model = define_model(350, 8, max_2gram + 1, 300)
# Train 2-gram CNN
gram2_model.fit([gram2_train, gram2_train, gram2_train], author_train_hot, epochs=10, batch_size=32,
verbose = 1, validation_split = 0.2)
# Create the 3-gram model
gram3_model = define_model(350, 8, max_3gram + 1, 600)
# Train 3-gram CNN
gram3_model.fit([gram3_train, gram3_train, gram3_train], author_train_hot, epochs=10, batch_size=32,
verbose = 1, validation_split = 0.2)
###Output
Train on 43520 samples, validate on 10880 samples
Epoch 1/10
43520/43520 [==============================] - 310s 7ms/step - loss: 1.9104 - acc: 0.2558 - val_loss: 1.5276 - val_acc: 0.4344
Epoch 2/10
43520/43520 [==============================] - 302s 7ms/step - loss: 1.2773 - acc: 0.5477 - val_loss: 1.2492 - val_acc: 0.5505
Epoch 3/10
43520/43520 [==============================] - 300s 7ms/step - loss: 0.9639 - acc: 0.6689 - val_loss: 1.1923 - val_acc: 0.5745
Epoch 4/10
43520/43520 [==============================] - 300s 7ms/step - loss: 0.7682 - acc: 0.7434 - val_loss: 1.1953 - val_acc: 0.5821
Epoch 5/10
43520/43520 [==============================] - 300s 7ms/step - loss: 0.6162 - acc: 0.8031 - val_loss: 1.2445 - val_acc: 0.5801
Epoch 6/10
43520/43520 [==============================] - 300s 7ms/step - loss: 0.4869 - acc: 0.8519 - val_loss: 1.3272 - val_acc: 0.5730
Epoch 7/10
43520/43520 [==============================] - 300s 7ms/step - loss: 0.3747 - acc: 0.8952 - val_loss: 1.3939 - val_acc: 0.5729
Epoch 8/10
43520/43520 [==============================] - 299s 7ms/step - loss: 0.2832 - acc: 0.9262 - val_loss: 1.4857 - val_acc: 0.5699
Epoch 9/10
43520/43520 [==============================] - 300s 7ms/step - loss: 0.2129 - acc: 0.9460 - val_loss: 1.7642 - val_acc: 0.5525
Epoch 10/10
43520/43520 [==============================] - 300s 7ms/step - loss: 0.1589 - acc: 0.9623 - val_loss: 1.7429 - val_acc: 0.5651
###Markdown
The best results with regard to validation accuracy were achieved by the 3-gram CNN (followed by the 2-gram CNN, then the 1-gram CNN). In the case of the 3-gram and 2-gram CNNs, validation accuracy tends to plateau after around 5 epochs, so for the remainder of this analysis, we shall reduce the number of epochs for fitting the CNN down to 5. **Fit the SVM**
###Code
# Define grid search object
svm = SVC()
params = {'kernel': ['linear'], 'C':[1, 10, 100]}
scorer = make_scorer(accuracy_score)
grid_obj = GridSearchCV(svm, params, scoring = scorer, verbose = 50)
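# Note (added for clarity): GridSearchCV scores each value of C with three-fold
# cross-validation here, as seen in the split0/split1/split2 entries of cv_results_ below.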
# Fit bag of words svm
np.random.seed(6)
word_svm = grid_obj.fit(words_train, author_train)
print(word_svm.best_estimator_)
print(word_svm.cv_results_)
###Output
{'mean_fit_time': array([107.98448531, 136.42942905, 425.95507264]), 'std_fit_time': array([0.62575926, 0.62474636, 9.53641656]), 'mean_score_time': array([30.77192775, 27.47356653, 26.70237382]), 'std_score_time': array([0.07407209, 0.05506556, 0.03792019]), 'param_C': masked_array(data=[1, 10, 100],
mask=[False, False, False],
fill_value='?',
dtype=object), 'param_kernel': masked_array(data=['linear', 'linear', 'linear'],
mask=[False, False, False],
fill_value='?',
dtype=object), 'params': [{'C': 1, 'kernel': 'linear'}, {'C': 10, 'kernel': 'linear'}, {'C': 100, 'kernel': 'linear'}], 'split0_test_score': array([0.58756065, 0.56043229, 0.53049184]), 'split1_test_score': array([0.59912866, 0.57017592, 0.54111289]), 'split2_test_score': array([0.59053555, 0.56180023, 0.53218245]), 'mean_test_score': array([0.59240809, 0.56413603, 0.53459559]), 'std_test_score': array([0.00490484, 0.00430715, 0.00465976]), 'rank_test_score': array([1, 2, 3], dtype=int32), 'split0_train_score': array([0.7326274 , 0.82439885, 0.8677752 ]), 'split1_train_score': array([0.72826537, 0.82121488, 0.86544241]), 'split2_train_score': array([0.73442885, 0.82828311, 0.86969588]), 'mean_train_score': array([0.73177387, 0.82463228, 0.86763783]), 'std_train_score': array([0.0025876 , 0.00289031, 0.00173919])}
###Markdown
The best results were achieved when C = 1. Refinement **Add extra channel to CNN** Refit the CNN to the 3-gram sequences with an extra channel added to the model (with kernel size = 6). Stop after five epochs this time.
###Code
# Define model architecture in keras
# Code reference: https://machinelearningmastery.com/develop-n-gram-multichannel-convolutional-neural-network-sentiment-analysis/
def define_model2(input_len, output_size, vocab_size, embedding_dim, verbose = True,
drop_out_pct = 0.25, conv_filters = 500, activation_fn = 'relu', pool_size = 2, learning = 0.0001):
"""Define n-gram CNN
Args:
input_len: int. Length of input sequences.
output_size: int. Number of output classes.
vocab_size: int. Maximum value of n-gram encoding.
embedding_dim: int. Size of embedding layer.
verbose: bool. Whether or not to print model summary.
drop_out_pct: float. Drop-out rate.
conv_filters: int. Number of filters in the conv layer.
activation_fn: string. Activation function to use in the convolutional layer.
pool_size: int. Pool size for the max pooling layer.
learning: float. Learning rate for the model optimizer.
Returns:
model: keras model object.
"""
# Channel 1
inputs1 = Input(shape = (input_len,))
embedding1 = Embedding(vocab_size, embedding_dim)(inputs1)
drop1 = Dropout(drop_out_pct)(embedding1)
conv1 = Conv1D(filters = conv_filters, kernel_size = 3, activation = activation_fn)(drop1)
pool1 = MaxPooling1D(pool_size = pool_size)(conv1)
flat1 = Flatten()(pool1)
# Channel 2
inputs2 = Input(shape = (input_len,))
embedding2 = Embedding(vocab_size, embedding_dim)(inputs2)
drop2 = Dropout(drop_out_pct)(embedding2)
conv2 = Conv1D(filters = conv_filters, kernel_size = 4, activation = activation_fn)(drop2)
pool2 = MaxPooling1D(pool_size = pool_size)(conv2)
flat2 = Flatten()(pool2)
# Channel 3
inputs3 = Input(shape = (input_len,))
embedding3= Embedding(vocab_size, embedding_dim)(inputs3)
drop3 = Dropout(drop_out_pct)(embedding3)
conv3 = Conv1D(filters = conv_filters, kernel_size = 5, activation = activation_fn)(drop3)
pool3 = MaxPooling1D(pool_size = pool_size)(conv3)
flat3 = Flatten()(pool3)
# Channel 4
inputs4 = Input(shape = (input_len,))
embedding4 = Embedding(vocab_size, embedding_dim)(inputs4)
drop4 = Dropout(drop_out_pct)(embedding4)
conv4 = Conv1D(filters = conv_filters, kernel_size = 6, activation = activation_fn)(drop4)
pool4 = MaxPooling1D(pool_size = pool_size)(conv4)
flat4 = Flatten()(pool4)
# Merge channels
merged = concatenate([flat1, flat2, flat3, flat4])
# Create output layer
output = Dense(output_size, activation = 'softmax')(merged)
# Create model
model = Model(inputs = [inputs1, inputs2, inputs3, inputs4], outputs = output)
# Compile model
model.compile(loss='categorical_crossentropy', optimizer = Adam(lr = learning), metrics=['accuracy'])
if verbose:
print(model.summary())
return model
# Create the 3-gram model
gram3_model2 = define_model2(350, 8, max_3gram + 1, 600)
# Train 3-gram CNN
gram3_model2.fit([gram3_train, gram3_train, gram3_train, gram3_train], author_train_hot, epochs=5, batch_size=32,
verbose = 1, validation_split = 0.2)
###Output
Train on 43520 samples, validate on 10880 samples
Epoch 1/5
43520/43520 [==============================] - 435s 10ms/step - loss: 1.8808 - acc: 0.2714 - val_loss: 1.4971 - val_acc: 0.4487
Epoch 2/5
43520/43520 [==============================] - 430s 10ms/step - loss: 1.2242 - acc: 0.5688 - val_loss: 1.2269 - val_acc: 0.5608
Epoch 3/5
43520/43520 [==============================] - 430s 10ms/step - loss: 0.9018 - acc: 0.6913 - val_loss: 1.1853 - val_acc: 0.5801
Epoch 4/5
43520/43520 [==============================] - 434s 10ms/step - loss: 0.6964 - acc: 0.7736 - val_loss: 1.2006 - val_acc: 0.5860
Epoch 5/5
43520/43520 [==============================] - 434s 10ms/step - loss: 0.5283 - acc: 0.8375 - val_loss: 1.2743 - val_acc: 0.5741
###Markdown
After 5 epochs, the 4-channel model has a slightly lower validation accuracy than the 3-channel model (0.5741 vs 0.5801). Consequently, we will stick with the original 3-channel model. Model Evaluation and Validation **Fit final models and evaluate**
###Code
# Define function for plotting normalized confusion matrix
# Code reference 1: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
# Code reference 2: https://stackoverflow.com/questions/35572000/how-can-i-plot-a-confusion-matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
Args:
cm: matrix. Confusion matrix for plotting.
classes: list. List of class labels.
normalize: bool. Whether or not to normalize the confusion matrix.
title: string. Title for plot.
cmap: color map. Color scheme for plot.
Returns:
None
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
df_cm = pd.DataFrame(cm, index = classes,
columns = classes)
sns.heatmap(df_cm, annot=True, cmap = cmap)
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.title(title)
# Fit and evaluate Model 1 (3-gram CNN)
t0 = time.time()
# Fit model
model1 = define_model(350, 8, max_3gram + 1, 600)
model1.fit([gram3_train, gram3_train, gram3_train], author_train_hot, epochs=5, batch_size=32,
verbose = 1, validation_split = 0.2)
t1 = time.time()
# Predict values for test set
author_pred1 = model1.predict([gram3_test, gram3_test, gram3_test])
t2 = time.time()
# Reverse one-hot encoding of labels
author_pred1 = author_lb.inverse_transform(author_pred1)
# Evaluate
accuracy = accuracy_score(author_test, author_pred1)
precision, recall, f1, support = score(author_test, author_pred1)
ave_precision = np.average(precision, weights = support/np.sum(support))
ave_recall = np.average(recall, weights = support/np.sum(support))
ave_f1 = np.average(f1, weights = support/np.sum(support))
confusion = confusion_matrix(author_test, author_pred1, labels = ['Alcott', 'Austen', 'Bronte', 'Collins',
'Doyle', 'Montgomery', 'Stoker', 'Twain'])
print("Accuracy:", accuracy)
print("Ave. Precision:", ave_precision)
print("Ave. Recall:", ave_recall)
print("Ave. F1 Score:", ave_f1)
print("Training Time:", (t1 - t0), "seconds")
print("Prediction Time:", (t2 - t1), "seconds")
print("Confusion Matrix:\n", confusion)
# Plot normalized confusion matrix
plot_confusion_matrix(confusion, classes=['Alcott', 'Austen', 'Bronte', 'Collins',
'Doyle', 'Montgomery', 'Stoker', 'Twain'], \
normalize=True, title='Normalized Confusion Matrix - Model 1')
plt.savefig("confusion1.eps")
# Fit and evaluate Model 2 (Bag of words SVM)
np.random.seed(28)
t0 = time.time()
# Fit model
model2 = SVC(C = 1, kernel = 'linear')
model2.fit(words_train, author_train)
t1 = time.time()
# Predict values for test set
author_pred2 = model2.predict(words_test)
t2 = time.time()
# Evaluate
accuracy = accuracy_score(author_test, author_pred2)
precision, recall, f1, support = score(author_test, author_pred2)
ave_precision = np.average(precision, weights = support/np.sum(support))
ave_recall = np.average(recall, weights = support/np.sum(support))
ave_f1 = np.average(f1, weights = support/np.sum(support))
confusion = confusion_matrix(author_test, author_pred2, labels = ['Alcott', 'Austen', 'Bronte', 'Collins',
'Doyle', 'Montgomery', 'Stoker', 'Twain'])
print("Accuracy:", accuracy)
print("Ave. Precision:", ave_precision)
print("Ave. Recall:", ave_recall)
print("Ave. F1 Score:", ave_f1)
print("Training Time:", (t1 - t0), "seconds")
print("Prediction Time:", (t2 - t1), "seconds")
print("Confusion Matrix:\n", confusion)
# Plot normalized confusion matrix
plot_confusion_matrix(confusion, classes=['Alcott', 'Austen', 'Bronte', 'Collins',
'Doyle', 'Montgomery', 'Stoker', 'Twain'], \
normalize=True, title='Normalized Confusion Matrix - Model 2')
plt.savefig("confusion2.eps")
# Get benchmark statistics (random model)
# Perform 10 times and take averages
accuracy_list = []
prec_list = []
recall_list = []
f1_list = []
for i in range(10):
# Create random predictions
author_pred3 = np.random.choice(['Alcott', 'Austen', 'Bronte', 'Collins',
'Doyle', 'Montgomery', 'Stoker', 'Twain'], len(author_test))
# Evaluate
accuracy = accuracy_score(author_test, author_pred3)
precision, recall, f1, support = score(author_test, author_pred3)
ave_precision = np.average(precision, weights = support/np.sum(support))
ave_recall = np.average(recall, weights = support/np.sum(support))
ave_f1 = np.average(f1, weights = support/np.sum(support))
accuracy_list.append(accuracy)
prec_list.append(ave_precision)
recall_list.append(ave_recall)
f1_list.append(ave_f1)
print("Accuracy:", accuracy_list, np.mean(accuracy_list), np.std(accuracy_list))
print("Ave. Precision:", prec_list, np.mean(prec_list), np.std(prec_list))
print("Ave. Recall:", recall_list, np.mean(recall_list), np.std(recall_list))
print("Ave. F1 Score:", f1_list, np.mean(f1_list), np.std(f1_list))
###Output
Accuracy: [0.1264705882352941, 0.12227941176470589, 0.12316176470588236, 0.13014705882352942, 0.12176470588235294, 0.12625, 0.12360294117647058, 0.13088235294117648, 0.12426470588235294, 0.12588235294117647] 0.12547058823529414 0.002951672448327311
Ave. Precision: [0.1266465444835753, 0.12233628218025662, 0.12342378106218606, 0.1304336723166218, 0.12189731573734387, 0.1262912622665182, 0.12386941963618259, 0.13109834759020694, 0.12437353571106353, 0.12583728670790226] 0.12562074476918572 0.00298023642284441
Ave. Recall: [0.1264705882352941, 0.12227941176470589, 0.12316176470588235, 0.1301470588235294, 0.12176470588235294, 0.12625, 0.1236029411764706, 0.13088235294117648, 0.12426470588235294, 0.12588235294117647] 0.12547058823529414 0.0029516724483273074
Ave. F1 Score: [0.12653319286141212, 0.12226843324399686, 0.12321947748660764, 0.1302077902363022, 0.12178652876626787, 0.12622344999942142, 0.1236886176229508, 0.13092308650117446, 0.12424972751678684, 0.12580242802232933] 0.12549027322572498 0.002958589237129839
###Markdown
**Perform sensitivity analysis** Sensitivity analysis is performed by creating 3 (approximately 67%) subsets of the training data set, fitting the model to each subset, then calculating the evaluation (test) metrics and examining their variability.
###Code
# Model 1 Sensitivity Testing
kf = KFold(n_splits = 3)
accuracy_list = []
prec_list = []
recall_list = []
f1_list = []
cnt = 0
for train_inds, _ in kf.split(gram3_train):
cnt += 1
print('Run:', cnt)
# Create data subsets
train_x = np.array([gram3_train[i] for i in train_inds])
train_y = np.array([author_train_hot[i] for i in train_inds])
# Fit model
model1 = define_model(350, 8, max_3gram + 1, 600, verbose = False)
model1.fit([train_x, train_x, train_x], train_y, epochs=5, batch_size=32, verbose = 0)
# Predict values for test set
author_pred1 = model1.predict([gram3_test, gram3_test, gram3_test])
author_pred1 = author_lb.inverse_transform(author_pred1)
# Evaluate
accuracy = accuracy_score(author_test, author_pred1)
precision, recall, f1, support = score(author_test, author_pred1)
ave_precision = np.average(precision, weights = support/np.sum(support))
ave_recall = np.average(recall, weights = support/np.sum(support))
ave_f1 = np.average(f1, weights = support/np.sum(support))
accuracy_list.append(accuracy)
prec_list.append(ave_precision)
recall_list.append(ave_recall)
f1_list.append(ave_f1)
print("Accuracy:", accuracy_list)
print("Ave. Precision:", prec_list)
print("Ave. Recall:", recall_list)
print("Ave. F1 Score:", f1_list)
# Model 2 sensitivity testing
kf = KFold(n_splits = 3)
accuracy_list = []
prec_list = []
recall_list = []
f1_list = []
cnt = 0
# Convert sparse matrix to array
words_train_np = words_train.toarray()
for train_inds, _ in kf.split(words_train):
cnt += 1
print('Run:', cnt)
# Create data subsets
train_x = np.array([words_train_np[i] for i in train_inds])
train_y = [author_train[i] for i in train_inds]
# Convert train_x back to sparse matrix
train_x = sparse.csr_matrix(train_x)
# Fit model
model2 = SVC(C = 1, kernel = 'linear')
model2.fit(train_x, train_y)
# Predict values for test set
author_pred2 = model2.predict(words_test)
# Evaluate
accuracy = accuracy_score(author_test, author_pred2)
precision, recall, f1, support = score(author_test, author_pred2)
ave_precision = np.average(precision, weights = support/np.sum(support))
ave_recall = np.average(recall, weights = support/np.sum(support))
ave_f1 = np.average(f1, weights = support/np.sum(support))
accuracy_list.append(accuracy)
prec_list.append(ave_precision)
recall_list.append(ave_recall)
f1_list.append(ave_f1)
print("Accuracy:", accuracy_list)
print("Ave. Precision:", prec_list)
print("Ave. Recall:", recall_list)
print("Ave. F1 Score:", f1_list)
###Output
Run: 1
Run: 2
Run: 3
Accuracy: [0.5855882352941176, 0.5822058823529411, 0.5827205882352942]
Ave. Precision: [0.5934193273768217, 0.5900483541326081, 0.590777047380897]
Ave. Recall: [0.5855882352941176, 0.5822058823529412, 0.5827205882352942]
Ave. F1 Score: [0.5880048936638183, 0.5846906365078599, 0.5852778440958231]
###Markdown
**Explore incorrectly classified excerpts**
###Code
# Explore the first 100 test examples
for i in range(100):
print('Excerpt', i, '- Actual label =', author_test[i], 'Model 1 predicted label =', author_pred1[i],
'Model 2 predicted label =', author_pred2[i])
print(text_test[i], '\n')
def calculate_averages(true, pred, text):
"""Calculate average length of correctly and incorrectly classified examples
Args:
true: list. List of correct labels.
pred: list. List of predicted labels.
text: list. List of text excerpts.
Returns:
correct_ave_chars: float. Average length of correctly classified examples in characters.
incorrect_ave_chars: float. Average length of incorrectly classified examples in characters.
correct_ave_words: float. Average length of correctly classified examples in words.
incorrect_ave_words: float. Average length of incorrectly classified examples in words.
"""
correct_len_chars = []
incorrect_len_chars = []
correct_len_words = []
incorrect_len_words = []
for i in range(len(true)):
if true[i] == pred[i]:
correct_len_chars.append(len(text[i]))
correct_len_words.append(len(text[i].split()))
else:
incorrect_len_chars.append(len(text[i]))
incorrect_len_words.append(len(text[i].split()))
correct_ave_chars = np.mean(correct_len_chars)
correct_ave_words = np.mean(correct_len_words)
incorrect_ave_chars = np.mean(incorrect_len_chars)
incorrect_ave_words = np.mean(incorrect_len_words)
# Conduct two sample t-test
print('Character t-test')
print(stats.ttest_ind(correct_len_chars, incorrect_len_chars, equal_var = False))
print('\nWord t-test')
print(stats.ttest_ind(correct_len_words, incorrect_len_words, equal_var = False))
return correct_ave_chars, correct_ave_words, incorrect_ave_chars, incorrect_ave_words
# Calculate averages for Model 1
correct_ave_chars1, correct_ave_words1, incorrect_ave_chars1, incorrect_ave_words1\
= calculate_averages(author_test, author_pred1, text_test)
# Calculate averages for Model 2
correct_ave_chars2, correct_ave_words2, incorrect_ave_chars2, incorrect_ave_words2\
= calculate_averages(author_test, author_pred2, text_test)
print('Model 1 - Average excerpt length (chars) of correct examples =', correct_ave_chars1,
'Incorrect examples =', incorrect_ave_chars1)
print('Model 2 - Average excerpt length (chars) of correct examples =', correct_ave_chars2,
'Incorrect examples =', incorrect_ave_chars2)
print('\nModel 1 - Average excerpt length (words) of correct examples =', correct_ave_words1,
'Incorrect examples =', incorrect_ave_words1)
print('Model 2 - Average excerpt length (words) of correct examples =', correct_ave_words2,
'Incorrect examples =', incorrect_ave_words2)
###Output
Model 1 - Average excerpt length (chars) of correct examples = 100.94069709127382 Incorrect examples = 76.474573257468
Model 2 - Average excerpt length (chars) of correct examples = 103.71028391167192 Incorrect examples = 72.82678414096917
Model 1 - Average excerpt length (words) of correct examples = 19.646564694082247 Incorrect examples = 15.088371266002845
Model 2 - Average excerpt length (words) of correct examples = 20.112933753943217 Incorrect examples = 14.47806167400881
###Markdown
Function for data import, processing and plotting
###Code
KEYS = ('year', 'month', 'day', 'hour', 'minute', 'second', 'ms', 'iter', 'MB')
TYPES = (int, int, int, int, int, int, int, int, float)
LINE_RE = r'^(\d+)-(\d+)-(\d+)\s(\d+):(\d+):(\d+),(\d+).*\#\[(-?\d+)].*MB:([0-9]*[.][0-9]+)'
re_compiled = re.compile(LINE_RE)
def parse_line(line):
try:
values = re_compiled.match(line).groups()
info = OrderedDict(zip(KEYS, values))
return info
except Exception as err:
print err
return None
def process_data_frame(df):
dt_list = []
for idx, row in df.iterrows():
args = map(int, list(row[0:6]))
dt = datetime.datetime(*args)
dt_list.append(dt)
df['datetime'] = dt_list
df['unix_time'] = map(lambda x: int(time.mktime(x.timetuple())), dt_list)
return df
def truncate_time_range(df, minutes=90):
start_date = df['datetime'].min()
end_date = start_date + datetime.timedelta(minutes=minutes)
mask = (df['datetime'] > start_date) & (df['datetime'] <= end_date)
df = df.loc[mask]
return df
def data_frame_from_log(log_path):
with open(log_path, 'rb') as f:
txt_lines = f.readlines()
data_list = []
for line in txt_lines:
line_info = parse_line(line)
if line_info is not None:
data_list.append(line_info)
df = pd.DataFrame(data_list)
df = process_data_frame(df)
df = truncate_time_range(df, minutes=30)
return df
def extract_leak_rate(df, title):
x = np.array(df['unix_time'].values, dtype=np.int)
x -=x.min()
y = np.array(df['MB'].values, dtype=np.float)
fit = np.polyfit(x, y, 1)
m, c = fit # m [MB/s]
fit_fn = np.poly1d(fit)
# fit_fn is now a function which takes in x and returns an estimate for y
plt.plot(x, y, 'o', x, fit_fn(x), '--k')
plt.xlabel('Seconds')
plt.ylabel('MB leaked')
info_txt = 'Leak Rate: {0:.2f} MB/hr'.format(m*60*60) # MB/hour
plt.title('{}. {}'.format(title, info_txt))
plt.show()
return m*60*60
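# Synthetic sanity check (added for illustration; not derived from the chaco logs):
# a process leaking 2 MB/s should yield a fitted slope of roughly 7200 MB/hr.
x_demo = np.arange(0, 600)
y_demo = 100 + 2.0 * x_demo
print(np.polyfit(x_demo, y_demo, 1)[0] * 60 * 60) # ~7200.0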
def extract_leak_rate_multiple(df_list, labels):
for df, label in zip(df_list, labels):
x = np.array(df['unix_time'].values, dtype=np.int)
x -= x.min()
y = np.array(df['MB'].values, dtype=np.float)
y -= y.min()
fit = np.polyfit(x, y, 1)
m, c = fit # m [MB/s]
fit_fn = np.poly1d(fit)
# fit_fn is now a function which takes in x and returns an estimate for y
info_txt = '{0:.2f} MB/hr'.format(m*60*60) # MB/hour
plt.plot(x, y, 'o', alpha=0.5, label='{}: {}'.format(label, info_txt))
plt.plot(x, fit_fn(x), '--k')
plt.xlabel('Seconds')
plt.ylabel('Growth (MB)')
plt.title('Leak Rate')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Run analysis
###Code
# Version 1 as reported https://github.com/enthought/chaco/issues/406
df_60 = data_frame_from_log('chaco_60Hz.log')
df_30 = data_frame_from_log('chaco_30Hz.log')
extract_leak_rate_multiple([df_60, df_30], ['60Hz refresh', '30Hz refresh'])
###Output
_____no_output_____
###Markdown
Run 2: refreshing with wx Timer* All plots have a refresh rate of 30Hz* Blue plot is with manual axis range update* Orange plot is without manual axis range update The manual axis range adjustment leaks more memory than the non-manual one.
###Code
# Version 2 as reported https://github.com/enthought/chaco/issues/406
df_a = data_frame_from_log('chaco_v2_with_manual_scaling_30Hz.log')
df_b = data_frame_from_log('chaco_v2_without_manual_scaling_30Hz.log')
extract_leak_rate_multiple([df_a, df_b], ['Using Timer, 30Hz (with manual scaling)', 'Using Timer, 30Hz (without manual scaling)'])
###Output
_____no_output_____
###Markdown
Analysis Results
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 16
plt.rcParams['figure.figsize'] = [12, 8]
plt.rcParams['lines.linewidth'] = 2.5
pd.set_option("display.max_rows", 20)
pd.set_option("display.max_columns", 20)
from pathlib import Path
import pandas as pd
results_dir = Path("results")
results_paths = results_dir.glob("*.json")
results = []
for path in results_paths:
results.append(pd.read_json(path, orient='index').T)
results_df = pd.concat(results, axis=0).reset_index()
data_names = results_df["data_name"].unique()
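# Quick sanity check (added for clarity): list the datasets and encoders found in the
# result files; the "encoder" column name is taken from its use further below.
print(data_names)
print(results_df["encoder"].unique())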
def plot_metric_for_name(data_name, metric_name, ax=None, remove_drop=False):
if ax is None:
fig, ax = plt.subplots()
results_data_name = results_df[results_df["data_name"] == data_name]
info_first = results_data_name.iloc[0]
data_name = info_first['data_name']
results_data_name_sorted = results_data_name.sort_values(f"test_{metric_name}_mean")
null_encoders = ~results_data_name_sorted[f"test_{metric_name}_mean"].isna()
if remove_drop:
null_encoders &= (results_data_name_sorted["encoder"] != "drop")
y_values = np.arange(np.sum(null_encoders))
ax.errorbar(results_data_name_sorted.loc[null_encoders, f"test_{metric_name}_mean"],
y_values,
xerr=results_data_name_sorted.loc[null_encoders, f"test_{metric_name}_std"],
ls='', marker='o')
ax.set_yticks(y_values)
ax.set_yticklabels(results_data_name_sorted.loc[null_encoders, "encoder"])
ax.set_title(f"{data_name}: {metric_name}")
def plot_all_metrics(data_name, remove_drop=False):
results_data_name = results_df[results_df["data_name"] == data_name]
info_first = results_data_name.iloc[0]
non_null_names = info_first.notnull()
test_names = info_first.index.str.startswith("test")
score_names = info_first.index[non_null_names & test_names]
score_means_names = score_names[score_names.str.endswith("_mean")]
metric_names = [name[5:-5] for name in score_means_names]
fig, axes = plt.subplots(1, len(metric_names), figsize=(20, 6), constrained_layout=True)
for metric_name, ax in zip(metric_names, axes.flatten()):
plot_metric_for_name(data_name, metric_name, ax=ax, remove_drop=remove_drop)
data_names = ["telco", "amazon_access", "kicks", "taxi", "ames", "churn", "adult", "dresses_sales", "phishing_websites"]
for dataset in data_names:
plot_all_metrics(dataset)
plt.savefig(f"figures/{dataset}.png")
md_names = [f"![{dataset}](figures/{dataset}.png)" for dataset in data_names]
print("\n".join(md_names))
###Output
![telco](figures/telco.png)
![amazon_access](figures/amazon_access.png)
![kicks](figures/kicks.png)
![taxi](figures/taxi.png)
![ames](figures/ames.png)
![churn](figures/churn.png)
![adult](figures/adult.png)
![dresses_sales](figures/dresses_sales.png)
![phishing_websites](figures/phishing_websites.png)
###Markdown
Get metadata for datasets
###Code
from bench_utils import fetch_openml_and_clean
from benchmark import DATA_INFOS
data_info = DATA_INFOS['kicks']
def get_metadata(data_info):
X, y = fetch_openml_and_clean(data_info)
data_info.is_classification
n_cats = X.select_dtypes(include=['object', 'category']).shape[1]
n_samples, n_features = X.shape
return {'dataset_name': data_info.data_name,
'categorical features': n_cats,
'n_features': n_features,
'n_samples': n_samples,
'is_classification': data_info.is_classification,
'openml_url': f'https://www.openml.org/d/{data_info.data_id}'}
all_metadata = [get_metadata(data_info) for data_info in DATA_INFOS.values()]
import pandas as pd
metadata_df = pd.DataFrame.from_records(all_metadata)
print(metadata_df.to_markdown())
###Output
| | dataset_name | categorical features | n_features | n_samples | is_classification | openml_url |
|---:|:------------------|------------------------:|-------------:|------------:|:--------------------|:-------------------------------|
| 0 | kicks | 18 | 32 | 72983 | True | https://www.openml.org/d/41162 |
| 1 | amazon_access | 9 | 9 | 32769 | True | https://www.openml.org/d/4135 |
| 2 | telco | 16 | 19 | 7043 | True | https://www.openml.org/d/42178 |
| 3 | adult | 12 | 14 | 48842 | True | https://www.openml.org/d/179 |
| 4 | ames | 43 | 79 | 1460 | False | https://www.openml.org/d/42165 |
| 5 | taxi | 9 | 18 | 581835 | False | https://www.openml.org/d/42729 |
| 6 | churn | 4 | 20 | 5000 | True | https://www.openml.org/d/40701 |
| 7 | dresses_sales | 11 | 12 | 500 | True | https://www.openml.org/d/23381 |
| 8 | phishing_websites | 30 | 30 | 11055 | True | https://www.openml.org/d/4534 |
###Markdown
Key functions
###Code
def gen_seed(x_k, x_l):
str_repr = ''.join(x_k.astype(str)) + ''.join(x_l.astype(str))
return int(hashlib.sha256(str_repr.encode('utf-8')).hexdigest(), 16) % 10**9
def braid(x_k, x_l, q):
np.random.seed(gen_seed(x_k, x_l))
mask = np.random.rand(len(x_k)) < 1/(q+1)
u = x_k.copy()
u[mask] = x_l[mask]
return u
def hamming(x_k, x_l):
return np.mean(x_k != x_l)
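# Small demonstration (added for clarity; not part of the original notebook): braiding two
# short vectors with q = 1 replaces roughly half of x_k's entries with x_l's, and the result
# is deterministic because the random seed is derived from both inputs.
a_demo = np.array([0, 0, 0, 0, 0, 0, 0, 0])
b_demo = np.array([1, 1, 1, 1, 1, 1, 1, 1])
print(braid(a_demo, b_demo, 1)) # mixture of 0s and 1s
print(hamming(a_demo, braid(a_demo, b_demo, 1))) # fraction of positions taken from b_demo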
###Output
_____no_output_____
###Markdown
Setup
###Code
np.random.seed(0)
x_star = np.random.randint(0, Z, N)
xs = [np.random.randint(0, Z, N) for i in range(M)]
def encode(s):
y = x_star.copy()
for t, a in enumerate(s):
idx = A.index(a)
y = braid(y, xs[idx], t+1)
return y
def decode(y, l, return_d=False, force_decode=None):
# get *set* of elements in sequence
min_idxs = np.argsort([hamming(x, y) for x in xs])[:l]
vs = [xs[idx] for idx in min_idxs]
# reconstruct sequence
y_star = x_star.copy()
s_hat = ''
d = [hamming(y_star, y)]
for t in range(1, l+1):
us = [braid(y_star, v, t) for v in vs]
j = np.argmin([hamming(u, y) for u in us])
if force_decode is not None and len(force_decode) >= t:
next_sym = force_decode[t-1]
next_x = xs[A.index(next_sym)]
else:
next_sym = A[min_idxs[j]]
next_x = vs[j]
s_hat += next_sym
y_star = braid(y_star, next_x, t)
d.append(hamming(y_star, y))
if not all(y_star == y):
print('Default reconstruction failed.')
if not return_d:
return s_hat
else:
return s_hat, np.array(d)
###Output
_____no_output_____
###Markdown
Demo
###Code
print('Hamming similarities between symbols:')
symbol_dists = []
for k in range(M-1):
for l in range(k+1, M):
symbol_dists.append(1 - hamming(xs[k], xs[l]))
print('Min = ', np.min(symbol_dists))
print('Max = ', np.max(symbol_dists))
print('Mean = ', np.mean(symbol_dists))
print('Std = ', np.std(symbol_dists))
s = 'random vectors for the win'
y = encode(s)
print(y)
s_star = decode(y, len(s))
print(s_star)
test_seqs = [
'abcde',
'abced',
'aabcd',
'zyxwv',
'zyxvw',
'aaazz',
'abbbaaab',
'hi agostina'
]
for s in test_seqs:
s_hat = decode(encode(s), len(s))
print(s, '--> y --> ', s_hat, '(', s == s_hat, ')')
s = 'hyperdimensional computing via crossover'
y = encode(s)
s_hat, d = decode(y, len(s), return_d=True)
t = np.arange(len(s)+1)
fig, ax = plt.subplots(1, 1, figsize=(10, 5), tight_layout=True)
ax.plot(t, d, lw=2, c='k')
ax.plot(t, 1 - t/(len(t)-1), c='gray', ls='--')
ax.set_xlim(-1, len(t))
ax.set_ylim(-.05, 1.05)
ax.set_xticks(t)
ax.set_xticklabels('*' + s_hat)
ax.grid()
set_font_size(ax, 16)
s = 'hyperdimensional computing via crossover'
y = encode(s)
s_hat, d = decode(y, len(s), return_d=True, force_decode='h')
t = np.arange(len(s)+1)
fig, ax = plt.subplots(1, 1, figsize=(10, 5), tight_layout=True)
ax.plot(t, d, lw=2, c='k')
ax.plot(t, 1 - t/(len(t)-1), c='gray', ls='--')
ax.set_xlim(-1, len(t))
ax.set_ylim(-.05, 1.05)
ax.set_xticks(t)
ax.set_xticklabels('*' + s_hat)
ax.grid()
set_font_size(ax, 16)
s = 'aaaaaaabbbbbbbccccccc'
y = encode(s)
s_hat, d = decode(y, len(s), return_d=True, force_decode='aaaaaaabbbbbbbccccccc')
t = np.arange(len(s)+1)
fig, ax = plt.subplots(1, 1, figsize=(10, 5), tight_layout=True)
ax.plot(t, d, lw=2, c='k')
ax.plot(t, 1 - t/(len(t)-1), c='gray', ls='--')
ax.set_xlim(-1, len(t))
ax.set_ylim(-.05, 1.05)
ax.set_xticks(t)
ax.set_xticklabels('*' + s_hat)
ax.grid()
set_font_size(ax, 16)
###Output
_____no_output_____
###Markdown
Gitcoin Grants Round 3 CLR Analysis[![Gitcoin Grants](http://img.youtube.com/vi/eVgEWSPFR2o/0.jpg)](https://youtu.be/eVgEWSPFR2o) video from: https://gitcoin.co/grants/ Before Start This research report aims to understand the patterns and issues in [Gitcoin Grants](https://gitcoin.co/grants/), especially Gitcoin Grants Round 3 CLR. For more details about the background, check out this Gitcoin issue: https://gitcoin.co/issue/gitcoinco/data-ops/40/3530 In this report, we're interested in the effectiveness of the funding process and its results. We'll first analyze the patterns of the grants and contributions, and then verify whether there is collusion in the contributions. (Gitcoin Grants Round 3 CLR makes use of Pairwise Bonding ( https://ethresear.ch/t/pairwise-coordination-subsidies-a-new-quadratic-funding-design/5553 ) to prevent collusion.) There are several key questions we'd like to study in this research, as mentioned in https://github.com/gitcoinco/data-ops/issues/40: - Does the community have a bias towards certain types of projects? - Does the community have a bias towards project leads with a large email/twitter list, versus the actual importance of the project? - Is there on-chain collusion? - Is there off-chain collusion? Data Sources The below datasets are available for us to investigate: 1. The anonymous dataset of Gitcoin Grants Round 3 CLR contributions: https://gist.github.com/owocki/1d6deebe478bfbda3656bb243aab2610 Data Preparation Before the analysis, we need to import the datasets:
###Code
import pandas as pd
import matplotlib
# set max row display
pd.set_option('display.max_row', 1000)
# set max column width
pd.set_option('display.max_columns', 50)
df = pd.read_json('./grants_round_3_data.json', typ='series')
grants = pd.DataFrame(df['grants'])
# add number of contributions
grants["num_of_contributions"] = grants.contributions.str.len()
# check the data is OK
grants.head()
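# Columns relied on later in this analysis (added as a quick check): title, url, tags,
# contributions, estimated_round_3_clr_match_usd and total_amount_received_usd_life.
print(grants.columns.tolist())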
###Output
_____no_output_____
###Markdown
Cool. We now have the **Grants** dataset imported. Let's start by looking at the basic facts of the dataset. We have **102** grants in total in the dataset (compared with 92 on the [Grants page](https://gitcoin.co/grants/)), and **61** grants received funds in Round 3, as shown below. The top 10 grants in Round 3 account for almost **80%** of the sum of the contributed funds, leaving the remaining **51** grants to receive about **20%** of the total funds in this round. The fund distribution is quite skewed, which might be a reasonable way to support the projects that matter most to the blockchain ecosystem.
###Code
grants.shape[0] # the total number of grants in the dataset
len(grants[grants['estimated_round_3_clr_match_usd'] > 0]) # the number of grants that received funds in round 3
grants['estimated_round_3_clr_match_usd'].sum() # sum amount of round 3
sorted_grants = grants.sort_values("estimated_round_3_clr_match_usd", ascending=False)
sorted_grants[:10]
sorted_grants.estimated_round_3_clr_match_usd.plot(kind='bar', title='Figure 1: Funds (CLR R3)', figsize=(13, 6))
sorted_grants.estimated_round_3_clr_match_usd.plot(kind='pie', title='Figure 2: Funds (CLR R3)', figsize=(13, 6))
###Output
_____no_output_____
###Markdown
Now we have the dataset ready, so let's move on to explore and answer the questions we're curious about. Topic 1: Is there a bias towards certain types of projects? To answer the first question, we'll analyze the distribution of funds vs. the types of projects. The project types are analyzed with the below fields: 1. tags 1. keywords 1. history 1. Tags In the below charts, we calculate the distribution of funds by tag. There are **10** unique tags across all the grants. As shown in figures 3 and 4, in Gitcoin Grants Round 3 CLR, **All**, **UI/UX** and **Wallet** accounted for almost 70% of all the funds, followed by **ETH 2.0**, **Security**, **Community** and **DeFi**, which together accounted for most of the remaining 30%. Compared to previous rounds, **Wallet**, **ETH 2.0** and **DeFi** have drawn more attention from the fund contributors, while the portion of **Community** has decreased. The correlation of funds by tag is visible when comparing the Round 3 and total funds, as shown in figures 5 and 6.
###Code
# get the tag list
tags_list = grants.tags.tolist()
tags = set([tag for grant_tags in tags_list for tag in grant_tags])
tags
# calculate the fund distribution per tags
funds_r3 = [grants[grants.tags.apply(lambda x: t in x)]['estimated_round_3_clr_match_usd'].sum() for t in tags]
funds_total = [grants[grants.tags.apply(lambda x: t in x)]['total_amount_received_usd_life'].sum() for t in tags]
plot_df = pd.DataFrame({'funds_r3': funds_r3, 'funds_total': funds_total}, index = list(tags))
plot_df = plot_df.sort_values("funds_r3", ascending=False) # ranked by r3 funds
plot_df.plot(kind='bar', title='Figure 3: Tags and Funds (CLR R3)', figsize=(13, 6))
plot_df.plot(subplots=True, kind='pie', title='Figure 4: Tags and Funds (CLR R3)', figsize=(13, 6))
plot_df.plot(x="funds_total", y="funds_r3", s=100, kind='scatter', title='Figure 5: Tags and Funds (CLR R3)', figsize=(6, 6))
plot_df['r3_total_ratio'] = plot_df['funds_r3'] / plot_df['funds_total']
plot_df.plot(kind='bar', y="r3_total_ratio", title='Figure 6: Tags and Funds (CLR R3), Round 3 / Total Ratio', figsize=(13, 6))
###Output
_____no_output_____
###Markdown
2. Keywords Besides following the tags defined by the Gitcoin community, we next try to extract keywords from the grant **titles** and analyze the distribution of funds by keyword. The top 20 keywords are quite different depending on whether we rank the grants by Round 3 CLR funds or by total funds. The common top keywords are **ethereum**, **development**, **research**, **austin**, and **griffith**, mainly coming from the Grant "[Austin Griffith Ethereum Research and Development](https://gitcoin.co/grants/122/austin-griffith-ethereum-rampd)". Besides, **ethereum** is among the top keywords that attract a large amount of funds. **rdai** grows the most as a keyword when looking at the Round 3 / total ratio. The keyword shift can also be seen by comparing the grants' Round 3 funds with their previous total funds.
###Code
# get the keyword list
grants['keywords'] = grants.title.str.lower().str.replace('\W', ' ').str.split(' ')
keywords = set([k for grant_keywords in grants['keywords'] for k in grant_keywords if len(k) > 1])
exclude_keywords = ["and", "by", "to", "the", "an", "of", "on", "be", "for"]
for k in exclude_keywords:
if k in keywords:
keywords.remove(k)
keywords
# calculate the fund distribution per keyword
funds_r3 = [grants[grants.keywords.apply(lambda x: t in x)]['estimated_round_3_clr_match_usd'].sum() for t in keywords]
funds_total = [grants[grants.keywords.apply(lambda x: t in x)]['total_amount_received_usd_life'].sum() for t in keywords]
funds_df = pd.DataFrame({'funds_r3': funds_r3, 'funds_total': funds_total}, index = list(keywords))
# top 20 keywords for previous rounds
previous_keywords = funds_df.sort_values("funds_total", ascending=False)[:20] # ranked by total funds
previous_keywords
# top 20 keywords for round 3
r3_keywords = funds_df.sort_values("funds_r3", ascending=False)[:20] # ranked by r3 funds
r3_keywords
# common keywords in top 30 keywords from r3 and total funds
shared_keywords = list(set(previous_keywords.index).intersection(r3_keywords.index))
funds_df[funds_df.index.isin(shared_keywords)].sort_values("funds_r3", ascending=False)
# draw the charts for keywords and funds
plot_df = funds_df.sort_values("funds_r3", ascending=False)[:20]
plot_df.plot(kind='bar', title='Figure 7: Keywords and Funds (CLR R3)', figsize=(13, 6))
plot_df.plot(y="funds_r3", kind='pie', title='Figure 8: Keywords and Funds (CLR R3)', figsize=(13, 6))
plot_df2 = funds_df.sort_values("funds_total", ascending=False)[:20]
plot_df2.plot(y="funds_total", kind='pie', title='Figure 9: Keywords and Funds (Total)', figsize=(13, 6))
funds_df.plot(x="funds_total", y="funds_r3", s=50, kind='scatter', title='Figure 9: Keywords and Funds (CLR R3)', figsize=(6, 6))
plot_df['r3_total_ratio'] = plot_df['funds_r3'] / plot_df['funds_total']
plot_df.plot(kind='bar', y="r3_total_ratio", title='Figure 10: Keywords and Funds (CLR R3), Round 3 / Total Ratio', figsize=(13, 6))
###Output
_____no_output_____
###Markdown
3. History The results of grant contributions in past rounds may influence the results in Round 3. In this section, we show the correlation between Round 3 funds and the total funds of previous rounds. The correlation is strong at the top, but not apparent when considering all grants. - 9 out of the top 10 grants in Round 3 CLR had received more than 5,000 USD in grants before this round. - Quite a few grants that received large amounts of funding in total received very little in Round 3. - For the grants that received the least funds this round, much less correlation can be found.
###Code
sorted_grants.plot(x="total_amount_received_usd_life", y="estimated_round_3_clr_match_usd", s=30, kind='scatter', title='Figure 11: Rounds Correlation -- All Grants', figsize=(6, 6))
sorted_grants[:10].plot(x="total_amount_received_usd_life", y="estimated_round_3_clr_match_usd", s=sorted_grants["num_of_contributions"], kind='scatter', title='Figure 12: Rounds Correlation -- Top 20 Grants', figsize=(6, 6))
sorted_grants[-60:].plot(x="total_amount_received_usd_life", y="estimated_round_3_clr_match_usd", s=30, kind='scatter', title='Figure 13: Rounds Correlation -- Top 20 Grants', figsize=(6, 6))
sorted_grants['r3_total_ratio'] = sorted_grants['estimated_round_3_clr_match_usd'] / sorted_grants['total_amount_received_usd_life']
sorted_grants.plot(kind='bar', y="r3_total_ratio", title='Figure 14: Rounds Correlation -- Round 3 / Total Ratio', figsize=(13, 6))
###Output
_____no_output_____
###Markdown
Topic 2: Is there on-chain collusion? Gitcoin Grants Round 3 CLR makes use of Pairwise Bonding ( https://ethresear.ch/t/pairwise-coordination-subsidies-a-new-quadratic-funding-design/5553 ) to prevent collusion. To verify the results, we want to detect whether collusion still occurs with this latest model applied. First, let's explore the contribution data. There are **2216** contributions, **514** unique contributors and **589** unique IP addresses in total. We find that the **top 12** contributions by USD value already cover **60%** of the total contribution amount. To investigate collusion, there are a few strategies we could try: 1. investigate the IP addresses shared by profiles; 2. investigate the pairwise coordination across all the projects
###Code
# add grants url and title in contributions
for index, row in grants.iterrows():
contrib = row['contributions']
for c in contrib:
c['grant_title'] = row['title']
c['grant_url'] = row['url']
# get the contribution list
contributions_list = grants.contributions.tolist()
contributions = pd.DataFrame([c for grant_contributions in contributions_list for c in grant_contributions])
contributions.shape[0]
len(contributions.profile.unique()) # number of profile
len(contributions.ip_address.unique()) # number of IP addresses
contributions.value_usd.sum() # total amount of round 3
sorted_contributions = contributions.sort_values("value_usd", ascending=False) # sort by usd value
sorted_contributions[:10]
sorted_contributions.describe() # profile
sorted_contributions[:100].value_usd.plot(kind='bar', title='Figure 14: Contributions by USD Value', figsize=(13, 6))
sorted_contributions.value_usd.plot(kind='pie', title='Figure 15: Contributions by USD Value', figsize=(13, 6))
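# Sketch: cumulative share of the total contribution value covered by the largest
# contributions, to double-check the "top 12 cover about 60%" observation above.
cum_share = sorted_contributions.value_usd.cumsum() / contributions.value_usd.sum()
cum_share.reset_index(drop=True)[:15]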
###Output
_____no_output_____
###Markdown
1. Shared IP Addresses by Profiles. To investigate collusion, we start by finding the suspicious profiles that might be owned by the same real person or group of people. The "IP address" field in the Grants dataset is the key signal for this detection. As shown by the analysis below, **14** suspicious IP addresses are used by **34** users to make **34** contributions. This raises a question: is it possible that 14 people or entities used the 34 accounts to make exactly one contribution from each account? If so, why would they need to do that? We need to look at the details to understand these scenarios. By linking the IP addresses with grants, we find **14** groups of grants that were contributed to from the same IPs but by different profiles. Further analysis is needed to determine whether these related grants represent actual collusion.
###Code
# show the string in table completely
# pd.set_option('display.max_colwidth', -1)
# find shared ip addresses used by multiple profiles
shared_ip_addresses = contributions.groupby('ip_address').agg(
profiles=pd.NamedAgg(column="profile", aggfunc=set),
used_by_number_of_profiles=pd.NamedAgg(column="profile", aggfunc="nunique")).reset_index()
shared_ip_addresses = shared_ip_addresses.sort_values("used_by_number_of_profiles", ascending=False)
reused_ips = shared_ip_addresses[shared_ip_addresses['used_by_number_of_profiles'] > 1]
reused_ips
# list the contributions that belongs to the IP addresses
reused_ips_list = reused_ips.ip_address.tolist()
suspecious_contributions = contributions[contributions['ip_address'].isin(reused_ips_list)]
len(suspecious_contributions) # suspecious_contributions.sort_values("ip_address")
# list the profiles/users that made the suspecious contributions
suspecious_user_list = suspecious_contributions.profile.tolist()
suspecious_profiles_contributions = contributions[contributions.profile.isin(suspecious_user_list)]
suspecious_profiles = suspecious_profiles_contributions.groupby('profile').agg(
grants=pd.NamedAgg(column="grant_url", aggfunc=list),
grants_count=pd.NamedAgg(column="grant_url", aggfunc="count"),
value_sum_usd=pd.NamedAgg(column="value_usd", aggfunc=sum),
ip_addr_count=pd.NamedAgg(column="ip_address", aggfunc="nunique")).reset_index()
suspecious_profiles.shape[0] # number of profiles/users that made the suspecious contributions
suspecious_profiles.sort_values("grants_count", ascending=False)[:20]
# list the suspecious grants from the same IP address
urls = suspecious_contributions.grant_url.tolist()
suspecious_grants = grants[grants['url'].isin(urls)]
len(suspecious_grants)
# group the grants by ip addresses
grants_from_same_IPs = suspecious_contributions.groupby('ip_address')['grant_title'].transform(lambda x: ' | '.join(set(x)))
suspecious_contributions['related_grants'] = grants_from_same_IPs
related_grants = suspecious_contributions[['ip_address', 'related_grants']].drop_duplicates()
related_grants
len(related_grants) # number of group of related grants
###Output
_____no_output_____
###Markdown
2. Pairwise Coordination. In this section, we analyze the paired contributors that appear in the grants to understand how they interact in the Gitcoin Grants system, and their potential offline relationship. We first analyze the contributions made by each profile/user, and then find the users with the largest number of shared grants in their contributions. As analyzed, **102** pairs of contributors have more than 10 shared grants in Gitcoin Grants Round 3 CLR. Looking into more detail, the top pair of profiles (`5b35dfc38e8523fe86422a9a12524ae02bc8d40448a4a1db96af800b` and `775fec778ed2672f511d864e139552a3690de36a93de3a8733773678`) shared **34 grants** in their contributions (more than 1/3 of the total number of grants). The two users are ranked top 2 by the number of grants they contributed to (73 and 53 respectively). Interestingly, `5b35dfc38e8523fe86422a9a12524ae02bc8d40448a4a1db96af800b` has granted 25K+ USD in total, while `775fec778ed2672f511d864e139552a3690de36a93de3a8733773678` has granted 5 USD in total across 53 projects, split evenly at 0.0943 USD per contribution, within 4 or 5 hours. We need extra information about this user to understand this behavior.
###Code
# find shared ip addresses used by multiple profiles
profiles = contributions.groupby('profile').agg(
grants=pd.NamedAgg(column="grant_url", aggfunc=list),
grants_count=pd.NamedAgg(column="grant_url", aggfunc="count"),
value_sum_usd=pd.NamedAgg(column="value_usd", aggfunc=sum)).reset_index()
profiles.shape[0]
profiles.sort_values("grants_count", ascending=False)[:20] # users ranked by grants count
paired_contributors_list = []
for index1, row1 in profiles.iterrows():
for index2, row2 in profiles.iterrows():
if index2 > index1: # avoid duplicate
shared_grants = list(set(row1['grants']).intersection(row2['grants']))
if len(shared_grants) > 0:
paired_contributors_list.append({
"profile1": row1['profile'],
"profile2": row2['profile'],
"shared_grants": shared_grants,
"shared_grants_count": len(shared_grants)
})
paired_contributors = pd.DataFrame(paired_contributors_list)
paired_contributors = paired_contributors.sort_values("shared_grants_count", ascending=False)
paired_contributors.shape[0] # number of paired contributors
paired_contributors.head() # top contributor pairs, ranked by shared_grants_count
paired_contributors_sample = pd.Series(paired_contributors.shared_grants_count.tolist()[::40])
paired_contributors_sample.plot(kind="bar", title="Figure 16: Pairwise Coordination Sampling, Ranked by Shared Grants Count", figsize=(13, 4)) # contributor pairs, ranked by shared_grants_count
suspecious_paired_contributors = paired_contributors[paired_contributors["shared_grants_count"] > 10]
len(suspecious_paired_contributors) # the contributor pairs that have more than 10 shared grants
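# Hedged alternative (sketch): the O(n^2) iterrows loop above can be replaced with a
# profile x grant incidence matrix, where a single dot product yields the shared-grant
# count for every pair of profiles at once. Variable names below are illustrative.
import numpy as np
incidence = pd.crosstab(contributions['profile'], contributions['grant_url']).clip(upper=1)
shared_counts = incidence.dot(incidence.T)  # pairwise shared-grant counts
off_diag = shared_counts.mask(np.eye(len(shared_counts), dtype=bool), 0)  # drop self-pairs
int((off_diag > 10).sum().sum() / 2)  # each unordered pair is counted twice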
###Output
_____no_output_____
###Markdown
3. Combine Shared IP Addresses and Pairwise Coordination. By combining the results of the two analyses above (shared IP addresses and paired contributors), we can narrow down the investigation and find issues faster. Looking at the intersection of the user lists from the two analyses, the two profiles below may be worth investigating first: `ae03c652db8c8a17ea7a89c0593da5ed6c22598fa7a050210c5feb16` and `54356585c9c19db59c4fefd8d157db60bd084fd5218d10c754f46b55`.

profile | grants_count | value_sum_usd | ip_addr_count
:-- | -- | -- | --
ae03c652db8c8a17ea7a89c0593da5ed6c22598fa7a050210c5feb16 | 19 | 5.000000 | 2
54356585c9c19db59c4fefd8d157db60bd084fd5218d10c754f46b55 | 15 | 212.791739 | 1

Similar to `775fec778ed2672f511d864e139552a3690de36a93de3a8733773678`, `ae03c652db8c8a17ea7a89c0593da5ed6c22598fa7a050210c5feb16` has made 5 USD of contributions in total, split evenly into 19 contributions of about 0.263 USD each. We may request extra info from the Gitcoin team to understand why this happens and whether these accounts need more attention and analysis. We can also look at the shared grants contributed to by accounts that are controlled by the same IPs: there are **11** such IPs and **26** such accounts/profiles. For example, the IP address `2ceb75027f1a1132d6cf349ea2bcd918b0f79acdb65c4f68dbf06154` contributed to the grant `/grants/79/lodestar-eth20-client` from 4 different accounts, each with **10 USD**.
###Code
# show the string in table completely
pd.set_option('display.max_colwidth', -1)
# intersections of the user list from shared IP addresses and paired contributors
shared_ip_addr_user_list = set(suspecious_profiles.profile.tolist())
paired_contributor_user_list = set(suspecious_paired_contributors.profile1.tolist() + suspecious_paired_contributors.profile2.tolist())
intersected_user_list = list(shared_ip_addr_user_list.intersection(paired_contributor_user_list))
common_contributions = contributions[contributions.profile.isin(intersected_user_list)]
common_profiles = common_contributions.groupby('profile').agg(
grants=pd.NamedAgg(column="grant_url", aggfunc=list),
grants_count=pd.NamedAgg(column="grant_url", aggfunc="count"),
value_sum_usd=pd.NamedAgg(column="value_usd", aggfunc=sum),
ip_addr_count=pd.NamedAgg(column="ip_address", aggfunc="nunique")).reset_index()
common_profiles.sort_values("grants_count", ascending=False)
# the grants contributed by the two accounts
paired_contributors[((paired_contributors['profile1'] == "54356585c9c19db59c4fefd8d157db60bd084fd5218d10c754f46b55") & (paired_contributors['profile2'] == "24f5374e3bd4898c21084490c34800d86d65fda1d139f7ef91f275bb")) | ((paired_contributors['profile2'] == "54356585c9c19db59c4fefd8d157db60bd084fd5218d10c754f46b55") & (paired_contributors['profile1'] == "24f5374e3bd4898c21084490c34800d86d65fda1d139f7ef91f275bb"))]
# verify the collusion of the reused IPs
reused_ips
collusion_found_list = []
for index, row in reused_ips.iterrows():
relevant_profiles = list(row["profiles"])
for i in range(0, len(relevant_profiles)):
for j in range(i+1, len(relevant_profiles)):
p1 = relevant_profiles[i]
p2 = relevant_profiles[j]
if p1 != p2:
grants1 = suspecious_profiles[suspecious_profiles['profile'] == p1]['grants'].iloc[0]
grants2 = suspecious_profiles[suspecious_profiles['profile'] == p2]['grants'].iloc[0]
shared_grants = list(set(grants1).intersection(grants2))
if len(shared_grants) > 0:
collusion_found_list.append({
"ip_address": row["ip_address"],
"shared_grants": shared_grants,
"profile1": p1,
"profile2": p2,
"shared_grants_count": len(shared_grants)
})
collusion_found = pd.DataFrame(collusion_found_list)
# collusion_found = collusion_found.sort_values("shared_grants_count", ascending=False)
collusion_found # number of collusions found
collusion_found["ip_address"].nunique() # count the unique IPs
len(set(collusion_found["profile1"].unique().tolist() + collusion_found["profile2"].unique().tolist())) # count the profiles
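# Sketch: drill into the single IP address highlighted in the text above to inspect the
# individual contributions behind it (profiles, grants and USD values).
flagged_ip = '2ceb75027f1a1132d6cf349ea2bcd918b0f79acdb65c4f68dbf06154'
contributions[contributions.ip_address == flagged_ip][['profile', 'grant_title', 'grant_url', 'value_usd']]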
###Output
_____no_output_____
###Markdown
ECM-MPT Data Analysis The following notebook walks through the prediction analysis for the Extracellular-Matrix Multiple Particle Tracking (ECM-MPT) study of pup age using the P14, P21, P28, and P35 datasets. Table of Contents [1. Load Data](1.-load-data) [2. Analysis](2.-analysis) [3. Modelling](modelling) [4. Evaluate Results](evaluate-results) --- 1. Load Data Loading the feature dataset from the AWS NanceLab bucket: P14, P21, and P28 data are in mckenna.data/08_06_19_MPT_age_dependence, while P35 data is in mckenna.data/07_16_19_MPT_ECM_breakdown. This bucket is only available through access with Nance lab. There are 15 videos from each age group. The names of the downloaded datasets are listed in dwnld_list.txt.
###Code
# libraries used
import boto3
import diff_classifier.aws as aws
import pandas as pd
import seaborn as sn
import numpy as np
import matplotlib.pyplot as pl
import os
from matplotlib import colors as plt_colors
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
import xgboost as xgb
from xgboost import cv
import shap
dwnld_list = []
source_bucket = 'nancelab.publicfiles'
source_folder = 'ECM_MPT_Files'
keyword = ['40nm', 'NT_brain_2']
s3 = boto3.resource('s3')
bucket = s3.Bucket(source_bucket)
for object in bucket.objects.all():
folder, filename = ('/'.join(object.key.split("/")
[:-1]), object.key.split("/")[-1])
    # only look in source_folder and keep files whose name matches any keyword
if folder in source_folder and any(k in filename for k in ([keyword]*isinstance(keyword, str) or keyword)):
dwnld_list.append(s3.Object(object.bucket_name, object.key))
dwnld_list = [filename.key for filename in dwnld_list if 'features' in filename.key]
dwnld_list
cnt = 0
for dwnld_file in dwnld_list:
folder, filename = (dwnld_file.split("/")[0], dwnld_file.split("/")[-1])
try:
aws.download_s3(dwnld_file, filename, bucket_name=source_bucket)
fstats = pd.read_csv(filename, encoding = "ISO-8859-1", index_col='Unnamed: 0')
print('{} size: {}'.format(filename, fstats.shape))
if 'P14' in filename:
fstats['age'] = pd.Series(fstats.shape[0]*[14], index=fstats.index)
elif 'P21' in filename:
fstats['age'] = pd.Series(fstats.shape[0]*[21], index=fstats.index)
elif 'P28' in filename:
fstats['age'] = pd.Series(fstats.shape[0]*[28], index=fstats.index)
elif 'NT_brain_2' in filename:
fstats['age'] = pd.Series(fstats.shape[0]*[35], index=fstats.index)
else:
print('Error, no target')
fstats['Video Number'] = pd.Series(fstats.shape[0]*[cnt], index=fstats.index)
cnt += 1
if cnt == 1:
fstats_tot = fstats
else:
fstats_tot = fstats_tot.append(fstats, ignore_index=True)
except:
print('Skipped!: {}'.format(filename))
os.remove(f'./{filename}')
###Output
features_NT_brain_2_slice_1_vid_1.csv size: (416, 91)
features_NT_brain_2_slice_1_vid_2.csv size: (833, 91)
features_NT_brain_2_slice_1_vid_3.csv size: (1017, 91)
features_NT_brain_2_slice_1_vid_4.csv size: (878, 91)
features_NT_brain_2_slice_1_vid_5.csv size: (467, 91)
features_NT_brain_2_slice_2_vid_1.csv size: (2488, 91)
features_NT_brain_2_slice_2_vid_2.csv size: (2322, 91)
features_NT_brain_2_slice_2_vid_3.csv size: (1735, 91)
features_NT_brain_2_slice_2_vid_4.csv size: (1650, 91)
features_NT_brain_2_slice_2_vid_5.csv size: (2100, 91)
features_NT_brain_2_slice_3_vid_1.csv size: (562, 91)
features_NT_brain_2_slice_3_vid_2.csv size: (853, 91)
features_NT_brain_2_slice_3_vid_3.csv size: (817, 91)
features_NT_brain_2_slice_3_vid_4.csv size: (598, 91)
features_NT_brain_2_slice_3_vid_5.csv size: (1062, 91)
features_P14_40nm_s1_v1.csv size: (793, 91)
features_P14_40nm_s1_v2.csv size: (1356, 91)
features_P14_40nm_s1_v3.csv size: (519, 91)
features_P14_40nm_s1_v4.csv size: (140, 91)
features_P14_40nm_s1_v5.csv size: (268, 91)
features_P14_40nm_s2_v1.csv size: (568, 91)
features_P14_40nm_s2_v2.csv size: (938, 91)
features_P14_40nm_s2_v3.csv size: (220, 91)
features_P14_40nm_s2_v4.csv size: (162, 91)
features_P14_40nm_s2_v5.csv size: (258, 91)
features_P14_40nm_s3_v1.csv size: (151, 91)
features_P14_40nm_s3_v2.csv size: (243, 91)
features_P14_40nm_s3_v3.csv size: (323, 91)
features_P14_40nm_s3_v4.csv size: (113, 91)
features_P14_40nm_s3_v5.csv size: (389, 91)
features_P21_40nm_s1_v1.csv size: (807, 91)
features_P21_40nm_s1_v2.csv size: (2481, 91)
features_P21_40nm_s1_v3.csv size: (1330, 91)
features_P21_40nm_s1_v4.csv size: (1294, 91)
features_P21_40nm_s1_v5.csv size: (2540, 91)
features_P21_40nm_s2_v1.csv size: (2584, 91)
features_P21_40nm_s2_v2.csv size: (846, 91)
features_P21_40nm_s2_v3.csv size: (435, 91)
features_P21_40nm_s2_v4.csv size: (1506, 91)
features_P21_40nm_s2_v5.csv size: (2884, 91)
features_P21_40nm_s3_v1.csv size: (1086, 91)
features_P21_40nm_s3_v2.csv size: (679, 91)
features_P21_40nm_s3_v3.csv size: (456, 91)
features_P21_40nm_s3_v4.csv size: (1417, 91)
features_P21_40nm_s3_v5.csv size: (915, 91)
features_P28_40nm_s1_v1.csv size: (679, 91)
features_P28_40nm_s1_v2.csv size: (480, 91)
features_P28_40nm_s1_v3.csv size: (195, 91)
features_P28_40nm_s1_v4.csv size: (699, 91)
features_P28_40nm_s1_v5.csv size: (457, 91)
features_P28_40nm_s2_v1.csv size: (500, 91)
features_P28_40nm_s2_v2.csv size: (610, 91)
features_P28_40nm_s2_v3.csv size: (494, 91)
features_P28_40nm_s2_v4.csv size: (703, 91)
features_P28_40nm_s2_v5.csv size: (372, 91)
features_P28_40nm_s3_v1.csv size: (203, 91)
features_P28_40nm_s3_v2.csv size: (306, 91)
features_P28_40nm_s3_v3.csv size: (326, 91)
features_P28_40nm_s3_v4.csv size: (75, 91)
features_P28_40nm_s3_v5.csv size: (195, 91)
###Markdown
2. Analysis The following columns are present within the downloaded datasets:
###Code
fstats_tot.columns
###Output
_____no_output_____
###Markdown
Many of these features are not useful for prediction or have data which may negatively impact classification. The following features and the target feature are defined in the following cell. We also remove any datapoints that are empty or infinite:
###Code
fstats_tot
features = [
    'alpha', # Fitted anomalous diffusion alpha exponent
'D_fit', # Fitted anomalous diffusion coefficient
'kurtosis', # Kurtosis of track
'asymmetry1', # Asymmetry of trajecory (0 for circular symmetric, 1 for linear)
'asymmetry2', # Ratio of the smaller to larger principal radius of gyration
    'asymmetry3', # An asymmetric feature that accounts for non-cylindrically symmetric point distributions
'AR', # Aspect ratio of long and short side of trajectory's minimum bounding rectangle
'elongation', # Est. of amount of extension of trajectory from centroid
'boundedness', # How much a particle with Deff is restricted by a circular confinement of radius r
'fractal_dim', # Measure of how complicated a self similar figure is
'trappedness', # Probability that a particle with Deff is trapped in a region
'efficiency', # Ratio of squared net displacement to the sum of squared step lengths
    'straightness', # Ratio of net displacement to the sum of step lengths
'MSD_ratio', # MSD ratio of the track
'frames', # Number of frames the track spans
'Deff1', # Effective diffusion coefficient at 0.33 s
'Deff2', # Effective diffusion coefficient at 3.3 s
'angle_mean', # Mean turning angle which is counterclockwise angle from one frame point to another
'angle_mag_mean', # Magnitude of the turning angle mean
'angle_var', # Variance of the turning angle
'dist_tot', # Total distance of the trajectory
'dist_net', # Net distance from first point to last point
'progression', # Ratio of the net distance traveled and the total distance
'Mean alpha',
'Mean D_fit',
'Mean kurtosis',
'Mean asymmetry1',
'Mean asymmetry2',
'Mean asymmetry3',
'Mean AR',
'Mean elongation',
'Mean boundedness',
'Mean fractal_dim',
'Mean trappedness',
'Mean efficiency',
'Mean straightness',
'Mean MSD_ratio',
'Mean Deff1',
'Mean Deff2',
]
target = 'age' # prediction target (y)
ecm = fstats_tot
ecm = ecm[~ecm.isin([np.nan, np.inf, -np.inf]).any(1)] # Removing nan and inf data points
# Showing a piece of our data:
ecm.head()
###Output
_____no_output_____
###Markdown
Before prediction, the data must be balanced. As shown below, the current dataset is highly imbalanced, with most datapoints belonging to the P21 and P35 categories. The dataset is reduced by randomly sampling each target category.
###Code
ecm_14 = ecm[ecm[target] == 14]
ecm_21 = ecm[ecm[target] == 21]
ecm_28 = ecm[ecm[target] == 28]
ecm_35 = ecm[ecm[target] == 35]
print(f"Ratio before data balance (P14:P21:P28:P35) = {len(ecm_14)}:{len(ecm_21)}:{len(ecm_28)}:{len(ecm_35)}")
ecm_list = [ecm_14, ecm_21, ecm_28, ecm_35]
for i in range(len(ecm_list)):
ratio = 6000/len(ecm_list[i])
ecm_list[i] = ecm_list[i].sample(frac=ratio, random_state=1)
print(f"Ratio before after balance (P14:P21:P28:P35) = {len(ecm_list[0])}:{len(ecm_list[1])}:{len(ecm_list[2])}:{len(ecm_list[3])}")
bal_ecm = pd.concat(ecm_list)
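# Quick check (sketch): confirm the balanced class sizes directly from the dataframe.
bal_ecm[target].value_counts()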
###Output
Ratio before data balance (P14:P21:P28:P35) = 6416:20665:6194:17169
Ratio after data balance (P14:P21:P28:P35) = 6000:6000:6000:6000
###Markdown
3. Modelling The model used for this study is an extreme gradient boosting (XGBoost) decision tree, chosen for its strong track record in competitions and research. Because the feature set includes statistics computed from each trajectory's local surroundings, spatial binning is required to avoid data leakage between training and testing. The following code implements the binning and a checkerboard scheme that determines which bins are used for training and which are held out for validation and testing.
###Code
# Using checkerboard binning for data split:
def checkerboard(size):
rows = int(size/2)
checks = list(range(0, size*size, size+1))
for i in range(1, rows):
ssize = size - 2*i
for j in range(0, ssize):
checks.append(2*i + (size+1)*j)
for i in range(1, rows):
ssize = size - 2*i
for j in range(0, ssize):
checks.append(size*size - 1 - (2*i + (size+1)*j))
checks.sort()
return checks
bins = list(range(0, 2048+1, 256))
bal_ecm['binx'] = pd.cut(bal_ecm.X, bins, labels=[0, 1, 2, 3, 4, 5, 6, 7])
bal_ecm['biny'] = pd.cut(bal_ecm.Y, bins, labels=[0, 1, 2, 3, 4, 5, 6, 7])
bal_ecm['bins'] = 8*bal_ecm['binx'].astype(np.int8) + bal_ecm['biny'].astype(np.int8)
bal_ecm = bal_ecm[np.isfinite(bal_ecm['bins'])]
bal_ecm['bins'] = bal_ecm['bins'].astype(int)
cols = bal_ecm.columns.tolist()
cols = cols[-3:] + cols[:-3]
bal_ecm = bal_ecm[cols]
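# Quick sanity check (sketch): visualize which of the 8x8 spatial bins are held out by
# the checkerboard pattern (1) versus kept for training (0).
check_mask = np.zeros((8, 8), dtype=int)
for b in checkerboard(8):
    check_mask[b // 8, b % 8] = 1
print(check_mask)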
le = preprocessing.LabelEncoder()
X_train = bal_ecm[~bal_ecm.bins.isin(checkerboard(8))].reset_index()
X_test_val = bal_ecm[bal_ecm.bins.isin(checkerboard(8))].reset_index()
y_train = le.fit_transform(X_train[target])
X_val, X_test = train_test_split(X_test_val, test_size=0.5, random_state=123)
y_test = le.fit_transform(X_test[target])
y_val = le.fit_transform(X_val[target])
# note: the reduced feature set new_feat is only defined later in the feature-search
# section, so the full feature list is used to build the DMatrix objects here
dtrain = xgb.DMatrix(X_train[features], label=y_train)
dtest = xgb.DMatrix(X_test[features], label=y_test)
dval = xgb.DMatrix(X_val[features], label=y_val)
###Output
_____no_output_____
###Markdown
Model hyperparameters were chosen to minimize the multi-class log-loss on the validation set.
###Code
param = {'max_depth': 7,
'eta': 0.005,
'min_child_weight': 0,
'verbosity': 0,
'objective': 'multi:softprob',
'num_class': 4,
'silent': 'True',
'gamma': 5,
'subsample': 0.15,
'colsample_bytree': 0.8}
watchlist = [(dval, 'eval'), (dtrain, 'train')]
num_round = 10
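# Optional (sketch): the xgboost.cv helper imported above could be used to pick num_round
# via cross-validated multi-class log-loss before training; the fold count and early
# stopping below are illustrative choices, not the settings behind the reported results.
cv_results = xgb.cv(param, dtrain, num_boost_round=50, nfold=5,
                    metrics='mlogloss', early_stopping_rounds=10, seed=123)
print(cv_results['test-mlogloss-mean'].idxmin() + 1, "boosting rounds suggested by CV")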
bst = xgb.train(param, dtrain, num_round, watchlist)
######
label = dtest.get_label()
ypred1 = bst.predict(dtest)
# by default, we predict using all the trees
pred = [np.where(x == np.max(x))[0][0] for x in ypred1]
print("Accuracy:",metrics.accuracy_score(y_test, pred))
# bst.save_model('xgboost_model_allcategories')
results = X_test[features]
results['predicted'] = pred
results['actual'] = y_test
###Output
/root/anaconda3/envs/david/lib/python3.7/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
/root/anaconda3/envs/david/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
This is separate from the ipykernel package so we can avoid doing imports until
###Markdown
4. Evaluate Results
###Code
# print('0 == {}'.format(le.inverse_transform([0])))
# print('1 == {}'.format(le.inverse_transform([1])))
# print('2 == {}'.format(le.inverse_transform([2])))
# print('3 == {}'.format(le.inverse_transform([3])))
class_names = ['P14', 'P21', 'P28', 'P35']
class_results = classification_report(y_test, pred, digits=4, target_names = ['P14', 'P21', 'P28', 'P35'])
print(str(class_results))
confusion_matrix(y_test, pred)
pl.figure(figsize=(12,10))
cm_array = confusion_matrix(y_test, pred)
df_cm = pd.DataFrame(cm_array, index = class_names, columns = class_names)
sn.set(font_scale=1.4) # for label size
ax = sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}, cmap="YlGnBu")
ax.set(xlabel='Predicted', ylabel='Actual')  # rows of confusion_matrix are actual labels, columns are predictions
pl.show()
explainer = shap.TreeExplainer(bst)
shap_values = explainer.shap_values(X_test[features])
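# Sketch: rank features by mean absolute SHAP value across all four classes; shap_values
# is a list with one (samples x features) array per class for this multi-class model.
mean_abs_shap = np.abs(np.array(shap_values)).mean(axis=(0, 1))
pd.Series(mean_abs_shap, index=features).sort_values(ascending=False).head(10)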
%matplotlib inline
colors = ['#999999', '#e5bf62', '#7995e9', '#a64ca6']
class_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])
cmap = plt_colors.ListedColormap(np.array(colors)[class_inds])
# sn.reset_orig() # Reset matplot lib to no longer use seaborn
shap.summary_plot(shap_values, X_test[features], class_names=np.array(class_names), title='Total SHAP Values', color=cmap)
pl.ioff()
%matplotlib inline
figsize = (7.5, 5)
bottom = -2.0
top = 2.0
for i in range(len(shap_values)):
fig = pl.figure(figsize=figsize)
ax = fig.gca()
ax.set_ylim(bottom, top)
shap.dependence_plot("Mean Deff1", shap_values[i], X_test[features], interaction_index = None, color=colors[i], alpha=0.5, ax=ax)
figsize = (7.5, 5)
bottom = -1.2
top = 1.2
for i in range(len(shap_values)):
fig = pl.figure(figsize=figsize)
ax = fig.gca()
ax.set_ylim(bottom, top)
shap.dependence_plot("Mean fractal_dim", shap_values[i], X_test[features], interaction_index = None, color=colors[i], alpha=0.5, ax=ax)
figsize = (7.5, 5)
bottom = -1.5
top = 1.5
for i in range(len(shap_values)):
fig = pl.figure(figsize=figsize)
ax = fig.gca()
ax.set_ylim(bottom, top)
shap.dependence_plot("Mean D_fit", shap_values[i], X_test[features], interaction_index = None, color=colors[i], alpha=0.5, ax=ax)
figsize = (7.5, 5)
bottom = -1.0
top = 1.0
for i in range(len(shap_values)):
fig = pl.figure(figsize=figsize)
ax = fig.gca()
ax.set_ylim(bottom, top)
shap.dependence_plot("Mean MSD_ratio", shap_values[i], X_test[features], interaction_index = None, color=colors[i], alpha=0.5, ax=ax)
shap.summary_plot(shap_values[0], X_test[features], max_display=5, class_names = class_names, title = 'SHAP Value for P14')
shap.summary_plot(shap_values[1], X_test[features], max_display=5, class_names = class_names, title = 'SHAP Value for P21')
shap.summary_plot(shap_values[2], X_test[features], max_display=5, class_names = class_names, title='SHAP Value for P28')
shap.summary_plot(shap_values[3], X_test[features], max_display=5, class_names=class_names, title='SHAP Value for P35')
from modules import anim_plot_changed
from importlib import reload
reload(anim_plot_changed)
_ = anim_plot_changed.rotate_3d(results, [top_feat[0], top_feat[1], top_feat[2]])
_ = anim_plot_changed.rotate_3d(results, [top_feat[0], top_feat[2], top_feat[3]])
_ = anim_plot_changed.rotate_3d(results, [top_feat[1], top_feat[2], top_feat[3]])
from modules import anim_plot_changed
from importlib import reload
reload(anim_plot_changed)
_ = anim_plot_changed.rotate_3d(results, [top_feat[0], top_feat[1], top_feat[2]], anim_param={'frames':np.arange(0,720,1)}, save_param={'filename':'This_is_a_test.gif','fps':50})
from matplotlib import animation
from matplotlib.animation import PillowWriter
from xgboost import XGBClassifier
model = XGBClassifier()
model.fit(X_train[features], y_train, eval_set=[(X_val[features],y_val)], eval_metric='mlogloss')
pred2 = model.predict(X_test[features])
print("Accuracy:", metrics.accuracy_score(y_test, pred2))
print(model.feature_importances_)
np.array(model.feature_importances_ > .078)
np.array(features)[np.array(model.feature_importances_ == 0)]
# Feature search:
thresh = np.arange(0,.1,.002)
best_acc = -1
best_thresh = -1
model2 = XGBClassifier()
for t in thresh:
print(f"Using thresh = {t} ",end = '| ')
new_feat = np.array(features)[np.array(model.feature_importances_ > t)]
model2.fit(X_train[new_feat], y_train, verbose=False, eval_set=[(X_val[new_feat],y_val)], eval_metric='mlogloss')
pred3 = model2.predict(X_test[new_feat])
acc = metrics.accuracy_score(y_test, pred3)
print(f"Accuracy = {acc} ",end = '| ')
if acc > best_acc:
best_thresh = t
best_acc = acc
print(f"Best accuracy = {best_acc}, Best threshold = {best_thresh}")
print(f"Features used:\n{np.array(features)[np.array(model.feature_importances_ > best_thresh)]}")
results = model2.evals_result()
param2 = {'max_depth': 2,
'eta': 0.005,
'min_child_weight': 0,
'verbosity': 0,
'objective': 'multi:softprob',
'num_class': 4,
'silent': 'True',
'gamma': 5,
'subsample': 0.25,
'colsample_bytree': 0.3,
'colsample_bynode':.5,
'reg_alpha': 0}
from sklearn.metrics import accuracy_score
model_final = XGBClassifier(**param2)
new_feat = np.array(features)[np.array(model.feature_importances_ > best_thresh)]  # use the best threshold found above, not the last loop value
eval_set = [(X_train[new_feat], y_train), (X_test[new_feat], y_test)]
model_final.fit(X_train[new_feat], y_train, verbose=False, eval_set=eval_set, eval_metric=["merror", 'mlogloss'])
y_pred_f = model_final.predict(X_test[new_feat])
accuracy = accuracy_score(y_test, y_pred_f)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
results = model_final.evals_result()
epochs = len(results['validation_0']['merror'])
x_axis = range(0, epochs)
fig, ax = pl.subplots(figsize=(12,12))
ax.plot(x_axis, results['validation_0']['mlogloss'], label='Train')
ax.plot(x_axis, results['validation_1']['mlogloss'], label='Test')
ax.legend()
pl.ylabel('Log Loss')
pl.title('XGBoost Log Loss')
pl.show()
# dict_importance (feature name -> importance) is not defined above, so build it from the
# fitted model before ranking
dict_importance = dict(zip(features, model.feature_importances_))
sorted(dict_importance, key=dict_importance.get, reverse=True)[:5]
new_feat = np.array(features)[np.array(model.feature_importances_ > best_thresh)]
model2.fit(X_train[new_feat], y_train, verbose=False, eval_set=[(X_val[new_feat],y_val)], eval_metric='mlogloss')
pred3 = model2.predict(X_test[new_feat])
acc = metrics.accuracy_score(y_test, pred3)
print("Accuracy:",metrics.accuracy_score(y_test, pred3))
from IPython.display import HTML
HTML('rotation_MeanDeff1_MeanD_fit_MeanMSD_ratio.html')
###Output
_____no_output_____
###Markdown
SIMULATION CHECKING AND VISUALIZING
###Code
from autoscalingsim import simulator
import pandas as pd
starting_time = pd.Timestamp("2020-09-17T10:00:00")
simulation_step = pd.Timedelta(100, unit = 'ms')
time_to_simulate = pd.Timedelta(10, unit = 'm')
config_dir = "experiments/topologies/reactive/topo_a"#"experiments/short-experiment/reactive"
#"experiments/short-experiment/reactive-mapping"#"experiments/testazuremanual2"#"experiments/test"#
results_dir = None
simulator = simulator.Simulator(simulation_step, starting_time, time_to_simulate, 666)
simulator.add_simulation(config_dir, results_dir)
simulator.start_simulation()
from stethoscope.analytical_engine import AnalysisFramework
af = AnalysisFramework(simulation_step, 'D:/AutoscalingSim/results/test/topologies/reactive/topo_a')
af.build_figures_for_single_simulation(simulator.simulations['topo_a'], '')#af.build_figures_for_single_simulation(simulator.simulations['test'], '')#
import pandas as pd
from experimentgenerator.deployment_generator import DeploymentGenerator
DeploymentGenerator.generate("experiments/topologies/reactive/topo_a/application_model.json",
"experiments/topologies/reactive/topo_a/platform_model.json",
reqs_fraction_expected_to_serve = 0.4,
simulation_step = pd.Timedelta(100, unit = 'ms'),
load_magnitude = 15, load_batch_size = 1)
services_by_app = {
'topo_a': [
'service-7edf312e-7e71-11eb-aac0-d8cb8af1e959',
'service-7edf312f-7e71-11eb-a3f8-d8cb8af1e959',
'service-7edf3133-7e71-11eb-9bac-d8cb8af1e959',
'service-7edf3135-7e71-11eb-baf5-d8cb8af1e959',
'service-7edf5818-7e71-11eb-b9e3-d8cb8af1e959',
'service-7edf3131-7e71-11eb-9cb2-d8cb8af1e959',
'service-7edf3134-7e71-11eb-9d1a-d8cb8af1e959',
'service-7edf3130-7e71-11eb-88ac-d8cb8af1e959',
'service-7edf5819-7e71-11eb-800c-d8cb8af1e959',
'service-7edf3132-7e71-11eb-a32d-d8cb8af1e959'
],
'topo_b': [
'service-1d1bdea9-7f3b-11eb-abf0-d8cb8af1e959',
'service-1d1bdeaa-7f3b-11eb-8cbf-d8cb8af1e959',
'service-1d1bdead-7f3b-11eb-8c6e-d8cb8af1e959',
'service-1d1bdeae-7f3b-11eb-bf1c-d8cb8af1e959',
'service-1d1bdeaf-7f3b-11eb-bbf6-d8cb8af1e959',
'service-1d1bdeb0-7f3b-11eb-9a4a-d8cb8af1e959',
'service-1d1c0586-7f3b-11eb-bb38-d8cb8af1e959',
'service-1d1bdeab-7f3b-11eb-8bce-d8cb8af1e959',
'service-1d1bdeb1-7f3b-11eb-a6d0-d8cb8af1e959',
'service-1d1bdeac-7f3b-11eb-95d1-d8cb8af1e959'
],
'topo_c': [
'service-a0856b68-7f3d-11eb-a7e8-d8cb8af1e959',
'service-a0856b69-7f3d-11eb-bcd9-d8cb8af1e959',
'service-a0856b6d-7f3d-11eb-af79-d8cb8af1e959',
'service-a0856b6c-7f3d-11eb-8578-d8cb8af1e959',
'service-a0856b6b-7f3d-11eb-b68a-d8cb8af1e959',
'service-a0856b6e-7f3d-11eb-9268-d8cb8af1e959',
'service-a0856b6a-7f3d-11eb-a26d-d8cb8af1e959',
'service-a0856b6f-7f3d-11eb-b24f-d8cb8af1e959',
'service-a0856b70-7f3d-11eb-a12c-d8cb8af1e959',
'service-a0859374-7f3d-11eb-a379-d8cb8af1e959'
],
'topo_d': [
'service-dd56521f-7f5b-11eb-a1dc-d8cb8af1e959',
'service-dd565220-7f5b-11eb-b6c4-d8cb8af1e959',
'service-dd565222-7f5b-11eb-8845-d8cb8af1e959',
'service-dd565223-7f5b-11eb-be29-d8cb8af1e959',
'service-dd565224-7f5b-11eb-87c1-d8cb8af1e959',
'service-dd567913-7f5b-11eb-a90f-d8cb8af1e959',
'service-dd565221-7f5b-11eb-9319-d8cb8af1e959',
'service-dd567915-7f5b-11eb-b17a-d8cb8af1e959',
'service-dd567912-7f5b-11eb-96e9-d8cb8af1e959',
'service-dd567914-7f5b-11eb-a440-d8cb8af1e959'
]
}
import tensorflow as tf
topo_name = 'topo_d'
for service_name in services_by_app[topo_name]:
model = tf.keras.models.load_model(f'D:/AutoscalingSim/autoscaling-simulator/results_thesis/mapping-models/topologies-SMALL/reps_1_230/{topo_name}/{service_name}/eu/group1/dav_model.mdl')
print(service_name)
# Container, load, mem, cpu
for cont_cnt, cpu_util_deduct in zip(range(40, 61, 1), range(0, 21, 1)):
cpu_util = (10 - cpu_util_deduct) / 10
print(f'{cont_cnt}: {model.predict([[cont_cnt, 15, (20 - cpu_util_deduct) / 20, 0.23]])}')
###Output
service-dd56521f-7f5b-11eb-a1dc-d8cb8af1e959
40: [[44958.11]]
41: [[45966.688]]
42: [[46975.266]]
43: [[47983.84]]
44: [[48992.414]]
45: [[50000.99]]
46: [[51009.566]]
47: [[52018.15]]
48: [[53026.72]]
49: [[54035.3]]
50: [[55043.875]]
51: [[56052.453]]
52: [[57061.03]]
53: [[58069.6]]
54: [[59078.188]]
55: [[60086.76]]
56: [[61095.34]]
57: [[62103.914]]
58: [[63112.492]]
59: [[64121.074]]
60: [[65129.64]]
service-dd565220-7f5b-11eb-b6c4-d8cb8af1e959
40: [[7952.744]]
41: [[7925.601]]
42: [[7898.458]]
43: [[7871.315]]
44: [[7844.17]]
45: [[7817.028]]
46: [[7789.884]]
47: [[7762.741]]
48: [[7735.598]]
49: [[7708.4536]]
50: [[7681.311]]
51: [[7654.167]]
52: [[7627.0244]]
53: [[7599.881]]
54: [[7572.737]]
55: [[7545.5938]]
56: [[7518.4507]]
57: [[7491.3066]]
58: [[7464.1646]]
59: [[7437.02]]
60: [[7409.877]]
service-dd565222-7f5b-11eb-8845-d8cb8af1e959
40: [[36247.94]]
41: [[37040.258]]
42: [[37832.582]]
43: [[38624.902]]
44: [[39417.223]]
45: [[40209.54]]
46: [[41001.86]]
47: [[41794.18]]
48: [[42586.5]]
49: [[43378.816]]
50: [[44171.137]]
51: [[44963.457]]
52: [[45755.785]]
53: [[46548.098]]
54: [[47340.418]]
55: [[48132.74]]
56: [[48925.06]]
57: [[49717.38]]
58: [[50509.7]]
59: [[51302.023]]
60: [[52094.336]]
service-dd565223-7f5b-11eb-be29-d8cb8af1e959
40: [[49091.63]]
41: [[50202.348]]
42: [[51313.06]]
43: [[52423.78]]
44: [[53534.496]]
45: [[54645.21]]
46: [[55755.934]]
47: [[56866.656]]
48: [[57977.355]]
49: [[59088.082]]
50: [[60198.8]]
51: [[61309.516]]
52: [[62420.234]]
53: [[63530.95]]
54: [[64641.668]]
55: [[65752.38]]
56: [[66863.11]]
57: [[67973.82]]
58: [[69084.55]]
59: [[70195.25]]
60: [[71305.98]]
service-dd565224-7f5b-11eb-87c1-d8cb8af1e959
40: [[36779.914]]
41: [[37614.91]]
42: [[38449.902]]
43: [[39284.895]]
44: [[40119.887]]
45: [[40954.88]]
46: [[41789.875]]
47: [[42624.87]]
48: [[43459.855]]
49: [[44294.855]]
50: [[45129.84]]
51: [[45964.84]]
52: [[46799.836]]
53: [[47634.832]]
54: [[48469.83]]
55: [[49304.812]]
56: [[50139.81]]
57: [[50974.8]]
58: [[51809.8]]
59: [[52644.79]]
60: [[53479.78]]
service-dd567913-7f5b-11eb-a90f-d8cb8af1e959
40: [[31781.643]]
41: [[32475.477]]
42: [[33169.31]]
43: [[33863.15]]
44: [[34556.98]]
45: [[35250.816]]
46: [[35944.652]]
47: [[36638.49]]
48: [[37332.32]]
49: [[38026.156]]
50: [[38719.992]]
51: [[39413.83]]
52: [[40107.66]]
53: [[40801.5]]
54: [[41495.332]]
55: [[42189.17]]
56: [[42883.008]]
57: [[43576.84]]
58: [[44270.68]]
59: [[44964.51]]
60: [[45658.344]]
service-dd565221-7f5b-11eb-9319-d8cb8af1e959
40: [[51050.586]]
41: [[52191.22]]
42: [[53331.836]]
43: [[54472.465]]
44: [[55613.098]]
45: [[56753.73]]
46: [[57894.35]]
47: [[59034.977]]
48: [[60175.61]]
49: [[61316.24]]
50: [[62456.867]]
51: [[63597.492]]
52: [[64738.12]]
53: [[65878.75]]
54: [[67019.39]]
55: [[68160.01]]
56: [[69300.64]]
57: [[70441.266]]
58: [[71581.89]]
59: [[72722.516]]
60: [[73863.16]]
service-dd567915-7f5b-11eb-b17a-d8cb8af1e959
40: [[42563.992]]
41: [[43525.348]]
42: [[44486.707]]
43: [[45448.07]]
44: [[46409.43]]
45: [[47370.785]]
46: [[48332.145]]
47: [[49293.504]]
48: [[50254.867]]
49: [[51216.223]]
50: [[52177.59]]
51: [[53138.94]]
52: [[54100.305]]
53: [[55061.664]]
54: [[56023.023]]
55: [[56984.387]]
56: [[57945.746]]
57: [[58907.105]]
58: [[59868.465]]
59: [[60829.824]]
60: [[61791.176]]
service-dd567912-7f5b-11eb-96e9-d8cb8af1e959
40: [[39936.54]]
41: [[40785.754]]
42: [[41634.965]]
43: [[42484.18]]
44: [[43333.387]]
45: [[44182.605]]
46: [[45031.816]]
47: [[45881.02]]
48: [[46730.24]]
49: [[47579.45]]
50: [[48428.66]]
51: [[49277.87]]
52: [[50127.082]]
53: [[50976.297]]
54: [[51825.508]]
55: [[52674.723]]
56: [[53523.926]]
57: [[54373.15]]
58: [[55222.355]]
59: [[56071.566]]
60: [[56920.785]]
service-dd567914-7f5b-11eb-a440-d8cb8af1e959
40: [[55086.867]]
41: [[56240.598]]
42: [[57394.324]]
43: [[58548.055]]
44: [[59701.777]]
45: [[60855.504]]
46: [[62009.23]]
47: [[63162.96]]
48: [[64316.684]]
49: [[65470.42]]
50: [[66624.14]]
51: [[67777.875]]
52: [[68931.6]]
53: [[70085.32]]
54: [[71239.055]]
55: [[72392.78]]
56: [[73546.51]]
57: [[74700.24]]
58: [[75853.96]]
59: [[77007.69]]
60: [[78161.42]]
###Markdown
VISUALIZING TRAINING PROGRESS FOR DEEP MODELS
###Code
import pandas as pd
import os
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
def produce_training_plot(dir_with_log : str):
log = pd.read_csv(os.path.join(dir_with_log, 'training_log.csv'), sep = ';', header = None,names = ['simtime', 'service', 'metrics_group', 'region', 'divergence'])
services = log.service.unique()
fig = plt.figure(figsize = (16, 10))
ax = fig.add_subplot(1, 1, 1)
for service in services:
service_training_log = log[log.service == service].reset_index()[['divergence']]
ax.plot(service_training_log.rolling(500).mean(), label = service)
ax.axhline(0.25, 0, 1.0, color = 'k', linestyle = 'solid', lw = 2)
ax.legend(loc = 'upper right')
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_major_locator(ticker.MultipleLocator(500))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(100))
plt.xticks(fontsize = 15)
plt.yticks(fontsize = 15)
plt.xlabel('Training iterations count', fontsize = 18)
plt.ylabel('Scaled error', fontsize = 18)
plt.savefig(os.path.join(dir_with_log, 'learning.png'), dpi = 600, bbox_inches = 'tight')
dir_with_log = 'D:/AutoscalingSim/autoscaling-simulator/results_thesis/mapping-models/topologies-SMALL/reps_1_150/topo_c'
produce_training_plot(dir_with_log)
###Output
_____no_output_____
###Markdown
LOAD PATTERNS EXPERIMENTATION
###Code
# Simple load pattern prep
import pandas as pd
import numpy as np
idx = pd.date_range(start = '2020-09-17T08:00:00', end = '2020-09-17T08:59:59', freq = '1s')
sub_idx_onwards = idx[idx >= pd.Timestamp('2020-09-17T08:01:00')]
sub_idx_before = idx[idx < pd.Timestamp('2020-09-17T08:01:00')]
load = [ 3 + np.random.choice(3) if ts.minute % 2 == 0 else 1 + np.random.choice(2) for ts in sub_idx_onwards ]
load = [ 0 ] * len(sub_idx_before) + load
load_ts = pd.DataFrame(data = {'value': load}, index = idx)
load_ts.resample(pd.Timedelta(1, unit = 's')).sum().plot()
# Check different ARIMA-based load patterns
import statsmodels.api as sm
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def generate_load_ts(scale_per_resolution : float):
idx = pd.date_range(start = '2020-09-17T08:00:00', end = '2020-09-17T08:59:59', freq = '1s')
empty_dataset = np.zeros(len(idx))
mod = sm.tsa.SARIMAX(empty_dataset, order=(2, 0, 1), seasonal_order = (0, 0, 1, 120), trend='c', initialization='diffuse')
load = scale_per_resolution * (mod.simulate([0, 0.8, 0.01, 0.5, 0.01, 0.01], len(idx)) + 1.0)
load_ts = pd.DataFrame(data = {'value': load}, index = idx)
ax = load_ts.resample(pd.Timedelta(1, unit = 's')).sum().plot(figsize=(10,6))
ax.get_legend().set_visible(False)
return load_ts
# Offline forecasting models fitting
import pickle
import os
import statsmodels.api as sm
def fit_forecasting_model(load_ts : pd.DataFrame, base_path : str, filename_pattern : str, metric : str, model_name : str, app_name : str, regions : list, services : str, scale_per_resolution : float):
load_ts_r = generate_load_ts(scale_per_resolution).resample(pd.Timedelta(1, unit = 'm')).sum()
mdl = None
if model_name == 'arima':
mdl = sm.tsa.SARIMAX(load_ts_r.value, order = (2, 0, 1), seasonal_order = (0, 0, 1, 2), trend='c', initialization='diffuse').fit()
elif model_name == 'holtwinters':
init_conf = { 'trend' : None, 'damped_trend' : None, 'seasonal' : "add", "seasonal_periods": 2 }
mdl = sm.tsa.ExponentialSmoothing(load_ts_r.value, **init_conf).fit(smoothing_level = 0.7, smoothing_trend = None, smoothing_seasonal = 1.0, damping_trend = None, optimized = False)
print(f'{model_name} prediction:\n {mdl.predict(start = load_ts_r.index.max() + pd.Timedelta(1, unit = "m"), end = load_ts_r.index.max() + pd.Timedelta(20, unit = "m"))}')
model_path = base_path.format(app_name, model_name)
if not os.path.exists(model_path):
os.makedirs(model_path)
model_path = os.path.join(model_path, filename_pattern)
for service_name in services:
for region in regions:
model_fpath = model_path.format(service_name, region, metric)
pickle.dump(mdl, open( model_fpath, 'wb') )
base_path = 'D:\\AutoscalingSim\\autoscaling-simulator\\trained_models\\forecasting\\topologies-experiment\\load\\arima\\{}\\{}'
filename_pattern = '{}-{}-{}.mdl'
regions = ['eu']
metric = 'load'
model_name = 'arima'
fit_forecasting_model(load_ts, base_path, filename_pattern, metric, model_name, 'topo_a', regions, services_by_app['topo_a'], 30)
fit_forecasting_model(load_ts, base_path, filename_pattern, metric, model_name, 'topo_b', regions, services_by_app['topo_b'], 50)
fit_forecasting_model(load_ts, base_path, filename_pattern, metric, model_name, 'topo_c', regions, services_by_app['topo_c'], 30)
fit_forecasting_model(load_ts, base_path, filename_pattern, metric, model_name, 'topo_d', regions, services_by_app['topo_d'], 30)
# Other patterns generation with Camel
from camel import camel
print(camel.Camel.generate_load_pattern_based_on_recipe("camel_conf/oscillating-5min.json"))
###Output
[{"requests_count_level": 0, "percentage_of_interval": 0.2},
{"requests_count_level": 2000, "percentage_of_interval": 0.2},
{"requests_count_level": 1000, "percentage_of_interval": 0.2},
{"requests_count_level": 2000, "percentage_of_interval": 0.2},
{"requests_count_level": 1000, "percentage_of_interval": 0.2}]
###Markdown
EXPERIMENTS GENERATION BASED ON TRACES (AZURE)
###Code
from experimentgenerator.experiment_generator import ExperimentGenerator
a = ExperimentGenerator('experiments/topologies/topo_d')
a.generate_experiment('experiment_recipes/topologies/topo_d.json')
###Output
Processing vmtable.csv: iteration 1
Processing vmtable.csv: iteration 2
Processing vmtable.csv: iteration 3
Processing vmtable.csv: iteration 4
Processing vmtable.csv: iteration 5
Processing vmtable.csv: iteration 6
Processing vmtable.csv: iteration 7
Processing vmtable.csv: iteration 8
Processing vmtable.csv: iteration 9
Processing vmtable.csv: iteration 10
Processing vmtable.csv: iteration 11
Processing vmtable.csv: iteration 12
Processing vmtable.csv: iteration 13
Processing vmtable.csv: iteration 14
Processing vmtable.csv: iteration 15
Processing vmtable.csv: iteration 16
Processing vmtable.csv: iteration 17
Processing vmtable.csv: iteration 18
Processing vmtable.csv: iteration 19
Processing vmtable.csv: iteration 20
Processing vmtable.csv: iteration 21
Processing vmtable.csv: iteration 22
Processing vmtable.csv: iteration 23
Processing vmtable.csv: iteration 24
Processing vmtable.csv: iteration 25
Processing vmtable.csv: iteration 26
Processing vmtable.csv: iteration 27
Processing vm_cpu_readings-file-1-of-195.csv: iteration 1
Processing vm_cpu_readings-file-1-of-195.csv: iteration 2
Processing vm_cpu_readings-file-1-of-195.csv: iteration 3
Processing vm_cpu_readings-file-1-of-195.csv: iteration 4
Processing vm_cpu_readings-file-1-of-195.csv: iteration 5
Processing vm_cpu_readings-file-1-of-195.csv: iteration 6
Processing vm_cpu_readings-file-1-of-195.csv: iteration 7
Processing vm_cpu_readings-file-1-of-195.csv: iteration 8
Processing vm_cpu_readings-file-1-of-195.csv: iteration 9
Processing vm_cpu_readings-file-1-of-195.csv: iteration 10
Processing vm_cpu_readings-file-1-of-195.csv: iteration 11
Processing vm_cpu_readings-file-1-of-195.csv: iteration 12
Processing vm_cpu_readings-file-1-of-195.csv: iteration 13
Processing vm_cpu_readings-file-1-of-195.csv: iteration 14
Processing vm_cpu_readings-file-1-of-195.csv: iteration 15
Processing vm_cpu_readings-file-1-of-195.csv: iteration 16
Processing vm_cpu_readings-file-1-of-195.csv: iteration 17
Processing vm_cpu_readings-file-1-of-195.csv: iteration 18
Processing vm_cpu_readings-file-1-of-195.csv: iteration 19
Processing vm_cpu_readings-file-1-of-195.csv: iteration 20
Processing vm_cpu_readings-file-1-of-195.csv: iteration 21
Processing vm_cpu_readings-file-1-of-195.csv: iteration 22
Processing vm_cpu_readings-file-1-of-195.csv: iteration 23
Processing vm_cpu_readings-file-1-of-195.csv: iteration 24
Processing vm_cpu_readings-file-1-of-195.csv: iteration 25
Processing vm_cpu_readings-file-1-of-195.csv: iteration 26
Processing vm_cpu_readings-file-1-of-195.csv: iteration 27
Processing vm_cpu_readings-file-1-of-195.csv: iteration 28
Processing vm_cpu_readings-file-1-of-195.csv: iteration 29
Processing vm_cpu_readings-file-1-of-195.csv: iteration 30
Processing vm_cpu_readings-file-1-of-195.csv: iteration 31
Processing vm_cpu_readings-file-1-of-195.csv: iteration 32
Processing vm_cpu_readings-file-1-of-195.csv: iteration 33
Processing vm_cpu_readings-file-1-of-195.csv: iteration 34
Processing vm_cpu_readings-file-1-of-195.csv: iteration 35
Processing vm_cpu_readings-file-1-of-195.csv: iteration 36
Processing vm_cpu_readings-file-1-of-195.csv: iteration 37
Processing vm_cpu_readings-file-1-of-195.csv: iteration 38
Processing vm_cpu_readings-file-1-of-195.csv: iteration 39
Processing vm_cpu_readings-file-1-of-195.csv: iteration 40
Processing vm_cpu_readings-file-1-of-195.csv: iteration 41
Processing vm_cpu_readings-file-1-of-195.csv: iteration 42
Processing vm_cpu_readings-file-1-of-195.csv: iteration 43
Processing vm_cpu_readings-file-1-of-195.csv: iteration 44
Processing vm_cpu_readings-file-1-of-195.csv: iteration 45
Processing vm_cpu_readings-file-1-of-195.csv: iteration 46
Processing vm_cpu_readings-file-1-of-195.csv: iteration 47
Processing vm_cpu_readings-file-1-of-195.csv: iteration 48
Processing vm_cpu_readings-file-1-of-195.csv: iteration 49
Processing vm_cpu_readings-file-1-of-195.csv: iteration 50
Processing vm_cpu_readings-file-1-of-195.csv: iteration 51
Processing vm_cpu_readings-file-1-of-195.csv: iteration 52
Processing vm_cpu_readings-file-1-of-195.csv: iteration 53
Processing vm_cpu_readings-file-1-of-195.csv: iteration 54
Processing vm_cpu_readings-file-1-of-195.csv: iteration 55
Processing vm_cpu_readings-file-1-of-195.csv: iteration 56
Processing vm_cpu_readings-file-1-of-195.csv: iteration 57
Processing vm_cpu_readings-file-1-of-195.csv: iteration 58
Processing vm_cpu_readings-file-1-of-195.csv: iteration 59
Processing vm_cpu_readings-file-1-of-195.csv: iteration 60
Processing vm_cpu_readings-file-1-of-195.csv: iteration 61
Processing vm_cpu_readings-file-1-of-195.csv: iteration 62
Processing vm_cpu_readings-file-1-of-195.csv: iteration 63
Processing vm_cpu_readings-file-1-of-195.csv: iteration 64
Processing vm_cpu_readings-file-1-of-195.csv: iteration 65
Processing vm_cpu_readings-file-1-of-195.csv: iteration 66
Processing vm_cpu_readings-file-1-of-195.csv: iteration 67
Processing vm_cpu_readings-file-1-of-195.csv: iteration 68
Processing vm_cpu_readings-file-1-of-195.csv: iteration 69
Processing vm_cpu_readings-file-1-of-195.csv: iteration 70
Processing vm_cpu_readings-file-1-of-195.csv: iteration 71
Processing vm_cpu_readings-file-1-of-195.csv: iteration 72
Processing vm_cpu_readings-file-1-of-195.csv: iteration 73
Processing vm_cpu_readings-file-1-of-195.csv: iteration 74
Processing vm_cpu_readings-file-1-of-195.csv: iteration 75
Processing vm_cpu_readings-file-1-of-195.csv: iteration 76
Processing vm_cpu_readings-file-1-of-195.csv: iteration 77
Processing vm_cpu_readings-file-1-of-195.csv: iteration 78
Processing vm_cpu_readings-file-1-of-195.csv: iteration 79
Processing vm_cpu_readings-file-1-of-195.csv: iteration 80
Processing vm_cpu_readings-file-1-of-195.csv: iteration 81
Processing vm_cpu_readings-file-1-of-195.csv: iteration 82
Processing vm_cpu_readings-file-1-of-195.csv: iteration 83
Processing vm_cpu_readings-file-1-of-195.csv: iteration 84
Processing vm_cpu_readings-file-1-of-195.csv: iteration 85
Processing vm_cpu_readings-file-1-of-195.csv: iteration 86
Processing vm_cpu_readings-file-1-of-195.csv: iteration 87
Processing vm_cpu_readings-file-1-of-195.csv: iteration 88
Processing vm_cpu_readings-file-1-of-195.csv: iteration 89
Processing vm_cpu_readings-file-1-of-195.csv: iteration 90
Processing vm_cpu_readings-file-1-of-195.csv: iteration 91
Processing vm_cpu_readings-file-1-of-195.csv: iteration 92
Processing vm_cpu_readings-file-1-of-195.csv: iteration 93
Processing vm_cpu_readings-file-1-of-195.csv: iteration 94
Processing vm_cpu_readings-file-1-of-195.csv: iteration 95
Processing vm_cpu_readings-file-1-of-195.csv: iteration 96
Processing vm_cpu_readings-file-1-of-195.csv: iteration 97
Processing vm_cpu_readings-file-1-of-195.csv: iteration 98
Processing vm_cpu_readings-file-1-of-195.csv: iteration 99
Processing vm_cpu_readings-file-1-of-195.csv: iteration 100
###Markdown
EVALUATING ALTERNATIVE CONFIGS, E.G. REACTIVE VS PREDICTIVE
###Code
from cruncher.cruncher import Cruncher
c = Cruncher('cruncher_conf/experiment_1/')
c.run_experiment()
c.visualize('D:/@TUM/PhD/FINAL/experimentresults/data')
# Visualizing costs of deployments in the experiments on resource limits (multilayered aspect)
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
labels = ['50%', '100%', '150%', '200%', '250%']
limits_cost = {
'Topo-A': [ 0.18645, 0.19105, 0.18672, 0.18743, 0.18054 ],
'Topo-B': [ 0.22352, 0.22313, 0.22799, 0.22776, 0.21604 ],
'Topo-C': [ 0.23489, 0.22202, 0.23566, 0.27626, 0.23742 ],
'Topo-D': [ 0.27127, 0.2742, 0.27424, 0.26699, 0.27476]
}
x = np.arange(len(labels)) # the label locations
width = 0.2 # the width of the bars
fig, ax = plt.subplots(figsize = (10, 5))
rects = []
pos = [(-1, 1), (-1, 0), (1, 0), (1, 1)]
i = 0
for topo_name, cost_vector in limits_cost.items():
rects.append(ax.bar(x + pos[i][0] * (pos[i][1] + 0.5) * width, cost_vector, width, label=topo_name))
i += 1
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Cost, USD')
ax.set_ylim((0, 0.33))
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
for p in ax.patches:
ax.annotate(format(p.get_height()), (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center',
xytext=(0, 25),
textcoords='offset points', rotation=90)
fig.tight_layout()
plt.legend(loc = 'upper center', ncol = 4, bbox_to_anchor = (0.5, -0.1))
plt.show()
###Output
_____no_output_____
###Markdown
PERFORMANCE EVALUATION OF THE SIMULATOR
###Code
from autoscalingsim import simulator
import pandas as pd
import collections
import time
import pickle
simulation_steps_ms = [10,20,30,40,50,60,70,80,90,100]
starting_time = pd.Timestamp("2020-09-17T10:00:00")
time_to_simulate = pd.Timedelta(10, unit = 'm')
repeats = 10
results = collections.defaultdict(list)
for sim_step_raw in simulation_steps_ms:
print(f'current simulation step is {sim_step_raw} ms')
for _ in range(repeats):
start = time.time()
simulation_step = pd.Timedelta(sim_step_raw, unit = 'ms')
config_dir = "experiments/testazuremanual2"
results_dir = None
sim = simulator.Simulator(simulation_step, starting_time, time_to_simulate)
sim.add_simulation(config_dir, results_dir)
sim.start_simulation()
results[sim_step_raw].append(time.time() - start)
pickle.dump( results, open( "performance_test_results_raw.pickle", "wb" ) )
import numpy as np
from matplotlib import pyplot as plt
res = pickle.load( open( "performance_test_results_raw.pickle", "rb" ) )
results_means = [np.mean(times_per_simstep) / (10 * 60) for times_per_simstep in res.values()]
results_stds = [np.std(times_per_simstep) / (10 * 60) for times_per_simstep in res.values()]
steps = np.arange(10, 101, 10)
p1 = plt.bar(steps, results_means, 7, yerr=results_stds)
plt.ylabel('Wall clock time per 1 simulated second, s')
plt.xlabel('Simulation step, ms')
plt.xticks(steps)
plt.yticks(np.arange(0.0, 2.0, 0.1))
plt.hlines(0.5, xmin = 5, xmax = 105, colors = 'r', linestyles = 'dashed')
#plt.show()
plt.savefig("./performance_results.png", dpi = 600, bbox_inches='tight')
# Diminishing returns:
(10 * 60_000) / steps
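# Sketch: combine the timings with the number of steps needed to cover the simulated
# 10 minutes, giving the wall-clock cost per executed simulation step (assumes res keeps
# the 10..100 ms insertion order used when it was written).
steps_per_run = (10 * 60_000) / steps
time_per_step = np.array([np.mean(v) for v in res.values()]) / steps_per_run
dict(zip(steps, np.round(time_per_step, 4)))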
# To profile:
# python -m cProfile -o D:\AutoscalingSim\results\profiling_res.txt autoscalingsim-cl.py --step 10 --start "2020-09-17T10:00:00" --confdir "experiments/test" --simtime 1m
import pstats
from pstats import SortKey
p = pstats.Stats('D://AutoscalingSim//results//profiling_res.txt')
p.strip_dirs().sort_stats(SortKey.CALLS).print_stats()
###Output
_____no_output_____
###Markdown
**See [https://github.com/boutproject/boutcore-examples](https://github.com/boutproject/boutcore-examples) for links to the interactive version.**This example examines output from the `blob2d` example included in the `BOUT-dev` repo. \[It was tested with BOUT++ v4.3.2 from Fedora 34\].`blob2d` is a simplified model of an isolated 'blob' or 'filament'. These are coherent, field-aligned structures that are common in the scrape-off layer of tokamaks. `blob2d` represents the evolution only in the plane perpendicular to the magnetic field, with approximate closures describing parallel currents to the sheath and loss of density due to parallel flows. The 'blob' is created by initialising the simulation with a Gaussian density perturbation on a constant background.This notebook is strongly based on [the blob2d notebook in the xBOUT-examples](https://github.com/boutproject/xBOUT-examples/blob/master/blob2d/blob2d_example.ipynb).Contents:* Setup* Running the simulation* Load* Plot* Animate* Analyse Setup
###Code
# set up matplotlib
%matplotlib notebook
from matplotlib import pyplot as plt
plt.rcParams["figure.figsize"] = (16, 8)
plt.rcParams.update({"font.size": 14})
import numpy as np
from xbout import open_boutdataset
# The physics model we are going to run
import blob2d
# The simulation requires a folder from which options are read, and output is written.
path = "blob"
# Make sure we have the folder "blob" and options file "BOUT.inp" is present
blob2d.ensure_blob(path)
# We must call init only once
# Restart the kernel if you want to use a different working directory
blob2d.bc.init(["-d", path])
###Output
_____no_output_____
###Markdown
Running the simulation
###Code
# Only run simulation for 10 steps
model = blob2d.Blob2D(nout=10)
print("We are now running the simulation ... that might take some time ...")
model.solve()
print("The simulation is finished!")
###Output
We are now running the simulation ... that might take some time ...
----------Parameters: ------------
Omega_i = 1.681764e+07 /s,
c_s = 1.550006e+04 m/s,
rho_s = 9.216552e-04 m
delta_* = rho_s * (dn/n) * 9.372772e+00
The simulation is finished!
###Markdown
Load. First we need to open the Dataset. The chunks argument to `open_boutdataset()` is needed so that dask can parallelise operations over the time dimension (by default the chunk size is the size of the arrays in the files being loaded). See http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance. For this example it doesn't matter, but for larger ones it can be very useful. Note: a warning from `open_boutdataset()` is expected. For `blob2d` the z-direction is a periodic, binormal direction with lengths normalised to the background hybrid gyro-radius `rho_s=sqrt(T_e/m_i)`, rather than the usual toroidal angle. `'dz'` is used and `'ZMIN'` and `'ZMAX'` are ignored.
###Code
ds = open_boutdataset(f"{path}/BOUT.dmp.*.nc", f"{path}/BOUT.inp", chunks={"t": 4})
# Use squeeze() to get rid of the y-dimension, which has length 1 as blob2d does not
# simulate the parallel dimension.
ds = ds.squeeze(drop=True)
###Output
Read in:
<xbout.BoutDataset>
Contains:
<xarray.Dataset>
Dimensions: (t: 11, x: 260, y: 1, z: 256)
Coordinates:
* t (t) float64 0.0 50.0 100.0 150.0 200.0 ... 350.0 400.0 450.0 500.0
* x (x) int64 0 1 2 3 4 5 6 7 8 ... 252 253 254 255 256 257 258 259
* y (y) float64 0.5
* z (z) float64 0.0 0.3 0.6 0.9 1.2 1.5 ... 75.3 75.6 75.9 76.2 76.5
Data variables:
dx (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
dy (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g11 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g22 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g33 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g12 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g13 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g23 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_11 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_22 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_33 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_12 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_13 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
g_23 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
J (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
Bxy (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
G1 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
G2 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
G3 (x, y) float64 dask.array<chunksize=(260, 1), meta=np.ndarray>
phi (t, x, y, z) float64 dask.array<chunksize=(4, 260, 1, 256), meta=np.ndarray>
ncalls (t) int32 dask.array<chunksize=(4,), meta=np.ndarray>
ncalls_e (t) int32 dask.array<chunksize=(4,), meta=np.ndarray>
ncalls_i (t) int32 dask.array<chunksize=(4,), meta=np.ndarray>
n (t, x, y, z) float64 dask.array<chunksize=(4, 260, 1, 256), meta=np.ndarray>
omega (t, x, y, z) float64 dask.array<chunksize=(4, 260, 1, 256), meta=np.ndarray>
Attributes:
BOUT_REVISION: Unknown
metadata: {'BOUT_VERSION': 4.32, 'iteration': 9, 'zperiod': 1, 'MXS...
options: # settings file for BOUT++\n#\n# Blob simulation in a 2D ...
Metadata:
{ 'BOUT_VERSION': 4.32,
'MXG': 2,
'MXSUB': 256,
'MYG': 0,
'MYSUB': 1,
'MZ': 256,
'MZG': 0,
'MZSUB': 256,
'NXPE': 1,
'NYPE': 1,
'NZPE': 1,
'ZMAX': 1.0,
'ZMIN': 0.0,
'dz': 0.3,
'fine_interpolation_factor': 8,
'iteration': 9,
'ixseps1': 260,
'ixseps2': 260,
'jyseps1_1': -1,
'jyseps1_2': 0,
'jyseps2_1': 0,
'jyseps2_2': 0,
'keep_xboundaries': 1,
'keep_yboundaries': 0,
'nx': 260,
'ny': 1,
'ny_inner': 0,
'nz': 256,
'zperiod': 1}
Options:
<boutdata.data.BoutOptionsFile object at 0x7f6ec7b22280>
###Markdown
We choose to create a 'coordinate' for the x-dimension from `dx`. This is not done generically because `dx` can have two-dimensional dependence \- as well as varying radially it can be different e.g. in core and PF regions. However, for a slab geometry like `blob2d`, `dx` is a constant so it can easily be used to create a one-dimensional x-coordinate. This ensures we get a sensible aspect ratio in plots.

A z-coordinate was already created from `dz`, because `dz` is always a scalar, so it can always be used to create a 1d 'dimension coordinate'.
###Code
dx = ds["dx"].isel(x=0).values
# Get rid of existing "x" coordinate, which is just the index values.
ds = ds.drop("x")
# Create a new coordinate, which is length in units of rho_s
ds = ds.assign_coords(x=np.arange(ds.sizes["x"])*dx)
###Output
_____no_output_____
###Markdown
Plot
===

Here we use xarray methods to plot simple slices. First make some plots of the initial state
###Code
ds_initial = ds.isel(t=0)
plt.figure()
ax = plt.subplot(131)
ax.set_aspect("equal")
ds_initial["n"].plot(x="x", y="z")
ax = plt.subplot(132)
ax.set_aspect("equal")
ds_initial["omega"].plot(x="x", y="z")
ax = plt.subplot(133)
ax.set_aspect("equal")
ds_initial["phi"].plot(x="x", y="z")
###Output
_____no_output_____
###Markdown
Plots at a time point during the blob evolution
###Code
tind = 10
# Uses xarray methods to plot simple slices
plt.figure()
ax = plt.subplot(131)
ax.set_aspect("equal")
ds.isel(t=tind)["n"].plot(x="x", y="z")
ax = plt.subplot(132)
ax.set_aspect("equal")
ds.isel(t=tind)["omega"].plot(x="x", y="z")
ax = plt.subplot(133)
ax.set_aspect("equal")
ds.isel(t=tind)["phi"].plot(x="x", y="z")
###Output
_____no_output_____
###Markdown
Slicing to a 1d Dataset automatically produces a 1d plot, here a radial density profile through the blob centre
###Code
plt.figure()
ds.isel(t=10, z=128)["n"].plot()
###Output
_____no_output_____
###Markdown
Animate
=======

Use `xbout` methods through the `.bout` accessor to create animations. For a DataArray
###Code
ds["n"].bout.animate2D(aspect="equal")
###Output
n data passed has 3 dimensions - will use animatplot.blocks.Pcolormesh()
###Markdown
Animate several fields from a Dataset with `animate_list()`
###Code
ds.bout.animate_list(["n", "omega", "phi"], ncols=3, aspect="equal")
###Output
_____no_output_____
###Markdown
DataArray objects can be passed to `animate_list()` (as long as they all have the same time-axis length), e.g. to combine 1d and 2d plots.

Keyword arguments to `animate_list()` can be passed lists (with as many entries as variables being plotted), to set a per-variable value.

Animations can be saved by passing a 'save_as' argument giving a name for the output file, producing a .gif file.
###Code
ds.bout.animate_list(["n", "omega", "phi", ds["n"].isel(z=128)], aspect=["equal", "equal", "equal", "auto"], save_as="blob")
###Output
_____no_output_____
###Markdown
Analyse
=======

Perform some analysis of the blob to demonstrate more `xarray` methods. Find the centre of mass of the blob using `.integrate()` ([documentation](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.integrate.html)).
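As a reminder of the formula implemented below, the centre of mass of the density perturbation $\delta n = n - n_0$ is $X_{CoM} = \int x\, \delta n\, \mathrm{d}x\, \mathrm{d}z \,\big/ \int \delta n\, \mathrm{d}x\, \mathrm{d}z$, and similarly for $Z_{CoM}$ with $z$ in place of $x$.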
###Code
background_density = 1.0
delta_n = ds["n"] - background_density
integrated_density = delta_n.integrate(dim=["x", "z"])
ds["CoM_x"] = (ds["x"]*delta_n).integrate(dim=["x", "z"]) / integrated_density
ds["CoM_z"] = (ds["z"]*delta_n).integrate(dim=["x", "z"]) / integrated_density
plt.figure()
plt.subplot(121)
ds["CoM_x"].plot()
plt.subplot(122)
ds["CoM_z"].plot()
###Output
_____no_output_____
###Markdown
Find the blob velocity using `.differentiate()` ([documentation](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.differentiate.html)). This is a somewhat crude method, using finite difference on the output timestep. It would be more accurate to calculate and integrate the ExB velocity.
###Code
v_x = ds["CoM_x"].differentiate("t")
v_z = ds["CoM_z"].differentiate("t")
plt.figure()
plt.subplot(121)
v_x.plot()
plt.ylabel("Radial CoM velocity")
plt.subplot(122)
v_z.plot()
plt.ylabel("Binormal CoM velocity")
###Output
_____no_output_____
###Markdown
Analyzing Edit Activity of Swiss Cities
###Code
%matplotlib inline
from gastrodon import RemoteEndpoint, QName, ttl, URIRef, inline
from wikidata.client import Client
import requests
import json
from io import StringIO
from collections import Counter, defaultdict
import time
import datetime
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Cities

In this section, we get the cities of Switzerland. This is performed by the SPARQL query:
###Code
"""
SELECT ?city ?cityLabel WHERE {
?city wdt:P31 wd:Q54935504
SERVICE wikibase:label {
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
}
} ORDER BY ?cityLabel
"""
###Output
_____no_output_____
###Markdown
Here, **wdt:P31** corresponds to the predicate **instance of**, and **wd:Q54935504** corresponds to the entity **cities of Switzerland**. We obtain the label of each returned entity (city) by using the wikibase:label service. Finally, the results are ordered alphabetically.
###Code
prefixes = inline("""
@prefix wd: <http://www.wikidata.org/entity/> .
@prefix wdt: <http://www.wikidata.org/prop/direct/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
""").graph
endpoint = RemoteEndpoint(
"http://query.wikidata.org/sparql"
,prefixes=prefixes
)
locs_in_switzerland = endpoint.select("""
SELECT ?city ?cityLabel WHERE {
?city wdt:P31 wd:Q54935504
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
} ORDER BY ?cityLabel""")
locs_in_switzerland.tail(25)
# TODO: combine SPARQL and visualizations
###Output
_____no_output_____
###Markdown
Here, we manually write the entity IDs of some of the cities of Switzerland.
###Code
bern = 'Q70'
geneva = 'Q71'
zurich = 'Q72'
basel = 'Q78'
canton_zurich = 'Q11943'
client = Client()
print(client.get(zurich, load=True))
print(client.get(canton_zurich, load=True))
###Output
<wikidata.entity.Entity Q72 'Zürich'>
<wikidata.entity.Entity Q11943 'canton of Zürich'>
###Markdown
Get the Complete Edit Activity
###Code
def edit_hist(item):
"""
This function returns the complete edit history of the entity with the given entity ID.
Parameters
----------
item: str
Q ID of the entity. For Zurich, this corresponds to Q72.
Returns
-------
edit_hist : pandas.DataFrame
Complete edit history of the given entity. The returned dataframe has the following
columns: UserID, User, Timestamp, Size, Comment
"""
fields = ['timestamp', 'user', 'userid', 'comment', 'size']
field_str = '%7C'.join(fields)
limit = '500'
query_template = 'https://www.wikidata.org/w/api.php?action=query&format=json&prop=revisions&titles={item}&rvprop={fields}&rvslots=main&rvlimit={limit}'
query = query_template.format(
item=item, fields=field_str, limit=limit)
curr_hist_dict = requests.get(query).json()
res_str = 'UserID,User,Timestamp,Size,Comment\n'
while True:
for page in curr_hist_dict['query']['pages'].values():
for revision in page['revisions']:
res_str += str(revision['userid']) + ','
res_str += revision['user'] + ','
res_str += revision['timestamp'] + ','
res_str += str(revision['size']) + ','
raw_comment = revision['comment']
escaped_comment = raw_comment.translate(str.maketrans({',' : '\\,'}))
res_str += escaped_comment + '\n'
try:
continue_field = curr_hist_dict['continue']['rvcontinue']
new_query = query + '&rvcontinue=' + continue_field
curr_hist_dict = requests.get(new_query).json()
except KeyError:
break
return pd.read_csv(StringIO(res_str), quoting=3, escapechar='\\')
###Output
_____no_output_____
###Markdown
Get Visit History For the Last 60 Days
###Code
def visit_hist(item):
"""
Get the visit history of the entity with the given entity ID for the last 60 days.
Parameters
----------
item: str
Q ID of the entity. For Zurich, this corresponds to Q72.
Returns
-------
visit_hist : pandas.DataFrame
Complete visit history of the given entity. The returned dataframe has the following
columns: Date, VisitCount
"""
query_template = 'https://www.wikidata.org/w/api.php?action=query&format=json&prop=pageviews&titles={item}'
query = query_template.format(item=item)
visit_dict = requests.get(query).json()
res_str = 'Date,VisitCount\n'
for page in visit_dict['query']['pages'].values():
for date, count in page['pageviews'].items():
if count is None:
count = 0
res_str += date + ',' + str(count) + '\n'
return pd.read_csv(StringIO(res_str))
###Output
_____no_output_____
###Markdown
Analysis

In this section, we analyze the edit and visit history data.
###Code
item_to_analyze = zurich
edit_df = edit_hist(item_to_analyze)
visit_df = visit_hist(item_to_analyze)
# Transform the timestamp string columns to python timestamp objects
# for easier time manipulations and filtering
edit_df['Timestamp'] = edit_df['Timestamp'].apply(lambda s: datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ").timetuple())
visit_df['Date'] = visit_df['Date'].apply(lambda s: datetime.datetime.strptime(s, "%Y-%m-%d").timetuple())
###Output
_____no_output_____
###Markdown
Number of Edits per Year

We count the number of edits for each year since the beginning of the dataset and plot the results.
###Code
edit_df_yearly = edit_df['Timestamp'].copy()
edit_df_yearly = edit_df_yearly.transform(lambda t: t.tm_year)
yearly_counts = Counter(edit_df_yearly.values)
fig = plt.figure()
plt.plot(yearly_counts.keys(), yearly_counts.values(), marker='o')
plt.xlabel('Year')
plt.ylabel('Number of Edits')
plt.grid()
plt.title('Number of Edits per Year');
fig.savefig('num_edits.png')
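# An equivalent, more pandas-idiomatic one-liner would be (a sketch, not in the original notebook):
# edit_df['Timestamp'].apply(lambda t: t.tm_year).value_counts().sort_index().plot(marker='o')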
###Output
_____no_output_____
###Markdown
Number of Edits per Month of Year

We count the number of edits in each month of a given year and plot the results.
###Code
year = 2018
monthly_counts = defaultdict(int)
for row in edit_df['Timestamp']:
if row.tm_year == year:
monthly_counts[row.tm_mon] += 1
plt.plot(monthly_counts.keys(), monthly_counts.values(), marker='o')
plt.xticks(np.arange(1, 13))
plt.xlabel('Month')
plt.ylabel('Number of Edits')
plt.grid()
plt.title('Number of Edits per Month in {}'.format(year));
###Output
_____no_output_____
###Markdown
Size Change Frequency of Edits

We group edits based on how much content was added/removed and plot a histogram of the results.
###Code
content_size = edit_df['Size'].values
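# Assuming the API's default newest-first ordering of revisions, (newer - older) gives the size
# change introduced by each edit; the oldest revision's full size is appended as its own change.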
diff = -(content_size[1:] - content_size[:-1])
new_column = np.append(diff, edit_df['Size'].iloc[-1])
edit_size_diffs = edit_df.copy()
edit_size_diffs['Size'] = pd.Series(new_column)
intervals = np.arange(0, 1001, 50).astype(float)
intervals = np.insert(intervals, 0, -np.inf)
intervals = np.append(intervals, np.inf)
counts = np.zeros(len(intervals) - 1, dtype=np.int64)
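# Count how many edits fall into each size-change interval [lo, hi)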
for (idx,), lo in np.ndenumerate(intervals[:-1]):
hi = intervals[idx + 1]
cnt = len(edit_size_diffs[(edit_size_diffs['Size'] >= lo) & (edit_size_diffs['Size'] < hi)])
counts[idx] = cnt
labels = []
for point in intervals[1:-1]:
labels.append('< {}'.format(int(point)))
labels.append('< inf')
indices = np.arange(len(counts))
fig = plt.figure(figsize=(9, 5))
plt.bar(indices, counts, align='center')
plt.xlabel('Amount of change (Bytes)')
plt.ylabel('Number of edits')
plt.title('Size Change Frequencies')
plt.grid()
plt.xticks(indices, labels, rotation=90);
fig.savefig('size_freq.png')
###Output
_____no_output_____
###Markdown
Content Size vs Time

We plot the size of the content against the number of edits.
###Code
size = edit_df['Size'].values
size = size[::-1]
plt.plot(size)
plt.xlabel('Number of edits')
plt.ylabel('Content size (Bytes)')
plt.title('Content Size over Time');
###Output
_____no_output_____
###Markdown
Views per Last 60 Days

We plot the number of views over the last 60 days.

**Note**: Unfortunately, the Wikidata API does not allow getting visit count data for more than the last 60 days.
###Code
keys = []
vals = []
for _, row in visit_df.iterrows():
keys.append('{}-{}'.format(row['Date'].tm_mon, row['Date'].tm_mday))
vals.append(row['VisitCount'])
plt.figure(figsize=(12, 10))
num_days = 60
plt.plot(vals[:num_days], marker='o')
plt.xlabel('Day')
plt.xticks(np.arange(num_days), keys[:num_days], rotation=90)
plt.grid()
plt.ylabel('Number of Visits')
plt.title('Number of Visits per Day');
###Output
_____no_output_____
###Markdown
Welcome to my YouTube Trending videos analysis!

In this notebook I will try to understand what patterns US videos share to get into the trending list, or *how to be in trend?* Questions I will answer during the analysis:

* How important is missing data in videos?
* What are the most frequent words people use?
* What are the distributions of likes, dislikes, views and comments?
* What are the most popular video categories?
* What are the most liked, disliked, discussed and positive categories?

P.S. It's my first analysis, so don't be too strict.

Libraries and main functions
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
sns.set()
import json
from collections import Counter
from datetime import datetime
import warnings
warnings.filterwarnings("ignore")
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
def ids_to_category(id, word_list):
return word_list[id]
def plot_distribution(data, distc, target):
facet = sns.FacetGrid(data, hue=target, aspect=3, height=5, palette="ch:.25")
facet.map(sns.distplot, distc)
facet.add_legend()
plt.show()
def binary_pie_plot(data, labels, column, title=''):
sizes = np.zeros((len(labels),))
sizes[0] = (data[column][data[column] == True].shape[0]*100)/data[column].shape[0]
sizes[1] = (data[column][data[column] == False].shape[0]*100)/data[column].shape[0]
fig1, ax1 = plt.subplots(figsize=(10, 10))
wedges, texts, autotexts = ax1.pie(sizes, labels=labels, autopct='%1.1f%%', textprops=dict(color="w"),
colors=['#D3BBAC', '#76485F'], startangle=90)
ax1.legend(wedges, labels,
title=column,
loc="center left",
bbox_to_anchor=(1, 0, 0.5, 1))
plt.setp(autotexts, size=20, weight="bold")
ax1.set_title(title, fontdict=dict(fontsize=30))
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
def datetime_time(x):
return x.split('T')[0]
def days_between(d1, d2):
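    # d1 is the trending date in the dataset's '%y.%d.%m' format; d2 is the publish date as 'YYYY-MM-DD'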
d1 = datetime.strptime(d1, "%y.%d.%m")
d2 = datetime.strptime(d2, "%Y-%m-%d")
return abs((d2 - d1).days)
###Output
_____no_output_____
###Markdown
Data description
###Code
videos = pd.read_csv('USvideos.csv')
videos.drop_duplicates(inplace=True)
videos.head(5)
videos.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 40901 entries, 0 to 40948
Data columns (total 16 columns):
video_id 40901 non-null object
trending_date 40901 non-null object
title 40901 non-null object
channel_title 40901 non-null object
category_id 40901 non-null int64
publish_time 40901 non-null object
tags 40901 non-null object
views 40901 non-null int64
likes 40901 non-null int64
dislikes 40901 non-null int64
comment_count 40901 non-null int64
thumbnail_link 40901 non-null object
comments_disabled 40901 non-null bool
ratings_disabled 40901 non-null bool
video_error_or_removed 40901 non-null bool
description 40332 non-null object
dtypes: bool(3), int64(5), object(8)
memory usage: 4.5+ MB
###Markdown
Little data processing
###Code
json_file = None
with open('US_category_id.json') as f:
json_file = json.loads(f.read())
word_list = {}
for i in range(len(json_file['items'])):
word_list[int(json_file['items'][i]['id'])] = json_file['items'][i]['snippet']['title']
videos['category_id'] = videos['category_id'].apply(lambda x: ids_to_category(x, word_list))
videos['publish_time'] = videos['publish_time'].apply(lambda x: datetime_time(x))
###Output
_____no_output_____
###Markdown
NaN values

First, we are going to look at missing values. Why is this important? A lack of some content can be one of the reasons a video does not end up in either trends or recommendations.
###Code
videos.isnull().sum()
###Output
_____no_output_____
###Markdown
As we can see, only *description* has **NaN** values, but that is not quite true. Let's look at *tags*.
###Code
videos['tags'][videos['tags'] == '[none]'].shape[0]
###Output
_____no_output_____
###Markdown
Well, instead of **NaN** values, the *tags* column has an analogue: **[none]** values stored as a *string*. There are about *1535* such **NaN**-like values in the *tags* column. Let's convert them to real **NaN** values for convenience.
###Code
videos['tags'][videos['tags'] == '[none]'] = None
videos.isnull().sum()
###Output
_____no_output_____
###Markdown
All **NaN-like** values have been replaced by real **NaN** values. Let's look at the distributions of the *views, likes, comment_count* and *dislikes* columns depending on whether *description* and *tags* are **NaN** or **not-NaN**, to determine whether having tags or a description matters for getting into trends. Before plotting, let's normalize the data via *log* and map the values of *tags* and *description* to **NaN** or **Not-NaN** strings.
###Code
df_plot = videos[['likes', 'views', 'dislikes', 'comment_count', 'tags', 'description']].copy()
# np.log(x + 1)
df_plot['likes'] = (df_plot['likes'] + 1).transform(np.log)
df_plot['views'] = (df_plot['views'] + 1).transform(np.log)
df_plot['dislikes'] = (df_plot['dislikes'] + 1).transform(np.log)
df_plot['comment_count'] = (df_plot['comment_count'] + 1).transform(np.log)
df_plot = df_plot.fillna('NaN')
df_plot['tags'][df_plot['tags'] != 'NaN'] = 'Not-NaN'
df_plot['description'][df_plot['description'] != 'NaN'] = 'Not-NaN'
for i in ['tags', 'description']:
for j in ['likes', 'views', 'dislikes', 'comment_count']:
plot_distribution(df_plot, j, i)
###Output
_____no_output_____
###Markdown
So, the **NaN** distributions are shifted slightly to the left, which means that all indicators (views, likes, dislikes, comment count) decrease compared with the **Not-NaN** case. The most different are the *views* distributions that depend on *description*. Let's see what is more important: *description* or *tags*.

Likes average
###Code
avg_nan = df_plot['likes'][df_plot['tags'] == 'NaN'].mean()
avg_not_nan = df_plot['likes'][df_plot['tags'] == 'Not-NaN'].mean()
print('Difference between two avg by tags:', avg_nan - avg_not_nan)
avg_nan = df_plot['likes'][df_plot['description'] == 'NaN'].mean()
avg_not_nan = df_plot['likes'][df_plot['description'] == 'Not-NaN'].mean()
print('Difference between two avg by description:', avg_nan - avg_not_nan)
###Output
Difference between two avg by tags: -1.1089584380589166
Difference between two avg by description: -2.054519825571262
###Markdown
Dislikes average
###Code
avg_nan = df_plot['dislikes'][df_plot['tags'] == 'NaN'].mean()
avg_not_nan = df_plot['dislikes'][df_plot['tags'] == 'Not-NaN'].mean()
print('Difference between two avg by tags:', avg_nan - avg_not_nan)
avg_nan = df_plot['dislikes'][df_plot['description'] == 'NaN'].mean()
avg_not_nan = df_plot['dislikes'][df_plot['description'] == 'Not-NaN'].mean()
print('Difference between two avg by description:', avg_nan - avg_not_nan)
###Output
Difference between two avg by tags: -0.7793763794848916
Difference between two avg by description: -0.9579849364948823
###Markdown
Views average
###Code
avg_nan = df_plot['views'][df_plot['tags'] == 'NaN'].mean()
avg_not_nan = df_plot['views'][df_plot['tags'] == 'Not-NaN'].mean()
print('Difference between two avg by tags:', avg_nan - avg_not_nan)
avg_nan = df_plot['views'][df_plot['description'] == 'NaN'].mean()
avg_not_nan = df_plot['views'][df_plot['description'] == 'Not-NaN'].mean()
print('Difference between two avg by description:', avg_nan - avg_not_nan)
###Output
Difference between two avg by tags: -0.6136751153672488
Difference between two avg by description: -1.1786658840617399
###Markdown
Comment count average
###Code
avg_nan = df_plot['comment_count'][df_plot['tags'] == 'NaN'].mean()
avg_not_nan = df_plot['comment_count'][df_plot['tags'] == 'Not-NaN'].mean()
print('Difference between two avg by tags:', avg_nan - avg_not_nan)
avg_nan = df_plot['comment_count'][df_plot['description'] == 'NaN'].mean()
avg_not_nan = df_plot['comment_count'][df_plot['description'] == 'Not-NaN'].mean()
print('Difference between two avg by description:', avg_nan - avg_not_nan)
###Output
Difference between two avg by tags: -1.1275278266461193
Difference between two avg by description: -1.4552704786152475
###Markdown
Following the values above, I can confirm my hypothesis that videos with **NaN** values get fewer *likes, dislikes, views* and *comments*. Based on the differences between the averages, I can say that a missing description matters much more than missing tags.

Comment and rating ability

The second thing we have to find out: is it important not to have comments or ratings disabled?
###Code
binary_pie_plot(videos, ['Comments disabled', 'Comments available'], 'comments_disabled', 'Comments')
binary_pie_plot(videos, ['Ratings disabled', 'Ratings available'], 'ratings_disabled', 'Ratings')
###Output
_____no_output_____
###Markdown
After plotting the availability of comments and ratings, we can see that only a tiny fraction of videos have commenting or rating disabled; in other words, most trending videos let viewers give feedback to the authors.

The most popular categories
###Code
sns.catplot(x="category_id", kind="count", palette="ch:.25", data=videos, height=5, aspect=4);
plt.show()
###Output
_____no_output_____
###Markdown
Following the plot above, we can see that the most trending category is Entertainment. Let's look at the top 10 categories.
###Code
for j, i in enumerate(Counter(videos['category_id']).most_common(10)):
print(str(j+1)+'.'+i[0]+':', i[1])
top_categories = [i[0] for i in Counter(videos['category_id']).most_common(10)]
###Output
1.Entertainment: 9944
2.Music: 6467
3.Howto & Style: 4142
4.Comedy: 3453
5.People & Blogs: 3208
6.News & Politics: 2485
7.Science & Technology: 2397
8.Film & Animation: 2343
9.Sports: 2172
10.Education: 1655
###Markdown
Now that we know the top 10 categories, we can find the most liked, disliked, positive, discussed and viewed categories among them.
###Code
liked = []
disliked = []
viewed = []
liked_disliked = []
discussed = []
for i in top_categories:
viewed.append((i, int(videos['views'][videos['category_id'] == i].mean())))
disliked.append((i, int(videos['dislikes'][videos['category_id'] == i].mean())/viewed[-1][1]))
liked.append((i, int(videos['likes'][videos['category_id'] == i].mean())/viewed[-1][1]))
discussed.append((i, int(videos['comment_count'][videos['category_id'] == i].mean())/viewed[-1][1]))
liked_disliked.append((i, liked[-1][1]/disliked[-1][1]))
liked.sort(key=lambda x: x[1], reverse=True)
disliked.sort(key=lambda x: x[1], reverse=True)
viewed.sort(key=lambda x: x[1], reverse=True)
liked_disliked.sort(key=lambda x: x[1], reverse=True)
discussed.sort(key=lambda x: x[1], reverse=True)
###Output
_____no_output_____
###Markdown
Liked
###Code
print('The most liked categories (likes per view):')
print('-------------------------------------------')
for j, i in enumerate(liked):
print(str(j+1)+'. '+i[0] + ':', str(i[1])+' likes/views')
###Output
The most liked categories (likes per view):
-------------------------------------------
1. Comedy: 0.04228573899214924 likes/views
2. Education: 0.04172924581087847 likes/views
3. Howto & Style: 0.03992019217366444 likes/views
4. People & Blogs: 0.037953023422952537 likes/views
5. Music: 0.035302805451800354 likes/views
6. Entertainment: 0.025739364091988688 likes/views
7. Science & Technology: 0.023680427745194042 likes/views
8. Film & Animation: 0.02278590948758461 likes/views
9. Sports: 0.02239572388768694 likes/views
10. News & Politics: 0.012311232270341031 likes/views
###Markdown
Disliked
###Code
print('The most disliked categories (dislikes per view):')
print('-------------------------------------------------')
for j, i in enumerate(disliked):
print(str(j+1)+'. '+i[0] + ':', str(i[1])+' dislikes/views')
###Output
The most disliked categories (dislikes per view):
-------------------------------------------------
1. News & Politics: 0.0028357332757527093 dislikes/views
2. Entertainment: 0.002086387266170106 dislikes/views
3. People & Blogs: 0.0020724576132762733 dislikes/views
4. Comedy: 0.0014119341538765024 dislikes/views
5. Howto & Style: 0.0013409831095139598 dislikes/views
6. Science & Technology: 0.0013042695159089827 dislikes/views
7. Music: 0.0012751467579168046 dislikes/views
8. Sports: 0.0011656115489759094 dislikes/views
9. Education: 0.001144304351301436 dislikes/views
10. Film & Animation: 0.0008352899191048632 dislikes/views
###Markdown
Viewed
###Code
print('The most average viewed categories:')
print('-----------------------------------')
for j, i in enumerate(viewed):
print(str(j+1)+'. '+i[0] + ':', str(i[1])+' views')
###Output
The most average viewed categories:
-----------------------------------
1. Music: 6204776 views
2. Film & Animation: 3101917 views
3. Entertainment: 2067689 views
4. Sports: 2027262 views
5. People & Blogs: 1530550 views
6. Comedy: 1480239 views
7. Science & Technology: 1449087 views
8. Howto & Style: 982861 views
9. Education: 713097 views
10. News & Politics: 592792 views
###Markdown
Likes / dislikes
###Code
print('The most positive categories (likes/dislikes):')
print('----------------------------------------------')
for j, i in enumerate(liked_disliked):
print(str(j+1)+'. '+i[0] + ':', str(i[1]))
###Output
The most positive categories (likes/dislikes):
----------------------------------------------
1. Education: 36.466911764705884
2. Comedy: 29.948803827751195
3. Howto & Style: 29.769347496206375
4. Music: 27.685288169868556
5. Film & Animation: 27.279042840602084
6. Sports: 19.21371138383411
7. People & Blogs: 18.313051702395967
8. Science & Technology: 18.156084656084655
9. Entertainment: 12.336810384793695
10. News & Politics: 4.341463414634147
###Markdown
Discussed
###Code
print('The most discussed categories (comments/views):')
print('-----------------------------------------------')
for j, i in enumerate(discussed):
print(str(j+1)+'. '+i[0] + ':', str(i[1]) + ' comments/views')
###Output
The most discussed categories (comments/views):
-----------------------------------------------
1. Howto & Style: 0.0056722161119425836 comments/views
2. People & Blogs: 0.005041978373787201 comments/views
3. Education: 0.004609471081774289 comments/views
4. Comedy: 0.004401316273926035 comments/views
5. News & Politics: 0.004097558671507038 comments/views
6. Entertainment: 0.003572103928588874 comments/views
7. Science & Technology: 0.0034414772888032258 comments/views
8. Music: 0.003121949930182814 comments/views
9. Sports: 0.0025408654628755437 comments/views
10. Film & Animation: 0.0024597692330259 comments/views
###Markdown
So, following the data above, *Education* has some of the best indicators for *likes*, *likes/dislikes* and *dislikes*, but with a relatively small number of views. The top 3 most discussed categories are *Howto & Style*, *People & Blogs* and *Education*. According to the statistics, people don't love *News & Politics* compared with the other categories. And the most viewed categories are exactly *Music*, *Film & Animation* and *Entertainment*. Having looked at the main indicators, let's try to understand which category spreads into trends the fastest.
###Code
tmp = []
for i in range(videos.shape[0]):
tmp.append(days_between(videos['trending_date'].iloc[i], videos['publish_time'].iloc[i]))
videos['days_to_trend'] = tmp
trended = []
for i in set(videos['category_id']):
trended.append((i, int(videos['days_to_trend'][videos['category_id'] == i].mean())))
trended.sort(key=lambda x: x[1], reverse=False)
print('Average count of days to trend for each category:')
print('-------------------------------------------------')
for j, i in enumerate(trended):
print(str(j+1)+'. '+i[0] + ':', str(i[1]) + ' days')
###Output
Average count of days to trend for each category:
-------------------------------------------------
1. Nonprofits & Activism: 5 days
2. Pets & Animals: 7 days
3. Travel & Events: 7 days
4. Howto & Style: 7 days
5. Comedy: 10 days
6. Shows: 10 days
7. Entertainment: 13 days
8. Music: 14 days
9. People & Blogs: 15 days
10. News & Politics: 18 days
11. Science & Technology: 18 days
12. Gaming: 21 days
13. Sports: 23 days
14. Education: 37 days
15. Film & Animation: 41 days
16. Autos & Vehicles: 43 days
###Markdown
Well, on average the fastest categories are *Nonprofits & Activism*, *Pets & Animals*, *Travel & Events* and *Howto & Style*. What does this data give us? As we can notice, the most popular categories are not the fastest ones, which means we cannot equate these two things.

The Title

Until this section we only analysed what people see after the click (factors that people usually don't notice up front); now let's analyse what people see first - the title.
###Code
stopWords = set(stopwords.words('english'))
tokenizer = RegexpTokenizer(r'\w+')
words = []
for i in range(videos['title'].shape[0]):
    for token in tokenizer.tokenize(videos['title'].iloc[i]):
        token = token.lower()
        # Keep only non-stop-words longer than 2 characters
        if token not in stopWords and len(token) > 2:
            words.append(token)
c = Counter()
for word in words:
c[word] += 1
c.most_common(20)
###Output
_____no_output_____
###Markdown
Well, we have seen the most common words in titles. Now, let's look at how long trending titles are.
###Code
avg_len = 0
max_len = 0
min_len = 10000000000000000000000
for i in range(videos.shape[0]):
    tmp = len(videos['title'].iloc[i])
    avg_len += tmp
    if min_len > tmp:
        min_len = tmp
    if max_len < tmp:
        max_len = tmp
avg_len = int(avg_len / videos.shape[0])
max_len
print('Maximum length of title:', max_len)
print('Minimum length of title:', min_len)
print('Average length of title:', avg_len)
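# The same statistics can be obtained directly with pandas (a sketch, not in the original):
# videos['title'].str.len().agg(['min', 'max', 'mean'])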
###Output
Maximum length of title: 100
Minimum length of title: 3
Average length of title: 48
###Markdown
Mini Project 2 - ISYE-6644

Author: Oscar Cortez
###Code
from generators import randu, dessert_island, glibc
from rand_tests import (gof_test,
ks_test,
run_test_up_down,
run_test_above_below_mean,
correlation_test)
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import time
import seaborn as sns
sns.set_style("darkgrid")
sizes = [1, 10, 100, 1000, 10000]
seed = 12345
dist = {"numpy": np.random.random,
"dessert_island": lambda x: dessert_island(seed, x),
"glibc": lambda x: glibc(seed, x),
"randu": lambda x: randu(seed, x)}
results = {"generator": [],
"size": [],
"time": []}
for s in sizes:
for generator, rand in dist.items():
results["generator"].append(generator)
results["size"].append(s)
start = time.perf_counter()
values = rand(s)
end = time.perf_counter()
results["time"].append(end - start)
df_results = pd.DataFrame(results)
###Output
_____no_output_____
###Markdown
Plot timing results
###Code
fig, ax = plt.subplots(1, figsize=(10, 7))
df_results.pivot("size", "generator", "time").plot(ax=ax)
plt.legend(fontsize=18)
plt.title("Elapsed execution time per generator", fontsize=22)
plt.ylabel("Time (seconds)", fontsize=18)
plt.xlabel("Array Size", fontsize=18)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.savefig("time.svg")
plt.show()
###Output
_____no_output_____
###Markdown
Plot in 3D

Randu
###Code
%matplotlib notebook
randu_rands = randu(seed, 1000)
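# Plotting successive triples (R_i, R_i+1, R_i+2) exposes RANDU's well-known weakness:
# the points collapse onto a small number of parallel planes in 3D.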
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(randu_rands[:-2], randu_rands[1:-1], randu_rands[2:])
ax.set_xlabel('Ri')
ax.set_ylabel('Ri+1')
ax.set_zlabel('Ri+2')
plt.show()
###Output
_____no_output_____
###Markdown
Dessert Island
###Code
randu_rands = dessert_island(seed, 1000)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(randu_rands[:-2], randu_rands[1:-1], randu_rands[2:])
ax.set_xlabel('Ri')
ax.set_ylabel('Ri+1')
ax.set_zlabel('Ri+2')
plt.show()
###Output
_____no_output_____
###Markdown
glibc
###Code
randu_rands = glibc(seed, 1000)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(randu_rands[:-2], randu_rands[1:-1], randu_rands[2:])
ax.set_xlabel('Ri')
ax.set_ylabel('Ri+1')
ax.set_zlabel('Ri+2')
plt.show()
###Output
_____no_output_____
###Markdown
Run PRN tests
###Code
seeds = [12345, 77807, 283092, 482344, 453604]
fixed_size = 10000000
gen_2 = {"numpy": np.random.random,
"dessert_island": dessert_island,
"glibc": glibc,
"randu": randu}
tests = {"Chi-Squared g-o-f": gof_test,
"Kolmogorov-Smirnov Test": ks_test,
"Run Test - Up and Down": run_test_up_down,
"Run Test - Above and Below Mean": run_test_above_below_mean,
"Correlation Test": correlation_test}
test_results = {"generator": [],
"test": [],
"passed": []}
for gen_name, gen in gen_2.items():
for test_name, test in tests.items():
test_success = 0
for seed in seeds:
if gen_name == "numpy":
np.random.seed(seed)
sample = gen(fixed_size)
else:
sample = gen(seed, fixed_size)
success, _ = test(sample)
test_success += success
test_results["generator"].append(gen_name)
test_results["test"].append(test_name)
test_results["passed"].append(test_success)
df_test_results = pd.DataFrame(test_results).pivot("generator", "test", "passed")
df_test_results.to_csv("test_results.csv")
###Output
_____no_output_____
###Markdown
Difference network for $s_{ij}$ proportional to distances in Euclidean space
###Code
reload(dn)
np.random.seed( 2001)
K0 = 5
s0 = (2 - .2)*np.random.rand( K0) + 0.2
x0 = np.cumsum( s0)
# K0 = 5
# x0 = np.arange( 1., K0+1, 1)
sij0 = np.diag( x0)
for i in xrange(K0):
for j in xrange(i+1, K0):
sij0[i,j] = sij0[j,i] = x0[j] - x0[i]
sij0 = matrix( sij0)
results = dn.optimize( sij0, optimalities=[ 'D', 'A', 'Etree'] )
results.update( dict(
MST=gph.MST_optimize( sij0, 'n')))
def distnet_us( x0):
K = len(x0)
u = np.zeros( K)
u[0] = x0[0]/np.sqrt(K)
s = np.sqrt(K)*x0[0]
for i in xrange( 1, K):
u[i] = u[i-1] + (x0[i] - x0[i-1])/np.sqrt(K-i)
s += (x0[i] - x0[i-1])*np.sqrt(K-i)
return u*s
def distnet_minTrC( xs):
K = len(xs)
trC = np.sqrt(K)*xs[0]
for i in xrange( 1, K):
trC += np.sqrt(K-i)*(xs[i] - xs[i-1])
return trC**2
distnet_us( x0)
distnet_minTrC( x0)
np.sum( distnet_us( x0))
def draw_distnet( xs, results):
fig, axes = plt.subplots( 3, 1, sharex=True, figsize=(5, 8))
xmax = np.max( xs)
dy = xmax/(len(xs) - 1.)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color'][:len(xs)]
pos = np.array( [ ( x, (i*i-2)*dy ) for i, x in enumerate( xs) ] + [ (0, 0) ])
titles = dict( D=r'$D$', A=r'$A$', Etree=r'$E$')
allocation = dict(
D = r'$n_{i\, i+1} = \mathrm{const}$',
A = r'$n_{i\, i+1} \propto \sqrt{m-i}\cdot(s_{i+1} - s_i)$',
Etree = r'$n_i \propto s_i^2$')
for i, o in enumerate( [ 'D', 'A', 'Etree']):
ax = axes[i]
nij = results[o]
g = gph.diffnet_to_graph( nij, 'O')
mypos = gph.draw_diffnet_graph( g, pos=pos, ax=ax, node_color=colors, nodescale=20, widthscale=30, origins='O')
ax.spines['left'].set_visible( False)
ax.spines['right'].set_visible( False)
ax.spines['top'].set_visible( False)
ax.set_title( titles[o])
ax.text( 0.5*xmax, -2., allocation[o], verticalalignment='center')
if i!=2:
ax.spines['bottom'].set_visible( False)
ax.xaxis.set_visible( False)
else:
ax.xaxis.set_ticks( [ 0 ])
ax.set_xlabel( r'$s_i$')
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
# manual arrowhead width and length
hw = 1./10.*(ymax-ymin)
hl = 1./20.*(xmax-xmin)
lw = 1. # axis line width
ohg = 0.3 # arrow overhang
# get width and height of axes object to compute
# matching arrowhead length and width
dps = fig.dpi_scale_trans.inverted()
bbox = ax.get_window_extent().transformed(dps)
width, height = bbox.width, bbox.height
# compute matching arrowhead length and width
yhw = hw/(ymax-ymin)*(xmax-xmin)* height/width
yhl = hl/(xmax-xmin)*(ymax-ymin)* width/height
ax.arrow(xmin, ymin, xmax-xmin, 0, fc='k', ec='k', lw = lw,
head_width=hw, head_length=hl, overhang = ohg,
length_includes_head= True, clip_on = False)
ax.yaxis.set_visible( False)
ax.set_aspect( 'auto')
fig.subplots_adjust(hspace=0.5)
return fig
figdistnets = draw_distnet( x0, results)
# figdistnets.savefig( 'const-rel-error.eps', bbox_inches='tight')
np.array(results['Etree'])/np.square(np.array(sij0))
distnet_minTrC( x0) - np.sum( x0*x0)
###Output
_____no_output_____
###Markdown
Check that $\sum_{i\neq j} n_{ij} = 1$
###Code
[ dn.sum_upper_triangle( results[o]) for o in results ]
###Output
_____no_output_____
###Markdown
COX-2 alchemistry
###Code
nheavy = dict(A1=7, A2=6, B1=9, B2=6, C1=10, C2=10)
sCOX2 = np.diag( [nheavy['A1'] + nheavy['B1'] + nheavy['C1'],
nheavy['A1'] + nheavy['B1'] + nheavy['C2'],
nheavy['A1'] + nheavy['B2'] + nheavy['C1'],
nheavy['A1'] + nheavy['B2'] + nheavy['C2'],
nheavy['A2'] + nheavy['B1'] + nheavy['C1'],
nheavy['A2'] + nheavy['B1'] + nheavy['C2'],
nheavy['A2'] + nheavy['B2'] + nheavy['C1'],
nheavy['A2'] + nheavy['B2'] + nheavy['C2']]) + \
np.array( [[ 0, 1, 16, 17, 1, 2, 16, 17],
[ 1, 0, 17, 16, 2, 1, 17, 16],
[16, 17, 0, 1, 16, 17, 1, 2],
[17, 16, 1, 0, 17, 16, 2, 1],
[ 1, 2, 16, 17, 0, 1, 16, 17],
[ 2, 1, 17, 16, 1, 0, 17, 16],
[16, 17, 1, 2, 16, 17, 0, 1],
[17, 16, 2, 1, 17, 16, 1, 0]], dtype=float)
sCOX2 = np.sqrt( sCOX2)
sCOX2 = matrix( sCOX2)
print sCOX2
def cubeLayout( origin=False):
front = np.array( [[0, 0],
[0, 1],
[1, 0],
[1, 1]])
back = front + np.array( [ 0.5, np.sqrt(3)/6])
if not origin:
return np.concatenate( [front, back])
o = np.array( [np.sqrt(3)/6, -0.25])
return np.concatenate( [front, back, [o]])
figCOX2s, ax = plt.subplots( figsize=(7, 7))
gph.draw_diffnet_graph( gph.diffnet_to_graph( sCOX2), pos=cubeLayout( True), ax=ax, widthscale=1.5, nodescale=15, node_color=plt.rcParams['axes.prop_cycle'].by_key()['color'][:8])
ax.set_aspect( 1)
ax.axis('off')
# figCOX2s.savefig( 'COX2-sij.eps')
results = dn.optimize( sCOX2, optimalities=[ 'D', 'A', 'Etree'] )
results.update( dict(
MST=gph.MST_optimize( sCOX2, 'n')))
figCOX2n = draw_optimalities( matrix(sCOX2), results, pos=cubeLayout(True), nodescale=10)
# figCOX2n.savefig( 'COX2-nij.eps')
###Output
_____no_output_____
###Markdown
Relative to Celecoxib and Rofecoxib

Celecoxib: A1-B1-C1
Rofecoxib: A2-B2-C2
###Code
CEL, ROF = 0, 7 # celecoxib and rofecoxib
def relative_sij_COX2( sCOX2):
sCOX2rel = np.zeros( (6, 6))
allmols = range(1, 7)
origins = [-1]*6
for i, a in enumerate( allmols):
if sCOX2[a, CEL] < sCOX2[a, ROF]:
# The closer of the two reference molecules
sCOX2rel[i,i] = sCOX2[a, CEL]
origins[i] = 'C'
else:
sCOX2rel[i,i] = sCOX2[a, ROF]
origins[i] = 'R'
for j in xrange(i+1, len(allmols)):
b = allmols[j]
sCOX2rel[i,j] = sCOX2rel[j,i] = sCOX2[a,b]
return matrix(sCOX2rel), origins
sCOX2rel, oCOX2rel = relative_sij_COX2( sCOX2)
results = dn.optimize( sCOX2rel, optimalities=[ 'D', 'A', 'Etree'] )
results.update( dict(
MST=gph.MST_optimize( sCOX2rel, 'n')))
posCOX2 = cubeLayout( False)
posCOX2os = posCOX2[[CEL, ROF]]
posCOX2 = np.concatenate( [posCOX2[:CEL], posCOX2[CEL+1:ROF], posCOX2[ROF+1:], posCOX2os])
colorCOX2 = plt.rcParams['axes.prop_cycle'].by_key()['color'][:8]
colorCOX2 = colorCOX2[:CEL] + colorCOX2[CEL+1:ROF] + colorCOX2[ROF+1:] + [ colorCOX2[CEL], colorCOX2[ROF] ]
figCOX2reln = draw_optimalities( matrix(sCOX2rel), results, pos=posCOX2, nodescale=10, origins=oCOX2rel, node_color=colorCOX2)
# figCOX2reln.savefig( 'COX2-rel-nij.eps')
###Output
_____no_output_____
###Markdown
Uniform network
###Code
sijp = np.ones( (K, K), dtype=float)
sijp += np.diag( 0.*np.ones( K))
sijp = matrix( sijp)
resultsp = dn.optimize( sijp, optimalities=['D', 'A', 'Etree'])
resultsp.update( dict(
MST=gph.MST_optimize( sijp, 'n')))
figuninet = draw_optimalities( sijp, resultsp)
# figuninet.savefig( 'uniform-nets.eps')
###Output
_____no_output_____
###Markdown
Random network
###Code
np.random.seed( 1)
sijr = matrix( np.random.rand( K, K), (K, K))
sijr = 0.5*(sijr + sijr.trans())
sijr += matrix( 3.5*np.diag( np.ones( K)), (K,K))
resultsr = dn.optimize( sijr, optimalities=['D', 'A', 'Etree'])
resultsr.update( dict(
MST=gph.MST_optimize( sijr, 'n')))
figrandnet = draw_optimalities( sijr, resultsr)
###Output
_____no_output_____
###Markdown
Analyze the statistical behavior of the difference network
###Code
import cPickle as pickle
def plot_diffnet_statistics( stats):
opts = stats.keys()
samples = stats[opts[0]].keys()
opts = OPTS
samples = SAMPLES
olabels = OPT_LABELS
slabels = SAMPLE_LABELS
nrows, ncols = len(samples), len(opts)
fig, axes = plt.subplots( nrows, ncols, sharex='col', sharey='col', figsize=(5*ncols, 1*nrows))
for i, sample in enumerate( samples):
for j, opt in enumerate( opts):
stat = stats[opt][sample]
ax = axes[i][j]
avg = np.mean( stat)
std = np.std( stat)
for p in [ 'bottom', 'top', 'right' ]:
ax.spines[p].set_visible( False)
ax.yaxis.set_ticklabels( [])
if i != len(samples) - 1:
ax.xaxis.set_visible( False)
else:
ax.spines['bottom'].set_visible( True)
if sample == 'MSTn':
_, y0 = axes[i-1][j].get_ylim()
ax.plot( [ avg, avg ], [ 0, y0 ], 'k-', label=slabels.get(sample, sample))
continue
h, _, __ = ax.hist( stat, bins=10, density=True, histtype='stepfilled')
y0 = 1.25*np.max( h)
ax.errorbar( [ avg ], [ y0 ], xerr=[ std ], fmt='k.', linewidth=2, ecolor='r', label=slabels.get(sample, sample))
if (opt=='A' and sample=='A') or (opt=='E' and sample=='Etree'):
ax.plot( [ 1., 1. ], [ 0, y0 ], 'k--')
if (opt=='D' and sample=='D'):
ax.plot( [ 0., 0. ], [ 0, y0 ], 'k--')
leg = ax.legend(loc='center left', bbox_to_anchor=(0.8, 0.5), handlelength=0, markerscale=0, frameon=False, fontsize='small')
for h in leg.legendHandles: h.set_visible( False)
for j, opt in enumerate( opts):
axes[-1][j].set_xlabel( olabels[opt])
axes[nrows/2][0].set_ylabel( 'Frequency')
# plt.tight_layout()
return fig
def plot_diffnet_efficiency( stats):
opts = OPTS
samples = SAMPLES
olabels = OPT_LABELS
slabels = SAMPLE_LABELS
nrows, ncols = len(opts), len(samples)
fig, axes = plt.subplots( nrows, 1, sharex=True, figsize=(8, nrows*3))
for i, opt in enumerate( opts):
x = np.array([ stats[opt][sample] for sample in samples ]).transpose()
if opt=='D':
axes[i].plot( [1, ncols], [0, 0], 'k--')
else:
axes[i].plot( [1, ncols], [1, 1], 'k--')
axes[i].boxplot( x, sym='.')
axes[i].set_ylabel( olabels[opt])
axes[-1].set_xticklabels( [ slabels[s] for s in samples ], rotation=80, horizontalalignment='center')
return fig
def plot_allocation_stats( topo):
nrows = 3
fig, axes = plt.subplots( nrows, 1, sharex=True, sharey=True)
emin, emax = -5, 2
nbins = 2*(emax + 1 - emin)
ns = np.concatenate( [ [0.5*np.power(10., emin)], np.logspace( emin, emax, nbins) ])
for i, o in enumerate( topo):
hd, hu = topo[o]
hd /= hd.sum()
hu /= hu.sum()
hd = np.concatenate( [ [ hd[0]], hd ])
hu = np.concatenate( [ [ hu[0]], hu ])
axes[i].step( ns[:], hd[:], where='pre', label=r'$(\varnothing,i)$')
axes[i].step( ns[:], hu[:], where='pre', label=r'$(i,j>i)$')
axes[i].set_xscale( 'log')
# axes[i].set_yscale( 'log')
axes[i].text( 2e-5, 0.5, SAMPLE_LABELS[o], fontsize='small')
axes[0].legend( loc='best', frameon=False, fontsize='small')
axes[-1].set_xlabel( r'$(n_e/s_e)/(N/\sum_e s_e)$')
axes[nrows/2].set_ylabel( r'Fraction of edges')
return fig
def plot_allocation_topo( topo):
nrows = 3
fig, axes = plt.subplots( nrows, 2, sharex='col', sharey=True, figsize=( 10, nrows*3))
emin, emax = -5, 2
nbins = 2*(emax + 1 - emin)
ns = np.concatenate( [ [0.5*np.power(10., emin)], np.logspace( emin, emax, nbins) ])
k2max = np.max( [ topo[o][-1] for o in topo ])
for i, o in enumerate( topo):
hd, hu, _, k2 = topo[o]
hd /= hd.sum()
hu /= hu.sum()
hd = np.concatenate( [ [ hd[0]], hd ])
hu = np.concatenate( [ [ hu[0]], hu ])
axes[i][0].step( ns[:], hd[:], where='pre', label=r'$(\varnothing,i)$')
axes[i][0].step( ns[:], hu[:], where='pre', label=r'$(i,j>i)$')
axes[i][0].set_xscale( 'log')
# axes[i].set_yscale( 'log')
axes[i][0].text( 2e-5, 0.5, SAMPLE_LABELS[o], fontsize='small')
axes[i][1].hist( k2, normed=True, bins=np.arange(k2max+1)-0.5)
axes[0][0].legend( loc='best', frameon=False, fontsize='small')
axes[-1][0].set_xlabel( r'$(n_e/s_e)/(N/\sum_e s_e)$')
axes[-1][1].set_xlabel( r'|Edges to 2-connectivity|')
axes[nrows/2][0].set_ylabel( r'Fraction of edges')
axes[nrows/2][1].set_ylabel( r'Fraction of networks')
return fig
stats = pickle.load( file( 'examples/const_rel_error_net.pkl', 'rb'))
figeffdist = plot_diffnet_efficiency( stats)
# figeffdist.savefig( 'gain_const_rel_error_nets.eps', bbox_inches='tight')
figstatdist = plot_diffnet_statistics( stats)
###Output
_____no_output_____
###Markdown
Random networks of $\{ s_e \}$
###Code
resultsran = pickle.load( file( 'examples/random_net.pkl', 'rb'))
figeffran = plot_diffnet_efficiency( resultsran['stats'])
figtopran = plot_allocation_topo( resultsran['topo'])
# figeffran.savefig( 'gain_random_nets.eps', bbox_inches='tight')
# figtopran.savefig( 'topo_random_nets.eps', bbox_inches='tight')
def compare_two( results, o1, o2, val, blocks=5):
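    # Mean ratio of statistic `val` between optimalities o2 and o1, with a standard error
    # estimated by block-averaging the ratios over `blocks` consecutive blocks.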
stats1 = results[val][o1]
stats2 = results[val][o2]
ratio = stats2/stats1
bl = len(ratio)/blocks
bavg = [ np.mean( ratio[b*bl:(b+1)*bl]) for b in xrange(blocks)]
return np.mean(ratio), np.std( bavg)/np.sqrt(blocks)
###Output
_____no_output_____
###Markdown
Compare the statistics of $tr(C)$ between the $D$- and $A$-optimals.
###Code
compare_two( resultsran['stats'], 'D', 'A', 'A')
###Output
_____no_output_____
###Markdown
Compare the statistics of $tr(C)$ between the naive allocation of $n_e\propto s_e$ and the $A$-optimal.
###Code
compare_two( resultsran['stats'], 'csts', 'A', 'A')
###Output
_____no_output_____
###Markdown
Percentage of the $A$-optimal networks that are not 2-connected.
###Code
float(np.sum(np.array(resultsran['topo']['A'][3])>0))/len(resultsran['topo']['A'][3])
_m = 30
(_m*(1-resultsran['topo']['A'][0][0]), _m*(_m - 1)/2*(1-resultsran['topo']['A'][1][0]))
_m = 30
(_m*(1-resultsran['topo']['Etree'][0][0]), _m*(_m - 1)/2*(1-resultsran['topo']['Etree'][1][0]))
###Output
_____no_output_____
###Markdown
Uniform networks
###Code
resuni = pickle.load( file( 'examples/uniform_net.pkl', 'rb'))
resuni
def plot_uniform_networks( results):
Ks, ds, stats = results['K'], results['d'], results['stats']
fig, ax = plt.subplots()
for k, K in enumerate( Ks):
nii = stats['diag'][k]
nij = stats['offdiag'][k]
ax.plot( ds+1, K*nii, label='K=%d' % K)
ax.legend( loc='best', frameon=False)
ax.set_xlabel( r'$s_0$')
ax.set_ylabel( r'$K n_0$')
return fig
_ = plot_uniform_networks( resuni)
def trCuni(n0, s0, K):
s2 = s0*s0
n = 2./(K-1.)*(1./K - n0)
trC = K*s2/n0*(n0/n/s2 + 1)/(n0/n/s2 + K)
return trC
from scipy.optimize import minimize
def A_optimize_uniform( s0, K):
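    # Minimise trCuni over an unconstrained variable x, parameterising n0 = 1/(K*(exp(-x)+1))
    # so that 0 < n0 < 1/K is guaranteed.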
sol = minimize( lambda x: trCuni( 1./K/(np.exp(-x) + 1), s0, K), 0)
n0 = 1./K/(np.exp( -sol.x[0]) + 1)
trC = trCuni( n0, s0, K)
return n0, trC
def plot_uniform_networks2():
fig, ax = plt.subplots()
ps = np.arange(1, 6)
ds = np.logspace( -0.25, 2, 50)
for p in ps:
K = 1<<p
n0s = np.array( [ A_optimize_uniform( s0, K)[0] for s0 in ds ])
ax.plot( ds, K*n0s, label='K=%d' % K)
ax.legend( loc='best', frameon=False)
ax.set_xlabel( r'$s_0$')
ax.set_ylabel( r'$K \times n_0$')
ax.set_xscale( 'log')
return fig
_ = plot_uniform_networks2()
def plot_uniform_networks3():
fig, ax = plt.subplots()
Ks = np.arange(2, 32)
n0s = np.array( [ A_optimize_uniform( 1., K)[0] for K in Ks ])
ax.plot( Ks, Ks*n0s)
ax.plot( Ks, 2/(Ks + 1.), 'k--', label=r'$n_{ij} = const$')
ax.set_xlabel( r'$K$')
ax.set_ylabel( r'$K\times n_0$')
ax.legend( loc='best', frameon=False)
return fig
_ = plot_uniform_networks3()
###Output
_____no_output_____
###Markdown
Maximum-likelihood estimator
###Code
disconnect = 4
x0, xij, invsij2 = dn.fabricate_measurements(20, noerror=False, disconnect=4)
xML, vML = dn.MLestimate( xij, invsij2, np.concatenate( [x0[:3], [None]*(20 - 3)]))
for j in xrange(4):
plt.plot( x0[j::4], xML[j::4], 'o')
plt.gca().set_aspect( 1)
plt.gca().set_xlabel( r'$x_0$')
plt.gca().set_ylabel( r'$x_{ML}$')
###Output
_____no_output_____
###Markdown
FASTGenomics Seurat Analysis

You might want to describe your analysis briefly here, if you are planning to share it.
###Code
# Place all your R package imports here.
library(fgread)
library(Seurat)
# Place all your parameter values and options here.
options(repr.plot.width=10, repr.plot.height=5)
###Output
_____no_output_____
###Markdown
Raw Data

First, the raw dataset(s) will be read into Seurat object(s). You can describe your data here using markdown or delete this text.
###Code
# Print metadata of all attached datasets
ds_info <- fgread::ds_info()
ds_info
# Load the attached dataset
fgread::load_data() # If multiple datasets are attached, you have to select one by its id or tile
# Alternatively, if you started the analysis without datasets, load your data from elsewhere
# (see our tutorial "How to Load Data in FASTGenomics (R)")
###Output
_____no_output_____
###Markdown
Analysing the data for prediction purposes
###Code
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelEncoder, StandardScaler
from datetime import date, datetime
from dateutil.parser import parse
data = pd.read_csv('Cristano_Ronaldo_Final_v1/Data.csv')
data.drop('Unnamed: 0', axis = 1, inplace = True)
sample = pd.read_csv('Cristano_Ronaldo_Final_v1/sample_submission.csv')
data['shot_id_number'] = range(1, len(data)+1)
data.select_dtypes(include=['object']).head(10)
###Output
_____no_output_____
###Markdown
Converting non-integer fields into integers
###Code
data.game_season = data.game_season.fillna(method = 'ffill')
le_gs = LabelEncoder()
le_gs.fit(data.game_season.tolist())
print(le_gs.classes_)
new_col = le_gs.transform(data.game_season.tolist())
data['game_season'] = new_col
# Columns that needs its rows to be encoded into labels
change_cols = ['area_of_shot','shot_basics','range_of_shot','team_name']
le_list = []
for i in range (0,len(change_cols)):
le = LabelEncoder()
le.fit(data[change_cols[i]].tolist())
new_col = le.transform(data[change_cols[i]].tolist())
data[change_cols[i]] = new_col
le_list.append(le)
# Function to separate home/away into two separate columns
def separate_home_away_col(lines):
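    # Assumption about the dataset format: entries look like 'TEAM @ OPPONENT' (away game)
    # or 'TEAM vs. OPPONENT' (home game); the opponent is recorded in the corresponding column.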
away = []
home = []
for l in lines:
l = str(l)
if (l == 'nan'):
away.append(None)
home.append(None)
else:
tokens = l.split(' ')
if (tokens[1] == '@'):
away.append(tokens[2])
home.append(None)
else:
away.append(None)
home.append(tokens[2])
return away, home
away, home = separate_home_away_col(list(data['home/away']))
data['home'] = home
data['away'] = away
data.drop('home/away', axis = 1, inplace = True)
data[['home', 'away']] = data[['home', 'away']].fillna(0)
le_ha = LabelEncoder()
le_ha.fit((data['home'].tolist() + data['away'].tolist()))
new_col = le_ha.transform(data['home'].tolist())
data['home'] = new_col
new_col = le_ha.transform(data['away'].tolist())
data['away'] = new_col
def separate_lat_long(lines):
lat = []
long = []
for l in lines:
l = str(l)
if (l == 'nan'):
lat.append(None)
long.append(None)
else:
tokens = l.split(',')
lat.append(tokens[0])
long.append(tokens[1])
return lat,long
lat, long = separate_lat_long(list(data['lat/lng']))
data['lat'] = lat
data['long'] = long
data.drop('lat/lng', axis = 1, inplace = True)
# Observing that type_of_shot and type_of_combined_shot complement each other
print((data['type_of_combined_shot'][data['type_of_shot'].isnull()]).isnull().sum())
# We can try by combining them together
data['type_of_shot'] = data['type_of_shot'].fillna(data['type_of_combined_shot'])
data.drop('type_of_combined_shot', axis = 1, inplace = True)
data['type_of_shot'] = data['type_of_shot'].apply(lambda x: x.split('-')[1])
le_mi = LabelEncoder()
le_mi.fit(data.match_id.tolist())
print(le_mi.classes_)
new_col = le_mi.transform(data.match_id.tolist())
data['match_id'] = new_col
# Function to calculate number of days before today
def calc_days(g_date):
if (type(g_date) == str):
tokens = g_date.split('-')
given_date = date(int(tokens[0]), int(tokens[1]), int(tokens[2]))
today = date.today()
diff = today - given_date
return diff.days
else:
return None
data.date_of_game = data.date_of_game.apply(lambda x: calc_days(x))
data.loc[:, data.columns != 'is_goal'] = data.loc[:, data.columns != 'is_goal'].fillna(method = 'ffill')
# Scaling the data using Standard Scaler
scaler = StandardScaler()
scaled_df = scaler.fit_transform(data.loc[:, data.columns != 'is_goal'])
scaled_df = pd.DataFrame(scaled_df, columns=data.loc[:, data.columns != 'is_goal'].columns)
scaled_df['is_goal'] = data['is_goal']
scaled_df['shot_id_number'] = data['shot_id_number']
scaled_df = scaled_df.set_index('shot_id_number')
#data.iloc[:10,8:]
scaled_df.iloc[:10,6:]
# Spliting the data into train and test data
test = scaled_df[scaled_df['is_goal'].isnull()]
train = scaled_df[scaled_df['is_goal'].notnull()]
###Output
_____no_output_____
###Markdown
Plotting the correlation matrix to find useful features
###Code
corrmat = train.corr()
filteredCorrMat_features = corrmat.index[abs(corrmat['is_goal']).notnull()]
plt.figure(figsize=(40,40))
sns.heatmap(train[filteredCorrMat_features].corr(),annot=True,cmap='summer')
# Correlation matrix
train.corr()
# Selecting the important features
new_data = data[['match_event_id','location_y','power_of_shot','distance_of_shot', 'area_of_shot', 'shot_basics','range_of_shot','distance_of_shot.1','is_goal']]
corrmat = new_data.corr()
filteredCorrMat_features = corrmat.index[abs(corrmat['is_goal']).notnull()]
plt.figure(figsize=(12,12))
sns.heatmap(new_data[filteredCorrMat_features].corr(),annot=True,cmap='summer')
# full_test = data[data['is_goal'].isnull()]
# full_train = data[data['is_goal'].notnull()]
# full_train.loc[:, full_train.columns != 'is_goal'] = full_train.loc[:, full_train.columns != 'is_goal'].fillna(method = 'ffill')
# full_test.loc[:, full_test.columns != 'is_goal'] = full_test.loc[:, full_test.columns != 'is_goal'].fillna(method = 'ffill')
#train.iloc[:,:-1] = train.iloc[:,:-1].fillna(method = 'ffill')
#test.iloc[:,:-1] = test.iloc[:,:-1].fillna(method = 'ffill')
# Print statistics
print('Orig: '+ str(len(data)))
print('test stats')
print(test.is_goal.describe())
print('Length of the dataset : '+str(len(test)))
print('train stats')
print(train.is_goal.describe())
print(train.is_goal.value_counts())
print('Length of the dataset : '+str(len(train)))
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression, ElasticNet, Ridge, Lasso
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score, f1_score, mean_absolute_error
from sklearn.metrics import accuracy_score, recall_score, precision_score
from xgboost import XGBRegressor
from math import sqrt
X_train, X_test, y_train, y_test = train_test_split(train.iloc[:,:-1], train['is_goal'], test_size=0.25)
regr = LinearRegression()
regr.fit(X_train, y_train)
y_pred = regr.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('R2 score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
logi = LogisticRegression()
logi.fit(X_train, y_train)
lp = logi.predict_proba(X_test)
y_pred = [x[0] for x in lp]  # note: predict_proba columns follow logi.classes_, so x[0] is the probability of is_goal == 0
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('R2 score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
en = ElasticNet()
en.fit(X_train, y_train)
y_pred = en.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('R2 score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
svr = SVR()
svr.fit(X_train, y_train)
y_pred = svr.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
ls = Lasso(alpha = 0.1)
ls.fit(X_train, y_train)
y_pred = ls.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
rdg = Ridge(alpha = 1.0)
rdg.fit(X_train, y_train)
y_pred = rdg.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
y_pred = rfr.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
ada = AdaBoostRegressor()
ada.fit(X_train, y_train)
y_pred = ada.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
br = BaggingRegressor()
br.fit(X_train, y_train)
y_pred = br.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
etr = ExtraTreesRegressor()
etr.fit(X_train, y_train)
y_pred = etr.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
gbr = GradientBoostingRegressor()
gbr.fit(X_train, y_train)
y_pred = gbr.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
xgb = XGBRegressor(n_estimators=1000, learning_rate=0.1, gamma=0, subsample=0.50, colsample_bytree=1, max_depth=10)
xgb.fit(X_train, y_train)
y_pred = xgb.predict(X_test)
print("Mean absolute error: %.2f"
% mean_absolute_error(y_test, y_pred))
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
print("Root Mean squared error: %.2f"
% sqrt(mean_squared_error(y_test, y_pred)))
###Output
[19:10:04] WARNING: src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.
Mean absolute error: 0.46
Mean squared error: 0.27
Variance score: -0.07
Root Mean squared error: 0.51
###Markdown
Prediction and storing in CSV file
###Code
lgr = LogisticRegression()
lgr.fit(train.iloc[:,:-1], train['is_goal'])
lp = lgr.predict_proba(test.iloc[:,:-1])
y_pred = [x[0] for x in lp]  # note: predict_proba columns follow lgr.classes_, so x[0] is the probability of is_goal == 0
temp = [[str(int(x)) for x in test.index], [x for x in list(y_pred)]]
df = pd.DataFrame(temp).transpose()
df.columns = ['shot_id_number', 'is_goal']
df.set_index('shot_id_number')
df.to_csv('Hari_Veeramallu_032699_prediction.csv', index = False)
###Output
_____no_output_____
###Markdown
HBN Wearable Analysis: Second Test. Functions to examine rolling correlations between device sensor outputs in follow-up analysis. Authors: Jon Clucas, 2017 ([email protected]); Arno Klein, 2017. © 2017, Child Mind Institute, Apache v2.0 License. Setup:
###Code
%matplotlib inline
%load_ext autoreload
%autoreload 2
import readline # for R magics
%load_ext rpy2.ipython
import warnings
from rpy2.rinterface import RRuntimeWarning
warnings.filterwarnings("ignore", category=RRuntimeWarning)
from utilities.chart_data import df_devices_qt, linechart, xcorr
from config import config
from datetime import datetime, timedelta
import IPython as IP
from utilities.normalize_acc_data import normalize as norm
import numpy as np
import os
import sys
import pandas as pd
from utilities import fetch_data
from utilities.organize_wearable_data import geneactiv_acc
pd.set_option('mode.use_inf_as_null', True)
acc_hashes = dict()
if not os.path.exists('./sample_data'):
os.makedirs('./sample_data')
###Output
_____no_output_____
###Markdown
load normalized data:
###Code
df = df_devices_qt([('A', 'ActiGraph wGT3X-BT'), ('G1', 'GENEActiv Original (black)'), ('G2',
'GENEActiv Original (pink)')], 'accelerometer quicktest', datetime(2017, 4, 28, 15, 30),
datetime(2017, 4, 28, 15, 48), acc_hashes)
df.rename(columns={'normalized_vector_length': 'normalized_vector_length_GENEActiv Original (pink)'}, inplace=True)
Avalues = df['normalized_vector_length_ActiGraph wGT3X-BT'].values
G1values = df['normalized_vector_length_GENEActiv Original (black)'].values
G2values = df['normalized_vector_length_GENEActiv Original (pink)'].values
[xcorr(G1values, G2values), xcorr(Avalues, G1values),
xcorr(Avalues, G2values)]
df1 = df[['normalized_vector_length_ActiGraph wGT3X-BT',
'normalized_vector_length_GENEActiv Original (black)',
'normalized_vector_length_GENEActiv Original (pink)']]
linechart(df1, 'ActiGraph vs 2×GENEActiv', line=False, full=True)
print(df1['normalized_vector_length_ActiGraph wGT3X-BT'].mode())
print(df1['normalized_vector_length_GENEActiv Original (black)'].mode())
print(df1['normalized_vector_length_GENEActiv Original (pink)'].mode())
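# Estimate the relative lag between each pair of signals from the position of
# the peak of their full cross-correlation (np.correlate with mode='full').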
shiftG1G2 = len(G1values) - np.argmax(np.correlate(G1values, G2values, mode='full'))
shiftG1A = len(G1values) - np.argmax(np.correlate(G1values, Avalues, mode='full'))
shiftG2A = len(G2values) - np.argmax(np.correlate(G2values, Avalues, mode='full'))
shiftGA = np.int(np.mean([shiftG1A, shiftG2A]))
[shiftG1G2, shiftG1A, shiftG2A, shiftGA]
shift_GA = np.abs(shiftGA)
Avalues_shifted = Avalues[:G1values.shape[0]-shift_GA]
G1values_shifted = G1values[shift_GA:G1values.shape[0]]
G2values_shifted = G2values[shift_GA:G2values.shape[0]]
[np.shape(G1values_shifted), np.shape(G2values_shifted), np.shape(Avalues_shifted)]
[xcorr(G1values_shifted, G2values_shifted), xcorr(Avalues_shifted, G1values_shifted),
xcorr(Avalues_shifted, G2values_shifted)]
shifted_t = [datetime(2017, 4, 28, 15, 30)]
while len(shifted_t) < np.shape(Avalues_shifted)[0]:
shifted_t.append(shifted_t[-1] + timedelta(seconds=0.0166))
shifted_df = pd.DataFrame({'normalized_vector_length_ActiGraph wGT3X-BT': Avalues_shifted,
'normalized_vector_length_GENEActiv Original (black)': G1values_shifted,
'normalized_vector_length_GENEActiv Original (pink)': G2values_shifted, 'Timestamp':shifted_t})
shifted_df.set_index('Timestamp', inplace=True)
###Output
_____no_output_____
###Markdown
cut middle portion out when devices were being transferred:
###Code
start1 = datetime(2017,4,28,15,30)
stop1 = datetime(2017,4,28,15,37)
start2 = datetime(2017,4,28,15,40)
stop2 = datetime(2017,4,28,15,48)
cropped_df = shifted_df.loc[(shifted_df.index >= start1) & (shifted_df.index <= stop1) |
(shifted_df.index >= start2) & (shifted_df.index <= stop2)].copy()
linechart(cropped_df, 'ActiGraph vs 2×GENEActiv, shifted',
line=False, full=True)
Avalues_cropped = cropped_df['normalized_vector_length_ActiGraph wGT3X-BT'].values
G1values_cropped = cropped_df['normalized_vector_length_GENEActiv Original (black)'].values
G2values_cropped = cropped_df['normalized_vector_length_GENEActiv Original (pink)'].values
###Output
_____no_output_____
###Markdown
compute normalized cross-correlations:
###Code
[xcorr(G1values_cropped, G2values_cropped), xcorr(Avalues_cropped, G1values_cropped),
xcorr(Avalues_cropped, G2values_cropped)]
###Output
_____no_output_____
###Markdown
plot x-second windows:
###Code
start = datetime(2017,4,28,15,30) #shifted_t[0]
stop = datetime(2017,4,28,15,48) #shifted_t[-1]
plot_data = True
while start < stop and plot_data:
new_start = start + timedelta(seconds=10)
plot_df = cropped_df.loc[(cropped_df.index >= start) & (cropped_df.index <= new_start)].copy()
label = '–'.join([start.strftime('%H:%M:%S'), new_start.strftime('%H:%M:%S')])
plot_data = linechart(plot_df, label, line=True, full=False)
#print(xcorr(plot_df['normalized_vector_length_GENEActiv'].values,
# plot_df['normalized_vector_length_GENEActiv(2)'].values))
#print(xcorr(plot_df['normalized_vector_length_ActiGraph'].values,
# plot_df['normalized_vector_length_GENEActiv'].values))
#print(xcorr(plot_df['normalized_vector_length_ActiGraph'].values,
# plot_df['normalized_vector_length_GENEActiv(2)'].values))
start = new_start
np.shape(G1values)
start = datetime(2017,4,28,15,30) #shifted_t[0]
stop = datetime(2017,4,28,15,48) #shifted_t[-1]
plot_data = True
while start < stop and plot_data:
new_start = start + timedelta(seconds=10)
plot_df = cropped_df.loc[(cropped_df.index >= start) & (cropped_df.index <= new_start)].copy()
label = '–'.join([start.strftime('%H:%M:%S'), new_start.strftime('%H:%M:%S')])
plot_data = linechart(plot_df, label, line=True, full=True)
#print(xcorr(plot_df['normalized_vector_length_GENEActiv'].values,
# plot_df['normalized_vector_length_GENEActiv(2)'].values))
#print(xcorr(plot_df['normalized_vector_length_ActiGraph'].values,
# plot_df['normalized_vector_length_GENEActiv'].values))
#print(xcorr(plot_df['normalized_vector_length_ActiGraph'].values,
# plot_df['normalized_vector_length_GENEActiv(2)'].values))
start = new_start
###Output
_____no_output_____
###Markdown
"The first thing which I should point out here is that the data which you are collecting from GENEActiv devices is completely unfiltered raw data. Most devices apply some proprietary filtering to the data on board the device – Activinsights do not do this. The result you are seeing is completely normal for the device. This result is due to a small offset in the calibration of the accelerometer which you are not seeing with the other devices which you are using as a comparison as they have filtered this out before you see the data.[. . .]I think you find better results if you calibrate the data on the GENEActiv first.[. . .]This function can be found within the R package `GGIR`, as `g.calibrate`. A vignette to this package can be found here https://cran.r-project.org/web/packages/GGIR/vignettes/GGIR.html.[. . .]I've attached a script which will take any GENEActiv `bin` files you have and calibrate them into their own folder. I'd then convert these into raw `.csv` files. You can use the function found in `GGIR` to do the same for other accelerometer data." —Activinsights download GENEActiv RAW files for R scripts:
###Code
if not os.path.exists("raw"):
os.makedirs("raw")
acc_GA_black_path = fetch_data.fetch_data(config.rawurls['raw_accelerometer']['GENEActiv Original (black)'],
"raw/GA_black.bin")
acc_GA_pink_path = fetch_data.fetch_data(config.rawurls['raw_accelerometer']['GENEActiv Original (pink)'],
"raw/GA_pink.bin")
###Output
_____no_output_____
###Markdown
Calibrate GENEActiv data:
###Code
%R source("utilities/JonClucasCalibrationScript.R")
###Output
_____no_output_____
###Markdown
GGIR:
###Code
%%R
library(GENEAread)
GA_black <- read.bin("GA_black_Recalibrate.bin")
write.csv(GA_black$data.out, "GA_black.csv", row.names=FALSE)
GA_pink <- read.bin("GA_pink_Recalibrate.bin")
write.csv(GA_pink$data.out, "GA_pink.csv", row.names=FALSE)
geneactiv_acc(os.getcwd())
od = os.path.abspath(os.path.join(sys.modules["utilities"].__file__,
os.pardir, os.pardir, "organized/accelerometer"))
dfR = df[[col for col in df.columns if "ActiGraph" in col]].merge(norm(pd.read_csv(os.path.join(od, "GENEActiv_black.csv"),
parse_dates=['Timestamp'], infer_datetime_format=True)).set_index('Timestamp'), left_index=
True, right_index=True, suffixes=('', ''.join(['_', "GENEActiv Original (black)"]))).merge(norm(pd.read_csv(
os.path.join(od, "GENEActiv_pink.csv"), parse_dates=['Timestamp'], infer_datetime_format=True)).set_index(
'Timestamp'), left_index=True, right_index=True, suffixes=('', ''.join(['_', "GENEActiv Original (pink)"]))
).rename(columns={'normalized_vector_length': 'normalized_vector_length_GENEActiv Original (black)'})
print(dfR)
AvaluesR = dfR['normalized_vector_length_ActiGraph wGT3X-BT'].values
G1valuesR = dfR['normalized_vector_length_GENEActiv Original (black)'].values
G2valuesR = dfR['normalized_vector_length_GENEActiv Original (pink)'].values
shiftG1G2R = len(G1valuesR) - np.argmax(np.correlate(G1valuesR, G2valuesR, mode='full'))
shiftG1AR = len(G1valuesR) - np.argmax(np.correlate(G1valuesR, AvaluesR, mode='full'))
shiftG2AR = len(G2valuesR) - np.argmax(np.correlate(G2valuesR, AvaluesR, mode='full'))
shiftGAR = np.int(np.mean([shiftG1AR, shiftG2AR]))
[shiftG1G2R, shiftG1AR, shiftG2AR, shiftGAR]
shift_GAR = np.abs(shiftGAR)
Avalues_shiftedR = AvaluesR[:G1valuesR.shape[0]-shift_GAR]
G1values_shiftedR = G1valuesR[shift_GAR:G1valuesR.shape[0]]
G2values_shiftedR = G2valuesR[shift_GAR:G2valuesR.shape[0]]
[np.shape(G1values_shiftedR), np.shape(G2values_shiftedR), np.shape(Avalues_shiftedR)]
[xcorr(G1values_shiftedR, G2values_shiftedR), xcorr(Avalues_shiftedR, G1values_shiftedR),
xcorr(Avalues_shiftedR, G2values_shiftedR)]
shifted_tR = [datetime(2017, 4, 28, 15, 30)]
while len(shifted_tR) < np.shape(Avalues_shiftedR)[0]:
shifted_tR.append(shifted_tR[-1] + timedelta(seconds=0.0166))
shifted_dfR = pd.DataFrame({'normalized_vector_length_ActiGraph wGT3X-BT': Avalues_shiftedR,
'normalized_vector_length_GENEActiv Original (black)': G1values_shiftedR,
'normalized_vector_length_GENEActiv Original (pink)': G2values_shiftedR, 'Timestamp':shifted_tR})
shifted_dfR.set_index('Timestamp', inplace=True)
cropped_dfR = shifted_dfR.loc[(shifted_dfR.index >= start1) & (shifted_dfR.index <= stop1) |
(shifted_dfR.index >= start2) & (shifted_dfR.index <= stop2)].copy()
linechart(cropped_dfR, 'ActiGraph vs 2×GENEActiv, shifted',
line=False, full=True)
###Output
_____no_output_____
###Markdown
L.A. homeless arrests analysis. By [Christine Zhang](mailto:[email protected]) The Los Angeles Times analyzed daily arrest logs between January 1, 2011 and December 31, 2016 from the LAPD to determine yearly trends in arrests of homeless people. The results were reported in a February 4, 2018, Los Angeles Times story titled ["L.A. leaders oppose 'criminalizing' homeless people. But thousands are jailed for minor offenses"](http://www.latimes.com/local/politics/la-me-homeless-arrests-20180204-story.html). Here are the key findings of the data analysis, which is documented below with code written in the programming language R: * The LAPD made 14,000 arrests of homeless people last year, a 30% increase over 2011 * LAPD arrests overall went down 15% from 2011 to 2016 * Two-thirds of those arrested were black or Latino * In 2011, one in 10 arrests citywide was of a homeless person; in 2016, it was 1 in 6 * The 14,000 arrests of homeless people in 2016 included more than 500 unique charges * By far the most common was failing to appear in court on an unpaid infraction or misdemeanor citation * The top five charges were for non-violent or minor offenses Read more about the methodology [here](../README.md). How we did it Import R data analysis libraries
###Code
library('dplyr')
library('feather')
library('ggplot2')
###Output
_____no_output_____
###Markdown
The file `arrests.feather.zip` must first be unzipped to run this notebook. This is a file that has been prepared outside of this notebook as part of an unpublished processing script. The raw LAPD data includes names and other identifying information about arrestees that The Times has chosen to withhold.
###Code
unzip("arrests.zip")
###Output
_____no_output_____
###Markdown
Finding: The LAPD made 14,000 arrests of homeless people last year, a 30% increase over 2011 Read in the data for analysis. Note that the data has been pre-processed to remove the name, address and date of birth fields for privacy purposes. Age was calculated based on the individual's date of birth at the time of arrest.
###Code
data <- read_feather('arrests.feather')
names(data)
head(data)
###Output
_____no_output_____
###Markdown
Group the data by arrest year and homeless and sum the total number of arrests
###Code
arrest.totals <- data %>%
group_by(arrest_year, homeless) %>%
distinct(booking_num) %>%
summarize(arrests_number = n())
###Output
_____no_output_____
###Markdown
Filter to homeless arrests
###Code
homeless.totals <- arrest.totals %>% filter(homeless == 1)
homeless.totals
print(paste0("The *raw* increase in homeless arrests between 2011 and 2016 is ",
round((homeless.totals[homeless.totals$arrest_year == 2016,]$arrests_number /
homeless.totals[homeless.totals$arrest_year == 2011,]$arrests_number - 1) * 100), "%"))
###Output
[1] "The *raw* increase in homeless arrests between 2011 and 2016 is 33%"
###Markdown
However, there are some missing dates in the data
###Code
# extract the booking dates as a vector
booking.dates <- data %>% select(booking_ymd)
booking.dates <- booking.dates %>% distinct(booking_ymd) %>% select(booking_ymd) %>% arrange(booking_ymd)
booking.dates$has.data = 1
# get the time period (minimum date and maximum date) of the data set
time.min <- booking.dates$booking_ymd[1]
time.max <- booking.dates$booking_ymd[length(booking.dates$booking_ymd) - 1]
# create a dataframe of all the days spanning that time period
all.dates.frame <- data.frame(list(booking_ymd = seq(time.min, time.max, by="day")))
# merge this dataframe with the vector of booking dates to find the missing dates
merged.data <- merge(all.dates.frame, booking.dates , all=T)
missing.dates <- merged.data %>% filter(is.na(has.data) == T)
###Output
_____no_output_____
###Markdown
There were six days in 2011 for which arrests data were not available
###Code
missing.dates
###Output
_____no_output_____
###Markdown
Pro-rate the 2011 figure to account for the missing six days
###Code
prorated.homeless.2011 <- homeless.totals[homeless.totals$arrest_year == 2011,]$arrests_number/(365 - 6) * 365
prorated.homeless.2011
print(paste0("The *prorated* change in homeless arrests between 2011 and 2016 is ",
round((homeless.totals[homeless.totals$arrest_year == 2016,]$arrests_number /
prorated.homeless.2011 - 1) * 100), "% (rounded down to 30% in the story)"))
###Output
[1] "The *prorated* change in homeless arrests between 2011 and 2016 is 31% (rounded down to 30% in the story)"
###Markdown
Finding: LAPD arrests overall went down 15% from 2011 to 2016 Group `arrest.totals` by arrest year and sum the total number of arrests to get the overall arrest numbers for each year.
###Code
all.totals <- arrest.totals %>%
group_by(arrest_year) %>%
summarize(arrests_number = sum(arrests_number))
all.totals
print(paste0("The *raw* change in overall arrests between 2011 and 2016 is ",
round((all.totals[all.totals$arrest_year == 2016,]$arrests_number /
all.totals[all.totals$arrest_year == 2011,]$arrests_number - 1) * 100), "%"))
###Output
[1] "The *raw* change in overall arrests between 2011 and 2016 is -14%"
###Markdown
Again, we need to pro-rate to take into account the six missing days in 2011.
###Code
prorated.arrests.2011 <- all.totals[all.totals$arrest_year == 2011,]$arrests_number/(365 - 6) * 365
prorated.arrests.2011
print(paste0("The *prorated* change between 2011 and 2016 is ",
round((all.totals[all.totals$arrest_year == 2016,]$arrests_number /
prorated.arrests.2011 - 1) * 100), "%"))
###Output
[1] "The *prorated* change between 2011 and 2016 is -15%"
###Markdown
Finding: Two-thirds of those arrested were black or Latino Group the data by arrest year, homeless, and race/ethnicity
###Code
arrests.race <- data %>%
group_by(arrest_year, homeless, race) %>%
distinct(booking_num)
###Output
_____no_output_____
###Markdown
Create a variable, `race.grp` to represent racial/ethnic grouping, where W = white, B = black, H = Latino (Hispanic), etc.
###Code
table(arrests.race$race)
arrests.race$race.grp <- ifelse(arrests.race$race == 'W', "White",
ifelse(arrests.race$race == 'B', "Black",
ifelse(arrests.race$race == 'H', "Latino",
ifelse(arrests.race$race == 'A' | arrests.race$race == 'C' | arrests.race$race == 'J'| arrests.race$race == 'K'| arrests.race$race == 'F', "Asian",
'Other'))))
###Output
_____no_output_____
###Markdown
Group by `race.grp` and calculate the total number and percentage of homeless arrests
###Code
arrests.race.yr <- arrests.race %>%
group_by(arrest_year, homeless, race.grp) %>%
summarize(arrests_number = n()) %>%
mutate(arrests_percent = arrests_number / sum(arrests_number) * 100)
arrests.race.yr %>% filter(homeless == 1 & arrest_year == 2016) %>% arrange(desc(arrests_percent))
ggplot(arrests.race.yr %>% filter(homeless == 1 & arrest_year != 2017), aes(x = arrest_year,
y = arrests_percent, color = race.grp)) +
geom_line() +
geom_text(data = arrests.race.yr %>% filter(homeless == 1 &
arrest_year == 2016), aes(label = race.grp), hjust = 0.7,
vjust = 1) +
scale_y_continuous(limits = c(0, 50)) +
labs(x = "", y = "% of arrests", title = "Racial Breakdown of homeless arrests") +
theme(legend.position = 'none')
###Output
_____no_output_____
###Markdown
Finding: In 2011, one in 10 arrests citywide was of a homeless person; in 2016, it was 1 in 6. Use the grouped dataframe `arrest.totals` to calculate the percentage of homeless arrests by year
###Code
arrest.totals %>%
mutate(arrests_percent = arrests_number / sum(arrests_number) * 100) %>%
filter(homeless == 1)
###Output
_____no_output_____
###Markdown
Finding: The 14,000 arrests of homeless people in 2016 included more than 500 unique charges Filter the data to include homeless arrests in 2016 and calculate the number and percent of times each charge was cited. Note that this is done by analyzing each charge separately, so the `times_cited` column will not sum to the total number of arrests per year (arrestees can have multiple charges).
###Code
arrest.reasons <- data %>% filter(homeless == 1 & arrest_year == 2016) %>%
group_by(charge_code, charge_desc) %>%
summarize(times_cited = n()) %>%
ungroup() %>%
mutate(percent_cited = times_cited/sum(times_cited) * 100)
###Output
_____no_output_____
###Markdown
Get the number of unique charges
###Code
length(unique(arrest.reasons$charge_code))
###Output
_____no_output_____
###Markdown
Finding: The most common offense was failure to appear in court for unpaid petty or minor citations Sort by percent of the time the charge was cited to get the top charges
###Code
head(arrest.reasons %>% arrange(desc(percent_cited)))
###Output
_____no_output_____
###Markdown
Many codes did not come with charge descriptions in the data. Those that appear in the above table are described as follows: * 459.5PC: shoplifting * 3000.08CPC: parole warrant * 3454(C)PC: flash incarceration Finding: The top five charges were for non-violent or minor offenses Some charge codes are grouped, largely according to [this](http://milliondollarhoods.org/wp-content/uploads/2017/10/Policing-the-House-2.0.FINAL_.pdf) UCLA report. For example, charge codes 40508(A)VC, 853.7PC, and 853.8PC all cover failure to appear.
###Code
arrest.reasons$failure <- ifelse(arrest.reasons$charge_code == '40508(A)VC'|
arrest.reasons$charge_code == '853.7PC'|
arrest.reasons$charge_code == '853.8PC', 1, 0)
arrest.reasons$trespass <- ifelse(arrest.reasons$charge_code == '419PC'|
arrest.reasons$charge_code == '602(K)PC'|
arrest.reasons$charge_code == '602(O)(2)PC'|
arrest.reasons$charge_code == '602.5(A)PC'|
arrest.reasons$charge_code == '555PC'|
arrest.reasons$charge_code == '484F(A)PC'|
arrest.reasons$charge_code == '602(L)(1)PC'|
arrest.reasons$charge_code == '602(P)PC'|
arrest.reasons$charge_code == '602.5(B)PC'|
arrest.reasons$charge_code == '602PC'|
arrest.reasons$charge_code == '602(M)PC'|
arrest.reasons$charge_code == '602(Q)PC'|
arrest.reasons$charge_code == '602.8PC'|
arrest.reasons$charge_code == '602(A)PC'|
arrest.reasons$charge_code == 'A602(N)1PC'|
arrest.reasons$charge_code == '602(S)PC'|
arrest.reasons$charge_code == '626.8(A)1PC'|
arrest.reasons$charge_code == '602(D)PC'|
arrest.reasons$charge_code == '602(N)PC'|
arrest.reasons$charge_code == '602(U)(1)PC'|
arrest.reasons$charge_code == '647(E)PC'|
arrest.reasons$charge_code == '602(F)PC'|
arrest.reasons$charge_code == '602(O)PC'|
arrest.reasons$charge_code == '602.1(A)PC'|
arrest.reasons$charge_code == '647(H)PCLPP'|
arrest.reasons$charge_code == '602(J)PC'|
arrest.reasons$charge_code == '602(O)(1)PC'|
arrest.reasons$charge_code == '602.1(B)PC'|
arrest.reasons$charge_code == '369I(A)PC', 1, 0)
arrest.reasons$shoplift <- ifelse(arrest.reasons$charge_code == '18 1708'|
arrest.reasons$charge_code == '484PCTFMV'|
arrest.reasons$charge_code == '485PC'|
arrest.reasons$charge_code == '488PC'|
arrest.reasons$charge_code == '459.5PC'|
arrest.reasons$charge_code == '484F(A)PC'|
arrest.reasons$charge_code == 'A488PC'|
arrest.reasons$charge_code == '490PC'|
arrest.reasons$charge_code == 'A484PC'|
arrest.reasons$charge_code == '484E(D)PC'|
arrest.reasons$charge_code == '666PC'|
arrest.reasons$charge_code == '484PC'|
arrest.reasons$charge_code == '490.2PC'|
arrest.reasons$charge_code == '666(A)PC'|
arrest.reasons$charge_code == '484(A)PC'|
arrest.reasons$charge_code == '490.5(A)PC'|
arrest.reasons$charge_code == '537(A)(1)PC'|
arrest.reasons$charge_code == '666.5PC'|
arrest.reasons$charge_code == '484E(A)PC'|
arrest.reasons$charge_code == '587CPC'|
arrest.reasons$charge_code == '666.5(A)PC'|
arrest.reasons$charge_code == '484E(B)PC', 1, 0)
arrest.reasons$supervision_viol <- ifelse(arrest.reasons$charge_code == '1203.2PC'|
arrest.reasons$charge_code == '3000.08CPC'|
arrest.reasons$charge_code == '3454(C)PC'|
arrest.reasons$charge_code == '3455(B)1PC'|
arrest.reasons$charge_code == '1203.2(A)PC'|
arrest.reasons$charge_code == '3056PC'|
arrest.reasons$charge_code == '3455(A)4PC'|
arrest.reasons$charge_code == '3455(C)PC'|
arrest.reasons$charge_code == '3000.08FPC'|
arrest.reasons$charge_code == '3454PC'|
arrest.reasons$charge_code == '3455(A)PC'|
arrest.reasons$charge_code == '18 3606US', 1, 0)
arrest.reasons$drug_poss <- ifelse(arrest.reasons$charge_code == '11377(A)HS'|
arrest.reasons$charge_code == '11377(A)1HS'|
arrest.reasons$charge_code == '11377HS'|
arrest.reasons$charge_code == '11350(A)HS'|
arrest.reasons$charge_code == '11350HS'|
arrest.reasons$charge_code == '11357HS'|
arrest.reasons$charge_code == '11357(A)HS'|
arrest.reasons$charge_code == '11357(B)HS'|
arrest.reasons$charge_code == '11357(C)HS'|
arrest.reasons$charge_code == '4573.6PC'|
arrest.reasons$charge_code == '11550(A)HS'|
arrest.reasons$charge_code == '11375(B)2HS'|
arrest.reasons$charge_code == '11351HS'|
arrest.reasons$charge_code == '4060BP', 1, 0)
arrest.reasons$charge_desc_grouped <- ifelse(arrest.reasons$drug_poss == 1, 'drug_poss',
ifelse(arrest.reasons$trespass == 1, 'trespass',
ifelse(arrest.reasons$shoplift == 1, 'shoplift',
ifelse(arrest.reasons$supervision_viol == 1, 'supervision violation',
ifelse(arrest.reasons$failure == 1, 'failure to appear',
arrest.reasons$charge_code)))))
###Output
_____no_output_____
###Markdown
Get top five offenses using `charge_desc_grouped` as the charge identifier
###Code
arrest.reasons %>% group_by(charge_desc_grouped) %>%
summarise(times_cited = sum(times_cited)) %>%
mutate(perc_cited = times_cited/sum(times_cited) * 100) %>% arrange(desc(times_cited)) %>% head(5)
###Output
_____no_output_____
###Markdown
Preliminary Analysis
###Code
# s = la.svd(W, compute_uv=False, full_matrices=True, check_finite=False)
# x = da.from_array(W, chunks=(1000, 1000))
# xt = da.from_array(W.T, chunks=(1000, 1000))
# X = da.matmul(x, xt)
# # x = da.from_array(X, chunks=(1000, 1000))
# u,s,v = da.linalg.svd(X, name=None)
plt.hist(s, 50, normed=True)
# plt.xlim((0, 0.3))
0
def marcenkopasturpdf(x, c):
# Marchenko Pastur Density Function for c > 1
# ub = (1 + np.sqrt(c))**2
# lb = (1 - np.sqrt(c))**2
ub = 1 + 1/c + 2 * np.sqrt(1/c)
lb = 1 + 1/c - 2 * np.sqrt(1/c)
mp = np.zeros(len(x))
# Figure out indices where mp is to be calculated
lbidx = np.where(x > lb)
ubidx = np.where(x < ub)
a = lbidx[0][0]
b = ubidx[-1][-1]
xh = x[a:b+1]
# MP distribution
mp[a:b+1] = c* np.sqrt((xh - lb)*(ub - xh))/(2*math.pi*xh)
# mp[a:b+1] = np.sqrt((xh - lb)*(ub - xh))/(2*math.pi*c*xh)
return lb, ub, mp
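# Note added for clarity: the expression above is (assumed to be) the standard
# Marchenko-Pastur density with aspect ratio gamma = 1/c and unit variance,
#   f(x) = sqrt((ub - x) * (x - lb)) / (2 * pi * gamma * x)   for lb <= x <= ub,
#   lb = (1 - sqrt(gamma))**2,  ub = (1 + sqrt(gamma))**2,
# which matches the code since multiplying by c is the same as dividing by gamma.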
l = np.arange(0, 5, step=0.01)
# print(l)
c = 461408206 / 344332
print(c)
lb, ub, mp = marcenkopasturpdf(l, c)
print(lb,ub)
print(np.mean(mp))
print(np.min(mp), np.max(mp))
# print(mp)
# sns.distplot(l, kde=True, norm_hist=True)
plt.plot(l, mp, linewidth=1, color='red')
# plt.hist(mp, linewidth=1, color='red',normed=True)
# sns.distplot(sgns001_s)
# plt.hist(svd.s/1000, 100, normed=True)
# plt.hist(s, 50, normed=True)
# plt.xlim((0, 0.3))
# plt.hist(mp, 'auto', normed=True)
# plt.hist(mp, 'auto', normed=True, range=(0,ub))
sns.distplot(svd.s/c, kde=False, norm_hist=True)
plt.ylabel('MP')  # call the function instead of overwriting it with a string
plt.xlabel('Eigenvalue')
plt.show()
# sns.distplot(mp)
s = [0, 1, 2, 3, 4]
# s = [0.01,0.1, 1, 10, 100] 10^i
wss = [0,0,0,2,34]#[::-1]
wsr = [0,0,0,1,37]#[::-1]
men = [0,0,0,22,968]#[::-1]
mt = [1,1,1,4,73]#[::-1]
rw = [410,410,524,1117,1744]#[::-1]
sl = [1,1,1,6,269]#[::-1]
glr = [0,0,58,2792,11488]#[::-1]
msr = [1386,1386,1620,2598,4598]#[::-1]
sns.set_style("whitegrid")
plt.plot(s, rw, color='red', label='rw')
plt.plot(s, men, color='blue',label='men')
plt.plot(s, sl, label='sl')
plt.plot(s, mt, label='mt')
plt.plot(s, wss, label='wss')
plt.plot(s, wsr, label='wsr')
plt.xticks(s)
plt.xlabel('Negative Exponents of 10')
plt.ylabel('UNK tokens')
leg = plt.legend(loc='best', ncol=1, mode="expand", shadow=True, fancybox=True)
plt.show()
plt.plot(s, glr, label='glr')
plt.plot(s, msr, label='msr')
plt.xticks(s)
plt.xlabel('Negative Exponents of 10')
plt.ylabel('UNK tokens')
leg = plt.legend(loc='best', ncol=1, mode="expand", shadow=True, fancybox=True)
plt.show()
###Output
_____no_output_____
###Markdown
Analysis of the Pagination Dataset Table of Contents* [Preliminaries](Preliminaries) * [Main parameters](Main-parameters) * [Tools](Tools) * [Reading the instances](Reading-the-instances)* [Difficulty and average multiplicity](Difficulty-and-average-multiplicity) * [[Sec. 4.2] Measuring the difficulty of a given instance]([Sec.-4.2]-Measuring-the-difficulty-of-a-given-instance) * [Number of instances by difficulty](Number-of-instances-by-difficulty) * [Correlation between the difficulty and the size of the best pagination](Correlation-between-the-difficulty-and-the-size-of-the-best-pagination) * [[Sec. 4.3] Predicting the difficulty of a given instance]([Sec.-4.3]-Predicting-the-difficulty-of-a-given-instance) * [[Fig. 4] Statistical difficulty by average multiplicity]([Fig.-4]-Statistical-difficulty-by-average-multiplicity) * [[Fig. 5] Number of instances by average multiplicity]([Fig.-5]-Number-of-instances-by-average-multiplicity)* [[Sec. 4.4] Discussion]([Sec.-4.4]-Discussion) * [[Sec. 4.4.1] Behavior of the integer program]([Sec.-4.4.1]-Behavior-of-the-integer-program) * [[Sec. 4.4.2] Comparison of the heuristic methods]([Sec.-4.4.2]-Comparison-of-the-heuristic-methods) * [[Fig. 6] Performance of the main heuristics]([Fig.-6]-Performance-of-the-main-heuristics) * [[Fig. 7] Relative quality of the five main heuristics]([Fig.-7]-Relative-quality-of-the-five-main-heuristics) * [Exact algorithms vs. heuristics](Exact-algorithms-vs.-heuristics) * [Grouping GA vs. the other heuristics](Grouping-GA-vs.-the-other-heuristics) Preliminaries This notebook generates every plot and numerical result mentioned or alluded in Section 4 of [_Algorithms for the Pagination Problem, a Bin Packing with Overlapping Items_](http://arxiv.org/abs/1605.00558). Main parameters
###Code
from collections import OrderedDict
INPUT_PATH = "gauss/"
(MIN_PREFIX, MAX_PREFIX) = ("C015", "C055") # for instance filenames
OUTPUT_PATH = "plots/"
WINDOW = 150 # size of the subsets of instances used as a moving window
SOLVER_NAMES = OrderedDict([
("GeneticGroup", "Grouping GA"),
("GeneticStandard", "Standard GA"),
("OverloadAndRemove", "Overload-and-Remove"),
("OverloadAndRemovePresort", "Overload-and-Remove (with presort)"),
("BestFusion", "Best Fusion"),
("FirstFit", "First Fit"),
])
EXCLUDED_SOLVER_NAMES = {"OverloadAndRemovePresort"} # excluded from certain plots
solvers = ["solvers" + name for name in SOLVER_NAMES.keys()]
times = ["times" + name for name in SOLVER_NAMES.keys()]
###Output
_____no_output_____
###Markdown
Tools
###Code
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import Locator
np.warnings.filterwarnings("ignore", category=RuntimeWarning)
np.warnings.filterwarnings("ignore", category=UserWarning)
!pip install seaborn
import seaborn as sns
sns.set_style("white")
sns.set_context("paper", font_scale=2)
sns.set_palette(sns.color_palette("Set1", 5))
def plot_linear_regression(x, y):
fit = np.polyfit(x, y, deg=1)
plt.plot(x, fit[0] * x + fit[1])
correlation = round(x.corr(y), 3)
print("Pearson:", correlation)
return correlation
!pip install pandas --upgrade
###Output
_____no_output_____
###Markdown
Reading the instances Create a DataFrame from all the JSON files whose name lies between `MIN_PREFIX` and `MAX_PREFIX`.
###Code
import os, json
df = []
indexes = []
for filename in os.listdir(INPUT_PATH):
if not filename.endswith("json") or not MIN_PREFIX <= filename <= MAX_PREFIX:
continue
with open(os.path.join(INPUT_PATH, filename)) as f:
instances = json.loads(f.read())
indexes.extend([(filename, discriminant) for discriminant in range(len(instances))])
for instance in instances:
for (k, v) in list(instance.items()):
if isinstance(v, dict): # flatten any sub-dict with dot notation
for (sub_key, sub_value) in v.items():
instance[k + sub_key] = sub_value
del instance[k]
df.extend(instances)
df = pd.DataFrame(df, index=pd.MultiIndex.from_tuples(indexes, names=("filename", "i")))
df["best"] = df[["pageCount", "cplexOpt", "cplexUB"]].min(axis = 1) # add a column for the best known pagination size
df["cardinality"] = df["tiles"].apply(lambda tiles: sum(len(tile) for tile in tiles))
df_sorted_by_multiplicity = df.sort_values(by="avgMultiplicity") # for use with a moving window
print(df.info())
df.describe()
print("There are a %s instances." % len(df))
###Output
There are a 10986 instances.
###Markdown
Statistical difficulty and average multiplicity [Sec. 4.2] Measuring the statistical difficulty of a given instance **Conjecture 1.** The **statistical difficulty** of a given instance can be approximated by the difference between the average and the minimal number of pages in the paginations calculated by the various solvers. Correlation between the statistical difficulty and the size of the best pagination Note that this measure of statistical difficulty is intrinsically correlated to the size of the best pagination:
###Code
x = df[solvers].mean(axis=1) - df["best"]
y = df["best"]
plt.xlabel("Statistical difficulty")
plt.ylabel("Best pagination size")
plt.scatter(x, y, marker="o", s=1)
_ = plot_linear_regression(x, y)
###Output
Pearson: 0.792
###Markdown
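To make the measure concrete, here is a toy illustration (hypothetical sizes, not taken from the dataset): if six solvers return paginations of 23, 23, 24, 25, 25 and 26 pages for the same instance, its statistical difficulty is the mean minus the minimum, i.e. 24.33 - 23 ≈ 1.33.

```python
# Toy illustration of the difficulty measure (hypothetical sizes, not from the dataset)
sizes = [23, 23, 24, 25, 25, 26]                    # pagination sizes returned by the solvers
difficulty = sum(sizes) / len(sizes) - min(sizes)   # ~1.33
```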
The dispersion of the pagination sizes could have been measured in several other ways, for instance with the standard deviation (below).
###Code
x = df["avgMultiplicity"]
y = df[solvers].std(axis=1)
axes = plt.gca()
axes.set_xlim([0, 70])
plot_linear_regression(x, y)
plt.scatter(x, y, marker="o", s=1)
plt.xlabel("Average multiplicity")
plt.ylabel("Average standard deviation")
plt.grid()
plt.show()
###Output
Pearson: 0.695
###Markdown
Number of instances by statistical difficulty
###Code
result = df.groupby(round(2 * (df[solvers].mean(axis=1) - df["best"]))/2).size()  # difficulties bucketed to the nearest 0.5
result.plot(kind="bar")
plt.yscale("symlog")
plt.xlabel("Statistical difficulty")
plt.ylabel("Number of instances (sym-log scale)")
plt.show()
print("Number of instances per statistical difficulty:\n", result)
print("Average statistical difficulty: %.02f" % (df[solvers].mean(axis=1) - df["best"]).mean())
print("Median statistical difficulty: %.02f" % (df[solvers].mean(axis=1) - df["best"]).median())
###Output
_____no_output_____
###Markdown
[Sec. 4.3] Predicting the statistical difficulty of a given instance **Conjecture 2.** The statistical difficulty of a given random instance is strongly correlated to the density of its shared symbols, or **average multiplicity**. [Fig. 4] Statistical difficulty by average multiplicity
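As a toy illustration of the x-axis used below (a hypothetical instance, and assuming that the multiplicity of a symbol is the number of tiles that contain it, so that the average multiplicity is the total number of symbol occurrences divided by the number of distinct symbols):

```python
# Hypothetical instance with 3 tiles over the symbols {1, ..., 5}
tiles = [{1, 2, 3}, {2, 3, 4}, {3, 4, 5}]
symbols = set().union(*tiles)
avg_multiplicity = sum(len(t) for t in tiles) / len(symbols)   # 9 / 5 = 1.8
```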
###Code
plt.figure(figsize=(10,5))
x = df["avgMultiplicity"]
y = df[solvers].mean(axis=1) - df["best"]
axes = plt.gca()
axes.set_xlim([0, 70])
axes.set_ylim([-1, 9.5])
plot_linear_regression(x, y)
plt.scatter(x, y, marker="o", s=1)
plt.xlabel("Average multiplicity")
plt.ylabel("Average range (statistical difficulty)")
plt.grid()
plt.savefig(os.path.join(OUTPUT_PATH, "difficulty_by_multiplicity.pdf"), bbox_inches='tight')
plt.figure(figsize=(20, 10))
df["bitSize"] = df["symbolCount"] * df["tileCount"]
for (i, column) in enumerate(["symbolCount", "bitSize", "tileCount", "cardinality"], 1):
plt.subplot(2, 2, i)
x = df[column]
y = df[solvers].mean(axis=1) - df["best"]
if i in [1, 3]:
plt.ylabel("Average range (statistical difficulty)")
plt.scatter(x, y, marker="o", s=1)
correlation = plot_linear_regression(x, y)
plt.xlabel("%s (r = %s)" % (column, correlation))
plt.show()
###Output
Pearson: -0.082
Pearson: 0.366
Pearson: 0.563
Pearson: 0.77
###Markdown
[Fig. 5] Number of instances by average multiplicity
###Code
plt.figure(figsize=(10,6))
range_width = 2
ranges = np.arange(1, df["avgMultiplicity"].max() + range_width, range_width)
slices = pd.cut(df["avgMultiplicity"], ranges)
instances_per_slice = df.groupby(slices).size()
instances_per_slice.plot(kind="bar", width=0.9, color="#ffffbf")
cplex_instances = df[df["cplexOpt"].notnull() | df["cplexLB"].notnull() | df["cplexUB"].notnull()]
cplex_slices = pd.cut(cplex_instances["avgMultiplicity"], ranges)
cplex_instances.groupby(cplex_slices).size().plot(kind="bar", width=0.7, color='#abdda4')
cplex_solved_instances = df[df["cplexOpt"].notnull()]
cplex_solved_slices = pd.cut(cplex_solved_instances["avgMultiplicity"], ranges)
cplex_solved_instances.groupby(cplex_solved_slices).size().plot(kind="bar", width=0.5, color="#2b83ba")
plt.xlabel("Ranges of average multiplicity")
plt.ylabel("Number of instances (sym-log scale)")
plt.yscale('symlog')
axes = plt.gca()
axes.set_ylim(0, 3000)
plt.tick_params(axis='x', which='both', bottom='off', top='off')
axes.yaxis.grid(True)
plt.legend(["All instances", "Submitted to CPLEX", "Solved to optimality by CPLEX"])
plt.savefig(os.path.join(OUTPUT_PATH, "count_by_multiplicity.pdf"), bbox_inches='tight')
range_width = 1
ranges = np.arange(1, df["avgMultiplicity"].max() + range_width, range_width)
slices = pd.cut(df["avgMultiplicity"], ranges)
instances_per_slice = df.groupby(slices).size()
for start in (4, 23, 53):
n = instances_per_slice[range_width * (start - 1)]
print("There are %d instances whose average multiplicity lies between %s and %s." % (n, start, start + range_width))
(a, b) = (1, 9)
rate = 100.0 * sum(instances_per_slice[a-1:b-1]) / len(df)
print("%0.2f %% of the instances concentrate between average multiplicities %s and %s." % (rate, a, b))
###Output
51.29 % of the instances concentrate between average multiplicities 1 and 9.
###Markdown
[Sec. 4.4] Discussion [Sec. 4.4.1] Behavior of the integer linear program
###Code
cplex_instances = df[df["cplexOpt"].notnull() | df["cplexLB"].notnull() | df["cplexUB"].notnull()]
print("%s instances (%.2f %%) submitted to CPLEX." % (len(cplex_instances), 100.0 * len(cplex_instances)/len(df)))
print("CPLEX's success in less than one hour: %s instances (%.1f %%)." % (df["cplexOpt"].count(), 100.0 * df["cplexOpt"].count() / len(cplex_instances)))
for above in (13, 20):
cplex_instances_above = cplex_instances[df["avgMultiplicity"] > above]
print("CPLEX's success in less than one hour above an average multiplicity of %s: %.1f %%." % (above, 100.0 * cplex_instances_above["cplexOpt"].count() / len(cplex_instances_above)))
cplex_results = df[df["cplexOpt"].notnull() | df["cplexUB"].notnull()][["cplexOpt","cplexUB","pageCount"]]
print("All the %s instances for which CPLEX has found either a solution, either an upper bound:" % len(cplex_results))
cplex_results
###Output
All the 51 instances for which CPLEX has found either a solution, either an upper bound:
###Markdown
[Sec. 4.4.2] Comparison of the heuristic methods [Fig. 6] Performance of the main heuristics
###Code
x = pd.Series.rolling(df_sorted_by_multiplicity["avgMultiplicity"], WINDOW, center=True).mean()
plt.figure(figsize=(10,5))
axes = plt.gca()
axes.set_xlim([2, 52])
for time in times:
solver_name = time[len("times"):]
if solver_name in EXCLUDED_SOLVER_NAMES:
continue
y = pd.Series.rolling(df_sorted_by_multiplicity[time], WINDOW, center=True).mean()
plt.plot(x, y, label=SOLVER_NAMES[solver_name])
plt.yscale('log')
plt.xlabel("Average multiplicity (rolling mean on %s instances)" % WINDOW)
plt.ylabel("Execution time (seconds, log scale)")
plt.grid()
plt.savefig(os.path.join(OUTPUT_PATH, "speed_by_multiplicity.pdf"), bbox_inches='tight')
plt.legend(loc=7) # legend not plotted for the paper version
plt.show()
contents = [
df[times].min().map('{:,.2f}'.format),
df[times].max().map('{:,.2f}'.format),
df[times].mean().map('{:,.2f}'.format),
df[times].std().map('{:,.2f}'.format)
]
digest = pd.DataFrame(contents, index = ["min", "max", "mean", "std"])
digest.columns = SOLVER_NAMES.values()
print("Basic aggregations on execution times (in seconds):")
digest
###Output
Basic aggregations on execution times (in seconds):
###Markdown
[Fig. 7] Relative quality of the five main heuristics The outcomes are plotted at $y = \frac{\mathrm{best~size}}{\mathrm{size}}$, with $y=1$ corresponding to the best known solution (which is either the optimal or the best feasible solution found by CPLEX, or the smallest approximation calculated for the given instance).
###Code
x = pd.Series.rolling(df_sorted_by_multiplicity["avgMultiplicity"], WINDOW, center=True).mean()
plt.figure(figsize=(10,7))
axes = plt.gca()
axes.set_xlim([2, 52])
axes.set_ylim([0.74, 1.01])
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
for solver in solvers:
solver_name = solver[len("solvers"):]
if solver_name in EXCLUDED_SOLVER_NAMES:
continue
ratio = df_sorted_by_multiplicity["best"] / df_sorted_by_multiplicity[solver]
y = pd.Series.rolling(ratio, WINDOW, center=True).mean()
plt.plot(x, y, label=SOLVER_NAMES[solver_name])
plt.xlabel("Average multiplicity (rolling mean on %s instances)" % WINDOW)
plt.ylabel("Average pagination size vs. best known result")
plt.grid()
# move the legend to an empty place
legend = plt.legend(loc=7)
plt.draw()
bb = legend.legendPatch.get_bbox().inverse_transformed(axes.transAxes)
bb.set_points([[bb.x0 - 0.02, bb.y0 + 0.2], [bb.x1 - 0.02, bb.y1 + 0.2]])
legend.set_bbox_to_anchor(bb)
plt.savefig(os.path.join(OUTPUT_PATH, "relative_size_by_multiplicity.pdf"), bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Exact algorithms vs. heuristics The column `pageCount` gives the smallest pagination size found by the various **heuristics**:
###Code
assert len(df[df["pageCount"] != df[solvers].min(axis=1)]) == 0
###Output
_____no_output_____
###Markdown
Hence, the optimal value found by CPLEX may be less than this one:
###Code
suboptimal_instances_1 = df[df["cplexOpt"] < df["pageCount"]][["cplexOpt", "pageCount"] + solvers]
suboptimal_instances_1.columns = ["cplexOpt", "pageCount"] + list(SOLVER_NAMES.values())
print("The optimal solution is better than the best approximation for these %s instances:" % len(suboptimal_instances_1))
suboptimal_instances_1
###Output
The optimal solution is better than the best approximation for these 4 instances:
###Markdown
It may happen that the upper bound found by CPLEX is less than the best page count found by the heuristics. In this case, we know that there exists a better pagination (although CPLEX cannot prove its optimality):
###Code
suboptimal_instances_2 = df[df["cplexUB"] < df["pageCount"]][["cplexUB", "pageCount"] + solvers]
suboptimal_instances_2.columns = ["cplexOpt", "pageCount"] + list(SOLVER_NAMES.values())
print("For %s more instances, we know that the best approximation is not optimal:" % len(suboptimal_instances_2))
suboptimal_instances_2
###Output
For 2 more instances, we know that the best approximation is not optimal:
###Markdown
The column `best` gives the minimum pagination sizes found by the heuristics and CPLEX (including the upper bound):
###Code
df[df["best"] < df["pageCount"]][["best", "pageCount"]]
count = len(suboptimal_instances_1) + len(suboptimal_instances_2)
print("All in all, ILP improved on the heuristics in %s cases" % count, end=" ")
print("(%.02f %% of the %s selected instances)." % (100.0 * count / len(cplex_instances), len(cplex_instances)))
###Output
All in all, ILP improved on the heuristics in 6 cases (1.75 % of the 342 selected instances).
###Markdown
Grouping GA vs. the other heuristics
###Code
prefix = ["avgMultiplicity", "pageCount"]
columns = [
"solversGeneticGroup",
"solversGeneticStandard",
"solversOverloadAndRemove",
"solversOverloadAndRemovePresort"
]
bad_gga = df[df["pageCount"] < df["solversGeneticGroup"]][prefix + columns]
for column in columns[1:]:
bad_gga[column] = bad_gga[column][bad_gga[column] < bad_gga["solversGeneticGroup"]]
bad_gga.columns = prefix + [SOLVER_NAMES[column[len("solvers"):]] for column in columns]
print("In %.02f %% of the cases," % (100.0 - 100.0 * len(bad_gga) / len(df)),)
print("Grouping GA was the best heuristics, except on these %s instances" % len(bad_gga), end=" ")
print("(greater values erased for clarity, sorted by increasing average multiplicity).")
bad_gga.sort_values(by="avgMultiplicity").fillna("")
for column in bad_gga.columns[len(prefix) + 1:]:
count = bad_gga[column].count()
print("%s produced a better pagination than Grouping GA on %s instances (%.03f %%)." % (column, count, (100.0 * count / len(df))))
###Output
Standard GA produced a better pagination than Grouping GA on 4 instances (0.036 %).
Overload-and-Remove produced a better pagination than Grouping GA on 22 instances (0.200 %).
Overload-and-Remove (with presort) produced a better pagination than Grouping GA on 24 instances (0.218 %).
###Markdown
Entropy Analysis Prolog Imports
###Code
from importlib import reload
from math import log
import numpy as np # Numeric Python
import scipy.stats as stats # Distribution functions and stuff
from scipy.optimize import minimize
import sqlite3 as sql # To fetch data
import analysis # Own analysis tools
reload(analysis); # force reload of analysis, for it will be changed often
import seaborn as sb # Plots
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams["figure.figsize"] = analysis.a4_dims
import random
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Table Schemes
###Code
analysis.print_table_schemes(
'data/k3-v500-r4.1.db',
'experiment',
'algorithm_run',
'search_run',
'dist_1',
'dist_2'
)
###Output
TABLE experiment
NAME DATA_TYPE PRIMARY_KEY
id INTEGER 1
experiment_name TEXT 0
TABLE algorithm_run
NAME DATA_TYPE PRIMARY_KEY
id INTEGER 1
experiment_id INTEGER 0
solver TEXT 0
formula_fname TEXT 0
max_clause_len INTEGER 0
variables INTEGER 0
clauses INTEGER 0
cb REAL 0
time INTEGER 0
sat BOOL 0
TABLE search_run
NAME DATA_TYPE PRIMARY_KEY
id INTEGER 1
algorithm_run_id INTEGER 0
flips INTEGER 0
minimal_unsat INTEGER 0
last_unsat INTEGER 0
h_1 REAL 0
h_2 REAL 0
TABLE dist_1
NAME DATA_TYPE PRIMARY_KEY
id INTEGER 1
run_id INTEGER 0
label TEXT 0
variable INTEGER 0
measure REAL 0
TABLE dist_2
NAME DATA_TYPE PRIMARY_KEY
id INTEGER 1
run_id INTEGER 0
label TEXT 0
variable_1 INTEGER 0
variable_2 INTEGER 0
measure REAL 0
###Markdown
Analysis Entropy Distribution
###Code
query = """
SELECT search_run.flips, search_run.{}
FROM algorithm_run INNER JOIN search_run ON search_run.algorithm_run_id = algorithm_run.id
WHERE algorithm_run.experiment_id = ? AND search_run.last_unsat {}
"""
samples = 2
bins_1 = np.arange(4.0,6.25,0.05)
bins_2 = np.arange(6.0,10.25,0.05)
pdf = stats.norm.pdf
bounds = [(0.0001,None),(0.0001,None)]
theta_0 = lambda X: [np.average(X), np.var(X)]
with sql.connect('data/k3-v500-r4.2.db') as conn:
c = conn.cursor()
ids, = zip(*c.execute('SELECT id FROM experiment')) # Get experiment indices
ids = random.sample(ids, samples) # Choose three experiments randomly
print(list(c.execute(query.format('h_1', '= 0'),(1,))))
div = (lambda stuff: stuff[1]/stuff[0])  # normalize the entropy measure by the number of flips (h / flips)
#div = (lambda stuff: stuff[1])
XS_sat = [list(map(div,c.execute(query.format('h_1', '= 0'),(exp_id,)))) for exp_id in ids]
YS_sat = [list(map(div,c.execute(query.format('h_2', '= 0'),(exp_id,)))) for exp_id in ids]
XS_unsat = [list(map(div,c.execute(query.format('h_1', '> 0'),(exp_id,)))) for exp_id in ids]
YS_unsat = [list(map(div,c.execute(query.format('h_2', '> 0'),(exp_id,)))) for exp_id in ids]
print(YS_unsat)
#figX, axesX = plt.subplots(1,samples)
#for i,X in enumerate(XS_sat):
# sb.distplot(X, label = 'Success', ax = axesX[i], hist=True, bins=bins_1)
#res = minimize(
# fun = lambda args: -analysis.log_likelihood(lambda x: pdf(x, *args), X),
# x0 = theta_0(X),
# bounds = bounds,
#)
#if res.success:
# loc, scale = res.x
# axesX[i].plot(bins_1, np.vectorize(lambda x: pdf(x, loc, scale))(bins_1))
#else:
# print(loc, scale)
#for i,X in enumerate(XS_unsat):
# sb.distplot(X, label = 'Failure', ax = axesX[i], hist=True, bins=bins_1)
#res = minimize(
# fun = lambda args: -analysis.log_likelihood(lambda x: pdf(x, *args), X),
# x0 = theta_0(X),
# bounds = bounds,
#)
#if res.success:
# loc, scale = res.x
# axesX[i].plot(bins_1, np.vectorize(lambda x: pdf(x, loc, scale))(bins_1))
#else:
# print(loc, scale)
#plt.legend()
figY, axesY = plt.subplots(1,samples)
for i,Y in enumerate(YS_sat):
sb.distplot(Y, label = 'Success',ax = axesY[i], hist=True)
#res = minimize(
# fun = lambda args: -analysis.log_likelihood(lambda x: pdf(x, *args), Y),
# x0 = theta_0(Y),
# bounds = bounds,
#)
#if res.success:
# loc, scale = res.x
# axesY[i].plot(bins_2, np.vectorize(lambda x: pdf(x, loc, scale))(bins_2))
#else:
# print(loc, scale)
for i,Y in enumerate(YS_unsat):
sb.distplot(Y, label = 'Failure',ax = axesY[i], hist=True)
#res = minimize(
# fun = lambda args: -analysis.log_likelihood(lambda x: pdf(x, *args), Y),
# x0 = theta_0(Y),
# bounds = bounds,
#)
#if res.success:
# loc, scale = res.x
# axesY[i].plot(bins_2, np.vectorize(lambda x: pdf(x, loc, scale))(bins_2))
#else:
# print(loc, scale)
plt.legend()
for i,x in enumerate([11,33,44]):
print(i,x)
###Output
0 11
1 33
2 44
###Markdown
Trying out whether I can shuffle two arrays with dimensions like a.shape (3, 2, 3) and b.shape (3, 2). I am trying to shuffle a and b in unison, so that if row 2 of a goes to row 1 of a, the same movement is applied to b
###Code
a = np.array([[[ 0., 1., 2.],
[ 3., 4., 5.]],
[[ 6., 7., 8.],
[ 9., 10., 11.]],
[[ 12., 13., 14.],
[ 15., 16., 17.]]])
b = np.array([[ 0., 1.],
[ 2., 3.],
[ 4., 5.]])
print a.shape
print b.shape
###Output
(3, 2, 3)
(3, 2)
###Markdown
Merge the 2 arrays into 1 array
###Code
c = np.c_[a.reshape(len(a), -1), b.reshape(len(b), -1)]
print c
###Output
[[ 0. 1. 2. 3. 4. 5. 0. 1.]
[ 6. 7. 8. 9. 10. 11. 2. 3.]
[ 12. 13. 14. 15. 16. 17. 4. 5.]]
###Markdown
Extract the 2 arrays out
###Code
a2 = c[:, :a.size//len(a)].reshape(a.shape)
b2 = c[:, a.size//len(a):].reshape(b.shape)
print a2
print b2
###Output
[[[ 0. 1. 2.]
[ 3. 4. 5.]]
[[ 6. 7. 8.]
[ 9. 10. 11.]]
[[ 12. 13. 14.]
[ 15. 16. 17.]]]
[[ 0. 1.]
[ 2. 3.]
[ 4. 5.]]
###Markdown
Shuffle and see the output.
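The in-place shuffle of `c` below also reorders `a2` and `b2`, because they were created as views into `c`. As a side note, an equivalent and arguably simpler approach (a sketch, not part of the original experiment) is to draw one permutation of the row indices and apply it to both arrays:

```python
perm = np.random.permutation(len(a))  # one shared permutation of the row indices
a_shuffled = a[perm]                  # rows of a reordered
b_shuffled = b[perm]                  # rows of b reordered in exactly the same way
```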
###Code
np.random.shuffle(c)
print a2
print b2
###Output
[[[ 6. 7. 8.]
[ 9. 10. 11.]]
[[ 12. 13. 14.]
[ 15. 16. 17.]]
[[ 0. 1. 2.]
[ 3. 4. 5.]]]
[[ 2. 3.]
[ 4. 5.]
[ 0. 1.]]
###Markdown
Analysis of the data and the constructed contours. Loading the required libraries and the available data
###Code
import os
import pandas as pd
from tqdm.notebook import tqdm
from sequence import *
datas = []
for subdir, dirs, files in os.walk('datasets'):
for file in files:
filepath = subdir + os.sep + file
if filepath.endswith(".dat"):
datas.append(np.loadtxt(filepath))
n = len(datas)
###Output
_____no_output_____
###Markdown
Investigating how the number of gaps and loops depends on the size of the input data
###Code
plot_data = []
for data in tqdm(datas):
seq = Sequence(data)
_, nuniq = seq.is_data_unique(return_not_unique=True)
_, md = seq.sorted(key='nn_md').have_missed_data(return_num=True)
_, loops = seq.sorted(key='nn_loops').have_loops(return_num=True)
plot_data.append([seq.get_data_len(), len(md), loops, len(nuniq)])
plot_data = np.array(sorted(plot_data))
fig, ax = plt.subplots(1,2, figsize=(15, 4))
ax[0].plot(plot_data[:,0], plot_data[:,1])
ax[0].set_title("Зависимость количества пропусков от размера входных данных")
ax[0].set_xlabel("Размер входных данных")
ax[0].set_ylabel("Количество пропусков")
ax[1].plot(plot_data[:,0], plot_data[:,2])
ax[1].set_title("Зависимость количества петель от размера входных данных")
ax[1].set_xlabel("Размер входных данных")
ax[1].set_ylabel("Количество петель")
plt.savefig("statistic/data_analysis.png")
plt.show()
###Output
_____no_output_____
###Markdown
Which requirements are satisfied by the contours built by the algorithms. A function for saving tables in a format convenient for inserting into the thesis
###Code
def save_table_tex(table, table_name='output', fmt='%.5f'):
np.savetxt("statistic/"+table_name+".txt", table, fmt=fmt,
delimiter=' & ', newline=" \\\\ \n\hline\n")
###Output
_____no_output_____
###Markdown
Collecting the information for the table
###Code
results_req = {}
for alg in tqdm(sort_dict):
res = np.array([*Sequence(datas[0]).sorted(key=alg).is_contour(return_all=True).values()]).astype(int)
for i, data in enumerate(datas[1:]):
res1 = np.array([*Sequence(data).sorted(key=alg).is_contour(return_all=True).values()]).astype(int)
res += res1
res[0] = n - res[0]
res[1] = n - res[1]
results_req[alg] = res
###Output
_____no_output_____
###Markdown
Table with the number of datasets where the corresponding algorithms met the requirements
###Code
def highlight_max(data, color='lightgreen'):
'''
    highlight cells with a success rate above 0.89 in a Series or DataFrame
'''
attr = 'background-color: {}'.format(color)
if data.ndim == 1: # Series from .apply(axis=0) or axis=1
is_max = data > 0.89
return [attr if v else '' for v in is_max]
else: # from .apply(axis=None)
is_max = data['Is contour'] > 0.89
return pd.DataFrame(np.where(is_max, attr, ''),
index=data.index, columns=data.columns)
req_name = ['No missed data', 'No loops', 'Is single contour', 'Solve the problem']
df_req = pd.DataFrame(data=results_req).T
df_req.columns = req_name
df_req = df_req.div(n)
save_table_tex(df_req.to_numpy(),'req.txt')
# ds = df_req.style.apply(highlight_max, subset=['Is contour'])
df_req.style.apply(highlight_max, subset=['Solve the problem']).format("{:.0%}")
###Output
_____no_output_____
###Markdown
Lengths of the resulting contours. Output of the contour lengths produced by the algorithms that solved the problem in at least 90% of cases
###Code
good_algs = ['nn_no_loops', 'nn_21_no_loops', 'polar', 'ch_no_loops', 'best']
results_len = {}
for ind,data in enumerate(tqdm(datas)):
res = []
seq = Sequence(data)
for alg in good_algs:
res.append(seq.sorted(key=alg).get_contour_len())
results_len[ind+1] = res
alg_name = ['Improved nearest neighbour algorithm', 'Insertion of the second contour into the first', 'Sorting by polar coordinates', 'Insertion of points into the convex hull', 'Combination of algorithms']
df = pd.DataFrame(data=results_len).T
df.columns = alg_name
save_table_tex(df.to_numpy(),'length.txt')
df.style.highlight_min(color='lightgreen',axis = 1)
###Output
_____no_output_____
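###Markdown
To summarise the table above, one could also count how often each algorithm produces the shortest contour; a short sketch based on the `df` built in the previous cell:
###Code
# For each dataset (row), find the algorithm with the minimal contour length
# and count how many datasets each algorithm wins.
shortest = df.idxmin(axis=1).value_counts()
print(shortest)
###Output
_____no_output_____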
###Markdown
Motivation In this analysis we will have a look at both the performance of the Rust stereo delay compared to its C counterpart and the resulting audio file. Prerequisites
###Code
install.packages(c("seewave", "signal", "tuneR", "ggplot2", "microbenchmark"))
library(seewave)
library(tuneR)
library(ggplot2)
library(microbenchmark)
###Output
_____no_output_____
###Markdown
Performance comparison Despite all the syntactic sugar, the memory safety, and the feeling of being part of the 21st century, the most important feature of a LADSPA plugin written in `Rust` should be its performance. Since it uses the `C` ABI and the compiled objects can be called from `C` without any overhead, I expected it to run almost as fast as its `C` counterpart. Let's first try it with a wrapper around a wrapper. `microbenchmark` is an `R` function calling `apply_delay.sh` 200 times and reporting summary statistics about the time taken during execution. This script is a wrapper around the `applyplugin` binary shipped with the [ladspa_sdk](https://www.ladspa.org/download/index.html). It sets up a LADSPA host, plays back the input audio file, and pipes the result - modulated by the supplied LADSPA plugin - into the output audio file.
###Code
benchmark.c <- microbenchmark(
system2("bash", c("apply_delay.sh", "delay_snare_c.wav",
"./c/delay_stereo.so", "c_delay_5s_stereo"),
stdout = FALSE), times = 200)
benchmark.rust <- microbenchmark(
system2("bash", c("apply_delay.sh", "delay_snare_rust.wav",
"./rust/target/release/librust_delay_5s_stereo.so",
"rust_delay_5s_stereo"),
stdout = FALSE), times = 200)
print(benchmark.c)
print(benchmark.rust)
###Output
Unit: milliseconds
expr
system2("bash", c("apply_delay.sh", "delay_snare_rust.wav", "./rust/target/release/librust_delay_5s_stereo.so", "rust_delay_5s_stereo"), stdout = FALSE)
min lq mean median uq max neval
20.89194 21.31846 22.11495 21.6921 22.11272 30.38315 200
###Markdown
Well, this doesn't look good at all. The `Rust` version takes a lot longer than the `C` counterpart. Also mind the fact that we used a wrapper around a wrapper! So, the relative increase in the time taken by processing the plugin itself is probably a lot larger than 22.11/17.79. This is not good at all and more or less crosses a red line, making `Rust` unsuitable for writing LADSPA plugins. Comparing the results But let's have a look at the original and the delayed samples.
###Code
sample.original <-
readWave("./snare.wav")
sample.delayed.rust <-
readWave("./delay_snare_rust.wav")
sample.delayed.c <-
readWave("./delay_snare_c.wav")
###Output
_____no_output_____
###Markdown
Plot the files into one figure. Since we expect the `Rust` and `C` plugins to produce exactly the same result, both traces should be identical and one should hide the other.
###Code
plot.data <- data.frame(
time = rep(seq(1, length(sample.original@left))/
[email protected], 6 ),
audio.data = c(sample.original@left, sample.delayed.rust@left,
sample.delayed.c@left, sample.original@right,
sample.delayed.rust@right, sample.delayed.c@right),
sample = rep(c(rep("original", length(sample.original@left)),
rep("rust", length(sample.original@left)),
rep("c", length(sample.original@left))), 2),
channel = c(rep("left", length(sample.original@left) * 3),
rep("right", length(sample.original@left) * 3)))
ggplot(data = plot.data, aes(x = time, y = audio.data,
color = sample)) +
geom_line() + facet_grid(channel ~ ., scales = "free") +
theme_bw()
###Output
_____no_output_____
###Markdown
Well, this looks alright. The results of both channels are exactly what was expected. Let's make sure the output of both plugins is the same by comparing the underlying data.
###Code
difference.data <- data.frame(
left.channel = sample.delayed.rust@left - sample.delayed.c@left,
right.channel = sample.delayed.rust@right -
sample.delayed.c@right)
if (any(max(difference.data) != c(0,0))) {
warning("The Rust and C plugin do not yield the same result!")
}
###Output
_____no_output_____
###Markdown
Measurement Project - Target: **grunt-contrib-compress**
###Code
%load_ext babel
%%babel
import * as d3 from "d3";
import * as fs from "fs";
%%babel
bytes = fs.readFileSync("result_formatted.json");
data = JSON.parse(bytes)
console.log(data != null ? "Data Loaded" : "Problem with JSON");
%%babel
data
1
data[0]
###Output
_____no_output_____
###Markdown
Configuring the local Dask cluster
###Code
import dask_kubernetes
cluster = dask_kubernetes.KubeCluster()
cluster.adapt(minimum=1, maximum=10)
cluster
client = dask.distributed.Client(cluster)
client
def get_dask_dataframe(
dirname: str,
start: Optional[datetime.date] = None,
end: Optional[datetime.date] = None,
index: Optional[bool] = False,
) -> dask.dataframe.DataFrame:
"""Select the data frame to process between two dates"""
if start is None:
start = datetime.date(1995, 1, 1)
if end is None:
end = datetime.date.today()
ddf = dask.dataframe.read_parquet(dirname,
engine="pyarrow",
filters=[('year', '>=', start.year),
('month', '>=', start.month),
('year', '<=', end.year),
('month', '<=', end.month)])
ddf = ddf[(ddf.datetime > start.isoformat())
& (ddf.datetime <= end.isoformat())]
if index:
ddf = ddf.set_index("datetime")
return ddf
###Output
_____no_output_____
###Markdown
Geographic selection
###Code
def _select_area(ddf: dask.dataframe.DataFrame, box: pyinterp.geodetic.Box2D):
"""Applies geographic selection to a DataFrame of a partition"""
return list(
box.covered_by(ddf.longitude.values, ddf.latitude.values).astype(bool))
def select_area(ddf: dask.dataframe.DataFrame, box: pyinterp.geodetic.Box2D):
"""Applies geographic selection to a DataFrame"""
return ddf.map_partitions(_select_area, box)
# Path the Parquet dataset
path = "gs://pangeo-cnes/argo"
# Reading a small dataset (You can increase the size of data to read, but it
# will take longer on our virtual machine)
ddf = get_dask_dataframe(
path,
datetime.date(1990, 1, 1),
datetime.date(2019, 2, 1))
# Creation of the data selection box.
area = pyinterp.geodetic.Box2D(
pyinterp.geodetic.Point2D(-80, 7),
pyinterp.geodetic.Point2D(0,60))
area
# Calculation of the query
df = ddf[select_area(ddf, area)].compute()
# Visualization of the result
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
%matplotlib inline
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(central_longitude=180))
sc = ax.scatter(
df.longitude,
df.latitude,
1,
c=[item[0] for item in df.temp],
transform=ccrs.PlateCarree(),
cmap='jet')
ax.coastlines()
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.COASTLINE)
fig.colorbar(sc)
###Output
_____no_output_____
###Markdown
Selection by platform number
###Code
df = ddf[ddf.platform_number.isin(['2901216', '6900381', '5901026', '2902557'])]
df = df[['datetime', 'longitude', 'latitude', 'temp']]
df = df.compute()
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(central_longitude=180))
sc = ax.scatter(
df.longitude,
df.latitude,
1,
c=[item[0] for item in df.temp],
transform=ccrs.PlateCarree(),
cmap='jet')
ax.coastlines()
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.COASTLINE)
fig.colorbar(sc)
###Output
_____no_output_____
###Markdown
Computing a pressure anomaly
###Code
ddf = get_dask_dataframe(
path,
datetime.date(1990, 1, 1),
datetime.date(2019, 2, 1))
def pressure_anomalies(df):
"""Calculates pressure anomalies"""
return df.pres - df.pres_adjusted
# Here only columns containing the longitude and latitude of the floats are
# selected.
df = ddf[['longitude', 'latitude']].compute()
df['anomalies'] = ddf.map_partitions(
pressure_anomalies, meta=(None, 'f8')).compute()
# The average anomaly is calculated
df['mean_anomalies'] = df['anomalies'].map(
lambda series: np.nan if np.all(np.isnan(series)) else np.nanmean(series))
df
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(central_longitude=180))
sc = ax.scatter(
df.longitude,
df.latitude,
1,
c=df.mean_anomalies,
transform=ccrs.PlateCarree(),
cmap='jet',
vmin=-1,
vmax=1)
ax.coastlines()
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.COASTLINE)
fig.colorbar(sc)
###Output
_____no_output_____
###Markdown
SLA interpolation on Argo float positions
###Code
class GridSeries:
"""Handles a series of grids stored in zarr format. This series is a
time series."""
def __init__(self, ds):
self.ds = ds
self.series, self.dt = self._load_ts()
@staticmethod
def _is_sorted(array):
indices = np.argsort(array)
return np.all(indices == np.arange(len(indices)))
def _load_ts(self):
"""Loading the time series into memory."""
time = self.ds.time
assert self._is_sorted(time)
series = pd.Series(time)
frequency = set(np.diff(series.values.astype("datetime64[s]")).astype("int64"))
if len(frequency) != 1:
raise RuntimeError(
"Time series does not have a constant step between two "
f"grids: {frequency} seconds")
return series, datetime.timedelta(seconds=float(frequency.pop()))
def load_dataset(self, varname, start, end):
"""Loading the time series into memory for the defined period.
Args:
varname (str): Name of the variable to be loaded into memory.
start (datetime.datetime): Date of the first map to be loaded.
end (datetime.datetime): Date of the last map to be loaded.
Return:
pyinterp.backends.xarray.Grid3D: The interpolator handling the
interpolation of the grid series.
"""
if start < self.series.min() or end > self.series.max():
raise IndexError(
f"period [{start}, {end}] out of range [{self.series.min()}, "
f"{self.series.max()}]")
first = start - self.dt
last = end + self.dt
selected = self.series[(self.series >= first) & (self.series < last)]
print(f"fetch data from {selected.min()} to {selected.max()}")
data_array = ds[varname].isel(time=selected.index)
return pyinterp.backends.xarray.Grid3D(data_array)
def interpolate(df, grid_series, varname):
"""Interpolate a variable 'varname' described by the time series
'grid_series' for the locations provided in the DataFrame 'df'"""
if not len(df):
return np.array([])
# The DataFrame must be ordered by the time axis
df = df.set_index("datetime")
# The time axis is divided into monthly periods
period_start = df.groupby(df.index.to_period('M'))["sla"].count().index
periods = []
end = None
# Calculates the period required to interpolate the data from the provided
# time series
for start, end in zip(period_start, period_start[1:]):
start = start.to_timestamp()
        if start < grid_series.series.iloc[0]:
            start = grid_series.series.iloc[0]
end = end.to_timestamp()
periods.append((start, end))
if end is None:
end = period_start[0].to_timestamp()
periods.append((end, df.index[-1] + datetime.timedelta(seconds=3600)))
# Finally, the data on the different periods identified are interpolated.
result = []
for start, end in periods:
interpolator = grid_series.load_dataset(varname, start, end)
mask = (df.index >= start) & (df.index < end)
selected = df.loc[mask, ["longitude", "latitude"]]
result.append(
interpolator.trivariate(dict(
longitude=selected["longitude"].values,
latitude=selected["latitude"].values,
time=selected.index.values),
interpolator="inverse_distance_weighting",
num_threads=1))
return pd.Series(np.hstack(result), df.index)
# Loading the time series
import intake
cat = intake.Catalog("https://raw.githubusercontent.com/pangeo-data/pangeo-datastore"
"/master/intake-catalogs/ocean.yaml")
ds = cat["sea_surface_height"].to_dask()
ds
# Drop the unused 'crs' variable before building the grid series
ds = ds.drop("crs")
grid_series = GridSeries(ds)
# Select the data from dataset
ddf = get_dask_dataframe(
path,
datetime.date(1990, 1, 1),
datetime.date(2019, 1, 2))
# Calculation of SLA
sla = ddf.map_partitions(interpolate, grid_series, 'sla', meta=('result', np.float64)).compute()
# Generation of a DataFrame containing the float positions and the
# interpolated SLA.
df = ddf[["datetime", "longitude", "latitude"]].compute()
df = df.join(sla, on="datetime")
###Output
_____no_output_____
###Markdown
Visualization of the result
###Code
first = df.datetime.min()
last = df.datetime.max()
size = (df.datetime - first) / (last-first)
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(central_longitude=180))
sc = ax.scatter(
df.longitude,
df.latitude,
s=size*100,
c=df.result,
transform=ccrs.PlateCarree(),
cmap='jet')
ax.coastlines()
ax.set_title("Time series of SLA "
"(larger points are closer to the last date)")
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.COASTLINE)
ax.set_extent([80, 100, 13.5, 25], crs=ccrs.PlateCarree())
fig.colorbar(sc)
###Output
_____no_output_____
###Markdown
Analysis notebook for: How much research shared on Facebook is hidden from public view? This notebook produces all results and figures in the article. Figures are plotted to the *figures/* directory. In order to reproduce the plots without interacting with the notebook, use `jupyter nbconvert --execute analysis.ipynb`. **Outline**
###Code
from pathlib import Path
import gspread
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from gspread_dataframe import get_as_dataframe, set_with_dataframe
from matplotlib import ticker
from matplotlib.colors import ListedColormap
from matplotlib_venn import venn2, venn3, venn3_circles
from oauth2client.service_account import ServiceAccountCredentials
from scipy import stats
from scipy.optimize import curve_fit
from tqdm.auto import tqdm
tqdm.pandas()
# Implementation of partial log binning following Milojević (2010)
def thresh(bin_size):
x = 1
while True:
diff = np.log10(x+1) - np.log10(x)
if diff < bin_size:
return x +1
x = x + 1
def partial_log_binning(data_counts, bin_size=0.1):
n_bins = 1/bin_size
binning_threshold = thresh(bin_size)
log_data = np.log10(data_counts)
log_index = np.log10(log_data.index)
    logbins = np.linspace(np.log10(binning_threshold)+0.1,
                          np.log10(data_counts.index.max()),
                          int(((np.log10(data_counts.index.max())-np.log10(binning_threshold)+0.1)//0.1)+1))
binned_xs = []
binned_vals = []
for i in range(1, binning_threshold+1):
if i in log_data.index:
binned_vals.append(log_data.loc[i])
binned_xs.append(np.log10(i))
for b in logbins:
vals = (b-.05 <= log_index) & (log_index < b+.05)
vs = data_counts[vals]
if len(vs)>0:
n = np.ceil(10**(b+.05) - 10**(b-.05))
if n == 0:
continue
binned_vals.append(np.log10(vs.sum()/n))
binned_xs.append(b)
return binned_xs, binned_vals
###Output
_____no_output_____
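###Markdown
As a sanity check, the binning helpers can be exercised on synthetic data before applying them to the engagement counts; this is a hypothetical example and not part of the original analysis:
###Code
# Hypothetical smoke test: value counts of a heavy-tailed integer sample.
rng = np.random.RandomState(0)
sample = np.floor(rng.pareto(1.5, size=10000)).astype(int) + 1
sample_counts = pd.Series(sample).value_counts()
xs, vals = partial_log_binning(sample_counts, bin_size=0.1)
print(len(xs), len(vals))
###Output
_____no_output_____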
###Markdown
Configuration
###Code
plt.rcParams.update({
'font.family':'sans-serif',
'font.size': 16.0,
'text.usetex': False,
'figure.figsize': (11.69,8.27)
})
# Seaborn styles
sns.set_style("whitegrid")
# Color palette
cm = "Paired"
cp3 = sns.color_palette(cm, 3)
cp10 = sns.color_palette(cm, 10)
### Optional ###
# Set up GSpread connection to push dataframes to Google Spreadsheets
# Instructions can be found at https://gspread.readthedocs.io/en/latest/
# scope = ['https://spreadsheets.google.com/feeds',
# 'https://www.googleapis.com/auth/drive']
# credentials = ServiceAccountCredentials.from_json_keyfile_name('My Project-d9fa71152fe8.json', scope)
# gc = gspread.authorize(credentials)
# sh = gc.open("PLOS Paper - Tables")
push_to_gspread = False
###Output
_____no_output_____
###Markdown
Load data, preprocessing, dropping years + bad results
###Code
articles_csv = "data/articles.csv"
responses_csv = "data/responses.csv"
figs = Path("figures")
articles = pd.read_csv(articles_csv, index_col="doi", parse_dates=['publication_date'])
all_responses = pd.read_csv(responses_csv, index_col="id", parse_dates=['received_at', 'og_updated_time', 'publication_date', 'added_on'])
# add year and metrics
all_responses = all_responses.merge(articles[['year', 'AES', 'AER', 'AEC', 'discipline']], left_on="doi", right_index=True, how="left")
# Limit responses to those articles that received some forms of engagement
responses = all_responses
responses = responses.replace(0, np.nan)
responses = responses.dropna(subset=['shares', 'reactions', 'comments'], how="all")
all_shares = set(articles['AES'].dropna().index.tolist())
all_reactions = set(articles['AER'].dropna().index.tolist())
all_comments = set(articles['AEC'].dropna().index.tolist())
any_engagement = all_shares.union(all_reactions).union(all_comments)
metrics = ['AES', 'AER', 'AEC']
###Output
_____no_output_____
###Markdown
Methods
###Code
df = pd.DataFrame(columns=["Count"])
df.loc['Number of articles', "Count"] = len(articles)
df.loc['Number of URLs', "Count"] = len(articles) * 10
df.loc['--------', "Count"] = None
df.loc['Number of successful responses', "Count"] = len(all_responses)
df.loc['Number of non-zero responses', "Count"] = len(responses)
df.loc['Number of zero-responses', "Count"] = len(all_responses) - len(responses)
df.loc['---------', "Count"] = None
df.loc['Number of unique URLs', "Count"] = responses.url.nunique()
df.loc['Number of unique queries', "Count"] = responses.query_id.nunique()
df.loc['Number of unique OG IDs', "Count"] = responses.og_id.nunique()
df.loc['Number of unique DOIs', "Count"] = responses.doi.nunique()
df
articles[metrics].describe().round(2)
###Output
_____no_output_____
###Markdown
Results What is the overall coverage of articles?
###Code
temp = articles[metrics].dropna(how="all")
df = articles[metrics].count().to_frame("n")
df["% (n={})".format(len(articles))] = df['n'].div(len(articles)/100).round(2)
df['% (n={})'.format(len(temp))] = df['n'].div(len(temp)/100).round(2)
df
###Output
_____no_output_____
###Markdown
Distribution of articles with shares, reactions, and comments
###Code
v = venn3(subsets= [all_shares, all_reactions, all_comments],
set_labels=('', '', ''),
subset_label_formatter=lambda x: "{} ({:.1f}%)".format(x, 100*x/len(any_engagement)));
c=venn3_circles(subsets= [all_shares, all_reactions, all_comments], linewidth=0)
c[0].set_lw(.9)
c[0].set_ls('-.')
v.get_patch_by_id('100').set_color(cp3[0])
v.get_patch_by_id('010').set_color(cp3[1])
v.get_patch_by_id('001').set_color(cp3[2])
v.get_patch_by_id('110').set_color(np.add(cp3[0],cp3[1])/2)
v.get_patch_by_id('011').set_color(np.add(cp3[1],cp3[2])/2)
v.get_patch_by_id('101').set_color(np.add(cp3[0],cp3[2])/2)
v.get_patch_by_id('111').set_color(np.add(np.add(cp3[1],cp3[0]), cp3[2]) / 3)
for text in v.set_labels:
text.set_fontsize(12)
for text in v.subset_labels:
text.set_fontsize(14)
for text in v.set_labels:
text.set_fontsize(10)
for text in v.subset_labels:
text.set_fontsize(12)
plt.gca().legend(handles=[v.get_patch_by_id('100'), v.get_patch_by_id('010'), v.get_patch_by_id('001')],
labels=["Shares", "Reactions", "Comments"], prop={'size': 12});
###Output
_____no_output_____
###Markdown
What does the breakdown of URLs per article look like?
###Code
cov_urls_counts = responses[['doi', 'og_id']].groupby("doi").count().og_id.value_counts().reset_index()
cov_urls_counts['%'] = 100 * cov_urls_counts.og_id.div(cov_urls_counts.og_id.sum())
cov_urls_counts.columns = ["Number of URLs", "Articles", "Articles [%]"]
cov_urls_counts = cov_urls_counts.set_index("Number of URLs")
if push_to_gspread:
wks = sh.worksheet("Coverage - Number of URLs")
set_with_dataframe(wks, cov_urls_counts.round(1).reset_index())
cov_urls_counts.round(1)
x = responses[['doi', 'og_id']].groupby("doi").nunique().og_id.value_counts().reset_index()
x['%'] = 100*x.og_id.div(x.og_id.sum())
x.columns = ["Objects per Article", "Articles", "Articles [%]"]
x = x.set_index("Objects per Article")
x.round(1)
###Output
_____no_output_____
###Markdown
Which URLs were used to share articles?
###Code
cov_urls_types = responses.type.value_counts().reset_index()
cov_urls_types['%'] = 100*cov_urls_types.type.div(cov_urls_types.type.sum())
cov_urls_types.columns = ["URL Type", "FB Objects", "FB Objects [%]"]
cov_urls_types = cov_urls_types.set_index("URL Type")
if push_to_gspread:
wks = sh.worksheet("Coverage - URL Types")
set_with_dataframe(wks, cov_urls_types.round(1).reset_index())
cov_urls_types.round(1)
# Number of FB objects per DOI
n_responses_per_doi = responses[['doi', 'og_id']].groupby("doi")["og_id"].nunique()
# DOIs with multiple FB objects
dois_with_mult_ogids = n_responses_per_doi[n_responses_per_doi>1].keys()
# Responses of DOIs with more FB objects
y = responses[responses.doi.isin(dois_with_mult_ogids)]
# URL types of those articles with more than one response
z = y[['doi', 'og_id', 'type']].groupby(["doi", "og_id"])['type'].apply(lambda x: ", ".join(sorted(x))).reset_index()
# Concat URL type names
zz = z.groupby("doi")['type'].apply(lambda x: " -- ".join(sorted(x)))
zz.value_counts().head(10).to_frame("Articles")
# Number of articles where a PDF caused an extra FB object
zz.map(lambda x: "pdf" in x).sum()
###Output
_____no_output_____
###Markdown
Did the type of shared URLs change across years?
###Code
df = responses.groupby(['type', 'year']).size().to_frame('size').reset_index()
df = df.pivot(columns="year", index="type", values="size")
df = df.apply(lambda x: 100*x/x.sum()).sort_values(by=2017, ascending=False)
df.round(1)
df = responses.groupby(['type', 'year']).size().to_frame('size').reset_index()
sort_order = df.groupby("type")['size'].sum().sort_values(ascending=False).index.tolist()
year_counts = df.groupby("year")['size'].sum()
df['%'] = df.apply(lambda x: 100*x['size']/(year_counts[x['year']]), axis=1)
sns.barplot(x="type", y="%", hue="year", data=df, order=sort_order)
sns.despine(left=True, right=True, top=True)
###Output
_____no_output_____
###Markdown
Do the types of shared URLs vary across disciplines?
###Code
url_types_by_disc = responses.groupby(["discipline", "type"])['og_id'].count()
url_types_by_disc = url_types_by_disc.reset_index().pivot(columns="type", index="discipline", values="og_id")
url_types_by_disc = url_types_by_disc.apply(lambda x: x.div(x.sum()), axis=1)
url_types_by_disc.round(2)
url_types_by_disc = url_types_by_disc.rank(method="min", ascending=False, axis=1).sort_values(axis=1, by="Clinical Medicine")
url_types_by_disc
sns.heatmap(url_types_by_disc, cmap="PuBu", annot=True, cbar=False)
###Output
_____no_output_____
###Markdown
What kind of engagement did the articles receive?
###Code
articles[metrics].describe()
pdf = articles[metrics].dropna(how="any")
sns.boxenplot(x="variable", y="value", data=pdf.melt())
plt.yscale("log")
yticks = [1, 10, 100, 1000, 10000]
plt.yticks(yticks, yticks);
plt.xlabel("")
plt.ylabel("Engagement counts")
sns.despine(top=True, left=True, right=True, bottom=True)
sort_order = base.dropna(how="any", subset=metrics).groupby("discipline").AES.mean().sort_values().keys()
pdf = base.dropna(how="any", subset=metrics)
pdf = pdf.melt(id_vars="discipline", value_vars=metrics)
sns.boxenplot(x="discipline", hue="variable", y="value", data=pdf, order=sort_order)
plt.yscale("log")
yticks = [1, 10, 100, 1000, 10000]
plt.yticks(yticks, yticks);
plt.xticks(rotation=90)
plt.xlabel("")
plt.ylabel("Engagement counts")
sns.despine(top=True, left=True, right=True, bottom=True)
artics = articles[(articles.AES.isna()) & ((~articles.AER.isna()) | (~articles.AEC.isna()))]
artics.describe()
###Output
_____no_output_____
###Markdown
Do the shared URL types receive different kinds of engagement? Analysis by groups: Do all articles receive the same types of engagement?
###Code
from itertools import product
def select_nonzero_src(df: pd.DataFrame, s: bool, r: bool, c: bool) -> pd.DataFrame:
bdf = df.isna()
return df[(bdf.AES != s) & (bdf.AER != r) & (bdf.AEC != c)]
df_src = base[(~base.AES.isna()) & (~base.AER.isna()) & (~base.AEC.isna())]
df_sr = base[(~base.AES.isna()) & (base.AER.isna()) & (~base.AEC.isna())]
df_sc = base[(~base.AES.isna()) & (~base.AER.isna()) & (base.AEC.isna())]
df_s = base[(~base.AES.isna()) & (base.AER.isna()) & (base.AEC.isna())]
df_rc = base[(base.AES.isna()) & ((~base.AER.isna()) | (~base.AEC.isna()))]
df_ = base[(base.AES.isna()) & (base.AER.isna()) & (base.AEC.isna())]
perms = [df_src, df_sr, df_sc, df_s, df_rc, df_]
labels = ['All counts', 'Shares & Reactions', 'Shares & Comments', 'Only Shares', 'Reactions or Comments', 'None']
[print(len(_)) for _ in perms];
###Output
_____no_output_____
###Markdown
Correlations by groups
###Code
df = pd.DataFrame()
for tdf, l in zip(perms, labels):
df[l] = tdf.discipline.value_counts().sort_values(ascending=False)
df.index = df.index.map(lambda x: "{} ({})".format(x, int(df.loc[x].sum())))
df
(df.fillna(0).apply(lambda x: 100*x/x.sum(), axis=1)
.sort_values(by="Biology (6761)", axis=1, ascending=False)
.sort_values(by="None")
.style
.background_gradient(axis=None, cmap="Greens")
.format("{:,.2f}")
)
df = pd.DataFrame()
for tdf, l in zip(perms, labels):
df[l] = tdf.discipline.value_counts().sort_values(ascending=False)
df = df.T
df.index = df.index.map(lambda x: "{} ({})".format(x, int(df.loc[x].sum())))
df
(df.fillna(0).apply(lambda x: 100*x/x.sum(), axis=1)
.sort_values(by="All counts (9005)", axis=1, ascending=False)
.sort_values(by="Clinical Medicine")
.style
.background_gradient(axis=None, cmap="Greens")
.format("{:,.2f}")
)
pdf = df.apply(lambda x: 100*x/x.sum())
sort_order = pdf.index.tolist()
pdf = pdf.reset_index().melt(id_vars="index")
ax = sns.pointplot(x="index", y="value", hue="variable", data=pdf, order=sort_order, dodge=True)
# # Line for all articles with 1 share
# pdf = base[base.AES==1].discipline.value_counts().to_frame()
# pdf = pdf.apply(lambda x: 100*x/x.sum())
# sns.pointplot(x="index", y="discipline", data=pdf.reset_index(), markers="X", color="red", linestyle="--", ax=ax)
plt.xticks(rotation=90)
sns.despine(top=True, left=True, right=True, bottom=True)
###Output
_____no_output_____
###Markdown
Comparison of retrieval methods
###Code
# Remove articles in Arts and Humanities
base = articles[~articles.discipline.isin(["Arts", "Humanities"])]
"Removed {} articles in Arts or Humanities".format(articles[articles.discipline.isin(["Arts", "Humanities"])].shape[0])
# Unit of analysis
disc = 'discipline'
###Output
_____no_output_____
###Markdown
Coverage of Shares, Reactions, and Comments
###Code
print(articles[['AES', 'AER', 'AEC']].dropna(how="all").shape[0])
articles.describe()
disc_counts = base.dropna(how="any", subset=['AES', 'AER', 'AEC'])[disc].value_counts()
x = base.dropna(how="any", subset=['AES', 'AER', 'AEC'])[[disc, 'AES', 'AER', 'AEC']]
x['Reactions per share'] = x['AER'] / x['AES']
x['Comments per share'] = x['AEC'] / x['AES']
x = x.melt(id_vars=disc, value_vars=['Comments per share', 'Reactions per share']).dropna()
meds = x.groupby(["discipline", "variable"])['value'].median().reset_index().groupby(disc)['variable', 'value'].apply(lambda x: x.iloc[0,1])
x['sort'] = x[disc].map(lambda x: meds[x])
x[disc] = x[disc].map(lambda x: "{} ({})".format(x, disc_counts[x]))
x = x.sort_values(["sort"])
ax = sns.boxenplot(x=disc, y="value", hue="variable", data=x, palette=cm)
# Scale and axes limits
plt.yscale("log")
xmin, xmax = ax.get_xlim()
# Plot additional line
plt.hlines(1, xmin, xmax, zorder=-1, color="red")
# X and Y ticks & labels
yticks = [0.1, 0.5, 1, 2, 5, 10, 100, 1000]
plt.yticks(yticks, yticks);
plt.xticks(rotation=45, ha="right");
# Axes labels
plt.xlabel("")
plt.ylabel("Ratio")
# Remove legend title
l = ax.legend()
l.set_title(None)
sns.despine(left=True, right=True, top=True, bottom=True)
disc_counts = base[disc].value_counts()
x = base.groupby(disc)[['AES', 'AER', 'AEC']].count()
x = x.apply(lambda x: x.map(lambda y: 100*y/disc_counts[x.name]), axis=1)
x.index = x.index.map(lambda x: "{} ({})".format(x, disc_counts[x]))
x.sort_values("AES", ascending=False).plot(kind="barh")
plt.grid(False)
plt.grid(True, axis="x", linestyle=":")
sns.despine(left=True, top=True, right=True, bottom=True)
disc_counts = base[disc].value_counts()
x = base.groupby(disc)[['AES', 'AER', 'AEC']].count()
# x = x.apply(lambda x: x.map(lambda y: 100*y/disc_counts[x.name]), axis=1)
x.index = x.index.map(lambda x: "{} ({})".format(x, disc_counts[x]))
x['AER/AES'] = 100 * x['AER'] / x['AES']
x['AEC/AES'] = 100 * x['AEC'] / x['AES']
x[['AER/AES', 'AEC/AES']].sort_values('AEC/AES', ascending=False).plot(kind="barh")
plt.ylabel("")
ticks = list(range(0, 81, 10))
plt.xticks(ticks, ["{:,}%".format(int(_)) for _ in ticks])
plt.grid(False)
plt.grid(True, axis="x", linestyle=":")
sns.despine(left=True, top=True, right=True, bottom=True)
col = "disc"
cov_disciplines = base.groupby(col)[metrics].apply(lambda x: x.count())
cov_disciplines['All articles'] = base.groupby(col)[metrics].size()
cov_disciplines = cov_disciplines.sort_values("All articles", ascending=False)
# Column names + order
cov_disciplines.index.name = "Discipline"
###Output
_____no_output_____
###Markdown
Distribution of disciplines Detailed look at Facebook
###Code
any_fb_counts = base.reindex(all_shares.union(am_shares))[col].value_counts()
any_fb_counts.loc['Total'] = any_fb_counts.sum()
mask = nz_resp['type'].isin(["pmc", "pmid"])
pdf = nz_resp[~mask]
pdf['log_shares'] = pdf['shares'].map(lambda x: np.log10(x))
order = pdf.type.value_counts().keys().tolist()
ax = sns.boxenplot(x="type", y="log_shares", data=pdf,
saturation=1,
order=order, palette=cm)
medians = pdf.groupby(['type'])['log_shares'].median()
nobs = pdf['type'].value_counts()
nobs = nobs.map(lambda x: "n: {}".format(x))
pos = range(len(nobs))
for pos, label in enumerate(order):
plt.text(pos, medians[label]+.05, nobs[label],
horizontalalignment='center', color='w', weight='semibold')
ticks = [1, 2, 5, 10, 50, 100, 500, 1000, 5000]
plt.yticks(np.log10(ticks), ticks);
sns.despine(left=True, right=True, top=True, bottom=True);
###Output
/home/asura/.virtualenvs/altmetrics/lib/python3.5/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
This is separate from the ipykernel package so we can avoid doing imports until
###Markdown
Dask is a recently developed parallel computation framework for Python providing capabilities for highly scalable computation. Dask makes it very easy to develop numerically intensive codes for high performance computing environments, especially when compared with traditional approaches based on lower-level languages. Given the potential advantages of reduced development time provided by Dask, it is pertinent to consider whether using a low-level language retains any significant benefits in terms of performance. This document reports a benchmarking exercise comparing the performance of Dask with PBLAS, a low-level linear algebra library. The problem used for benchmarking: $trace(\bf{X}\bf{Y})$ Where: $\bf{X}$ and $\bf{Y}$ are N-by-N matrices of single precision, randomly generated numbers in the range [0,1]. To achieve comparable timings for PBLAS and Dask:* Timings include memory allocation, random number generation, matrix multiplication, trace calculation and memory deallocation. As the only $O(N^3)$ operation, the matrix multiplication dominates the computation time.* For PBLAS, multiple calculations were performed using different [block sizes](http://netlib.org/scalapack/slug/node186.html).* For Dask, calculations were performed using the automatically determined [chunk size](https://docs.dask.org/en/latest/array-chunks.html).* Benchmarks were run for two values of N.* 5 repeats were performed for each calculation with the quickest time reported in the below analysis.* Speedups were calculated between 1 and 256 processes. The values for all considered parameters are given below, with all combinations having been run.
###Code
frameworks = ["pblas", "dask"]
Ns = [32768, 65536]
nprocs = [1, 4, 16, 64, 256]
dask_block_sizes = ["auto"]
pblas_block_sizes = [32, 64, 128, 256, 512, 1024]
all_block_sizes = list(map(str, pblas_block_sizes + dask_block_sizes))
repeats = list(range(5))
###Output
_____no_output_____
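###Markdown
For reference, the Dask side of this benchmark needs only a few lines. The sketch below illustrates the benchmarked operation using `dask.array`; it is an illustration of the approach, not the actual benchmark script used to produce the timings:
###Code
import dask.array as da

def dask_trace_xy(n):
    # Build two random N-by-N single-precision matrices with automatic chunking
    # and compute trace(X @ Y); the matrix multiplication dominates the cost.
    x = da.random.random((n, n), chunks="auto").astype("float32")
    y = da.random.random((n, n), chunks="auto").astype("float32")
    xy = x @ y
    # trace(XY) is the sum of the diagonal of the product
    return da.diagonal(xy).sum().compute()
###Output
_____no_output_____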
###Markdown
Before considering any results it's worth noting that the development time of the Dask code was considerably less than that of PBLAS. This is despite the Dask code retaining much greater flexibility around the number of processes used and the matrix size and shape. The PBLAS code is restricted to dealing with square matrices, with edge lengths divisible by the block size, and square process grids. Considerable effort would be required to allow deviation from these constraints, but all of these are possible for free when using Dask. Another way in which Dask offers considerable advantages is in its [memory management](https://distributed.dask.org/en/latest/worker.htmlmemory-management). We have not considered this here as this exercise focusses on a CPU-intensive task. The spill-to-disk functionality of the Dask workers was disabled by configuring a large memory limit. We start with some setup and load the timing data in order to look at the results:
###Code
%matplotlib notebook
from pathlib import Path
import matplotlib.pyplot as plt
from analysis_lib import block_size_plot, efficiency_plot, load_data, make_plot, speed_up, speed_up_plot
plt.style.use("seaborn")
plt.rcParams.update({"figure.titlesize": "xx-large"})
data = load_data(
Path("results/"),
framework=frameworks,
N=Ns,
nproc=nprocs,
block_size=all_block_sizes,
repeat=repeats
)
print(data.coords)
###Output
Coordinates:
* framework (framework) <U5 'pblas' 'dask'
* N (N) int64 32768 65536
* nproc (nproc) int64 1 4 16 64 256
* block_size (block_size) <U4 '32' '64' '128' '256' '512' '1024' 'auto'
* repeat (repeat) int64 0 1 2 3 4
###Markdown
As can be seen above the data is stored as an xarray DataArray. For a first look at the data we'll consider the speed up of each framework with increasing numbers of processes (so-called strong scaling).
###Code
fastest_runs = data.min(dim=["repeat", "block_size"])
speed_ups = speed_up(fastest_runs, fastest_runs.sel(nproc=1))
###Output
/home/ccaveayl/.conda/envs/dask-comp/lib/python3.8/site-packages/xarray/core/nputils.py:215: RuntimeWarning: All-NaN slice encountered
result = getattr(npmodule, name)(values, axis=axis, **kwargs)
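###Markdown
The `speed_up` helper comes from the accompanying `analysis_lib` module, which is not reproduced here. A minimal sketch of the standard strong-scaling quantities it is expected to compute would be:
###Code
def speed_up_sketch(times, serial_time):
    """Strong-scaling speed-up: S(p) = T(1) / T(p)."""
    return serial_time / times

def parallel_efficiency_sketch(speed_up, nproc):
    """Parallel efficiency: E(p) = S(p) / p."""
    return speed_up / nproc
###Output
_____no_output_____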
###Markdown
First `N=32768`:
###Code
make_plot(speed_ups.sel(N=Ns[0]), title=f"N = {Ns[0]}", plot_types=(speed_up_plot, efficiency_plot))
###Output
_____no_output_____
###Markdown
Overall the observed performance is fairly comparable, with PBLAS maintaining a small edge. Based on these data, Dask appears to offer a compelling alternative to its lower-level counterpart given its advantages in development speed and flexibility. The superlinear speed-up observed with PBLAS for `nproc=4` is consistent and does not seem to be a measurement artifact. The same analysis for the larger problem size, `N=65536`, follows:
###Code
make_plot(speed_ups.sel(N=Ns[1]), title=f"N = {Ns[1]}", plot_types=(speed_up_plot, efficiency_plot))
###Output
_____no_output_____
###Markdown
Qualitatively the results for `N=65536` are comparable with the smaller problem size. A key observation, however, is that it was not possible to obtain a value for Dask with `nproc=16`. For this configuration Dask was consistently killed due to memory usage. No value of the chunk size parameter was found to produce viable memory consumption on available hardware. Storing the three matrices for this problem should consume ~48GB of memory and available hardware supports jobs using up to 100GB. This suggests Dask is using more than twice the theoretical minimum amount of memory needed for this problem (depending on the number of worker processes). This therefore raises a note of caution about the use of Dask for memory-intensive applications. Whilst this analysis has not included the ability of Dask to spill data to disk, using such functionality will no doubt come with a significant performance hit. See also the discussion below about the use of processes vs threads for Dask workers. Another accusation that could be levelled is that we've weighted things in favour of PBLAS by considering a range of block sizes. How plausible is it that, when using a code in production, one can pick the optimal block size for any given problem? The below analysis considers the impact of block size on the recorded timings:
###Code
# In order to plot with block_size, coordinate values for this dimension must be numbers
numerical_block_size_data = data.drop_sel(block_size="auto").assign_coords(block_size=pblas_block_sizes)
make_plot(
numerical_block_size_data.sel(framework="pblas").stack(nproc_N=("nproc", "N")),
plot_types=(block_size_plot,)
)
###Output
_____no_output_____
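###Markdown
As noted earlier, spill-to-disk was avoided in these runs by giving the workers a generous memory limit. For reference, a minimal sketch of how such a configuration might look with `dask.distributed` (the exact worker counts and limits used for the benchmarks are not recorded here):
###Code
import dask
from dask.distributed import Client, LocalCluster

# Hypothetical configuration: disable the target/spill/pause thresholds so workers
# never write intermediate chunks to disk, and give each worker a large limit.
dask.config.set({
    "distributed.worker.memory.target": False,
    "distributed.worker.memory.spill": False,
    "distributed.worker.memory.pause": False,
})
cluster = LocalCluster(n_workers=4, memory_limit="100GB")
client = Client(cluster)
###Output
_____no_output_____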
###Markdown
Voters
###Code
voters_raw = pd.read_excel('data/Active_Voters_by_Race_Gender_as_of_November_1_2020.xlsx')
voters_columns = voters_raw.iloc[7].apply(lambda el: '_'.join(el.strip().split()))
voters = voters_raw.iloc[8:167]
voters.columns = voters_columns
voters.columns.name = None
voters = voters.set_index(voters['COUNTY_NAME'])
voters = voters.drop(columns=['COUNTY_ID', 'COUNTY_NAME'])
voters.index.name = None
voters = voters.apply(pd.to_numeric)
voters
###Output
_____no_output_____
###Markdown
Votes
###Code
votes_file = pd.ExcelFile('data/detail.xlsx')
###Output
_____no_output_____
###Markdown
Presidential Votes
###Code
presidential_votes_raw = pd.read_excel(votes_file, '1')
presidential_columns = [
'COUNTY_NAME',
'TRUMP_ELECTION_DAY_VOTES',
'TRUMP_ABSENTEE_BY_MAIL_VOTES',
'TRUMP_ADVANCED_VOTING_VOTES',
'TRUMP_PROVISIONAL_VOTES',
'TRUMP_TOTAL_VOTES',
'BIDEN_ELECTION_DAY_VOTES',
'BIDEN_ABSENTEE_BY_MAIL_VOTES',
'BIDEN_ADVANCED_VOTING_VOTES',
'BIDEN_PROVISIONAL_VOTES',
'BIDEN_TOTAL_VOTES',
'JORGENSEN_ELECTION_DAY_VOTES',
'JORGENSEN_ABSENTEE_BY_MAIL_VOTES',
'JORGENSEN_ADVANCED_VOTING_VOTES',
'JORGENSEN_PROVISIONAL_VOTES',
'JORGENSEN_TOTAL_VOTES',
'TOTAL_VOTES_PRESIDENTIAL'
]
presidential_votes = presidential_votes_raw.iloc[2:161, :]
presidential_votes.columns = presidential_columns
presidential_votes.columns.name = None
presidential_votes = presidential_votes.set_index(presidential_votes['COUNTY_NAME'].apply(lambda el: el.upper()))
presidential_votes = presidential_votes.drop(columns=['COUNTY_NAME'])
presidential_votes.index.name = None
presidential_votes = presidential_votes.apply(pd.to_numeric)
presidential_votes
###Output
_____no_output_____
###Markdown
Perdue Votes
###Code
perdue_votes_raw = pd.read_excel(votes_file, '2')
perdue_columns = [
'COUNTY_NAME',
'PERDUE_ELECTION_DAY_VOTES',
'PERDUE_ABSENTEE_BY_MAIL_VOTES',
'PERDUE_ADVANCED_VOTING_VOTES',
'PERDUE_PROVISIONAL_VOTES',
'PERDUE_TOTAL_VOTES',
'OSSOF_ELECTION_DAY_VOTES',
'OSSOF_ABSENTEE_BY_MAIL_VOTES',
'OSSOF_ADVANCED_VOTING_VOTES',
'OSSOF_PROVISIONAL_VOTES',
'OSSOF_TOTAL_VOTES',
'HAZEL_ELECTION_DAY_VOTES',
'HAZEL_ABSENTEE_BY_MAIL_VOTES',
'HAZEL_ADVANCED_VOTING_VOTES',
'HAZEL_PROVISIONAL_VOTES',
'HAZEL_TOTAL_VOTES',
'TOTAL_VOTES_PERDUE'
]
perdue_votes = perdue_votes_raw.iloc[2:161, :]
perdue_votes.columns = perdue_columns
perdue_votes.columns.name = None
perdue_votes = perdue_votes.set_index(perdue_votes['COUNTY_NAME'].apply(lambda el: el.upper()))
perdue_votes = perdue_votes.drop(columns=['COUNTY_NAME'])
perdue_votes.index.name = None
perdue_votes = perdue_votes.apply(pd.to_numeric)
perdue_votes
###Output
_____no_output_____
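###Markdown
The presidential and Perdue sheets are cleaned with identical steps; a small helper capturing that shared pattern could look like the sketch below (shown for illustration only; it is not used by the cells above):
###Code
def clean_votes_sheet(raw, columns):
    # Mirrors the steps above: keep the data rows, rename the columns,
    # index by upper-cased county name, and coerce everything to numeric.
    df = raw.iloc[2:161, :].copy()
    df.columns = columns
    df.columns.name = None
    df = df.set_index(df['COUNTY_NAME'].apply(lambda el: el.upper()))
    df = df.drop(columns=['COUNTY_NAME'])
    df.index.name = None
    return df.apply(pd.to_numeric)
###Output
_____no_output_____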
###Markdown
Non-White vs Biden
###Code
join = pd.merge(voters, presidential_votes, left_index=True, right_index=True)
join['WHITE_VOTERS'] = join['WH_MALE_VOTERS'] + join['WH_FEMALE_VOTERS'] + join['WH_UNKNOWN_VOTERS']
join['NON_WHITE_VOTERS'] = join['TOTAL_VOTERS'] - join['WHITE_VOTERS']
join['NON_WHITE_RATIO'] = join['NON_WHITE_VOTERS'] / join['TOTAL_VOTERS']
join['BIDEN_RATIO'] = join['BIDEN_TOTAL_VOTES'] / join['TOTAL_VOTES_PRESIDENTIAL']
sns.lmplot(x='NON_WHITE_RATIO', y='BIDEN_RATIO', data=join, fit_reg=True, height=9, aspect=16/9)
join['NON_WHITE_RATIO'].corr(join['BIDEN_RATIO'])
###Output
_____no_output_____
###Markdown
Counties where Biden outperformed Ossof
###Code
votes_join = pd.merge(presidential_votes, perdue_votes, left_index=True, right_index=True)
join = pd.merge(votes_join, voters, left_index=True, right_index=True)
join['BIDEN_RATIO'] = join['BIDEN_TOTAL_VOTES'] / join['TOTAL_VOTES_PRESIDENTIAL']
join['OSSOF_RATIO'] = join['OSSOF_TOTAL_VOTES'] / join['TOTAL_VOTES_PERDUE']
join['BIDEN_OUTPERFORMANCE'] = join['BIDEN_RATIO'] - join['OSSOF_RATIO']
join['OSSOF_POSSIBLE_VOTES'] = join['BIDEN_OUTPERFORMANCE'] * join['TOTAL_VOTERS']
join.sort_values(by='OSSOF_POSSIBLE_VOTES', ascending=False)['OSSOF_POSSIBLE_VOTES'].head(10)
###Output
_____no_output_____
###Markdown
Keeping interesting columns
###Code
#The following variables may have an impact on the price a rental can charge, so these will be looked at
listings_keep_df = listings_df[['price','security_deposit','cleaning_fee','extra_people'
,'minimum_nights','availability_30','guests_included'
,'cancellation_policy','amenities','host_is_superhost'
,'property_type','room_type','accommodates','bathrooms'
,'bedrooms','beds','bed_type','number_of_reviews'
,'review_scores_rating','review_scores_accuracy'
,'review_scores_cleanliness','review_scores_checkin'
,'review_scores_communication','review_scores_location'
,'review_scores_value','neighbourhood_group_cleansed']].copy()
#Assess data types
listings_keep_df.dtypes
#Assessing the frequencies of the captured neighbourhoods
listings_keep_df["neighbourhood_group_cleansed"].value_counts()
#Assessing valid values for property type
listings_keep_df['property_type'].value_counts()
#Assessing missing values
listings_keep_df.isnull().sum()/listings_keep_df.shape[0]
###Output
_____no_output_____
###Markdown
Assessing other key columns with nulls to see if they are for niche types of accommodation (e.g. missing rooms for tents)
###Code
listings_keep_df[listings_keep_df['bathrooms'].isnull()].head()
#null bathrooms seem to be standard rooms, so are genuinely missing - will set to the mean below
listings_keep_df[listings_keep_df['bedrooms'].isnull()].head()
#null bedrooms seem to be standard rooms, so are genuinely missing - will set to the mean below
listings_keep_df[listings_keep_df['beds'].isnull()].head()
#There is only one listing with no value for beds - it has one room, a real bed, and accommodates 4
#will set to the mean below
#Assessing correlations between scores to see if we can drop some
cor = listings_keep_df[['review_scores_rating','review_scores_accuracy','review_scores_cleanliness'
,'review_scores_checkin','review_scores_communication','review_scores_location'
,'review_scores_value']
].corr()
cor
###Output
_____no_output_____
###Markdown
As expected, the scores are all positively correlated with each other. We will just use the review_scores_rating metric because it is the most correlated with the other values and so preserves the most information
###Code
#Assessing what amenities are recorded
#The amenities have multiple values in a single cell separated by commas.
#It is stored as a string, but has dictionary characters as well as quotations that need to be removed
all_amenities = []
for idx in range(listings_keep_df['amenities'].shape[0]):
    #removing unnecessary characters and splitting the amenity string
lst = (re.sub('("|{|})', "", listings_keep_df['amenities'][idx])).split(",")
all_amenities.extend(lst)
amenity_counts = pd.Series(all_amenities).value_counts()
print("{0} unique amenities captured".format(len(amenity_counts)))
amenity_counts
###Output
42 unique amenities captured
###Markdown
Cleaning data
###Code
def clean_data(df,amenity_counts):
"""
Perform feature re-encoding and engineering for listings data.
Extra columns are created called amenities_0 to amenities_n where n is the number of unique amenities in the dataframe
A decode of what these are is returned by the function
It can take a minute to assign the amenity dummy variables, so the function prints out the progress every 500 rows
INPUT1: Listings DataFrame
INPUT2: Series object with the unique amenities
OUTPUT1: New dataframe containing cleaned and re-engineered columns
OUTPUT2: Dataframe of the amenity counts, which corresponds to the columns amenity0 to amenityn
"""
#Keeping initial columns
df2 = df[['price','security_deposit','cleaning_fee','extra_people'
,'minimum_nights','availability_30','guests_included'
,'cancellation_policy','amenities','host_is_superhost'
,'property_type','room_type','accommodates','bathrooms'
,'bedrooms','beds','bed_type','number_of_reviews'
,'review_scores_rating','review_scores_accuracy'
,'review_scores_cleanliness','review_scores_checkin'
,'review_scores_communication','review_scores_location'
,'review_scores_value','neighbourhood_group_cleansed']].copy()
#converting the columns with strings in currency format to floats
string_to_float_cols = ['price','security_deposit','cleaning_fee','extra_people']
for col in string_to_float_cols:
df2[col] = df2[col].replace(regex=True
,inplace=False
,to_replace=r'(\$|,)'
,value=r'').astype(float)
#creating a new boolean field to say whether the host is a superhost
df2['superhost'] = np.where(df2['host_is_superhost']=='t', 1, 0)
#grouping the property types into 'House','Apartment','B&B',' and 'other'
df2['house'] = df2['property_type'].isin(['House','Townhouse','Bungalow'])
df2['apartment'] = df2['property_type'].isin(['Apartment','Condominium'])
df2['bnb'] = df2['property_type'] == 'Bed & Breakfast'
df2['other_building'] = df2['property_type'].isin(
['House','Townhouse','Bungalow','Apartment','Condominium','Bed & Breakfast']) == False
#Setting the individual amenity names to a dataframe and removing the missing value
amenity_list = pd.DataFrame(amenity_counts.index)
amenity_list.columns = ['amenities']
#removing where there are no amenities listed
amenity_list = amenity_list[amenity_list.amenities != ''].reset_index(drop=True)
amenity_list['index'] = "amenities_" + amenity_list.index.astype(str)
#Recording the column index that is the start of the amenity groups
amenities_start_col_index = df2.shape[1]
#creating a new column for each amenitiy
new_amenity_cols = ["amenities_" + str(x) for x in range(amenity_list.shape[0])]
for new_col in new_amenity_cols:
df2[new_col] = 0
#Assigning values of 1 where there is a match on amenity
start_time = time.time()
num_rows = df2.shape[0]
for row_indexer, row in df2.iterrows():
if row_indexer % 500 == 0:
print("Amenity progress: {:.1%}".format(row_indexer/num_rows)
,", Seconds since start",time.time() - start_time)
for match_id, match_val in amenity_list.iterrows():
if df2['amenities'][row_indexer].find(match_val[0]) > 0:
df2.iloc[row_indexer,match_id + amenities_start_col_index] = 1
#Dropping the columns that are no longer needed variable
df2.drop([#Re-engineered columns
'property_type','host_is_superhost','amenities'
#Extra review scores
,'review_scores_accuracy','review_scores_cleanliness'
,'review_scores_checkin','review_scores_communication'
,'review_scores_location','review_scores_value'
]
,axis=1,inplace=True)
#Adding dummy variables for categorical variables
df2 = pd.get_dummies(df2)
#Dropping additional fields to reduce multicollinearity
df2.drop(['other_building',"cancellation_policy_moderate"
,"room_type_Entire home/apt","bed_type_Real Bed"
,"neighbourhood_group_cleansed_Other neighborhoods"
,'amenities_7','amenities_9'
]
,axis=1,inplace=True)
#Dealing with missing values
df2['cleaning_fee'] = df2['cleaning_fee'].fillna(0)
df2['security_deposit'] = df2['security_deposit'].fillna(0)
#Setting other columns to the mean where missing
cols_mean_impute = ['bathrooms','bedrooms','beds','review_scores_rating' ]
fill_mean = lambda col: col.fillna(col.mean())
df2[cols_mean_impute] = df2[cols_mean_impute].apply(fill_mean, axis=0)
return df2, amenity_list
listings_cleaned_df, amenity_list = clean_data(listings_keep_df,amenity_counts)
print('')
print('Amenity List')
print(amenity_list)
print('')
print('Checking missing values have been dealt with')
print('All below should be zero')
print('')
print(listings_cleaned_df.isnull().sum())
print('')
print('Returned DF')
print('')
listings_cleaned_df.head()
#Checking dogs have been assigned correctly, as a proxy to test all the other columns are correctly assigned
#Check the output for the 3 rows of amenities to make sure they contain the word dog in each
test = listings_keep_df[listings_cleaned_df['amenities_27'] == 1].reset_index(drop=True).copy()
print("(1)",test.amenities[0])
print("(2)",test.amenities[1])
print("(3)",test.amenities[2])
###Output
(1) {TV,"Cable TV",Internet,"Wireless Internet","Air Conditioning",Kitchen,"Free Parking on Premises","Pets Allowed","Pets live on this property",Dog(s),Cat(s),"Hot Tub","Indoor Fireplace",Heating,"Family/Kid Friendly",Washer,Dryer,"Smoke Detector","Carbon Monoxide Detector",Essentials,Shampoo}
(2) {"Wireless Internet","Pets live on this property",Dog(s),Heating,"Family/Kid Friendly",Essentials,Shampoo}
(3) {TV,"Cable TV",Internet,"Wireless Internet",Kitchen,"Free Parking on Premises","Pets live on this property",Dog(s),"Indoor Fireplace","Buzzer/Wireless Intercom",Heating,"Family/Kid Friendly",Washer,Dryer,"Smoke Detector","Carbon Monoxide Detector","First Aid Kit","Fire Extinguisher",Essentials,Shampoo}
###Markdown
Creating new dataframe with values scaled for modelling
###Code
listings_scaled = listings_cleaned_df.copy()
cols_to_scale = ['security_deposit','cleaning_fee','extra_people','minimum_nights','availability_30'
,'guests_included','accommodates','bathrooms','bedrooms','beds','number_of_reviews'
,'review_scores_rating']
scaler = StandardScaler()
listings_scaled[cols_to_scale] = scaler.fit_transform(listings_scaled[cols_to_scale])
listings_scaled.head()
#Creating table decodes that can be used to undo the scaling after modelling
col_decodes = pd.DataFrame(cols_to_scale)
col_decodes.columns = ['col']
means = pd.DataFrame(scaler.mean_)
means.columns = ['mean']
sd = pd.DataFrame(scaler.scale_)
sd.columns = ['sd']
decodes = col_decodes.merge(means,how='left',left_index = True, right_index = True)
decodes = decodes.merge(sd,how='left',left_index = True, right_index = True)
#decodes = pd.DataFrame(colsToScale).merge(pd.DataFrame(scaler.mean_),how='left',left_index = True, right_index = True)
decodes
###Output
_____no_output_____
###Markdown
Looking to find the size of the uplift in income from accommodating more people
###Code
accom = pd.DataFrame(listings_cleaned_df["accommodates"].value_counts())
accom.columns = ['Number']
accom['Accommodates'] = accom.index
accom.sort_values(by = ['Accommodates'], ascending=True, inplace=True)
plt.figure(figsize=(10,10))
plt1 = plt.subplot(2,1,1)
ax = sns.barplot(x="Accommodates"
, y="Number"
, data=accom).set_title('Number of rentals per occupancy')
plt.subplot(2,1,2)
ax = sns.boxplot(x="accommodates"
, y="price"
, data=listings_cleaned_df)
plt1.set_xlabel('')
plt.show();
fig = ax.get_figure()
fig.savefig('picture_outputs/occupancy_graph.png')
plt.clf()
#Finding the actual means and jumps in price
accomodation_means = listings_cleaned_df[["accommodates","price"]].groupby(["accommodates"]).mean()
accomodation_means['change'] = accomodation_means["price"].diff()
accomodation_means
###Output
_____no_output_____
###Markdown
Properties that accommodate over 8 people are fairly rare, so robust conclusions cannot be drawn for these. There is, however, a big jump from 4 to 5. For properties with two double rooms, it may therefore be worthwhile getting a sofa or camp bed so that the property can accommodate 5 people. Create regression model to look at the contribution to price of the different metrics
###Code
#Split into explanatory and response variables
X = listings_scaled.iloc[:,1:80]
y = listings_scaled['price']
#Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .10, random_state=42)
lm_model = LinearRegression() # Instantiate
lm_model.fit(X_train, y_train) #Fit
#Predict and score the model
y_test_preds = lm_model.predict(X_test)
"The r-squared score for the model was {} on {} values.".format(r2_score(y_test, y_test_preds), len(y_test))
###Output
_____no_output_____
###Markdown
Visually assessing how good the predictions were
###Code
#Set background
back1 = plt.fill_between([0,800], [0,800], alpha=0.5)
back1.set_color('#ffde72')
back2 = plt.fill_between([0,800], [0,800], 800, alpha=0.5)
back2.set_color('#b3fff4')
ax = sns.scatterplot(x=y_test, y=y_test_preds)
ax.set(xlabel='Actual Price'
, ylabel='Predicted Price'
,ylim=(0, 800)
,xlim=(0, 800)
)
plt.show()
###Output
_____no_output_____
###Markdown
The model seems to under-predict the price for higher values. For our purposes, however, this matches closely enough. Getting a table with the dollar value of each contribution
###Code
#Getting column names
print("Intercept:",lm_model.intercept_)
coefNames = pd.DataFrame(X.columns)
coefNames.columns = ['Metric']
#Getting corresponding coefficients
coefVals = pd.DataFrame(lm_model.coef_)
coefVals.columns = ['Regression_coef_val']
#Merging with column names and sorting
coefNames = coefNames.merge(coefVals,left_index = True, right_index = True)
coefNames.sort_values(by = ['Regression_coef_val'], ascending=False, inplace=True)
#Merging on the amenity names
coefDecodes = coefNames.merge(amenity_list,how = 'left',left_on='Metric',right_on='index')
coefDecodes['Description'] = np.where(coefDecodes['amenities'].isnull(), coefDecodes['Metric'], coefDecodes['amenities'])
coefDecodes = coefDecodes[['Description','Regression_coef_val']]
coefDecodes
###Output
Intercept: 162.1774960305662
###Markdown
For the scaled variables, the actual impact of the variable on the price is $coef\_val*\frac{(unit-mean)}{std}$. This can be re-written as $\frac{coef\_val}{std}(unit) - \frac{coef\_val*mean}{std}$. Since $\frac{coef\_val*mean}{std}$ is a constant, each one-unit increase raises the price by $\frac{coef\_val}{std}$.
###Code
#Where scaling occured divide the coefficient by the standard deviation
new = coefDecodes.merge(decodes,how='left',left_on = 'Description',right_on='col')
new['mean'].fillna(0, inplace=True)
new['sd'].fillna(1, inplace=True)
new['reg_coeff_unscaled'] = new['Regression_coef_val']/new['sd']
new = new[['Description','Regression_coef_val','reg_coeff_unscaled']]
new.sort_values(by = ['reg_coeff_unscaled'], ascending=False, inplace=True)
new
new.to_excel('price_coefs.xlsx',index=False)
###Output
_____no_output_____
###Markdown
Factors that boost income
###Code
new[new['reg_coeff_unscaled'] >= 5]
###Output
_____no_output_____
###Markdown
Main factors that reduce income
###Code
new[new['reg_coeff_unscaled'] <= -5]
###Output
_____no_output_____
###Markdown
Analysis of End-of-Year Book Lists Find the data [here](https://bit.io/bitdotio/best%20books%202021). The Data The data can be found in [this bit.io repository](https://bit.io/bitdotio/best%20books%202021). Other good sources for aggregate end-of-year lists include:- [yearendlists.com](https://www.yearendlists.com/) which also includes lists for TV, music, movies, and more.- [The Ultimate Best Books of 2021 List (Lithub)](https://lithub.com/the-ultimate-best-books-of-2021-list/) which aggregates many year-end lists to obtain a "definitive" year-end list. How We Got the Data We looked at all of the book lists for 2021 on [yearendlists.com](https://www.yearendlists.com/) to identify the lists. We followed a few key principles in selecting sources:- Look for Discriminating Sources: the lists included ten books or fewer (the publications have to narrow the books to a list of favorites; a [list of 100](https://time.com/collection/100-must-read-books-2021/) doesn't help us in finding a consensus "best book.")- Use High-profile Sources: we didn't have specific criteria for this one, but in general, we looked for lists from well-known individuals or media sources, not from any blog with a book list we could find on the Internet.- Prefer Generality: We looked for "best books" lists, not "best fiction" or "best science fiction" or other sub-classifications of books. In one case (TIME), we took both the "top fiction" and "top nonfiction" lists rather than omitting the publication entirely.- Avoid Redundancy: Use only a single list (or, in the case of TIME, two non-overlapping lists) per source. We don't want to count a single book more than once for a given source. Obtain the data from bit.io
###Code
# Helper Function for Downloading Datasets
def download_dataset(target, pg_string):
engine = create_engine(pg_string)
# SQL for querying an entire table
sql = f"""
SELECT *
FROM {target};
"""
# Return SQL query as a pandas dataframe
with engine.connect() as conn:
# Set 1 minute statement timeout (units are milliseconds)
conn.execute("SET statement_timeout = 60000;")
df = pd.read_sql(sql, conn)
return df
df_media = download_dataset(MEDIA_TABLE, PG_STRING)
df_individual = download_dataset(CELEB_TABLE, PG_STRING)
df_rank = download_dataset(RANK_TABLE, PG_STRING)
df_media = df_media.merge(df_rank, how="left", left_on="title", right_on="title").rename(columns={'count':'rank'})
df_individual = df_individual.merge(df_rank, how="left", left_on="title", right_on="title").rename(columns={'count':'rank'})
df_media
df_media.loc[df_media['source'].isin(['New York Times', 'Washington Post', 'Slate'])].sort_values('title')
# Books appearing on exactly two lists
df_media['title_author'] = df_media.title.values + ' (' + df_media.author.values + ')'
twice = (df_media
.loc[df_media['rank']==2, :]
.loc[:,'title_author']
.unique()
)
", ".join(twice)
names = []
props = []
counts = []
groups = df_media.groupby('source')
for name, group in groups:
others = df_media.loc[df_media['source'] != name]
length = group.shape[0]
titles = group['title']
num = titles.isin(others['title'])
n_times = (others['title'].isin(titles)).sum()
counts.append(n_times)
props.append(num.mean())
names.append(name)
df_lists = pd.DataFrame({'source':names, 'prop':props, 'times':counts}).sort_values('prop', ascending=False)
df_lists.loc[df_lists['source']=='TIME', 'source'] = "TIME*"
df_lists
fig, ax = plt.subplots(1, 2, figsize=(10,6), dpi=150)
ax[0].barh(df_lists['source'], df_lists['prop'], color=BLUE, alpha=0.8)
ax[0].set_title('Proportion of Books from each List\nAppearing in Other Lists')
ax[0].set_xlabel('Proportion')
ax[0].set_ylabel('List Publisher')
ax[0].invert_yaxis()
# grid x
ax[0].grid(True, axis='x', alpha=0.3)
ax[1].barh(df_lists['source'], df_lists['times'], color=GOLD, alpha=0.8)
ax[1].set_title('Number of Times Books from each List\nAppear on Other Lists')
ax[1].invert_yaxis()
ax[1].set_yticks([])
ax[1].set_xlabel('Number of Times')
ax[1].grid(True, axis='x', alpha=0.3)
# Formatting
img = Image.open('/Users/danielliden/git/innerjoin/resources/logo.png')
img2 = Image.open('/Users/danielliden/git/innerjoin/resources/twitter.png')
for a in ax:
for spine in ['top', 'right', 'left', 'bottom']:
a.spines[spine].set_visible(False)
a.tick_params(which='both', bottom=True, left=True, color=GREY)
fig.tight_layout(rect=[0.02,0.1,0.97,0.9])
fig.text(0.1, 0.05, 'Source: 16 "Best Books of 2021" lists. Access Data at https://bit.io/bitdotio/best%20books%202021\n*List Contained 20 Books', ha='left',
fontdict={"family":"Inter", "size":8, "color":GREY})
fig.suptitle("End-of-Year Lists Contain Many of the Same Books", x=0.2, y=0.96,
fontweight="bold", ha="left", fontdict={"family":"Inter", "size":8, "color":"black", "alpha":0.8})
# Fonts
mpl.rcParams['font.family'] = 'Inter'
# logos
logo = plt.axes([0.8,0.88, 0.13, 0.13], frameon=True)
logo.imshow(img)
logo.axis('off')
logo.patch.set_facecolor("white")
twt = plt.axes([0.8,0.0, 0.13, 0.13], frameon=True)
twt.imshow(img2)
twt.axis('off')
fig.patch.set_facecolor("white")
if not Path("./figures/").exists():
Path("./figures/").mkdir()
plt.savefig("./figures/lists_figure_2.png")
plt.show()
names_i = []
props_i = []
counts_i = []
groups = df_individual.groupby('source')
for name, group in groups:
# print(name)
others = df_media.loc[df_media['source'] != name]
length = group.shape[0]
titles = group['title']
num = titles.isin(others['title'])
n_times = (others['title'].isin(titles)).sum()
counts_i.append(n_times)
props_i.append(num.mean())
names_i.append(name)
df_lists = pd.DataFrame({'source':names_i, 'prop':props_i, 'times':counts_i}).sort_values('prop', ascending=False)
df_individual
fig, ax = plt.subplots(1, 2, figsize=(6,4), dpi=150)
ax[0].bar(df_lists['source'], df_lists['prop'], color=GREEN, alpha=0.8)
ax[0].set_title('Proportion of Books Appearing\nin Published Year-End Lists')
ax[0].set_ylabel('Proportion')
# grid x
ax[0].grid(True, axis='y', alpha=0.3)
ax[1].bar(df_lists['source'], df_lists['times'], color=RED, alpha=0.8)
ax[1].set_title('Number of Times Books from each\nList Appear on Other Lists')
ax[1].set_ylabel('Number of Times')
ax[1].grid(True, axis='y', alpha=0.3)
# Formatting
img = Image.open('/Users/danielliden/git/innerjoin/resources/logo.png')
img2 = Image.open('/Users/danielliden/git/innerjoin/resources/twitter.png')
for a in ax:
for spine in ['top', 'right', 'left', 'bottom']:
a.spines[spine].set_visible(False)
a.tick_params(which='both', bottom=True, left=True, color=GREY)
fig.tight_layout(rect=[0.02,0.1,0.97,0.9])
fig.text(0.1, 0.05, 'Source: 16 "Best Books of 2021" lists.\nAccess Data at https://bit.io/bitdotio/best%20books%202021', ha='left',
fontdict={"family":"Inter", "size":8, "color":GREY})
fig.suptitle("Celebrity End-of-Year Lists", x=0.1, y=0.96,
fontweight="bold", ha="left", fontdict={"family":"Inter", "size":8, "color":"black", "alpha":0.8})
# Fonts
mpl.rcParams['font.family'] = 'Inter'
# logos
logo = plt.axes([0.8,0.88, 0.13, 0.13], frameon=True)
logo.imshow(img)
logo.axis('off')
logo.patch.set_facecolor("white")
twt = plt.axes([0.8,0.0, 0.13, 0.13], frameon=True)
twt.imshow(img2)
twt.axis('off')
fig.patch.set_facecolor("white")
if not Path("./figures/").exists():
Path("./figures/").mkdir()
plt.savefig("./figures/lists_figure_3.png")
plt.show()
df_rank3 = df_rank.loc[df_rank['count']>=3, :]
df_rank3.iloc[12,0] = "How the Word Is Passed: A Reckoning with the History of Slavery..."
# make horizontal bar plot from df_rank3 count column
#reverse order of df_rank3
my_colors = [GOLD, GOLD, GOLD, GOLD, GREEN, GREEN, GREEN, RED, RED, RED, RED, BLUE, BLUE, BLUE, BLUE, BLUE]
#df_rank3 = df_rank3.sort_values('count', ascending=True)
fig, ax = plt.subplots(figsize=(10,6), dpi=150)
barlist = ax.barh(df_rank3['title'].values, df_rank3['count'].values, color=my_colors)
ax.invert_yaxis()
ax.set_yticks([])
ax.set_xlabel('Number of Times Book Appears in End-of-Year Lists')
# vertical grid
ax.xaxis.grid(True, linestyle='-', which='major', alpha=0.3)
# Formatting
img = Image.open('/Users/danielliden/git/innerjoin/resources/logo.png')
img2 = Image.open('/Users/danielliden/git/innerjoin/resources/twitter.png')
for spine in ['top', 'right', 'left', 'bottom']:
ax.spines[spine].set_visible(False)
ax.tick_params(which='both', bottom=True, left=True, color=GREY)
fig.tight_layout(rect=[0.02,0.1,0.97,0.9])
# add labels overlapping bars
for i, bar in enumerate(barlist):
width = bar.get_width()
ax.text(0.1, i, df_rank3.loc[i,'title'], color="white", ha='left', va='center', fontweight="bold", fontsize=9
#path_effects=[path_effects.withStroke(linewidth=0.5, foreground='white')])
)
fig.text(0.1, 0.05, r'Source: 16 "Best Books of 2021" lists. Access Data at https://bit.io/bitdotio/best%20books%202021', ha='left',
fontdict={"family":"Inter", "size":8, "color":GREY})
fig.suptitle("Books with 3 or More Appearances in 2021 End-of-Year Lists", x=0.1, y=0.96,
fontweight="bold", ha="left", fontdict={"family":"Inter", "size":8, "color":"black", "alpha":0.8})
# Fonts
mpl.rcParams['font.family'] = 'Inter'
# logos
logo = plt.axes([0.8,0.88, 0.13, 0.13], frameon=True)
logo.imshow(img)
logo.axis('off')
logo.patch.set_facecolor("white")
twt = plt.axes([0.8,0.0, 0.13, 0.13], frameon=True)
twt.imshow(img2)
twt.axis('off')
fig.patch.set_facecolor("white")
if not Path("./figures/").exists():
Path("./figures/").mkdir()
plt.savefig("./figures/books_figure_1.png")
plt.show()
###Output
/Users/danielliden/git/innerjoin/2021_book_lists/env/lib/python3.9/site-packages/pandas/core/indexing.py:1817: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
self._setitem_single_column(loc, value, pi)
/var/folders/kx/yhv2f4ds2xl41vhv6h8py3q80000gn/T/ipykernel_46971/3970611543.py:11: MatplotlibDeprecationWarning: Support for passing numbers through unit converters is deprecated since 3.5 and support will be removed two minor releases later; use Axis.convert_units instead.
ax.set_yticks([])
###Markdown
Analysis > This file runs both the calibration and the decomposition of the model. In the settings section you may choose which country to run the analysis for, as well as whether to print results or output them in LaTeX format. If running the file for the first time, consider setting install_packages = True. If running in Binder this is already taken care of. Settings
###Code
## Calibration is run for all countries.
## Choose country to output results for
## "BEL", "DNK", "FIN", "FRA", "GBR", "ITA", "JPN", "NLD", or "USA"
ISO = "GBR"
## Choose whether to print Latex tables (True or False)
show_results = True
print_latex = False
## Define where data is located
data_path = "data/data.csv"
## Required packages are numpy, pandas, scipy, statistics, itertools, and tabulate (if printing latex)
install_packages = False
###Output
_____no_output_____
###Markdown
Calibration settings and assumptions
###Code
moments = ["AvgRet","CapShare","rf","PD","XK","TFPgrowth","PriceInvt","PopGrowth","EmpPop"]
parameters = ["beta","mu","p","delta","alpha","g_L","g_Z","g_Q","N_bar"]
countries = ["BEL","DNK","FIN","FRA","ITA","JPN","NLD","GBR","USA"]
startP1 = 1984
endP1 = 1999
startP2 = 2000
endP2 = 2015
b = 0.15
theta = 12
sigma = 0.5
###Output
_____no_output_____
###Markdown
Install packages
###Code
if install_packages == True:
!pip install numpy
!pip install pandas
!pip install scipy
!pip install statistics
!pip install more-itertools
!pip install tabulate
###Output
_____no_output_____
###Markdown
Import packages
###Code
import numpy as np
import pandas as pd
#import scipy as sp
from scipy import optimize
from scipy.special import factorial
import statistics as stat
import itertools
#!pip install tabulate
from tabulate import tabulate
class par: None
class moms: None
###Output
_____no_output_____
###Markdown
Set dictionary for data-series names
###Code
# Make Dictionary for data-series
data_series = {
"AvgRet": "Average Return to Capital",
"CapShare": "Gross Capital Share",
"rf": "Risk Free Interest Rate",
"PopGrowth": "Population Growth",
"PriceInvt": "Investment Price Growth",
"PD": "Price-Dividend Ratio",
"TFPgrowth": "TFP Growth",
"XK":"Investment-Capital Ratio",
"EmpPop": "Employment-Population Ratio",
"Spread": "Spread"}
###Output
_____no_output_____
###Markdown
Define calculation of moments
###Code
def calc_moments(country,s1,e1,s2,e2):
df = pd.read_csv(data_path, sep=";", index_col="year")
df = df[df["ISO"]==country]
start1 = int(s1)
end1 = int(e1)
start2 = int(s2)
end2 = int(e2)
#select relevant series, set year as index, create copy
df = df[['AvgRet','CapShare','rf', 'PopGrowth','PriceInvt','PD','TFPgrowth','XK','EmpPop']]
df = df.loc[start1:end2]
df_2 = pd.DataFrame(index=['AvgRet','CapShare','rf', 'PopGrowth','PriceInvt','PD','TFPgrowth','XK','EmpPop'])
# Calculate averages and insert to DataFrame
for var in df.columns.tolist():
df_2.loc[var,'p1'] = stat.mean(df.loc[s1:e1,var])
df_2.loc[var,'p2'] = stat.mean(df.loc[s2:e2,var])
#df_2.loc[var,'stdev1'] = stat.stdev(df.loc[1984:2000,var])
#df_2.loc[var,'stdev2'] = stat.stdev(df.loc[2001:2016,var])
df_2.loc[var,'change'] = df_2.loc[var,'p2']-df_2.loc[var,'p1']
return df_2
###Output
_____no_output_____
###Markdown
Define calibration
###Code
### EQUATIONS IN IDENTIFICATION
###### FOR PART 2
def eq_footnote_15(par,moms):
return moms.TFPgrowth - (par.g_T-(1-moms.CapShare)*par.g_L-moms.CapShare*(par.g_T+par.g_Q))
def eq_11(par,moms):
return (1+par.g_T) - (1+par.g_L)*(1+par.g_Z)**(1/(1-par.alpha))*(1+par.g_Q)**(par.alpha/(1-par.alpha))
def eq_15(par,moms):
par.beta_star = 1/(1+par.r_star)
return moms.AvgRet - ((par.mu+par.alpha-1)/par.alpha)*(par.r_star + par.delta + par.g_Q/par.beta_star)
def eq_18(par,moms):
return moms.XK - ((1+par.g_Q)*(1+par.g_T)-(1-par.delta))
def eq_20(par,moms):
return moms.CapShare - (par.mu+par.alpha-1)/(par.mu)
def eq_23(par,moms):
par.beta_star = 1/(1+par.r_star)
return moms.PD - par.beta_star*(1+par.g_T)/(1-par.beta_star*(1+par.g_T))
### END OF EQ'S FOR PART 2
###### FOR PART 3
def find_p(p,par,moms): # Change name from find_p to EQ-number at some point
update_misc(par)
MOM2 = ((1-2*p)+p*np.exp(par.Bh*(1-par.theta)) + p*np.exp(par.B*(1-par.theta)))
MOM3 = ((1-2*p)+p*np.exp(par.Bh*(-par.theta)) + p*np.exp(par.B*(-par.theta)))
return moms.rf - (MOM2/(par.beta_star*MOM3)-1)
def update_misc(par): # Help function for part 3
par.beta_star = 1/(1+par.r_star)
par.B = np.log(1-par.b)
par.Bh = np.log(1+par.b)
par.g_PC = (1+par.g_T)/(1+par.g_L)-1
###### DEFINE CALIBRATION
def calibrate(ISO,s,e,u,b,theta,sigma):
#Set country, start years and end years – when calling function, eventually
country = ISO
start = s
end = e
unique_id = u
# a. shock size
par.b = b
# b. risk aversion coefficient
par.theta = theta
# c. IES, sigma = 1/IES
par.sigma = sigma
#Set data
df = pd.read_csv(data_path, sep=";", index_col="year")
df = df[df["ISO"]==country]
# Calc moments
moms.AvgRet = stat.mean(df.loc[start:end,"AvgRet"])/100
moms.CapShare = stat.mean(df.loc[start:end,"CapShare"])/100
moms.XK = stat.mean(df.loc[start:end,"XK"])/100
moms.PD = stat.mean(df.loc[start:end,"PD"])
moms.TFPgrowth = stat.mean(df.loc[start:end,"TFPgrowth"])/100
moms.rf = stat.mean(df.loc[start:end,"rf"])/100 #used in step 3
moms.PopGrowth = stat.mean(df.loc[start:end,"PopGrowth"])/100
moms.PriceInvt = -stat.mean(df.loc[start:end,"PriceInvt"])/100 # note: negative value used
moms.EmpPop = stat.mean(df.loc[start:end,"EmpPop"])/100
# STEP 1: set parameters g_L, g_Q and N_bar directly
par.g_L = moms.PopGrowth
par.g_Q = moms.PriceInvt
par.N_bar = moms.EmpPop
# STEP 2
# Set initial guesses for parameters to be estimated in second part and solve equlation system
# a. parameters to estimate
par.names = ['g_Z','g_T','delta','alpha','r_star','mu']
# b. guess
par.mu = 1.01
par.delta = 0.025
par.alpha = 0.25
par.g_Z = 0.08 #0.02
par.r_star = 0.05
par.g_T = 0.04
x = set_x(par)
# c. solve
solution = optimize.fsolve(eq_system, x, args=(par,moms), full_output=0)
set_parameters(par,solution)
# STEP 3 – estimate beta and p
par.p = optimize.fsolve(find_p, 0.1, args=(par,moms), full_output=0)[0]
update_misc(par)
MOM = ((1-2*par.p)+par.p*np.exp(par.Bh*(1-par.theta)) + par.p*np.exp(par.B*(1-par.theta)))**((1-par.sigma)/(1-par.theta))
par.beta = par.beta_star/((1+par.g_PC)**(-par.sigma)*MOM);
df_estimates = []
for name in parameters:
df_estimates.append({'Parameter': name, unique_id:getattr(par,name)})
df_estimates = pd.DataFrame(df_estimates).set_index('Parameter')
return df_estimates
##### OTHER HELP FUNCTIONS
def set_parameters(par,x):
for name,value in zip(par.names,x):
setattr(par,name,value)
def set_x(par):
x = np.zeros(len(par.names))
for i,name in enumerate(par.names):
x[i] = getattr(par,name)
return x
def eq_system(x,par,moms):
# a. set parameters
set_parameters(par,x)
# c. evaluate equations
out = []
out.append(eq_footnote_15(par,moms))
out.append(eq_11(par,moms))
out.append(eq_15(par,moms))
out.append(eq_18(par,moms))
out.append(eq_20(par,moms))
out.append(eq_23(par,moms))
return out
###Output
_____no_output_____
###Markdown
Define decomposition
###Code
def decomp(ISO):
# Get data
estimates = pd.DataFrame(index=parameters)
estimates.index.name = "Name"
estimates["P1"] = all_estimates[ISO+"_P1"]
estimates["P2"] = all_estimates[ISO+"_P2"]
# Make DataFrame with permutations
l_permutation = list(itertools.product([0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1]))
df_permutation = pd.DataFrame(l_permutation,columns = parameters)
df_permutation["permsum"] = df_permutation.sum(axis=1)
# Calculate weights
for parameter in parameters:
switch = df_permutation[parameter]
arr = np.array(df_permutation["permsum"]-switch)
df_permutation["w_" + parameter] = (factorial(arr)*factorial(8-arr))/factorial(9)
#Evaluate the model moments for every combination of period-1/period-2 parameter values (2**9 = 512 combinations). Save in DataFrame df.
result = [(AvgRet(par,moms),CapShare(par,moms),rf(par,moms),PD(par,moms),ik(par,moms),TFPgrowth(par,moms),priceinvt(par,moms),growthpop(par,moms),EmpPop(par,moms))
for par.beta in [estimates.loc["beta","P1"],estimates.loc["beta","P2"]]
for par.mu in [estimates.loc["mu","P1"],estimates.loc["mu","P2"]]
for par.p in [estimates.loc["p","P1"],estimates.loc["p","P2"]]
for par.delta in [estimates.loc["delta","P1"],estimates.loc["delta","P2"]]
for par.alpha in [estimates.loc["alpha","P1"],estimates.loc["alpha","P2"]]
for par.g_L in [estimates.loc["g_L","P1"],estimates.loc["g_L","P2"]]
for par.g_Z in [estimates.loc["g_Z","P1"],estimates.loc["g_Z","P2"]]
for par.g_Q in [estimates.loc["g_Q","P1"],estimates.loc["g_Q","P2"]]
for par.N_bar in [estimates.loc["N_bar","P1"],estimates.loc["N_bar","P2"]]
]
df = pd.DataFrame(result, columns=moments)
# Join permutations to results
df = df_permutation.join(df)
# Take means, etc, to create results
df_results = pd.DataFrame(columns=parameters)
df_results["moment"] = moments
df_results = df_results.set_index("moment",drop=True)
#Take means of all possible orders, conditional on one parameter.
for mom in moments:
for parm in parameters:
result = (df.loc[(df[parm] == 1),mom].mul(df["w_"+parm]).sum()-df.loc[(df[parm] == 0),mom].mul(df["w_"+parm]).sum())
df_results.loc[mom,parm] = result
# Format results
df_results_formatted = df_results.copy()
df_results_formatted = df_results_formatted.astype(float) # Convert all to floats (they appear to be strings?)
df_results_formatted.insert(0,'sum',df_results_formatted.sum(axis=1, skipna=True)) # Sum rows
# Multiply all, except for PD, by 100
scalar = [1 if i=="PD" else 100 for i in df_results_formatted.index.tolist()] #Create list, 1 for PD, 100 for all other
df_results_formatted = df_results_formatted.multiply(scalar,axis=0)
# Construct Spread as AvgRet - rf
df_results_formatted.loc["Spread",:] = df_results_formatted.loc["AvgRet",:] - df_results_formatted.loc["rf",:]
# Round and set padding zeros
return df_results_formatted
# Define functions
def misc(par,moms):
par.b = -np.log(1-0.15)
par.bh = -np.log(1+0.15)
par.sigma = 0.5
par.theta = 12
par.g_T = (1+par.g_L)*(1+par.g_Z)**(1/(1-par.alpha))*(1+par.g_Q)**(par.alpha/(1-par.alpha)) - 1
par.g_PC = (1+par.g_T)/(1+par.g_L) - 1
par.MOM2 = ((1-2*par.p)+par.p*np.exp(-par.bh*(1-par.theta)) + par.p*np.exp(-par.b*(1-par.theta)))
par.MOM3 = ((1-2*par.p)+par.p*np.exp(-par.bh*(-par.theta)) + par.p*np.exp(-par.b*(-par.theta)))
par.MOM = (par.MOM2)**((1-par.sigma)/(1-par.theta))
par.beta_star = par.beta * (1+par.g_PC)**(-par.sigma) * par.MOM
#Define equations as functions of parameters only
def growthpop(par,moms):
return par.g_L
def priceinvt(par,moms):
return -par.g_Q
def ik(par,moms):
return (1+par.g_Q)*(1+par.g_T)-(1-par.delta)
def EmpPop(par,moms):
return par.N_bar
def CapShare(par,moms):
return (par.mu+par.alpha-1)/(par.mu)
def TFPgrowth(par,moms):
misc(par,moms)
moms.CapShare = (par.mu+par.alpha-1)/(par.mu)
return par.g_T-(1-moms.CapShare)*par.g_L-moms.CapShare*(par.g_T+par.g_Q)
def AvgRet(par,moms):
misc(par,moms)
r_star = 1/par.beta_star - 1
return ((par.mu + par.alpha -1)/(par.alpha))*(r_star + par.delta + par.g_Q*(1+r_star))
def rf(par,moms):
misc(par,moms)
return par.MOM2/(par.MOM3*par.beta_star)-1
def PD(par,moms):
misc(par,moms)
return (par.beta_star*(1+par.g_T)/(1-par.beta_star*(1+par.g_T)))
###Output
_____no_output_____
###Markdown
Calculate moments for all countries
###Code
all_moments = pd.DataFrame(index=moments)
for iso in countries:
dfx = calc_moments(iso,startP1,endP1,startP2,endP2)
all_moments[iso+"_P1"] = dfx["p1"]
all_moments[iso+"_P2"] = dfx["p2"]
all_moments[iso+"_change"] = dfx["change"]
###Output
_____no_output_____
###Markdown
Run calibration for all countries
###Code
all_estimates = pd.DataFrame(index=parameters)
for iso in countries:
for p in ["P1","P2"]:
if p == "P1": estimates = calibrate(iso,startP1,endP1,iso+"_"+p,b,theta,sigma)
if p == "P2": estimates = calibrate(iso,startP2,endP2,iso+"_"+p,b,theta,sigma)
all_estimates[iso+"_"+p] = estimates
class par: None
class moms: None
del estimates
###Output
_____no_output_____
###Markdown
Print Latex table of moments for chosen country
###Code
if print_latex == 1:
table = all_moments.loc[:,ISO+"_P1":ISO+"_change"]
for var in table.columns.tolist()[:]:
table[var] = table[var].map('${:,.3f}$'.format)
table.index = table.index.to_series().map(data_series)
latex = tabulate(table, tablefmt="latex_raw")
print("\\begin{tabular}{lrrr}")
print("\\toprule")
print(" & \\multicolumn{2}{c}{\\textit{Averages}} & \\\\ \\cmidrule(lr){2-3} ")
print(" & 1984 - 1999 & 2000 - 2015 & Change \\\\ ")
print("\\midrule")
print(latex[29:-21])
print("\\bottomrule")
print("\end{tabular}")
###Output
_____no_output_____
###Markdown
Print Latex table of estimates for chosen country
###Code
if print_latex == 1:
#ISO = "GBR" # Uncomment to set country here
table = all_estimates.loc[:,ISO+"_P1":ISO+"_P2"].copy()
table['Parameter name'] = ["Discount factor","Markup","Disaster probability","Depreciation, pct.","Cobb-Douglas parameter","Population growth, pct.","TFP growth, pct.","Technological change, pct.","Labour Supply"]
table['Symbol'] = ["$\\beta$","$\\mu$","$p$","$\\delta$","$\\alpha$","$g_L$","$g_Z$","$g_Q$","$\\bar{N}$"]
table['Difference'] = table[ISO+"_P2"] - table[ISO+"_P1"]
table = table[["Parameter name","Symbol",ISO+"_P1",ISO+"_P2","Difference"]]
table[ISO+"_P1"] = table[ISO+"_P1"].multiply([1,1,1,100,1,100,100,100,1])
table[ISO+"_P2"] = table[ISO+"_P2"].multiply([1,1,1,100,1,100,100,100,1])
table["Difference"] = table["Difference"].multiply([1,1,1,100,1,100,100,100,1])
for var in table.columns.tolist()[2:]:
table[var] = table[var].map('${:,.3f}$'.format)
latex = tabulate(table, tablefmt="latex_raw", showindex=False)
print("\\begin{tabular}{lcccr}")
print("\\toprule")
print("& & & \\textit{Estimates} & \\\\ \\cmidrule(lr){3-5}")
print("Parameter name & Symbol & " + str(startP1) + " - " + str(endP1) + " & " + str(startP2) + " - " + str(endP2) + " & Change \\\\")
print("\midrule")
print(latex[30:-21])
print("\\bottomrule")
print("\\end{tabular}")
###Output
_____no_output_____
###Markdown
Print Latex table of decomposition for chosen country
###Code
if print_latex == 1:
#ISO = "GBR" # Uncomment to set country here
# Set data values from calc_moments
mom_P1 = all_moments.loc[:,ISO+"_P1"]
mom_P2 = all_moments.loc[:,ISO+"_P2"]
# Calculate spread
mom_P1.loc["Spread"] = all_moments.loc["AvgRet",ISO+"_P1"]-all_moments.loc["rf",ISO+"_P1"]
mom_P2.loc["Spread"] = all_moments.loc["AvgRet",ISO+"_P2"]-all_moments.loc["rf",ISO+"_P2"]
formatted_decomp = decomp(ISO).copy()
formatted_decomp.insert(0,"P2",mom_P2)
formatted_decomp.insert(0,"P1",mom_P1)
table = formatted_decomp
for var in table.columns.tolist()[:]:
table[var] = table[var].map('${:,.2f}$'.format)
table.index = table.index.to_series().map(data_series)
latex = tabulate(table, tablefmt="latex_raw")
latex = tabulate(formatted_decomp, showindex=True, tablefmt="latex_raw")
print("\\begin{tabular}{lrrrrrrrrrrrr}")
print("\\toprule")
print(" & \multicolumn{3}{c}{\\textit{Data}} & \multicolumn{9}{c}{\\textit{Decomposition of $\\Delta$}} \\\\ \\cmidrule(lr){2-4} \\cmidrule(lr){5-13}")
print(" & P1 & P2 & $\\Delta$ & $\\beta$ & $\\mu$ & $p$ & $\\delta$ & $\\alpha$ & $g_L$ & $g_Z$ & $g_Q$ & $\\bar{N}$ \\\\")
print("\\midrule")
print(latex[38:-21])
print("\\bottomrule")
print("\\end{tabular}")
###Output
_____no_output_____
###Markdown
Show estimated parameters for chosen country
###Code
if show_results == 1:
table = all_estimates.loc[:,ISO+"_P1":ISO+"_P2"]
for var in table.columns.tolist()[:]:
table[var] = table[var].map('${:,.3f}$'.format)
display(table)
###Output
_____no_output_____
###Markdown
Show decomposition for chosen country
###Code
if show_results == 1:
table = decomp(ISO)
for var in table.columns.tolist()[:]:
table[var] = table[var].map('${:,.2f}$'.format)
display(table)
###Output
_____no_output_____
###Markdown
Analysis of training data Imports
###Code
from __future__ import print_function
import os
import pandas as pd
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from collections import Counter
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load Data Files
###Code
def load_files(out_dir, tier):
questions = pd.read_csv(os.path.join(out_dir, tier + '.question'), delimiter="\n", header=None, names=["data"])
contexts = pd.read_csv(os.path.join(out_dir, tier + '.context'), delimiter="\n", header=None, names=["data"])
answers = pd.read_csv(os.path.join(out_dir, tier + '.answer'), delimiter="\n", header=None, names=["data"])
spans = pd.read_csv(os.path.join(out_dir, tier + '.span'), delimiter=" ", header=None, names=["start", "end"])
return questions, contexts, answers, spans
train_questions, train_contexts, train_answers, train_spans = load_files("data", "train")
dev_questions, dev_contexts, dev_answers, dev_spans = load_files("data", "dev") # Currently not used
###Output
_____no_output_____
###Markdown
Analyze Data Utils
###Code
def plot_data_counts(data, title, num_bins=20, q=99):
top_n = data.value_counts().nlargest(15).to_frame()
occurances = np.array(top_n.values)[:, 0]
percentages = np.round(occurances / np.sum(occurances), 3)
table = np.stack((np.array(top_n.index), occurances, percentages), axis =1)
percentile = np.percentile(data, q)
fig = plt.figure(figsize=(18,5))
ax1 = fig.add_subplot(121)
ax1.hist(data, num_bins, density=True, facecolor='blue', alpha=0.5)
ax1.axvline(percentile, color='b', linestyle='dashed', linewidth=2, label=str(q) + " Percentile is " + str(percentile))
ax1.legend()
ax1.set_xlabel('Count')
ax1.set_ylabel('Occurrence Percentage')
ax1.set_title(title.title())
ax2 = fig.add_subplot(122)
font_size=14
bbox=[0, 0, 1, 1]
ax2.axis('off')
mpl_table = ax2.table(cellText=table, bbox=bbox, colLabels=["Count", "Occurances", "Rate"])
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
ax2.set_title(title.title())
plt.show()
###Output
_____no_output_____
###Markdown
Analyze Word Counts
###Code
def analyze_word_count(dataset, title, num_bins=50):
dataset["word_count"] = dataset["data"].apply(lambda x: len(str(x).split(" ")))
plot_data_counts(dataset["word_count"], title.title() + " Word Counts", num_bins=num_bins)
analyze_word_count(train_questions, "train questions")
analyze_word_count(train_contexts, "train contexts")
analyze_word_count(train_answers, "train answers")
###Output
_____no_output_____
###Markdown
Analyze Head Words
###Code
def analyze_start_word(dataset, title, start=0, end=1):
heads = dataset["data"].apply(lambda x: " ".join(str(x).split(" ")[start: end]))
top_n = heads.value_counts().nlargest(15).to_frame()
occurances = np.array(top_n.values)[:, 0]
percentages = np.round(occurances / np.sum(occurances), 3)
table = np.stack((np.array(top_n.index), occurances, percentages), axis =1)
fig = plt.figure(figsize=(18,5))
ax1 = fig.add_subplot(121)
pd.value_counts(heads).nlargest(20).plot.bar(ax=ax1)
ax1.set_xlabel('Word Count')
ax1.set_ylabel('Occurrences')
ax1.set_title(title.title() + " Head")
ax2 = fig.add_subplot(122)
font_size=14
bbox=[0, 0, 1, 1]
ax2.axis('off')
mpl_table = ax2.table(cellText=table, bbox=bbox, colLabels=["Head", "Occurances", "Rate"])
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
ax2.set_title(title.title() + " Top Head Words")
plt.show()
analyze_start_word(train_questions, "train questions", start=0, end=1)
analyze_start_word(train_questions, "train questions", start=0, end=2)
analyze_start_word(train_contexts, "train contexts", start=0, end=1)
analyze_start_word(train_contexts, "train contexts", start=0, end=2)
analyze_start_word(train_answers, "train answers", start=0, end=1)
analyze_start_word(train_answers, "train answers", start=0, end=2)
###Output
_____no_output_____
###Markdown
Analyze Answer Position
###Code
def analyze_answer_pos(context, span, title, num_bins=100, percentile=99):
plot_data_counts(span["start"], title + " Start Position", num_bins=num_bins, q=percentile)
plot_data_counts(span["end"], title + " End Position", num_bins=num_bins, q=percentile)
plot_data_counts(span["end"]-span["start"] + 1, title + " Length", num_bins=num_bins, q=percentile)
analyze_answer_pos(train_contexts, train_spans, "Train Span")
###Output
_____no_output_____
###Markdown
We are only interested in books with a rating of 4 or more that were published after 1500
###Code
books = books[(books.rating > 4) & (books.publication_year >= 1500)]
books['decade'] = books['publication_year'] // 10 * 10
decade_count = books.groupby('decade').count()
decade_count = decade_count['title']
decade_count = decade_count.sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Decades with the most bestsellers
###Code
decade_count.head(10)
###Output
_____no_output_____
###Markdown
Comment: the hypothesis that the most widely read books are contemporary was confirmed
###Code
best_authors = books.groupby('author').count()
best_authors = best_authors[['title']].rename({'title':'count'}, axis=1)
###Output
_____no_output_____
###Markdown
The twenty most successful authors
###Code
best_authors.sort_values('count', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Comment: the hypothesis that the most successful authors are mostly men was confirmed Correlation between publication year and rating
###Code
books.plot(kind = 'scatter', x = 'decade', y = 'rating')
###Output
_____no_output_____
###Markdown
Comment: contrary to expectations, we see that over the years not only the number of popular books increases, but also their ratings
###Code
romances = books[books.title.isin(genres[genres.genre == 'Romance'].title)]
romances
romances['century'] = romances['decade'] // 100 * 100
romances.head()
romances = romances.groupby('century').count()[['title']].rename({'title':'count'}, axis=1)
romances
romances.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Comment: most romance novels are contemporary, but this may simply be a consequence of the fact that most of the books on the list are contemporary. Let us look at which era's romance novels are, in relative terms, the biggest hits.
###Code
books['century'] = books['decade'] // 100 * 100
books.head()
century_count = books.groupby('century').count()
century_count = century_count['title']
century_count = century_count.sort_values(ascending=False)
century_count.head()
century_count = century_count.reset_index()
century_count.columns = ['century', 'count']
century_count.head()
romances.reset_index(inplace=True)
romances.head()
romances_percentage = romances.merge(century_count, on='century')
romances_percentage.columns = ['century', 'romances', 'total']
romances_percentage['percentage_of_romances'] = romances_percentage['romances'] / romances_percentage['total']
romances_percentage.sort_values('percentage_of_romances')
romances_percentage
###Output
_____no_output_____
###Markdown
Relative share of romance books
###Code
romances_percentage.plot(kind='bar', x='century', y='percentage_of_romances')
###Output
_____no_output_____
###Markdown
Outcome (Non-diabetic vs Diabetic)
###Code
df.groupby('Outcome').count()
###Output
_____no_output_____
###Markdown
Plots of Diabetic Cases
###Code
sns.set_theme(style="darkgrid")
cols = df.columns[:8]
plt.subplots(figsize=(16, 6))
for i, col in enumerate(cols):
plt.subplot(2, 4, i + 1)
plt.subplots_adjust(wspace=0.5, hspace=0.5)
df[col].hist(bins=20)
plt.title(col)
plt.show()
sns.pairplot(data=df, hue='Outcome', kind="reg", diag_kind='kde')
plt.show()
###Output
_____no_output_____
###Markdown
No pairs of attributes clearly separate the two outcome classes. PCA
###Code
scaled = StandardScaler().fit_transform(df)
scaled_df = pd.DataFrame(scaled, columns=df.columns)
pca = PCA()
pca.fit(scaled_df)
pca_df = pca.transform(scaled_df)
sns.barplot(x=df.columns, y=pca.explained_variance_ratio_)
plt.xticks(rotation=90)
plt.xlabel('Principal Components')
plt.ylabel('Explained Variance Ratio')
plt.title('Scree Plot')
plt.show()
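# If a variance threshold is of interest, the cumulative share could be inspected as well, e.g.:
# np.cumsum(pca.explained_variance_ratio_)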
###Output
_____no_output_____
###Markdown
basic vis
###Code
# function producing histogram-like plots for categorical variables
# in this dataset. Results split by case_status
# df = df
# var = var to display
# num = number of categories to display (in descending order)
def plot_cat(df, var, num):
sns.countplot(y = var,
hue = 'case_status',
data = df,
order = df[var].value_counts().iloc[:num].index)
plot_cat(df, 'employer_state', 10)
plot_cat(df, 'applicant_major', 15)
plot_cat(df, 'citizenship', 10)
plot_cat(df, 'agent_firm_name', 5)
plot_cat(df, 'job_title', 10)
plot_cat(df, 'applicant_education', 10)
plot_cat(df, 'employer_name', 10)
max_wage = df['wage'] < 200000
df_red = df[max_wage]
plt.hist(df_red['wage'])
plt.show()
###Output
_____no_output_____
###Markdown
Modelling
###Code
# get labels as y
y = df['case_status']
# encode y as integer
label_encoder = LabelEncoder()
label_encoder = label_encoder.fit(y)
y = label_encoder.transform(y)
print(y)
# select feature subset
df_red = df.loc[:,["employer_state",
"employer_city",
"employer_num_employees",
"employer_yr_estab",
"applicant_major",
"applicant_uni",
"applicant_education",
"agent_firm_name",
"citizenship",
"job_title",
"wage"]]
print("Reduced shape:", df_red.shape)
###Output
Reduced shape: (27321, 11)
###Markdown
Constructing feature matrix:
###Code
# normalize numeric predictors
X = df_red.select_dtypes('float').values
for i in range(X.shape[1]):
m = np.mean(X[:,i])
sd = np.std(X[:,i])
X[:,i] = ((X[:,i] - m) / sd)
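# (this column-wise z-scoring of the float predictors is equivalent to sklearn's StandardScaler().fit_transform)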
# encode categorical predictors for full variable xgboost
df_X_to_encode = df_red.select_dtypes(exclude = ['float'])
X_to_encode = np.asarray(df_X_to_encode)
for i in range(0, X_to_encode.shape[1]):
print("Encoding feature", df_X_to_encode.columns[i])
label_encoder = LabelEncoder()
feature = label_encoder.fit_transform(X_to_encode[:,i])
onehot_encoder = OneHotEncoder(sparse=False, categories='auto')
feature = feature.reshape(-1, 1)
feature = onehot_encoder.fit_transform(feature)
X = np.concatenate((X, feature), axis=1)
print("Dummy count: ", feature.shape[1])
print("X shape:", X.shape[0], "x", X.shape[1])
###Output
Encoding feature employer_state
Dummy count: 55
Encoding feature employer_city
Dummy count: 2307
Encoding feature employer_yr_estab
Dummy count: 214
Encoding feature applicant_major
Dummy count: 4032
Encoding feature applicant_uni
Dummy count: 6523
Encoding feature applicant_education
Dummy count: 6
Encoding feature agent_firm_name
Dummy count: 2969
Encoding feature citizenship
Dummy count: 148
Encoding feature job_title
Dummy count: 1974
X shape: 27321 x 18230
###Markdown
**TO DO:** Apply fuzzy word matching to shrink the number of categories. Problematic variables: * job_title * applicant_uni * applicant_major. A minimal sketch of this idea is shown below.
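A minimal, purely illustrative sketch of this idea (the use of difflib from the standard library, the 0.9 cutoff and the job_title example are assumptions, not part of the pipeline above):
###Code
# Hypothetical sketch: fold near-duplicate category strings onto a canonical label
# using difflib from the standard library; cutoff and column are illustrative choices.
from difflib import get_close_matches

def collapse_categories(values, cutoff=0.9):
    canonical, mapping = [], {}
    for v in values:
        match = get_close_matches(v, canonical, n=1, cutoff=cutoff)
        if match:
            mapping[v] = match[0]   # close enough to an existing canonical label
        else:
            canonical.append(v)     # keep this value as a new canonical label
            mapping[v] = v
    return mapping

# e.g.: df_red['job_title'] = df_red['job_title'].map(collapse_categories(df_red['job_title'].astype(str).unique()))
###Output
_____no_output_____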
###Code
# train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
###Output
_____no_output_____
###Markdown
Logit
###Code
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression(solver = 'liblinear')
logmodel.fit(X_train, y_train)
ylog = logmodel.predict(X_test)
recall = recall_score(y_true = y_test,
y_pred = ylog)
precision = precision_score(y_true = y_test,
y_pred = ylog)
accuracy = accuracy_score(y_true = y_test,
y_pred = ylog)
print("Recall:", recall)
print("Precision:", precision)
print("Accuracy:", accuracy)
#pickle model
filename = 'models/xgb1.pkl'
with open(filename, 'wb') as file:
pickle.dump(logmodel, file)
###Output
_____no_output_____
###Markdown
xgboost
###Code
# construct dmatrix
visa_dmatrix = xgb.DMatrix(data = X, label = y)
# set params
params = {
'eta': 0.3,
'max_depth': 4,
'objective': 'binary:logistic',
}
# run xgb built-in cross-validation
cv_results = xgb.cv(dtrain = visa_dmatrix,
params = params,
nfold = 10,
num_boost_round = 5,
metrics = 'aucpr',
as_pandas = 'True',
seed = 123)
print(cv_results)
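# A possible next step (sketch under the same assumptions as above): after inspecting the
# cross-validation results, train a final booster on the full DMatrix with the same parameters.
# num_boost_round = 5 simply mirrors the CV call; it is not a tuned value.
bst = xgb.train(params=params, dtrain=visa_dmatrix, num_boost_round=5)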
###Output
_____no_output_____
###Markdown
Import data to database
###Code
card_holder_csv = Path(".\Data\card_holder.csv")
credit_card_csv = Path(".\Data\credit_card.csv")
merchant_category_csv = Path(".\Data\merchant_category.csv")
merchant_csv = Path(".\Data\merchant.csv")
transaction_csv = Path(".\Data\transaction.csv")
seed_data = Path(".\Data\seed.sql")
schema = Path(".\schema.sql")
eng = create_engine("postgres://postgres:W@terH0u53@localhost/postgres")
with eng.connect() as con:
schema_sql = text(schema.read_text())
seed_sql = text(seed_data.read_text())
con.execute(schema_sql)
con.execute(seed_sql)
###Output
_____no_output_____
###Markdown
Analysis How can you isolate (or group) the transactions of each cardholder?
###Code
trans = pd.DataFrame
with eng.connect() as con:
trans = pd.read_sql("transaction", con)
trans.head(10)
# grouping the transactions by card number
card_groups = trans.groupby("card", axis=0)
card_groups.count()
###Output
_____no_output_____
###Markdown
Niki.ai
###Code
import pandas as pd
import numpy as np
import nltk
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv("label.txt",sep=",,,",header=None ,names=['question','type'])
df.head()
df.shape
df['type']=df['type'].str.strip()
df['type'].unique()
df['question'].values
df['question'] = df['question'].apply(lambda x: x.lower())
import re  # needed for the regex cleanup below
df['question'] = df['question'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
VALIDATION_SPLIT=0.20
###Output
_____no_output_____
###Markdown
Naive Bayes with tfidf vectorizer
###Code
from collections import Counter, defaultdict
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle as pkl
from sklearn.naive_bayes import MultinomialNB
# organize imports
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from nltk.stem import SnowballStemmer
from nltk import word_tokenize
from nltk.corpus import wordnet as wn
class StemTokenizer(object):
def __init__(self):
self.ignore_set = {'footnote', 'nietzsche', 'plato', 'mr.'}
def __call__(self, doc):
words = []
for word in word_tokenize(doc):
word = word.lower()
w = wn.morphy(word)
if w and len(w) > 1 and w not in self.ignore_set:
words.append(w)
return words
stemmer = SnowballStemmer('english').stem
def stem_tokenize(text):
return [stemmer(i) for i in word_tokenize(text)]
###Output
_____no_output_____
###Markdown
Using Count Vectorizer
###Code
vectorizer = CountVectorizer(analyzer='word',lowercase=True,tokenizer=stem_tokenize)
X_train = vectorizer.fit_transform(df.question.values)
with open('vectorizer.pk', 'wb') as fin:
pkl.dump(vectorizer, fin)
labels = df['type']
# split the data into a training set and a validation set
indices = np.arange(X_train.shape[0])
np.random.shuffle(indices)
X_train = X_train[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * X_train.shape[0])
x_train = X_train[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = X_train[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
clf = MultinomialNB()
clf.fit(x_train,y_train)
# evaluate the model of test data
preds = clf.predict(x_val)
print(classification_report(preds,y_val))
print("Accuracy :",clf.score(x_val,y_val))
example=vectorizer.transform(["What time does the train leave"])
clf.predict(example)
###Output
_____no_output_____
###Markdown
Using TF-IDF (though often a poor choice for very short texts such as these questions, since term frequencies are mostly 0 or 1 and the IDF weighting dominates)
###Code
tf_vectorizer = TfidfVectorizer(analyzer='word',lowercase=True,tokenizer=stem_tokenize)
X_train = tf_vectorizer.fit_transform(df.question.values)
with open('tf_vectorizer.pk', 'wb') as fin:
pkl.dump(tf_vectorizer, fin)
labels = df['type']
# split the data into a training set and a validation set
indices = np.arange(X_train.shape[0])
np.random.shuffle(indices)
X_train = X_train[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * X_train.shape[0])
x_train = X_train[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = X_train[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
clf = MultinomialNB()
clf.fit(x_train,y_train)
# evaluate the model of test data
preds = clf.predict(x_val)
print(classification_report(preds,y_val))
print("Accuracy :",clf.score(x_val,y_val))
example=tf_vectorizer.transform(["What time does the train leave"])
clf.predict(example)
###Output
_____no_output_____
###Markdown
LSTM
###Code
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
MAX_NB_WORDS = 20000
MAX_SEQUENCE_LENGTH=30
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import re
data=df.copy()
print(data['type'].value_counts())
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, split=' ')
tokenizer.fit_on_texts(data['question'].values)
X = tokenizer.texts_to_sequences(data['question'].values)
X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
Y = data['type']
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(Y)
Y=le.transform(Y)
labels = to_categorical(np.asarray(Y))
print('Shape of data tensor:', X.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
X = X[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * X.shape[0])
x_train = X[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = X[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
embeddings_index = {}
f = open('E:/Projects/Word2Vec/glove.42B.300d.txt', encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
EMBEDDING_DIM=300
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
from keras.layers import Embedding
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
embed_dim = 300
lstm_out = 196
model = Sequential()
model.add(embedding_layer)
model.add(LSTM(lstm_out, dropout=0.25, recurrent_dropout=0.25))
model.add(Dense(5,activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
print(model.summary())
model.fit(x_train, y_train,
batch_size=128,
epochs=20,
validation_data=(x_val, y_val))
example = tokenizer.texts_to_sequences(["What time does the train leave"])
example = pad_sequences(example, maxlen=MAX_SEQUENCE_LENGTH)
le.inverse_transform([np.argmax(model.predict(example))])
###Output
_____no_output_____
###Markdown
What is the global glacier ice volume outside the ice sheets? Code & data attached to the manuscript. If using the data for something else, please refer to the original sources.
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Read in the various estimates
###Code
# Match regional agg choices Millan 2022
def reformat_df(df):
df.loc['01, 02'] = df.loc[['01', '02']].sum()
df.loc['13, 14, 15'] = df.loc[['13', '14', '15']].sum()
return df.drop(['01', '02'] + ['13', '14', '15']).sort_index()
# Output
gdf = pd.DataFrame()
s = 'mb96'
gdf.loc['Global', f'{s}_V'] = 180000
gdf.loc['Global', f'{s}_V_err'] = 40000
gdf.loc['Global', f'{s}_SLE'] = 0.5
gdf.loc['Global', f'{s}_SLE_err'] = 0.1
s = 'o04'
gdf.loc['excl. A. & G.', f'{s}_V'] = 56000
gdf.loc['excl. A. & G.', f'{s}_V_err'] = np.NaN
gdf.loc['excl. A. & G.', f'{s}_SLE'] = 0.15
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = np.NaN
s = 'dm05'
gdf.loc['Global', f'{s}_V'] = 260000
gdf.loc['Global', f'{s}_V_err'] = 65000
gdf.loc['Global', f'{s}_SLE'] = 0.65
gdf.loc['Global', f'{s}_SLE_err'] = 0.16
gdf.loc['excl. A. & G.', f'{s}_V'] = 133000
gdf.loc['excl. A. & G.', f'{s}_V_err'] = 20000
gdf.loc['excl. A. & G.', f'{s}_SLE'] = 133000 * 0.9 / 362 * 1e-3
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = 20000 * 0.9 / 362 * 1e-3
s = 'rb05'
gdf.loc['excl. A. & G.', f'{s}_V'] = 87000
gdf.loc['excl. A. & G.', f'{s}_V_err'] = 10000
gdf.loc['excl. A. & G.', f'{s}_SLE'] = 0.241
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = 0.026
###Output
_____no_output_____
###Markdown
The following estimates have regional tables. Radic & Hock 2010 This is pre-RGI and slightly different:
###Code
rh10 = pd.read_csv('data/rh10.csv', index_col=0, header=1)
rh10
rh10_total = rh10.iloc[-1:].copy()
rh10_total
rh10 = rh10.iloc[:-1].copy().drop('WGI_XF', axis=1)
rh10.index = [f'{int(c):02d}' for c in rh10.index]
rh10.sum().to_frame().T
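# regional uncertainties are combined in quadrature (root sum of squares), i.e. treated as uncorrelated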
((rh10**2).sum()**0.5).loc[['A_err.1', 'V_err.1', 'SLE_err']].to_frame().T
###Output
_____no_output_____
###Markdown
Table is consistent. **Volume without 05 and 19**:
###Code
rh10_no = rh10.drop(['17', '18', '19'])
rh10_no_s = rh10_no.sum().to_frame().T
rh10_no_s
err = ((rh10_no**2).sum()**0.5).loc[['A_err.1', 'V_err.1', 'SLE_err']].to_frame().T
err
rh10_no['V.1'].values
s = 'rh10'
gdf.loc['Global', f'{s}_V'] = rh10_total['V.1'].values
gdf.loc['Global', f'{s}_V_err'] = rh10_total['V_err.1'].values
gdf.loc['Global', f'{s}_SLE'] = rh10_total['SLE'].values * 1e-3
gdf.loc['Global', f'{s}_SLE_err'] = rh10_total['SLE_err'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_V'] = rh10_no_s['V.1'].values
gdf.loc['excl. A. & G.', f'{s}_V_err'] = err['V_err.1'].values
gdf.loc['excl. A. & G.', f'{s}_SLE'] = rh10_no_s['SLE'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = err['SLE_err'].values * 1e-3
###Output
_____no_output_____
###Markdown
Marzeion et al., 2012
###Code
m12 = pd.read_csv('data/m12.csv', index_col=0)
m12[['A', 'A_err']] = m12[['A', 'A_err']] * 1e3
###Output
_____no_output_____
###Markdown
Let's compute the volumes from SLE:
###Code
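# Conversion: 1 mm of sea-level equivalent corresponds to roughly 362 Gt of water (ocean area ~3.62e8 km2);
# dividing by 0.9 (ice density relative to water) turns the water equivalent into km3 of ice.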
m12['V'] = m12['SLE'] * 362 / 0.9
m12['V_err'] = m12['SLE_err'] * 362 / 0.9
m12_total = m12.iloc[-1:].copy()
m12_total
m12 = m12.iloc[:-1].copy()
m12.index = [f'{int(c):02d}' for c in m12.index]
m12.sum().to_frame().T
((m12**2).sum()**0.5).loc[['A_err', 'V_err', 'SLE_err']].to_frame().T
###Output
_____no_output_____
###Markdown
OK Table is more or less consistent, **uncertainty estimates computed as uncorrelated.** **Volume without 05 and 19**:
###Code
m12_no5 = m12.drop('05')
m12_no5_s = m12_no5.sum().to_frame().T
m12_no5_s
err = ((m12_no5**2).sum()**0.5).loc[['A_err', 'V_err', 'SLE_err']].to_frame().T
err
s = 'm12'
gdf.loc['excl. A. & G.', f'{s}_V'] = m12_no5_s['V'].values
gdf.loc['excl. A. & G.', f'{s}_V_err'] = err['V_err'].values
gdf.loc['excl. A. & G.', f'{s}_SLE'] = m12_no5_s['SLE'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = err['SLE_err'].values * 1e-3
###Output
_____no_output_____
###Markdown
Huss & Farinotti 2012
###Code
hf12 = pd.read_csv('data/hf12.csv', index_col=0).drop('Name', axis=1)
hf12
hf12_total = hf12.iloc[[-1]].copy()
hf12_total
hf12 = hf12.iloc[:-1].copy()
hf12.index = [f'{int(c):02d}' for c in hf12.index]
hf12.sum().to_frame().T
###Output
_____no_output_____
###Markdown
OK Table is more or less consistent. The volume is off by 11 and the error estimates aren't exact (using uncorrelated is much worse) **Volume without 05 and 19**:
###Code
hf12_no = hf12.drop(['05', '19'])
hf12_no_s = hf12_no.sum().to_frame().T
hf12_no_s
s = 'hf12'
gdf.loc['Global', f'{s}_V'] = hf12_total['V'].values
gdf.loc['Global', f'{s}_V_err'] = hf12_total['V_err'].values
gdf.loc['Global', f'{s}_SLE'] = hf12_total['SLE'].values * 1e-3
gdf.loc['Global', f'{s}_SLE_err'] = hf12_total['SLE_err'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_V'] = hf12_no_s['V'].values
gdf.loc['excl. A. & G.', f'{s}_V_err'] = hf12_no_s['V_err'].values
gdf.loc['excl. A. & G.', f'{s}_SLE'] = hf12_no_s['SLE'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = hf12_no_s['SLE_err'].values * 1e-3
###Output
_____no_output_____
###Markdown
Grinsted, 2013
###Code
g13 = pd.read_csv('data/g13.csv', index_col=0)
###Output
_____no_output_____
###Markdown
Let's compute the volumes from SLE:
###Code
g13['V'] = g13['SLE'] * 362 / 0.9
g13['V_err'] = g13['SLE_err'] * 362 / 0.9
g13_total = g13.iloc[-2:].copy()
g13_total
g13 = g13.iloc[:-2].copy()
g13.index = [f'{int(c):02d}' for c in g13.index]
g13.sum().to_frame().T
###Output
_____no_output_____
###Markdown
OK Table is more or less consistent. **Volume without 05 and 19**:
###Code
g13_no = g13.drop(['05', '19'])
g13_no_s = g13_no.sum().to_frame().T
g13_no_s
s = 'g13'
gdf.loc['Global', f'{s}_V'] = g13_total.loc['Total', 'V']
gdf.loc['Global', f'{s}_V_err'] = g13_total.loc['Total', 'V_err']
gdf.loc['Global', f'{s}_SLE'] = g13_total.loc['Total', 'SLE'] * 1e-3
gdf.loc['Global', f'{s}_SLE_err'] = g13_total.loc['Total', 'SLE_err'] * 1e-3
gdf.loc['excl. A. & G.', f'{s}_V'] = g13_total.loc['Withouth 5+19', 'V']
gdf.loc['excl. A. & G.', f'{s}_V_err'] = g13_total.loc['Withouth 5+19', 'V_err']
gdf.loc['excl. A. & G.', f'{s}_SLE'] = g13_total.loc['Withouth 5+19', 'SLE'] * 1e-3
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = g13_total.loc['Withouth 5+19', 'SLE_err'] * 1e-3
###Output
_____no_output_____
###Markdown
Radic et al., 2014
###Code
r14 = pd.read_csv('data/r14.csv', index_col=0)
r14_total = r14.iloc[[-1]].copy()
r14_total
r14 = r14.iloc[:-1].copy()
r14.index = [f'{int(c):02d}' for c in r14.index]
r14_s = r14.sum().to_frame().T
r14_s
###Output
_____no_output_____
###Markdown
OK Table is consistent. **Volume without 05 and 19**:
###Code
r14_no = r14.drop(['05', '19'])
r14_no_s = r14_no.sum().to_frame().T
r14_no_s
s = 'r14'
gdf.loc['Global', f'{s}_V'] = r14_total['V'].values
gdf.loc['Global', f'{s}_V_err'] = r14_total['V_err'].values
gdf.loc['Global', f'{s}_SLE'] = r14_total['SLE'].values * 1e-3
gdf.loc['Global', f'{s}_SLE_err'] = r14_total['SLE_err'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_V'] = r14_no_s['V'].values
gdf.loc['excl. A. & G.', f'{s}_V_err'] = np.NaN
gdf.loc['excl. A. & G.', f'{s}_SLE'] = r14_no_s['SLE'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = np.NaN
###Output
_____no_output_____
###Markdown
Farinotti et al., 2019
###Code
f19 = pd.read_csv('data/f19.csv', index_col=0)
f19[['V', 'V_err']] = f19[['V', 'V_err']] * 1e3
f19_total = f19.iloc[[-1]].copy()
f19_total
f19 = f19.iloc[:-1].copy()
f19.sum().to_frame().T
###Output
_____no_output_____
###Markdown
OK Table is consistent. **Volume without 05 and 19**:
###Code
f19_no = f19.drop(['05', '19'])
f19_no_s = f19_no.sum().to_frame().T
f19_no_s
s = 'f19'
gdf.loc['Global', f'{s}_V'] = f19_total['V'].values
gdf.loc['Global', f'{s}_V_err'] = f19_total['V_err'].values
gdf.loc['Global', f'{s}_SLE'] = f19_total['SLE'].values * 1e-3
gdf.loc['Global', f'{s}_SLE_err'] = f19_total['SLE_err'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_V'] = f19_no_s['V'].values
gdf.loc['excl. A. & G.', f'{s}_V_err'] = f19_no_s['V_err'].values
gdf.loc['excl. A. & G.', f'{s}_SLE'] = f19_no_s['SLE'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = f19_no_s['SLE_err'].values * 1e-3
###Output
_____no_output_____
###Markdown
Millan et al., 2022
###Code
m22 = pd.read_csv('data/m22.csv', index_col=0)
m22[['A', 'V', 'V_err']] *= 1e3;
m22_total = m22.iloc[[-1]].copy()
m22_total
# Also add a dataset with AA "cropped" only
m22_a = m22.iloc[[-2]].copy()
m22_a
m22 = m22.iloc[:-2].copy()
m22_s = m22.sum().to_frame().T
m22_s
# Total with other region
m22_ss = (m22.iloc[:-1].sum().to_frame()).T + m22_a.values
m22_ss
# Verification
# diff = 35.1 - 3.2
# print(diff * 1e3, 140900 - 109000)
###Output
_____no_output_____
###Markdown
OK Table is more or less consistent, with the **problem of global SLE of course**. **Volume without 05 and 19**:
###Code
m22_no = m22.drop(['05', '19'])
m22_no_s = m22_no.sum().to_frame().T
m22_no_s
s = 'm22'
gdf.loc['Global', f'{s}_V'] = m22_total['V'].values
gdf.loc['Global', f'{s}_V_err'] = m22_s['V_err'].values
gdf.loc['Global', f'{s}_SLE'] = m22_s['SLE'].values * 1e-3
gdf.loc['Global', f'{s}_SLE_err'] = m22_s['SLE_err'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_V'] = m22_no_s['V'].values
gdf.loc['excl. A. & G.', f'{s}_V_err'] = m22_no_s['V_err'].values
gdf.loc['excl. A. & G.', f'{s}_SLE'] = m22_no_s['SLE'].values * 1e-3
gdf.loc['excl. A. & G.', f'{s}_SLE_err'] = m22_no_s['SLE_err'].values * 1e-3
###Output
_____no_output_____
###Markdown
Final check and area vs volume plot
###Code
# We copy the values from the table to have the authors values
d = {
'mb96': [180000, 40000, 680000, 0.5],
# 'o04': [, ,],
'dm05': [260000, 65000, 785000, 0.65],
# 'rb05': [, ,],
'rh10': [241430, 29229, 741448, 0.6],
# 'm12': [, ,],
'hf12': [170214, 20688, 734856, 0.43],
'g13': [140778, 28155, 734933, 0.35],
'r14': [209973, 0, 736989, 0.522],
'f19': [158170, 41030, 705253, 0.324],
'm22': [140800, 40400, 705253, 0.311],
'm22*': [109000, 32130, 705253-106701, 0.257],
}
df_v = pd.DataFrame(d, index=['V', 'V_err', 'A', 'SLE']).T
df_v
for k in df_v.index:
if k == 'm22*':
continue
p = df_v.loc[k, 'V']
m = gdf.loc['Global', k +'_V']
if not np.allclose(p, m):
print(k, 'V', p, m)
p = df_v.loc[k, 'V_err']
m = gdf.loc['Global', k +'_V_err']
if not np.allclose(p, m):
print(k, 'V_err', p, m)
###Output
g13 V_err 28155.0 28155.555555555555
r14 V_err 0.0 nan
###Markdown
Plot
###Code
import seaborn as sns
import random
import matplotlib.pyplot as plt
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox # for the text box
from matplotlib.gridspec import GridSpec # for plot layout
# Separate vol and SLE
x = np.array([1, 2])
gdf.index = x
gdf_vol = gdf[[c for c in gdf if 'V' in c]].copy()
gdf_sle = gdf[[c for c in gdf if 'SLE' in c]].copy()
dataframes = {
'm12': reformat_df(m12),
'hf12': reformat_df(hf12),
'g13': reformat_df(g13),
'r14': reformat_df(r14),
'f19': reformat_df(f19),
'm22': m22,
}
legend = {
'mb96': 'Meier and Bahr, 1996',
'o04': 'Ohmura, 2004',
'dm05': 'Dyurgerov and Meier, 2005',
'rb05': 'Raper and Braithwaite 2005',
'rh10': 'Radić and Hock, 2010',
'm12': 'Marzeion and others, 2012',
'hf12': 'Huss and Farinotti, 2012',
'g13': 'Grinsted, 2013',
'r14': 'Radić and others, 2014',
'f19': 'Farinotti and others, 2019',
'm22': 'Millan and others, 2022',
}
# Axis labels for region plot, in order by RGI area
rgi_ids = reformat_df(f19).sort_values('A').index[::-1]
names = [
'Antarctic\nPeriphery',
'Arctic Canada\nNorth',
'Alaska, Western\nCanada & USA',
'High Mountain\nAsia',
'Greenland\nperiphery',
'Russian\nArctic',
'Arctic Canada\nSouth',
'Svalbard &\nJan Mayen',
'Southern\nAndes',
'Iceland',
'Scandinavia',
'Asia North',
'Low Latitudes',
'Central Europe',
'Caucasus &\nMiddle East',
'New Zealand'
]
strs = []
for i, n in zip(rgi_ids, names):
# strs.append(n + f' ({i})')
strs.append(n)
# Which data are available for the global plot
estimates = [c.split('_')[0] for c in gdf_vol if 'err' not in c]
sle_valid_keys_global = gdf_sle[[f'{e}_SLE' for e in estimates]].iloc[[0]].dropna(axis=1).columns
sle_valid_keys_no = gdf_sle[[f'{e}_SLE' for e in estimates]].iloc[[1]].dropna(axis=1).columns
v_valid_keys_global = gdf_vol[[f'{e}_V' for e in estimates]].iloc[[0]].dropna(axis=1).columns
v_valid_keys_no = gdf_vol[[f'{e}_V' for e in estimates]].iloc[[1]].dropna(axis=1).columns
# Make plot pretty
sns.set_context('talk')
sns.set_style('whitegrid')
# Figure size
scale_factor = 0.85 # to control font size in PNG
f = plt.figure(figsize=(22 * scale_factor, 18 * scale_factor))
# Axis layout
gs = GridSpec(3, 2, wspace=0.03)
ax1 = f.add_subplot(gs[0, 1])
ax2 = f.add_subplot(gs[1, 1])
ax3 = f.add_subplot(gs[2, 1])
ax4 = f.add_subplot(gs[:, 0])
ax1.sharex(ax2)
# Prepare the rhs plot for y distance between estimates - we want the same for b and c
nm = len(sle_valid_keys_global) / 40 # +1 for additional millan
offset_global = np.linspace(-nm, nm, len(sle_valid_keys_global) + 1)
nm = (len(sle_valid_keys_no) - 1) / 40
offset_no = np.linspace(-nm, nm, len(sle_valid_keys_no))
xtext = 0.85 # where to put the rhs text
# Colors - we shuffle for prettier colors in the "important" estimates
p = sns.color_palette("dark", len(estimates))
random.Random(2).shuffle(p)
cmillan = '#7f8dbe'
mimarker = 'D'
# Parameters
y_range_rhs = 0.27 # Make sure both plots have same size despite different # of estimates
fs_rhs = 18
fs_lhs = 16
# Shift axis up right
yshift = 0.03
# ------------- Plot top right (b) -------------
ax = ax1
toplot = gdf_sle.iloc[[0]]
for i, e in enumerate(sle_valid_keys_global):
estimate = e.split('_')[0]
color = p[estimates.index(estimate)]
err = toplot[f'{e}_err']
if not np.isfinite(err.values[0]):
err = None
ax.errorbar(toplot[f'{e}'], offset_global[i], xerr=err, fmt='o', c=color);
ax.text(xtext, offset_global[i], legend[estimate], c=color, va='center', fontsize=fs_rhs)
# Millan weird
ax.errorbar(257.2 * 1e-3, offset_global[i + 1], xerr=85 * 1e-3, fmt=mimarker, c=color, alpha=0.5);
ax.text(xtext, offset_global[i + 1], 'Millan and others, 2022*', c=cmillan, va='center', fontsize=fs_rhs)
# Axis cosmetics stuffs
ax.grid(axis='y')
ax.set_yticks([])
ax.set_ylim((-y_range_rhs, y_range_rhs))
sns.despine(ax=ax, bottom=False)
ax.set_title(r'$\bf{b}$ SLE global', loc='left')
plt.setp(ax1.get_xticklabels(), visible=False)
# Pos shenanigans
pos = ax.get_position()
pos.y0 += yshift;
ax.set_position(pos)
# ------------- Plot middle right (c) -------------
ax = ax2
toplot = gdf_sle.iloc[[1]]
for i, e in enumerate(sle_valid_keys_no):
estimate = e.split('_')[0]
color = p[estimates.index(estimate)]
err = toplot[f'{e}_err']
if not np.isfinite(err.values[0]):
err = None
ax.errorbar(toplot[f'{e}'], offset_no[i], xerr=err, fmt='o', c=color);
ax.text(xtext, offset_no[i], legend[estimate], c=color, va='center', fontsize=fs_rhs)
# Axis cosmetics stuffs
ax.grid(axis='y')
ax.set_yticks([])
ax.set_xlim([0, 0.85]);
ax.set_ylim((-y_range_rhs, y_range_rhs))
# Pos shenanigans
pos = ax.get_position()
pos.y0 += yshift * 2; pos.y1 += yshift;
ax.set_position(pos)
ax.set_xlabel('Sea-level equivalent (m)')
ax.set_title(r'$\bf{c}$ SLE excluding Antarctic periphery & Greenland periphery', loc='left');
sns.despine(ax=ax)
# ------------- Plot bottom right (d) -------------
ax = ax3
for estimate in df_v.index:
if estimate == 'm22*':
c = cmillan
fmt = mimarker
else:
c = p[estimates.index(estimate)]
fmt = 'o'
toplot = df_v.loc[[estimate]]
ax.errorbar(toplot.A * 1e-3, toplot.V * 1e-3, yerr=toplot.V_err * 1e-3, fmt=fmt, color=c, capsize=6)
# Text
e = estimate.upper()
if len(e) == 4 and '*' not in e:
text = f'{e[0]}{e[2:]}'
else:
text = f'{e[0]}{e[1:]}'
px, py = toplot.A * 1e-3 + 1.5, toplot.V * 1e-3 + 1.5
ha = 'left'
va = 'bottom'
if e in ['R14', 'DM05']:
px -= 3
ha = 'right'
if e in ['M22', 'G13']:
py -= 3
va = 'top'
ax.text(px, py, text, color=c, ha=ha, va=va, fontsize=14);
ax.set_title(r'$\bf{d}$ Global volume $\it{vs}$ area', loc='left')
ax.set_xlabel('Area (10$^3$ km$^2$)'); ax.set_ylabel('Volume (10$^3$ km$^3$)');
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
ax.tick_params(axis='both', which='both', length=0)
# Pos shenanigans
pos = ax.get_position()
pos.y1 -= yshift / 2;
ax.set_position(pos)
# ------------- Plot left (a) -------------
ax = ax4
# Index on the y-axis
rx = np.arange(len(m22))
# Space between estimates
offset = np.linspace(-0.25, 0.25, 6)
# Parameters
s = 6 # markersize
a = 0.4 # alpha for "less important" estimates
texts_for_legend = []
# Go over all estimates
for i, estimate in enumerate(['m22', 'f19', 'r14', 'g13', 'hf12', 'm12']):
df = dataframes[estimate]
if estimate == 'm12':
# One reg less
df = df.loc[rgi_ids[1:]]
x = rx[1:] + offset[i]
else:
df = df.loc[rgi_ids]
x = rx + offset[i]
alpha = 1 if i < 2 else a
c = p[estimates.index(estimate)]
ax.errorbar(df['V'], x, xerr=df['V_err'], fmt='o', c=c, markersize=s, alpha=alpha);
texts_for_legend.append(TextArea(legend[estimate], textprops=dict(color=c, fontsize=fs_lhs, alpha=alpha)))
# Add Millan other
ax.errorbar(m22_a['V'], rx[0] - 0.35, xerr=m22_a['V_err'], fmt=mimarker, c=cmillan, markersize=s);
texts_for_legend.insert(0, TextArea('Millan and others, 2022*', textprops=dict(color=cmillan, fontsize=fs_lhs)))
# Legend box
texts_vbox = VPacker(children=texts_for_legend, pad=0, sep=0)
ann = AnnotationBbox(texts_vbox, (.223, .92), xycoords=ax.transAxes,
bboxprops=dict(color='none', facecolor='white'))
ann.set_figure(f)
f.artists.append(ann)
# Titles
ax.set_title(r'$\bf{a}$ Volume per region', loc='left');
ax.set_xlabel('Ice volume (km$^3$) - log scale');
# Axis cosmetics
ax.invert_yaxis()
ax.set_yticks(rx);
ax.set_yticklabels(strs);
sns.despine(ax=ax, right=True)
ax.grid(axis='y', which='both')
# All gridlines for log
ax.set_xscale('log')
xlocs = np.concatenate([np.arange(1, 11)[2:] * 1e1,
np.arange(1, 11) * 1e2,
np.arange(1, 11) * 1e3,
np.arange(1, 11)[:6] * 1e4])
ax.set_xticks(xlocs)
ax.set_xlim([30, 65000])
locs, labels = plt.yticks()
# Shading
for i, loc in enumerate(locs):
alpha = 0.05 if i % 2 == 1 else 0.1
ax.axhspan(loc - 0.5, loc + 0.5, facecolor='grey', alpha=alpha)
ax.set_ylim(15.5, -0.5)
# plt.tight_layout()
plt.savefig('plot_global_and_reg_log_alpha.png', dpi=150, bbox_inches='tight');
###Output
_____no_output_____
###Markdown
Additional analyses Other models in Farinotti 2019
###Code
df_rgi = pd.read_hdf('data/rgi6_stats.h5')
df_all = pd.read_hdf('data/f19_icevol_pergla.hdf')
df_all['area'] = df_rgi['Area']
df_all['REG'] = [s[6:8] for s in df_all.index]
df_ref = df_rgi.groupby('O1Region').sum()[['Area']]
models = ['composite_vol_m3',
'model_1_vol_m3',
'model_2_vol_m3',
'model_3_vol_m3',
'model_4_vol_m3']
df_all_s = df_ref.copy()
for mo in models:
dd_ = df_all[['REG', 'area', mo]].dropna()
dd = dd_.groupby('REG').sum().replace(0, np.NaN) * 1e-9
dd['area'] = dd_.groupby('REG').sum().replace(0, np.NaN)['area']
ratio = dd['area'].divide(df_ref['Area'])
dd.loc[ratio < 0.98] = np.NaN
df_all_s[mo] = dd[mo]
df_all_s.loc['01, 02'] = df_all_s.loc[['01', '02']].sum()
df_all_s.loc['13, 14, 15'] = df_all_s.loc[['13', '14', '15']].sum()
df_all_s = df_all_s.drop(['01', '02'] + ['13', '14', '15']).sort_index()
df_all_s.loc['13, 14, 15', 'model_4_vol_m3'] = np.NaN
df_all_s.loc['01, 02', 'model_2_vol_m3'] = np.NaN
df_all_s.loc['01, 02', 'model_4_vol_m3'] = np.NaN
f, ax = plt.subplots(figsize=(14, 7))
reformat_df(f19).plot(ax=ax, y='V', yerr='V_err', marker='o', linestyle='none', alpha=0.8, c='C0');
m22.plot(ax=ax, y='V', yerr='V_err', marker='o', linestyle='none', alpha=0.8, c='C3');
plt.plot(df_all_s.model_1_vol_m3, '.', c='black', zorder=99)
ax.set_yscale('log')
plt.xticks(np.arange(len(m22.index)));
ax.set_xticklabels(m22.index, rotation=45);
plt.legend(['Individual models in F 19', 'Farinotti 19', 'Millan 22'], loc='lower left'); plt.xlabel('Region'); plt.ylabel('Volume [km$^3$]');
plt.plot(df_all_s.model_1_vol_m3, '.', c='black', zorder=99)
plt.plot(df_all_s.model_2_vol_m3, '.', c='black', zorder=99)
plt.plot(df_all_s.model_3_vol_m3, '.', c='black', zorder=99)
plt.plot(df_all_s.model_4_vol_m3, '.', c='black', zorder=99)
plt.title('Volume, log scale');
###Output
_____no_output_____
###Markdown
![title](img/1.png) DECISION TREEA decision tree is one of the machine learning algorithms that can be used for both classification and regression; in this module we will use a decision tree as a classification algorithm. Steps:1. Import modules2. Prepare the data 3. EDA (Exploratory Data Analysis)4. Data Preparation5. Build the machine learning model6. Validate the model 7. Classify ADDITIONAL MODULES TO INSTALL: * conda install python-graphviz* conda install graphviz 1. Import ModulesThe first step is to import the modules that will be used throughout this classification task
###Code
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import graphviz
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
###Output
_____no_output_____
###Markdown
2. Preparing the DataIn this step we obtain the data we will use; for this classification task the data is a drug dataset for choosing a drug based on the patient's condition. There are several features / attributes in this dataset:* Age >> The patient's age* Sex >> The patient's sex* BP >> Blood Pressure of the patient* Cholesterol >> The patient's cholesterol level* Na_to_K >> Sodium / potassium ratio in the blood* Drug >> The drug usedhttps://www.kaggle.com/gangliu/drugsets![title](img/dataset.png)
###Code
#Load the data & display the top 5 rows
df = pd.read_csv('dataset/drug200.csv')
df.head()
###Output
_____no_output_____
###Markdown
3. EDA (Exploratory Data Analysis)EDA is the data analysis step in which we look for patterns and insights in the data; the results of this exploration will later make it easier for us to build the machine learning model
###Code
#Print the key information about the data
df.info()
#Check the values in every feature / column
for col in df.columns:
print(f'{df[col].value_counts()}\n\n')
#Check whether there are any missing values; output as a percentage
df.isna().sum() / len(df)
#Check whether there are any duplicated instances
df[df.duplicated() == True]
###Output
_____no_output_____
###Markdown
From the data exploration we performed, no missing / NaN values and no duplicate rows were found, so we can continue to the next step
###Code
#Check the relation between drug and patient age
sns.set()
sns.set_palette('coolwarm')
sns.barplot('Drug','Age',hue='Sex',ci=None,data=df)
plt.title('RELATION BETWEEN DRUG AND PATIENT AGE',fontweight='bold')
plt.show()
#Check how often each drug is used
sns.countplot('Drug',data=df)
plt.title('DRUG USAGE COMPARISON',fontweight='bold')
###Output
_____no_output_____
###Markdown
People older than 50 tend to use drug B, and in terms of overall usage drug Y is the most frequently used drug 4. Data PreparationNow we clean the data so that it can be fed into a machine learning model: we replace the categorical columns 'Sex', 'BP' and 'Cholesterol' with numbers. Because the Sex column is binomial (yes / no) and the BP and Cholesterol columns are ordinal scales in statistics (https://id.wikipedia.org/wiki/Skala_(statistik)) rather than nominal scales, we conclude that the method we use to convert the categories into numbers is label encoding (nominal scales would use a OneHot Encoder)![title](img/ordinal.png)![title](img/nominal.png)Because there are only a few distinct values, we will do the label encoding manually, but if a column contains many values you can use the LabelEncoder module from sklearn (a short sketch follows after the next cell)
###Code
#Print the values of every categorical column
for col in df.select_dtypes(include=['object']):
print(f'{df[col].value_counts()}\n\n')
#Map the values to be replaced using a Python dictionary
Sex = {'M':1, 'F':0}
BP = {'HIGH':2, 'NORMAL':1, 'LOW': 0}
Cholesterol = {'HIGH':1, 'NORMAL':0}
#Replace the column values
df['Sex'] = df['Sex'].replace(Sex)
df['BP'] = df['BP'].replace(BP)
df['Cholesterol'] = df['Cholesterol'].replace(Cholesterol)
#Print the top 5 rows to check whether the values have been replaced
df.head()
###Output
_____no_output_____
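###Markdown
As mentioned above, the manual dictionary mapping works here because each column has only a few distinct values. Below is a minimal, illustrative sketch of the sklearn alternative on toy values (not on `df`, which has already been converted); `OrdinalEncoder` is an addition not mentioned in the original text, shown because `LabelEncoder` assigns codes alphabetically and therefore does not respect the LOW < NORMAL < HIGH order of an ordinal column such as BP:
###Code
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder

# LabelEncoder assigns integer codes in alphabetical order (F -> 0, M -> 1)
le = LabelEncoder()
print(le.fit_transform(['M', 'F', 'F', 'M']), dict(zip(le.classes_, range(len(le.classes_)))))

# OrdinalEncoder with an explicit category order preserves LOW < NORMAL < HIGH
oe = OrdinalEncoder(categories=[['LOW', 'NORMAL', 'HIGH']])
print(oe.fit_transform([['HIGH'], ['LOW'], ['NORMAL']]).ravel())
###Output
_____no_output_____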
###Markdown
In the next step we split the data into 2 parts, the features X and the target variable y, and then split that data into 25% for testing and 75% for training; we will use a module from sklearn to make this easier
###Code
#Split the data into features X and target variable y
y = df['Drug']
X = df.drop('Drug',axis=1)
#Split into a 75% train set and a 25% test set
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.25, stratify=y)
###Output
_____no_output_____
###Markdown
5. Building the machine learning modelIn this step we build the machine learning model; the algorithm we use is a decision tree, in which features are ordered from the largest information gain value to the smallest (a small worked example of information gain follows after the next cell) DECISION TREE![title](img/pengertian.png)A decision tree is a supervised learning algorithm that can be used for both classification and regression. The root (the top node) is the feature with the largest information gain, followed by the features with smaller information gain, until finally the leaves decide the resulting classification / regression. Advantages:* Does not require preprocessing of numeric data (standardisation based on variance) * Handles collinearity efficiently* A decision tree gives an easily understandable explanation of the predictions it makes Disadvantages:* There is a risk of overfitting when a very pure tree is built (a very large number of branches)* Sensitive to outliers* The tree can become complex when training on a complicated dataset* If the training data contains continuous values, some of the information in them can be lost For a more detailed explanation, please watch the following video:https://www.youtube.com/watch?v=qDcl-FRnwSU
###Code
#Instantiate the decision tree algorithm and train it on the training data
dt = DecisionTreeClassifier()
dt.fit(X_train,y_train)
###Output
_____no_output_____
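###Markdown
The description above orders features by information gain. A small worked example on toy labels (not computed from this dataset) shows how entropy and information gain are obtained; note that sklearn's DecisionTreeClassifier uses Gini impurity by default, so this is purely illustrative:
###Code
import numpy as np

def entropy(labels):
    # Shannon entropy of a label array, in bits
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

# toy parent node and one candidate split into two children
parent = np.array(['A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
left = np.array(['A', 'A', 'A', 'B'])
right = np.array(['B', 'B', 'B', 'B'])

n = len(parent)
weighted_child_entropy = (len(left) / n) * entropy(left) + (len(right) / n) * entropy(right)
information_gain = entropy(parent) - weighted_child_entropy
print(round(entropy(parent), 3), round(information_gain, 3))
###Output
_____no_output_____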
###Markdown
6. Model validationWe will now check the accuracy of the model we have built
###Code
#Check the model accuracy
accuracy_score(y_test,dt.predict(X_test))
#Check the model's F1 scores
print(classification_report(y_test,dt.predict(X_test)))
###Output
precision recall f1-score support
drugA 1.00 1.00 1.00 6
drugB 1.00 1.00 1.00 4
drugC 1.00 1.00 1.00 4
drugX 1.00 0.92 0.96 13
drugY 0.96 1.00 0.98 23
accuracy 0.98 50
macro avg 0.99 0.98 0.99 50
weighted avg 0.98 0.98 0.98 50
###Markdown
Great! Our model has an accuracy above 80%; however, when checking the F1 scores there are labels with a somewhat lower F1 score. This is understandable because the class counts are imbalanced and no hyperparameter tuning was performed on the model we built
###Code
#Check the distribution of the Drug column
y.value_counts()
###Output
_____no_output_____
###Markdown
Here we can see that the number of samples per class is indeed uneven; because this is only an exercise such a setup is acceptable, but in a real-world case this should not be done, so there we have to make sure that the data we have is balanced (a brief mitigation sketch follows after the prediction below)OK, since the model we built is ready to be deployed, let's make a prediction with the following conditions:* Age: 40* Sex: Female* BP: Low* Cholesterol: Low* Na_to_K: 12Is the recommended drug DrugX?
###Code
#Make a prediction
dt.predict([[40,0,0,0,12]])
###Output
_____no_output_____
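###Markdown
As noted above, the classes are imbalanced and no hyperparameter tuning was performed. A minimal sketch of two common mitigations, reusing `X_train` and `y_train` from above; the parameter grid is an illustrative assumption, not part of the original analysis:
###Code
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier

# class_weight='balanced' reweights classes inversely to their frequency,
# and limiting max_depth reduces the overfitting risk mentioned earlier
params = {'max_depth': [3, 4, 5, None], 'min_samples_leaf': [1, 2, 5]}
grid = GridSearchCV(DecisionTreeClassifier(class_weight='balanced', random_state=0),
                    params, cv=5, scoring='f1_macro')
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)
###Output
_____no_output_____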
###Markdown
![title](img/2.png)An advantage of the decision tree is that, because the algorithm uses information gain values for the classification, we can see which components have the most influence on it; we will inspect the features in 2 ways, using feature_importances_ and using a tree visualisation
###Code
#Inspect the most influential features via the Gini-based importance values
print(dict(zip(df.columns, dt.feature_importances_)))
###Output
{'Age': 0.13512260389206093, 'Sex': 0.0, 'BP': 0.32950836900745195, 'Cholesterol': 0.058452148826825354, 'Na_to_K': 0.47691687827366164}
###Markdown
Here we learn that the order of the features from most influential to least influential is:* Na_to_K* BP* Age* Cholesterol* Sex (no influence at all)
###Code
#Create the tree visualisation as a pdf file named Decision Tree
dot_data = export_graphviz(dt,out_file=None,
feature_names=X.columns,
class_names=y.unique(),
filled=True,rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
graph.render('Decision Tree')
###Output
_____no_output_____
###Markdown
Fetching Reviews from Play Store
###Code
# imports for this cell (assumed; they may already be imported in an earlier cell of the original notebook)
import os
import time
import random
import string
import pandas as pd
from google_play_scraper import Sort, reviews

def fetch_reviews(app_name, app_id):
try:
os.mkdir(app_name)
except FileExistsError:
pass
# Empty list for storing reviews
app_reviews = []
# Number of reviews to scrape per batch
count = 10
# To keep track of how many batches have been completed
batch_num = 0
# Retrieve reviews (and continuation_token) with reviews function
rvws, token = reviews(
app_id, # found in app's url
lang='en', # defaults to 'en'
country='us', # defaults to 'us'
sort=Sort.NEWEST, # start with most recent
count=count # batch size
)
# Add the list of review dicts to overall list
app_reviews.extend(rvws)
# Increase batch count by one
batch_num +=1
# Wait 1 to 5 seconds to start next batch
time.sleep(random.randint(1,5))
# Append review IDs to list prior to starting next batch
pre_review_ids = []
for rvw in app_reviews:
pre_review_ids.append(rvw['reviewId'])
# Loop through at most max number of batches
for batch in range(5):
rvws, token = reviews( # store continuation_token
app_id,
lang='en',
country='us',
sort=Sort.NEWEST,
count=count,
# using token obtained from previous batch
continuation_token=token
)
# Append unique review IDs from current batch to new list
new_review_ids = []
for r in rvws:
new_review_ids.append(r['reviewId'])
# Add the list of review dicts to main app_reviews list
app_reviews.extend(rvws)
# Increase batch count by one
batch_num +=1
# Break loop and stop scraping for current app if most recent batch
# did not add any unique reviews
all_review_ids = pre_review_ids + new_review_ids
if len(set(pre_review_ids)) == len(set(all_review_ids)):
print(f'No reviews left to scrape. Completed {batch_num} batches.\n')
break
# all_review_ids becomes pre_review_ids to check against
# for next batch
pre_review_ids = all_review_ids
# At every 100th batch
# if batch_num%100==0:
if True:
# print update on number of batches
print(f'Batch {batch_num} completed.')
df = pd.DataFrame(app_reviews)
df = df[['content', 'thumbsUpCount', 'score']]
ran = app_name + ''.join(random.choices(string.ascii_uppercase + string.digits, k = 10))
df.to_csv(f"./{app_name}/{ran}.csv")
# empty our list for next round of 100 batches
app_reviews = []
# Wait 1 to 5 seconds to start next batch
time.sleep(random.randint(1,5))
# Reviews for tinder
fetch_reviews('tinder', 'com.tinder')
###Output
Batch 2 completed.
Batch 3 completed.
Batch 4 completed.
Batch 5 completed.
Batch 6 completed.
###Markdown
Analysis of Tinder Getting all the csv files with the reviews for tinder
###Code
csv_files = list(filter(lambda x: x.endswith('.csv'),os.listdir('tinder')))
csv_files = list(map(lambda x: os.path.join('tinder', x), csv_files))
csv_files
###Output
_____no_output_____
###Markdown
Combining all the review datasets
###Code
df = pd.DataFrame()
for file in csv_files:
df = df.append(pd.read_csv(file))
df.head()
###Output
_____no_output_____
###Markdown
Dropping extra columns* **content**: contains the actual review* **score**: number of stars provided by the user* **thumbsUpCount**: thumbs-up count for the review* **at**: time of posting Reset the index as well
###Code
df = df[['content', 'score', 'thumbsUpCount', 'at']]
df.reset_index(inplace=True)
df
df.hist('score')
df[df['thumbsUpCount']>1000]
###Output
_____no_output_____
###Markdown
NLP Tagging the English reviewsWe will be focusing on just the English reviews
###Code
# !pip install langdetect
from langdetect import detect_langs
((detect_langs(df.iloc[4543].content))[0]).lang
def tagging(data):
try:
return detect_langs(data)[0].lang
except:
return 'undefined'
df['lang'] = df['content'].apply(tagging)
df
df.iloc[370472].content
# scratch check: accumulate all csv files into one frame (s must be initialised before the loop)
s = pd.DataFrame()
for file in csv_files:
a = (pd.read_csv(file))
# print(a)
s = s.append(a)
s.size
s.append(pd.read_csv(csv_files[0]))
dff0 = pd.read_csv(csv_files[0])[['userName', 'thumbsUpCount', 'content', 'score']]
dff1 = pd.read_csv(csv_files[1])[['userName', 'thumbsUpCount', 'content', 'score']]
dff1 + dff0
###Output
_____no_output_____
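###Markdown
Since the stated focus is on English reviews only, a minimal follow-up sketch using the `lang` column created above:
###Code
# keep only the reviews detected as English
df_en = df[df['lang'] == 'en'].copy()
print(len(df), len(df_en))
###Output
_____no_output_____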
###Markdown
COVID-19 in Germany's Political Discourse **RQ:** What prevalence does *COVID-19* have in the social media messaging across Germany's political spectrum? We measure the number of posts on Twitter created by the parties in the German Bundestag containing the string "corona". We restrict ourselves to the accounts of the left-wing party *Die Linke* ([@Linksfraktion](https://twitter.com/Linksfraktion)) and the right-wing party *Alternative für Deutschland* ([@AfDimBundestag](https://twitter.com/AfDimBundestag)).For the analysis we begin by importing the required libraries
###Code
source("myLib.R")
###Output
_____no_output_____
###Markdown
To execute cells press Shift+Enter. Next, we read the [data](./data.csv) (see [data-collection.ipynb](data-collection.ipynb) ) and plot the frequency of tweets. For plotting we use the [R](https://www.r-project.org/) package [ggplot](https://ggplot2.tidyverse.org/).
###Code
data <- read_csv("data.csv", col_types = cols()) %>% mutate(date=as.Date(date))
data %>% ggplot(aes(x = date, fill = username)) +
geom_histogram(position = "dodge", binwidth = 1) +
labs(y = "Number of tweets / day", x = "Date",fill="Twitter accounts") +
scale_fill_manual(values = c_values)
###Output
_____no_output_____
###Markdown
Analysis- It is quite clear that the teams that qualified perform better than the non-qualifying teams in most departments- Among the qualifying teams, the team that performs even better overall (across all departments) won the tournament- Here teamID 1 performs better than most teams in all departments, and it won the championship
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
batting
###Code
batting=pd.read_csv('datasets/battingdata.csv')
print(batting.shape)
batting.head()
print('orange cap')
orangeCap=batting.sort_values('r',axis=0,ascending=False)
orangeCap[['r','teamID','playerID']].head(5)
###Output
orange cap
###Markdown
overall season
###Code
print(''.center(50,'='))
print('total fifties in season >>> ',sum(batting['50s']))
print('total centuries in season >>> ',sum(batting['100s']))
print('total runs in season >>> ',sum(batting['r']))
print('total balls in season >>> ',sum(batting['b']))
print('total 4s in season >>> ',sum(batting['4s']))
print('total 6s in season >>> ',sum(batting['6s']))
print(''.center(50,'='))
###Output
==================================================
total fifties in season >>> 89
total centuries in season >>> 4
total runs in season >>> 17716
total balls in season >>> 13952
total 4s in season >>> 1548
total 6s in season >>> 687
==================================================
###Markdown
teamWise
###Code
print('team-wise stats for batting'.center(40,'='))
print(batting[['50s','100s','r','6s','4s','teamID']].groupby('teamID').sum())
print(''.center(40,'='))
# since some teams have played more games than others it is only fair to average the statistics
print('team-wise avg stats for batting'.center(60,'='))
print(batting[['b','r','6s','4s','teamID']].groupby('teamID').mean())
print(''.center(60,'='))
###Output
==============team-wise avg stats for batting===============
b r 6s 4s
teamID
1 117.625000 162.937500 7.187500 14.437500
3 92.650000 119.100000 3.300000 11.650000
4 65.160000 82.440000 3.720000 6.200000
5 100.947368 127.526316 5.157895 11.578947
6 79.650000 100.800000 3.800000 9.050000
8 68.375000 86.958333 3.750000 7.791667
9 88.300000 109.050000 3.950000 9.500000
62 69.583333 81.625000 2.916667 6.291667
============================================================
###Markdown
bowling
###Code
bowling=pd.read_csv('datasets/bowlingdata.csv')
bowling=bowling.loc[bowling.o!='-']
print(bowling.shape)
bowling.head()
print('purple cap')
purplecap=bowling.sort_values('w',axis=0,ascending=False)
purplecap[['w','teamID','playerID']].head()
# overall season
print(''.center(50,'='))
print('total wickets in season >>> ',sum(bowling['w']))
print('total maiden overs in season >>> ',sum(bowling['maid']))
print('total maiden wicket overs in season >>> ',sum(bowling['wmaid']))
print('total extra runs in season >>> ',sum(bowling['r'])-sum(batting['r']))
print('total no balls in season >>> ',sum(bowling['nb']))
print('total wide balls in season >>> ',sum(bowling['wb']))
print('total dot balls in season >>> ',sum(bowling['d']))
print('total 4 wickets hauls in season >>> ',sum(bowling['4w']))
print('total 5 wickets hauls in season >>> ',sum(bowling['5w']))
print('total hat-tricks in season >>> ',sum(bowling['ht']))
print(''.center(50,'='))
print('hat-trick player ID >>>',int(bowling.loc[bowling.ht==1,'playerID']))
print('he also has the purple cap')
# team wise
# since some teams have played more games than others it is only fair to average the statistics
print('team-wise stats for bowling'.center(40,'='))
print(bowling[['w','nb','wb','d','maid','teamID']].groupby('teamID').sum())
print('wickets-no-wide-dot'.center(40,'='))
###Output
======team-wise stats for bowling=======
w nb wb d maid
teamID
1 96 8 85 700 3
3 96 8 47 721 4
4 75 4 48 620 3
5 100 20 59 767 0
6 77 12 48 640 1
8 62 7 70 518 1
9 92 13 55 641 2
62 72 4 49 622 2
==========wickets-no-wide-dot===========
###Markdown
fielding
###Code
fielding=pd.read_csv('datasets/fieldingdata.csv')
print(fielding.shape)
fielding.head()
# overall season
print(''.center(50,'='))
print('total catches in season >>> ',sum(fielding['c']))
print('total run outs in season >>> ',sum(fielding['ro']))
print('total stumpings in season >>> ',sum(fielding['s']))
print(''.center(50,'='))
print('team-wise stats for fielding'.center(40,'='))
print(fielding[['c','ro','s','teamID']].groupby('teamID').sum())
print('catches-runOuts-stumpings'.center(40,'='))
print('top catchers')
catches=fielding.sort_values('c',axis=0,ascending=False)
catches[['c','teamID','playerID']].head()
print('team-wise avg stats for fielding'.center(40,'='))
print(fielding[['c','ro','teamID']].groupby('teamID').mean())
print('catches-runOuts'.center(40,'='))
###Output
====team-wise avg stats for fielding====
c ro
teamID
1 4.500000 0.625000
3 3.200000 0.500000
4 2.120000 0.240000
5 3.315789 0.736842
6 2.550000 0.300000
8 1.916667 0.416667
9 3.550000 0.350000
62 2.291667 0.250000
============catches-runOuts=============
###Markdown
Analysis of MMNN group chat using Python Let's first import the libraries we will use
###Code
import matplotlib.pyplot as plt # for visualization
import numpy as np # for numerical
import pandas as pd # for data analysis
from pandas.io.json import json_normalize # dealing with nested json files
data = pd.read_json("data.json")
data.head(5)
###Output
_____no_output_____
###Markdown
From the above data frame we can see that the data we need to work with is nested inside another dictionary, which is why we need the json_normalize import to create a data frame of its own
###Code
new = json_normalize(data["messages"])
new.head(4)
new.tail(4)
###Output
_____no_output_____
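###Markdown
To make the json_normalize step above concrete, here is a tiny toy example (hypothetical records, not from the exported chat) showing how nested dictionaries are flattened into dotted column names:
###Code
toy = [{"sender": "alice", "meta": {"ts": 1, "type": "text"}},
       {"sender": "bob", "meta": {"ts": 2, "type": "image"}}]
# columns become: sender, meta.ts, meta.type
print(json_normalize(toy))
###Output
_____no_output_____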
###Markdown
import torchimport torch.nn as nnimport torch.nn.parallelimport torch.backends.cudnn as cudnn
###Code
import torch  # assumed import; the import statements above sit in a markdown cell and therefore do not execute

model = torch.load('runs/conv4_usc_unsigned/example/prune_rate=0.5/9/checkpoints/model_best.pth')
print(model['arch'])
print(model['state_dict'].keys())
import matplotlib.pyplot as plt
import numpy as np
score = model['state_dict']['module.convs.0.scores'].cpu()
score, _ = score.flatten().abs().sort(descending=True)
plt.bar(np.arange(len(score)), score.numpy())
score = model['state_dict']['module.convs.2.scores'].cpu()
score, _ = score.flatten().abs().sort(descending=True)
plt.bar(np.arange(len(score)), score.numpy())
score = model['state_dict']['module.convs.5.scores'].cpu()
score, _ = score.flatten().abs().sort(descending=True)
plt.bar(np.arange(len(score)), score.numpy())
score = model['state_dict']['module.convs.7.scores'].cpu()
score, _ = score.flatten().abs().sort(descending=True)
plt.bar(np.arange(len(score)), score.numpy())
score = model['state_dict']['module.linear.0.scores'].cpu()
score, _ = score.flatten().abs().sort(descending=True)
plt.bar(np.arange(len(score)), score.numpy())
score = model['state_dict']['module.linear.2.scores'].cpu()
score, _ = score.flatten().abs().sort(descending=True)
plt.bar(np.arange(len(score)), score.numpy())
score = model['state_dict']['module.linear.4.scores'].cpu()
score, _ = score.flatten().abs().sort(descending=True)
plt.bar(np.arange(len(score)), score.numpy())
###Output
_____no_output_____
###Markdown
setup
###Code
%matplotlib inline
from eelbrain import *
import scipy, mne, os, shutil, pdb, importlib
import numpy as np
import matplotlib.pyplot as plt
root_folder = 'data_path' # path to DRUM dataset
subjects_dir = f'{root_folder}/mri' # copy freesurfer fsaverage files here
meg_folder = f'{root_folder}/meg'
output_folder = 'output_path' # path to output folder
if not os.path.exists(output_folder):
os.makedirs(output_folder)
subjects = [f for f in os.listdir(meg_folder) if f[0]=='R'] # get subject list
subjects.sort()
###Output
_____no_output_____
###Markdown
make sourcespace
###Code
# make subjects_dir mri folders and scaled source spaces
# freesurfer fsaverage files need to be in the subjects_dir
for subject in subjects:
if not os.path.exists(f'{subjects_dir}/{subject}/bem/{subject}-ico-4-src.fif'):
print(f'making {subject}')
os.makedirs(f'{subjects_dir}/{subject}/bem')
shutil.copyfile(f'{meg_folder}/{subject}/MRI scaling parameters.cfg', f'{subjects_dir}/{subject}/MRI scaling parameters.cfg')
mne.scale_source_space(subject, f'{{subject}}-ico-4-src.fif', subjects_dir=subjects_dir)
###Output
_____no_output_____
###Markdown
make resting beta power
###Code
for subject in subjects:
if subject == 'R26672': # fix for R2667 is not needed for resting data
continue
for visit in ['visit1', 'visit2']:
fwdfile = f'{meg_folder}/{subject}/{subject}_{visit}_resting-ico-4-fwd.fif'
snds = []
for i in range(1,3): # loop over resting 1 and 2
rawfile = f'{meg_folder}/{subject}/{subject}_{visit}_resting{i}-raw.fif'
if not os.path.exists(rawfile):
print(f'file not found: {subject}_{visit}_resting{i}')
continue
print(f'loading {subject}_{visit}_resting{i}')
raw = mne.io.read_raw_fif(rawfile)
# source localization
covfile = f'{rawfile[:-7]}cov.fif'
fwd = mne.read_forward_solution(fwdfile)
cov = mne.read_cov(covfile)
invsol = mne.minimum_norm.make_inverse_operator(raw.info, fwd, cov, fixed=True, depth=0.8)
stc1 = mne.minimum_norm.apply_inverse_raw(raw, invsol, lambda2 = 1, method='MNE')
snd1 = load.fiff.stc_ndvar(stc1, 'fsaverage', 'ico-4', subjects_dir=subjects_dir)
snds.append(snd1.sub(time=(5, snd1.time.tmax-5)))
del stc1, snd1
if len(snds) == 0:
continue
# concatenate resting 1 and 2
snd1 = concatenate(snds, 'time')
del snds
# make psd on 15s blocks of the data
nblocks = snd1.time.tmax/15
psds = []
for i in range(int(np.floor(nblocks))):
print('making psd block', i*15, 's - ', (i+1)*15, 's', ' '*20, end='\r')
psds.append(psd_welch(snd1.sub(time=(i*15,(i+1)*15)), n_per_seg=256, n_overlap=128).sub(frequency=(1,40)))
psd = combine(psds).mean('case')
save.pickle(psd, f'{output_folder}/{subject}_{visit}_resting_psd.pkl')
# plot average psds across the whole brain and across rolandic roi
rolandicROI = list(set([l for l in psd.source.parc.as_labels() if 'central' in l]))
psd1 = psd.mean('source') # whole brain average
psd1.name = 'whole brain'
psd2 = psd.sub(source=rolandicROI).mean('source') # rolandic roi average
psd2.name = 'rolandic ROI'
p = plot.UTS([[psd1, psd2]])
p.save(f'{output_folder}/plots_psd_{subject}_{visit}_resting.png')
p.close()
###Output
_____no_output_____
###Markdown
make visual beta power
###Code
tasks = ['fam', 'mm', 'pn', 'pd']
for subject in subjects:
for visit in ['visit1', 'visit2']:
fwdfile = f'{meg_folder}/{subject}/{subject}_{visit}_visual-fwd.fif'
for task in tasks:
rawfile = f'{meg_folder}/{subject}/{subject}_{visit}_visual_{task}-raw.fif'
if not os.path.exists(rawfile):
print(f'file not found: {subject}_{visit}_visual_{task}')
continue
print(f'loading {subject}_{visit}_visual_{task}')
raw = mne.io.read_raw_fif(rawfile)
# source localization
covfile = f'{rawfile[:-7]}cov.fif'
fwd = mne.read_forward_solution(fwdfile)
cov = mne.read_cov(covfile)
invsol = mne.minimum_norm.make_inverse_operator(raw.info, fwd, cov, fixed=True, depth=0.8)
stc1 = mne.minimum_norm.apply_inverse_raw(raw, invsol, lambda2 = 1, method='MNE')
snd1 = load.fiff.stc_ndvar(stc1, 'fsaverage', 'ico-4', subjects_dir=subjects_dir)
# make psd in 15s blocks
nblocks = snd1.time.tmax/15
psds = []
for i in range(int(np.floor(nblocks))):
print('making psd block', i*15, 's - ', (i+1)*15, 's', ' '*20, end='\r')
psds.append(psd_welch(snd1.sub(time=(i*15,(i+1)*15)), n_per_seg=256, n_overlap=128).sub(frequency=(1,40)))
psd = combine(psds).mean('case')
save.pickle(psd, f'{output_folder}/{subject}_{visit}_visual_{task}_psd.pkl')
# plot average psds across the whole brain and across rolandic roi
rolandicROI = list(set([l for l in psd.source.parc.as_labels() if 'central' in l]))
psd1 = psd.mean('source') # whole brain average
psd1.name = 'whole brain'
psd2 = psd.sub(source=rolandicROI).mean('source') # rolandic roi average
psd2.name = 'rolandic ROI'
p = plot.UTS([[psd1, psd2]])
p.save(f'{output_folder}/plots_psd_{subject}_{visit}_visual_{task}.png')
p.close()
###Output
_____no_output_____
###Markdown
write CSV file beta power
###Code
CONTROLS = ['R2517', 'R2519', 'R2520', 'R2521', 'R2525', 'R2528', 'R2496', 'R2673',]
PATIENTS = ['R2527', 'R2540', 'R2546', 'R2598', 'R2615', 'R2617', 'R2664', 'R2667', 'R2668',]
# lesion hemisphere
LEFT = ['R2527', 'R2540', 'R2667', 'R2668']
RIGHT = ['R2546', 'R2598', 'R2615', 'R2617', 'R2664']
psdsubj = dict(C={},P={})
beta_band = (13, 25)
psdrange = (2, 40)
outfile = 'betapower.csv'
with open(outfile, 'w+') as f:
f.write(f'subject,group,rel_beta,visit,task\n') # column headings
# lesion hemisphere
outfile2 = 'betapower_lesionhemi.csv'
with open(outfile2, 'w+') as f:
f.write(f'subject,group,rel_beta,hemi,lesion,visit,task\n') # column headings
for subject in subjects:
for visit in ['visit1', 'visit2']:
for task in ['resting', 'visual_fam', 'visual_mm', 'visual_pn', 'visual_pd']:
infile = f'{subject}_{visit}_{task}_psd.pkl'
if not os.path.exists(f'{output_folder}/{infile}'):
print(f'file not found: {infile}')
continue
print(f'loading: {infile}')
psd = load.unpickle(f'{output_folder}/{infile}')
subject = subject[:5] # this is to combine R26672 and R2667
if subject in CONTROLS:
group = 'C'
else:
group = 'P'
# rolandic ROI
rolandicROI = list(set([l for l in psd.source.parc.as_labels() if 'central' in l]))
psd2 = psd.sub(source=rolandicROI).mean('source').sub(frequency=psdrange)
# relative power
psd_rel = psd2.copy()
psd_rel.x /= np.sum(psd_rel.x)
rel_beta = psd_rel.sub(frequency=beta_band).sum('frequency')
with open(outfile, 'a+') as f:
f.write(f'{subject},{group},{rel_beta},{visit},{task}\n')
psdL = psd.sub(source=rolandicROI).sub(source='lh').mean('source').sub(frequency=psdrange)
psd_relL = psdL.copy()
psd_relL.x /= np.sum(psd_relL.x)
rel_betaL = psd_relL.sub(frequency=beta_band).sum('frequency')
psdR = psd.sub(source=rolandicROI).sub(source='rh').mean('source').sub(frequency=psdrange)
psd_relR = psdR.copy()
psd_relR.x /= np.sum(psd_relR.x)
rel_betaR = psd_relR.sub(frequency=beta_band).sum('frequency')
with open(outfile2, 'a+') as f:
if subject in LEFT:
f.write(f'{subject},{group},{rel_betaL},left,ipsi,{visit},{task}\n')
f.write(f'{subject},{group},{rel_betaR},right,contra,{visit},{task}\n')
elif subject in RIGHT:
f.write(f'{subject},{group},{rel_betaL},left,contra,{visit},{task}\n')
f.write(f'{subject},{group},{rel_betaR},right,ipsi,{visit},{task}\n')
else:
f.write(f'{subject},{group},{rel_betaL},left,,{visit},{task}\n')
f.write(f'{subject},{group},{rel_betaR},right,,{visit},{task}\n')
###Output
_____no_output_____
###Markdown
make ERD ERS
###Code
import pandas as pd
for subject in subjects:
for visit in ['visit1', 'visit2']:
trigger_file = f'{subject[:5]}_{visit}_visual_triggers.csv' # subject[:5] to combine R26672 and R2667
if not os.path.exists(f'{meg_folder}/{subject[:5]}/{trigger_file}'):
print(f'file not found: {trigger_file}')
continue
print(trigger_file)
triggers = pd.read_csv(f'{meg_folder}/{subject[:5]}/{trigger_file}')
for task in ['mm', 'pn', 'pd']:
rawfile = f'{subject}_{visit}_visual_{task}-raw.fif'
if not os.path.exists(f'{meg_folder}/{subject}/{rawfile}'):
print(f'file not found: {rawfile}')
continue
print(f'loading: {rawfile}')
raw = mne.io.read_raw_fif(f'{meg_folder}/{subject}/{rawfile}')
# source localization
covfile = f'{meg_folder}/{subject}/{rawfile[:-7]}cov.fif'
fwdfile = f'{meg_folder}/{subject}/{subject}_{visit}_visual-fwd.fif'
fwd = mne.read_forward_solution(fwdfile)
cov = mne.read_cov(covfile)
invsol = mne.minimum_norm.make_inverse_operator(raw.info, fwd, cov, fixed=True, depth=0.8)
stc1 = mne.minimum_norm.apply_inverse_raw(raw, invsol, lambda2 = 1, method='MNE')
snd1 = load.fiff.stc_ndvar(stc1, 'fsaverage', 'ico-4', subjects_dir=subjects_dir)
rolandicROI = list(set([l for l in snd1.source.parc.as_labels() if 'central' in l]))
snd1 = snd1.sub(source=rolandicROI)
tstart = load.unpickle(f'{meg_folder}/{subject}/{rawfile[:-8]}_tstart.pkl')
snd1 = NDVar(snd1.x, (snd1.source, UTS(tstart, snd1.time.tstep, snd1.x.shape[1])))
epochsR = []
epochsL = []
specgramsL = []
specgramsR = []
specgramsL_lh = []
specgramsL_rh = []
specgramsR_lh = []
specgramsR_rh = []
task_trigs = triggers[triggers['task']==task]
i = 1
for tt, side in zip(task_trigs['t_start'], task_trigs['button_side']):
t = tt
if subject == 'R26672':
t -= 1214 # get correct trigger times
print(i, len(task_trigs['t_start']), t, ' '*20, end='\r')
i += 1
if t+3 > snd1.time.tmax:
print(f'{t+3} > {snd1.time.tmax}')
continue
epoch = snd1.sub(time=(t-3,t+3))
# make spectrogram using morlet wavelets
freqs = np.logspace(*np.log10([6, 35]), num=20)
n_cycles = freqs / 2.
fs = 1/epoch.time.tstep
specgram = mne.time_frequency.tfr_array_morlet(epoch.x[np.newaxis,:,:], fs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
n_jobs=1, decim=1)
specgram = NDVar(np.abs(specgram[0])**2, (epoch.source, Scalar('frequency', freqs), UTS(-3, 1/fs, specgram[0].shape[2])))
specgram_lh = specgram.sub(source='lh').mean('source')
specgram_rh = specgram.sub(source='rh').mean('source')
specgram = specgram.mean('source')
if side == 'left':
epochsL.append(epoch)
specgramsL.append(specgram)
specgramsL_lh.append(specgram_lh)
specgramsL_rh.append(specgram_rh)
elif side == 'right':
epochsR.append(epoch)
specgramsR.append(specgram)
specgramsR_lh.append(specgram_lh)
specgramsR_rh.append(specgram_rh)
save.pickle(epochsL, f'{output_folder}/{subject[:5]}_{visit}_visual_{task}_buttonL_epochs.pkl')
save.pickle(epochsR, f'{output_folder}/{subject[:5]}_{visit}_visual_{task}_buttonR_epochs.pkl')
save.pickle(specgramsL, f'{output_folder}/{subject[:5]}_{visit}_visual_{task}_button_specgramsL.pkl')
save.pickle(specgramsR, f'{output_folder}/{subject[:5]}_{visit}_visual_{task}_button_specgramsR.pkl')
save.pickle(specgramsL_lh, f'{output_folder}/{subject[:5]}_{visit}_visual_{task}_button_specgramsL_lh.pkl')
save.pickle(specgramsR_lh, f'{output_folder}/{subject[:5]}_{visit}_visual_{task}_button_specgramsR_lh.pkl')
save.pickle(specgramsL_rh, f'{output_folder}/{subject[:5]}_{visit}_visual_{task}_button_specgramsL_rh.pkl')
save.pickle(specgramsR_rh, f'{output_folder}/{subject[:5]}_{visit}_visual_{task}_button_specgramsR_rh.pkl')
###Output
_____no_output_____
###Markdown
write CSV file ERD ERS
###Code
# functions for computing ERD, ERS
def get_ERD_ERS(ev, f1=13, f2=25):
# baseline window (seconds relative to the button press)
tbaseline = (-2.9,-2)
# relative baseline: beta-band baseline power divided by broadband baseline power
RELBASE = ev.sub(frequency=(f1,f2)).mean('frequency').sub(time=tbaseline).mean('time') / ev.sub(time=tbaseline).mean('frequency').mean('time')
# average over the beta band (f1-f2 Hz) and take the baseline power for normalisation
ev = ev.sub(frequency=(f1,f2)).mean('frequency')
BASE = ev.sub(time=tbaseline).mean('time')
normf = ev.sub(time=tbaseline).mean('time')
# ERD (event-related desynchronization): relative power decrease around movement onset
ts1 = -1
te1 = 0.5
ERD = -(ev.sub(time=(ts1,te1)).mean('time') - normf)/normf
# ERS (event-related synchronization): relative power increase (rebound) after the movement
ts2 = 0.5
te2 = 2.5
ERS = (ev.sub(time=(ts2,te2)).mean('time') - normf)/normf
# negative values are treated as absent and returned as empty strings
if ERD <= 0:
ERD = ''
if ERS <= 0:
ERS = ''
return ERD, ERS, BASE, RELBASE
def get_ERD_ERS_trials(ev):
ERDs = []
ERSs = []
BASEs = []
RELBASEs = []
ntrials = 0
for ii in range(len(ev)):
ERD, ERS, BASE, RELBASE = get_ERD_ERS(ev[ii])
if ERD!='':
ERDs.append(ERD)
if ERS!='':
ERSs.append(ERS)
BASEs.append(BASE)
RELBASEs.append(RELBASE)
ntrials += 1
if len(ERDs) == 0:
ERD = ''
else:
ERD = np.sum(ERDs)/ntrials
if len(ERSs) == 0:
ERS = ''
else:
ERS = np.sum(ERSs)/ntrials
BASE = np.sum(BASEs)/ntrials
RELBASE = np.sum(RELBASEs)/ntrials
return ERD, ERS, BASE, RELBASE
CONTROLS = ['R2517', 'R2519', 'R2520', 'R2521', 'R2525', 'R2528', 'R2496', 'R2673',]
PATIENTS = ['R2527', 'R2540', 'R2546', 'R2598', 'R2615', 'R2617', 'R2664', 'R2667', 'R2668',]
subjects = CONTROLS + PATIENTS
# lesion hemisphere
LEFT = ['R2527', 'R2540', 'R2667', 'R2668']
RIGHT = ['R2546', 'R2598', 'R2615', 'R2617', 'R2664']
beta_band = (13, 25)
psdrange = (2, 40)
outfile = 'ERD_ERS.csv'
with open(outfile, 'w+') as f:
f.write(f'subject,group,value,metric,visit,task\n') # column headings
# lesion hemisphere button
outfile2 = 'ERD_ERS_lesionhemi.csv'
with open(outfile2, 'w+') as f:
f.write(f'subject,group,value,metric,hemi,lesion,visit,task\n') # column headings
for subject in subjects:
if subject == 'R26672':
continue
for visit in ['visit1', 'visit2']:
for task in ['mm', 'pn', 'pd']:
infile = f'{subject}_{visit}_visual_{task}_button_specgramsL.pkl'
if not os.path.exists(f'{output_folder}/{infile}'):
print(f'file not found: {infile}')
continue
print(f'loading: {infile}')
specL = load.unpickle(f'{output_folder}/{infile}')
specR = load.unpickle(f'{output_folder}/{subject}_{visit}_visual_{task}_button_specgramsR.pkl')
# also load the per-hemisphere spectrograms saved above; they are needed for the lesion-hemisphere CSV below
specL_lh = load.unpickle(f'{output_folder}/{subject}_{visit}_visual_{task}_button_specgramsL_lh.pkl')
specR_lh = load.unpickle(f'{output_folder}/{subject}_{visit}_visual_{task}_button_specgramsR_lh.pkl')
specL_rh = load.unpickle(f'{output_folder}/{subject}_{visit}_visual_{task}_button_specgramsL_rh.pkl')
specR_rh = load.unpickle(f'{output_folder}/{subject}_{visit}_visual_{task}_button_specgramsR_rh.pkl')
spec = combine([combine(specL),combine(specR)])
ERD, ERS, BASE, RELBASE = get_ERD_ERS_trials(spec)
if subject in CONTROLS:
group = 'C'
else:
group = 'P'
spec_lh = combine([combine(specR_lh), combine(specL_lh)])
spec_rh = combine([combine(specR_rh), combine(specL_rh)])
ERD_lh, ERS_lh, _, _ = get_ERD_ERS_trials(spec_lh)
ERD_rh, ERS_rh, _, _ = get_ERD_ERS_trials(spec_rh)
with open(outfile, 'a+') as f:
f.write(f'{subject},{group},{ERD},ERD,{visit},{task}\n')
f.write(f'{subject},{group},{ERS},ERS,{visit},{task}\n')
f.write(f'{subject},{group},{BASE},BASE,{visit},{task}\n')
f.write(f'{subject},{group},{RELBASE},RELBASE,{visit},{task}\n')
with open(outfile2, 'a+') as f:
if subject in LEFT:
f.write(f'{subject},{group},{ERD_lh},ERD,left,ipsi,{visit},{task}\n')
f.write(f'{subject},{group},{ERS_lh},ERS,left,ipsi,{visit},{task}\n')
f.write(f'{subject},{group},{ERD_rh},ERD,right,contra,{visit},{task}\n')
f.write(f'{subject},{group},{ERS_rh},ERS,right,contra,{visit},{task}\n')
elif subject in RIGHT:
f.write(f'{subject},{group},{ERD_lh},ERD,left,contra,{visit},{task}\n')
f.write(f'{subject},{group},{ERS_lh},ERS,left,contra,{visit},{task}\n')
f.write(f'{subject},{group},{ERD_rh},ERD,right,ipsi,{visit},{task}\n')
f.write(f'{subject},{group},{ERS_rh},ERS,right,ipsi,{visit},{task}\n')
else:
f.write(f'{subject},{group},{ERD_lh},ERD,left,,{visit},{task}\n')
f.write(f'{subject},{group},{ERS_lh},ERS,left,,{visit},{task}\n')
f.write(f'{subject},{group},{ERD_rh},ERD,right,,{visit},{task}\n')
f.write(f'{subject},{group},{ERS_rh},ERS,right,,{visit},{task}\n')
###Output
_____no_output_____
###Markdown
======================================================================================================================================
###Code
# imports assumed for this section (the original notebook's import cell is not shown here)
import numpy as np
from PIL import Image
from fastseg import MobileV3Large
model = MobileV3Large(num_classes=19, use_aspp=True, num_filters=256)
model = model.from_pretrained(num_filters=256)
img = Image.open("../data/utube/cities/Kochi 4K _ Driving from Kakkanad to Kaloor by sunset 24-41 screenshot.png")
labels = model.predict_one(img)
labels.shape
labels
classes, counts = np.unique(labels, return_counts=True)
classes, counts, counts.sum()
class_names = ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic_light", "traffic_sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"]
len(class_names)
img_class_counts = {}
img_dir = "../data/utube"
# for img_path in os.listdir(img_dir):
# print(f"Processing image {img_path}")
# img = Image.open(os.path.join(img_dir, img_path))
# labels = model.predict_one(img)
# _, counts = np.unique(labels, return_counts=True)
# n_pixels = labels.size
# counts = counts/ n_pixels
# img_class_counts[img_path] = counts
###Output
_____no_output_____
###Markdown
========================================================================================================================================
###Code
with open("img_class_counts.pkl", 'rb') as f:
img_class_counts = pickle.load(f)
class_names = ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic_light", "traffic_sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"]
len(class_names)
relevant_classes = ["road", "sidewalk", "building", "vegetation", "terrain", "sky"]
relevant_class_ids = []
for class_name in relevant_classes:
relevant_class_ids.append(class_names.index(class_name))
relevant_class_ids
img_class_counts["Kochi 4K _ Driving from Kakkanad to Kaloor by sunset 24-41 screenshot.png"][relevant_class_ids]
relevant_img_class_counts = {img:img_count[relevant_class_ids] for img, img_count in img_class_counts.items()}
relevant_img_class_counts
feature_names = ["frac_"+name for name in relevant_classes]
feature_names
dataset = pd.DataFrame(list(relevant_img_class_counts.values()), columns=feature_names)
dataset.head()
dataset["img_name"] = list(relevant_img_class_counts.keys())
dataset.head()
dataset = dataset[["img_name"] + feature_names]
dataset.head()
city_walkability_scores = {
"Bangalore": 0.63, "Chennai": 0.77, "Kochi": 0.57, "Kolkata": 0.81, "Mumbai": 0.85,
"Varanasi": 0.33, "Shimla": 0.22, "Bhubaneswar": 0.28, "Delhi": 0.87, "Guwahati": 0.39,
"Madurai": 0.40, "Panaji": 0.32, "Ahmedabad": 0.85, "Amritsar": 0.31, "Bikaner": 0.43, "Chandigarh": 0.91,
"Gangtok": 0.30, "Jaipur": 0.64, "Kanpur": 0.59, "Kolkata": 0.81, "Madurai": 0.40, "Pune": 0.81, "Shimla": 0.22,
"Surat": 0.62, "Trivandrum": 0.34, "Varanasi": 0.33,
}
len(city_walkability_scores)
img_walkability_bins = {}
cities = city_walkability_scores.keys()
for img in relevant_img_class_counts.keys():
if "panjim" in img.lower() or "panajim" in img.lower():
img = "panaji" + img
city = [c for c in cities if c.lower() in img.lower()][0]
img_walkability_bins[img] = int(city_walkability_scores[city] // 0.2)
img_walkability_bins
dataset["label"] = list(img_walkability_bins.values())
dataset.head()
X = dataset.drop(columns=["img_name", "label"])
y = dataset["label"]
type(X), type(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = DecisionTreeClassifier(random_state=42, max_depth=5) # tune max_depth
model.fit(X_train, y_train)
model.score(X_test, y_test)
model.tree_.compute_feature_importances(normalize=False)
feature_cols = X.columns
feature_cols
feat_imp_dict = dict(zip(feature_cols, model.feature_importances_))
feat_imp = pd.DataFrame.from_dict(feat_imp_dict, orient='index')
feat_imp.rename(columns = {0:'Feature Importance'}, inplace = True)
feat_imp.sort_values(by=['Feature Importance'], ascending=False).head()
dot_data = StringIO()
export_graphviz(model, out_file=dot_data,
filled=True, rounded=True,
special_characters=True, feature_names = feature_cols, class_names=['0', '1', '2', '3', '4']) # change class_names to ['0', '1', '2', '3', '4']
(graph, ) = graph_from_dot_data(dot_data.getvalue())
Img(graph.create_png())
###Output
_____no_output_____
###Markdown
=======================================================================================================================================================================
###Code
random_forest_model = RandomForestClassifier(random_state=42, max_depth=5)
random_forest_model.fit(X_train, y_train)
random_forest_model.score(X_test, y_test)
feat_imp_dict = dict(zip(feature_cols, random_forest_model.feature_importances_))
feat_imp = pd.DataFrame.from_dict(feat_imp_dict, orient='index')
feat_imp.rename(columns = {0:'Feature Importance'}, inplace = True)
feat_imp.sort_values(by=['Feature Importance'], ascending=False).head()
###Output
_____no_output_____
###Markdown
Initial Run
###Code
# imports assumed for this section (the original notebook's import cell is not shown here)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.table import Table

# AU Mic == TIC 441420236
file = 'tess2018206045859-s0001-0000000441420236-0120-s_lc.fits'
tbl = Table.read(file, format='fits')
AOK = (tbl['QUALITY'] == 0) & np.isfinite(tbl['PDCSAP_FLUX'])
df_tbl = tbl.to_pandas()
smo = df_tbl['PDCSAP_FLUX'][AOK].rolling(128, center=True).median()
med = np.nanmedian(df_tbl['PDCSAP_FLUX'][AOK])
mcmc = pd.read_table('aumic_mcmc.txt', delim_whitespace=True,
names=('walk','accept','step','chi','rad1','lat1','lon1', 'rad2','lat2','lon2','bright'))
plt.plot(mcmc['rad1'], marker='.', linestyle=None, alpha=0.1)
plt.plot(mcmc['rad2'], marker='.', linestyle=None, alpha=0.1)
plt.ylabel('rad')
_ = plt.hist(mcmc['rad1'],bins=100,alpha=0.5)
_ = plt.hist(mcmc['rad2'],bins=100,alpha=0.5)
plt.xlabel('rad')
plt.plot(mcmc['lat1']-np.pi/2, marker='.', linestyle=None, alpha=0.1)
plt.plot(mcmc['lat2']-np.pi/2, marker='.', linestyle=None, alpha=0.1)
plt.ylabel('lat')
_ = plt.hist(mcmc['lat1']-np.pi/2,bins=100,alpha=0.5)
_ = plt.hist(mcmc['lat2']-np.pi/2,bins=100,alpha=0.5)
plt.xlabel('lat')
plt.plot(mcmc['lon1'], marker='.', linestyle=None, alpha=0.1)
plt.plot(mcmc['lon2'], marker='.', linestyle=None, alpha=0.1)
plt.ylabel('lon')
_ = plt.hist(mcmc['lon1'],bins=100,alpha=0.5)
_ = plt.hist(mcmc['lon2'],bins=100,alpha=0.5)
plt.xlabel('lon')
lcbest = pd.read_table('aumic_lcbest.txt', delim_whitespace=True,
names=('time', 'flux','err','model','f1','f2','snum'))
plt.plot(lcbest['f1'])
plt.plot(lcbest['f2'])
plt.plot(mcmc['chi'] / np.size(lcbest['f1']), marker='.', alpha=0.1)
plt.yscale("log")
plt.plot(mcmc['chi']/ np.size(lcbest['f1']), mcmc['lat1']-np.pi/2, marker='.', linestyle='none', alpha=0.1)
plt.plot(mcmc['chi']/ np.size(lcbest['f1']), mcmc['lat2']-np.pi/2, marker='.', linestyle='none', alpha=0.1)
plt.xscale("log")
plt.xlabel('chisq')
plt.ylabel('lat')
plt.gca().invert_xaxis()
plt.xlim(1e3,10)
plt.figure(figsize=(15,4))
plt.plot(tbl['TIME'][AOK], tbl['PDCSAP_FLUX'][AOK]/med, lw=0.75, label='TESS 2-min', rasterized=True)
plt.ylabel('Flux');
plt.xlabel('BJD - 2457000 (days)');
plt.plot(lcbest['time'], lcbest['model'], c='r', label='Starspot Model', rasterized=True)
plt.legend(fontsize=12)
# plt.savefig('lc_model.pdf', dpi=150, bbox_inches='tight', pad_inches=0.25)
ok = np.where((mcmc['step'] > 1500))[0]
plt.figure(figsize=(12,6))
# ax=plt.subplot(111, projection="aitoff",)
plt.scatter(mcmc['lon1'][ok]* 180/np.pi, 90-(mcmc['lat1'][ok])* 180/np.pi, marker='o', alpha=0.1, s=1, rasterized=True)
plt.scatter(mcmc['lon2'][ok]* 180/np.pi, 90-(mcmc['lat2'][ok])* 180/np.pi, marker='o', alpha=0.1, s=1, rasterized=True)
plt.scatter(209.565806, 90-31.484912, c='r')
plt.scatter(343.430354, 90-46.778680, c='k')
plt.grid(True)
ds = 0.25
plt.figure(figsize=(6,6))
plt.scatter(mcmc['lon1'][ok]* 180/np.pi, 90-(mcmc['lat1'][ok])* 180/np.pi, marker=',', alpha=0.1, s=1, rasterized=True)
plt.scatter(209.565806, 90-31.484912, c='C1', rasterized=True)
plt.xlim(209.565806-ds, 209.565806+ds)
plt.ylim(90-31.484912-ds, 90-31.484912+ds)
plt.xlabel('Lon (deg)')
plt.ylabel('Lat (deg)')
plt.grid(True)
# plt.savefig('spot1.pdf', dpi=450, bbox_inches='tight', pad_inches=0.25, rasterized=True)
plt.figure(figsize=(6,6))
plt.scatter(mcmc['lon2'][ok]* 180/np.pi, 90-(mcmc['lat2'][ok])* 180/np.pi, marker=',', alpha=0.1, s=1, c='C1', rasterized=True)
plt.scatter(343.430354, 90-46.778680, c='C0', rasterized=True)
plt.xlim(343.430354-ds, 343.430354+ds)
plt.ylim(90-46.778680-ds, 90-46.778680+ds)
plt.xlabel('Lon (deg)')
plt.ylabel('Lat (deg)')
plt.grid(True)
# plt.savefig('spot2.pdf', dpi=450, bbox_inches='tight', pad_inches=0.25, rasterized=True)
###Output
_____no_output_____
###Markdown
Explore InclinationDid a few, and longer, MCMC runs with STSP
###Code
inclin = np.array([0., 15, 30, 45, 60], dtype='float')
chi = np.zeros_like(inclin)
for k in range(5):
chi[k] = pd.read_csv('aumic'+str(k)+'_parambest.txt',
delim_whitespace=True, names=('c','x'))['c'][7]
plt.plot(inclin, chi / np.float(len(lcbest)), '-o', c='k')
plt.xlabel('Stellar Inclination (deg)')
plt.ylabel('$\chi^2$')
print(chi / np.float(len(lcbest)))
plt.savefig('chisq_incl.pdf', dpi=150, bbox_inches='tight', pad_inches=0.25)
sim = 0
pbest = pd.read_csv('aumic'+str(sim)+'_parambest.txt', delim_whitespace=True,
header=None, usecols=(0,), names=('c'))
pbest
# aumic0.in aumic1.in aumic2.in aumic3.in aumic4.in
# i = 0, 15, 30, 45, 60 deg
mcmc = pd.read_table('aumic'+str(sim)+'_mcmc.txt', delim_whitespace=True,
names=('walk','accept','step','chi','rad1','lat1','lon1', 'rad2','lat2','lon2','bright'))
lcbest = pd.read_table('aumic'+str(sim)+'_lcbest.txt', delim_whitespace=True,
names=('time', 'flux','err','model','f1','f2','snum'))
plt.plot(mcmc['step'], mcmc['chi'] / np.size(lcbest['f1']), marker='.', alpha=0.1)
plt.yscale("log")
plt.plot(mcmc['step'], mcmc['lon1']* 180/np.pi, marker='.', linestyle=None, alpha=0.1)
plt.plot(mcmc['step'], mcmc['lon2']* 180/np.pi, marker='.', linestyle=None, alpha=0.1)
plt.ylabel('lon')
plt.xscale('log')
ok = np.where((mcmc['step'] > 2000))[0]
plt.figure(figsize=(15,4))
plt.plot(tbl['TIME'][AOK], tbl['PDCSAP_FLUX'][AOK]/med, lw=0.75,
label='TESS 2-min', rasterized=True)
plt.ylabel('Flux');
plt.xlabel('BJD - 2457000 (days)');
plt.plot(lcbest['time'], lcbest['model'], c='r',
label='Starspot Model', rasterized=True)
plt.legend(fontsize=12)
plt.savefig('lc_model.pdf', dpi=150, bbox_inches='tight', pad_inches=0.25)
ds = 0.25
plt.figure(figsize=(6,6))
plt.scatter(mcmc['lon1'][ok]* 180/np.pi, 90-(mcmc['lat1'][ok])* 180/np.pi, marker=',', alpha=0.1, s=1, rasterized=True)
plt.scatter(pbest['c'][10], 90-pbest['c'][9], c='C1', rasterized=True)
plt.xlim(pbest['c'][10]-ds, pbest['c'][10]+ds)
plt.ylim(90-pbest['c'][9]-ds, 90-pbest['c'][9]+ds)
plt.xlabel('Lon (deg)')
plt.ylabel('Lat (deg)')
plt.grid(True)
plt.savefig('spot1.pdf', dpi=450, bbox_inches='tight', pad_inches=0.25, rasterized=True)
plt.figure(figsize=(6,6))
plt.scatter(mcmc['lon2'][ok]* 180/np.pi, 90-(mcmc['lat2'][ok])* 180/np.pi, marker=',', alpha=0.1, s=1, c='C1', rasterized=True)
plt.scatter(pbest['c'][13], 90-pbest['c'][12], c='C0', rasterized=True)
plt.xlim(pbest['c'][13]-ds, pbest['c'][13]+ds)
plt.ylim(90-pbest['c'][12]-ds, 90-pbest['c'][12]+ds)
plt.xlabel('Lon (deg)')
plt.ylabel('Lat (deg)')
plt.grid(True)
plt.savefig('spot2.pdf', dpi=450, bbox_inches='tight', pad_inches=0.25, rasterized=True)
plt.scatter(mcmc['lon1'][ok]* 180/np.pi, np.abs(90-(mcmc['lat1'][ok])* 180/np.pi),
marker=',', alpha=0.1, s=1, rasterized=True)
plt.scatter(mcmc['lon2'][ok]* 180/np.pi, np.abs(90-(mcmc['lat2'][ok])* 180/np.pi), marker=',', alpha=0.1, s=1, c='C1', rasterized=True)
plt.scatter(pbest['c'][10], 90-pbest['c'][9], c='C4', rasterized=True)
plt.scatter(pbest['c'][13], 90-pbest['c'][12], c='C3', rasterized=True)
plt.xlim(0,365)
plt.ylim(0,90)
plt.grid(True)
###Output
_____no_output_____
###Markdown
Exploring Hacker News PostsIn this project, we'll work with a data set of submissions to the popular technology site [Hacker News](https://news.ycombinator.com/).Hacker News is a site started by the startup incubator Y Combinator, where user-submitted stories (known as "posts") are voted and commented upon, similar to reddit. Hacker News is extremely popular in technology and startup circles, and posts that make it to the top of Hacker News' listings can get hundreds of thousands of visitors as a result. Analysis goalOur main goal will be to determine the **Top 5 hours for posting to get the most comments** Step 1First we need to take a look at our data set and separate the headers from the rest of the data
###Code
import csv
with open("data_sets/HN_posts_year_to_Sep_26_2016.csv", encoding='utf8') as data_file:
hn = list(csv.reader(data_file))
print(len(hn))
headers = hn[0]
hn.remove(headers)
print(len(hn))
###Output
293120
293119
###Markdown
Now let's take a look at the first few rows of our data set
###Code
print("Headers:\n%s\n\nData:" % headers)
for row in hn[:5]:
print(row)
###Output
Headers:
['id', 'title', 'url', 'num_points', 'num_comments', 'author', 'created_at']
Data:
['12579008', 'You have two days to comment if you want stem cells to be classified as your own', 'http://www.regulations.gov/document?D=FDA-2015-D-3719-0018', '1', '0', 'altstar', '9/26/2016 3:26']
['12579005', 'SQLAR the SQLite Archiver', 'https://www.sqlite.org/sqlar/doc/trunk/README.md', '1', '0', 'blacksqr', '9/26/2016 3:24']
['12578997', 'What if we just printed a flatscreen television on the side of our boxes?', 'https://medium.com/vanmoof/our-secrets-out-f21c1f03fdc8#.ietxmez43', '1', '0', 'pavel_lishin', '9/26/2016 3:19']
['12578989', 'algorithmic music', 'http://cacm.acm.org/magazines/2011/7/109891-algorithmic-composition/fulltext', '1', '0', 'poindontcare', '9/26/2016 3:16']
['12578979', 'How the Data Vault Enables the Next-Gen Data Warehouse and Data Lake', 'https://www.talend.com/blog/2016/05/12/talend-and-Â\x93the-data-vaultÂ\x94', '1', '0', 'markgainor1', '9/26/2016 3:14']
###Markdown
Step 2As we can see, there are posts without comments, so we have to remove such posts from our data.
###Code
clean_hn = []
print("hn before cleaning: %s" % len(hn))
for row in hn:
n_comments = int(row[4])
if n_comments > 0:
clean_hn.append(row)
print("clean_hn after cleaning: %s" % len(clean_hn))
headers = clean_hn[0]
clean_hn.remove(headers)
print("clean_hn without header: %s" % len(clean_hn))
###Output
hn before cleaning: 293119
clean_hn after cleaning: 80401
clean_hn without header: 80400
###Markdown
Step 3We're specifically interested in posts whose titles begin with either _Ask HN_ or _Show HN_. Users submit _Ask HN_ posts to ask the Hacker News community a specific question. Below are a couple examples:* Ask HN: How to improve my personal website?* Ask HN: Am I the only one outraged by Twitter shutting down share counts?* Ask HN: Aby recent changes to CSS that broke mobile?Likewise, users submit Show HN posts to show the Hacker News community a project, product, or just generally something interesting. Below are a couple of examples:* Show HN: Wio Link ESP8266 Based Web of Things Hardware Development Platform'* Show HN: Something pointless I made* Show HN: Shanhu.io, a programming playground powered by e8vmWe'll compare these two types of posts to determine the following:1. Do Ask HN or Show HN receive more comments on average?2. Do posts created at a certain time receive more comments on average?Let's separate posts beginning with _Ask HN_ and _Show HN_ (and case variations) into two different lists next.
###Code
ask_posts = []
show_posts = []
other_posts = []
for row in clean_hn:
title = row[1]
if title.lower().startswith("ask hn"):
ask_posts.append(row)
elif title.lower().startswith("show hn"):
show_posts.append(row)
else:
other_posts.append(row)
print(len(ask_posts))
print(len(show_posts))
print(len(other_posts))
###Output
6911
5059
68430
###Markdown
Step 4Next, let's determine if ask posts or show posts receive more comments on average.
###Code
total_ask_comments = 0
for row in ask_posts:
total_ask_comments += int(row[4])
avg_ask_comments = total_ask_comments / len(ask_posts)
print("Average comments in 'Ask HN' posts: %s" % avg_ask_comments)
total_show_comments = 0
for row in show_posts:
total_show_comments += int(row[4])
avg_show_comments = total_show_comments / len(show_posts)
print("Average comments in 'Show HN' posts: %s" % avg_show_comments)
###Output
Average comments in 'Show HN' posts: 9.810832180272781
###Markdown
We've determined that, on average, _"Ask"_ posts receive more comments than _"Show"_ posts. Since ask posts are more likely to receive comments, we'll focus our remaining analysis just on these posts. Step 5Next, we'll determine if ask posts created at a certain time are more likely to attract comments. We'll use the following steps to perform this analysis:* Calculate the number of ask posts created in each hour of the day, along with the number of comments received.
###Code
import datetime as dt
result_list = []
for row in ask_posts:
result_list.append([row[6], int(row[4])])
counts_by_hour = {}
comments_by_hour = {}
for row in result_list:
date_time = row[0]
n_commnts = row[1]
date_time = dt.datetime.strptime(date_time, "%m/%d/%Y %H:%M")
hour = date_time.strftime("%H")
if hour not in counts_by_hour.keys():
counts_by_hour[hour] = 1
comments_by_hour[hour] = n_commnts
else:
counts_by_hour[hour] += 1
comments_by_hour[hour] += n_commnts
print("Posts count by hour:\n%s\n" % counts_by_hour)
print("Comments count by hour:\n%s" % comments_by_hour)
###Output
Posts count by hour:
{'02': 227, '01': 223, '22': 287, '21': 407, '19': 420, '17': 404, '15': 467, '14': 378, '13': 326, '11': 251, '10': 219, '09': 176, '07': 157, '03': 212, '16': 415, '08': 190, '00': 231, '23': 276, '20': 392, '18': 452, '12': 274, '04': 186, '06': 176, '05': 165}
Comments count by hour:
{'02': 2996, '01': 2089, '22': 3372, '21': 4500, '19': 3954, '17': 5547, '15': 18525, '14': 4972, '13': 7245, '11': 2797, '10': 3013, '09': 1477, '07': 1585, '03': 2154, '16': 4466, '08': 2362, '00': 2277, '23': 2297, '20': 4462, '18': 4877, '12': 4234, '04': 2360, '06': 1587, '05': 1838}
###Markdown
* Calculate the average number of comments ask posts receive by hour
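The cell below pairs the two dictionaries with a nested loop. As a hedged aside (not part of the original analysis), a single comprehension over the shared hour keys would compute the same averages:

```python
# Hypothetical alternative: counts_by_hour and comments_by_hour share the same keys,
# so the averages can be built in one pass.
avg_by_hour_alt = [
    [hour, comments_by_hour[hour] / counts_by_hour[hour]]
    for hour in counts_by_hour
]
```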
###Code
avg_by_hour = []
for counts_hour, posts in counts_by_hour.items():
for comments_hour, comments in comments_by_hour.items():
if counts_hour == comments_hour:
avg_comments = comments/posts
avg_by_hour.append([counts_hour, avg_comments])
print("Average number of comments ask posts receive by hour created:\n%s" % avg_by_hour)
###Output
Average number of comments ask posts receive by hour created:
[['02', 13.198237885462555], ['01', 9.367713004484305], ['22', 11.749128919860627], ['21', 11.056511056511056], ['19', 9.414285714285715], ['17', 13.73019801980198], ['15', 39.66809421841542], ['14', 13.153439153439153], ['13', 22.2239263803681], ['11', 11.143426294820717], ['10', 13.757990867579908], ['09', 8.392045454545455], ['07', 10.095541401273886], ['03', 10.160377358490566], ['16', 10.76144578313253], ['08', 12.43157894736842], ['00', 9.857142857142858], ['23', 8.322463768115941], ['20', 11.38265306122449], ['18', 10.789823008849558], ['12', 15.452554744525548], ['04', 12.688172043010752], ['06', 9.017045454545455], ['05', 11.139393939393939]]
###Markdown
Step 6Although we now have the results we need, this format makes it hard to identify the hours with the highest values. Let's finish by sorting the list of lists and printing the five highest values in a format that's easier to read.
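The cell below swaps each pair so that the built-in sort orders by the average. As a hedged aside, `sorted` with a key function (assuming `avg_by_hour` from the previous cell) would avoid the swap:

```python
# Hypothetical alternative: sort directly on the average (index 1) without swapping.
top_5_alt = sorted(avg_by_hour, key=lambda pair: pair[1], reverse=True)[:5]
```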
###Code
swap_avg_by_hour = []
for row in avg_by_hour:
swap_avg_by_hour.append([row[1], row[0]])
sorted_swap = sorted(swap_avg_by_hour, reverse=True)
top_5_hours_for_ask_posts_comments = sorted_swap[:5]
for row in top_5_hours_for_ask_posts_comments:
time_formated = dt.datetime.strptime(row[1], "%H")
time_formated = time_formated.strftime("%H:%M")
comment = "{}: {:.2f} comments per post in average".format(time_formated, row[0])
print(comment)
###Output
15:00: 39.67 comments per post in average
13:00: 22.22 comments per post in average
12:00: 15.45 comments per post in average
10:00: 13.76 comments per post in average
17:00: 13.73 comments per post in average
###Markdown
Analysis Load the required libraries.
###Code
library(oligo)
library(biomaRt)
library(data.table)
library(stringr)
library(dplyr)
library(ggplot2)
library(ggrepel)
library(qusage)
library(limma)
###Output
Loading required package: BiocGenerics
Loading required package: parallel
Attaching package: ‘BiocGenerics’
The following objects are masked from ‘package:parallel’:
clusterApply, clusterApplyLB, clusterCall, clusterEvalQ,
clusterExport, clusterMap, parApply, parCapply, parLapply,
parLapplyLB, parRapply, parSapply, parSapplyLB
The following objects are masked from ‘package:stats’:
IQR, mad, sd, var, xtabs
The following objects are masked from ‘package:base’:
anyDuplicated, append, as.data.frame, basename, cbind, colnames,
dirname, do.call, duplicated, eval, evalq, Filter, Find, get, grep,
grepl, intersect, is.unsorted, lapply, Map, mapply, match, mget,
order, paste, pmax, pmax.int, pmin, pmin.int, Position, rank,
rbind, Reduce, rownames, sapply, setdiff, sort, table, tapply,
union, unique, unsplit, which.max, which.min
Loading required package: oligoClasses
Welcome to oligoClasses version 1.52.0
Loading required package: Biobase
Welcome to Bioconductor
Vignettes contain introductory material; view with
'browseVignettes()'. To cite Bioconductor, see
'citation("Biobase")', and for packages 'citation("pkgname")'.
Loading required package: Biostrings
Loading required package: S4Vectors
Loading required package: stats4
Attaching package: ‘S4Vectors’
The following object is masked from ‘package:base’:
expand.grid
Loading required package: IRanges
Loading required package: XVector
Attaching package: ‘Biostrings’
The following object is masked from ‘package:base’:
strsplit
================================================================================
Welcome to oligo version 1.54.1
================================================================================
Attaching package: ‘data.table’
The following object is masked from ‘package:IRanges’:
shift
The following objects are masked from ‘package:S4Vectors’:
first, second
Attaching package: ‘dplyr’
The following objects are masked from ‘package:data.table’:
between, first, last
The following object is masked from ‘package:biomaRt’:
select
The following object is masked from ‘package:oligo’:
summarize
The following objects are masked from ‘package:Biostrings’:
collapse, intersect, setdiff, setequal, union
The following object is masked from ‘package:XVector’:
slice
The following objects are masked from ‘package:IRanges’:
collapse, desc, intersect, setdiff, slice, union
The following objects are masked from ‘package:S4Vectors’:
first, intersect, rename, setdiff, setequal, union
The following object is masked from ‘package:Biobase’:
combine
The following objects are masked from ‘package:BiocGenerics’:
combine, intersect, setdiff, union
The following objects are masked from ‘package:stats’:
filter, lag
The following objects are masked from ‘package:base’:
intersect, setdiff, setequal, union
Loading required package: limma
Attaching package: ‘limma’
The following object is masked from ‘package:oligo’:
backgroundCorrect
The following object is masked from ‘package:BiocGenerics’:
plotMA
###Markdown
Set the working directory and list the data files.
###Code
# Data directory
setwd('/home/mario/Projects/holmes_analysis/data')
# Read data
cel_files <- list.files(path = getwd(), pattern = '*.CEL.gz', full.names = TRUE)
# Set working directory
setwd('/home/mario/Projects/holmes_analysis')
###Output
_____no_output_____
###Markdown
Load the data and perform data normalisation RMA.
###Code
# Load data
parsed_cels <- oligo::read.celfiles(cel_files, verbose = TRUE)
# Background correction of the microarrays
parsed_cels_rma <- oligo::rma(parsed_cels, normalize = TRUE, background = TRUE)
# Obtain the expression matrix
expression_data <- parsed_cels_rma@assayData$exprs
expression_data <- as.data.frame(expression_data)
expression_data$affy_mouse430_2 <- rownames(expression_data)
expression_data <- expression_data[, c(21, 1:20)]
rownames(expression_data) <- NULL
# Print expression matrix
head(expression_data)
###Output
Loading required package: pd.mouse430.2
Loading required package: RSQLite
Loading required package: DBI
Platform design info loaded.
###Markdown
Obtain the translation table from Ensembl
###Code
# Connect with Ensembl
mart <- useEnsembl(biomart='ensembl', dataset='mmusculus_gene_ensembl')
mouse_probes <- row.names(parsed_cels_rma@assayData$exprs)
# Obtain the translation table
id_translation_table <- getBM(attributes = c('affy_mouse430_2', 'ensembl_gene_id', 'mgi_symbol'),
filters = 'affy_mouse430_2',
values = mouse_probes,
mart=mart)
id_translation_table$mgi_symbol <- toupper(id_translation_table$mgi_symbol)
# Print translation table
head(id_translation_table)
###Output
_____no_output_____
###Markdown
Translate the data
###Code
# Translate data
expression_data <- merge(x = id_translation_table, y = expression_data, by = 'affy_mouse430_2')
expression_data <- expression_data %>% dplyr::rename(Ensembl = ensembl_gene_id)
# Print translated data
head(expression_data)
###Output
_____no_output_____
###Markdown
Format the data
###Code
# Remove probes that do not match to Ensembl IDs
expression_data <- data.table(expression_data)
expression_data <- expression_data[!is.na(expression_data$Ensembl),]
# Compute the mean of all the probes that match to the same gene
sample_columns <- row.names(parsed_cels_rma@phenoData@data)
expression_data <- expression_data[,lapply(.SD, mean), by=Ensembl, .SDcols=sample_columns]
# Format the data columns
colnames(expression_data) <- gsub('_Mouse430v2.CEL.gz', '', colnames(expression_data))
colnames(expression_data) <- substr(colnames(expression_data), 12, 35)
colnames(expression_data)[1] <- 'Ensembl'
expression_data <- as.data.frame(expression_data)
# Translate again
expression_data <- merge(x = expression_data, y = id_translation_table, by.x = 'Ensembl', by.y = 'ensembl_gene_id')
expression_data <- expression_data[, c(23, 2:21)]
expression_data <- expression_data[!duplicated(expression_data), ]
colnames(expression_data)[1] <- 'Gene_symbol'
# Remove IDs that do not match to HGNC symbols
expression_data <- expression_data[!is.na(expression_data$Gene_symbol),]
expression_data <- expression_data[!(expression_data$Gene_symbol==''),]
# Print expression data
head(expression_data)
###Output
_____no_output_____
###Markdown
Single-gene analysis Study the effect of Dusp5 knockout Perform a t-test per gene between populations and correct for multiple testing.
###Code
# Compute tests
wt <- expression_data[, grepl('WT', colnames(expression_data))]
ko <- expression_data[, grepl('KO', colnames(expression_data))]
first <- TRUE
for (i in 1:dim(expression_data)[1]) {
test <- t.test(wt[i, ], ko[i, ])
row <- data.frame(Gene_symbol = expression_data[i, 'Gene_symbol'],
p.value = test$p.value,
mean_diff = test$estimate[[2]] - test$estimate[[1]])
if (first) {
first <- FALSE
ko_vs_wt <- row
next
}
ko_vs_wt <- rbind(ko_vs_wt, row)
}
# Multiple testing correction
ko_vs_wt$adjusted.p.value <- p.adjust(ko_vs_wt$p.value, method = 'BH')
# Print results
head(ko_vs_wt)
###Output
_____no_output_____
###Markdown
Plot the results.
###Code
# Select relevant genes to highlight
ko_vs_wt$mlog10PValue <- -log10(ko_vs_wt$p.value)
relevants <- ko_vs_wt[ko_vs_wt$adjusted.p.value <= 0.05, ]
relevants <- relevants[order(-abs(relevants$mean_diff)), ]
relevants <- relevants[1:25, ]
relevants <- relevants[!is.na(relevants$Gene_symbol), ]
# Volcano plot
options(repr.plot.width=25, repr.plot.height=10)
ko_vs_wt %>%
ggplot +
geom_point(aes(x = mean_diff, y = mlog10PValue, colour = adjusted.p.value <= 0.05), size = 4) +
geom_vline(xintercept = c(-1, 1), linetype="dashed", color = "red", size=1.5) +
scale_color_brewer(palette="Set2") +
theme_bw() +
theme(text = element_text(size=32),
axis.text.x = element_text(size=32),
axis.text.y = element_text(size=32)) +
geom_text_repel(data = relevants, aes(x = mean_diff, y = mlog10PValue, label = Gene_symbol), size = 8) +
labs(x = '-log2 fold-change', y = '-log10 p-value', colour = 'Adjusted p-value <= 0.05') +
labs(title = 'Knockout vs Wild type')
###Output
_____no_output_____
###Markdown
Study the effect of IL-33 (4 hrs) Perform a t-test per gene between populations and correct for multiple testing.
###Code
# Compute the tests
ut <- expression_data[, grepl('Untreated', colnames(expression_data))]
t <- expression_data[, grepl('IL-33', colnames(expression_data))]
first <- TRUE
for (i in 1:dim(expression_data)[1]) {
test <- t.test(ut[i, ], t[i, ])
row <- data.frame(Gene_symbol = expression_data[i, 'Gene_symbol'],
p.value = test$p.value,
mean_diff = test$estimate[[2]] - test$estimate[[1]])
if (first) {
first <- FALSE
t_vs_ut <- row
next
}
t_vs_ut <- rbind(t_vs_ut, row)
}
# Multiple testing correction
t_vs_ut$adjusted.p.value <- p.adjust(t_vs_ut$p.value, method = 'BH')
# Print the results
head(t_vs_ut)
###Output
_____no_output_____
###Markdown
Plot the results.
###Code
# Select relevant genes to highlight
t_vs_ut$mlog10PValue <- -log10(t_vs_ut$p.value)
relevants <- t_vs_ut[t_vs_ut$adjusted.p.value <= 0.05, ]
relevants <- relevants[order(-abs(relevants$mean_diff)), ]
relevants <- relevants[1:25, ]
relevants <- relevants[!is.na(relevants$Gene_symbol), ]
# Volcano plot
options(repr.plot.width=25, repr.plot.height=10)
t_vs_ut %>%
ggplot +
geom_point(aes(x = mean_diff, y = mlog10PValue, colour = adjusted.p.value <= 0.05), size = 4) +
geom_vline(xintercept = c(-1, 1), linetype="dashed", color = "red", size=1.5) +
scale_color_brewer(palette="Set2") +
theme_bw() +
theme(text = element_text(size=32),
axis.text.x = element_text(size=32),
axis.text.y = element_text(size=32)) +
geom_text_repel(data = relevants, aes(x = mean_diff, y = mlog10PValue, label = Gene_symbol), size = 8) +
labs(x = '-log2 fold-change', y = '-log10 p-value', colour = 'Adjusted p-value <= 0.05') +
labs(title = 'Treated vs Untreated')
###Output
Warning message:
“ggrepel: 7 unlabeled data points (too many overlaps). Consider increasing max.overlaps”
###Markdown
Gene set analysis Over-representation analysis (hypergeometric test)
###Code
# Read gene sets. Options: biocarta, go, kegg, reactome
gene_sets <- read.gmt('gene_sets/biocarta_gene_sets.gmt')
# Print gene sets
head(gene_sets)
###Output
_____no_output_____
###Markdown
Perform the hypergeometric test upon every gene set
###Code
# Obtain differentially expressed genes
differentially_expressed_genes <- t_vs_ut[t_vs_ut$adjusted.p.value <= 0.05, ]$Gene_symbol
# Perform the hypergeometric test
# Genes in the arrays
N <- length(expression_data$Gene_symbol)
# Number of differentiated genes
n <- length(differentially_expressed_genes)
# Test p-values
hyper.p.values <- c()
# Number of genes in the set
n_genes_set <- c()
# Number of differentially expressed genes in the set
n_genes_in_the_set <- c()
for (gene_set in gene_sets) {
# Number of differentially expressed genes in the set
x <- sum(differentially_expressed_genes %in% gene_set)
# Number of genes in the set
k <- length(unlist(gene_set))
# Compute the test
p.value <- phyper(x, k, N - k, n, lower.tail = FALSE)
hyper.p.values <- c(hyper.p.values, p.value)
# Store results
n_genes_set <- c(n_genes_set, k)
n_genes_in_the_set <- c(n_genes_in_the_set, x)
}
# Multiple testing correction
hyper.p.values <- p.adjust(hyper.p.values, 'BH')
hyper_results <- data.frame(gene.set = names(gene_sets),
adjusted.p.value = hyper.p.values,
n.set = n_genes_set,
n.in.set = n_genes_in_the_set)
relevant_hyper_results <- hyper_results[hyper_results$adjusted.p.value <= 0.05, ]
# Print results
relevant_hyper_results
###Output
_____no_output_____
###Markdown
In the North, we trust!**The European Social Survey (ESS)** is a biennial cross-national survey of attitudes and behaviour. Since its beginning in 2001, the study has been conducted 7 times. The results are published online.In this brief study, we are interested in which factors have seen the greatest changes in the ESS across the years. We observe that trust in political authorities is one of these. We examine trust in politicians and the European Parliament, and show that there has been a decrease in trust particularly towards the EU and in Central and Southern Europe. However, Northern European respondents report notably higher levels of trust. We speculate whether the decrease in trust towards authorities is related to Generalized Social Trust towards other people, but judging by visual inspection of the data, this does not seem to be the case.This notebook will guide you through the analysis. Please run each cell so that the code will run and figures will be shown.Note: please unzip the file data.zip into the same folder as this notebook. We have had to zip the data because of the file size constraints of Github. Ingest
###Code
import pandas as pd
import numpy as np
import zipfile
filename = 'ESS1-7e01.csv'
#Read file contents in pandas Data Frame
zf = zipfile.ZipFile('data.zip')
df = pd.read_csv(zf.open('ESS1-7e01.csv'), sep=',', low_memory=False)
#df = pd.read_csv(filename, sep=',', low_memory=False)
###Output
_____no_output_____
###Markdown
Drop uninteresting variables
###Code
#The data set contains some variables which are not particularly interesting for us. Let us drop some of them.
df = df.drop(columns=['edition', 'idno', 'name', 'cproddat', 'cedition', 'cname', 'cseqno'])
#Let's also drop weights for now
df = df.drop(columns=['dweight', 'pspwght', 'pweight'])
###Output
_____no_output_____
###Markdown
Data encoding and missing valuesMost of the questions in the survey are categorical or binary tickboxes but they are encoded as numbers.We would like to treat nominal variables differently to ordinal variables.However, it is difficult to recognize which variables are nominal and which ordinal based on the encoded values.Many questions are Likert-like. Because the ESS survey is time series data, we can analyze trends based on Likert-like and binary values.
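One way to get a feel for which variables are binary tickboxes and which are Likert-like scales is to count the distinct values per column; a minimal sketch (not part of the original analysis):

```python
# Hypothetical sketch: binary tickboxes have 2 distinct values,
# Likert-like scales typically have 5-11.
print(df.nunique().sort_values().head(20))
```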
###Code
#Some question include additional missing value options also encoded as numbers.
#These are encoded with numbers 6, 7, 8, 9, 55, 66, 77, 88, 99, 555, 666, etc.
#We will replace ESS missing data encodings with NaN. The below values don't appear naturally.
#However, we are still left with missing value encodings [6, 7, 8, 9].
df.replace(to_replace=[99, 88, 77, 66, 55, 999, 888, 777, 666, 555, 9999, 8888, 7777, 6666, 5555], value=np.nan, inplace=True)
#Replace missing data encodings with NaN in variables with fewer unique values
for col in list(df):
if 6 not in df[col].unique() and 7 in df[col].unique() and 8 in df[col].unique() and 9 in df[col].unique():
df[col].replace(to_replace=[7, 8, 9], value=np.nan, inplace=True)
for col in list(df):
if 5 not in df[col].unique() and 6 in df[col].unique() and 7 in df[col].unique() and 8 in df[col].unique() and 9 in df[col].unique():
df[col].replace(to_replace=[6, 7, 8, 9], value=np.nan, inplace=True)
###Output
_____no_output_____
###Markdown
Drop values with insufficient response rateWe still have a lot of data. We probably don't need all of it. Let's drop variables which have more than 50% missing values.
###Code
df = df[df.columns[df.isnull().mean() < 0.5]]
#Let's save this thinned data to a file so we don't need to continue to process such big files (and more importantly, so that we can share this).
df.to_csv('ESS1-7e01_mod.csv')
###Output
_____no_output_____
###Markdown
Load the preprocessed dataset (start here if you don't have the original ESS data)Github, through which we are sharing this notebook, has file size constraints. Because of this, we are loading in a dataset from which multiple variables were dropped through the aforementioned steps.
###Code
zf = zipfile.ZipFile('data_mod.zip')
df = pd.read_csv(zf.open('ESS1-7e01_mod.csv'), sep=',', low_memory=False)
df = df.drop(columns=['Unnamed: 0'])
###Output
_____no_output_____
###Markdown
Aggregate variables to a more insightful levelWe have a lot of data but nothing specific to look for.Perhaps we will find something interesting if we look at which variables have seen the greatest absolute change since the beginning of ESS.
###Code
#First, let's aggregate a mean for each variable per ESS round and country.
df.groupby(['essround','cntry']).agg('mean').unstack().T
#Since the questions use different scales, we'll hopefully get a more accurate idea by taking the percentage change from one round to another.
df.groupby(['essround','cntry']).agg('mean').unstack().pct_change()
#Let's only look at the change between the first and the last ESS round.
cum_changes = df.groupby(['essround','cntry']).agg('mean').unstack().pct_change(6)[6:].T
###Output
_____no_output_____
###Markdown
European aggregationTo look at Europe as a whole, let's again aggregate these averages to European level.
###Code
#We take the mean for each variable on level 0, which is the country variable in this DataFrame.
sorted_changes = cum_changes.mean(level=0).sort_values(by=[7])
#Fill infinite values with NaN.
sorted_changes = sorted_changes.replace([np.inf, -np.inf], np.nan).dropna()
#Let's change the name to something more appropriate.
sorted_changes.columns = ['pct_change']
#Calculate absolute change and make it a new column, and sort based on that.
sorted_changes['abs_pct_change'] = sorted_changes['pct_change'].abs()
sorted_changes.sort_values(by='abs_pct_change', ascending=False)
#Retrieve the 20 variables where we see the greatest change across Europe
top20 = sorted_changes.nlargest(20, 'abs_pct_change')
top20 = top20[['abs_pct_change', 'pct_change']]
#Make the table prettier.
top20.style.bar(subset=['pct_change', 'abs_pct_change'], align='mid', color=['#d65f5f', '#5fba7d'])
###Output
_____no_output_____
###Markdown
Codebook excerpts for the most changed variablesLet's examine what the most changed values mean by looking at the ESS codebook.
**dscrna**: "On what grounds is your group discriminated against?", a multiple choice tickbox question where this variable is a binary indicator of whether the respondent did not tick any other boxes. Because there is a negative change, the respondents are thus more able to tick one other box stating a factor that has led them to experience discrimination. Therefore, experiences of discrimination based on a group characteristic have increased over the years.
**dscrntn**: "On what grounds is your group discriminated against? - Nationality". Binary tickbox. Experiences of discrimination based on nationality have increased.
**dscrgnd**: "On what grounds is your group discriminated against? - Gender". Binary tickbox. Experiences of discrimination based on gender have increased.
**uempla**: "Using this card, which of these descriptions applies to what you have been doing for the last 7 days? - Unemployed and actively looking for a job." Binary tickbox. Unemployment and job-seeking activities have increased.
**dscrrlg**: "On what grounds is your group discriminated against? - Religion". Binary tickbox. Experiences of discrimination based on religion have increased.
**dscrrce**: "On what grounds is your group discriminated against? - Race". Binary tickbox. Experiences of discrimination based on race have increased.
**hswrk**: "Using this card, which of these descriptions applies to what you have been doing for the last 7 days? - Doing housework, looking after children or other persons." Binary tickbox. Housework activities have decreased.
**hswrkp**: "Which of the descriptions on this card applies to what he/she has been doing for the last 7 days? - Doing housework, looking after children or other persons" Binary tickbox. Partner's housework activities have decreased.
**rtrdp**: "Which of the descriptions on this card applies to what he/she has been doing for the last 7 days? - Retired" Binary tickbox. More partners have been retired.
**uemplap**: "Using this card, which of these descriptions applies to what he/she has been doing for the last 7 days? - Unemployed and actively looking for a job." Binary tickbox. Partner's unemployment and job-seeking activities have increased.
**rtrd**: "Which of the descriptions on this card applies to what you have been doing for the last 7 days? - Retired" Binary tickbox. More respondents have been retired.
**dscrage**: "On what grounds is your group discriminated against? - Age". Binary tickbox. Experiences of discrimination based on age have increased.
**edulvla**: "What is the highest level of education you have achieved?" Ordinal scale. Respondents' level of education has increased.
**freehms**: "Using this card, please say to what extent you agree or disagree with each of the following statements - Gay men and lesbians should be free to live their own life as they wish" Likert-like scale. Respondents agree with the statement more.
**uemplip**: "Which of the descriptions on this card applies to what he/she has been doing for the last 7 days? - Unemployed, wanting a job but not actively looking for a job" Binary tickbox. Number of partners who are unemployed, wanting a job but not seeking one has increased.
**trstplt**: "Using this card, please tell me on a score of 0-10 how much you personally trust each of the institutions I read out. 0 means you do not trust an institution at all, and 10 means you have complete trust. Firstly...... politicians?" Likert-like scale. Trust in politicians decreased.
**dsbld**: "Using this card, which of these descriptions applies to what you have been doing for the last 7 days? Permanently sick or disabled" Binary tickbox. Number of disabled increased.
**trstep**: "Using this card, please tell me on a score of 0-10 how much you personally trust each of the institutions I read out. 0 means you do not trust an institution at all, and 10 means you have complete trust. Firstly...... the European Parliament?" Likert-like scale. Trust in the European Parliament decreased.
**stfhlth**: "Still using this card, please say what you think overall about the state of health services in [country] nowadays?" Likert-like scale. Perception of health services quality has increased.
**iphlppl**: "Now I will briefly describe some people. Please listen to each description and tell me how much each person is or is not like you. Use this card for your answer. It's very important to her/him to help the people around her/him. She/he wants to care for their well-being." Likert-like scale. Self-identification towards helpful people decreased.
###Code
#A lot of stuff, a lot of explaining!
#We would like to visualize some of these changes.
#Since we're going to draw these graphs a lot, let's make a function out of it.
import matplotlib.pyplot as plt
def draw_change(var, group, stat):
fig, ax = plt.subplots(figsize=(15,7))
df.groupby(['essround',group])[var].agg(stat).unstack().plot(ax=ax)
df.groupby(['essround',group])[var].agg(stat).unstack().T.agg('mean').plot(ax=ax, style='--', colormap='gray', title=var)
plt.show()
###Output
_____no_output_____
###Markdown
A little caveat with the list of most changed variables is the emphasis that the above method puts on binary variables. Because we are looking at the changes as percentages, a change on the binary scale from 1 to 0 is rather drastic. Ideally, we'd eliminate binary variables from this examination (a sketch of dropping binary columns is given below). Hence we are focusing on Likert-like variables, for which the above examination made more sense. Finding the insightNow that we have a bunch of digestible data and a function that lets us explore them, we need to start exploring.Even if the task is to find "one insight", we cannot find an interesting insight without stumbling around multiple other possibilities for insights.First, we want to test something that is common knowledge. Education levels have risen across the world so we should see that in the ESS data. Further, we should see that Northern and Western Europeans have higher levels of education compared to Central and Southern Europe.
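As flagged in the caveat above, a hedged sketch (not applied in the rest of this notebook) of how binary tickbox variables could be excluded before computing the percentage changes:

```python
# Hypothetical sketch: keep only columns with more than two distinct non-missing values,
# i.e. drop binary tickbox variables, then reuse the same aggregation pipeline.
non_binary_cols = [col for col in df.columns if df[col].nunique() > 2]
df_likert = df[non_binary_cols]
# df_likert.groupby(['essround', 'cntry']).agg('mean').unstack().pct_change(6)
```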
###Code
#There were a lot of interesting observations! Let's look at some on country-level.
#First, education:
draw_change('edulvla', 'cntry', 'mean')
###Output
_____no_output_____
###Markdown
So many countries make the graph a bit of a mess. Let's group some of them together.We are assuming, a priori, that some countries are similar.Alternatively, we could do e.g. a cluster analysis and see if our perception of similar countries is in accordance with the data.But let's not question the status quo right now and let's go with traditional geography-inspired distinctions:
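As an aside, a hedged sketch of the cluster-analysis alternative mentioned above (hypothetical, not part of this notebook), assuming scikit-learn is installed:

```python
# Hypothetical sketch: cluster countries by their mean responses in the latest round.
from sklearn.cluster import KMeans

country_profiles = df[df['essround'] == 7].groupby('cntry').mean().fillna(0)
kmeans = KMeans(n_clusters=5, n_init=10, random_state=0).fit(country_profiles)
print(dict(zip(country_profiles.index, kmeans.labels_)))
```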
###Code
def labelRegion(cntry):
if cntry in ['DK', 'FI', 'SE', 'NO']:
return 'north'
if cntry in ['HU', 'PL', 'SI']:
return 'central'
if cntry in ['PT', 'ES']:
return 'south'
if cntry in ['DE', 'CH', 'FR', 'BE', 'NL']:
return 'west'
if cntry in ['GB', 'IE']:
return 'uki'
df['region'] = df.apply (lambda row: labelRegion(row['cntry']),axis=1)
#Let's look at education again - but regionally
draw_change('edulvla', 'region', 'mean')
###Output
_____no_output_____
###Markdown
We see what we know; Northern Europe is highly educated whereas the South is less so. However, we see that education levels have been increasing across the board.
###Code
#Let's look at values; acceptance of homosexuality
draw_change('freehms', 'region', 'mean')
###Output
_____no_output_____
###Markdown
We notice that people disagree less with the statement that "Gays and lesbians should be free to live their life as they wish". However, Central European nations are still more opposed to this compared to other European geographies. End the truisms: Insights into TrustTrust is another interesting variable. From listening to plenty of behavioural economics podcasts, I have been led to believe that countries with higher levels of Social Trust have higher GDPs. Unfortunately, we don't have GDP information in this data - but the geographical grouping also reflects the wealth of those nations.After some exploration, we choose Trust as the focal point for our insight. Focusing on this gives us plenty of room for further analyses.First, let's look at how much people trust politicians and the European Parliament.
###Code
#Trust is interesting, let's look how much people can trust politicians and the European Parliament
draw_change('trstplt', 'region', 'mean')
draw_change('trstep', 'region', 'mean')
draw_change('trstplt', 'cntry', 'mean')
draw_change('trstep', 'cntry', 'mean')
###Output
_____no_output_____
###Markdown
Some observations: The British have approximately mean levels of trust in politicians but the lowest trust in the EP. The trust of the Portuguese towards the EP has declined drastically since mid-ESS history (around 2010, after the Great Recession hit). Scandinavians continue to trust everyone.We also see that confidence intervals or drawing sigmas around the mean would help us understand whether there actually has been a difference throughout time (a sketch of such a band is given below). We must remember that the *n* of samples is quite high, so we might assume even from this that even smallish changes in the mean level indicate a true change. On the Theory of TrustWe saw a decline in trust towards political authorities. If we speculate a bit further, could an increasing lack of trust be the reason for the turmoil in Europe?Some researchers (Beilmann, 2017; Breen, 2016) have argued for a Generalized Social Trust Index which is measured by three questions in ESS:* Trust: ‘Would you say that most people can be trusted, or that you can’t be too careful in dealing with people?’ (0 = ‘You can't be too careful’ – 10 = ‘Most people can be trusted’);* Fairness: ‘Do you think that most people would try to take advantage of you if they got the chance, or would they try to be fair?’ (0 = ‘Most people would try to take advantage of me’ – 10 = ‘Most people would try to be fair’);* Helpfulness: ‘Would you say that most of the time people try to be helpful or that they are mostly looking out for themselves?’ (0 = ‘People mostly look out for themselves’ – 10 = ‘People mostly try to be helpful’).Do we observe a decline in the Generalized Social Trust Index, or are the European trust issues specifically related to political authority? Can the rising tide of extremist ideologies, increasing inequality, marginalizing rhetoric and the echo chambers of social media be manifestations of diminished Social Trust? Let us see.References:*Beilmann, M. (2017). Social Capital and Individualism–Collectivism at the Individual Level (Doctoral dissertation). Breen, M. J., & Healy, A. E. (2016). Changing Values, Attitudes and Behaviours in Ireland: An Analysis of European Social Survey Data in Ireland, 2002-2012. Cambridge Scholars Publishing.*
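As noted above, shaded uncertainty bands would make the trends easier to judge. A hedged sketch (hypothetical, mirroring `draw_change`) that adds a ±1 standard deviation band around each group mean:

```python
# Hypothetical variant of draw_change that shades +/- 1 SD around each group's mean.
def draw_change_with_band(var, group):
    fig, ax = plt.subplots(figsize=(15, 7))
    grouped = df.groupby(['essround', group])[var]
    means = grouped.mean().unstack()
    stds = grouped.std().unstack()
    for col in means.columns:
        ax.plot(means.index, means[col], label=col)
        ax.fill_between(means.index, means[col] - stds[col], means[col] + stds[col], alpha=0.2)
    ax.set_title(var)
    ax.legend()
    plt.show()

# Example: draw_change_with_band('trstep', 'region')
```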
###Code
#Let's calculate social trust, as defined in the literature
df['socialTrust'] = ((df.ppltrst + df.pplfair + df.pplhlp) / 3)
draw_change('socialTrust', 'cntry', 'mean')
draw_change('socialTrust', 'region', 'mean')
###Output
_____no_output_____
###Markdown
Alright, we don't really see a real change in social trust over the years, maybe a slight upward trend.Maybe the decline in social cohesion is actually exhibited through increased deviation in how much people trust others?
###Code
draw_change('socialTrust', 'cntry', 'std')
draw_change('socialTrust', 'region', 'std')
###Output
_____no_output_____
###Markdown
PAS Install Python dependencies
###Code
%pip install pandas matplotlib
###Output
_____no_output_____
###Markdown
BD model vs BIDE model
###Code
import os
import pandas as pd
import matplotlib.pyplot as plt
bd_vs_bide_folder = "bd_vs_bide"
bd_results_folder = "bd_results"
bide_results_folder = "bide_results"
n_cities = 3
columns = ['Time', 'Mean', 'SD', 'CI']
bd = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), 'bd_P_tot{}.csv'), sep=',', names=columns, header=None)
bide = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), 'bide_P_tot{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(bd['Time'], bd['Mean'], label='BD model')
ax.fill_between(bd['Time'], bd['Mean']-bd['SD'], bd['Mean']+bd['SD'], alpha=0.3)
ax.plot(bide['Time'], bide['Mean'], label='BIDE model')
ax.fill_between(bide['Time'], bide['Mean']-bide['SD'], bide['Mean']+bide['SD'], alpha=0.3)
ax.legend()
plt.show()
bd = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), 'bd_P_tot{}.csv'), sep=',', names=columns, header=None)
bide = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), 'bide_P_tot{}.csv'), sep=',', names=columns, header=None)
bd_equation = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), 'bd_BD{}.csv'), sep=',', names=columns, header=None)
bide_equation = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), 'bide_BIDE{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population and BD equation')
ax.plot(bd['Time'], bd['Mean'], label='BD model')
ax.plot(bd_equation['Time'], bd_equation['Mean'], label='BD equation')
n0 = bd['Mean'][0]
plt.hlines(y=n0, xmin=0, xmax=len(bd['Mean']), linestyles='dashed', label=f'N0 = {n0}')
ax.legend(loc='upper left')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population and BIDE equation')
ax.plot(bide['Time'], bide['Mean'], label='BIDE model')
ax.plot(bide_equation['Time'], bide_equation['Mean'], label='BIDE equation')
n0 = bide['Mean'][0]
plt.hlines(y=n0, xmin=0, xmax=len(bide['Mean']), linestyles='dashed', label=f'N0 = {n0}')
ax.legend(loc='upper left')
plt.show()
bd_pop = [None for i in range(n_cities)]
bide_pop = [None for i in range(n_cities)]
bd_equation = [None for i in range(n_cities)]
bide_equation = [None for i in range(n_cities)]
for i in range(n_cities):
bd_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_#P[{i}].csv'), sep=',', names=columns, header=None)
bide_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_#P[{i}].csv'), sep=',', names=columns, header=None)
bd_equation[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_SINGLE_BD{{i={i}.0}}.csv'), sep=',', names=columns, header=None)
bide_equation[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_SINGLE_BIDE{{i={i}.0}}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population and BD equation')
for i in range(n_cities):
# ax.plot(bd_pop[i]['Time'], bd_pop[i]['Mean'], label=f'BD model city {i+1}')
ax.plot(bd_equation[i]['Time'], bd_equation[i]['Mean'], label=f'BD equation city {i+1}')
ax.legend()
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population and BIDE equation')
for i in range(n_cities):
# ax.plot(bide_pop[i]['Time'], bide_pop[i]['Mean'], label=f'BIDE model city {i+1}')
ax.plot(bide_equation[i]['Time'], bide_equation[i]['Mean'], label=f'BIDE equation city {i+1}')
ax.legend()
plt.show()
bd_pop = [None for i in range(n_cities)]
bide_pop = [None for i in range(n_cities)]
for i in range(n_cities):
bd_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_#P[{i}].csv'), sep=',', names=columns, header=None)
bide_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_#P[{i}].csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population')
for i in range(n_cities):
ax.plot(bd_pop[i]['Time'], bd_pop[i]['Mean'], label=f'BD model city {i+1}')
for i in range(n_cities):
ax.plot(bide_pop[i]['Time'], bide_pop[i]['Mean'], label=f'BIDE model city {i+1}')
ax.legend()
plt.show()
bd_pop = [None for i in range(n_cities)]
for i in range(n_cities):
bd_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_#P[{i}].csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population')
for i in range(n_cities):
ax.plot(bd_pop[i]['Time'], bd_pop[i]['Mean'], label=f'BD model city {i+1}')
ax.fill_between(bd_pop[i]['Time'], bd_pop[i]['Mean'] - bd_pop[i]['SD'], bd_pop[i]['Mean'] + bd_pop[i]['SD'], label=f'SD City {i+1}', alpha=0.3)
ax.legend(loc='upper left')
plt.show()
bide_pop = [None for i in range(n_cities)]
for i in range(n_cities):
bide_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_#P[{i}].csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population')
for i in range(n_cities):
ax.plot(bide_pop[i]['Time'], bide_pop[i]['Mean'], label=f'BIDE model city {i+1}')
ax.fill_between(bide_pop[i]['Time'], bide_pop[i]['Mean'] - bide_pop[i]['SD'], bide_pop[i]['Mean'] + bide_pop[i]['SD'], label=f'SD City {i+1}', alpha=0.3)
ax.legend(loc='upper left')
plt.show()
species = 'PBD'
n_species = len(species)
bd_data = {}
for s in species:
bd_data[s] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_{s}_tot{{}}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population, B-D')
for s in species:
ax.plot(bd_data[s]['Time'], bd_data[s]['Mean'], label=f'#{s}')
ax.legend()
plt.show()
species = 'PBIDE'
n_species = len(species)
bide_data = {}
for s in species:
bide_data[s] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_{s}_tot{{}}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population, B-I-D-E')
for s in species:
ax.plot(bide_data[s]['Time'], bide_data[s]['Mean'], label=f'#{s}')
ax.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Balanced vs Unbalanced
###Code
import os
import pandas as pd
import matplotlib.pyplot as plt
balanced_vs_unbalanced_folder = "balanced_vs_unbalanced"
balanced_results_folder = "balanced_results"
unbalanced_results_folder = "unbalanced_results"
n_cities = 3
columns = ['Time', 'Mean', 'SD', 'CI']
balanced = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(balanced_vs_unbalanced_folder, balanced_results_folder), 'balanced_P_tot{}.csv'), sep=',', names=columns, header=None)
unbalanced = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(balanced_vs_unbalanced_folder, unbalanced_results_folder), 'unbalanced_P_tot{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(balanced['Time'], balanced['Mean'], label='Balanced system')
ax.fill_between(balanced['Time'], balanced['Mean']-balanced['SD'], balanced['Mean']+balanced['SD'], alpha=0.3)
ax.plot(unbalanced['Time'], unbalanced['Mean'], label='Unbalanced system')
ax.fill_between(unbalanced['Time'], unbalanced['Mean']-unbalanced['SD'], unbalanced['Mean']+unbalanced['SD'], alpha=0.3)
ax.legend()
plt.show()
balanced_pop = [None for i in range(n_cities)]
unbalanced_pop = [None for i in range(n_cities)]
for i in range(n_cities):
balanced_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(balanced_vs_unbalanced_folder, balanced_results_folder), f'balanced_#P[{i}].csv'), sep=',', names=columns, header=None)
unbalanced_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(balanced_vs_unbalanced_folder, unbalanced_results_folder), f'unbalanced_#P[{i}].csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population')
for i in range(n_cities):
ax.plot(balanced_pop[i]['Time'], balanced_pop[i]['Mean'], label=f'Balanced system city {i+1}')
for i in range(n_cities):
ax.plot(unbalanced_pop[i]['Time'], unbalanced_pop[i]['Mean'], label=f'Unbalanced system city {i+1}')
ax.legend()
# plt.axis([0, 2000, 0, 200])
plt.show()
###Output
_____no_output_____
###Markdown
Emigrate to Next vs Biggest vs Smallest city
###Code
import os
import pandas as pd
import matplotlib.pyplot as plt
next_vs_biggest_vs_smallest_folder = "next_vs_biggest_vs_smallest"
next_results_folder = "next_results"
biggest_results_folder = "biggest_results"
smallest_results_folder = "smallest_results"
n_cities = 3
columns = ['Time', 'Mean', 'SD', 'CI']
next = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, next_results_folder), 'next_P_tot{}.csv'), sep=',', names=columns, header=None)
biggest = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, biggest_results_folder), 'biggest_P_tot{}.csv'), sep=',', names=columns, header=None)
smallest = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, smallest_results_folder), 'smallest_P_tot{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(next['Time'], next['Mean'], label='Emigrate to Next')
ax.fill_between(next['Time'], next['Mean']-next['SD'], next['Mean']+next['SD'], alpha=0.3)
ax.plot(biggest['Time'], biggest['Mean'], label='Emigrate to Biggest')
ax.fill_between(biggest['Time'], biggest['Mean']-biggest['SD'], biggest['Mean']+biggest['SD'], alpha=0.3)
ax.plot(smallest['Time'], smallest['Mean'], label='Emigrate to Smallest')
ax.fill_between(smallest['Time'], smallest['Mean']-smallest['SD'], smallest['Mean']+smallest['SD'], alpha=0.3)
ax.legend()
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(next['Time'], next['Mean'], label='Emigrate to Next')
ax.plot(smallest['Time'], smallest['Mean'], color='green', label='Emigrate to Smallest')
ax.legend()
plt.show()
next_pop = [None for i in range(n_cities)]
biggest_pop = [None for i in range(n_cities)]
smallest_pop = [None for i in range(n_cities)]
for i in range(n_cities):
next_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, next_results_folder), f'next_#P[{i}].csv'), sep=',', names=columns, header=None)
biggest_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, biggest_results_folder), f'biggest_#P[{i}].csv'), sep=',', names=columns, header=None)
smallest_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, smallest_results_folder), f'smallest_#P[{i}].csv'), sep=',', names=columns, header=None)
next = ['Emigrate to Next', next_pop]
biggest = ['Emigrate to Biggest', biggest_pop]
smallest = ['Emigrate to Smallest', smallest_pop]
for strategy in [next, biggest, smallest]:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle(strategy[0])
for i in range(n_cities):
ax.plot(strategy[1][i]['Time'], strategy[1][i]['Mean'], label=f'City {i+1}')
ax.fill_between(strategy[1][i]['Time'], strategy[1][i]['Mean'] - strategy[1][i]['SD'], strategy[1][i]['Mean'] + strategy[1][i]['SD'], label=f'City {i+1}', alpha=0.3)
ax.legend(loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Child vs Children
###Code
import os
import pandas as pd
import matplotlib.pyplot as plt
child_vs_children_folder = "child_vs_children"
child_results_folder = "child_results"
children_results_folder = "children_results"
n_cities = 3
columns = ['Time', 'Mean', 'SD', 'CI']
child = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(child_vs_children_folder, child_results_folder), 'child_P_tot{}.csv'), sep=',', names=columns, header=None)
children = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(child_vs_children_folder, children_results_folder), 'children_P_tot{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(child['Time'], child['Mean'], label='Child')
ax.fill_between(child['Time'], child['Mean']-child['SD'], child['Mean']+child['SD'], alpha=0.3)
ax.plot(children['Time'], children['Mean'], label='Children')
ax.fill_between(children['Time'], children['Mean']-children['SD'], children['Mean']+children['SD'], alpha=0.3)
ax.legend()
plt.show()
species = 'PBD'
n_species = len(species)
child_data = {}
children_data = {}
for s in species:
child_data[s] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(child_vs_children_folder, child_results_folder), f'child_{s}_tot{{}}.csv'), sep=',', names=columns, header=None)
children_data[s] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(child_vs_children_folder, children_results_folder), f'children_{s}_tot{{}}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population, B-D')
for s in species:
ax.plot(child_data[s]['Time'], child_data[s]['Mean'], label=f'Child #{s}')
ax.legend()
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population, B-D')
for s in species:
ax.plot(children_data[s]['Time'], children_data[s]['Mean'], label=f'Children #{s}')
ax.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Ebay Mac Price Regression Analysis 0.1 IntentIn this notebook I will perform multivariate linear regression analysis on data collected from eBay's API regarding the sale of Mac Minis in a 60-day time period. The script used to obtain this data is contained within this folder but is no longer functional due to API deprecation. 0.2 Data DescriptionFollowing data collection the data was manually cleaned by removing listings that did not pertain to Mac Minis, had irregular or None input for any of the features listed below (e.g. "16GB" for "Processor Speed"), or included additional items.**listingType Values:**- Auction- FixedPrice (Indicates a Buy It Now offer)- Store Inventory**sellingState Values:**- EndedWithSales- EndedWithoutSales**hoursToSale:** Duration (in hours) until sale or closing of the listing without sale.**releaseYear:** Year the Mac Mini model was released. Only the years 2012, 2014, and 2018 were examined due to low counts of all other model years.**processorSpeed:** Speed, in gigahertz, of the processor.**Cores:** Number of core processors.**Memory:** Size of RAM in GB.**storageType:**- 0 : HDD (Hard drive)- 1 : SSD (Solid state drive)- 3 : HDD/SSD (Both included)- 4 : Fusion (A fusion drive)**totalSale:** Sale price including shipping and tax. 1. Import Dataset
###Code
import pandas as pd
dataset = pd.read_csv('/Users/kersh/Documents/Github/Portfolio/eBay Mac Price Regression/macmini.csv')
###Output
_____no_output_____
###Markdown
2. Preprocess Dataset Here I will only be looking at auction listings. In addition, for the purposes of this analysis I will only consider listings that ended in a successful sale.Outliers with a sale price of >$2,000 are removed from the dataset (only 2 sold at a price this high).
###Code
sold = dataset.loc[dataset['sellingState'] == 'EndedWithSales']
auction = sold.loc[sold['listingType'] == 'Auction']
# Remove outliers
auction = auction[~(auction['totalSale'] > 2000)]
# Reset index
auction = auction.reset_index(drop=True)
# Remove listingType and sellingState columns
auction = auction.drop(['listingType','sellingState'],axis=1)
auction
###Output
_____no_output_____
###Markdown
3. Check for MulticollinearityHigh or near-perfect correlation between two variables, known as multicollinearity, violates the assumptions of multiple regression and reduces the precision of the coefficient estimates. Variables were therefore checked for very high levels of correlation using a correlation matrix and heatmap.
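In addition to the pairwise correlations computed below, a variance inflation factor (VIF) check is a common complement. A hedged sketch (not part of the original analysis), assuming statsmodels is installed:

```python
# Hypothetical sketch: VIF per predictor; values above roughly 10 would suggest multicollinearity.
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor

X_vif = sm.add_constant(auction.drop(columns=['totalSale']))
for i, col in enumerate(X_vif.columns):
    if col != 'const':
        print(col, variance_inflation_factor(X_vif.values, i))
```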
###Code
# Calculate correlation matrix
corr = auction.corr()
display(corr)
# Plot heatmap using Seaborn
import seaborn as sns
sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, cmap='RdBu')
###Output
_____no_output_____
###Markdown
Aside from hoursToSale, which showed no noteworthy correlation with any other variables, all variables were correlated with each other to some extent, but not to a degree indicating multicollinearity (>80%). Therefore no further steps to remove multicollinearity were required. 4. View Descriptive Statistics
###Code
desc = auction.describe()
desc
###Output
_____no_output_____
###Markdown
Of noteworthy interest here is the high sales price standard deviation of 260.39, compared to the mean sale price of 385.32. This high level of variability will present a challenge for the model to overcome in order to create meaningful and accurate sales price estimates. 5. Visualize the Data
###Code
sns.set_theme(color_codes=True)
plot = sns.scatterplot(x="hoursToSale", y="totalSale", data=auction)
plot = sns.catplot(x="releaseYear", y="totalSale", data=auction)
plot = sns.catplot(x="Memory", y="totalSale", data=auction)
plot = sns.catplot(x="processorSpeed", y="totalSale", data=auction)
plot = sns.catplot(x="Cores", y="totalSale", data=auction)
plot = sns.catplot(x="storageType", y="totalSale", data=auction)
###Output
_____no_output_____
###Markdown
6. Build Regression Model 6.1 Get Dummy Variables Several variables in our model are categorical or best treated as such due to lack of continuity between values (release year, number of cores, and storage type). Because these variables cannot be directly entered into the regression model, they must first be converted to a series of one-hot encoded dummy variables. To prevent multicollinearity, the first column of each series of dummy variables is dropped.
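To make the effect of `drop_first` concrete, a toy illustration with hypothetical values (not rows from the dataset):

```python
# Hypothetical toy example: three release years become two dummy columns,
# with the first category (2012) acting as the baseline.
toy = pd.Series([2012, 2014, 2018, 2014], name='releaseYear')
print(pd.get_dummies(toy, drop_first=True))
```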
###Code
year_dummies = pd.get_dummies(auction['releaseYear'],drop_first=True)
core_dummies = pd.get_dummies(auction['Cores'],drop_first=True)
storage_dummies = pd.get_dummies(auction['storageType'],drop_first=True)
###Output
_____no_output_____
###Markdown
6.2 Build the Complete Dataset We now have everything we need to build the complete set of independent variables (X) and the target vector (Y).
###Code
X = [pd.DataFrame(auction[['hoursToSale','Memory','processorSpeed']]),year_dummies,core_dummies,storage_dummies]
X = pd.concat(X,axis=1)
Y = auction['totalSale']
###Output
_____no_output_____
###Markdown
6.3 Get Train and Test SetsIn order to ensure the results of our model generalize to data that the model was not trained on, it is best practice to split the data into train and test sets. This is done using scikitlearn's convenient test_train_split package.
###Code
from sklearn.model_selection import train_test_split
XTrain, XTest, YTrain, YTest = train_test_split(X, Y, test_size=0.25, random_state=1)
###Output
_____no_output_____
###Markdown
6.4 Instantiate and Fit the Model
###Code
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(XTrain,YTrain)
###Output
_____no_output_____
###Markdown
6.5 View Intercept and Coefficients
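The next cell prints the raw coefficient values in column order. As a hedged aside (not part of the original notebook), each coefficient can be labelled with its column name:

```python
# Hypothetical sketch: pair each coefficient with the corresponding column of the training set.
for name, coef in zip(XTrain.columns, model.coef_):
    print(f'{name}: {coef:.3f}')
```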
###Code
print('Intercept: {}'.format(model.intercept_))
for c in model.coef_:
print(c)
###Output
Intercept: 214.93719738723004
0.2929556342826053
12.964365735875063
-56.723796519234426
82.47345648019886
489.1375847282913
90.05985607384491
1.4522280589840788
98.32870286287502
104.58444162661739
###Markdown
7. Evaluate the Model 7.1 Rebuild the Model using statsmodels
###Code
import statsmodels.api as sm
# Adds a constant column to input
X2 = sm.add_constant(X, prepend=False)
regr = sm.OLS(Y, X2)
pred = regr.fit()
###Output
_____no_output_____
###Markdown
7.2 Test for Heteroscedasticity When performing regression analysis it is important to check for heteroscedasticity. If it is present, this may indicate that coefficient estimates have reduced precision. Two tests are commonly used to detect heteroscedasticity in regression models: the Breusch-Pagan and White tests. Both tests attempt to reject the null hypothesis that there is no heteroscedasticity and produce a p-value. For our purposes α = .05. Both tests are run using the statsmodels package.
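Should heteroscedasticity be detected below, a common remedy is to report heteroscedasticity-robust standard errors. A hedged sketch (not part of the original analysis) that reuses the statsmodels fit from section 7.1:

```python
# Hypothetical sketch: refit with HC3 robust standard errors;
# the coefficients are unchanged, but standard errors and p-values are adjusted.
robust_pred = regr.fit(cov_type='HC3')
print(robust_pred.summary())
```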
###Code
from statsmodels.stats import diagnostic as diag
# Breusch-Pagan test
_, pval, __, f_pval = diag.het_breuschpagan(pred.resid, pred.model.exog)
print(pval, f_pval)
print('-'*100)
if pval > 0.05:
print("Breusch-Pagan's Test:")
print("P-value {:.4}".format(pval))
print('No heteroscedasticity detected.')
else:
print("Breusch-Pagan's Test:")
print("p: {:.4}".format(pval))
print('Heteroscedasticity detected.')
# White's test
_, pval, __, f_pval = diag.het_white(pred.resid, pred.model.exog)
print(pval, f_pval)
print('-'*100)
if pval > 0.05:
print("White's Test:")
print("p: {:.4}".format(pval))
print('No heteroscedasticity detected.')
else:
print("White's Test:")
print("p: {:.4}".format(pval))
print('Heteroscedasticity detected.')
###Output
1.6804160116947507e-10 4.146953495132328e-11
----------------------------------------------------------------------------------------------------
Breusch-Pagan's Test:
p: 1.68e-10
Heteroscedasticity detected.
7.912803525802802e-08 1.1741164970513362e-08
----------------------------------------------------------------------------------------------------
White's Test:
p: 7.913e-08
Heteroscedasticity detected.
###Markdown
Here both tests detected heteroscedasticity in our model. This makes intuitive sense with respect to the graphs produced in section 5, which show unequal variances in price among different groups. While this reduces the model's statistical validity to some extent, it does not make our model's predictions less useful in practice. 7.3 Test for Autocorrelation Autocorrelation is present when errors are not independent of each other, violating the assumptions of the model. Autocorrelation is tested using the Ljung-Box test.
###Code
# Calculate the lag
lag = min(10, (len(X)//5))
print('Number of lags: {}'.format(lag))
print('-'*100)
test_results = diag.acorr_ljungbox(pred.resid, lags = lag, return_df = False)
ibvalue, p_val = test_results
if min(p_val) > 0.05:
print("The lowest p-value found was {:.4}".format(min(p_val)))
print("No autocorrelation.")
print('-'*100)
else:
print("The lowest p-value found was {:.4}".format(min(p_val)))
print("Autocorrelation detected.")
print('-'*100)
###Output
Number of lags: 10
----------------------------------------------------------------------------------------------------
The lowest p-value found was 0.1905
No autocorrelation.
----------------------------------------------------------------------------------------------------
###Markdown
7.4 Examine Residual Distribution and Mean Residuals are plotted using a qq plot and checked for normality. Adherence to the line indicates normally-distributed residuals. The mean residual is calculated to ensure it equals or is very close to 0.
###Code
import pylab
# Plot residuals
sm.qqplot(pred.resid, line='s')
pylab.show()
# Check mean of residuals
mean_residuals = sum(pred.resid)/ len(pred.resid)
print("Mean residual: {:.4}".format(mean_residuals))
###Output
_____no_output_____
###Markdown
7.5 Model Metrics
###Code
print(pred.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: totalSale R-squared: 0.763
Model: OLS Adj. R-squared: 0.759
Method: Least Squares F-statistic: 204.3
Date: Fri, 05 Mar 2021 Prob (F-statistic): 4.04e-172
Time: 14:01:13 Log-Likelihood: -3637.3
No. Observations: 581 AIC: 7295.
Df Residuals: 571 BIC: 7338.
Df Model: 9
Covariance Type: nonrobust
==================================================================================
coef std err t P>|t| [0.025 0.975]
----------------------------------------------------------------------------------
hoursToSale 0.3074 0.099 3.114 0.002 0.114 0.501
Memory 13.5332 0.780 17.345 0.000 12.001 15.066
processorSpeed -63.2028 10.851 -5.825 0.000 -84.516 -41.890
2014 77.0213 19.146 4.023 0.000 39.416 114.626
2018 477.8464 27.795 17.192 0.000 423.254 532.438
4 87.3136 24.272 3.597 0.000 39.640 134.988
2 8.1138 19.863 0.408 0.683 -30.900 47.128
3 84.8494 31.921 2.658 0.008 22.153 147.546
4 108.6229 74.747 1.453 0.147 -38.190 255.436
const 224.0184 33.757 6.636 0.000 157.716 290.321
==============================================================================
Omnibus: 200.923 Durbin-Watson: 1.885
Prob(Omnibus): 0.000 Jarque-Bera (JB): 1913.898
Skew: 1.242 Prob(JB): 0.00
Kurtosis: 11.538 Cond. No. 1.99e+03
==============================================================================
Notes:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.99e+03. This might indicate that there are
strong multicollinearity or other numerical problems.
###Markdown
7.6 Error Measurements
###Code
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
YPred = model.predict(XTest)
# mean squared error
mse = mean_squared_error(YTest, YPred)
# mean absolute error
mae = mean_absolute_error(YTest, YPred)
# root mean squared error
rmse = np.sqrt(mse)
# display the output
print("MSE {:.6}".format(mse))
print("MAE {:.6}".format(mae))
print("RMSE {:.6}".format(rmse))
###Output
MSE 13397.6
MAE 89.3917
RMSE 115.748
###Markdown
8. Repeated K-Fold Cross Validation In order to validate the model against numerous train/test splits, repeated K-Fold cross validation is performed.
###Code
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import cross_val_score
cv = RepeatedKFold(n_splits=10, n_repeats=3,random_state=1)
r2 = cross_val_score(model,X,Y,cv=cv,n_jobs=1,scoring='r2')
rmse = cross_val_score(model,X,Y,cv=cv,n_jobs=1,scoring='neg_root_mean_squared_error')
print('R2: {}'.format(np.mean(r2)))
print('RMSE: {}'.format(np.mean(rmse)))
###Output
R2: 0.7283601563606384
RMSE: -128.0057681237432
###Markdown
Analysis of word frequency (only considering nouns)
###Code
#text block
raw= ' '.join([x[2] for x in result])
# tokenzie and position tagging using nltk library
# http://www.nltk.org/book/ch05.html
# to understand the meaning of tags: nltk.help.upenn_tagset()
text = nltk.word_tokenize(raw)
postags= nltk.pos_tag(text)
# turn the result into dataframe for the convenience of processing
df = pd.DataFrame(postags,columns =['word','type'])
#filter words by type, only keeping nouns
typepattern_prefix=['NN']
mask = df.type.str.slice(0,2).isin(typepattern_prefix)
filtered=df[mask]
# plot word frequency
ax=filtered['word'].value_counts().sort_values(ascending=True).plot.barh(figsize=(5,10))
ax.set_xlabel('counts')
ax.set_title('Word frequency', fontsize=16)
###Output
_____no_output_____
###Markdown
Analysis of speech speed on the video timeline
###Code
df2=pd.DataFrame(result, columns = ['sTimestamp','eTimestamp','words'])
df2['sTimestamp']=pd.to_datetime(df2['sTimestamp'])
df2['eTimestamp']=pd.to_datetime(df2['eTimestamp'])
from datetime import datetime, timedelta
df2['durSeconds']= (df2['eTimestamp']-df2['sTimestamp'])/ timedelta(seconds=1)
df2['wordcounts']=df2.apply(lambda row: len(row['words'].split(' ')),axis='columns')
df2.sample()
#fastest and slowest line by speech speed
df2['speechSpeed']=df2['wordcounts']/df2['durSeconds']
vStart=min(df2['sTimestamp'])
df2['offsetVideoStart'] = (df2['sTimestamp']-vStart)/timedelta(seconds=1)
print('--------slowest spoken line:----------------')
print(df2.sort_values(by=['speechSpeed']).iloc[0])
print('--------fastest spoken line:----------------')
print(df2.sort_values(by=['speechSpeed']).iloc[-1])
#fastest and slowest line by speech speed
fig=plt.figure(figsize=(12,5))
ax=fig.add_subplot(111)
df2['speechSpeed']=df2['wordcounts']/df2['durSeconds']
ax.plot(df2['offsetVideoStart'],df2['speechSpeed'],'--')
ax.set_ylabel('words / second')
ax.set_xlabel('time from the start of the video (seconds)')
ax.annotate('\"and I think that I could bring us a stem\"',
xy=(43.45, 3.14465), xycoords='data',
xytext=(-30, -20), textcoords='offset points',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='top', size=14)
ax.annotate('\"information management\"',
xy=(41.74, 0.551116), xycoords='data',
xytext=(-30, 20), textcoords='offset points',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='bottom', size=14)
###Output
_____no_output_____
###Markdown
search for sentences
###Code
len(df2)
' '.join(df2["words"])
#examples of search for a sentence
from re import finditer
#test1
#searchWords = 'hardware devices'
#test2
searchWords = 'i created a python script on a raspberry pi and mounted a webcam'
for match in finditer(searchWords, ' '.join(df2["words"].str.strip())):
#print matches
print(match.span(), match.group())
startPos = match.span()[0]
endPos = match.span()[1]
#find the line indexes of the start and end position of each match
startLineIdx=-1
endLineIdx=-1
pos= 0
for index, row in df2.iterrows():
pos += len(row["words"].strip())+1 # 1 is the space added between lines
if startLineIdx ==-1 and startPos<pos:
startLineIdx=index
if endLineIdx==-1 and endPos<pos:
endLineIdx = index
if startLineIdx>0 and endLineIdx>0:
break
#verify
print(df2.loc[startLineIdx:endLineIdx,["sTimestamp","words"]])
###Output
(519, 583) i created a python script on a raspberry pi and mounted a webcam
sTimestamp words
13 2017-08-28 00:00:23.760 still adjusting to my home i created a
14 2017-08-28 00:00:26.340 python script on a raspberry pi and
15 2017-08-28 00:00:27.930 mounted a webcam on several allowing you
###Markdown
Estimating text loss in Middle Dutch chivalric epics This English-language, Python notebook accompanies the following publication:> Mike Kestemont and Folgert Karsdorp, "Het Atlantis van de Middelnederlandse ridderepiek. Een schatting van het tekstverlies met methodes uit de ecodiversiteit". *Spiegel der letteren* (2020).All figures and numbers were prepared using the code below. Future updates of the code and data will be managed in an open [Github repository](https://github.com/mikekestemont/chivalric_diversity). The code block below loads all (third-party) packages and modules necessary to run the module. These can be installed from the file `requirements.txt`: pip install -r requirements.txt
###Code
from functools import partial
from itertools import product
import numpy as np
np.random.seed(12345)
from scipy.special import erfinv
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("tufte.mplstyle")
plt.rcParams["text.usetex"] = False
%matplotlib inline
import scipy.stats as stats
from scipy.special import gammaln
###Output
_____no_output_____
###Markdown
Data We load the data from the spreadsheet file `mnl.xlsx`:
###Code
mnl = pd.read_excel('mnl.xlsx', header=None, names=('text', 'witness'))
mnl.head(10)
###Output
_____no_output_____
###Markdown
We are only interested in the count data, i.e. the number of witnesses per text (the technical term is "abundance data").
###Code
mnl.groupby('text').size().sort_values(ascending=False).head()
###Output
_____no_output_____
###Markdown
The counts per text can be plotted as follows:
###Code
fig, ax = plt.subplots(figsize=(10,18))
mnl.groupby('text').size().sort_values(ascending=True).plot.barh(ax=ax);
ax.set(xlabel='aantal handschriften', ylabel='',
title='Distributie van de (ons bekende) ridderepische teksten over tekstgetuigen')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig('output/Fig1.jpeg', dpi=300, transparent=True)
###Output
_____no_output_____
###Markdown
Yet a different perspective is to list the size of the frequency bins that we can distinguish within the manuscript counts:
###Code
types = mnl.groupby('text').size().sort_values(ascending=False).value_counts().sort_index()
types = types.to_frame(name='aantal teksten')
types['aantal handschriften'] = types.index
types.to_excel('output/Tab1.xlsx')
types
###Output
_____no_output_____
###Markdown
Finally, we define the auxiliary function `species_richness` to count the number of unique texts in the data (i.e. the number of non-zero counts):
###Code
def species_richness(counts):
return np.sum(counts > 0)
print('# unique texts:', species_richness(mnl.groupby('text').size()))
print('# witnesses:', len(mnl))
###Output
# unique texts: 74
# witnesses: 164
###Markdown
Jackknife The following function computes the first-order Jackknife estimate, on the basis of the abundance data in our data frame, as well as a confidence interval (.95 by default). This approach is detailed in the following paper:> K. Burnham & W. Overton, "Robust Estimation of Population Size When Capture Probabilities Vary Among Animals". *Ecology* (1979), 927-936.
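For reference, the same estimator can also be written in closed form (a sketch of the standard first-order jackknife formula for abundance data, where $S_{obs}$ is the observed number of texts, $f_1$ the number of texts attested exactly once, and $n$ the total number of witnesses):\begin{equation}\hat{S}_{jack1} = S_{obs} + f_1\frac{n-1}{n}\end{equation}The leave-one-out loop in the function below implements the same idea and additionally yields a standard error for the confidence interval.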
###Code
def jackknife(data, conf_lvl=0.95):
jack_stat = species_richness(data)
x = np.array(sum([[i] * c for i, c in enumerate(data, 1)], []))
index = np.arange(x.shape[0])
vals = []
for i in range(x.shape[0]):
t = x[index != i]
vals.append(species_richness(np.bincount(t)))
mean_jack_stat = np.mean(vals)
bias = (x.shape[0] - 1) * (mean_jack_stat - jack_stat)
estimate = jack_stat - bias
std_err = np.sqrt(
(x.shape[0] - 1) *
np.mean((mean_jack_stat - vals) *
(mean_jack_stat - vals), axis=0)
)
z_score = np.sqrt(2.0) * erfinv(conf_lvl)
conf_interval = estimate + z_score * np.array((-std_err, std_err))
return estimate, std_err, conf_interval
results = jackknife(mnl.groupby('text').size())
print('jackknife-estimate (order=1):', results[0], results[-1])
###Output
jackknife-estimate (order=1): 117.73170731707278 [106.64468284 128.8187318 ]
###Markdown
This implementation is verbose and uses an explicit `for`-loop, which iteratively leaves out observations and tracks the drops in diversity that follow from this operation. In the code blocks below we show that the same estimate can also be obtained in a fully analytical fashion. First we calculate the frequency counts for each unique text:
###Code
num_per_text = mnl.groupby('text').size()
num_per_text
###Output
_____no_output_____
###Markdown
Next, we store the species richness (the number of unique texts) in $t$:
###Code
t = species_richness(num_per_text)
t
###Output
_____no_output_____
###Markdown
Then we set $s$ to the number of texts that are only attested in a single witness:
###Code
s = sum(num_per_text == 1)
s
###Output
_____no_output_____
###Markdown
Only the $s$ texts that occur once will affect the species richness during the iterative Jackknife procedure. We can therefore predict that we will obtain the following deviations when applying the bootstrap:
###Code
mu = (((t - s) * t) + (s * (t - 1))) / t
mu
###Output
_____no_output_____
###Markdown
That means that we can calculate the bias as follows:
###Code
bias = (t - 1) * (mu - t)
bias
###Output
_____no_output_____
###Markdown
To account for this bias, we can subtract it from the original species richness in the observed data:
###Code
t - bias
###Output
_____no_output_____
###Markdown
Simple example
###Code
counts = [5, 4, 3, 3, 1, 1, 1, 1, 1]
names = 'ABCDEFGHI'
data = zip(counts, names)
df = pd.DataFrame(zip(names, counts), columns=('naam', 'mss'))
df.to_excel('output/Tab2.xlsx')
df
print('total # of witnesses:', df['mss'].sum())
species_richness(df['mss'])
jackknife(df['mss'])
data = np.array(df['mss'])
x = np.array(sum([[i]*c for i, c in enumerate(data, 1)], []))
tradition = [names[i - 1] for i in x]
print(tradition)
bootstrap = []
for i in range(len(tradition)):
tradition_ = [tradition[j] for j in range(len(tradition)) if i != j]
bootstrap.append((
(i + 1), tradition[i], ''.join(tradition_),
len(set(tradition_)), len(set(tradition_)) - len(set(tradition))))
df = pd.DataFrame(bootstrap, columns=('iteration', 'leftout', 'imputed tradition', 'richness', 'error'))
df.to_excel('output/Tab3.xlsx')
df
mean_estimate = np.mean(df['richness'])
print('Average estimate:', mean_estimate)
print('Bias:', mean_estimate - 9)
bias = 19 * (mean_estimate - 9)
bias
corrected = 9 - bias
corrected
conf_lvl = .95
std_err = np.sqrt(
19 * np.mean((mean_estimate - df['richness']) *
(mean_estimate - df['richness']), axis=0))
z_score = np.sqrt(2.0) * erfinv(conf_lvl)
conf_interval = corrected + z_score * np.array((-std_err, std_err))
conf_interval
###Output
_____no_output_____
###Markdown
Chao1 In the paper we eventually opt for the more recent, non-parametric formula "Chao1", which is described in this paper:> A. Chao & L. Jost, "Estimating diversity and entropy profiles via discovery rates of new species". *Methods in Ecology and Evolution* (2015), 873-882. Because we have "doubletons" in our data, we can use the following formula, where:- $\hat{f_0}$ is the (theoretical) number of non-observed species/texts;- $f_1$ is the number of species/texts attested exactly once ("singletons");- $f_2$ is the number of species/texts attested exactly twice ("doubletons");- $n$ is the total number of individuals/manuscripts in the observed data.\begin{equation}\hat{f_0} = \frac{(n - 1)}{n} \frac{f_1^2}{2f_2}\end{equation}The code block below returns the full, theoretical species richness as estimated by Chao1, i.e. it adds the estimated $\hat{f_0}$ to the species richness that was observed in the sample:
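As a quick worked instance (a sketch; the singleton and doubleton counts $f_1 = 44$ and $f_2 = 13$ are not printed explicitly below, but they are the values implied by the estimates that follow, with $n = 164$ witnesses and 74 observed texts):\begin{equation}\hat{f_0} = \frac{163}{164} \cdot \frac{44^2}{2 \cdot 13} \approx 74.0\end{equation}so the estimated total richness is roughly $74 + 74 \approx 148$ texts, in line with the Chao1 estimate computed below.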
###Code
def chao_richness(x):
x, n = x[x > 0], x.sum()
t = x.shape[0]
f1, f2 = (x == 1).sum(), (x == 2).sum()
return t + (n - 1) / n * ((f1 ** 2 / 2 / f2) if f2 > 0 else (f1 * (f1 - 1) / 2))
###Output
_____no_output_____
###Markdown
If we apply this function to our data, we obtain an even higher (but arguably more realistic) estimate of the loss in textual diversity for this literature. Note, however, that this estimate is still a theoretical *minimum estimate*, since the original loss could still be higher.
###Code
chao_richness(num_per_text)
###Output
_____no_output_____
###Markdown
Instead of reporting just this number, we apply a bootstrapped procedure in which we sample from the material using a multinomial distribution (see the Appendix Chao and Jost, 2015) and apply Chao1 to the resulting samples. This procedure allows us to calculate a .95 confidence interval for this value.
###Code
def bt_prob(x):
x, n = x[x > 0], x.sum()
f1, f2 = (x == 1).sum(), (x == 2).sum()
C = 1 - f1 / n * (((n - 1) * f1 / ((n - 1) * f1 + 2 * f2)) if f2 > 0 else
((n - 1) * (f1 - 1) / ((n - 1) * (f1 - 1) + 2)) if f1 > 0 else
0)
W = (1 - C) / np.sum(x / n * (1 - x / n) ** n)
p = x / n * (1 - W * (1 - x / n) ** n)
f0 = np.ceil(((n - 1) / n * f1 ** 2 / (2 * f2)) if f2 > 0 else
((n - 1) / n * f1 * (f1 - 1) / 2))
p0 = (1 - C) / f0
p = np.hstack((p, np.array([p0 for i in np.arange(f0)])))
return p
def bootstrap(x, n_iter=1000, conf=.95):
# define a multinomial probability distribution
# for the bootstrap procedure to sample from:
p, n = bt_prob(x), x.sum()
data_bt = np.random.multinomial(n, p, n_iter)
pro = np.array([chao_richness(row) for row in data_bt])
pro_mean = pro.mean(0)
lci_pro = -np.quantile(pro, (1 - conf) / 2, axis=0) + pro_mean
uci_pro = np.quantile(pro, 1 - (1 - conf) / 2, axis=0) - pro_mean
sd_pro = np.std(pro, axis=0)
pro = pro_mean - pro
return (lci_pro, uci_pro, sd_pro, pro)
def chao_estimate(x, n_iter=1000, conf=0.95):
pro = chao_richness(x)
(lci_pro, uci_pro, sd_pro, bt_pro) = bootstrap(x, n_iter=n_iter, conf=conf)
lci_pro, uci_pro = pro - lci_pro, pro + uci_pro
bt_pro = pro - bt_pro
return (lci_pro, uci_pro, bt_pro, pro)
###Output
_____no_output_____
###Markdown
The following block applies this bootstrapped procedure to obtain the final estimates:
###Code
lci_pro, uci_pro, bt_pro, pro = chao_estimate(num_per_text, n_iter=10000)
print('pro:', pro)
print('lci_pro:', lci_pro)
print('uci_pro:', uci_pro)
###Output
pro: 148.00750469043152
lci_pro: 106.21863495939421
uci_pro: 219.01578019221017
###Markdown
The array `bt_pro` contains the estimates that were collected during the bootstrap (1,000 iterations by default). Below, we plot the distribution of these numbers using a rainplot: [removing rain_alpha =.3 argument on pt.RainCloud() because it is showing as invalid]
###Code
import ptitprince as pt
fig, ax = plt.subplots(figsize=(8, 6))
d = list([(x, 'bootstrap') for x in bt_pro])
bt = pd.DataFrame(d, columns=('bootstrap', 'type'))
pt.RainCloud(
data=bt, x="type", y="bootstrap", ax=ax,
orient="h", alpha=.8, bw=.2, rain_alpha=.3, palette="Greys"
)
ax.axvline(pro, c='black', ls='--')
ax.axvline(uci_pro, c='darkgrey', ls='--')
ax.axvline(lci_pro, c='darkgrey', ls='--')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.set_yticks([])
ax.set_ylabel('')
plt.savefig('output/Fig2.png', dpi=300, transparent=True)
###Output
_____no_output_____
###Markdown
The idea that there were at least 100 texts is not completely unlikely, but it is a very conservative estimate, at the very bottom of the probability continuum. The estimate of ~148 texts (or more) is much more plausible, which would mean that *at least half of the chivalric texts have been lost*. Just as 100 is an extremely optimistic estimate, ~219 is the most pessimistic estimate: in that case, only a third of the ever available chivalric epics would have persisted through time, which is quite a dramatic, but not entirely unrealistic figure. Species accumulation curve In what preceded, we have investigated how many unique texts may have been lost, or, more positively, how many unique texts we may have not yet seen. In this concluding section, we investigate how many texts should be retrieved before we arrive at this diversity estimate. This new estimate provides us with information about the total population size, i.e. the total number of text witnesses. We follow Hsieh, Ma and Chao (2016) to compute this estimate using "Rarefaction Extrapolation". For details about this method, see:> Hsieh, Ma and Chao (2016): iNEXT: an R package for rarefaction and extrapolation of species diversity. *Methods in Ecology and Evolution*, 7, 1451–1456.
###Code
def bootstrap_re(x, fn=chao_richness, n_iter=1000, conf=.95):
# define a multinomial probability distribution
# for the bootstrap procedure to sample from:
p, n = bt_prob(x), x.sum()
data_bt = np.random.multinomial(n, p, n_iter)
Dq = fn(x)
pro = np.array([fn(row) for row in data_bt])
error = stats.norm.ppf(1 - (1 - conf) / 2) * np.std(pro, 0)
lci_pro = Dq - error
uci_pro = Dq + error
sd_pro = np.std(pro, axis=0)
return (lci_pro, uci_pro, sd_pro, Dq, )
def rarefaction_extrapolation(x, max_steps):
x, n = x[x > 0], x.sum()
def _sub(m):
if m <= n:
return np.sum(1 - np.array(
[np.exp(gammaln(n - i + 1) + gammaln(n - m + 1) -
gammaln(n - i - m + 1) - gammaln(n + 1)) if i <= (n - m) else
0 for i in x]))
else:
S = (x > 0).sum()
f1, f2 = (x == 1).sum(), (x == 2).sum()
f0 = ((n - 1) / n * f1 * (f1 - 1) / 2) if f2 == 0 else ((n - 1) / n * f1**2 / 2 / f2)
A = n * f0 / (n * f0 + f1)
return S if f1 == 0 else (S + f0 * (1 - A**(m - n)))
return np.array([_sub(mi) for mi in range(1, max_steps)])
counts = np.bincount(mnl.groupby('text').size())[1:] # ignore zero
x = np.array(sum([[i] * c for i, c in enumerate(counts, 1)], []))
###Output
_____no_output_____
###Markdown
Here too we use a bootstrap method with 100 samples:
###Code
max_steps = 1000
lci_pro, uci_pro, sd_pro, Dq = bootstrap_re(
x,
fn=partial(rarefaction_extrapolation, max_steps=max_steps),
n_iter=100
)
steps = np.arange(1, max_steps)
interpolated = np.arange(1, max_steps) < x.sum()
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(steps[interpolated], Dq[interpolated], color='C0')
ax.plot(x.sum(), Dq[x.sum() - 1], 'o')
ax.plot(steps[~interpolated], Dq[~interpolated], '--', color='C0')
ax.fill_between(steps, lci_pro, uci_pro, alpha=0.3)
ax.grid()
ax.set(xlabel='# handschriften', ylabel='# teksten', title='Species Accumulation Curve')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig('output/Fig3.png', dpi=300, transparent=True)
###Output
_____no_output_____
###Markdown
Welcome to my Game of Thrones Analysis Below is my first Kaggle project. This project will consist of the use of the following datasets: battles.csv represents data related to the War of the Five Kings from George R.R. Martin's A Song Of Ice And Fire series. character-deaths.csv is data related to a Bayesian Survival Analysis of Game of Thrones. character-predictions.csv is data scraped from a wiki that covers some predictions, and here is the methodology that may or may not be covered here. 1: Load Data
###Code
import os
import pandas as pd
import numpy as np
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly.graph_objs import *
import matplotlib.pyplot as mplt
#go offline with plotly
init_notebook_mode(connected=True)
#set working directory
WRKSPC = 'C:\\Users\\Chris\\Analytics\\gameofthrones_analysis\\'
#loading data as dataframes
battles = pd.read_csv(WRKSPC+'battles.csv')
deaths = pd.read_csv(WRKSPC+'character-deaths.csv')
predictions = pd.read_csv(WRKSPC+'character-predictions.csv')
###Output
_____no_output_____
###Markdown
Battles: A Preliminary Summary battles.csv contains the name of each battle, the year it happened, who was attacking (along with a somewhat more granular level of who was involved), who consisted of the defense, house related to each side, count of deaths and major deaths, captures, sizes, region, some notes, and seasonality. Just looking at the data, the first areas I'd like to address are the significance of vital factors regarding each side's parameters. It is also worth noting that this data set paints a higher level picture of some deaths. It is a smaller set of data but in some ways this might be worth coming back to for some relations with other data.
###Code
#print battles
battles.head()
print battles.shape
#print battles.describe()
battles.describe()
###Output
_____no_output_____
###Markdown
Deaths: A Preliminary Summary character-deaths.csv contains the name of the character, allegiance to what house, death year, book they died in, chapter in which they died, gender, nobility, GoT appearance, and each book they appeared in. Off the bat the scale of this file will allow me to get a feel for the prevalence of death. There might be relations between the frequency of death and a certain house. There isn't too much depth, at least at first glance. The file just states when and if someone died (maybe how many times they died too -- we'll cover this later).
###Code
#print deaths
deaths.head()
print deaths.shape
#print deaths.describe()
deaths.describe()
###Output
_____no_output_____
###Markdown
Predictions: A Preliminary Summary character-predictions.csv contains more interesting data. This will need more than a high level glance.
###Code
#print predictions
predictions.head()
print predictions.shape
#print predictions.describe()
predictions.describe()
###Output
_____no_output_____
###Markdown
2: Exploring the data Battles Let's count the number of commanders listed as the attackers and see if there is a relation between this field and the scale of a side. First I'll print the attacking commanders for each battle where attacker size is null or attacking commander is null.
###Code
battles_w_nulls = battles[pd.isnull(battles['attacker_size']) | pd.isnull(battles['attacker_commander'])]
battles_w_nulls[['name', 'attacker_commander']]
###Output
_____no_output_____
###Markdown
These records are going to have to be left out of the analysis, so let's create a new dataframe without them
###Code
#assign a new df for clean battles data
battles_df = battles[battles.attacker_size.notnull() & battles.attacker_commander.notnull()]
battles_df[['name','attacker_size','attacker_commander']]
###Output
_____no_output_____
###Markdown
To loosely verify we are working with the right data, let's see if our subsets add up
###Code
#length of the original should equal that of with and without nulls combined
print 'Actual:', len(battles)
print len(battles)==len(battles_df)+len(battles_w_nulls), len(battles_df), '+', len(battles_w_nulls), '=', len(battles_df)+len(battles_w_nulls)
###Output
Actual: 38
True 24 + 14 = 38
###Markdown
Moving on we can now plot the two variables. To do this we add a column to our data that is the count of attacking commanders
###Code
#REMINDER: as a learning assignment, figure out how to do this with the appropriate method
battles_df['attacking_com_count'] = battles_df.apply(lambda row: len(row['attacker_commander'].split(',')), axis=1)
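#alternative sketch (re: the reminder above): the same count can be computed with vectorized
#string methods and .assign(), which also avoids the SettingWithCopyWarning shown below, e.g.:
#battles_df = battles_df.assign(attacking_com_count=battles_df['attacker_commander'].str.split(',').str.len())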
#sort
battles_df = battles_df.sort_values('attacker_size')
x=battles_df['attacker_size'].tolist()
y=battles_df['attacking_com_count'].tolist()
mplt.scatter(x, y)
mplt.show()
###Output
C:\Users\Chris\Anaconda2\lib\site-packages\ipykernel_launcher.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
Deaths Moving along I'd like to take a look at the character-deaths dataset. This and the battles datasets can be used for basic analyses.
###Code
deaths.head(3)
deaths.shape
deaths.describe()
deaths.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 917 entries, 0 to 916
Data columns (total 13 columns):
Name 917 non-null object
Allegiances 917 non-null object
Death Year 305 non-null float64
Book of Death 307 non-null float64
Death Chapter 299 non-null float64
Book Intro Chapter 905 non-null float64
Gender 917 non-null int64
Nobility 917 non-null int64
GoT 917 non-null int64
CoK 917 non-null int64
SoS 917 non-null int64
FfC 917 non-null int64
DwD 917 non-null int64
dtypes: float64(4), int64(7), object(2)
memory usage: 93.2+ KB
###Markdown
Predictions This data looks like it would be utilized for more machine learning-related work. Since I plan on exploring pandas & jupyter as a toolkit I'll do some ad hoc work to get a picture of what this data means.
###Code
predictions.head(3)
print list(predictions.columns)
predictions[['isAlive','pred','alive','actual','male','culture',
'book1','book2','book3','book4','book5','age',
'numDeadRelations','boolDeadRelations','isPopular','popularity']].corr()
predictions.describe()
predictions.shape
###Output
_____no_output_____
###Markdown
Preliminaries Loading stuff, defining helpful functions etc. N.b.: code repetition etc. is not representative, because this is a more-or-less throwaway ipynb.
###Code
import json
def load_jsonl(f, max_l=-1):
with open(f) as fh:
lines = fh.readlines()
return [json.loads(s) for s in (lines if max_l < 0 else lines[:max_l])]
def get_used_vocab_size(data):
return list(dict.fromkeys(word for d in data for word in d['message']).keys())
import numpy as np
data_small = load_jsonl('exp1.5-best-diag.jsonl')
seen_small = np.loadtxt('egg/zoo/basic_games/data_generation_scripts/exp1.5-train-l300-r0.25-s42.txt', dtype=int)
unseen_small = np.loadtxt('egg/zoo/basic_games/data_generation_scripts/exp1.5-eval-l100-r0.25-s42.txt', dtype=int)
for d in data_small:
d['input'] = (d['input'][1], d['input'][0])
if any((x, y) == d['input'] for x, y in seen_small):
d['seen'] = True
else:
d['seen'] = False
used_vocab_small = get_used_vocab_size(data_small)
normalised_vocab_small = dict((u, str(i)) for i, u in enumerate(u for u in used_vocab_small if u != 0))
normalised_vocab_small[0] = '.'
data_large = load_jsonl('exp3-best-diag.jsonl')
seen_large = np.loadtxt('egg/zoo/basic_games/data_generation_scripts/exp3-train-l900-r0.25-s42.txt', dtype=int)
unseen_large = np.loadtxt('egg/zoo/basic_games/data_generation_scripts/exp3-eval-l300-r0.25-s42.txt', dtype=int)
for d in data_large:
d['input'] = (d['input'][1], d['input'][0])
if any((x, y) == d['input'] for x, y in seen_large):
d['seen'] = True
else:
d['seen'] = False
used_vocab_large = get_used_vocab_size(data_large)
normalised_vocab_large = dict((u, str(i)) for i, u in enumerate(u for u in used_vocab_large if u != 0))
normalised_vocab_large[0] = '.'
def normalise(message, vocab):
return ' '.join(vocab[m] for m in message)
###Output
_____no_output_____
###Markdown
Some helpful grouper functions
###Code
import tabulate
from operator import itemgetter
from itertools import groupby
def group_by_sum(data):
key = itemgetter('label')
grouped = groupby(sorted(data, key=key), key=key)
grouped_by = {}
for label, group in grouped:
grouped_by[label] = list(group)
return grouped_by
def group_by_summand(data):
max_summand = max(d['input'][0] for d in data)
grouped_by = {}
for i in range(max_summand):
label = i
group = [d for d in data if label in d['input']]
grouped_by[label] = list(group)
return grouped_by
###Output
_____no_output_____
###Markdown
Analysis Correlation between performance on training and evaluation data. We see moderate correlation between performance on the training/evaluation set broken down by label (sum) and no statistically significant correlation when grouped by numbers appearing in the input (summand).
###Code
from scipy.stats import pearsonr
def correlation_seen_unseen(groups):
accs_train = []
accs_test = []
for label, group in groups.items():
num_seen = sum(d['seen'] for d in group)
num_unseen = sum(not d['seen'] for d in group)
if num_seen and num_unseen:
accs_train.append(sum(d['correct'] for d in group if d['seen']) / num_seen)
accs_test.append(sum(d['correct'] for d in group if not d['seen']) / num_unseen)
return pearsonr(accs_train, accs_test)
print("Grouped by label:")
corr, p_val = correlation_seen_unseen(group_by_sum(data_small))
print(f"Correlation, small ds: {corr:.2f}, P-Value, {p_val:.2f}")
corr, p_val = correlation_seen_unseen(group_by_sum(data_large))
print(f"Correlation, large ds: {corr:.2f}, P-Value, {p_val:.2f}")
print("Grouped by input:")
corr, p_val = correlation_seen_unseen(group_by_summand(data_small))
print(f"Correlation small ds: {corr:.2f}, P-Value, {p_val:.2f}")
corr, p_val = correlation_seen_unseen(group_by_summand(data_large))
print(f"Correlation, large ds: {corr:.2f}, P-Value, {p_val:.2f}")
###Output
Grouped by label:
Correlation, small ds: 0.46, P-Value, 0.01
Correlation, large ds: 0.58, P-Value, 0.00
Grouped by input:
Correlation small ds: 0.26, P-Value, 0.27
Correlation, large ds: 0.18, P-Value, 0.27
###Markdown
Used vocabulary. The larger dataset uses more tokens in the vocabulary. This appears reasonable, given the increased dataset size with the same fixed message length (5 + in both cases).
###Code
print(len(normalised_vocab_small))
print(len(normalised_vocab_large))
###Output
7
11
###Markdown
Symmetry: Investigation of behaviour on input pairs of the form $((x,y),(y,x))$
###Code
def group_by_symmetry(data):
groups = {}
for d in data:
x, y = d['input']
if x != y:
if (x, y) in groups:
print((x, y))
print(groups)
raise ValueError("wat")
if (y, x) in groups:
groups[(y, x)].append(d)
else:
groups[(x, y)] = [d]
return groups
###Output
_____no_output_____
###Markdown
For the small dataset: how many pairs were consistently predicted correctly/incorrectly and how many were not predicted consistently?
###Code
groups_small = group_by_symmetry(data_small)
assert len(groups_small) == 190
print("Both pairs predicted correctly:",
sum(d1['correct'] == d2['correct'] == True for _, (d1, d2) in groups_small.items()))
print("Both pairs predicted incorrectly:",
sum(d1['correct'] == d2['correct'] == False for _, (d1, d2) in groups_small.items()))
print("pairs predicted inconsinstently:", sum(d1['correct'] != d2['correct'] for _, (d1, d2) in groups_small.items()))
###Output
Both pairs predicted correctly: 138
Both pairs predicted incorrectly: 15
pairs predicted inconsistently: 37
###Markdown
For the large dataset: how many pairs were consistently predicted correctly/incorrectly and how many were not predicted consistently? The large dataset is different from the small dataset in the sense that it does not contain all possible input pairs up to $n_{max}$. This means that there are input pairs whose symmetric counter-parts are not contained in the dataset. Hence the split into `symmetrics` and `asymmetrics`, i.e. pairs with/without counter-parts.
###Code
groups_large = group_by_symmetry(data_large)
large_symmetrics = {k: v for k, v in groups_large.items() if len(v) == 2}
large_asymmetrics = {k: v for k, v in groups_large.items() if len(v) == 1}
print("Both pairs predicted correctly:",
sum(d1['correct'] == d2['correct'] == True for _, (d1, d2) in large_symmetrics.items()))
print("Both pairs predicted incorrectly:",
sum(d1['correct'] == d2['correct'] == False for _, (d1, d2) in large_symmetrics.items()))
print("pairs predicted inconsinstently:",
sum(d1['correct'] != d2['correct'] for _, (d1, d2) in large_symmetrics.items()))
###Output
Both pairs predicted correctly: 263
Both pairs predicted incorrectly: 46
pairs predicted inconsistently: 124
###Markdown
Here, we build a three-by-three table for the symmetric pairs, containing the following information: Seen, unseen and 50/50 denote whether the pairs were exclusively in the training/eval sets or split between both. Similarly, Correct, wrong and 50/50 denote whether both pairs were predicted correctly, wrongly, or exactly one was predicted correctly.
###Code
from IPython.display import HTML, display
def three_by_three_table(groups):
table = np.zeros((3, 3))
for label, (d1, d2) in groups.items():
x = 0 if d1['seen'] == d2['seen'] == True else 1 if d1['seen'] == d2['seen'] == False else 2
y = 0 if d1['correct'] == d2['correct'] == True else 1 if d1['correct'] == d2['correct'] == False else 2
table[x, y] += 1
return table
rows = iter(['Both seen', 'Both unseen', '50/50'])
print('For the small dataset:')
tabulate.tabulate(map(lambda x: [next(rows)] + x, three_by_three_table(groups_small).tolist()),
headers=["Both Correct", "Both Wrong", "50/50"],
tablefmt='html')
rows = iter(['Both seen', 'Both unseen', '50/50'])
print('For the large dataset:')
tabulate.tabulate(map(lambda x: [next(rows)] + x, three_by_three_table(large_symmetrics).tolist()),
headers=["Both Correct", "Both Wrong", "50/50"], tablefmt='html')
###Output
For the large dataset:
###Markdown
Same as above, but for asymmetric inputs that do not have a symmetric counter-part. Naturally, only calculated for the large dataset.
###Code
def two_by_two_table(groups):
table = np.zeros((2, 2))
for label, [d1] in groups.items():
x = 0 if d1['seen'] else 1
y = 0 if d1['correct'] else 1
table[x, y] += 1
return table
rows = iter(['Seen', 'Unseen'])
tabulate.tabulate(map(lambda x: [next(rows)] + x, two_by_two_table(large_asymmetrics).tolist()),
headers=["Correct", "Wrong"], tablefmt='html')
###Output
_____no_output_____
###Markdown
The large dataset gives us the opportunity to compare the performance on inputs where the symmetric counterpart was observed during training against inputs where this is not possible, because the symmetric input was not part of the dataset. The output of this cell describes the following contingency table: where 50/50 means that two symmetric inputs are split between the train/evaluation splits and unseen means that an input is in the evaluation set. Correct and Wrong denote whether the prediction for the unseen example is correct.
###Code
def two_by_two_table_sym_asym(sym_groups, asym_groups, ignore_seen_wrong=False):
fifty_fifties = [(d1, d2) for (_, (d1, d2)) in sym_groups.items() if d1['seen'] != d2['seen']]
sym_correct = 0
sym_wrong = 0
asym_correct = 0
asym_wrong = 0
for d1, d2 in fifty_fifties:
seen, unseen = d1 if d1['seen'] else d2, d2 if d1['seen'] else d1
#print(seen['correct'])
#print(unseen['correct'])
assert seen['seen']
assert not unseen['seen']
if not ignore_seen_wrong:
#print('unseen correct', int(unseen['correct']))
sym_correct += unseen['correct']
elif seen['correct'] and unseen['correct']:
#print('unseen correct')
sym_correct += 1
if seen['correct'] and not unseen['correct']:
#print('unseen wrong')
sym_wrong += 1
for label, [d1] in asym_groups.items():
if not d1['seen']:
asym_correct += d1['correct']
asym_wrong += not d1['correct']
return [[sym_correct, sym_wrong], [asym_correct, asym_wrong]]
rows = iter(['50/50 (symmetric)', 'Unseen (asymmetric)'])
tabulate.tabulate(map(lambda x: [next(rows)] + x, two_by_two_table_sym_asym(large_symmetrics, large_asymmetrics)),
headers=["Unseen Correct", "Unseen Wrong"], tablefmt='html')
###Output
_____no_output_____
###Markdown
This allows us to perform Fisher's exact test to investigate whether the results are statistically significant. We see ($p \leq 0.05$) that the networks have higher prediction performance on inputs where the symmetric counter-parts were observed in training before.
###Code
from scipy.stats import fisher_exact
_, p_value = fisher_exact(two_by_two_table_sym_asym(large_symmetrics, large_asymmetrics, ignore_seen_wrong=False),
alternative='greater')
print(f"P-Value: {p_value:.3f}")
###Output
P-Value: 0.004
###Markdown
Synonyms
###Code
def get_num_synonyms(groups, seen_only=False, size_only=True):
synonym_groups = {}
for label, group in groups.items():
examples = [d for d in group if d['correct']]
if seen_only:
examples = [d for d in examples if d['seen']]
if examples:
synonym_groups[label] = set(tuple(d['message']) for d in examples)
if size_only:
synonym_groups[label] = len(synonym_groups[label])
return synonym_groups
import math
from scipy.stats import t
def get_mean_var_ci(sample, alpha=0.025):
sample = np.array(list(sample))
t_ci = t.ppf(1 - alpha, df=len(sample) - 1)
return sample.mean(), sample.var(), t_ci * sample.std() / math.sqrt(len(sample))
###Output
_____no_output_____
###Markdown
Average number of synonymous messages (that led to correct predictions) in the whole dataset.
###Code
syns_small = get_num_synonyms(group_by_sum(data_small))
syns_large = get_num_synonyms(group_by_sum(data_large))
mean, var, ci = get_mean_var_ci(syns_small.values())
print(f"avg # synonyms in small ds: {mean:.2f} +/- {ci:.2f}")
mean, var, ci = get_mean_var_ci(syns_large.values())
print(f"avg # synonyms in large ds: {mean:.2f} +/- {ci:.2f}")
###Output
avg # synonyms in small ds: 1.59 +/- 0.37
avg # synonyms in large ds: 1.63 +/- 0.24
###Markdown
Average number of synonymous messages (that led to correct predictions) in the training set only. Interestingly, the number is somewhat lower (but not statistically significantly so at $p=0.05$). This is interesting because it means that some messages produced by the sender were not observed by the receiver during training, but the receiver was still able to produce the correct prediction.
###Code
syns_small_seen = get_num_synonyms(group_by_sum(data_small), seen_only=True)
syns_large_seen = get_num_synonyms(group_by_sum(data_large), seen_only=True)
mean, var, ci = get_mean_var_ci(syns_small_seen.values())
print(f"avg # synonyms in small train data: {mean:.2f} +/- {ci:.2f}")
mean, var, ci = get_mean_var_ci(syns_large_seen.values())
print(f"avg # synonyms in large train data: {mean:.2f} +/- {ci:.2f}")
from scipy.stats import ttest_rel
_, p_value = ttest_rel(list(syns_small.values()), list(syns_small_seen.values()))
print("P-Value for small dataset: ", p_value)
_, p_value = ttest_rel(list(syns_large.values()), list(syns_large_seen.values()))
print("P-Value for large dataset: ", p_value)
###Output
avg # synonyms in small train data: 1.50 +/- 0.29
avg # synonyms in large train data: 1.59 +/- 0.24
P-Value for small dataset: 0.08309875128247367
P-Value for large dataset: 0.15906635012795697
###Markdown
There is no correlation between predictive performance and the number of synonyms when grouped by the label (sum of inputs), for either dataset.
###Code
def correlation_num_synonyms_correct(groups, test_only=False):
num_syns = []
accs_test = []
for label, group in groups.items():
num_seen = sum(d['seen'] for d in group)
num_unseen = sum(not d['seen'] for d in group)
if num_seen and num_unseen:
num_synonyms = get_num_synonyms({label: group}).get(label, None)
if num_synonyms is not None:
num_syns.append(num_synonyms)
if test_only:
accs_test.append(sum(d['correct'] for d in group if not d['seen']) / num_unseen)
else:
accs_test.append(sum(d['correct'] for d in group) / len(group))
return pearsonr(num_syns, accs_test)
print(correlation_num_synonyms_correct(group_by_sum(data_small)))
print(correlation_num_synonyms_correct(group_by_sum(data_large)))
###Output
(0.10766526492862784, 0.5782712044403318)
(0.1076507466077131, 0.42969492445549634)
###Markdown
Misc There is a moderate correlation between the number of training pairs for a sum and the capability to learn that sum; for the large dataset this correlation persists also for inputs unseen during training.
###Code
def correlation_by_train_size(groups, test_only=False):
train_set_sizes = []
accs_test = []
for label, group in groups.items():
num_seen = sum(d['seen'] for d in group)
num_unseen = sum(not d['seen'] for d in group)
if num_seen and (num_unseen or not test_only):
train_set_sizes.append(num_seen)
if test_only:
accs_test.append(sum(d['correct'] for d in group if not d['seen']) / num_unseen)
else:
accs_test.append(sum(d['correct'] for d in group) / len(group))
return pearsonr(train_set_sizes, accs_test)
corr, p_val = correlation_by_train_size(group_by_sum(data_small))
print(f"Correlation, small ds: {corr:.2f}, P-Value, {p_val:.3f}")
corr, p_val = correlation_by_train_size(group_by_sum(data_large))
print(f"Correlation, big ds: {corr:.2f}, P-Value, {p_val:.3f}")
corr, p_val = correlation_by_train_size(group_by_sum(data_small), test_only=True)
print(f"Correlation, small ds: {corr:.2f}, P-Value, {p_val:.3f}")
corr, p_val = correlation_by_train_size(group_by_sum(data_large), test_only=True)
print(f"Correlation, big ds: {corr:.2f}, P-Value, {p_val:.3f}")
###Output
Correlation, small ds: 0.24, P-Value, 0.177
Correlation, big ds: 0.46, P-Value, 0.000
###Markdown
The average edit distance between synonymous messages (that led to correct predictions) is around 2, which corresponds to e.g. flipping `[a, b]` to `[b, a]`. This largely corresponds to anecdotal observations (see end of notebook).
###Code
import textdistance
import itertools
def get_avg_edit_distance_synonyms(groups):
distances = []
for label, synonyms in groups.items():
for x, y in ((m1, m2) for m1, m2 in itertools.product(synonyms, repeat=2) if m1 != m2):
distances.append(textdistance.levenshtein.distance(x, y))
return distances
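# Quick illustration of the flip example from the accompanying text (a sketch): swapping the two
# symbols of a length-2 message costs two edits, e.g. textdistance.levenshtein.distance([1, 2], [2, 1]) == 2.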
synonyms_small = get_num_synonyms(group_by_sum(data_small), size_only=False)
mean, var, ci = get_mean_var_ci(get_avg_edit_distance_synonyms(synonyms_small))
print(f"avg edit distance for synonyms, small ds: {mean:.2f} +/- {ci:.2f}")
mean, var, ci = get_mean_var_ci(
get_avg_edit_distance_synonyms(get_num_synonyms(group_by_sum(data_large), size_only=False)))
print(f"avg edit distance for synonyms, large ds: {mean:.2f} +/- {ci:.2f}")
###Output
avg edit distance for synonyms, small ds: 2.47 +/- 0.36
avg edit distance for synonyms, large ds: 2.02 +/- 0.18
###Markdown
Average minimum edit distance between the messages for one sum and the messages for the next higher sum.
###Code
def get_distances(syn_groups):
#print(syn_groups)
keys = sorted(list(syn_groups.keys()))
#print(keys)
results = []
for g1, g2 in ((syn_groups.get(k,[]), syn_groups.get(k+1,[])) for k in keys):
try:
min_distance = min(textdistance.levenshtein.distance(m1, m2) for m1 in g1 for m2 in g2)
results.append(min_distance)
except ValueError:
pass
return results
distances_small = get_distances(get_num_synonyms(group_by_sum(data_small), False, size_only=False))
mean, var, ci = get_mean_var_ci(distances_small)
print(f"avg minimum edit distance between messages of two sums differing in at most 1, small ds: {mean:.2f} +/- {ci:.2f}")
distances_large = get_distances(get_num_synonyms(group_by_sum(data_large), False, size_only=False))
mean, var, ci = get_mean_var_ci(distances_large)
print(f"avg minimum edit distance between messages of two sums differing in at most 1, small ds: {mean:.2f} +/- {ci:.2f}")
###Output
avg minimum edit distance between messages of two sums differing in at most 1, small ds: 1.50 +/- 0.26
avg minimum edit distance between messages of two sums differing in at most 1, large ds: 1.42 +/- 0.20
###Markdown
The average distance between the expected label and the predicted label is about 1 for those examples that were not predicted correctly.
###Code
def get_errors(groups, test_only=False):
errors = []
for label, group in groups.items():
if test_only:
group = [d for d in group if not d['seen']]
errors.extend(abs(label - d['output']) for d in group if not d['correct'])
return errors
mean, var, ci = get_mean_var_ci(get_errors(group_by_sum(data_small)))
print(f"avg error: {mean:.2f} +/- {ci:.2f}")
mean, var, ci = get_mean_var_ci(get_errors(group_by_sum(data_large)))
print(f"avg error: {mean:.2f} +/- {ci:.2f}")
###Output
avg error: 1.07 +/- 0.06
avg error: 1.21 +/- 0.06
###Markdown
Visualisation
###Code
def inspect_by_sum(data, vocab):
for label, group in group_by_sum(data).items():
seen = [d for d in group if d['seen']]
unseen = [d for d in group if not d['seen']]
print(f"{label}: {len(group)} examples")
print(f"seen: {sum(d['correct'] for d in seen)}/{len(seen)}")
print(
tabulate.tabulate([(s['input'], normalise(s['message'], vocab), s['correct'], s['output']) for s in seen]))
print(f"unseen: {sum(d['correct'] for d in unseen)}/{len(unseen)}")
#print(f"unseen: {sum(d['correct'] for d in unseen)}/{len(unseen)}")
print(tabulate.tabulate(
[(s['input'], normalise(s['message'], vocab), s['correct'], s['output']) for s in unseen]))
print("----" * 20)
def inspect_by_summand(data, vocab):
for label, group in group_by_summand(data).items():
seen = [d for d in group if d['seen']]
unseen = [d for d in group if not d['seen']]
print(f"{label}: {len(group)} examples")
print(f"seen: {sum(d['correct'] for d in seen)}/{len(seen)}")
print(
tabulate.tabulate([(s['input'], normalise(s['message'], vocab), s['correct'], s['output']) for s in seen]))
print(f"unseen: {sum(d['correct'] for d in unseen)}/{len(unseen)}")
#print(f"unseen: {sum(d['correct'] for d in unseen)}/{len(unseen)}")
print(tabulate.tabulate(
[(s['input'], normalise(s['message'], vocab), s['correct'], s['output']) for s in unseen]))
print("----" * 20)
###Output
_____no_output_____
###Markdown
Grouping and visualising the datasets by label (sum). From left to right: input, message produced by the sender, whether the receiver's prediction is correct, and the actual prediction produced by the receiver; split by occurrence in training data (seen) and in evaluation data (unseen) and grouped by the sum of inputs.
###Code
print("Small ds")
inspect_by_sum(data_small, normalised_vocab_small)
print("Large ds")
inspect_by_sum(data_large, normalised_vocab_large)
###Output
Large ds
0: 1 examples
seen: 0/1
------ ----------- ----- -
(0, 0) 0 . 0 0 . . False 2
------ ----------- ----- -
unseen: 0/0
--------------------------------------------------------------------------------
1: 2 examples
seen: 0/1
------ ----------- ----- -
(0, 1) 0 . 0 0 . . False 2
------ ----------- ----- -
unseen: 0/1
------ ----------- ----- -
(1, 0) 0 . 0 0 . . False 2
------ ----------- ----- -
--------------------------------------------------------------------------------
2: 3 examples
seen: 2/2
------ ----------- ---- -
(0, 2) 0 . 0 0 . . True 2
(1, 1) 0 . 0 0 . . True 2
------ ----------- ---- -
unseen: 0/1
------ ----------- ----- -
(2, 0) 0 0 . 0 0 . False 7
------ ----------- ----- -
--------------------------------------------------------------------------------
3: 3 examples
seen: 0/1
------ ----------- ----- -
(1, 2) 0 . 0 0 . . False 2
------ ----------- ----- -
unseen: 0/2
------ ----------- ----- -
(0, 3) 0 . 0 0 0 . False 2
(2, 1) 0 0 . 0 0 . False 7
------ ----------- ----- -
--------------------------------------------------------------------------------
4: 3 examples
seen: 0/1
------ ----------- ----- -
(4, 0) 0 0 . 0 0 . False 7
------ ----------- ----- -
unseen: 0/2
------ ----------- ----- -
(1, 3) 0 . 0 0 0 . False 2
(3, 1) 0 0 . 0 0 . False 7
------ ----------- ----- -
--------------------------------------------------------------------------------
5: 5 examples
seen: 0/3
------ ----------- ----- -
(0, 5) 0 0 . 0 0 . False 7
(2, 3) 0 0 . 0 0 . False 7
(3, 2) 0 0 . 0 0 . False 7
------ ----------- ----- -
unseen: 0/2
------ ----------- ----- -
(1, 4) 0 0 . 0 0 . False 7
(5, 0) 0 0 . 0 0 . False 7
------ ----------- ----- -
--------------------------------------------------------------------------------
6: 5 examples
seen: 0/2
------ ----------- ----- -
(5, 1) 0 0 . 0 0 . False 7
(6, 0) 0 0 . 0 0 . False 7
------ ----------- ----- -
unseen: 0/3
------ ----------- ----- -
(0, 6) 0 0 . 0 0 . False 7
(1, 5) 0 0 . 0 0 . False 7
(4, 2) 0 0 . 0 0 . False 7
------ ----------- ----- -
--------------------------------------------------------------------------------
7: 6 examples
seen: 5/5
------ ----------- ---- -
(0, 7) 0 0 . 0 0 . True 7
(1, 6) 0 0 . 0 0 . True 7
(2, 5) 0 0 . 0 0 . True 7
(5, 2) 0 0 . 0 0 . True 7
(6, 1) 0 0 . 0 0 . True 7
------ ----------- ---- -
unseen: 1/1
------ ----------- ---- -
(3, 4) 0 0 . 0 0 . True 7
------ ----------- ---- -
--------------------------------------------------------------------------------
8: 7 examples
seen: 0/5
------ ----------- ----- -
(2, 6) 0 0 . 0 0 . False 7
(3, 5) 0 0 . 0 0 . False 7
(4, 4) 0 0 . 0 0 . False 7
(5, 3) 0 0 . 0 0 . False 7
(6, 2) 0 0 . 0 0 . False 7
------ ----------- ----- -
unseen: 0/2
------ ----------- ----- -
(0, 8) 0 0 . 0 0 . False 7
(1, 7) 0 0 . 0 0 . False 7
------ ----------- ----- -
--------------------------------------------------------------------------------
9: 6 examples
seen: 0/4
------ ----------- ----- -
(0, 9) 0 0 . 0 0 . False 7
(3, 6) 0 0 . 0 0 . False 7
(6, 3) 0 0 . 0 0 . False 7
(7, 2) 0 0 . 0 0 . False 7
------ ----------- ----- -
unseen: 0/2
------ ----------- ----- -
(2, 7) 0 0 . 0 0 . False 7
(5, 4) 0 0 . 0 0 . False 7
------ ----------- ----- -
--------------------------------------------------------------------------------
10: 7 examples
seen: 0/3
------- ----------- ----- --
(0, 10) 1 0 0 0 0 . False 12
(4, 6) 1 0 0 0 0 . False 12
(9, 1) 1 0 0 0 0 . False 12
------- ----------- ----- --
unseen: 0/4
------ ----------- ----- --
(1, 9) 0 0 . 0 0 . False 7
(2, 8) 1 0 0 0 0 . False 12
(3, 7) 1 0 0 0 0 . False 12
(5, 5) 0 0 . 0 0 . False 7
------ ----------- ----- --
--------------------------------------------------------------------------------
11: 7 examples
seen: 0/4
------- ----------- ----- --
(1, 10) 1 0 0 0 0 . False 12
(3, 8) 1 0 0 0 0 . False 12
(8, 3) 1 0 0 0 0 . False 12
(9, 2) 1 0 0 0 0 . False 12
------- ----------- ----- --
unseen: 0/3
------- ----------- ----- --
(5, 6) 1 0 0 0 0 . False 12
(10, 1) 1 1 0 0 0 . False 13
(11, 0) 1 0 0 0 0 . False 12
------- ----------- ----- --
--------------------------------------------------------------------------------
12: 10 examples
seen: 7/8
------- ----------- ----- --
(0, 12) 1 0 0 0 0 . True 12
(3, 9) 1 0 0 0 0 . True 12
(5, 7) 1 0 0 0 0 . True 12
(6, 6) 1 1 0 0 0 . False 13
(7, 5) 1 0 0 0 0 . True 12
(8, 4) 1 0 0 0 0 . True 12
(9, 3) 1 0 0 0 0 . True 12
(11, 1) 1 0 0 0 0 . True 12
------- ----------- ----- --
unseen: 0/2
------- ----------- ----- --
(2, 10) 1 1 0 0 0 . False 13
(4, 8) 1 1 1 0 0 . False 13
------- ----------- ----- --
--------------------------------------------------------------------------------
13: 11 examples
seen: 8/8
------- ----------- ---- --
(0, 13) 1 1 0 0 0 . True 13
(1, 12) 1 1 0 0 0 . True 13
(3, 10) 1 1 0 0 0 . True 13
(6, 7) 1 1 1 0 0 . True 13
(7, 6) 1 1 1 0 0 . True 13
(9, 4) 1 1 0 0 0 . True 13
(10, 3) 1 1 1 0 0 . True 13
(12, 1) 1 1 1 0 0 . True 13
------- ----------- ---- --
unseen: 1/3
------- ----------- ----- --
(4, 9) 1 1 1 0 0 . True 13
(5, 8) 1 1 1 1 0 . False 14
(11, 2) 1 0 0 0 0 . False 12
------- ----------- ----- --
--------------------------------------------------------------------------------
14: 13 examples
seen: 6/11
------- ----------- ----- --
(0, 14) 1 1 1 1 1 . True 14
(1, 13) 1 1 1 0 0 . False 13
(3, 11) 1 1 1 0 0 . False 13
(4, 10) 1 1 1 1 1 . True 14
(5, 9) 1 1 1 1 1 . True 14
(7, 7) 1 1 1 1 1 . True 14
(8, 6) 1 1 1 1 1 . True 14
(9, 5) 1 1 1 0 0 . False 13
(11, 3) 1 1 1 0 0 . False 13
(12, 2) 1 1 1 0 0 . False 13
(14, 0) 1 1 1 1 1 . True 14
------- ----------- ----- --
unseen: 2/2
------- ----------- ---- --
(2, 12) 1 1 1 1 1 . True 14
(6, 8) 1 1 1 1 0 . True 14
------- ----------- ---- --
--------------------------------------------------------------------------------
15: 9 examples
seen: 0/8
------- ----------- ----- --
(0, 15) 1 1 1 1 1 . False 14
(1, 14) 1 1 1 1 1 . False 14
(3, 12) 1 1 1 1 1 . False 14
(4, 11) 1 1 1 1 1 . False 14
(5, 10) 1 1 1 1 3 . False 16
(7, 8) 1 1 1 1 1 . False 14
(14, 1) 1 1 1 1 3 . False 16
(15, 0) 1 1 1 1 1 . False 14
------- ----------- ----- --
unseen: 0/1
------ ----------- ----- --
(9, 6) 1 1 1 1 3 . False 16
------ ----------- ----- --
--------------------------------------------------------------------------------
16: 12 examples
seen: 9/9
------- ----------- ---- --
(2, 14) 1 1 1 3 1 . True 16
(4, 12) 1 1 1 1 3 . True 16
(5, 11) 1 1 1 3 1 . True 16
(7, 9) 1 1 1 3 1 . True 16
(8, 8) 1 1 1 1 3 . True 16
(9, 7) 1 1 1 1 3 . True 16
(10, 6) 1 1 1 3 1 . True 16
(15, 1) 1 1 1 3 1 . True 16
(16, 0) 1 1 1 3 1 . True 16
------- ----------- ---- --
unseen: 0/3
------- ----------- ----- --
(1, 15) 1 1 1 1 1 . False 14
(6, 10) 2 1 0 0 0 . False 17
(11, 5) 1 1 1 1 1 . False 14
------- ----------- ----- --
--------------------------------------------------------------------------------
17: 15 examples
seen: 12/12
------- ----------- ---- --
(0, 17) 2 1 0 0 0 . True 17
(1, 16) 2 1 0 0 0 . True 17
(2, 15) 2 1 0 0 0 . True 17
(3, 14) 2 1 0 0 0 . True 17
(4, 13) 2 1 0 0 0 . True 17
(6, 11) 2 1 0 0 0 . True 17
(7, 10) 2 1 0 0 0 . True 17
(8, 9) 2 1 0 0 0 . True 17
(10, 7) 2 1 0 0 0 . True 17
(12, 5) 2 1 0 0 0 . True 17
(13, 4) 2 1 0 0 0 . True 17
(14, 3) 2 1 0 0 0 . True 17
------- ----------- ---- --
unseen: 0/3
------- ----------- ----- --
(9, 8) 1 1 1 3 1 . False 16
(11, 6) 1 1 1 3 1 . False 16
(16, 1) 1 1 1 3 1 . False 16
------- ----------- ----- --
--------------------------------------------------------------------------------
18: 14 examples
seen: 11/11
------- ----------- ---- --
(0, 18) 2 1 1 0 0 . True 18
(1, 17) 2 1 1 0 0 . True 18
(4, 14) 2 1 1 0 0 . True 18
(5, 13) 2 1 1 0 0 . True 18
(6, 12) 2 1 1 0 0 . True 18
(11, 7) 2 1 1 0 0 . True 18
(12, 6) 2 1 1 0 0 . True 18
(13, 5) 2 1 1 0 0 . True 18
(14, 4) 2 1 1 0 0 . True 18
(15, 3) 2 1 1 0 0 . True 18
(18, 0) 2 1 1 0 0 . True 18
------- ----------- ---- --
unseen: 1/3
------- ----------- ----- --
(8, 10) 2 1 1 0 0 . True 18
(9, 9) 1 1 1 3 1 . False 16
(17, 1) 2 1 1 1 0 . False 19
------- ----------- ----- --
--------------------------------------------------------------------------------
19: 16 examples
seen: 8/8
------- ----------- ---- --
(0, 19) 2 1 1 1 0 . True 19
(5, 14) 2 1 1 1 0 . True 19
(7, 12) 2 1 1 1 0 . True 19
(9, 10) 2 1 1 1 0 . True 19
(12, 7) 2 1 1 1 0 . True 19
(17, 2) 2 1 1 1 0 . True 19
(18, 1) 2 1 1 1 0 . True 19
(19, 0) 2 1 1 1 0 . True 19
------- ----------- ---- --
unseen: 1/8
------- ----------- ----- --
(1, 18) 2 1 1 0 0 . False 18
(2, 17) 2 1 1 0 0 . False 18
(3, 16) 2 1 1 0 0 . False 18
(6, 13) 2 1 1 0 0 . False 18
(8, 11) 2 1 1 1 1 . False 20
(10, 9) 2 1 1 1 1 . False 20
(13, 6) 2 1 1 1 1 . False 20
(14, 5) 2 1 1 1 0 . True 19
------- ----------- ----- --
--------------------------------------------------------------------------------
20: 16 examples
seen: 11/12
-------- ----------- ----- --
(6, 14) 2 1 1 1 1 . True 20
(8, 12) 2 1 1 1 1 . True 20
(9, 11) 2 1 1 1 1 . True 20
(10, 10) 2 1 1 1 1 . True 20
(11, 9) 2 1 1 1 1 . True 20
(12, 8) 2 1 1 1 1 . True 20
(14, 6) 2 1 1 1 1 . True 20
(15, 5) 2 1 1 1 1 . True 20
(16, 4) 2 1 1 1 1 . True 20
(17, 3) 2 1 1 1 1 . True 20
(18, 2) 2 1 1 1 0 . False 19
(19, 1) 2 1 1 1 1 . True 20
-------- ----------- ----- --
unseen: 0/4
------- ----------- ----- --
(1, 19) 2 1 1 1 0 . False 19
(4, 16) 2 1 1 1 0 . False 19
(5, 15) 2 1 1 1 3 . False 21
(7, 13) 2 1 1 1 0 . False 19
------- ----------- ----- --
--------------------------------------------------------------------------------
21: 16 examples
seen: 12/13
-------- ----------- ----- --
(1, 20) 2 1 1 1 3 . True 21
(2, 19) 2 1 1 3 1 . False 22
(4, 17) 2 1 1 1 3 . True 21
(5, 16) 2 1 1 1 3 . True 21
(9, 12) 2 1 1 1 3 . True 21
(10, 11) 2 1 1 1 3 . True 21
(11, 10) 2 1 1 1 3 . True 21
(12, 9) 2 1 1 1 3 . True 21
(13, 8) 2 1 1 1 3 . True 21
(15, 6) 2 1 1 1 3 . True 21
(16, 5) 2 1 1 1 3 . True 21
(19, 2) 2 1 1 1 3 . True 21
(21, 0) 2 1 1 1 3 . True 21
-------- ----------- ----- --
unseen: 2/3
------- ----------- ----- --
(6, 15) 2 1 1 1 3 . True 21
(7, 14) 2 1 1 3 1 . False 22
(17, 4) 2 1 1 1 3 . True 21
------- ----------- ----- --
--------------------------------------------------------------------------------
22: 17 examples
seen: 16/16
-------- ----------- ---- --
(0, 22) 2 1 1 3 1 . True 22
(1, 21) 2 1 1 3 1 . True 22
(2, 20) 2 1 1 3 1 . True 22
(4, 18) 2 1 1 3 1 . True 22
(5, 17) 2 1 1 3 1 . True 22
(6, 16) 2 1 1 3 4 . True 22
(9, 13) 2 1 1 3 1 . True 22
(10, 12) 2 1 1 3 1 . True 22
(11, 11) 2 1 1 3 1 . True 22
(12, 10) 2 1 1 3 1 . True 22
(13, 9) 2 1 1 3 1 . True 22
(14, 8) 2 1 1 3 1 . True 22
(15, 7) 2 1 1 3 1 . True 22
(18, 4) 2 1 1 3 1 . True 22
(19, 3) 2 1 1 3 1 . True 22
(22, 0) 2 1 1 3 1 . True 22
-------- ----------- ---- --
unseen: 1/1
------- ----------- ---- --
(7, 15) 2 1 1 3 1 . True 22
------- ----------- ---- --
--------------------------------------------------------------------------------
23: 17 examples
seen: 11/13
-------- ----------- ----- --
(3, 20) 2 1 3 4 0 . False 24
(4, 19) 2 1 3 1 0 . True 23
(8, 15) 2 1 3 1 0 . True 23
(9, 14) 2 1 3 1 0 . True 23
(10, 13) 2 1 3 1 0 . True 23
(13, 10) 2 1 3 1 0 . True 23
(14, 9) 2 1 3 1 0 . True 23
(15, 8) 2 1 3 1 0 . True 23
(18, 5) 2 1 1 3 1 . False 22
(19, 4) 2 1 3 1 0 . True 23
(20, 3) 2 1 3 1 0 . True 23
(21, 2) 2 1 3 1 0 . True 23
(22, 1) 2 1 3 1 0 . True 23
-------- ----------- ----- --
unseen: 2/4
-------- ----------- ----- --
(7, 16) 2 1 3 4 0 . False 24
(11, 12) 2 1 3 1 0 . True 23
(17, 6) 2 1 3 4 0 . False 24
(23, 0) 2 1 3 1 0 . True 23
-------- ----------- ----- --
--------------------------------------------------------------------------------
24: 19 examples
seen: 16/16
-------- ----------- ---- --
(0, 24) 2 1 3 4 0 . True 24
(2, 22) 2 1 3 4 0 . True 24
(4, 20) 2 1 3 4 0 . True 24
(5, 19) 2 1 3 4 0 . True 24
(6, 18) 2 1 3 4 0 . True 24
(9, 15) 2 1 3 4 0 . True 24
(10, 14) 2 1 3 4 0 . True 24
(11, 13) 2 1 3 4 0 . True 24
(16, 8) 2 1 3 4 0 . True 24
(17, 7) 2 1 3 4 0 . True 24
(18, 6) 2 1 3 4 0 . True 24
(19, 5) 2 1 3 4 0 . True 24
(21, 3) 2 1 3 4 0 . True 24
(22, 2) 2 1 3 4 0 . True 24
(23, 1) 2 1 3 4 0 . True 24
(24, 0) 2 1 3 4 0 . True 24
-------- ----------- ---- --
unseen: 2/3
------- ----------- ----- --
(3, 21) 2 1 3 4 0 . True 24
(7, 17) 2 3 1 0 0 . False 25
(8, 16) 2 1 3 4 0 . True 24
------- ----------- ----- --
--------------------------------------------------------------------------------
25: 22 examples
seen: 14/14
-------- ----------- ---- --
(1, 24) 2 3 1 0 0 . True 25
(2, 23) 2 3 1 0 0 . True 25
(5, 20) 2 3 1 0 0 . True 25
(7, 18) 2 3 1 0 0 . True 25
(8, 17) 2 3 1 0 0 . True 25
(10, 15) 2 3 1 0 0 . True 25
(12, 13) 2 3 1 0 0 . True 25
(13, 12) 2 3 1 0 0 . True 25
(14, 11) 2 3 1 0 0 . True 25
(15, 10) 2 3 1 0 0 . True 25
(16, 9) 2 3 1 0 0 . True 25
(20, 5) 2 3 1 0 0 . True 25
(24, 1) 2 3 1 0 0 . True 25
(25, 0) 2 3 1 0 0 . True 25
-------- ----------- ---- --
unseen: 4/8
-------- ----------- ----- --
(0, 25) 2 1 3 4 0 . False 24
(3, 22) 2 1 3 4 0 . False 24
(6, 19) 2 3 1 0 0 . True 25
(11, 14) 2 3 1 0 0 . True 25
(18, 7) 2 1 3 4 0 . False 24
(21, 4) 2 3 1 0 0 . True 25
(22, 3) 2 1 3 4 0 . False 24
(23, 2) 2 3 1 0 0 . True 25
-------- ----------- ----- --
--------------------------------------------------------------------------------
26: 19 examples
seen: 13/13
-------- ----------- ---- --
(0, 26) 2 3 4 0 0 . True 26
(1, 25) 2 3 4 0 0 . True 26
(2, 24) 2 3 4 0 0 . True 26
(5, 21) 2 3 4 0 0 . True 26
(7, 19) 2 3 4 0 0 . True 26
(12, 14) 2 3 4 0 0 . True 26
(15, 11) 2 3 4 0 0 . True 26
(16, 10) 2 3 4 0 0 . True 26
(17, 9) 2 3 4 0 0 . True 26
(19, 7) 2 3 4 0 0 . True 26
(20, 6) 2 3 4 0 0 . True 26
(22, 4) 2 3 4 0 0 . True 26
(26, 0) 2 3 4 0 0 . True 26
-------- ----------- ---- --
unseen: 2/6
-------- ----------- ----- --
(9, 17) 2 3 1 0 0 . False 25
(10, 16) 2 3 4 0 0 . True 26
(14, 12) 2 3 4 1 0 . False 27
(18, 8) 2 3 1 0 0 . False 25
(21, 5) 2 3 1 0 0 . False 25
(23, 3) 2 3 4 0 0 . True 26
-------- ----------- ----- --
--------------------------------------------------------------------------------
27: 17 examples
seen: 11/11
-------- ----------- ---- --
(1, 26) 2 3 4 1 0 . True 27
(2, 25) 2 3 4 1 0 . True 27
(3, 24) 2 3 4 1 0 . True 27
(4, 23) 2 3 4 1 0 . True 27
(7, 20) 2 3 4 1 0 . True 27
(10, 17) 2 3 4 1 0 . True 27
(20, 7) 2 3 4 1 0 . True 27
(21, 6) 2 3 4 1 0 . True 27
(23, 4) 2 3 4 1 0 . True 27
(24, 3) 2 3 4 1 0 . True 27
(25, 2) 2 3 4 1 0 . True 27
-------- ----------- ---- --
unseen: 2/6
-------- ----------- ----- --
(0, 27) 2 3 4 0 0 . False 26
(9, 18) 2 3 4 1 0 . True 27
(11, 16) 2 3 4 0 0 . False 26
(13, 14) 2 3 4 1 0 . True 27
(15, 12) 2 3 4 1 1 . False 28
(18, 9) 2 3 4 0 0 . False 26
-------- ----------- ----- --
--------------------------------------------------------------------------------
28: 22 examples
seen: 16/17
-------- ----------- ----- --
(0, 28) 2 3 4 1 1 . True 28
(2, 26) 2 3 4 1 0 . False 27
(8, 20) 2 3 4 1 1 . True 28
(9, 19) 2 3 4 1 1 . True 28
(10, 18) 2 3 4 1 1 . True 28
(11, 17) 2 3 4 1 1 . True 28
(12, 16) 2 3 4 1 1 . True 28
(13, 15) 2 3 4 1 1 . True 28
(14, 14) 2 3 4 1 1 . True 28
(15, 13) 2 3 4 1 1 . True 28
(16, 12) 2 3 4 1 1 . True 28
(17, 11) 2 3 4 1 1 . True 28
(18, 10) 2 3 4 1 1 . True 28
(21, 7) 2 3 4 1 1 . True 28
(22, 6) 2 3 4 1 1 . True 28
(23, 5) 2 3 4 1 1 . True 28
(26, 2) 2 3 4 1 1 . True 28
-------- ----------- ----- --
unseen: 2/5
------- ----------- ----- --
(4, 24) 2 3 4 2 1 . False 29
(20, 8) 2 3 4 2 1 . False 29
(24, 4) 2 3 4 1 1 . True 28
(25, 3) 2 3 4 1 1 . True 28
(28, 0) 2 3 4 0 0 . False 26
------- ----------- ----- --
--------------------------------------------------------------------------------
29: 25 examples
seen: 22/22
-------- ----------- ---- --
(0, 29) 2 3 4 2 1 . True 29
(2, 27) 2 3 4 2 1 . True 29
(4, 25) 2 3 4 2 1 . True 29
(5, 24) 2 3 4 2 1 . True 29
(6, 23) 2 3 4 2 1 . True 29
(7, 22) 2 3 4 2 1 . True 29
(8, 21) 2 3 4 2 1 . True 29
(9, 20) 2 3 4 2 1 . True 29
(13, 16) 2 3 4 2 1 . True 29
(15, 14) 2 3 4 2 1 . True 29
(16, 13) 2 3 4 2 1 . True 29
(17, 12) 2 3 4 2 1 . True 29
(19, 10) 2 3 4 2 1 . True 29
(20, 9) 2 3 4 2 1 . True 29
(21, 8) 2 3 4 2 1 . True 29
(22, 7) 2 3 4 2 1 . True 29
(23, 6) 2 3 4 2 1 . True 29
(25, 4) 2 3 4 2 1 . True 29
(26, 3) 2 3 4 2 1 . True 29
(27, 2) 2 3 4 2 1 . True 29
(28, 1) 2 3 4 2 1 . True 29
(29, 0) 2 3 4 2 1 . True 29
-------- ----------- ---- --
unseen: 1/3
-------- ----------- ----- --
(1, 28) 2 3 4 2 4 . False 30
(11, 18) 2 3 4 2 1 . True 29
(12, 17) 2 3 4 1 1 . False 28
-------- ----------- ----- --
--------------------------------------------------------------------------------
30: 20 examples
seen: 10/15
-------- ----------- ----- --
(0, 30) 2 3 4 2 4 . True 30
(3, 27) 2 3 4 2 4 . True 30
(4, 26) 2 3 4 2 1 . False 29
(5, 25) 2 3 4 2 4 . True 30
(9, 21) 2 3 4 2 4 . True 30
(11, 19) 2 3 4 2 4 . True 30
(15, 15) 2 3 4 2 1 . False 29
(16, 14) 2 3 4 2 4 . True 30
(18, 12) 2 3 4 2 1 . False 29
(19, 11) 2 3 4 2 1 . False 29
(23, 7) 2 3 4 2 4 . True 30
(24, 6) 2 3 4 2 4 . True 30
(27, 3) 2 3 4 2 4 . True 30
(28, 2) 2 3 4 2 4 . True 30
(29, 1) 2 3 4 2 1 . False 29
-------- ----------- ----- --
unseen: 3/5
-------- ----------- ----- --
(2, 28) 2 3 4 2 4 . True 30
(6, 24) 2 3 4 2 4 . True 30
(17, 13) 2 3 4 2 1 . False 29
(25, 5) 2 3 4 2 4 . True 30
(30, 0) 2 3 4 2 1 . False 29
-------- ----------- ----- --
--------------------------------------------------------------------------------
31: 22 examples
seen: 15/17
-------- ----------- ----- --
(1, 30) 2 3 4 2 3 . True 31
(4, 27) 2 3 4 2 3 . True 31
(5, 26) 2 3 4 2 3 . True 31
(7, 24) 2 3 4 2 3 . True 31
(12, 19) 2 3 4 2 3 . True 31
(13, 18) 2 3 4 2 3 . True 31
(14, 17) 2 3 4 2 3 . True 31
(15, 16) 2 3 4 2 3 . True 31
(16, 15) 2 3 4 2 3 . True 31
(17, 14) 2 3 4 2 3 . True 31
(18, 13) 2 3 4 2 4 . False 30
(19, 12) 2 3 4 2 3 . True 31
(20, 11) 2 3 4 2 3 . True 31
(21, 10) 2 3 4 2 3 . True 31
(24, 7) 2 3 4 2 3 . True 31
(25, 6) 2 3 4 2 3 . True 31
(30, 1) 2 3 4 2 1 . False 29
-------- ----------- ----- --
unseen: 1/5
-------- ----------- ----- --
(6, 25) 2 3 4 2 4 . False 30
(9, 22) 2 3 4 2 3 . True 31
(11, 20) 2 3 4 3 4 . False 32
(23, 8) 2 3 5 3 4 . False 33
(29, 2) 2 3 4 3 4 . False 32
-------- ----------- ----- --
--------------------------------------------------------------------------------
32: 26 examples
seen: 17/18
-------- ----------- ----- --
(0, 32) 2 3 4 3 4 . True 32
(1, 31) 2 3 4 3 4 . True 32
(2, 30) 2 3 4 3 3 . True 32
(4, 28) 2 3 5 4 2 . True 32
(7, 25) 2 3 4 3 4 . True 32
(8, 24) 2 3 4 3 5 . True 32
(9, 23) 2 3 4 3 4 . True 32
(11, 21) 2 3 5 4 2 . True 32
(14, 18) 2 3 4 3 4 . True 32
(15, 17) 2 3 4 3 4 . True 32
(17, 15) 2 3 4 3 4 . True 32
(18, 14) 2 3 4 2 3 . False 31
(20, 12) 2 3 4 3 3 . True 32
(22, 10) 2 3 4 3 4 . True 32
(23, 9) 2 3 5 4 2 . True 32
(26, 6) 2 3 4 3 3 . True 32
(27, 5) 2 3 4 3 4 . True 32
(32, 0) 2 3 5 4 2 . True 32
-------- ----------- ----- --
unseen: 7/8
-------- ----------- ----- --
(10, 22) 2 3 4 3 4 . True 32
(13, 19) 2 3 4 3 4 . True 32
(19, 13) 2 3 5 4 2 . True 32
(21, 11) 2 3 4 2 3 . False 31
(24, 8) 2 3 4 3 4 . True 32
(28, 4) 2 3 4 3 4 . True 32
(30, 2) 2 3 5 4 2 . True 32
(31, 1) 2 3 4 3 4 . True 32
-------- ----------- ----- --
--------------------------------------------------------------------------------
33: 20 examples
seen: 12/17
-------- ----------- ----- --
(2, 31) 2 3 5 3 4 . True 33
(4, 29) 2 3 5 3 5 . False 34
(6, 27) 2 3 5 3 4 . True 33
(7, 26) 2 3 5 3 4 . True 33
(8, 25) 2 3 5 3 4 . True 33
(9, 24) 2 3 5 3 4 . True 33
(13, 20) 2 3 5 3 4 . True 33
(15, 18) 2 3 5 3 4 . True 33
(17, 16) 2 3 5 3 4 . True 33
(18, 15) 2 3 4 2 3 . False 31
(20, 13) 2 3 5 3 4 . True 33
(22, 11) 2 3 5 3 4 . True 33
(23, 10) 2 3 5 3 5 . False 34
(27, 6) 2 3 5 3 4 . True 33
(28, 5) 2 3 5 4 2 . False 32
(30, 3) 2 3 5 3 4 . True 33
(32, 1) 2 3 5 4 2 . False 32
-------- ----------- ----- --
unseen: 0/3
------- ----------- ----- --
(5, 28) 2 3 5 3 5 . False 34
(24, 9) 2 3 5 4 2 . False 32
(25, 8) 2 3 4 3 3 . False 32
------- ----------- ----- --
--------------------------------------------------------------------------------
34: 31 examples
seen: 25/26
-------- ----------- ----- --
(0, 34) 2 3 5 6 3 . True 34
(1, 33) 2 3 5 6 3 . True 34
(2, 32) 2 3 5 6 3 . True 34
(4, 30) 2 3 5 3 5 . True 34
(5, 29) 2 3 5 6 3 . True 34
(6, 28) 2 3 5 6 3 . True 34
(8, 26) 2 3 5 6 3 . True 34
(10, 24) 2 3 5 6 3 . True 34
(11, 23) 2 3 5 3 5 . True 34
(12, 22) 2 3 5 3 5 . True 34
(13, 21) 2 3 5 6 3 . True 34
(14, 20) 2 3 5 3 5 . True 34
(15, 19) 2 3 5 6 3 . True 34
(16, 18) 2 3 5 6 3 . True 34
(17, 17) 2 3 5 6 3 . True 34
(19, 15) 2 3 5 6 3 . True 34
(20, 14) 2 3 5 6 3 . True 34
(21, 13) 2 3 5 6 3 . True 34
(22, 12) 2 3 5 3 5 . True 34
(23, 11) 2 3 5 6 7 . False 36
(25, 9) 2 3 5 3 5 . True 34
(26, 8) 2 3 5 6 3 . True 34
(27, 7) 2 3 5 6 3 . True 34
(29, 5) 2 3 5 6 3 . True 34
(30, 4) 2 3 5 6 3 . True 34
(31, 3) 2 3 5 6 3 . True 34
-------- ----------- ----- --
unseen: 4/5
-------- ----------- ----- --
(3, 31) 2 3 5 6 3 . True 34
(7, 27) 2 3 5 6 3 . True 34
(18, 16) 2 3 5 3 4 . False 33
(24, 10) 2 3 5 3 5 . True 34
(32, 2) 2 3 5 6 3 . True 34
-------- ----------- ----- --
--------------------------------------------------------------------------------
35: 23 examples
seen: 14/15
-------- ----------- ----- --
(0, 35) 2 3 5 6 5 . True 35
(1, 34) 2 3 5 6 5 . True 35
(4, 31) 2 3 5 6 5 . True 35
(7, 28) 2 3 5 6 5 . True 35
(9, 26) 2 3 5 6 5 . True 35
(12, 23) 2 3 5 6 5 . True 35
(14, 21) 2 3 5 6 5 . True 35
(15, 20) 2 3 5 6 5 . True 35
(19, 16) 2 3 5 6 5 . True 35
(20, 15) 2 3 5 6 5 . True 35
(21, 14) 2 3 5 6 5 . True 35
(28, 7) 2 3 5 6 5 . True 35
(29, 6) 2 3 5 6 5 . True 35
(32, 3) 2 3 5 6 5 . True 35
(34, 1) 2 3 5 6 3 . False 34
-------- ----------- ----- --
unseen: 2/8
-------- ----------- ----- --
(2, 33) 2 3 5 6 7 . False 36
(13, 22) 2 3 5 6 7 . False 36
(18, 17) 2 3 5 6 3 . False 34
(24, 11) 2 3 5 6 3 . False 34
(26, 9) 2 3 5 6 5 . True 35
(27, 8) 2 3 5 6 5 . True 35
(33, 2) 2 3 5 6 7 . False 36
(35, 0) 2 3 5 6 7 . False 36
-------- ----------- ----- --
--------------------------------------------------------------------------------
36: 32 examples
seen: 21/21
-------- ----------- ---- --
(1, 35) 2 3 5 6 7 . True 36
(2, 34) 2 3 5 6 7 . True 36
(5, 31) 2 3 5 6 7 . True 36
(8, 28) 2 3 5 6 7 . True 36
(9, 27) 2 3 5 6 7 . True 36
(10, 26) 2 3 5 6 7 . True 36
(11, 25) 2 3 5 6 7 . True 36
(13, 23) 2 3 5 6 7 . True 36
(15, 21) 2 3 5 6 7 . True 36
(18, 18) 2 3 5 6 7 . True 36
(19, 17) 2 3 5 6 7 . True 36
(20, 16) 2 3 5 6 7 . True 36
(22, 14) 2 3 5 6 7 . True 36
(24, 12) 2 3 5 6 7 . True 36
(27, 9) 2 3 5 6 7 . True 36
(28, 8) 2 3 5 6 7 . True 36
(30, 6) 2 3 5 6 7 . True 36
(31, 5) 2 3 5 6 7 . True 36
(32, 4) 2 3 5 6 7 . True 36
(33, 3) 2 3 5 6 7 . True 36
(36, 0) 2 3 5 6 7 . True 36
-------- ----------- ---- --
unseen: 7/11
-------- ----------- ----- --
(0, 36) 2 3 5 6 7 . True 36
(4, 32) 2 3 5 6 5 . False 35
(6, 30) 2 3 5 6 7 . True 36
(7, 29) 2 3 5 6 7 . True 36
(12, 24) 2 3 5 6 5 . False 35
(21, 15) 2 3 5 6 7 . True 36
(23, 13) 2 3 5 6 5 . False 35
(25, 11) 2 3 5 6 7 . True 36
(26, 10) 2 3 5 6 7 . True 36
(34, 2) 2 3 5 7 5 . False 37
(35, 1) 2 3 5 6 7 . True 36
-------- ----------- ----- --
--------------------------------------------------------------------------------
37: 28 examples
seen: 21/21
-------- ----------- ---- --
(0, 37) 2 3 5 7 5 . True 37
(2, 35) 2 3 5 7 5 . True 37
(3, 34) 2 3 5 7 5 . True 37
(4, 33) 2 3 5 7 5 . True 37
(7, 30) 2 3 5 7 5 . True 37
(8, 29) 2 3 5 7 5 . True 37
(10, 27) 2 3 5 7 5 . True 37
(13, 24) 2 3 5 7 5 . True 37
(17, 20) 2 3 5 7 5 . True 37
(19, 18) 2 3 5 7 5 . True 37
(20, 17) 2 3 5 7 5 . True 37
(21, 16) 2 3 5 7 5 . True 37
(24, 13) 2 3 5 7 5 . True 37
(25, 12) 2 3 5 7 5 . True 37
(26, 11) 2 3 5 7 5 . True 37
(28, 9) 2 3 5 7 5 . True 37
(29, 8) 2 3 5 7 5 . True 37
(31, 6) 2 3 5 7 5 . True 37
(32, 5) 2 3 5 7 5 . True 37
(34, 3) 2 3 5 7 5 . True 37
(37, 0) 2 3 5 7 5 . True 37
-------- ----------- ---- --
unseen: 4/7
-------- ----------- ----- --
(1, 36) 2 3 5 7 7 . False 38
(5, 32) 2 3 5 6 7 . False 36
(6, 31) 2 3 5 7 5 . True 37
(12, 25) 2 3 5 7 5 . True 37
(22, 15) 2 3 5 7 5 . True 37
(30, 7) 2 3 5 7 7 . False 38
(33, 4) 2 3 5 7 5 . True 37
-------- ----------- ----- --
--------------------------------------------------------------------------------
38: 31 examples
seen: 23/23
-------- ----------- ---- --
(0, 38) 2 3 5 7 7 . True 38
(3, 35) 2 3 5 7 7 . True 38
(4, 34) 2 3 5 7 7 . True 38
(7, 31) 2 3 5 7 7 . True 38
(8, 30) 2 3 5 7 7 . True 38
(9, 29) 2 3 5 7 7 . True 38
(13, 25) 2 3 5 7 7 . True 38
(14, 24) 2 3 5 7 7 . True 38
(16, 22) 2 3 5 7 7 . True 38
(18, 20) 2 3 5 7 7 . True 38
(20, 18) 2 3 5 7 7 . True 38
(23, 15) 2 3 5 7 7 . True 38
(25, 13) 2 3 5 7 7 . True 38
(26, 12) 2 3 5 7 7 . True 38
(27, 11) 2 3 5 7 7 . True 38
(28, 10) 2 3 5 7 7 . True 38
(30, 8) 2 3 5 7 7 . True 38
(31, 7) 2 3 5 7 7 . True 38
(32, 6) 2 3 5 7 7 . True 38
(33, 5) 2 3 5 7 7 . True 38
(34, 4) 2 3 5 7 7 . True 38
(35, 3) 2 3 5 7 7 . True 38
(38, 0) 2 3 5 7 7 . True 38
-------- ----------- ---- --
unseen: 5/8
-------- ----------- ----- --
(6, 32) 2 3 7 5 6 . False 39
(10, 28) 2 3 5 7 7 . True 38
(15, 23) 2 3 5 7 7 . True 38
(19, 19) 2 3 5 7 7 . True 38
(21, 17) 2 3 5 7 7 . True 38
(22, 16) 2 3 5 7 5 . False 37
(36, 2) 2 3 5 7 7 . True 38
(37, 1) 2 3 5 7 5 . False 37
-------- ----------- ----- --
--------------------------------------------------------------------------------
39: 28 examples
seen: 18/18
-------- ----------- ---- --
(1, 38) 2 3 7 5 6 . True 39
(6, 33) 2 3 7 5 6 . True 39
(8, 31) 2 3 7 5 6 . True 39
(9, 30) 2 3 7 5 6 . True 39
(12, 27) 2 3 7 5 6 . True 39
(16, 23) 2 3 7 5 6 . True 39
(17, 22) 2 3 7 5 6 . True 39
(19, 20) 2 3 7 5 6 . True 39
(21, 18) 2 3 7 5 6 . True 39
(23, 16) 2 3 7 5 6 . True 39
(24, 15) 2 3 7 5 6 . True 39
(28, 11) 2 3 7 5 6 . True 39
(32, 7) 2 3 7 5 6 . True 39
(33, 6) 2 3 7 5 6 . True 39
(34, 5) 2 3 7 5 6 . True 39
(35, 4) 2 3 7 5 6 . True 39
(37, 2) 2 3 7 5 6 . True 39
(39, 0) 2 3 7 5 6 . True 39
-------- ----------- ---- --
unseen: 4/10
-------- ----------- ----- --
(0, 39) 2 3 7 5 6 . True 39
(3, 36) 2 3 7 7 5 . False 40
(7, 32) 2 3 7 5 6 . True 39
(14, 25) 2 3 5 7 7 . False 38
(20, 19) 2 3 7 7 5 . False 40
(25, 14) 2 3 7 5 6 . True 39
(27, 12) 2 3 7 5 6 . True 39
(29, 10) 2 3 5 7 7 . False 38
(31, 8) 2 3 7 5 7 . False 40
(38, 1) 2 3 5 7 7 . False 38
-------- ----------- ----- --
--------------------------------------------------------------------------------
40: 33 examples
seen: 25/26
-------- ----------- ----- --
(1, 39) 2 3 7 7 5 . True 40
(2, 38) 2 3 7 5 7 . True 40
(3, 37) 2 3 7 7 5 . True 40
(4, 36) 2 3 7 7 5 . True 40
(5, 35) 2 3 7 7 5 . True 40
(6, 34) 2 3 7 7 5 . True 40
(9, 31) 2 3 7 5 7 . True 40
(11, 29) 2 3 7 7 5 . True 40
(13, 27) 2 3 7 7 5 . True 40
(15, 25) 2 3 7 7 5 . True 40
(18, 22) 2 3 7 5 6 . False 39
(19, 21) 2 3 7 7 5 . True 40
(20, 20) 2 3 7 7 5 . True 40
(22, 18) 2 3 7 7 5 . True 40
(23, 17) 2 3 7 7 5 . True 40
(24, 16) 2 3 7 7 5 . True 40
(25, 15) 2 3 7 7 5 . True 40
(27, 13) 2 3 7 7 5 . True 40
(28, 12) 2 3 7 7 5 . True 40
(31, 9) 2 3 7 7 5 . True 40
(33, 7) 2 3 7 7 5 . True 40
(35, 5) 2 3 7 7 5 . True 40
(36, 4) 2 3 7 7 5 . True 40
(37, 3) 2 3 7 7 5 . True 40
(38, 2) 2 3 7 9 7 . True 40
(39, 1) 2 3 7 5 7 . True 40
-------- ----------- ----- --
unseen: 6/7
-------- ----------- ----- --
(8, 32) 2 3 7 5 7 . True 40
(10, 30) 2 3 7 7 5 . True 40
(12, 28) 2 3 7 5 7 . True 40
(14, 26) 2 3 7 5 6 . False 39
(16, 24) 2 3 7 7 5 . True 40
(29, 11) 2 3 7 7 5 . True 40
(30, 10) 2 3 7 7 5 . True 40
-------- ----------- ----- --
--------------------------------------------------------------------------------
41: 29 examples
seen: 17/20
-------- ----------- ----- --
(2, 39) 2 3 7 7 5 . False 40
(3, 38) 2 3 7 7 7 . True 41
(4, 37) 2 3 7 7 5 . False 40
(5, 36) 2 3 7 7 7 . True 41
(6, 35) 2 3 7 7 7 . True 41
(8, 33) 2 3 7 7 7 . True 41
(12, 29) 2 3 7 7 7 . True 41
(14, 27) 2 3 7 7 7 . True 41
(15, 26) 2 3 7 7 7 . True 41
(16, 25) 2 3 7 7 7 . True 41
(17, 24) 2 3 7 7 7 . True 41
(18, 23) 2 3 7 7 5 . False 40
(21, 20) 2 3 7 7 7 . True 41
(22, 19) 2 3 7 7 7 . True 41
(27, 14) 2 3 7 7 7 . True 41
(28, 13) 2 3 7 7 7 . True 41
(29, 12) 2 3 7 7 7 . True 41
(30, 11) 2 3 7 7 7 . True 41
(31, 10) 2 3 7 7 7 . True 41
(38, 3) 2 3 7 7 9 . True 41
-------- ----------- ----- --
unseen: 3/9
-------- ----------- ----- --
(10, 31) 2 3 7 7 5 . False 40
(11, 30) 2 7 5 6 5 . False 42
(13, 28) 2 7 5 5 6 . False 42
(20, 21) 2 3 7 7 5 . False 40
(24, 17) 2 3 7 7 7 . True 41
(25, 16) 2 3 7 7 7 . True 41
(34, 7) 2 7 5 5 6 . False 42
(36, 5) 2 3 7 7 5 . False 40
(37, 4) 2 3 7 7 7 . True 41
-------- ----------- ----- --
--------------------------------------------------------------------------------
42: 29 examples
seen: 14/21
-------- ----------- ----- --
(3, 39) 2 7 5 5 6 . True 42
(4, 38) 2 7 5 5 6 . True 42
(5, 37) 2 7 5 5 6 . True 42
(7, 35) 2 7 5 6 7 . True 42
(8, 34) 2 7 5 5 6 . True 42
(9, 33) 2 7 5 7 5 . False 43
(10, 32) 2 7 7 5 6 . False 43
(11, 31) 2 7 5 6 5 . True 42
(12, 30) 2 7 5 6 5 . True 42
(13, 29) 2 7 5 6 7 . True 42
(16, 26) 2 3 7 7 7 . False 41
(19, 23) 2 7 5 7 5 . False 43
(20, 22) 2 7 5 7 7 . False 43
(22, 20) 2 7 5 6 5 . True 42
(28, 14) 2 7 5 6 7 . True 42
(29, 13) 2 7 5 6 7 . True 42
(31, 11) 2 7 5 6 5 . True 42
(32, 10) 2 7 5 5 6 . True 42
(33, 9) 2 3 7 7 7 . False 41
(34, 8) 2 7 5 7 5 . False 43
(39, 3) 2 7 5 5 6 . True 42
-------- ----------- ----- --
unseen: 5/8
-------- ----------- ----- --
(15, 27) 2 7 5 7 7 . False 43
(18, 24) 2 3 7 7 7 . False 41
(24, 18) 2 7 5 6 5 . True 42
(25, 17) 2 7 5 6 7 . True 42
(26, 16) 2 7 5 7 5 . False 43
(35, 7) 2 7 5 6 7 . True 42
(36, 6) 2 7 5 6 7 . True 42
(37, 5) 2 7 5 5 6 . True 42
-------- ----------- ----- --
--------------------------------------------------------------------------------
43: 26 examples
seen: 17/21
-------- ----------- ----- --
(4, 39) 2 7 5 6 5 . False 42
(5, 38) 2 7 5 7 7 . True 43
(6, 37) 2 7 5 7 5 . True 43
(10, 33) 2 7 7 5 6 . True 43
(11, 32) 2 7 7 7 5 . False 44
(12, 31) 2 7 5 7 5 . True 43
(13, 30) 2 7 7 5 6 . True 43
(14, 29) 2 7 7 5 6 . True 43
(16, 27) 2 7 7 5 6 . True 43
(18, 25) 2 3 7 7 7 . False 41
(20, 23) 2 7 7 5 7 . True 43
(21, 22) 2 7 7 5 6 . True 43
(24, 19) 2 7 7 5 7 . True 43
(25, 18) 2 7 7 5 6 . True 43
(26, 17) 2 7 7 5 6 . True 43
(27, 16) 2 7 7 5 6 . True 43
(28, 15) 2 7 5 6 7 . False 42
(30, 13) 2 7 7 5 6 . True 43
(34, 9) 2 7 7 5 6 . True 43
(35, 8) 2 7 7 5 6 . True 43
(37, 6) 2 7 7 5 6 . True 43
-------- ----------- ----- --
unseen: 4/5
-------- ----------- ----- --
(8, 35) 2 7 7 5 6 . True 43
(17, 26) 2 7 7 5 6 . True 43
(19, 24) 2 7 7 5 6 . True 43
(31, 12) 2 7 7 5 7 . True 43
(38, 5) 2 7 5 6 5 . False 42
-------- ----------- ----- --
--------------------------------------------------------------------------------
44: 29 examples
seen: 22/22
-------- ----------- ---- --
(8, 36) 2 7 7 7 5 . True 44
(9, 35) 2 7 7 7 5 . True 44
(14, 30) 2 7 7 7 5 . True 44
(15, 29) 2 7 7 7 5 . True 44
(16, 28) 2 7 7 7 5 . True 44
(17, 27) 2 7 7 7 5 . True 44
(20, 24) 2 7 7 7 5 . True 44
(21, 23) 2 7 7 7 5 . True 44
(22, 22) 2 7 7 7 5 . True 44
(23, 21) 2 7 7 7 5 . True 44
(25, 19) 2 7 7 7 5 . True 44
(26, 18) 2 7 7 7 5 . True 44
(27, 17) 2 7 7 7 5 . True 44
(30, 14) 2 7 7 7 5 . True 44
(31, 13) 2 7 7 7 5 . True 44
(32, 12) 2 7 7 7 5 . True 44
(33, 11) 2 7 7 7 5 . True 44
(34, 10) 2 7 7 7 5 . True 44
(35, 9) 2 7 7 7 5 . True 44
(36, 8) 2 7 7 7 5 . True 44
(37, 7) 2 7 7 7 5 . True 44
(38, 6) 2 7 7 7 5 . True 44
-------- ----------- ---- --
unseen: 1/7
-------- ----------- ----- --
(12, 32) 8 7 5 6 7 . False 46
(13, 31) 2 7 7 5 7 . False 43
(18, 26) 2 7 5 7 5 . False 43
(19, 25) 2 7 7 5 6 . False 43
(24, 20) 2 7 7 7 5 . True 44
(28, 16) 7 5 6 5 7 . False 45
(29, 15) 2 7 7 5 6 . False 43
-------- ----------- ----- --
--------------------------------------------------------------------------------
45: 27 examples
seen: 17/19
-------- ----------- ----- --
(7, 38) 8 5 7 5 6 . True 45
(9, 36) 2 7 7 7 7 . True 45
(11, 34) 2 7 7 7 7 . True 45
(15, 30) 2 7 7 7 7 . True 45
(17, 28) 7 5 6 7 5 . True 45
(18, 27) 2 7 7 7 5 . False 44
(19, 26) 7 5 6 7 5 . True 45
(22, 23) 2 7 7 7 7 . True 45
(23, 22) 2 7 7 7 7 . True 45
(27, 18) 7 5 6 7 5 . True 45
(28, 17) 7 5 6 7 5 . True 45
(29, 16) 7 5 6 7 5 . True 45
(30, 15) 7 5 6 7 5 . True 45
(31, 14) 2 7 7 7 7 . True 45
(32, 13) 7 5 7 5 6 . False 46
(34, 11) 2 7 7 7 7 . True 45
(35, 10) 2 7 7 7 7 . True 45
(38, 7) 2 7 7 7 7 . True 45
(39, 6) 8 5 7 5 6 . True 45
-------- ----------- ----- --
unseen: 1/8
-------- ----------- ----- --
(6, 39) 2 7 7 5 7 . False 43
(12, 33) 2 7 7 7 5 . False 44
(13, 32) 8 7 5 6 7 . False 46
(14, 31) 2 7 7 7 5 . False 44
(21, 24) 2 7 7 7 7 . True 45
(25, 20) 2 7 7 7 5 . False 44
(36, 9) 2 7 7 7 5 . False 44
(37, 8) 8 7 5 6 7 . False 46
-------- ----------- ----- --
--------------------------------------------------------------------------------
46: 26 examples
seen: 19/20
-------- ----------- ----- --
(7, 39) 8 7 5 6 7 . True 46
(11, 35) 8 7 5 6 7 . True 46
(12, 34) 8 7 5 6 7 . True 46
(13, 33) 7 5 7 5 6 . True 46
(14, 32) 8 7 5 6 7 . True 46
(15, 31) 7 5 7 5 6 . True 46
(17, 29) 7 5 7 5 6 . True 46
(18, 28) 7 5 6 7 5 . False 45
(20, 26) 7 5 7 5 6 . True 46
(21, 25) 7 5 7 5 6 . True 46
(25, 21) 7 5 7 5 6 . True 46
(27, 19) 7 5 7 5 6 . True 46
(29, 17) 7 5 7 5 6 . True 46
(30, 16) 7 5 7 5 6 . True 46
(31, 15) 7 5 7 5 6 . True 46
(33, 13) 7 5 7 5 6 . True 46
(34, 12) 8 7 5 6 7 . True 46
(37, 9) 8 7 5 6 7 . True 46
(38, 8) 8 7 5 6 7 . True 46
(39, 7) 8 7 5 6 7 . True 46
-------- ----------- ----- --
unseen: 1/6
-------- ----------- ----- --
(8, 38) 8 7 5 6 7 . True 46
(9, 37) 2 7 7 7 5 . False 44
(10, 36) 2 7 7 7 7 . False 45
(24, 22) 7 5 7 7 5 . False 47
(26, 20) 7 7 5 6 7 . False 47
(35, 11) 2 7 7 7 7 . False 45
-------- ----------- ----- --
--------------------------------------------------------------------------------
47: 24 examples
seen: 15/16
-------- ----------- ----- --
(8, 39) 7 7 5 6 7 . True 47
(9, 38) 7 7 5 6 7 . True 47
(14, 33) 7 7 5 6 7 . True 47
(16, 31) 7 5 7 7 5 . True 47
(17, 30) 7 7 5 6 7 . True 47
(19, 28) 7 7 5 6 7 . True 47
(22, 25) 7 7 5 6 7 . True 47
(24, 23) 7 7 5 6 7 . True 47
(26, 21) 7 7 5 6 7 . True 47
(28, 19) 7 7 5 6 7 . True 47
(29, 18) 7 7 5 6 7 . True 47
(30, 17) 7 7 5 6 7 . True 47
(31, 16) 7 7 5 6 7 . True 47
(32, 15) 7 5 7 5 7 . False 46
(34, 13) 7 7 5 6 7 . True 47
(36, 11) 7 7 5 6 7 . True 47
-------- ----------- ----- --
unseen: 4/8
-------- ----------- ----- --
(10, 37) 8 7 5 6 7 . False 46
(15, 32) 7 7 5 7 7 . True 47
(20, 27) 7 7 7 5 6 . False 48
(21, 26) 7 7 5 6 7 . True 47
(23, 24) 7 7 5 7 7 . True 47
(25, 22) 7 7 5 6 7 . True 47
(35, 12) 8 7 7 5 6 . False 48
(38, 9) 8 7 5 6 7 . False 46
-------- ----------- ----- --
--------------------------------------------------------------------------------
48: 23 examples
seen: 20/23
-------- ----------- ----- --
(9, 39) 7 7 7 5 6 . True 48
(11, 37) 8 7 7 5 6 . True 48
(12, 36) 8 7 7 5 6 . True 48
(13, 35) 8 7 7 5 6 . True 48
(14, 34) 8 7 7 5 6 . True 48
(16, 32) 7 7 7 5 6 . True 48
(17, 31) 7 7 7 5 6 . True 48
(20, 28) 7 7 7 5 6 . True 48
(21, 27) 7 7 7 5 6 . True 48
(22, 26) 7 7 7 5 6 . True 48
(23, 25) 7 7 7 5 6 . True 48
(24, 24) 7 7 7 5 6 . True 48
(25, 23) 7 7 7 5 6 . True 48
(26, 22) 7 7 7 5 6 . True 48
(29, 19) 7 7 7 5 6 . True 48
(31, 17) 7 7 7 5 7 . False 49
(33, 15) 7 7 5 7 5 . False 47
(34, 14) 8 7 7 5 6 . True 48
(35, 13) 7 7 7 5 6 . True 48
(36, 12) 8 7 7 5 6 . True 48
(37, 11) 8 7 7 5 6 . True 48
(38, 10) 8 7 5 7 7 . False 49
(39, 9) 8 7 7 5 6 . True 48
-------- ----------- ----- --
unseen: 0/0
--------------------------------------------------------------------------------
49: 23 examples
seen: 12/15
-------- ----------- ----- --
(10, 39) 8 7 7 7 7 . False 50
(11, 38) 8 7 7 5 7 . True 49
(12, 37) 8 7 7 5 7 . True 49
(14, 35) 8 7 7 7 5 . True 49
(17, 32) 7 7 7 7 5 . True 49
(18, 31) 7 5 7 7 5 . False 47
(20, 29) 7 7 7 7 7 . False 50
(27, 22) 7 7 7 7 5 . True 49
(28, 21) 7 7 7 7 5 . True 49
(31, 18) 7 7 7 7 5 . True 49
(32, 17) 7 7 7 7 5 . True 49
(33, 16) 7 7 7 5 7 . True 49
(35, 14) 8 7 7 7 5 . True 49
(36, 13) 8 7 7 7 5 . True 49
(37, 12) 8 7 7 7 5 . True 49
-------- ----------- ----- --
unseen: 5/8
-------- ----------- ----- --
(21, 28) 7 7 7 5 7 . True 49
(22, 27) 7 7 7 7 5 . True 49
(23, 26) 7 7 7 7 5 . True 49
(25, 24) 7 7 7 7 5 . True 49
(29, 20) 7 7 7 5 7 . True 49
(30, 19) 7 7 7 5 6 . False 48
(34, 15) 7 7 7 5 6 . False 48
(39, 10) 8 7 7 5 6 . False 48
-------- ----------- ----- --
--------------------------------------------------------------------------------
50: 21 examples
seen: 14/14
-------- ----------- ---- --
(11, 39) 8 7 7 7 7 . True 50
(12, 38) 8 7 7 9 7 . True 50
(15, 35) 7 7 7 7 7 . True 50
(16, 34) 7 7 7 7 7 . True 50
(21, 29) 7 7 7 7 7 . True 50
(22, 28) 7 7 7 7 7 . True 50
(23, 27) 7 7 7 7 7 . True 50
(24, 26) 7 7 7 7 7 . True 50
(25, 25) 7 7 7 7 7 . True 50
(27, 23) 7 7 7 7 7 . True 50
(34, 16) 7 7 7 7 7 . True 50
(35, 15) 7 7 7 7 7 . True 50
(36, 14) 8 7 7 7 9 . True 50
(38, 12) 8 7 7 7 9 . True 50
-------- ----------- ---- --
unseen: 4/7
-------- ----------- ----- --
(17, 33) 7 7 7 7 5 . False 49
(18, 32) 7 7 7 5 7 . False 49
(19, 31) 7 7 7 7 7 . True 50
(28, 22) 7 7 7 7 7 . True 50
(31, 19) 7 7 7 7 7 . True 50
(32, 18) 7 7 7 5 6 . False 48
(39, 11) 8 7 7 7 7 . True 50
-------- ----------- ----- --
--------------------------------------------------------------------------------
51: 17 examples
seen: 10/13
-------- ----------- ----- --
(12, 39) 8 7 9 7 7 . True 51
(13, 38) 8 7 9 7 7 . True 51
(15, 36) 7 7 7 7 9 . True 51
(21, 30) 7 7 7 7 9 . True 51
(24, 27) 7 7 7 7 9 . True 51
(26, 25) 7 7 7 7 9 . True 51
(28, 23) 7 7 7 7 9 . True 51
(29, 22) 7 7 7 7 7 . False 50
(30, 21) 7 7 7 7 7 . False 50
(31, 20) 7 7 7 9 7 . False 52
(35, 16) 7 7 7 7 9 . True 51
(36, 15) 7 7 7 7 9 . True 51
(39, 12) 8 7 9 7 7 . True 51
-------- ----------- ----- --
unseen: 4/4
-------- ----------- ---- --
(19, 32) 7 7 7 7 9 . True 51
(20, 31) 7 7 7 7 9 . True 51
(25, 26) 7 7 7 7 9 . True 51
(37, 14) 8 7 9 7 7 . True 51
-------- ----------- ---- --
--------------------------------------------------------------------------------
52: 22 examples
seen: 11/16
-------- ----------- ----- --
(14, 38) 8 7 9 7 9 . True 52
(16, 36) 7 7 7 9 7 . True 52
(17, 35) 7 7 7 9 7 . True 52
(18, 34) 7 7 7 7 7 . False 50
(19, 33) 7 7 7 9 7 . True 52
(24, 28) 7 7 7 9 7 . True 52
(25, 27) 7 7 7 7 9 . False 51
(26, 26) 7 7 7 9 7 . True 52
(28, 24) 7 7 7 9 9 . False 53
(31, 21) 7 7 9 7 7 . False 53
(32, 20) 7 7 7 9 7 . True 52
(35, 17) 7 7 7 9 7 . True 52
(36, 16) 7 7 7 9 7 . True 52
(37, 15) 7 7 7 7 9 . False 51
(38, 14) 8 7 9 7 9 . True 52
(39, 13) 8 7 9 7 9 . True 52
-------- ----------- ----- --
unseen: 3/6
-------- ----------- ----- --
(20, 32) 7 7 9 7 7 . False 53
(21, 31) 7 7 7 9 7 . True 52
(23, 29) 7 7 7 9 7 . True 52
(29, 23) 7 7 7 7 9 . False 51
(30, 22) 7 7 7 9 7 . True 52
(34, 18) 7 7 7 7 9 . False 51
-------- ----------- ----- --
--------------------------------------------------------------------------------
53: 20 examples
seen: 13/15
-------- ----------- ----- --
(14, 39) 8 9 7 7 7 . True 53
(16, 37) 7 7 9 7 7 . True 53
(18, 35) 7 7 7 9 7 . False 52
(21, 32) 7 7 7 9 9 . True 53
(22, 31) 7 7 7 9 9 . True 53
(23, 30) 7 7 9 7 7 . True 53
(24, 29) 7 7 9 7 7 . True 53
(25, 28) 7 7 7 9 9 . True 53
(28, 25) 7 7 9 7 7 . True 53
(29, 24) 7 7 7 9 9 . True 53
(33, 20) 7 7 9 7 7 . True 53
(34, 19) 7 7 9 7 7 . True 53
(37, 16) 7 7 9 7 7 . True 53
(38, 15) 7 7 7 9 7 . False 52
(39, 14) 8 9 7 7 9 . True 53
-------- ----------- ----- --
unseen: 3/5
-------- ----------- ----- --
(15, 38) 7 7 9 7 7 . True 53
(20, 33) 7 7 9 7 9 . False 54
(30, 23) 7 7 9 7 7 . True 53
(31, 22) 7 7 9 7 9 . False 54
(35, 18) 7 7 9 7 7 . True 53
-------- ----------- ----- --
--------------------------------------------------------------------------------
54: 17 examples
seen: 16/16
-------- ----------- ---- --
(15, 39) 7 7 9 7 9 . True 54
(16, 38) 7 7 9 7 9 . True 54
(17, 37) 7 7 9 7 9 . True 54
(20, 34) 7 7 9 7 9 . True 54
(22, 32) 7 7 9 7 9 . True 54
(23, 31) 7 7 9 7 9 . True 54
(25, 29) 7 7 9 7 9 . True 54
(27, 27) 7 7 9 7 9 . True 54
(28, 26) 7 7 9 7 9 . True 54
(30, 24) 7 7 9 7 9 . True 54
(32, 22) 7 7 9 7 9 . True 54
(34, 20) 7 7 9 7 9 . True 54
(35, 19) 7 7 9 7 9 . True 54
(36, 18) 7 7 9 7 9 . True 54
(37, 17) 7 7 9 7 9 . True 54
(38, 16) 7 7 9 7 9 . True 54
-------- ----------- ---- --
unseen: 1/1
-------- ----------- ---- --
(24, 30) 7 7 9 7 9 . True 54
-------- ----------- ---- --
--------------------------------------------------------------------------------
55: 21 examples
seen: 12/14
-------- ----------- ----- --
(17, 38) 7 7 9 9 7 . True 55
(18, 37) 7 7 9 7 7 . False 53
(20, 35) 7 9 7 7 9 . False 56
(21, 34) 7 7 9 9 7 . True 55
(24, 31) 7 7 9 9 7 . True 55
(25, 30) 7 7 9 9 7 . True 55
(26, 29) 7 9 7 7 7 . True 55
(27, 28) 7 7 9 9 7 . True 55
(29, 26) 7 7 9 9 7 . True 55
(30, 25) 7 7 9 9 7 . True 55
(33, 22) 7 9 7 7 7 . True 55
(35, 20) 7 7 9 9 7 . True 55
(36, 19) 7 7 9 9 7 . True 55
(39, 16) 7 7 9 9 7 . True 55
-------- ----------- ----- --
unseen: 5/7
-------- ----------- ----- --
(16, 39) 7 7 9 7 9 . False 54
(23, 32) 7 7 9 9 7 . True 55
(28, 27) 7 9 7 7 7 . True 55
(31, 24) 7 7 9 9 7 . True 55
(32, 23) 7 7 9 9 7 . True 55
(37, 18) 7 9 7 7 7 . True 55
(38, 17) 7 7 9 7 9 . False 54
-------- ----------- ----- --
--------------------------------------------------------------------------------
56: 15 examples
seen: 6/14
-------- ----------- ----- --
(18, 38) 7 7 9 9 7 . False 55
(19, 37) 7 9 7 7 9 . True 56
(20, 36) 7 9 7 7 9 . True 56
(21, 35) 7 9 7 9 7 . False 57
(22, 34) 7 9 7 9 7 . False 57
(23, 33) 7 9 7 7 7 . False 55
(27, 29) 7 9 7 7 9 . True 56
(29, 27) 7 9 7 9 7 . False 57
(31, 25) 7 9 7 9 7 . False 57
(33, 23) 7 9 7 7 7 . False 55
(35, 21) 7 9 7 7 9 . True 56
(36, 20) 7 9 7 7 9 . True 56
(37, 19) 7 9 7 9 7 . False 57
(39, 17) 7 9 7 7 9 . True 56
-------- ----------- ----- --
unseen: 0/1
-------- ----------- ----- --
(26, 30) 7 9 7 9 7 . False 57
-------- ----------- ----- --
--------------------------------------------------------------------------------
57: 16 examples
seen: 13/13
-------- ----------- ---- --
(19, 38) 7 9 7 9 7 . True 57
(20, 37) 7 9 7 9 7 . True 57
(22, 35) 7 9 7 9 9 . True 57
(25, 32) 7 9 7 9 7 . True 57
(27, 30) 7 9 7 9 9 . True 57
(28, 29) 7 9 7 9 9 . True 57
(29, 28) 7 9 7 9 7 . True 57
(30, 27) 7 9 7 9 7 . True 57
(31, 26) 7 9 7 9 9 . True 57
(32, 25) 7 9 7 9 7 . True 57
(33, 24) 7 9 7 9 7 . True 57
(34, 23) 7 9 7 9 9 . True 57
(38, 19) 7 9 7 9 7 . True 57
-------- ----------- ---- --
unseen: 2/3
-------- ----------- ----- --
(18, 39) 7 9 7 7 7 . False 55
(26, 31) 7 9 7 9 7 . True 57
(35, 22) 7 9 7 9 7 . True 57
-------- ----------- ----- --
--------------------------------------------------------------------------------
58: 19 examples
seen: 14/17
-------- ----------- ----- --
(19, 39) 9 7 7 9 7 . False 59
(20, 38) 7 9 9 7 7 . True 58
(21, 37) 7 9 9 7 7 . True 58
(22, 36) 9 7 7 7 9 . True 58
(23, 35) 9 7 7 9 7 . False 59
(24, 34) 9 7 7 7 9 . True 58
(25, 33) 7 9 9 7 7 . True 58
(27, 31) 7 9 9 7 7 . True 58
(28, 30) 7 9 9 7 7 . True 58
(29, 29) 7 9 9 7 7 . True 58
(31, 27) 7 9 9 7 7 . True 58
(33, 25) 7 9 9 7 7 . True 58
(34, 24) 7 9 9 7 7 . True 58
(35, 23) 9 7 7 9 7 . False 59
(36, 22) 9 7 7 7 9 . True 58
(37, 21) 7 9 9 7 7 . True 58
(39, 19) 7 9 9 7 7 . True 58
-------- ----------- ----- --
unseen: 1/2
-------- ----------- ----- --
(26, 32) 9 7 7 9 7 . False 59
(38, 20) 9 7 7 7 9 . True 58
-------- ----------- ----- --
--------------------------------------------------------------------------------
59: 17 examples
seen: 10/14
-------- ----------- ----- --
(20, 39) 9 7 9 7 7 . False 60
(21, 38) 9 7 7 9 7 . True 59
(22, 37) 9 7 7 9 7 . True 59
(23, 36) 9 7 7 9 7 . True 59
(24, 35) 9 7 7 9 7 . True 59
(25, 34) 9 7 7 9 7 . True 59
(26, 33) 9 7 7 9 7 . True 59
(27, 32) 9 7 9 7 7 . False 60
(29, 30) 9 7 7 7 9 . False 58
(30, 29) 9 7 7 9 7 . True 59
(33, 26) 9 7 7 9 7 . True 59
(37, 22) 9 7 7 9 7 . True 59
(38, 21) 9 7 7 9 7 . True 59
(39, 20) 9 7 9 7 7 . False 60
-------- ----------- ----- --
unseen: 2/3
-------- ----------- ----- --
(28, 31) 9 7 7 9 7 . True 59
(31, 28) 9 7 7 9 7 . True 59
(34, 25) 9 7 7 9 9 . False 58
-------- ----------- ----- --
--------------------------------------------------------------------------------
60: 11 examples
seen: 8/11
-------- ----------- ----- --
(22, 38) 9 7 9 7 7 . True 60
(23, 37) 9 7 9 7 7 . True 60
(24, 36) 9 7 9 7 7 . True 60
(25, 35) 9 7 9 7 7 . True 60
(26, 34) 9 7 9 7 7 . True 60
(27, 33) 9 7 9 9 7 . False 61
(28, 32) 9 7 9 7 7 . True 60
(31, 29) 9 7 9 9 7 . False 61
(32, 28) 9 7 7 9 7 . False 59
(35, 25) 9 7 9 7 9 . True 60
(38, 22) 9 7 9 7 7 . True 60
-------- ----------- ----- --
unseen: 0/0
--------------------------------------------------------------------------------
61: 13 examples
seen: 7/10
-------- ----------- ----- --
(22, 39) 9 7 9 9 7 . True 61
(24, 37) 9 7 9 7 9 . False 60
(27, 34) 9 7 9 9 7 . True 61
(28, 33) 9 7 9 9 7 . True 61
(29, 32) 9 7 9 7 9 . False 60
(31, 30) 9 7 9 9 7 . True 61
(32, 29) 9 7 9 9 7 . True 61
(33, 28) 9 7 9 7 9 . False 60
(34, 27) 9 7 9 9 7 . True 61
(35, 26) 9 7 9 9 7 . True 61
-------- ----------- ----- --
unseen: 2/3
-------- ----------- ----- --
(23, 38) 9 7 9 9 7 . True 61
(30, 31) 9 7 9 9 7 . True 61
(37, 24) 9 7 9 7 9 . False 60
-------- ----------- ----- --
--------------------------------------------------------------------------------
62: 13 examples
seen: 2/12
-------- ----------- ----- --
(25, 37) 9 7 9 9 7 . False 61
(27, 35) 9 9 7 7 9 . False 63
(28, 34) 9 9 7 7 9 . False 63
(29, 33) 9 7 9 9 9 . True 62
(30, 32) 9 9 7 7 9 . False 63
(32, 30) 9 9 7 7 9 . False 63
(34, 28) 9 7 9 9 9 . True 62
(35, 27) 9 9 7 7 9 . False 63
(36, 26) 9 9 7 7 9 . False 63
(37, 25) 9 9 7 7 9 . False 63
(38, 24) 9 7 9 9 7 . False 61
(39, 23) 9 9 7 7 9 . False 63
-------- ----------- ----- --
unseen: 0/1
-------- ----------- ----- --
(31, 31) 9 9 7 7 9 . False 63
-------- ----------- ----- --
--------------------------------------------------------------------------------
63: 12 examples
seen: 9/10
-------- ----------- ----- --
(24, 39) 9 9 7 7 9 . True 63
(25, 38) 9 9 7 7 9 . True 63
(26, 37) 9 9 7 7 9 . True 63
(29, 34) 9 9 7 7 9 . True 63
(32, 31) 9 9 7 7 9 . True 63
(33, 30) 9 9 7 7 9 . True 63
(34, 29) 9 9 7 7 9 . True 63
(35, 28) 9 9 7 7 9 . True 63
(36, 27) 9 9 7 7 9 . True 63
(37, 26) 9 9 7 9 7 . False 64
-------- ----------- ----- --
unseen: 1/2
-------- ----------- ----- --
(28, 35) 9 9 7 9 7 . False 64
(30, 33) 9 9 7 7 9 . True 63
-------- ----------- ----- --
--------------------------------------------------------------------------------
64: 12 examples
seen: 9/9
-------- ----------- ---- --
(26, 38) 9 9 7 9 7 . True 64
(27, 37) 9 9 7 9 7 . True 64
(28, 36) 9 9 7 9 7 . True 64
(30, 34) 9 9 7 9 7 . True 64
(31, 33) 9 9 7 9 7 . True 64
(33, 31) 9 9 7 9 7 . True 64
(36, 28) 9 9 7 9 7 . True 64
(38, 26) 9 9 7 9 7 . True 64
(39, 25) 9 9 7 9 7 . True 64
-------- ----------- ---- --
unseen: 3/3
-------- ----------- ---- --
(34, 30) 9 9 7 9 7 . True 64
(35, 29) 9 9 7 9 7 . True 64
(37, 27) 9 9 7 9 7 . True 64
-------- ----------- ---- --
--------------------------------------------------------------------------------
65: 10 examples
seen: 5/6
-------- ----------- ----- --
(28, 37) 9 9 7 9 9 . True 65
(31, 34) 9 9 7 9 9 . True 65
(33, 32) 9 9 7 9 9 . True 65
(34, 31) 9 9 7 9 7 . False 64
(36, 29) 9 9 7 9 9 . True 65
(38, 27) 9 9 7 9 9 . True 65
-------- ----------- ----- --
unseen: 1/4
-------- ----------- ----- --
(26, 39) 9 9 7 9 9 . True 65
(32, 33) 9 9 7 9 7 . False 64
(35, 30) 9 9 9 7 7 . False 66
(39, 26) 9 9 7 9 7 . False 64
-------- ----------- ----- --
--------------------------------------------------------------------------------
66: 11 examples
seen: 5/7
-------- ----------- ----- --
(27, 39) 9 9 9 7 7 . True 66
(28, 38) 9 9 9 7 7 . True 66
(29, 37) 9 9 7 9 9 . False 65
(33, 33) 9 9 9 7 7 . True 66
(35, 31) 9 9 9 7 7 . True 66
(37, 29) 9 9 9 7 7 . True 66
(38, 28) 9 9 7 9 9 . False 65
-------- ----------- ----- --
unseen: 1/4
-------- ----------- ----- --
(30, 36) 9 9 7 9 9 . False 65
(32, 34) 9 9 7 9 9 . False 65
(34, 32) 9 9 9 7 7 . True 66
(36, 30) 9 9 9 7 9 . False 67
-------- ----------- ----- --
--------------------------------------------------------------------------------
67: 10 examples
seen: 4/5
-------- ----------- ----- --
(29, 38) 9 9 9 7 7 . False 66
(30, 37) 9 9 9 7 9 . True 67
(35, 32) 9 9 9 7 9 . True 67
(38, 29) 9 9 9 7 9 . True 67
(39, 28) 9 9 9 7 9 . True 67
-------- ----------- ----- --
unseen: 5/5
-------- ----------- ---- --
(28, 39) 9 9 9 7 9 . True 67
(31, 36) 9 9 9 7 9 . True 67
(33, 34) 9 9 9 7 9 . True 67
(34, 33) 9 9 9 7 9 . True 67
(36, 31) 9 9 9 7 9 . True 67
-------- ----------- ---- --
--------------------------------------------------------------------------------
68: 6 examples
seen: 0/5
-------- ----------- ----- --
(29, 39) 9 9 9 7 9 . False 67
(30, 38) 9 9 9 7 9 . False 67
(35, 33) 9 9 9 7 9 . False 67
(38, 30) 9 9 9 7 9 . False 67
(39, 29) 9 9 9 7 9 . False 67
-------- ----------- ----- --
unseen: 0/1
-------- ----------- ----- --
(32, 36) 9 9 9 7 9 . False 67
-------- ----------- ----- --
--------------------------------------------------------------------------------
69: 9 examples
seen: 8/9
-------- ----------- ----- --
(30, 39) 9 9 9 9 7 . True 69
(31, 38) 9 9 9 9 7 . True 69
(33, 36) 9 9 9 9 7 . True 69
(34, 35) 9 9 9 9 7 . True 69
(35, 34) 9 9 9 9 7 . True 69
(36, 33) 9 9 9 9 7 . True 69
(37, 32) 9 9 9 9 7 . True 69
(38, 31) 9 9 9 7 9 . False 67
(39, 30) 9 9 9 9 7 . True 69
-------- ----------- ----- --
unseen: 0/0
--------------------------------------------------------------------------------
70: 8 examples
seen: 0/6
-------- ----------- ----- --
(31, 39) 9 9 9 9 7 . False 69
(32, 38) 9 9 9 9 7 . False 69
(33, 37) 9 9 9 9 7 . False 69
(35, 35) 9 9 9 9 7 . False 69
(36, 34) 9 9 9 9 7 . False 69
(39, 31) 9 9 9 9 7 . False 69
-------- ----------- ----- --
unseen: 0/2
-------- ----------- ----- --
(34, 36) 9 9 9 9 9 . False 73
(37, 33) 9 9 9 9 7 . False 69
-------- ----------- ----- --
--------------------------------------------------------------------------------
71: 5 examples
seen: 0/3
-------- ----------- ----- --
(33, 38) 9 9 9 9 7 . False 69
(36, 35) 9 9 9 9 7 . False 69
(37, 34) 9 9 9 9 7 . False 69
-------- ----------- ----- --
unseen: 0/2
-------- ----------- ----- --
(34, 37) 9 9 9 9 9 . False 73
(39, 32) 9 9 9 9 7 . False 69
-------- ----------- ----- --
--------------------------------------------------------------------------------
72: 3 examples
seen: 0/3
-------- ----------- ----- --
(34, 38) 9 9 9 9 9 . False 73
(35, 37) 9 9 9 9 9 . False 73
(39, 33) 9 9 9 9 9 . False 73
-------- ----------- ----- --
unseen: 0/0
--------------------------------------------------------------------------------
73: 5 examples
seen: 4/4
-------- ----------- ---- --
(34, 39) 9 9 9 9 9 . True 73
(35, 38) 9 9 9 9 9 . True 73
(38, 35) 9 9 9 9 9 . True 73
(39, 34) 9 9 9 9 9 . True 73
-------- ----------- ---- --
unseen: 1/1
-------- ----------- ---- --
(37, 36) 9 9 9 9 9 . True 73
-------- ----------- ---- --
--------------------------------------------------------------------------------
74: 5 examples
seen: 0/2
-------- ----------- ----- --
(35, 39) 9 9 9 9 9 . False 73
(39, 35) 9 9 9 9 9 . False 73
-------- ----------- ----- --
unseen: 0/3
-------- ----------- ----- --
(36, 38) 9 9 9 9 9 . False 73
(37, 37) 9 9 9 9 9 . False 73
(38, 36) 9 9 9 9 9 . False 73
-------- ----------- ----- --
--------------------------------------------------------------------------------
75: 4 examples
seen: 0/2
-------- ----------- ----- --
(36, 39) 9 9 9 9 9 . False 73
(38, 37) 9 9 9 9 9 . False 73
-------- ----------- ----- --
unseen: 0/2
-------- ----------- ----- --
(37, 38) 9 9 9 9 9 . False 73
(39, 36) 9 9 9 9 9 . False 73
-------- ----------- ----- --
--------------------------------------------------------------------------------
76: 2 examples
seen: 0/1
-------- ----------- ----- --
(39, 37) 9 9 9 9 9 . False 73
-------- ----------- ----- --
unseen: 0/1
-------- ----------- ----- --
(37, 39) 9 9 9 9 9 . False 73
-------- ----------- ----- --
--------------------------------------------------------------------------------
77: 1 examples
seen: 0/1
-------- ----------- ----- --
(39, 38) 9 9 9 9 9 . False 73
-------- ----------- ----- --
unseen: 0/0
--------------------------------------------------------------------------------
###Markdown
Grouping and visualising the datasets by summand appearing in the input. From left to right:
- input,
- message produced by the sender,
- whether the receiver's prediction is correct, and
- the actual prediction produced by the receiver,

split by occurrence in training data (seen) and in evaluation data (unseen), and grouped by summands (i.e. all examples with 0, 1, 2 etc. as one of the inputs).
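For reference, the snippet below is a minimal sketch of what such a grouping routine could look like. `inspect_by_summand_sketch`, its assumed record format `(inputs, message, prediction, seen)` and the exact printout layout are illustrative assumptions only; the real `inspect_by_summand` called in the next cell is defined earlier in the notebook and produces the tables shown in the output.

```python
# Hypothetical sketch only: the actual inspect_by_summand used below is defined
# earlier in the notebook and may group, sort and format its tables differently.
# Assumed record format: (inputs, message, prediction, seen), where inputs is a
# pair of ints, message is a sequence of symbol ids and seen marks training data.
from collections import defaultdict

def inspect_by_summand_sketch(records):
    groups = defaultdict(list)
    for record in records:
        inputs = record[0]
        for summand in set(inputs):  # index the example under each distinct summand
            groups[summand].append(record)
    for summand in sorted(groups):
        rows = groups[summand]
        print(f"{summand}: {len(rows)} examples")
        for label, flag in (("seen", True), ("unseen", False)):
            subset = [r for r in rows if r[3] == flag]
            correct = sum(sum(r[0]) == r[2] for r in subset)
            print(f"  {label}: {correct}/{len(subset)}")
            for inputs, message, prediction, _ in subset:
                ok = sum(inputs) == prediction
                print(f"    {inputs}  {' '.join(map(str, message))}  {ok}  {prediction}")
```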
###Code
print("Small ds")
inspect_by_summand(data_small, normalised_vocab_small)
print("Large ds")
inspect_by_summand(data_large, normalised_vocab_large)
###Output
Large ds
0: 58 examples
seen: 34/43
------- ----------- ----- --
(0, 0) 0 . 0 0 . . False 2
(0, 1) 0 . 0 0 . . False 2
(0, 10) 1 0 0 0 0 . False 12
(0, 12) 1 0 0 0 0 . True 12
(0, 13) 1 1 0 0 0 . True 13
(0, 14) 1 1 1 1 1 . True 14
(0, 15) 1 1 1 1 1 . False 14
(0, 17) 2 1 0 0 0 . True 17
(0, 18) 2 1 1 0 0 . True 18
(0, 19) 2 1 1 1 0 . True 19
(0, 2) 0 . 0 0 . . True 2
(0, 22) 2 1 1 3 1 . True 22
(0, 24) 2 1 3 4 0 . True 24
(0, 26) 2 3 4 0 0 . True 26
(0, 28) 2 3 4 1 1 . True 28
(0, 29) 2 3 4 2 1 . True 29
(0, 30) 2 3 4 2 4 . True 30
(0, 32) 2 3 4 3 4 . True 32
(0, 34) 2 3 5 6 3 . True 34
(0, 35) 2 3 5 6 5 . True 35
(0, 37) 2 3 5 7 5 . True 37
(0, 38) 2 3 5 7 7 . True 38
(0, 5) 0 0 . 0 0 . False 7
(0, 7) 0 0 . 0 0 . True 7
(0, 9) 0 0 . 0 0 . False 7
(4, 0) 0 0 . 0 0 . False 7
(6, 0) 0 0 . 0 0 . False 7
(14, 0) 1 1 1 1 1 . True 14
(15, 0) 1 1 1 1 1 . False 14
(16, 0) 1 1 1 3 1 . True 16
(18, 0) 2 1 1 0 0 . True 18
(19, 0) 2 1 1 1 0 . True 19
(21, 0) 2 1 1 1 3 . True 21
(22, 0) 2 1 1 3 1 . True 22
(24, 0) 2 1 3 4 0 . True 24
(25, 0) 2 3 1 0 0 . True 25
(26, 0) 2 3 4 0 0 . True 26
(29, 0) 2 3 4 2 1 . True 29
(32, 0) 2 3 5 4 2 . True 32
(36, 0) 2 3 5 6 7 . True 36
(37, 0) 2 3 5 7 5 . True 37
(38, 0) 2 3 5 7 7 . True 38
(39, 0) 2 3 7 5 6 . True 39
------- ----------- ----- --
unseen: 3/15
------- ----------- ----- --
(0, 25) 2 1 3 4 0 . False 24
(0, 27) 2 3 4 0 0 . False 26
(0, 3) 0 . 0 0 0 . False 2
(0, 36) 2 3 5 6 7 . True 36
(0, 39) 2 3 7 5 6 . True 39
(0, 6) 0 0 . 0 0 . False 7
(0, 8) 0 0 . 0 0 . False 7
(1, 0) 0 . 0 0 . . False 2
(2, 0) 0 0 . 0 0 . False 7
(5, 0) 0 0 . 0 0 . False 7
(11, 0) 1 0 0 0 0 . False 12
(23, 0) 2 1 3 1 0 . True 23
(28, 0) 2 3 4 0 0 . False 26
(30, 0) 2 3 4 2 1 . False 29
(35, 0) 2 3 5 6 7 . False 36
------- ----------- ----- --
--------------------------------------------------------------------------------
1: 60 examples
seen: 28/40
------- ----------- ----- --
(0, 1) 0 . 0 0 . . False 2
(1, 1) 0 . 0 0 . . True 2
(1, 10) 1 0 0 0 0 . False 12
(1, 12) 1 1 0 0 0 . True 13
(1, 13) 1 1 1 0 0 . False 13
(1, 14) 1 1 1 1 1 . False 14
(1, 16) 2 1 0 0 0 . True 17
(1, 17) 2 1 1 0 0 . True 18
(1, 2) 0 . 0 0 . . False 2
(1, 20) 2 1 1 1 3 . True 21
(1, 21) 2 1 1 3 1 . True 22
(1, 24) 2 3 1 0 0 . True 25
(1, 25) 2 3 4 0 0 . True 26
(1, 26) 2 3 4 1 0 . True 27
(1, 30) 2 3 4 2 3 . True 31
(1, 31) 2 3 4 3 4 . True 32
(1, 33) 2 3 5 6 3 . True 34
(1, 34) 2 3 5 6 5 . True 35
(1, 35) 2 3 5 6 7 . True 36
(1, 38) 2 3 7 5 6 . True 39
(1, 39) 2 3 7 7 5 . True 40
(1, 6) 0 0 . 0 0 . True 7
(5, 1) 0 0 . 0 0 . False 7
(6, 1) 0 0 . 0 0 . True 7
(9, 1) 1 0 0 0 0 . False 12
(11, 1) 1 0 0 0 0 . True 12
(12, 1) 1 1 1 0 0 . True 13
(14, 1) 1 1 1 1 3 . False 16
(15, 1) 1 1 1 3 1 . True 16
(18, 1) 2 1 1 1 0 . True 19
(19, 1) 2 1 1 1 1 . True 20
(22, 1) 2 1 3 1 0 . True 23
(23, 1) 2 1 3 4 0 . True 24
(24, 1) 2 3 1 0 0 . True 25
(28, 1) 2 3 4 2 1 . True 29
(29, 1) 2 3 4 2 1 . False 29
(30, 1) 2 3 4 2 1 . False 29
(32, 1) 2 3 5 4 2 . False 32
(34, 1) 2 3 5 6 3 . False 34
(39, 1) 2 3 7 5 7 . True 40
------- ----------- ----- --
unseen: 2/20
------- ----------- ----- --
(1, 0) 0 . 0 0 . . False 2
(1, 15) 1 1 1 1 1 . False 14
(1, 18) 2 1 1 0 0 . False 18
(1, 19) 2 1 1 1 0 . False 19
(1, 28) 2 3 4 2 4 . False 30
(1, 3) 0 . 0 0 0 . False 2
(1, 36) 2 3 5 7 7 . False 38
(1, 4) 0 0 . 0 0 . False 7
(1, 5) 0 0 . 0 0 . False 7
(1, 7) 0 0 . 0 0 . False 7
(1, 9) 0 0 . 0 0 . False 7
(2, 1) 0 0 . 0 0 . False 7
(3, 1) 0 0 . 0 0 . False 7
(10, 1) 1 1 0 0 0 . False 13
(16, 1) 1 1 1 3 1 . False 16
(17, 1) 2 1 1 1 0 . False 19
(31, 1) 2 3 4 3 4 . True 32
(35, 1) 2 3 5 6 7 . True 36
(37, 1) 2 3 5 7 5 . False 37
(38, 1) 2 3 5 7 7 . False 38
------- ----------- ----- --
--------------------------------------------------------------------------------
2: 57 examples
seen: 27/39
------- ----------- ----- --
(0, 2) 0 . 0 0 . . True 2
(1, 2) 0 . 0 0 . . False 2
(2, 14) 1 1 1 3 1 . True 16
(2, 15) 2 1 0 0 0 . True 17
(2, 19) 2 1 1 3 1 . False 22
(2, 20) 2 1 1 3 1 . True 22
(2, 22) 2 1 3 4 0 . True 24
(2, 23) 2 3 1 0 0 . True 25
(2, 24) 2 3 4 0 0 . True 26
(2, 25) 2 3 4 1 0 . True 27
(2, 26) 2 3 4 1 0 . False 27
(2, 27) 2 3 4 2 1 . True 29
(2, 3) 0 0 . 0 0 . False 7
(2, 30) 2 3 4 3 3 . True 32
(2, 31) 2 3 5 3 4 . True 33
(2, 32) 2 3 5 6 3 . True 34
(2, 34) 2 3 5 6 7 . True 36
(2, 35) 2 3 5 7 5 . True 37
(2, 38) 2 3 7 5 7 . True 40
(2, 39) 2 3 7 7 5 . False 40
(2, 5) 0 0 . 0 0 . True 7
(2, 6) 0 0 . 0 0 . False 7
(3, 2) 0 0 . 0 0 . False 7
(5, 2) 0 0 . 0 0 . True 7
(6, 2) 0 0 . 0 0 . False 7
(7, 2) 0 0 . 0 0 . False 7
(9, 2) 1 0 0 0 0 . False 12
(12, 2) 1 1 1 0 0 . False 13
(17, 2) 2 1 1 1 0 . True 19
(18, 2) 2 1 1 1 0 . False 19
(19, 2) 2 1 1 1 3 . True 21
(21, 2) 2 1 3 1 0 . True 23
(22, 2) 2 1 3 4 0 . True 24
(25, 2) 2 3 4 1 0 . True 27
(26, 2) 2 3 4 1 1 . True 28
(27, 2) 2 3 4 2 1 . True 29
(28, 2) 2 3 4 2 4 . True 30
(37, 2) 2 3 7 5 6 . True 39
(38, 2) 2 3 7 9 7 . True 40
------- ----------- ----- --
unseen: 6/18
------- ----------- ----- --
(2, 0) 0 0 . 0 0 . False 7
(2, 1) 0 0 . 0 0 . False 7
(2, 10) 1 1 0 0 0 . False 13
(2, 12) 1 1 1 1 1 . True 14
(2, 17) 2 1 1 0 0 . False 18
(2, 28) 2 3 4 2 4 . True 30
(2, 33) 2 3 5 6 7 . False 36
(2, 7) 0 0 . 0 0 . False 7
(2, 8) 1 0 0 0 0 . False 12
(4, 2) 0 0 . 0 0 . False 7
(11, 2) 1 0 0 0 0 . False 12
(23, 2) 2 3 1 0 0 . True 25
(29, 2) 2 3 4 3 4 . False 32
(30, 2) 2 3 5 4 2 . True 32
(32, 2) 2 3 5 6 3 . True 34
(33, 2) 2 3 5 6 7 . False 36
(34, 2) 2 3 5 7 5 . False 37
(36, 2) 2 3 5 7 7 . True 38
------- ----------- ----- --
--------------------------------------------------------------------------------
3: 55 examples
seen: 30/42
------- ----------- ----- --
(2, 3) 0 0 . 0 0 . False 7
(3, 10) 1 1 0 0 0 . True 13
(3, 11) 1 1 1 0 0 . False 13
(3, 12) 1 1 1 1 1 . False 14
(3, 14) 2 1 0 0 0 . True 17
(3, 2) 0 0 . 0 0 . False 7
(3, 20) 2 1 3 4 0 . False 24
(3, 24) 2 3 4 1 0 . True 27
(3, 27) 2 3 4 2 4 . True 30
(3, 34) 2 3 5 7 5 . True 37
(3, 35) 2 3 5 7 7 . True 38
(3, 37) 2 3 7 7 5 . True 40
(3, 38) 2 3 7 7 7 . True 41
(3, 39) 2 7 5 5 6 . True 42
(3, 5) 0 0 . 0 0 . False 7
(3, 6) 0 0 . 0 0 . False 7
(3, 8) 1 0 0 0 0 . False 12
(3, 9) 1 0 0 0 0 . True 12
(5, 3) 0 0 . 0 0 . False 7
(6, 3) 0 0 . 0 0 . False 7
(8, 3) 1 0 0 0 0 . False 12
(9, 3) 1 0 0 0 0 . True 12
(10, 3) 1 1 1 0 0 . True 13
(11, 3) 1 1 1 0 0 . False 13
(14, 3) 2 1 0 0 0 . True 17
(15, 3) 2 1 1 0 0 . True 18
(17, 3) 2 1 1 1 1 . True 20
(19, 3) 2 1 1 3 1 . True 22
(20, 3) 2 1 3 1 0 . True 23
(21, 3) 2 1 3 4 0 . True 24
(24, 3) 2 3 4 1 0 . True 27
(26, 3) 2 3 4 2 1 . True 29
(27, 3) 2 3 4 2 4 . True 30
(30, 3) 2 3 5 3 4 . True 33
(31, 3) 2 3 5 6 3 . True 34
(32, 3) 2 3 5 6 5 . True 35
(33, 3) 2 3 5 6 7 . True 36
(34, 3) 2 3 5 7 5 . True 37
(35, 3) 2 3 5 7 7 . True 38
(37, 3) 2 3 7 7 5 . True 40
(38, 3) 2 3 7 7 9 . True 41
(39, 3) 2 7 5 5 6 . True 42
------- ----------- ----- --
unseen: 5/13
------- ----------- ----- --
(0, 3) 0 . 0 0 0 . False 2
(1, 3) 0 . 0 0 0 . False 2
(3, 1) 0 0 . 0 0 . False 7
(3, 16) 2 1 1 0 0 . False 18
(3, 21) 2 1 3 4 0 . True 24
(3, 22) 2 1 3 4 0 . False 24
(3, 31) 2 3 5 6 3 . True 34
(3, 36) 2 3 7 7 5 . False 40
(3, 4) 0 0 . 0 0 . True 7
(3, 7) 1 0 0 0 0 . False 12
(22, 3) 2 1 3 4 0 . False 24
(23, 3) 2 3 4 0 0 . True 26
(25, 3) 2 3 4 1 1 . True 28
------- ----------- ----- --
--------------------------------------------------------------------------------
4: 56 examples
seen: 33/41
------- ----------- ----- --
(4, 0) 0 0 . 0 0 . False 7
(4, 10) 1 1 1 1 1 . True 14
(4, 11) 1 1 1 1 1 . False 14
(4, 12) 1 1 1 1 3 . True 16
(4, 13) 2 1 0 0 0 . True 17
(4, 14) 2 1 1 0 0 . True 18
(4, 17) 2 1 1 1 3 . True 21
(4, 18) 2 1 1 3 1 . True 22
(4, 19) 2 1 3 1 0 . True 23
(4, 20) 2 1 3 4 0 . True 24
(4, 23) 2 3 4 1 0 . True 27
(4, 25) 2 3 4 2 1 . True 29
(4, 26) 2 3 4 2 1 . False 29
(4, 27) 2 3 4 2 3 . True 31
(4, 28) 2 3 5 4 2 . True 32
(4, 29) 2 3 5 3 5 . False 34
(4, 30) 2 3 5 3 5 . True 34
(4, 31) 2 3 5 6 5 . True 35
(4, 33) 2 3 5 7 5 . True 37
(4, 34) 2 3 5 7 7 . True 38
(4, 36) 2 3 7 7 5 . True 40
(4, 37) 2 3 7 7 5 . False 40
(4, 38) 2 7 5 5 6 . True 42
(4, 39) 2 7 5 6 5 . False 42
(4, 4) 0 0 . 0 0 . False 7
(4, 6) 1 0 0 0 0 . False 12
(8, 4) 1 0 0 0 0 . True 12
(9, 4) 1 1 0 0 0 . True 13
(13, 4) 2 1 0 0 0 . True 17
(14, 4) 2 1 1 0 0 . True 18
(16, 4) 2 1 1 1 1 . True 20
(18, 4) 2 1 1 3 1 . True 22
(19, 4) 2 1 3 1 0 . True 23
(22, 4) 2 3 4 0 0 . True 26
(23, 4) 2 3 4 1 0 . True 27
(25, 4) 2 3 4 2 1 . True 29
(30, 4) 2 3 5 6 3 . True 34
(32, 4) 2 3 5 6 7 . True 36
(34, 4) 2 3 5 7 7 . True 38
(35, 4) 2 3 7 5 6 . True 39
(36, 4) 2 3 7 7 5 . True 40
------- ----------- ----- --
unseen: 8/15
------- ----------- ----- --
(1, 4) 0 0 . 0 0 . False 7
(3, 4) 0 0 . 0 0 . True 7
(4, 16) 2 1 1 1 0 . False 19
(4, 2) 0 0 . 0 0 . False 7
(4, 24) 2 3 4 2 1 . False 29
(4, 32) 2 3 5 6 5 . False 35
(4, 8) 1 1 1 0 0 . False 13
(4, 9) 1 1 1 0 0 . True 13
(5, 4) 0 0 . 0 0 . False 7
(17, 4) 2 1 1 1 3 . True 21
(21, 4) 2 3 1 0 0 . True 25
(24, 4) 2 3 4 1 1 . True 28
(28, 4) 2 3 4 3 4 . True 32
(33, 4) 2 3 5 7 5 . True 37
(37, 4) 2 3 7 7 7 . True 41
------- ----------- ----- --
--------------------------------------------------------------------------------
5: 60 examples
seen: 36/44
------- ----------- ----- --
(0, 5) 0 0 . 0 0 . False 7
(2, 5) 0 0 . 0 0 . True 7
(3, 5) 0 0 . 0 0 . False 7
(5, 1) 0 0 . 0 0 . False 7
(5, 10) 1 1 1 1 3 . False 16
(5, 11) 1 1 1 3 1 . True 16
(5, 13) 2 1 1 0 0 . True 18
(5, 14) 2 1 1 1 0 . True 19
(5, 16) 2 1 1 1 3 . True 21
(5, 17) 2 1 1 3 1 . True 22
(5, 19) 2 1 3 4 0 . True 24
(5, 2) 0 0 . 0 0 . True 7
(5, 20) 2 3 1 0 0 . True 25
(5, 21) 2 3 4 0 0 . True 26
(5, 24) 2 3 4 2 1 . True 29
(5, 25) 2 3 4 2 4 . True 30
(5, 26) 2 3 4 2 3 . True 31
(5, 29) 2 3 5 6 3 . True 34
(5, 3) 0 0 . 0 0 . False 7
(5, 31) 2 3 5 6 7 . True 36
(5, 35) 2 3 7 7 5 . True 40
(5, 36) 2 3 7 7 7 . True 41
(5, 37) 2 7 5 5 6 . True 42
(5, 38) 2 7 5 7 7 . True 43
(5, 7) 1 0 0 0 0 . True 12
(5, 9) 1 1 1 1 1 . True 14
(7, 5) 1 0 0 0 0 . True 12
(9, 5) 1 1 1 0 0 . False 13
(12, 5) 2 1 0 0 0 . True 17
(13, 5) 2 1 1 0 0 . True 18
(15, 5) 2 1 1 1 1 . True 20
(16, 5) 2 1 1 1 3 . True 21
(18, 5) 2 1 1 3 1 . False 22
(19, 5) 2 1 3 4 0 . True 24
(20, 5) 2 3 1 0 0 . True 25
(23, 5) 2 3 4 1 1 . True 28
(27, 5) 2 3 4 3 4 . True 32
(28, 5) 2 3 5 4 2 . False 32
(29, 5) 2 3 5 6 3 . True 34
(31, 5) 2 3 5 6 7 . True 36
(32, 5) 2 3 5 7 5 . True 37
(33, 5) 2 3 5 7 7 . True 38
(34, 5) 2 3 7 5 6 . True 39
(35, 5) 2 3 7 7 5 . True 40
------- ----------- ----- --
unseen: 3/16
------- ----------- ----- --
(1, 5) 0 0 . 0 0 . False 7
(5, 0) 0 0 . 0 0 . False 7
(5, 15) 2 1 1 1 3 . False 21
(5, 28) 2 3 5 3 5 . False 34
(5, 32) 2 3 5 6 7 . False 36
(5, 4) 0 0 . 0 0 . False 7
(5, 5) 0 0 . 0 0 . False 7
(5, 6) 1 0 0 0 0 . False 12
(5, 8) 1 1 1 1 0 . False 14
(11, 5) 1 1 1 1 1 . False 14
(14, 5) 2 1 1 1 0 . True 19
(21, 5) 2 3 1 0 0 . False 25
(25, 5) 2 3 4 2 4 . True 30
(36, 5) 2 3 7 7 5 . False 40
(37, 5) 2 7 5 5 6 . True 42
(38, 5) 2 7 5 6 5 . False 42
------- ----------- ----- --
--------------------------------------------------------------------------------
6: 63 examples
seen: 38/45
------- ----------- ----- --
(1, 6) 0 0 . 0 0 . True 7
(2, 6) 0 0 . 0 0 . False 7
(3, 6) 0 0 . 0 0 . False 7
(4, 6) 1 0 0 0 0 . False 12
(6, 0) 0 0 . 0 0 . False 7
(6, 1) 0 0 . 0 0 . True 7
(6, 11) 2 1 0 0 0 . True 17
(6, 12) 2 1 1 0 0 . True 18
(6, 14) 2 1 1 1 1 . True 20
(6, 16) 2 1 1 3 4 . True 22
(6, 18) 2 1 3 4 0 . True 24
(6, 2) 0 0 . 0 0 . False 7
(6, 23) 2 3 4 2 1 . True 29
(6, 27) 2 3 5 3 4 . True 33
(6, 28) 2 3 5 6 3 . True 34
(6, 3) 0 0 . 0 0 . False 7
(6, 33) 2 3 7 5 6 . True 39
(6, 34) 2 3 7 7 5 . True 40
(6, 35) 2 3 7 7 7 . True 41
(6, 37) 2 7 5 7 5 . True 43
(6, 6) 1 1 0 0 0 . False 13
(6, 7) 1 1 1 0 0 . True 13
(7, 6) 1 1 1 0 0 . True 13
(8, 6) 1 1 1 1 1 . True 14
(10, 6) 1 1 1 3 1 . True 16
(12, 6) 2 1 1 0 0 . True 18
(14, 6) 2 1 1 1 1 . True 20
(15, 6) 2 1 1 1 3 . True 21
(18, 6) 2 1 3 4 0 . True 24
(20, 6) 2 3 4 0 0 . True 26
(21, 6) 2 3 4 1 0 . True 27
(22, 6) 2 3 4 1 1 . True 28
(23, 6) 2 3 4 2 1 . True 29
(24, 6) 2 3 4 2 4 . True 30
(25, 6) 2 3 4 2 3 . True 31
(26, 6) 2 3 4 3 3 . True 32
(27, 6) 2 3 5 3 4 . True 33
(29, 6) 2 3 5 6 5 . True 35
(30, 6) 2 3 5 6 7 . True 36
(31, 6) 2 3 5 7 5 . True 37
(32, 6) 2 3 5 7 7 . True 38
(33, 6) 2 3 7 5 6 . True 39
(37, 6) 2 7 7 5 6 . True 43
(38, 6) 2 7 7 7 5 . True 44
(39, 6) 8 5 7 5 6 . True 45
------- ----------- ----- --
unseen: 7/18
------- ----------- ----- --
(0, 6) 0 0 . 0 0 . False 7
(5, 6) 1 0 0 0 0 . False 12
(6, 10) 2 1 0 0 0 . False 17
(6, 13) 2 1 1 0 0 . False 18
(6, 15) 2 1 1 1 3 . True 21
(6, 19) 2 3 1 0 0 . True 25
(6, 24) 2 3 4 2 4 . True 30
(6, 25) 2 3 4 2 4 . False 30
(6, 30) 2 3 5 6 7 . True 36
(6, 31) 2 3 5 7 5 . True 37
(6, 32) 2 3 7 5 6 . False 39
(6, 39) 2 7 7 5 7 . False 43
(6, 8) 1 1 1 1 0 . True 14
(9, 6) 1 1 1 1 3 . False 16
(11, 6) 1 1 1 3 1 . False 16
(13, 6) 2 1 1 1 1 . False 20
(17, 6) 2 1 3 4 0 . False 24
(36, 6) 2 7 5 6 7 . True 42
------- ----------- ----- --
--------------------------------------------------------------------------------
7: 59 examples
seen: 42/44
------- ----------- ----- --
(0, 7) 0 0 . 0 0 . True 7
(5, 7) 1 0 0 0 0 . True 12
(6, 7) 1 1 1 0 0 . True 13
(7, 10) 2 1 0 0 0 . True 17
(7, 12) 2 1 1 1 0 . True 19
(7, 18) 2 3 1 0 0 . True 25
(7, 19) 2 3 4 0 0 . True 26
(7, 2) 0 0 . 0 0 . False 7
(7, 20) 2 3 4 1 0 . True 27
(7, 22) 2 3 4 2 1 . True 29
(7, 24) 2 3 4 2 3 . True 31
(7, 25) 2 3 4 3 4 . True 32
(7, 26) 2 3 5 3 4 . True 33
(7, 28) 2 3 5 6 5 . True 35
(7, 30) 2 3 5 7 5 . True 37
(7, 31) 2 3 5 7 7 . True 38
(7, 35) 2 7 5 6 7 . True 42
(7, 38) 8 5 7 5 6 . True 45
(7, 39) 8 7 5 6 7 . True 46
(7, 5) 1 0 0 0 0 . True 12
(7, 6) 1 1 1 0 0 . True 13
(7, 7) 1 1 1 1 1 . True 14
(7, 8) 1 1 1 1 1 . False 14
(7, 9) 1 1 1 3 1 . True 16
(9, 7) 1 1 1 1 3 . True 16
(10, 7) 2 1 0 0 0 . True 17
(11, 7) 2 1 1 0 0 . True 18
(12, 7) 2 1 1 1 0 . True 19
(15, 7) 2 1 1 3 1 . True 22
(17, 7) 2 1 3 4 0 . True 24
(19, 7) 2 3 4 0 0 . True 26
(20, 7) 2 3 4 1 0 . True 27
(21, 7) 2 3 4 1 1 . True 28
(22, 7) 2 3 4 2 1 . True 29
(23, 7) 2 3 4 2 4 . True 30
(24, 7) 2 3 4 2 3 . True 31
(27, 7) 2 3 5 6 3 . True 34
(28, 7) 2 3 5 6 5 . True 35
(31, 7) 2 3 5 7 7 . True 38
(32, 7) 2 3 7 5 6 . True 39
(33, 7) 2 3 7 7 5 . True 40
(37, 7) 2 7 7 7 5 . True 44
(38, 7) 2 7 7 7 7 . True 45
(39, 7) 8 7 5 6 7 . True 46
------- ----------- ----- --
unseen: 5/15
------- ----------- ----- --
(1, 7) 0 0 . 0 0 . False 7
(2, 7) 0 0 . 0 0 . False 7
(3, 7) 1 0 0 0 0 . False 12
(7, 13) 2 1 1 1 0 . False 19
(7, 14) 2 1 1 3 1 . False 22
(7, 15) 2 1 1 3 1 . True 22
(7, 16) 2 1 3 4 0 . False 24
(7, 17) 2 3 1 0 0 . False 25
(7, 27) 2 3 5 6 3 . True 34
(7, 29) 2 3 5 6 7 . True 36
(7, 32) 2 3 7 5 6 . True 39
(18, 7) 2 1 3 4 0 . False 24
(30, 7) 2 3 5 7 7 . False 38
(34, 7) 2 7 5 5 6 . False 42
(35, 7) 2 7 5 6 7 . True 42
------- ----------- ----- --
--------------------------------------------------------------------------------
8: 57 examples
seen: 33/37
------- ----------- ----- --
(3, 8) 1 0 0 0 0 . False 12
(7, 8) 1 1 1 1 1 . False 14
(8, 12) 2 1 1 1 1 . True 20
(8, 15) 2 1 3 1 0 . True 23
(8, 17) 2 3 1 0 0 . True 25
(8, 20) 2 3 4 1 1 . True 28
(8, 21) 2 3 4 2 1 . True 29
(8, 24) 2 3 4 3 5 . True 32
(8, 25) 2 3 5 3 4 . True 33
(8, 26) 2 3 5 6 3 . True 34
(8, 28) 2 3 5 6 7 . True 36
(8, 29) 2 3 5 7 5 . True 37
(8, 3) 1 0 0 0 0 . False 12
(8, 30) 2 3 5 7 7 . True 38
(8, 31) 2 3 7 5 6 . True 39
(8, 33) 2 3 7 7 7 . True 41
(8, 34) 2 7 5 5 6 . True 42
(8, 36) 2 7 7 7 5 . True 44
(8, 39) 7 7 5 6 7 . True 47
(8, 4) 1 0 0 0 0 . True 12
(8, 6) 1 1 1 1 1 . True 14
(8, 8) 1 1 1 1 3 . True 16
(8, 9) 2 1 0 0 0 . True 17
(12, 8) 2 1 1 1 1 . True 20
(13, 8) 2 1 1 1 3 . True 21
(14, 8) 2 1 1 3 1 . True 22
(15, 8) 2 1 3 1 0 . True 23
(16, 8) 2 1 3 4 0 . True 24
(21, 8) 2 3 4 2 1 . True 29
(26, 8) 2 3 5 6 3 . True 34
(28, 8) 2 3 5 6 7 . True 36
(29, 8) 2 3 5 7 5 . True 37
(30, 8) 2 3 5 7 7 . True 38
(34, 8) 2 7 5 7 5 . False 43
(35, 8) 2 7 7 5 6 . True 43
(36, 8) 2 7 7 7 5 . True 44
(38, 8) 8 7 5 6 7 . True 46
------- ----------- ----- --
unseen: 8/20
------- ----------- ----- --
(0, 8) 0 0 . 0 0 . False 7
(2, 8) 1 0 0 0 0 . False 12
(4, 8) 1 1 1 0 0 . False 13
(5, 8) 1 1 1 1 0 . False 14
(6, 8) 1 1 1 1 0 . True 14
(8, 10) 2 1 1 0 0 . True 18
(8, 11) 2 1 1 1 1 . False 20
(8, 16) 2 1 3 4 0 . True 24
(8, 32) 2 3 7 5 7 . True 40
(8, 35) 2 7 7 5 6 . True 43
(8, 38) 8 7 5 6 7 . True 46
(9, 8) 1 1 1 3 1 . False 16
(18, 8) 2 3 1 0 0 . False 25
(20, 8) 2 3 4 2 1 . False 29
(23, 8) 2 3 5 3 4 . False 33
(24, 8) 2 3 4 3 4 . True 32
(25, 8) 2 3 4 3 3 . False 32
(27, 8) 2 3 5 6 5 . True 35
(31, 8) 2 3 7 5 7 . False 40
(37, 8) 8 7 5 6 7 . False 46
------- ----------- ----- --
--------------------------------------------------------------------------------
9: 64 examples
seen: 43/49
------- ----------- ----- --
(0, 9) 0 0 . 0 0 . False 7
(3, 9) 1 0 0 0 0 . True 12
(5, 9) 1 1 1 1 1 . True 14
(7, 9) 1 1 1 3 1 . True 16
(8, 9) 2 1 0 0 0 . True 17
(9, 1) 1 0 0 0 0 . False 12
(9, 10) 2 1 1 1 0 . True 19
(9, 11) 2 1 1 1 1 . True 20
(9, 12) 2 1 1 1 3 . True 21
(9, 13) 2 1 1 3 1 . True 22
(9, 14) 2 1 3 1 0 . True 23
(9, 15) 2 1 3 4 0 . True 24
(9, 19) 2 3 4 1 1 . True 28
(9, 2) 1 0 0 0 0 . False 12
(9, 20) 2 3 4 2 1 . True 29
(9, 21) 2 3 4 2 4 . True 30
(9, 23) 2 3 4 3 4 . True 32
(9, 24) 2 3 5 3 4 . True 33
(9, 26) 2 3 5 6 5 . True 35
(9, 27) 2 3 5 6 7 . True 36
(9, 29) 2 3 5 7 7 . True 38
(9, 3) 1 0 0 0 0 . True 12
(9, 30) 2 3 7 5 6 . True 39
(9, 31) 2 3 7 5 7 . True 40
(9, 33) 2 7 5 7 5 . False 43
(9, 35) 2 7 7 7 5 . True 44
(9, 36) 2 7 7 7 7 . True 45
(9, 38) 7 7 5 6 7 . True 47
(9, 39) 7 7 7 5 6 . True 48
(9, 4) 1 1 0 0 0 . True 13
(9, 5) 1 1 1 0 0 . False 13
(9, 7) 1 1 1 1 3 . True 16
(11, 9) 2 1 1 1 1 . True 20
(12, 9) 2 1 1 1 3 . True 21
(13, 9) 2 1 1 3 1 . True 22
(14, 9) 2 1 3 1 0 . True 23
(16, 9) 2 3 1 0 0 . True 25
(17, 9) 2 3 4 0 0 . True 26
(20, 9) 2 3 4 2 1 . True 29
(23, 9) 2 3 5 4 2 . True 32
(25, 9) 2 3 5 3 5 . True 34
(27, 9) 2 3 5 6 7 . True 36
(28, 9) 2 3 5 7 5 . True 37
(31, 9) 2 3 7 7 5 . True 40
(33, 9) 2 3 7 7 7 . False 41
(34, 9) 2 7 7 5 6 . True 43
(35, 9) 2 7 7 7 5 . True 44
(37, 9) 8 7 5 6 7 . True 46
(39, 9) 8 7 7 5 6 . True 48
------- ----------- ----- --
unseen: 4/15
------- ----------- ----- --
(1, 9) 0 0 . 0 0 . False 7
(4, 9) 1 1 1 0 0 . True 13
(9, 17) 2 3 1 0 0 . False 25
(9, 18) 2 3 4 1 0 . True 27
(9, 22) 2 3 4 2 3 . True 31
(9, 37) 2 7 7 7 5 . False 44
(9, 6) 1 1 1 1 3 . False 16
(9, 8) 1 1 1 3 1 . False 16
(9, 9) 1 1 1 3 1 . False 16
(10, 9) 2 1 1 1 1 . False 20
(18, 9) 2 3 4 0 0 . False 26
(24, 9) 2 3 5 4 2 . False 32
(26, 9) 2 3 5 6 5 . True 35
(36, 9) 2 7 7 7 5 . False 44
(38, 9) 8 7 5 6 7 . False 46
------- ----------- ----- --
--------------------------------------------------------------------------------
10: 57 examples
seen: 33/40
-------- ----------- ----- --
(0, 10) 1 0 0 0 0 . False 12
(1, 10) 1 0 0 0 0 . False 12
(3, 10) 1 1 0 0 0 . True 13
(4, 10) 1 1 1 1 1 . True 14
(5, 10) 1 1 1 1 3 . False 16
(7, 10) 2 1 0 0 0 . True 17
(9, 10) 2 1 1 1 0 . True 19
(10, 10) 2 1 1 1 1 . True 20
(10, 11) 2 1 1 1 3 . True 21
(10, 12) 2 1 1 3 1 . True 22
(10, 13) 2 1 3 1 0 . True 23
(10, 14) 2 1 3 4 0 . True 24
(10, 15) 2 3 1 0 0 . True 25
(10, 17) 2 3 4 1 0 . True 27
(10, 18) 2 3 4 1 1 . True 28
(10, 24) 2 3 5 6 3 . True 34
(10, 26) 2 3 5 6 7 . True 36
(10, 27) 2 3 5 7 5 . True 37
(10, 3) 1 1 1 0 0 . True 13
(10, 32) 2 7 7 5 6 . False 43
(10, 33) 2 7 7 5 6 . True 43
(10, 39) 8 7 7 7 7 . False 50
(10, 6) 1 1 1 3 1 . True 16
(10, 7) 2 1 0 0 0 . True 17
(11, 10) 2 1 1 1 3 . True 21
(12, 10) 2 1 1 3 1 . True 22
(13, 10) 2 1 3 1 0 . True 23
(15, 10) 2 3 1 0 0 . True 25
(16, 10) 2 3 4 0 0 . True 26
(18, 10) 2 3 4 1 1 . True 28
(19, 10) 2 3 4 2 1 . True 29
(21, 10) 2 3 4 2 3 . True 31
(22, 10) 2 3 4 3 4 . True 32
(23, 10) 2 3 5 3 5 . False 34
(28, 10) 2 3 5 7 7 . True 38
(31, 10) 2 3 7 7 7 . True 41
(32, 10) 2 7 5 5 6 . True 42
(34, 10) 2 7 7 7 5 . True 44
(35, 10) 2 7 7 7 7 . True 45
(38, 10) 8 7 5 7 7 . False 49
-------- ----------- ----- --
unseen: 8/17
-------- ----------- ----- --
(2, 10) 1 1 0 0 0 . False 13
(6, 10) 2 1 0 0 0 . False 17
(8, 10) 2 1 1 0 0 . True 18
(10, 1) 1 1 0 0 0 . False 13
(10, 16) 2 3 4 0 0 . True 26
(10, 22) 2 3 4 3 4 . True 32
(10, 28) 2 3 5 7 7 . True 38
(10, 30) 2 3 7 7 5 . True 40
(10, 31) 2 3 7 7 5 . False 40
(10, 36) 2 7 7 7 7 . False 45
(10, 37) 8 7 5 6 7 . False 46
(10, 9) 2 1 1 1 1 . False 20
(24, 10) 2 3 5 3 5 . True 34
(26, 10) 2 3 5 6 7 . True 36
(29, 10) 2 3 5 7 7 . False 38
(30, 10) 2 3 7 7 5 . True 40
(39, 10) 8 7 7 5 6 . False 48
-------- ----------- ----- --
--------------------------------------------------------------------------------
11: 59 examples
seen: 36/42
-------- ----------- ----- --
(3, 11) 1 1 1 0 0 . False 13
(4, 11) 1 1 1 1 1 . False 14
(5, 11) 1 1 1 3 1 . True 16
(6, 11) 2 1 0 0 0 . True 17
(9, 11) 2 1 1 1 1 . True 20
(10, 11) 2 1 1 1 3 . True 21
(11, 1) 1 0 0 0 0 . True 12
(11, 10) 2 1 1 1 3 . True 21
(11, 11) 2 1 1 3 1 . True 22
(11, 13) 2 1 3 4 0 . True 24
(11, 17) 2 3 4 1 1 . True 28
(11, 19) 2 3 4 2 4 . True 30
(11, 21) 2 3 5 4 2 . True 32
(11, 23) 2 3 5 3 5 . True 34
(11, 25) 2 3 5 6 7 . True 36
(11, 29) 2 3 7 7 5 . True 40
(11, 3) 1 1 1 0 0 . False 13
(11, 31) 2 7 5 6 5 . True 42
(11, 32) 2 7 7 7 5 . False 44
(11, 34) 2 7 7 7 7 . True 45
(11, 35) 8 7 5 6 7 . True 46
(11, 37) 8 7 7 5 6 . True 48
(11, 38) 8 7 7 5 7 . True 49
(11, 39) 8 7 7 7 7 . True 50
(11, 7) 2 1 1 0 0 . True 18
(11, 9) 2 1 1 1 1 . True 20
(14, 11) 2 3 1 0 0 . True 25
(15, 11) 2 3 4 0 0 . True 26
(17, 11) 2 3 4 1 1 . True 28
(19, 11) 2 3 4 2 1 . False 29
(20, 11) 2 3 4 2 3 . True 31
(22, 11) 2 3 5 3 4 . True 33
(23, 11) 2 3 5 6 7 . False 36
(26, 11) 2 3 5 7 5 . True 37
(27, 11) 2 3 5 7 7 . True 38
(28, 11) 2 3 7 5 6 . True 39
(30, 11) 2 3 7 7 7 . True 41
(31, 11) 2 7 5 6 5 . True 42
(33, 11) 2 7 7 7 5 . True 44
(34, 11) 2 7 7 7 7 . True 45
(36, 11) 7 7 5 6 7 . True 47
(37, 11) 8 7 7 5 6 . True 48
-------- ----------- ----- --
unseen: 6/17
-------- ----------- ----- --
(8, 11) 2 1 1 1 1 . False 20
(11, 0) 1 0 0 0 0 . False 12
(11, 12) 2 1 3 1 0 . True 23
(11, 14) 2 3 1 0 0 . True 25
(11, 16) 2 3 4 0 0 . False 26
(11, 18) 2 3 4 2 1 . True 29
(11, 2) 1 0 0 0 0 . False 12
(11, 20) 2 3 4 3 4 . False 32
(11, 30) 2 7 5 6 5 . False 42
(11, 5) 1 1 1 1 1 . False 14
(11, 6) 1 1 1 3 1 . False 16
(21, 11) 2 3 4 2 3 . False 31
(24, 11) 2 3 5 6 3 . False 34
(25, 11) 2 3 5 6 7 . True 36
(29, 11) 2 3 7 7 5 . True 40
(35, 11) 2 7 7 7 7 . False 45
(39, 11) 8 7 7 7 7 . True 50
-------- ----------- ----- --
--------------------------------------------------------------------------------
12: 63 examples
seen: 47/50
-------- ----------- ----- --
(0, 12) 1 0 0 0 0 . True 12
(1, 12) 1 1 0 0 0 . True 13
(3, 12) 1 1 1 1 1 . False 14
(4, 12) 1 1 1 1 3 . True 16
(6, 12) 2 1 1 0 0 . True 18
(7, 12) 2 1 1 1 0 . True 19
(8, 12) 2 1 1 1 1 . True 20
(9, 12) 2 1 1 1 3 . True 21
(10, 12) 2 1 1 3 1 . True 22
(12, 1) 1 1 1 0 0 . True 13
(12, 10) 2 1 1 3 1 . True 22
(12, 13) 2 3 1 0 0 . True 25
(12, 14) 2 3 4 0 0 . True 26
(12, 16) 2 3 4 1 1 . True 28
(12, 19) 2 3 4 2 3 . True 31
(12, 2) 1 1 1 0 0 . False 13
(12, 22) 2 3 5 3 5 . True 34
(12, 23) 2 3 5 6 5 . True 35
(12, 27) 2 3 7 5 6 . True 39
(12, 29) 2 3 7 7 7 . True 41
(12, 30) 2 7 5 6 5 . True 42
(12, 31) 2 7 5 7 5 . True 43
(12, 34) 8 7 5 6 7 . True 46
(12, 36) 8 7 7 5 6 . True 48
(12, 37) 8 7 7 5 7 . True 49
(12, 38) 8 7 7 9 7 . True 50
(12, 39) 8 7 9 7 7 . True 51
(12, 5) 2 1 0 0 0 . True 17
(12, 6) 2 1 1 0 0 . True 18
(12, 7) 2 1 1 1 0 . True 19
(12, 8) 2 1 1 1 1 . True 20
(12, 9) 2 1 1 1 3 . True 21
(13, 12) 2 3 1 0 0 . True 25
(16, 12) 2 3 4 1 1 . True 28
(17, 12) 2 3 4 2 1 . True 29
(18, 12) 2 3 4 2 1 . False 29
(19, 12) 2 3 4 2 3 . True 31
(20, 12) 2 3 4 3 3 . True 32
(22, 12) 2 3 5 3 5 . True 34
(24, 12) 2 3 5 6 7 . True 36
(25, 12) 2 3 5 7 5 . True 37
(26, 12) 2 3 5 7 7 . True 38
(28, 12) 2 3 7 7 5 . True 40
(29, 12) 2 3 7 7 7 . True 41
(32, 12) 2 7 7 7 5 . True 44
(34, 12) 8 7 5 6 7 . True 46
(36, 12) 8 7 7 5 6 . True 48
(37, 12) 8 7 7 7 5 . True 49
(38, 12) 8 7 7 7 9 . True 50
(39, 12) 8 7 9 7 7 . True 51
-------- ----------- ----- --
unseen: 6/13
-------- ----------- ----- --
(2, 12) 1 1 1 1 1 . True 14
(11, 12) 2 1 3 1 0 . True 23
(12, 17) 2 3 4 1 1 . False 28
(12, 24) 2 3 5 6 5 . False 35
(12, 25) 2 3 5 7 5 . True 37
(12, 28) 2 3 7 5 7 . True 40
(12, 32) 8 7 5 6 7 . False 46
(12, 33) 2 7 7 7 5 . False 44
(14, 12) 2 3 4 1 0 . False 27
(15, 12) 2 3 4 1 1 . False 28
(27, 12) 2 3 7 5 6 . True 39
(31, 12) 2 7 7 5 7 . True 43
(35, 12) 8 7 7 5 6 . False 48
-------- ----------- ----- --
--------------------------------------------------------------------------------
13: 58 examples
seen: 43/46
-------- ----------- ----- --
(0, 13) 1 1 0 0 0 . True 13
(1, 13) 1 1 1 0 0 . False 13
(4, 13) 2 1 0 0 0 . True 17
(5, 13) 2 1 1 0 0 . True 18
(9, 13) 2 1 1 3 1 . True 22
(10, 13) 2 1 3 1 0 . True 23
(11, 13) 2 1 3 4 0 . True 24
(12, 13) 2 3 1 0 0 . True 25
(13, 10) 2 1 3 1 0 . True 23
(13, 12) 2 3 1 0 0 . True 25
(13, 15) 2 3 4 1 1 . True 28
(13, 16) 2 3 4 2 1 . True 29
(13, 18) 2 3 4 2 3 . True 31
(13, 20) 2 3 5 3 4 . True 33
(13, 21) 2 3 5 6 3 . True 34
(13, 23) 2 3 5 6 7 . True 36
(13, 24) 2 3 5 7 5 . True 37
(13, 25) 2 3 5 7 7 . True 38
(13, 27) 2 3 7 7 5 . True 40
(13, 29) 2 7 5 6 7 . True 42
(13, 30) 2 7 7 5 6 . True 43
(13, 33) 7 5 7 5 6 . True 46
(13, 35) 8 7 7 5 6 . True 48
(13, 38) 8 7 9 7 7 . True 51
(13, 4) 2 1 0 0 0 . True 17
(13, 5) 2 1 1 0 0 . True 18
(13, 8) 2 1 1 1 3 . True 21
(13, 9) 2 1 1 3 1 . True 22
(15, 13) 2 3 4 1 1 . True 28
(16, 13) 2 3 4 2 1 . True 29
(18, 13) 2 3 4 2 4 . False 30
(20, 13) 2 3 5 3 4 . True 33
(21, 13) 2 3 5 6 3 . True 34
(24, 13) 2 3 5 7 5 . True 37
(25, 13) 2 3 5 7 7 . True 38
(27, 13) 2 3 7 7 5 . True 40
(28, 13) 2 3 7 7 7 . True 41
(29, 13) 2 7 5 6 7 . True 42
(30, 13) 2 7 7 5 6 . True 43
(31, 13) 2 7 7 7 5 . True 44
(32, 13) 7 5 7 5 6 . False 46
(33, 13) 7 5 7 5 6 . True 46
(34, 13) 7 7 5 6 7 . True 47
(35, 13) 7 7 7 5 6 . True 48
(36, 13) 8 7 7 7 5 . True 49
(39, 13) 8 7 9 7 9 . True 52
-------- ----------- ----- --
unseen: 3/12
-------- ----------- ----- --
(6, 13) 2 1 1 0 0 . False 18
(7, 13) 2 1 1 1 0 . False 19
(13, 14) 2 3 4 1 0 . True 27
(13, 19) 2 3 4 3 4 . True 32
(13, 22) 2 3 5 6 7 . False 36
(13, 28) 2 7 5 5 6 . False 42
(13, 31) 2 7 7 5 7 . False 43
(13, 32) 8 7 5 6 7 . False 46
(13, 6) 2 1 1 1 1 . False 20
(17, 13) 2 3 4 2 1 . False 29
(19, 13) 2 3 5 4 2 . True 32
(23, 13) 2 3 5 6 5 . False 35
-------- ----------- ----- --
--------------------------------------------------------------------------------
14: 59 examples
seen: 46/49
-------- ----------- ----- --
(0, 14) 1 1 1 1 1 . True 14
(1, 14) 1 1 1 1 1 . False 14
(2, 14) 1 1 1 3 1 . True 16
(3, 14) 2 1 0 0 0 . True 17
(4, 14) 2 1 1 0 0 . True 18
(5, 14) 2 1 1 1 0 . True 19
(6, 14) 2 1 1 1 1 . True 20
(9, 14) 2 1 3 1 0 . True 23
(10, 14) 2 1 3 4 0 . True 24
(12, 14) 2 3 4 0 0 . True 26
(14, 0) 1 1 1 1 1 . True 14
(14, 1) 1 1 1 1 3 . False 16
(14, 11) 2 3 1 0 0 . True 25
(14, 14) 2 3 4 1 1 . True 28
(14, 17) 2 3 4 2 3 . True 31
(14, 18) 2 3 4 3 4 . True 32
(14, 20) 2 3 5 3 5 . True 34
(14, 21) 2 3 5 6 5 . True 35
(14, 24) 2 3 5 7 7 . True 38
(14, 27) 2 3 7 7 7 . True 41
(14, 29) 2 7 7 5 6 . True 43
(14, 3) 2 1 0 0 0 . True 17
(14, 30) 2 7 7 7 5 . True 44
(14, 32) 8 7 5 6 7 . True 46
(14, 33) 7 7 5 6 7 . True 47
(14, 34) 8 7 7 5 6 . True 48
(14, 35) 8 7 7 7 5 . True 49
(14, 38) 8 7 9 7 9 . True 52
(14, 39) 8 9 7 7 7 . True 53
(14, 4) 2 1 1 0 0 . True 18
(14, 6) 2 1 1 1 1 . True 20
(14, 8) 2 1 1 3 1 . True 22
(14, 9) 2 1 3 1 0 . True 23
(15, 14) 2 3 4 2 1 . True 29
(16, 14) 2 3 4 2 4 . True 30
(17, 14) 2 3 4 2 3 . True 31
(18, 14) 2 3 4 2 3 . False 31
(20, 14) 2 3 5 6 3 . True 34
(21, 14) 2 3 5 6 5 . True 35
(22, 14) 2 3 5 6 7 . True 36
(27, 14) 2 3 7 7 7 . True 41
(28, 14) 2 7 5 6 7 . True 42
(30, 14) 2 7 7 7 5 . True 44
(31, 14) 2 7 7 7 7 . True 45
(34, 14) 8 7 7 5 6 . True 48
(35, 14) 8 7 7 7 5 . True 49
(36, 14) 8 7 7 7 9 . True 50
(38, 14) 8 7 9 7 9 . True 52
(39, 14) 8 9 7 7 9 . True 53
-------- ----------- ----- --
unseen: 5/10
-------- ----------- ----- --
(7, 14) 2 1 1 3 1 . False 22
(11, 14) 2 3 1 0 0 . True 25
(13, 14) 2 3 4 1 0 . True 27
(14, 12) 2 3 4 1 0 . False 27
(14, 25) 2 3 5 7 7 . False 38
(14, 26) 2 3 7 5 6 . False 39
(14, 31) 2 7 7 7 5 . False 44
(14, 5) 2 1 1 1 0 . True 19
(25, 14) 2 3 7 5 6 . True 39
(37, 14) 8 7 9 7 7 . True 51
-------- ----------- ----- --
--------------------------------------------------------------------------------
15: 62 examples
seen: 40/49
-------- ----------- ----- --
(0, 15) 1 1 1 1 1 . False 14
(2, 15) 2 1 0 0 0 . True 17
(8, 15) 2 1 3 1 0 . True 23
(9, 15) 2 1 3 4 0 . True 24
(10, 15) 2 3 1 0 0 . True 25
(13, 15) 2 3 4 1 1 . True 28
(15, 0) 1 1 1 1 1 . False 14
(15, 1) 1 1 1 3 1 . True 16
(15, 10) 2 3 1 0 0 . True 25
(15, 11) 2 3 4 0 0 . True 26
(15, 13) 2 3 4 1 1 . True 28
(15, 14) 2 3 4 2 1 . True 29
(15, 15) 2 3 4 2 1 . False 29
(15, 16) 2 3 4 2 3 . True 31
(15, 17) 2 3 4 3 4 . True 32
(15, 18) 2 3 5 3 4 . True 33
(15, 19) 2 3 5 6 3 . True 34
(15, 20) 2 3 5 6 5 . True 35
(15, 21) 2 3 5 6 7 . True 36
(15, 25) 2 3 7 7 5 . True 40
(15, 26) 2 3 7 7 7 . True 41
(15, 29) 2 7 7 7 5 . True 44
(15, 3) 2 1 1 0 0 . True 18
(15, 30) 2 7 7 7 7 . True 45
(15, 31) 7 5 7 5 6 . True 46
(15, 35) 7 7 7 7 7 . True 50
(15, 36) 7 7 7 7 9 . True 51
(15, 39) 7 7 9 7 9 . True 54
(15, 5) 2 1 1 1 1 . True 20
(15, 6) 2 1 1 1 3 . True 21
(15, 7) 2 1 1 3 1 . True 22
(15, 8) 2 1 3 1 0 . True 23
(16, 15) 2 3 4 2 3 . True 31
(17, 15) 2 3 4 3 4 . True 32
(18, 15) 2 3 4 2 3 . False 31
(19, 15) 2 3 5 6 3 . True 34
(20, 15) 2 3 5 6 5 . True 35
(23, 15) 2 3 5 7 7 . True 38
(24, 15) 2 3 7 5 6 . True 39
(25, 15) 2 3 7 7 5 . True 40
(28, 15) 2 7 5 6 7 . False 42
(30, 15) 7 5 6 7 5 . True 45
(31, 15) 7 5 7 5 6 . True 46
(32, 15) 7 5 7 5 7 . False 46
(33, 15) 7 7 5 7 5 . False 47
(35, 15) 7 7 7 7 7 . True 50
(36, 15) 7 7 7 7 9 . True 51
(37, 15) 7 7 7 7 9 . False 51
(38, 15) 7 7 7 9 7 . False 52
-------- ----------- ----- --
unseen: 7/13
-------- ----------- ----- --
(1, 15) 1 1 1 1 1 . False 14
(5, 15) 2 1 1 1 3 . False 21
(6, 15) 2 1 1 1 3 . True 21
(7, 15) 2 1 1 3 1 . True 22
(15, 12) 2 3 4 1 1 . False 28
(15, 23) 2 3 5 7 7 . True 38
(15, 27) 2 7 5 7 7 . False 43
(15, 32) 7 7 5 7 7 . True 47
(15, 38) 7 7 9 7 7 . True 53
(21, 15) 2 3 5 6 7 . True 36
(22, 15) 2 3 5 7 5 . True 37
(29, 15) 2 7 7 5 6 . False 43
(34, 15) 7 7 7 5 6 . False 48
-------- ----------- ----- --
--------------------------------------------------------------------------------
16: 60 examples
seen: 45/46
-------- ----------- ----- --
(1, 16) 2 1 0 0 0 . True 17
(5, 16) 2 1 1 1 3 . True 21
(6, 16) 2 1 1 3 4 . True 22
(12, 16) 2 3 4 1 1 . True 28
(13, 16) 2 3 4 2 1 . True 29
(15, 16) 2 3 4 2 3 . True 31
(16, 0) 1 1 1 3 1 . True 16
(16, 10) 2 3 4 0 0 . True 26
(16, 12) 2 3 4 1 1 . True 28
(16, 13) 2 3 4 2 1 . True 29
(16, 14) 2 3 4 2 4 . True 30
(16, 15) 2 3 4 2 3 . True 31
(16, 18) 2 3 5 6 3 . True 34
(16, 22) 2 3 5 7 7 . True 38
(16, 23) 2 3 7 5 6 . True 39
(16, 25) 2 3 7 7 7 . True 41
(16, 26) 2 3 7 7 7 . False 41
(16, 27) 2 7 7 5 6 . True 43
(16, 28) 2 7 7 7 5 . True 44
(16, 31) 7 5 7 7 5 . True 47
(16, 32) 7 7 7 5 6 . True 48
(16, 34) 7 7 7 7 7 . True 50
(16, 36) 7 7 7 9 7 . True 52
(16, 37) 7 7 9 7 7 . True 53
(16, 38) 7 7 9 7 9 . True 54
(16, 4) 2 1 1 1 1 . True 20
(16, 5) 2 1 1 1 3 . True 21
(16, 8) 2 1 3 4 0 . True 24
(16, 9) 2 3 1 0 0 . True 25
(17, 16) 2 3 5 3 4 . True 33
(19, 16) 2 3 5 6 5 . True 35
(20, 16) 2 3 5 6 7 . True 36
(21, 16) 2 3 5 7 5 . True 37
(23, 16) 2 3 7 5 6 . True 39
(24, 16) 2 3 7 7 5 . True 40
(27, 16) 2 7 7 5 6 . True 43
(29, 16) 7 5 6 7 5 . True 45
(30, 16) 7 5 7 5 6 . True 46
(31, 16) 7 7 5 6 7 . True 47
(33, 16) 7 7 7 5 7 . True 49
(34, 16) 7 7 7 7 7 . True 50
(35, 16) 7 7 7 7 9 . True 51
(36, 16) 7 7 7 9 7 . True 52
(37, 16) 7 7 9 7 7 . True 53
(38, 16) 7 7 9 7 9 . True 54
(39, 16) 7 7 9 9 7 . True 55
-------- ----------- ----- --
unseen: 4/14
-------- ----------- ----- --
(3, 16) 2 1 1 0 0 . False 18
(4, 16) 2 1 1 1 0 . False 19
(7, 16) 2 1 3 4 0 . False 24
(8, 16) 2 1 3 4 0 . True 24
(10, 16) 2 3 4 0 0 . True 26
(11, 16) 2 3 4 0 0 . False 26
(16, 1) 1 1 1 3 1 . False 16
(16, 24) 2 3 7 7 5 . True 40
(16, 39) 7 7 9 7 9 . False 54
(18, 16) 2 3 5 3 4 . False 33
(22, 16) 2 3 5 7 5 . False 37
(25, 16) 2 3 7 7 7 . True 41
(26, 16) 2 7 5 7 5 . False 43
(28, 16) 7 5 6 5 7 . False 45
-------- ----------- ----- --
--------------------------------------------------------------------------------
17: 59 examples
seen: 43/44
-------- ----------- ----- --
(0, 17) 2 1 0 0 0 . True 17
(1, 17) 2 1 1 0 0 . True 18
(4, 17) 2 1 1 1 3 . True 21
(5, 17) 2 1 1 3 1 . True 22
(8, 17) 2 3 1 0 0 . True 25
(10, 17) 2 3 4 1 0 . True 27
(11, 17) 2 3 4 1 1 . True 28
(14, 17) 2 3 4 2 3 . True 31
(15, 17) 2 3 4 3 4 . True 32
(17, 11) 2 3 4 1 1 . True 28
(17, 12) 2 3 4 2 1 . True 29
(17, 14) 2 3 4 2 3 . True 31
(17, 15) 2 3 4 3 4 . True 32
(17, 16) 2 3 5 3 4 . True 33
(17, 17) 2 3 5 6 3 . True 34
(17, 2) 2 1 1 1 0 . True 19
(17, 20) 2 3 5 7 5 . True 37
(17, 22) 2 3 7 5 6 . True 39
(17, 24) 2 3 7 7 7 . True 41
(17, 27) 2 7 7 7 5 . True 44
(17, 28) 7 5 6 7 5 . True 45
(17, 29) 7 5 7 5 6 . True 46
(17, 3) 2 1 1 1 1 . True 20
(17, 30) 7 7 5 6 7 . True 47
(17, 31) 7 7 7 5 6 . True 48
(17, 32) 7 7 7 7 5 . True 49
(17, 35) 7 7 7 9 7 . True 52
(17, 37) 7 7 9 7 9 . True 54
(17, 38) 7 7 9 9 7 . True 55
(17, 7) 2 1 3 4 0 . True 24
(17, 9) 2 3 4 0 0 . True 26
(19, 17) 2 3 5 6 7 . True 36
(20, 17) 2 3 5 7 5 . True 37
(23, 17) 2 3 7 7 5 . True 40
(26, 17) 2 7 7 5 6 . True 43
(27, 17) 2 7 7 7 5 . True 44
(28, 17) 7 5 6 7 5 . True 45
(29, 17) 7 5 7 5 6 . True 46
(30, 17) 7 7 5 6 7 . True 47
(31, 17) 7 7 7 5 7 . False 49
(32, 17) 7 7 7 7 5 . True 49
(35, 17) 7 7 7 9 7 . True 52
(37, 17) 7 7 9 7 9 . True 54
(39, 17) 7 9 7 7 9 . True 56
-------- ----------- ----- --
unseen: 5/15
-------- ----------- ----- --
(2, 17) 2 1 1 0 0 . False 18
(7, 17) 2 3 1 0 0 . False 25
(9, 17) 2 3 1 0 0 . False 25
(12, 17) 2 3 4 1 1 . False 28
(17, 1) 2 1 1 1 0 . False 19
(17, 13) 2 3 4 2 1 . False 29
(17, 26) 2 7 7 5 6 . True 43
(17, 33) 7 7 7 7 5 . False 49
(17, 4) 2 1 1 1 3 . True 21
(17, 6) 2 1 3 4 0 . False 24
(18, 17) 2 3 5 6 3 . False 34
(21, 17) 2 3 5 7 7 . True 38
(24, 17) 2 3 7 7 7 . True 41
(25, 17) 2 7 5 6 7 . True 42
(38, 17) 7 7 9 7 9 . False 54
-------- ----------- ----- --
--------------------------------------------------------------------------------
18: 59 examples
seen: 26/42
-------- ----------- ----- --
(0, 18) 2 1 1 0 0 . True 18
(4, 18) 2 1 1 3 1 . True 22
(6, 18) 2 1 3 4 0 . True 24
(7, 18) 2 3 1 0 0 . True 25
(10, 18) 2 3 4 1 1 . True 28
(13, 18) 2 3 4 2 3 . True 31
(14, 18) 2 3 4 3 4 . True 32
(15, 18) 2 3 5 3 4 . True 33
(16, 18) 2 3 5 6 3 . True 34
(18, 0) 2 1 1 0 0 . True 18
(18, 1) 2 1 1 1 0 . True 19
(18, 10) 2 3 4 1 1 . True 28
(18, 12) 2 3 4 2 1 . False 29
(18, 13) 2 3 4 2 4 . False 30
(18, 14) 2 3 4 2 3 . False 31
(18, 15) 2 3 4 2 3 . False 31
(18, 18) 2 3 5 6 7 . True 36
(18, 2) 2 1 1 1 0 . False 19
(18, 20) 2 3 5 7 7 . True 38
(18, 22) 2 3 7 5 6 . False 39
(18, 23) 2 3 7 7 5 . False 40
(18, 25) 2 3 7 7 7 . False 41
(18, 27) 2 7 7 7 5 . False 44
(18, 28) 7 5 6 7 5 . False 45
(18, 31) 7 5 7 7 5 . False 47
(18, 34) 7 7 7 7 7 . False 50
(18, 35) 7 7 7 9 7 . False 52
(18, 37) 7 7 9 7 7 . False 53
(18, 38) 7 7 9 9 7 . False 55
(18, 4) 2 1 1 3 1 . True 22
(18, 5) 2 1 1 3 1 . False 22
(18, 6) 2 1 3 4 0 . True 24
(19, 18) 2 3 5 7 5 . True 37
(20, 18) 2 3 5 7 7 . True 38
(21, 18) 2 3 7 5 6 . True 39
(22, 18) 2 3 7 7 5 . True 40
(25, 18) 2 7 7 5 6 . True 43
(26, 18) 2 7 7 7 5 . True 44
(27, 18) 7 5 6 7 5 . True 45
(29, 18) 7 7 5 6 7 . True 47
(31, 18) 7 7 7 7 5 . True 49
(36, 18) 7 7 9 7 9 . True 54
-------- ----------- ----- --
unseen: 5/17
-------- ----------- ----- --
(1, 18) 2 1 1 0 0 . False 18
(9, 18) 2 3 4 1 0 . True 27
(11, 18) 2 3 4 2 1 . True 29
(18, 16) 2 3 5 3 4 . False 33
(18, 17) 2 3 5 6 3 . False 34
(18, 24) 2 3 7 7 7 . False 41
(18, 26) 2 7 5 7 5 . False 43
(18, 32) 7 7 7 5 7 . False 49
(18, 39) 7 9 7 7 7 . False 55
(18, 7) 2 1 3 4 0 . False 24
(18, 8) 2 3 1 0 0 . False 25
(18, 9) 2 3 4 0 0 . False 26
(24, 18) 2 7 5 6 5 . True 42
(32, 18) 7 7 7 5 6 . False 48
(34, 18) 7 7 7 7 9 . False 51
(35, 18) 7 7 9 7 7 . True 53
(37, 18) 7 9 7 7 7 . True 55
-------- ----------- ----- --
--------------------------------------------------------------------------------
19: 56 examples
seen: 39/44
-------- ----------- ----- --
(0, 19) 2 1 1 1 0 . True 19
(2, 19) 2 1 1 3 1 . False 22
(4, 19) 2 1 3 1 0 . True 23
(5, 19) 2 1 3 4 0 . True 24
(7, 19) 2 3 4 0 0 . True 26
(9, 19) 2 3 4 1 1 . True 28
(11, 19) 2 3 4 2 4 . True 30
(12, 19) 2 3 4 2 3 . True 31
(15, 19) 2 3 5 6 3 . True 34
(19, 0) 2 1 1 1 0 . True 19
(19, 1) 2 1 1 1 1 . True 20
(19, 10) 2 3 4 2 1 . True 29
(19, 11) 2 3 4 2 1 . False 29
(19, 12) 2 3 4 2 3 . True 31
(19, 15) 2 3 5 6 3 . True 34
(19, 16) 2 3 5 6 5 . True 35
(19, 17) 2 3 5 6 7 . True 36
(19, 18) 2 3 5 7 5 . True 37
(19, 2) 2 1 1 1 3 . True 21
(19, 20) 2 3 7 5 6 . True 39
(19, 21) 2 3 7 7 5 . True 40
(19, 23) 2 7 5 7 5 . False 43
(19, 26) 7 5 6 7 5 . True 45
(19, 28) 7 7 5 6 7 . True 47
(19, 3) 2 1 1 3 1 . True 22
(19, 33) 7 7 7 9 7 . True 52
(19, 37) 7 9 7 7 9 . True 56
(19, 38) 7 9 7 9 7 . True 57
(19, 39) 9 7 7 9 7 . False 59
(19, 4) 2 1 3 1 0 . True 23
(19, 5) 2 1 3 4 0 . True 24
(19, 7) 2 3 4 0 0 . True 26
(22, 19) 2 3 7 7 7 . True 41
(24, 19) 2 7 7 5 7 . True 43
(25, 19) 2 7 7 7 5 . True 44
(27, 19) 7 5 7 5 6 . True 46
(28, 19) 7 7 5 6 7 . True 47
(29, 19) 7 7 7 5 6 . True 48
(34, 19) 7 7 9 7 7 . True 53
(35, 19) 7 7 9 7 9 . True 54
(36, 19) 7 7 9 9 7 . True 55
(37, 19) 7 9 7 9 7 . False 57
(38, 19) 7 9 7 9 7 . True 57
(39, 19) 7 9 9 7 7 . True 58
-------- ----------- ----- --
unseen: 8/12
-------- ----------- ----- --
(1, 19) 2 1 1 1 0 . False 19
(6, 19) 2 3 1 0 0 . True 25
(13, 19) 2 3 4 3 4 . True 32
(19, 13) 2 3 5 4 2 . True 32
(19, 19) 2 3 5 7 7 . True 38
(19, 24) 2 7 7 5 6 . True 43
(19, 25) 2 7 7 5 6 . False 43
(19, 31) 7 7 7 7 7 . True 50
(19, 32) 7 7 7 7 9 . True 51
(20, 19) 2 3 7 7 5 . False 40
(30, 19) 7 7 7 5 6 . False 48
(31, 19) 7 7 7 7 7 . True 50
-------- ----------- ----- --
--------------------------------------------------------------------------------
20: 62 examples
seen: 42/49
-------- ----------- ----- --
(1, 20) 2 1 1 1 3 . True 21
(2, 20) 2 1 1 3 1 . True 22
(3, 20) 2 1 3 4 0 . False 24
(4, 20) 2 1 3 4 0 . True 24
(5, 20) 2 3 1 0 0 . True 25
(7, 20) 2 3 4 1 0 . True 27
(8, 20) 2 3 4 1 1 . True 28
(9, 20) 2 3 4 2 1 . True 29
(13, 20) 2 3 5 3 4 . True 33
(14, 20) 2 3 5 3 5 . True 34
(15, 20) 2 3 5 6 5 . True 35
(17, 20) 2 3 5 7 5 . True 37
(18, 20) 2 3 5 7 7 . True 38
(19, 20) 2 3 7 5 6 . True 39
(20, 11) 2 3 4 2 3 . True 31
(20, 12) 2 3 4 3 3 . True 32
(20, 13) 2 3 5 3 4 . True 33
(20, 14) 2 3 5 6 3 . True 34
(20, 15) 2 3 5 6 5 . True 35
(20, 16) 2 3 5 6 7 . True 36
(20, 17) 2 3 5 7 5 . True 37
(20, 18) 2 3 5 7 7 . True 38
(20, 20) 2 3 7 7 5 . True 40
(20, 22) 2 7 5 7 7 . False 43
(20, 23) 2 7 7 5 7 . True 43
(20, 24) 2 7 7 7 5 . True 44
(20, 26) 7 5 7 5 6 . True 46
(20, 28) 7 7 7 5 6 . True 48
(20, 29) 7 7 7 7 7 . False 50
(20, 3) 2 1 3 1 0 . True 23
(20, 34) 7 7 9 7 9 . True 54
(20, 35) 7 9 7 7 9 . False 56
(20, 36) 7 9 7 7 9 . True 56
(20, 37) 7 9 7 9 7 . True 57
(20, 38) 7 9 9 7 7 . True 58
(20, 39) 9 7 9 7 7 . False 60
(20, 5) 2 3 1 0 0 . True 25
(20, 6) 2 3 4 0 0 . True 26
(20, 7) 2 3 4 1 0 . True 27
(20, 9) 2 3 4 2 1 . True 29
(21, 20) 2 3 7 7 7 . True 41
(22, 20) 2 7 5 6 5 . True 42
(31, 20) 7 7 7 9 7 . False 52
(32, 20) 7 7 7 9 7 . True 52
(33, 20) 7 7 9 7 7 . True 53
(34, 20) 7 7 9 7 9 . True 54
(35, 20) 7 7 9 9 7 . True 55
(36, 20) 7 9 7 7 9 . True 56
(39, 20) 9 7 9 7 7 . False 60
-------- ----------- ----- --
unseen: 4/13
-------- ----------- ----- --
(11, 20) 2 3 4 3 4 . False 32
(20, 19) 2 3 7 7 5 . False 40
(20, 21) 2 3 7 7 5 . False 40
(20, 27) 7 7 7 5 6 . False 48
(20, 31) 7 7 7 7 9 . True 51
(20, 32) 7 7 9 7 7 . False 53
(20, 33) 7 7 9 7 9 . False 54
(20, 8) 2 3 4 2 1 . False 29
(24, 20) 2 7 7 7 5 . True 44
(25, 20) 2 7 7 7 5 . False 44
(26, 20) 7 7 5 6 7 . False 47
(29, 20) 7 7 7 5 7 . True 49
(38, 20) 9 7 7 7 9 . True 58
-------- ----------- ----- --
--------------------------------------------------------------------------------
21: 52 examples
seen: 38/41
-------- ----------- ----- --
(1, 21) 2 1 1 3 1 . True 22
(5, 21) 2 3 4 0 0 . True 26
(8, 21) 2 3 4 2 1 . True 29
(9, 21) 2 3 4 2 4 . True 30
(11, 21) 2 3 5 4 2 . True 32
(13, 21) 2 3 5 6 3 . True 34
(14, 21) 2 3 5 6 5 . True 35
(15, 21) 2 3 5 6 7 . True 36
(19, 21) 2 3 7 7 5 . True 40
(21, 0) 2 1 1 1 3 . True 21
(21, 10) 2 3 4 2 3 . True 31
(21, 13) 2 3 5 6 3 . True 34
(21, 14) 2 3 5 6 5 . True 35
(21, 16) 2 3 5 7 5 . True 37
(21, 18) 2 3 7 5 6 . True 39
(21, 2) 2 1 3 1 0 . True 23
(21, 20) 2 3 7 7 7 . True 41
(21, 22) 2 7 7 5 6 . True 43
(21, 23) 2 7 7 7 5 . True 44
(21, 25) 7 5 7 5 6 . True 46
(21, 27) 7 7 7 5 6 . True 48
(21, 29) 7 7 7 7 7 . True 50
(21, 3) 2 1 3 4 0 . True 24
(21, 30) 7 7 7 7 9 . True 51
(21, 32) 7 7 7 9 9 . True 53
(21, 34) 7 7 9 9 7 . True 55
(21, 35) 7 9 7 9 7 . False 57
(21, 37) 7 9 9 7 7 . True 58
(21, 38) 9 7 7 9 7 . True 59
(21, 6) 2 3 4 1 0 . True 27
(21, 7) 2 3 4 1 1 . True 28
(21, 8) 2 3 4 2 1 . True 29
(23, 21) 2 7 7 7 5 . True 44
(25, 21) 7 5 7 5 6 . True 46
(26, 21) 7 7 5 6 7 . True 47
(28, 21) 7 7 7 7 5 . True 49
(30, 21) 7 7 7 7 7 . False 50
(31, 21) 7 7 9 7 7 . False 53
(35, 21) 7 9 7 7 9 . True 56
(37, 21) 7 9 9 7 7 . True 58
(38, 21) 9 7 7 9 7 . True 59
-------- ----------- ----- --
unseen: 8/11
-------- ----------- ----- --
(3, 21) 2 1 3 4 0 . True 24
(20, 21) 2 3 7 7 5 . False 40
(21, 11) 2 3 4 2 3 . False 31
(21, 15) 2 3 5 6 7 . True 36
(21, 17) 2 3 5 7 7 . True 38
(21, 24) 2 7 7 7 7 . True 45
(21, 26) 7 7 5 6 7 . True 47
(21, 28) 7 7 7 5 7 . True 49
(21, 31) 7 7 7 9 7 . True 52
(21, 4) 2 3 1 0 0 . True 25
(21, 5) 2 3 1 0 0 . False 25
-------- ----------- ----- --
--------------------------------------------------------------------------------
22: 58 examples
seen: 40/44
-------- ----------- ----- --
(0, 22) 2 1 1 3 1 . True 22
(2, 22) 2 1 3 4 0 . True 24
(7, 22) 2 3 4 2 1 . True 29
(12, 22) 2 3 5 3 5 . True 34
(16, 22) 2 3 5 7 7 . True 38
(17, 22) 2 3 7 5 6 . True 39
(18, 22) 2 3 7 5 6 . False 39
(20, 22) 2 7 5 7 7 . False 43
(21, 22) 2 7 7 5 6 . True 43
(22, 0) 2 1 1 3 1 . True 22
(22, 1) 2 1 3 1 0 . True 23
(22, 10) 2 3 4 3 4 . True 32
(22, 11) 2 3 5 3 4 . True 33
(22, 12) 2 3 5 3 5 . True 34
(22, 14) 2 3 5 6 7 . True 36
(22, 18) 2 3 7 7 5 . True 40
(22, 19) 2 3 7 7 7 . True 41
(22, 2) 2 1 3 4 0 . True 24
(22, 20) 2 7 5 6 5 . True 42
(22, 22) 2 7 7 7 5 . True 44
(22, 23) 2 7 7 7 7 . True 45
(22, 25) 7 7 5 6 7 . True 47
(22, 26) 7 7 7 5 6 . True 48
(22, 28) 7 7 7 7 7 . True 50
(22, 31) 7 7 7 9 9 . True 53
(22, 32) 7 7 9 7 9 . True 54
(22, 34) 7 9 7 9 7 . False 57
(22, 35) 7 9 7 9 9 . True 57
(22, 36) 9 7 7 7 9 . True 58
(22, 37) 9 7 7 9 7 . True 59
(22, 38) 9 7 9 7 7 . True 60
(22, 39) 9 7 9 9 7 . True 61
(22, 4) 2 3 4 0 0 . True 26
(22, 6) 2 3 4 1 1 . True 28
(22, 7) 2 3 4 2 1 . True 29
(23, 22) 2 7 7 7 7 . True 45
(26, 22) 7 7 7 5 6 . True 48
(27, 22) 7 7 7 7 5 . True 49
(29, 22) 7 7 7 7 7 . False 50
(32, 22) 7 7 9 7 9 . True 54
(33, 22) 7 9 7 7 7 . True 55
(36, 22) 9 7 7 7 9 . True 58
(37, 22) 9 7 7 9 7 . True 59
(38, 22) 9 7 9 7 7 . True 60
-------- ----------- ----- --
unseen: 8/14
-------- ----------- ----- --
(3, 22) 2 1 3 4 0 . False 24
(9, 22) 2 3 4 2 3 . True 31
(10, 22) 2 3 4 3 4 . True 32
(13, 22) 2 3 5 6 7 . False 36
(22, 15) 2 3 5 7 5 . True 37
(22, 16) 2 3 5 7 5 . False 37
(22, 27) 7 7 7 7 5 . True 49
(22, 3) 2 1 3 4 0 . False 24
(24, 22) 7 5 7 7 5 . False 47
(25, 22) 7 7 5 6 7 . True 47
(28, 22) 7 7 7 7 7 . True 50
(30, 22) 7 7 7 9 7 . True 52
(31, 22) 7 7 9 7 9 . False 54
(35, 22) 7 9 7 9 7 . True 57
-------- ----------- ----- --
--------------------------------------------------------------------------------
23: 56 examples
seen: 33/42
-------- ----------- ----- --
(2, 23) 2 3 1 0 0 . True 25
(4, 23) 2 3 4 1 0 . True 27
(6, 23) 2 3 4 2 1 . True 29
(9, 23) 2 3 4 3 4 . True 32
(11, 23) 2 3 5 3 5 . True 34
(12, 23) 2 3 5 6 5 . True 35
(13, 23) 2 3 5 6 7 . True 36
(16, 23) 2 3 7 5 6 . True 39
(18, 23) 2 3 7 7 5 . False 40
(19, 23) 2 7 5 7 5 . False 43
(20, 23) 2 7 7 5 7 . True 43
(21, 23) 2 7 7 7 5 . True 44
(22, 23) 2 7 7 7 7 . True 45
(23, 1) 2 1 3 4 0 . True 24
(23, 10) 2 3 5 3 5 . False 34
(23, 11) 2 3 5 6 7 . False 36
(23, 15) 2 3 5 7 7 . True 38
(23, 16) 2 3 7 5 6 . True 39
(23, 17) 2 3 7 7 5 . True 40
(23, 21) 2 7 7 7 5 . True 44
(23, 22) 2 7 7 7 7 . True 45
(23, 25) 7 7 7 5 6 . True 48
(23, 27) 7 7 7 7 7 . True 50
(23, 30) 7 7 9 7 7 . True 53
(23, 31) 7 7 9 7 9 . True 54
(23, 33) 7 9 7 7 7 . False 55
(23, 35) 9 7 7 9 7 . False 59
(23, 36) 9 7 7 9 7 . True 59
(23, 37) 9 7 9 7 7 . True 60
(23, 4) 2 3 4 1 0 . True 27
(23, 5) 2 3 4 1 1 . True 28
(23, 6) 2 3 4 2 1 . True 29
(23, 7) 2 3 4 2 4 . True 30
(23, 9) 2 3 5 4 2 . True 32
(24, 23) 7 7 5 6 7 . True 47
(25, 23) 7 7 7 5 6 . True 48
(27, 23) 7 7 7 7 7 . True 50
(28, 23) 7 7 7 7 9 . True 51
(33, 23) 7 9 7 7 7 . False 55
(34, 23) 7 9 7 9 9 . True 57
(35, 23) 9 7 7 9 7 . False 59
(39, 23) 9 9 7 7 9 . False 63
-------- ----------- ----- --
unseen: 11/14
-------- ----------- ----- --
(15, 23) 2 3 5 7 7 . True 38
(23, 0) 2 1 3 1 0 . True 23
(23, 13) 2 3 5 6 5 . False 35
(23, 2) 2 3 1 0 0 . True 25
(23, 24) 7 7 5 7 7 . True 47
(23, 26) 7 7 7 7 5 . True 49
(23, 29) 7 7 7 9 7 . True 52
(23, 3) 2 3 4 0 0 . True 26
(23, 32) 7 7 9 9 7 . True 55
(23, 38) 9 7 9 9 7 . True 61
(23, 8) 2 3 5 3 4 . False 33
(29, 23) 7 7 7 7 9 . False 51
(30, 23) 7 7 9 7 7 . True 53
(32, 23) 7 7 9 9 7 . True 55
-------- ----------- ----- --
--------------------------------------------------------------------------------
24: 62 examples
seen: 38/41
-------- ----------- ----- --
(0, 24) 2 1 3 4 0 . True 24
(1, 24) 2 3 1 0 0 . True 25
(2, 24) 2 3 4 0 0 . True 26
(3, 24) 2 3 4 1 0 . True 27
(5, 24) 2 3 4 2 1 . True 29
(7, 24) 2 3 4 2 3 . True 31
(8, 24) 2 3 4 3 5 . True 32
(9, 24) 2 3 5 3 4 . True 33
(10, 24) 2 3 5 6 3 . True 34
(13, 24) 2 3 5 7 5 . True 37
(14, 24) 2 3 5 7 7 . True 38
(17, 24) 2 3 7 7 7 . True 41
(20, 24) 2 7 7 7 5 . True 44
(24, 0) 2 1 3 4 0 . True 24
(24, 1) 2 3 1 0 0 . True 25
(24, 12) 2 3 5 6 7 . True 36
(24, 13) 2 3 5 7 5 . True 37
(24, 15) 2 3 7 5 6 . True 39
(24, 16) 2 3 7 7 5 . True 40
(24, 19) 2 7 7 5 7 . True 43
(24, 23) 7 7 5 6 7 . True 47
(24, 24) 7 7 7 5 6 . True 48
(24, 26) 7 7 7 7 7 . True 50
(24, 27) 7 7 7 7 9 . True 51
(24, 28) 7 7 7 9 7 . True 52
(24, 29) 7 7 9 7 7 . True 53
(24, 3) 2 3 4 1 0 . True 27
(24, 31) 7 7 9 9 7 . True 55
(24, 34) 9 7 7 7 9 . True 58
(24, 35) 9 7 7 9 7 . True 59
(24, 36) 9 7 9 7 7 . True 60
(24, 37) 9 7 9 7 9 . False 60
(24, 39) 9 9 7 7 9 . True 63
(24, 6) 2 3 4 2 4 . True 30
(24, 7) 2 3 4 2 3 . True 31
(28, 24) 7 7 7 9 9 . False 53
(29, 24) 7 7 7 9 9 . True 53
(30, 24) 7 7 9 7 9 . True 54
(33, 24) 7 9 7 9 7 . True 57
(34, 24) 7 9 9 7 7 . True 58
(38, 24) 9 7 9 9 7 . False 61
-------- ----------- ----- --
unseen: 14/21
-------- ----------- ----- --
(4, 24) 2 3 4 2 1 . False 29
(6, 24) 2 3 4 2 4 . True 30
(12, 24) 2 3 5 6 5 . False 35
(16, 24) 2 3 7 7 5 . True 40
(18, 24) 2 3 7 7 7 . False 41
(19, 24) 2 7 7 5 6 . True 43
(21, 24) 2 7 7 7 7 . True 45
(23, 24) 7 7 5 7 7 . True 47
(24, 10) 2 3 5 3 5 . True 34
(24, 11) 2 3 5 6 3 . False 34
(24, 17) 2 3 7 7 7 . True 41
(24, 18) 2 7 5 6 5 . True 42
(24, 20) 2 7 7 7 5 . True 44
(24, 22) 7 5 7 7 5 . False 47
(24, 30) 7 7 9 7 9 . True 54
(24, 4) 2 3 4 1 1 . True 28
(24, 8) 2 3 4 3 4 . True 32
(24, 9) 2 3 5 4 2 . False 32
(25, 24) 7 7 7 7 5 . True 49
(31, 24) 7 7 9 9 7 . True 55
(37, 24) 9 7 9 7 9 . False 60
-------- ----------- ----- --
--------------------------------------------------------------------------------
25: 63 examples
seen: 41/46
-------- ----------- ----- --
(1, 25) 2 3 4 0 0 . True 26
(2, 25) 2 3 4 1 0 . True 27
(4, 25) 2 3 4 2 1 . True 29
(5, 25) 2 3 4 2 4 . True 30
(7, 25) 2 3 4 3 4 . True 32
(8, 25) 2 3 5 3 4 . True 33
(11, 25) 2 3 5 6 7 . True 36
(13, 25) 2 3 5 7 7 . True 38
(15, 25) 2 3 7 7 5 . True 40
(16, 25) 2 3 7 7 7 . True 41
(18, 25) 2 3 7 7 7 . False 41
(21, 25) 7 5 7 5 6 . True 46
(22, 25) 7 7 5 6 7 . True 47
(23, 25) 7 7 7 5 6 . True 48
(25, 0) 2 3 1 0 0 . True 25
(25, 12) 2 3 5 7 5 . True 37
(25, 13) 2 3 5 7 7 . True 38
(25, 15) 2 3 7 7 5 . True 40
(25, 18) 2 7 7 5 6 . True 43
(25, 19) 2 7 7 7 5 . True 44
(25, 2) 2 3 4 1 0 . True 27
(25, 21) 7 5 7 5 6 . True 46
(25, 23) 7 7 7 5 6 . True 48
(25, 25) 7 7 7 7 7 . True 50
(25, 27) 7 7 7 7 9 . False 51
(25, 28) 7 7 7 9 9 . True 53
(25, 29) 7 7 9 7 9 . True 54
(25, 30) 7 7 9 9 7 . True 55
(25, 32) 7 9 7 9 7 . True 57
(25, 33) 7 9 9 7 7 . True 58
(25, 34) 9 7 7 9 7 . True 59
(25, 35) 9 7 9 7 7 . True 60
(25, 37) 9 7 9 9 7 . False 61
(25, 38) 9 9 7 7 9 . True 63
(25, 4) 2 3 4 2 1 . True 29
(25, 6) 2 3 4 2 3 . True 31
(25, 9) 2 3 5 3 5 . True 34
(26, 25) 7 7 7 7 9 . True 51
(28, 25) 7 7 9 7 7 . True 53
(30, 25) 7 7 9 9 7 . True 55
(31, 25) 7 9 7 9 7 . False 57
(32, 25) 7 9 7 9 7 . True 57
(33, 25) 7 9 9 7 7 . True 58
(35, 25) 9 7 9 7 9 . True 60
(37, 25) 9 9 7 7 9 . False 63
(39, 25) 9 9 7 9 7 . True 64
-------- ----------- ----- --
unseen: 10/17
-------- ----------- ----- --
(0, 25) 2 1 3 4 0 . False 24
(6, 25) 2 3 4 2 4 . False 30
(12, 25) 2 3 5 7 5 . True 37
(14, 25) 2 3 5 7 7 . False 38
(19, 25) 2 7 7 5 6 . False 43
(25, 11) 2 3 5 6 7 . True 36
(25, 14) 2 3 7 5 6 . True 39
(25, 16) 2 3 7 7 7 . True 41
(25, 17) 2 7 5 6 7 . True 42
(25, 20) 2 7 7 7 5 . False 44
(25, 22) 7 7 5 6 7 . True 47
(25, 24) 7 7 7 7 5 . True 49
(25, 26) 7 7 7 7 9 . True 51
(25, 3) 2 3 4 1 1 . True 28
(25, 5) 2 3 4 2 4 . True 30
(25, 8) 2 3 4 3 3 . False 32
(34, 25) 9 7 7 9 9 . False 58
-------- ----------- ----- --
--------------------------------------------------------------------------------
26: 56 examples
seen: 36/41
-------- ----------- ----- --
(0, 26) 2 3 4 0 0 . True 26
(1, 26) 2 3 4 1 0 . True 27
(2, 26) 2 3 4 1 0 . False 27
(4, 26) 2 3 4 2 1 . False 29
(5, 26) 2 3 4 2 3 . True 31
(7, 26) 2 3 5 3 4 . True 33
(8, 26) 2 3 5 6 3 . True 34
(9, 26) 2 3 5 6 5 . True 35
(10, 26) 2 3 5 6 7 . True 36
(15, 26) 2 3 7 7 7 . True 41
(16, 26) 2 3 7 7 7 . False 41
(19, 26) 7 5 6 7 5 . True 45
(20, 26) 7 5 7 5 6 . True 46
(22, 26) 7 7 7 5 6 . True 48
(24, 26) 7 7 7 7 7 . True 50
(26, 0) 2 3 4 0 0 . True 26
(26, 11) 2 3 5 7 5 . True 37
(26, 12) 2 3 5 7 7 . True 38
(26, 17) 2 7 7 5 6 . True 43
(26, 18) 2 7 7 7 5 . True 44
(26, 2) 2 3 4 1 1 . True 28
(26, 21) 7 7 5 6 7 . True 47
(26, 22) 7 7 7 5 6 . True 48
(26, 25) 7 7 7 7 9 . True 51
(26, 26) 7 7 7 9 7 . True 52
(26, 29) 7 9 7 7 7 . True 55
(26, 3) 2 3 4 2 1 . True 29
(26, 33) 9 7 7 9 7 . True 59
(26, 34) 9 7 9 7 7 . True 60
(26, 37) 9 9 7 7 9 . True 63
(26, 38) 9 9 7 9 7 . True 64
(26, 6) 2 3 4 3 3 . True 32
(26, 8) 2 3 5 6 3 . True 34
(28, 26) 7 7 9 7 9 . True 54
(29, 26) 7 7 9 9 7 . True 55
(31, 26) 7 9 7 9 9 . True 57
(33, 26) 9 7 7 9 7 . True 59
(35, 26) 9 7 9 9 7 . True 61
(36, 26) 9 9 7 7 9 . False 63
(37, 26) 9 9 7 9 7 . False 64
(38, 26) 9 9 7 9 7 . True 64
-------- ----------- ----- --
unseen: 8/15
-------- ----------- ----- --
(14, 26) 2 3 7 5 6 . False 39
(17, 26) 2 7 7 5 6 . True 43
(18, 26) 2 7 5 7 5 . False 43
(21, 26) 7 7 5 6 7 . True 47
(23, 26) 7 7 7 7 5 . True 49
(25, 26) 7 7 7 7 9 . True 51
(26, 10) 2 3 5 6 7 . True 36
(26, 16) 2 7 5 7 5 . False 43
(26, 20) 7 7 5 6 7 . False 47
(26, 30) 7 9 7 9 7 . False 57
(26, 31) 7 9 7 9 7 . True 57
(26, 32) 9 7 7 9 7 . False 59
(26, 39) 9 9 7 9 9 . True 65
(26, 9) 2 3 5 6 5 . True 35
(39, 26) 9 9 7 9 7 . False 64
-------- ----------- ----- --
--------------------------------------------------------------------------------
27: 58 examples
seen: 42/49
-------- ----------- ----- --
(2, 27) 2 3 4 2 1 . True 29
(3, 27) 2 3 4 2 4 . True 30
(4, 27) 2 3 4 2 3 . True 31
(6, 27) 2 3 5 3 4 . True 33
(9, 27) 2 3 5 6 7 . True 36
(10, 27) 2 3 5 7 5 . True 37
(12, 27) 2 3 7 5 6 . True 39
(13, 27) 2 3 7 7 5 . True 40
(14, 27) 2 3 7 7 7 . True 41
(16, 27) 2 7 7 5 6 . True 43
(17, 27) 2 7 7 7 5 . True 44
(18, 27) 2 7 7 7 5 . False 44
(21, 27) 7 7 7 5 6 . True 48
(23, 27) 7 7 7 7 7 . True 50
(24, 27) 7 7 7 7 9 . True 51
(25, 27) 7 7 7 7 9 . False 51
(27, 11) 2 3 5 7 7 . True 38
(27, 13) 2 3 7 7 5 . True 40
(27, 14) 2 3 7 7 7 . True 41
(27, 16) 2 7 7 5 6 . True 43
(27, 17) 2 7 7 7 5 . True 44
(27, 18) 7 5 6 7 5 . True 45
(27, 19) 7 5 7 5 6 . True 46
(27, 2) 2 3 4 2 1 . True 29
(27, 22) 7 7 7 7 5 . True 49
(27, 23) 7 7 7 7 7 . True 50
(27, 27) 7 7 9 7 9 . True 54
(27, 28) 7 7 9 9 7 . True 55
(27, 29) 7 9 7 7 9 . True 56
(27, 3) 2 3 4 2 4 . True 30
(27, 30) 7 9 7 9 9 . True 57
(27, 31) 7 9 9 7 7 . True 58
(27, 32) 9 7 9 7 7 . False 60
(27, 33) 9 7 9 9 7 . False 61
(27, 34) 9 7 9 9 7 . True 61
(27, 35) 9 9 7 7 9 . False 63
(27, 37) 9 9 7 9 7 . True 64
(27, 39) 9 9 9 7 7 . True 66
(27, 5) 2 3 4 3 4 . True 32
(27, 6) 2 3 5 3 4 . True 33
(27, 7) 2 3 5 6 3 . True 34
(27, 9) 2 3 5 6 7 . True 36
(29, 27) 7 9 7 9 7 . False 57
(30, 27) 7 9 7 9 7 . True 57
(31, 27) 7 9 9 7 7 . True 58
(34, 27) 9 7 9 9 7 . True 61
(35, 27) 9 9 7 7 9 . False 63
(36, 27) 9 9 7 7 9 . True 63
(38, 27) 9 9 7 9 9 . True 65
-------- ----------- ----- --
unseen: 6/9
-------- ----------- ----- --
(0, 27) 2 3 4 0 0 . False 26
(7, 27) 2 3 5 6 3 . True 34
(15, 27) 2 7 5 7 7 . False 43
(20, 27) 7 7 7 5 6 . False 48
(22, 27) 7 7 7 7 5 . True 49
(27, 12) 2 3 7 5 6 . True 39
(27, 8) 2 3 5 6 5 . True 35
(28, 27) 7 9 7 7 7 . True 55
(37, 27) 9 9 7 9 7 . True 64
-------- ----------- ----- --
--------------------------------------------------------------------------------
28: 65 examples
seen: 41/49
-------- ----------- ----- --
(0, 28) 2 3 4 1 1 . True 28
(4, 28) 2 3 5 4 2 . True 32
(6, 28) 2 3 5 6 3 . True 34
(7, 28) 2 3 5 6 5 . True 35
(8, 28) 2 3 5 6 7 . True 36
(16, 28) 2 7 7 7 5 . True 44
(17, 28) 7 5 6 7 5 . True 45
(18, 28) 7 5 6 7 5 . False 45
(19, 28) 7 7 5 6 7 . True 47
(20, 28) 7 7 7 5 6 . True 48
(22, 28) 7 7 7 7 7 . True 50
(24, 28) 7 7 7 9 7 . True 52
(25, 28) 7 7 7 9 9 . True 53
(27, 28) 7 7 9 9 7 . True 55
(28, 1) 2 3 4 2 1 . True 29
(28, 10) 2 3 5 7 7 . True 38
(28, 11) 2 3 7 5 6 . True 39
(28, 12) 2 3 7 7 5 . True 40
(28, 13) 2 3 7 7 7 . True 41
(28, 14) 2 7 5 6 7 . True 42
(28, 15) 2 7 5 6 7 . False 42
(28, 17) 7 5 6 7 5 . True 45
(28, 19) 7 7 5 6 7 . True 47
(28, 2) 2 3 4 2 4 . True 30
(28, 21) 7 7 7 7 5 . True 49
(28, 23) 7 7 7 7 9 . True 51
(28, 24) 7 7 7 9 9 . False 53
(28, 25) 7 7 9 7 7 . True 53
(28, 26) 7 7 9 7 9 . True 54
(28, 29) 7 9 7 9 9 . True 57
(28, 30) 7 9 9 7 7 . True 58
(28, 32) 9 7 9 7 7 . True 60
(28, 33) 9 7 9 9 7 . True 61
(28, 34) 9 9 7 7 9 . False 63
(28, 36) 9 9 7 9 7 . True 64
(28, 37) 9 9 7 9 9 . True 65
(28, 38) 9 9 9 7 7 . True 66
(28, 5) 2 3 5 4 2 . False 32
(28, 7) 2 3 5 6 5 . True 35
(28, 8) 2 3 5 6 7 . True 36
(28, 9) 2 3 5 7 5 . True 37
(29, 28) 7 9 7 9 7 . True 57
(32, 28) 9 7 7 9 7 . False 59
(33, 28) 9 7 9 7 9 . False 60
(34, 28) 9 7 9 9 9 . True 62
(35, 28) 9 9 7 7 9 . True 63
(36, 28) 9 9 7 9 7 . True 64
(38, 28) 9 9 7 9 9 . False 65
(39, 28) 9 9 9 7 9 . True 67
-------- ----------- ----- --
unseen: 10/16
-------- ----------- ----- --
(1, 28) 2 3 4 2 4 . False 30
(2, 28) 2 3 4 2 4 . True 30
(5, 28) 2 3 5 3 5 . False 34
(10, 28) 2 3 5 7 7 . True 38
(12, 28) 2 3 7 5 7 . True 40
(13, 28) 2 7 5 5 6 . False 42
(21, 28) 7 7 7 5 7 . True 49
(28, 0) 2 3 4 0 0 . False 26
(28, 16) 7 5 6 5 7 . False 45
(28, 22) 7 7 7 7 7 . True 50
(28, 27) 7 9 7 7 7 . True 55
(28, 31) 9 7 7 9 7 . True 59
(28, 35) 9 9 7 9 7 . False 64
(28, 39) 9 9 9 7 9 . True 67
(28, 4) 2 3 4 3 4 . True 32
(31, 28) 9 7 7 9 7 . True 59
-------- ----------- ----- --
--------------------------------------------------------------------------------
29: 59 examples
seen: 38/50
-------- ----------- ----- --
(0, 29) 2 3 4 2 1 . True 29
(4, 29) 2 3 5 3 5 . False 34
(5, 29) 2 3 5 6 3 . True 34
(8, 29) 2 3 5 7 5 . True 37
(9, 29) 2 3 5 7 7 . True 38
(11, 29) 2 3 7 7 5 . True 40
(12, 29) 2 3 7 7 7 . True 41
(13, 29) 2 7 5 6 7 . True 42
(14, 29) 2 7 7 5 6 . True 43
(15, 29) 2 7 7 7 5 . True 44
(17, 29) 7 5 7 5 6 . True 46
(20, 29) 7 7 7 7 7 . False 50
(21, 29) 7 7 7 7 7 . True 50
(24, 29) 7 7 9 7 7 . True 53
(25, 29) 7 7 9 7 9 . True 54
(26, 29) 7 9 7 7 7 . True 55
(27, 29) 7 9 7 7 9 . True 56
(28, 29) 7 9 7 9 9 . True 57
(29, 0) 2 3 4 2 1 . True 29
(29, 1) 2 3 4 2 1 . False 29
(29, 12) 2 3 7 7 7 . True 41
(29, 13) 2 7 5 6 7 . True 42
(29, 16) 7 5 6 7 5 . True 45
(29, 17) 7 5 7 5 6 . True 46
(29, 18) 7 7 5 6 7 . True 47
(29, 19) 7 7 7 5 6 . True 48
(29, 22) 7 7 7 7 7 . False 50
(29, 24) 7 7 7 9 9 . True 53
(29, 26) 7 7 9 9 7 . True 55
(29, 27) 7 9 7 9 7 . False 57
(29, 28) 7 9 7 9 7 . True 57
(29, 29) 7 9 9 7 7 . True 58
(29, 30) 9 7 7 7 9 . False 58
(29, 32) 9 7 9 7 9 . False 60
(29, 33) 9 7 9 9 9 . True 62
(29, 34) 9 9 7 7 9 . True 63
(29, 37) 9 9 7 9 9 . False 65
(29, 38) 9 9 9 7 7 . False 66
(29, 39) 9 9 9 7 9 . False 67
(29, 5) 2 3 5 6 3 . True 34
(29, 6) 2 3 5 6 5 . True 35
(29, 8) 2 3 5 7 5 . True 37
(30, 29) 9 7 7 9 7 . True 59
(31, 29) 9 7 9 9 7 . False 61
(32, 29) 9 7 9 9 7 . True 61
(34, 29) 9 9 7 7 9 . True 63
(36, 29) 9 9 7 9 9 . True 65
(37, 29) 9 9 9 7 7 . True 66
(38, 29) 9 9 9 7 9 . True 67
(39, 29) 9 9 9 7 9 . False 67
-------- ----------- ----- --
unseen: 5/9
-------- ----------- ----- --
(7, 29) 2 3 5 6 7 . True 36
(23, 29) 7 7 7 9 7 . True 52
(29, 10) 2 3 5 7 7 . False 38
(29, 11) 2 3 7 7 5 . True 40
(29, 15) 2 7 7 5 6 . False 43
(29, 2) 2 3 4 3 4 . False 32
(29, 20) 7 7 7 5 7 . True 49
(29, 23) 7 7 7 7 9 . False 51
(35, 29) 9 9 7 9 7 . True 64
-------- ----------- ----- --
--------------------------------------------------------------------------------
30: 62 examples
seen: 37/44
-------- ----------- ----- --
(0, 30) 2 3 4 2 4 . True 30
(1, 30) 2 3 4 2 3 . True 31
(2, 30) 2 3 4 3 3 . True 32
(4, 30) 2 3 5 3 5 . True 34
(7, 30) 2 3 5 7 5 . True 37
(8, 30) 2 3 5 7 7 . True 38
(9, 30) 2 3 7 5 6 . True 39
(12, 30) 2 7 5 6 5 . True 42
(13, 30) 2 7 7 5 6 . True 43
(14, 30) 2 7 7 7 5 . True 44
(15, 30) 2 7 7 7 7 . True 45
(17, 30) 7 7 5 6 7 . True 47
(21, 30) 7 7 7 7 9 . True 51
(23, 30) 7 7 9 7 7 . True 53
(25, 30) 7 7 9 9 7 . True 55
(27, 30) 7 9 7 9 9 . True 57
(28, 30) 7 9 9 7 7 . True 58
(29, 30) 9 7 7 7 9 . False 58
(30, 1) 2 3 4 2 1 . False 29
(30, 11) 2 3 7 7 7 . True 41
(30, 13) 2 7 7 5 6 . True 43
(30, 14) 2 7 7 7 5 . True 44
(30, 15) 7 5 6 7 5 . True 45
(30, 16) 7 5 7 5 6 . True 46
(30, 17) 7 7 5 6 7 . True 47
(30, 21) 7 7 7 7 7 . False 50
(30, 24) 7 7 9 7 9 . True 54
(30, 25) 7 7 9 9 7 . True 55
(30, 27) 7 9 7 9 7 . True 57
(30, 29) 9 7 7 9 7 . True 59
(30, 3) 2 3 5 3 4 . True 33
(30, 32) 9 9 7 7 9 . False 63
(30, 34) 9 9 7 9 7 . True 64
(30, 37) 9 9 9 7 9 . True 67
(30, 38) 9 9 9 7 9 . False 67
(30, 39) 9 9 9 9 7 . True 69
(30, 4) 2 3 5 6 3 . True 34
(30, 6) 2 3 5 6 7 . True 36
(30, 8) 2 3 5 7 7 . True 38
(31, 30) 9 7 9 9 7 . True 61
(32, 30) 9 9 7 7 9 . False 63
(33, 30) 9 9 7 7 9 . True 63
(38, 30) 9 9 9 7 9 . False 67
(39, 30) 9 9 9 9 7 . True 69
-------- ----------- ----- --
unseen: 10/18
-------- ----------- ----- --
(6, 30) 2 3 5 6 7 . True 36
(10, 30) 2 3 7 7 5 . True 40
(11, 30) 2 7 5 6 5 . False 42
(24, 30) 7 7 9 7 9 . True 54
(26, 30) 7 9 7 9 7 . False 57
(30, 0) 2 3 4 2 1 . False 29
(30, 10) 2 3 7 7 5 . True 40
(30, 19) 7 7 7 5 6 . False 48
(30, 2) 2 3 5 4 2 . True 32
(30, 22) 7 7 7 9 7 . True 52
(30, 23) 7 7 9 7 7 . True 53
(30, 31) 9 7 9 9 7 . True 61
(30, 33) 9 9 7 7 9 . True 63
(30, 36) 9 9 7 9 9 . False 65
(30, 7) 2 3 5 7 7 . False 38
(34, 30) 9 9 7 9 7 . True 64
(35, 30) 9 9 9 7 7 . False 66
(36, 30) 9 9 9 7 9 . False 67
-------- ----------- ----- --
--------------------------------------------------------------------------------
31: 68 examples
seen: 37/47
-------- ----------- ----- --
(1, 31) 2 3 4 3 4 . True 32
(2, 31) 2 3 5 3 4 . True 33
(4, 31) 2 3 5 6 5 . True 35
(5, 31) 2 3 5 6 7 . True 36
(7, 31) 2 3 5 7 7 . True 38
(8, 31) 2 3 7 5 6 . True 39
(9, 31) 2 3 7 5 7 . True 40
(11, 31) 2 7 5 6 5 . True 42
(12, 31) 2 7 5 7 5 . True 43
(15, 31) 7 5 7 5 6 . True 46
(16, 31) 7 5 7 7 5 . True 47
(17, 31) 7 7 7 5 6 . True 48
(18, 31) 7 5 7 7 5 . False 47
(22, 31) 7 7 7 9 9 . True 53
(23, 31) 7 7 9 7 9 . True 54
(24, 31) 7 7 9 9 7 . True 55
(27, 31) 7 9 9 7 7 . True 58
(31, 10) 2 3 7 7 7 . True 41
(31, 11) 2 7 5 6 5 . True 42
(31, 13) 2 7 7 7 5 . True 44
(31, 14) 2 7 7 7 7 . True 45
(31, 15) 7 5 7 5 6 . True 46
(31, 16) 7 7 5 6 7 . True 47
(31, 17) 7 7 7 5 7 . False 49
(31, 18) 7 7 7 7 5 . True 49
(31, 20) 7 7 7 9 7 . False 52
(31, 21) 7 7 9 7 7 . False 53
(31, 25) 7 9 7 9 7 . False 57
(31, 26) 7 9 7 9 9 . True 57
(31, 27) 7 9 9 7 7 . True 58
(31, 29) 9 7 9 9 7 . False 61
(31, 3) 2 3 5 6 3 . True 34
(31, 30) 9 7 9 9 7 . True 61
(31, 33) 9 9 7 9 7 . True 64
(31, 34) 9 9 7 9 9 . True 65
(31, 38) 9 9 9 9 7 . True 69
(31, 39) 9 9 9 9 7 . False 69
(31, 5) 2 3 5 6 7 . True 36
(31, 6) 2 3 5 7 5 . True 37
(31, 7) 2 3 5 7 7 . True 38
(31, 9) 2 3 7 7 5 . True 40
(32, 31) 9 9 7 7 9 . True 63
(33, 31) 9 9 7 9 7 . True 64
(34, 31) 9 9 7 9 7 . False 64
(35, 31) 9 9 9 7 7 . True 66
(38, 31) 9 9 9 7 9 . False 67
(39, 31) 9 9 9 9 7 . False 69
-------- ----------- ----- --
unseen: 15/21
-------- ----------- ----- --
(3, 31) 2 3 5 6 3 . True 34
(6, 31) 2 3 5 7 5 . True 37
(10, 31) 2 3 7 7 5 . False 40
(13, 31) 2 7 7 5 7 . False 43
(14, 31) 2 7 7 7 5 . False 44
(19, 31) 7 7 7 7 7 . True 50
(20, 31) 7 7 7 7 9 . True 51
(21, 31) 7 7 7 9 7 . True 52
(26, 31) 7 9 7 9 7 . True 57
(28, 31) 9 7 7 9 7 . True 59
(30, 31) 9 7 9 9 7 . True 61
(31, 1) 2 3 4 3 4 . True 32
(31, 12) 2 7 7 5 7 . True 43
(31, 19) 7 7 7 7 7 . True 50
(31, 22) 7 7 9 7 9 . False 54
(31, 24) 7 7 9 9 7 . True 55
(31, 28) 9 7 7 9 7 . True 59
(31, 31) 9 9 7 7 9 . False 63
(31, 36) 9 9 9 7 9 . True 67
(31, 8) 2 3 7 5 7 . False 40
(36, 31) 9 9 9 7 9 . True 67
-------- ----------- ----- --
--------------------------------------------------------------------------------
32: 58 examples
seen: 26/37
-------- ----------- ----- --
(0, 32) 2 3 4 3 4 . True 32
(2, 32) 2 3 5 6 3 . True 34
(10, 32) 2 7 7 5 6 . False 43
(11, 32) 2 7 7 7 5 . False 44
(14, 32) 8 7 5 6 7 . True 46
(16, 32) 7 7 7 5 6 . True 48
(17, 32) 7 7 7 7 5 . True 49
(21, 32) 7 7 7 9 9 . True 53
(22, 32) 7 7 9 7 9 . True 54
(25, 32) 7 9 7 9 7 . True 57
(27, 32) 9 7 9 7 7 . False 60
(28, 32) 9 7 9 7 7 . True 60
(29, 32) 9 7 9 7 9 . False 60
(30, 32) 9 9 7 7 9 . False 63
(32, 0) 2 3 5 4 2 . True 32
(32, 1) 2 3 5 4 2 . False 32
(32, 10) 2 7 5 5 6 . True 42
(32, 12) 2 7 7 7 5 . True 44
(32, 13) 7 5 7 5 6 . False 46
(32, 15) 7 5 7 5 7 . False 46
(32, 17) 7 7 7 7 5 . True 49
(32, 20) 7 7 7 9 7 . True 52
(32, 22) 7 7 9 7 9 . True 54
(32, 25) 7 9 7 9 7 . True 57
(32, 28) 9 7 7 9 7 . False 59
(32, 29) 9 7 9 9 7 . True 61
(32, 3) 2 3 5 6 5 . True 35
(32, 30) 9 9 7 7 9 . False 63
(32, 31) 9 9 7 7 9 . True 63
(32, 38) 9 9 9 9 7 . False 69
(32, 4) 2 3 5 6 7 . True 36
(32, 5) 2 3 5 7 5 . True 37
(32, 6) 2 3 5 7 7 . True 38
(32, 7) 2 3 7 5 6 . True 39
(33, 32) 9 9 7 9 9 . True 65
(35, 32) 9 9 9 7 9 . True 67
(37, 32) 9 9 9 9 7 . True 69
-------- ----------- ----- --
unseen: 8/21
-------- ----------- ----- --
(4, 32) 2 3 5 6 5 . False 35
(5, 32) 2 3 5 6 7 . False 36
(6, 32) 2 3 7 5 6 . False 39
(7, 32) 2 3 7 5 6 . True 39
(8, 32) 2 3 7 5 7 . True 40
(12, 32) 8 7 5 6 7 . False 46
(13, 32) 8 7 5 6 7 . False 46
(15, 32) 7 7 5 7 7 . True 47
(18, 32) 7 7 7 5 7 . False 49
(19, 32) 7 7 7 7 9 . True 51
(20, 32) 7 7 9 7 7 . False 53
(23, 32) 7 7 9 9 7 . True 55
(26, 32) 9 7 7 9 7 . False 59
(32, 18) 7 7 7 5 6 . False 48
(32, 2) 2 3 5 6 3 . True 34
(32, 23) 7 7 9 9 7 . True 55
(32, 33) 9 9 7 9 7 . False 64
(32, 34) 9 9 7 9 9 . False 65
(32, 36) 9 9 9 7 9 . False 67
(34, 32) 9 9 9 7 7 . True 66
(39, 32) 9 9 9 9 7 . False 69
-------- ----------- ----- --
--------------------------------------------------------------------------------
33: 53 examples
seen: 31/42
-------- ----------- ----- --
(1, 33) 2 3 5 6 3 . True 34
(4, 33) 2 3 5 7 5 . True 37
(6, 33) 2 3 7 5 6 . True 39
(8, 33) 2 3 7 7 7 . True 41
(9, 33) 2 7 5 7 5 . False 43
(10, 33) 2 7 7 5 6 . True 43
(13, 33) 7 5 7 5 6 . True 46
(14, 33) 7 7 5 6 7 . True 47
(19, 33) 7 7 7 9 7 . True 52
(23, 33) 7 9 7 7 7 . False 55
(25, 33) 7 9 9 7 7 . True 58
(26, 33) 9 7 7 9 7 . True 59
(27, 33) 9 7 9 9 7 . False 61
(28, 33) 9 7 9 9 7 . True 61
(29, 33) 9 7 9 9 9 . True 62
(31, 33) 9 9 7 9 7 . True 64
(33, 11) 2 7 7 7 5 . True 44
(33, 13) 7 5 7 5 6 . True 46
(33, 15) 7 7 5 7 5 . False 47
(33, 16) 7 7 7 5 7 . True 49
(33, 20) 7 7 9 7 7 . True 53
(33, 22) 7 9 7 7 7 . True 55
(33, 23) 7 9 7 7 7 . False 55
(33, 24) 7 9 7 9 7 . True 57
(33, 25) 7 9 9 7 7 . True 58
(33, 26) 9 7 7 9 7 . True 59
(33, 28) 9 7 9 7 9 . False 60
(33, 3) 2 3 5 6 7 . True 36
(33, 30) 9 9 7 7 9 . True 63
(33, 31) 9 9 7 9 7 . True 64
(33, 32) 9 9 7 9 9 . True 65
(33, 33) 9 9 9 7 7 . True 66
(33, 36) 9 9 9 9 7 . True 69
(33, 37) 9 9 9 9 7 . False 69
(33, 38) 9 9 9 9 7 . False 69
(33, 5) 2 3 5 7 7 . True 38
(33, 6) 2 3 7 5 6 . True 39
(33, 7) 2 3 7 7 5 . True 40
(33, 9) 2 3 7 7 7 . False 41
(35, 33) 9 9 9 7 9 . False 67
(36, 33) 9 9 9 9 7 . True 69
(39, 33) 9 9 9 9 9 . False 73
-------- ----------- ----- --
unseen: 4/11
-------- ----------- ----- --
(2, 33) 2 3 5 6 7 . False 36
(12, 33) 2 7 7 7 5 . False 44
(17, 33) 7 7 7 7 5 . False 49
(20, 33) 7 7 9 7 9 . False 54
(30, 33) 9 9 7 7 9 . True 63
(32, 33) 9 9 7 9 7 . False 64
(33, 2) 2 3 5 6 7 . False 36
(33, 34) 9 9 9 7 9 . True 67
(33, 4) 2 3 5 7 5 . True 37
(34, 33) 9 9 9 7 9 . True 67
(37, 33) 9 9 9 9 7 . False 69
-------- ----------- ----- --
--------------------------------------------------------------------------------
34: 62 examples
seen: 41/50
-------- ----------- ----- --
(0, 34) 2 3 5 6 3 . True 34
(1, 34) 2 3 5 6 5 . True 35
(2, 34) 2 3 5 6 7 . True 36
(3, 34) 2 3 5 7 5 . True 37
(4, 34) 2 3 5 7 7 . True 38
(6, 34) 2 3 7 7 5 . True 40
(8, 34) 2 7 5 5 6 . True 42
(11, 34) 2 7 7 7 7 . True 45
(12, 34) 8 7 5 6 7 . True 46
(14, 34) 8 7 7 5 6 . True 48
(16, 34) 7 7 7 7 7 . True 50
(18, 34) 7 7 7 7 7 . False 50
(20, 34) 7 7 9 7 9 . True 54
(21, 34) 7 7 9 9 7 . True 55
(22, 34) 7 9 7 9 7 . False 57
(24, 34) 9 7 7 7 9 . True 58
(25, 34) 9 7 7 9 7 . True 59
(26, 34) 9 7 9 7 7 . True 60
(27, 34) 9 7 9 9 7 . True 61
(28, 34) 9 9 7 7 9 . False 63
(29, 34) 9 9 7 7 9 . True 63
(30, 34) 9 9 7 9 7 . True 64
(31, 34) 9 9 7 9 9 . True 65
(34, 1) 2 3 5 6 3 . False 34
(34, 10) 2 7 7 7 5 . True 44
(34, 11) 2 7 7 7 7 . True 45
(34, 12) 8 7 5 6 7 . True 46
(34, 13) 7 7 5 6 7 . True 47
(34, 14) 8 7 7 5 6 . True 48
(34, 16) 7 7 7 7 7 . True 50
(34, 19) 7 7 9 7 7 . True 53
(34, 20) 7 7 9 7 9 . True 54
(34, 23) 7 9 7 9 9 . True 57
(34, 24) 7 9 9 7 7 . True 58
(34, 27) 9 7 9 9 7 . True 61
(34, 28) 9 7 9 9 9 . True 62
(34, 29) 9 9 7 7 9 . True 63
(34, 3) 2 3 5 7 5 . True 37
(34, 31) 9 9 7 9 7 . False 64
(34, 35) 9 9 9 9 7 . True 69
(34, 38) 9 9 9 9 9 . False 73
(34, 39) 9 9 9 9 9 . True 73
(34, 4) 2 3 5 7 7 . True 38
(34, 5) 2 3 7 5 6 . True 39
(34, 8) 2 7 5 7 5 . False 43
(34, 9) 2 7 7 5 6 . True 43
(35, 34) 9 9 9 9 7 . True 69
(36, 34) 9 9 9 9 7 . False 69
(37, 34) 9 9 9 9 7 . False 69
(39, 34) 9 9 9 9 9 . True 73
-------- ----------- ----- --
unseen: 4/12
-------- ----------- ----- --
(32, 34) 9 9 7 9 9 . False 65
(33, 34) 9 9 9 7 9 . True 67
(34, 15) 7 7 7 5 6 . False 48
(34, 18) 7 7 7 7 9 . False 51
(34, 2) 2 3 5 7 5 . False 37
(34, 25) 9 7 7 9 9 . False 58
(34, 30) 9 9 7 9 7 . True 64
(34, 32) 9 9 9 7 7 . True 66
(34, 33) 9 9 9 7 9 . True 67
(34, 36) 9 9 9 9 9 . False 73
(34, 37) 9 9 9 9 9 . False 73
(34, 7) 2 7 5 5 6 . False 42
-------- ----------- ----- --
--------------------------------------------------------------------------------
35: 63 examples
seen: 39/52
-------- ----------- ----- --
(0, 35) 2 3 5 6 5 . True 35
(1, 35) 2 3 5 6 7 . True 36
(2, 35) 2 3 5 7 5 . True 37
(3, 35) 2 3 5 7 7 . True 38
(5, 35) 2 3 7 7 5 . True 40
(6, 35) 2 3 7 7 7 . True 41
(7, 35) 2 7 5 6 7 . True 42
(9, 35) 2 7 7 7 5 . True 44
(11, 35) 8 7 5 6 7 . True 46
(13, 35) 8 7 7 5 6 . True 48
(14, 35) 8 7 7 7 5 . True 49
(15, 35) 7 7 7 7 7 . True 50
(17, 35) 7 7 7 9 7 . True 52
(18, 35) 7 7 7 9 7 . False 52
(20, 35) 7 9 7 7 9 . False 56
(21, 35) 7 9 7 9 7 . False 57
(22, 35) 7 9 7 9 9 . True 57
(23, 35) 9 7 7 9 7 . False 59
(24, 35) 9 7 7 9 7 . True 59
(25, 35) 9 7 9 7 7 . True 60
(27, 35) 9 9 7 7 9 . False 63
(34, 35) 9 9 9 9 7 . True 69
(35, 10) 2 7 7 7 7 . True 45
(35, 13) 7 7 7 5 6 . True 48
(35, 14) 8 7 7 7 5 . True 49
(35, 15) 7 7 7 7 7 . True 50
(35, 16) 7 7 7 7 9 . True 51
(35, 17) 7 7 7 9 7 . True 52
(35, 19) 7 7 9 7 9 . True 54
(35, 20) 7 7 9 9 7 . True 55
(35, 21) 7 9 7 7 9 . True 56
(35, 23) 9 7 7 9 7 . False 59
(35, 25) 9 7 9 7 9 . True 60
(35, 26) 9 7 9 9 7 . True 61
(35, 27) 9 9 7 7 9 . False 63
(35, 28) 9 9 7 7 9 . True 63
(35, 3) 2 3 5 7 7 . True 38
(35, 31) 9 9 9 7 7 . True 66
(35, 32) 9 9 9 7 9 . True 67
(35, 33) 9 9 9 7 9 . False 67
(35, 34) 9 9 9 9 7 . True 69
(35, 35) 9 9 9 9 7 . False 69
(35, 37) 9 9 9 9 9 . False 73
(35, 38) 9 9 9 9 9 . True 73
(35, 39) 9 9 9 9 9 . False 73
(35, 4) 2 3 7 5 6 . True 39
(35, 5) 2 3 7 7 5 . True 40
(35, 8) 2 7 7 5 6 . True 43
(35, 9) 2 7 7 7 5 . True 44
(36, 35) 9 9 9 9 7 . False 69
(38, 35) 9 9 9 9 9 . True 73
(39, 35) 9 9 9 9 9 . False 73
-------- ----------- ----- --
unseen: 6/11
-------- ----------- ----- --
(8, 35) 2 7 7 5 6 . True 43
(28, 35) 9 9 7 9 7 . False 64
(35, 0) 2 3 5 6 7 . False 36
(35, 1) 2 3 5 6 7 . True 36
(35, 11) 2 7 7 7 7 . False 45
(35, 12) 8 7 7 5 6 . False 48
(35, 18) 7 7 9 7 7 . True 53
(35, 22) 7 9 7 9 7 . True 57
(35, 29) 9 9 7 9 7 . True 64
(35, 30) 9 9 9 7 7 . False 66
(35, 7) 2 7 5 6 7 . True 42
-------- ----------- ----- --
--------------------------------------------------------------------------------
36: 52 examples
seen: 30/34
-------- ----------- ----- --
(4, 36) 2 3 7 7 5 . True 40
(5, 36) 2 3 7 7 7 . True 41
(8, 36) 2 7 7 7 5 . True 44
(9, 36) 2 7 7 7 7 . True 45
(12, 36) 8 7 7 5 6 . True 48
(15, 36) 7 7 7 7 9 . True 51
(16, 36) 7 7 7 9 7 . True 52
(20, 36) 7 9 7 7 9 . True 56
(22, 36) 9 7 7 7 9 . True 58
(23, 36) 9 7 7 9 7 . True 59
(24, 36) 9 7 9 7 7 . True 60
(28, 36) 9 9 7 9 7 . True 64
(33, 36) 9 9 9 9 7 . True 69
(36, 0) 2 3 5 6 7 . True 36
(36, 11) 7 7 5 6 7 . True 47
(36, 12) 8 7 7 5 6 . True 48
(36, 13) 8 7 7 7 5 . True 49
(36, 14) 8 7 7 7 9 . True 50
(36, 15) 7 7 7 7 9 . True 51
(36, 16) 7 7 7 9 7 . True 52
(36, 18) 7 7 9 7 9 . True 54
(36, 19) 7 7 9 9 7 . True 55
(36, 20) 7 9 7 7 9 . True 56
(36, 22) 9 7 7 7 9 . True 58
(36, 26) 9 9 7 7 9 . False 63
(36, 27) 9 9 7 7 9 . True 63
(36, 28) 9 9 7 9 7 . True 64
(36, 29) 9 9 7 9 9 . True 65
(36, 33) 9 9 9 9 7 . True 69
(36, 34) 9 9 9 9 7 . False 69
(36, 35) 9 9 9 9 7 . False 69
(36, 39) 9 9 9 9 9 . False 73
(36, 4) 2 3 7 7 5 . True 40
(36, 8) 2 7 7 7 5 . True 44
-------- ----------- ----- --
unseen: 6/18
-------- ----------- ----- --
(0, 36) 2 3 5 6 7 . True 36
(1, 36) 2 3 5 7 7 . False 38
(3, 36) 2 3 7 7 5 . False 40
(10, 36) 2 7 7 7 7 . False 45
(30, 36) 9 9 7 9 9 . False 65
(31, 36) 9 9 9 7 9 . True 67
(32, 36) 9 9 9 7 9 . False 67
(34, 36) 9 9 9 9 9 . False 73
(36, 2) 2 3 5 7 7 . True 38
(36, 30) 9 9 9 7 9 . False 67
(36, 31) 9 9 9 7 9 . True 67
(36, 38) 9 9 9 9 9 . False 73
(36, 5) 2 3 7 7 5 . False 40
(36, 6) 2 7 5 6 7 . True 42
(36, 9) 2 7 7 7 5 . False 44
(37, 36) 9 9 9 9 9 . True 73
(38, 36) 9 9 9 9 9 . False 73
(39, 36) 9 9 9 9 9 . False 73
-------- ----------- ----- --
--------------------------------------------------------------------------------
37: 61 examples
seen: 31/45
-------- ----------- ----- --
(0, 37) 2 3 5 7 5 . True 37
(3, 37) 2 3 7 7 5 . True 40
(4, 37) 2 3 7 7 5 . False 40
(5, 37) 2 7 5 5 6 . True 42
(6, 37) 2 7 5 7 5 . True 43
(11, 37) 8 7 7 5 6 . True 48
(12, 37) 8 7 7 5 7 . True 49
(16, 37) 7 7 9 7 7 . True 53
(17, 37) 7 7 9 7 9 . True 54
(18, 37) 7 7 9 7 7 . False 53
(19, 37) 7 9 7 7 9 . True 56
(20, 37) 7 9 7 9 7 . True 57
(21, 37) 7 9 9 7 7 . True 58
(22, 37) 9 7 7 9 7 . True 59
(23, 37) 9 7 9 7 7 . True 60
(24, 37) 9 7 9 7 9 . False 60
(25, 37) 9 7 9 9 7 . False 61
(26, 37) 9 9 7 7 9 . True 63
(27, 37) 9 9 7 9 7 . True 64
(28, 37) 9 9 7 9 9 . True 65
(29, 37) 9 9 7 9 9 . False 65
(30, 37) 9 9 9 7 9 . True 67
(33, 37) 9 9 9 9 7 . False 69
(35, 37) 9 9 9 9 9 . False 73
(37, 0) 2 3 5 7 5 . True 37
(37, 11) 8 7 7 5 6 . True 48
(37, 12) 8 7 7 7 5 . True 49
(37, 15) 7 7 7 7 9 . False 51
(37, 16) 7 7 9 7 7 . True 53
(37, 17) 7 7 9 7 9 . True 54
(37, 19) 7 9 7 9 7 . False 57
(37, 2) 2 3 7 5 6 . True 39
(37, 21) 7 9 9 7 7 . True 58
(37, 22) 9 7 7 9 7 . True 59
(37, 25) 9 9 7 7 9 . False 63
(37, 26) 9 9 7 9 7 . False 64
(37, 29) 9 9 9 7 7 . True 66
(37, 3) 2 3 7 7 5 . True 40
(37, 32) 9 9 9 9 7 . True 69
(37, 34) 9 9 9 9 7 . False 69
(37, 6) 2 7 7 5 6 . True 43
(37, 7) 2 7 7 7 5 . True 44
(37, 9) 8 7 5 6 7 . True 46
(38, 37) 9 9 9 9 9 . False 73
(39, 37) 9 9 9 9 9 . False 73
-------- ----------- ----- --
unseen: 6/16
-------- ----------- ----- --
(9, 37) 2 7 7 7 5 . False 44
(10, 37) 8 7 5 6 7 . False 46
(34, 37) 9 9 9 9 9 . False 73
(37, 1) 2 3 5 7 5 . False 37
(37, 14) 8 7 9 7 7 . True 51
(37, 18) 7 9 7 7 7 . True 55
(37, 24) 9 7 9 7 9 . False 60
(37, 27) 9 9 7 9 7 . True 64
(37, 33) 9 9 9 9 7 . False 69
(37, 36) 9 9 9 9 9 . True 73
(37, 37) 9 9 9 9 9 . False 73
(37, 38) 9 9 9 9 9 . False 73
(37, 39) 9 9 9 9 9 . False 73
(37, 4) 2 3 7 7 7 . True 41
(37, 5) 2 7 5 5 6 . True 42
(37, 8) 8 7 5 6 7 . False 46
-------- ----------- ----- --
--------------------------------------------------------------------------------
38: 64 examples
seen: 39/53
-------- ----------- ----- --
(0, 38) 2 3 5 7 7 . True 38
(1, 38) 2 3 7 5 6 . True 39
(2, 38) 2 3 7 5 7 . True 40
(3, 38) 2 3 7 7 7 . True 41
(4, 38) 2 7 5 5 6 . True 42
(5, 38) 2 7 5 7 7 . True 43
(7, 38) 8 5 7 5 6 . True 45
(9, 38) 7 7 5 6 7 . True 47
(11, 38) 8 7 7 5 7 . True 49
(12, 38) 8 7 7 9 7 . True 50
(13, 38) 8 7 9 7 7 . True 51
(14, 38) 8 7 9 7 9 . True 52
(16, 38) 7 7 9 7 9 . True 54
(17, 38) 7 7 9 9 7 . True 55
(18, 38) 7 7 9 9 7 . False 55
(19, 38) 7 9 7 9 7 . True 57
(20, 38) 7 9 9 7 7 . True 58
(21, 38) 9 7 7 9 7 . True 59
(22, 38) 9 7 9 7 7 . True 60
(25, 38) 9 9 7 7 9 . True 63
(26, 38) 9 9 7 9 7 . True 64
(28, 38) 9 9 9 7 7 . True 66
(29, 38) 9 9 9 7 7 . False 66
(30, 38) 9 9 9 7 9 . False 67
(31, 38) 9 9 9 9 7 . True 69
(32, 38) 9 9 9 9 7 . False 69
(33, 38) 9 9 9 9 7 . False 69
(34, 38) 9 9 9 9 9 . False 73
(35, 38) 9 9 9 9 9 . True 73
(38, 0) 2 3 5 7 7 . True 38
(38, 10) 8 7 5 7 7 . False 49
(38, 12) 8 7 7 7 9 . True 50
(38, 14) 8 7 9 7 9 . True 52
(38, 15) 7 7 7 9 7 . False 52
(38, 16) 7 7 9 7 9 . True 54
(38, 19) 7 9 7 9 7 . True 57
(38, 2) 2 3 7 9 7 . True 40
(38, 21) 9 7 7 9 7 . True 59
(38, 22) 9 7 9 7 7 . True 60
(38, 24) 9 7 9 9 7 . False 61
(38, 26) 9 9 7 9 7 . True 64
(38, 27) 9 9 7 9 9 . True 65
(38, 28) 9 9 7 9 9 . False 65
(38, 29) 9 9 9 7 9 . True 67
(38, 3) 2 3 7 7 9 . True 41
(38, 30) 9 9 9 7 9 . False 67
(38, 31) 9 9 9 7 9 . False 67
(38, 35) 9 9 9 9 9 . True 73
(38, 37) 9 9 9 9 9 . False 73
(38, 6) 2 7 7 7 5 . True 44
(38, 7) 2 7 7 7 7 . True 45
(38, 8) 8 7 5 6 7 . True 46
(39, 38) 9 9 9 9 9 . False 73
-------- ----------- ----- --
unseen: 4/11
-------- ----------- ----- --
(8, 38) 8 7 5 6 7 . True 46
(15, 38) 7 7 9 7 7 . True 53
(23, 38) 9 7 9 9 7 . True 61
(36, 38) 9 9 9 9 9 . False 73
(37, 38) 9 9 9 9 9 . False 73
(38, 1) 2 3 5 7 7 . False 38
(38, 17) 7 7 9 7 9 . False 54
(38, 20) 9 7 7 7 9 . True 58
(38, 36) 9 9 9 9 9 . False 73
(38, 5) 2 7 5 6 5 . False 42
(38, 9) 8 7 5 6 7 . False 46
-------- ----------- ----- --
--------------------------------------------------------------------------------
###Markdown
Calculating CWUBC
Jan 2016

Given JSON summaries of projects, calculate the Cumulative Weighted Unique Block Count (CWUBC) of projects.
See Proposal, Page 4: http://benjixie.com/meng_proposal.pdf
REQUIRED: ai2summarizer.py (https://github.com/bxie/ai2_summarizer)
See also: Trajectory.ipynb

CWUBC Steps:
1. Parse directory and load JSON from directory
1. Calculate T_all matrix (see proposal)
1. Calculate block weighting

POPPS Steps:
1. Determine blocks corresponding to each programming skill
1. Calculate POPPS matrix

Clustering:
1. Separate users based on CWUBC
1. Plot POPPS of each group
1. Run K-Means clustering on CWUBC trajectory matrix
1. Plot POPPS of each cluster

CSP Principles Trajectory:
1. Isolate CSP Blocks
1. Trajectory of CSP Blocks (freq, not binary)

Other:
- Analyze slope of CWUBC
- PCA
- Categorize Clusters (elbow)
- Prior Knowledge: Cluster based only on first n projects
###Code
import os
import os.path
import re
import json
import csv
import pickle
import numpy as np
import pandas as pd
#plotting
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.ticker as mtick
#making plots look pretty
%matplotlib inline
matplotlib.style.use('ggplot')
pd.options.display.mpl_style = 'default'
from collections import Counter
from sklearn.cluster import KMeans
# ROOT_DIR = "/Users/bxie/Documents/ai2_users_random_small/"
ROOT_DIR = "/Users/bxie/Documents/ai2_users_random/"
NB_DIR = "/Users/bxie/programming/ai2_user_trajectory/data/"
REGEX_ROOT_IGNORE = 'python|\.|README'
REGEX_SUMMARY = 'summary\.json'
SUMMARY_SUFFIX = "_summary.json"
USER_ID_RAND = "000317"
PROJ_ID_RAND = "4673362639978496"
THRESHOLD = 20
"""
code to get blocks
"""
"""
return dictionary of (user ids, num projects)
for users w/ at least min_num_projects
"""
def get_users(min_num_projects):
#regex to ignore non-project files
ignore = 'python|\.|README'
regexp=re.compile(ignore) #use: regexp.search(fname)
fnames = filter(lambda x: regexp.search(x) is None, os.listdir(ROOT_DIR))
super_users = {}
for fname in fnames:
num_projects = len(get_all_projects(fname))
if num_projects > min_num_projects:
super_users[fname] = num_projects
return super_users
"""
Given user_id (user directory name), return list of all project ids
"""
def get_all_projects(user_id):
unfiltered_project_names = os.listdir("{}{}".format(ROOT_DIR, user_id))
project_names = filter(lambda x: x.isdigit(), unfiltered_project_names)
return project_names
"""
given user id and project id, return project summary (as dictionary)
"""
def get_summary(user_id, project_id):
summary_dir = "{}{}/{}{}".format(ROOT_DIR, user_id, project_id, SUMMARY_SUFFIX)
with open(summary_dir) as data_file:
data = json.load(data_file)
return data
"""
Given project_id (user directory name), return lists of all active blocks, orphaned blocks
"""
def get_blocks(summary):
screen_names = filter(lambda x: x.find("*")<0, summary.keys())
blocks_count = Counter({})
orphan_count = Counter({})
for sname in screen_names:
#if has blocks
if has_blocks(summary, sname):
blocks_count += Counter(summary[sname]['Blocks']['Active Blocks']['Types'])
if has_blocks(summary, sname, check_active=False):
orphan_count += Counter(summary[sname]['Blocks']['Orphan Blocks']['Types'])
return dict(blocks_count), dict(orphan_count)
"""
Given blocks dict, save to CSV
"""
def save_blocks_to_csv(blocks_dict, new_fname_path):
writer = csv.writer(open("{}.csv".format(new_fname_path), 'wb'))
for key, value in blocks_dict.items():
writer.writerow([key, value])
return True
""""""""""""""" Helper Functions """""""""""""""
"""
Given project summary(dict) and screen name(str)
and designation of active (default) or orphan blocks,
return boolean to determine if screen has those blocks
"""
def has_blocks(summary, screen_name, check_active=True):
block_name = 'Active Blocks' if check_active else 'Orphan Blocks'
return type(summary[screen_name]['Blocks']) == dict and type(summary[screen_name]['Blocks'][block_name]) == dict
""" Get all types of blocks """
"""
get count of all blocks of projects (up to upper_bound # of projects) by users with at least n (threshold) projects
"""
def get_all_blocks(threshold=0, upper_bound=THRESHOLD, have_upper_bound=True):
counter_active = Counter({})
counter_orphan = Counter({})
for user_id in get_users(threshold):
project_ids = get_all_projects(user_id)
if have_upper_bound:
project_ids = project_ids[:upper_bound] # only select first n projects as defined by upper_bound
for project_id in project_ids:
# print "{}, {}".format(user_id, project_id)
active, orphan = get_blocks(get_summary(user_id, project_id))
counter_active += Counter(active)
counter_orphan += Counter(orphan)
return dict(counter_active), dict(counter_orphan)
"""
get count of all blocks by users with at least n (threshold) projects
return tuple of dictionaries (active, orphan blocks)
key: block type, value: block frequency
"""
def get_blocks_project_count(threshold=0):
counter_active = Counter({})
counter_orphan = Counter({})
for user_id in get_users(threshold):
project_ids = get_all_projects(user_id)
if threshold > 0:
project_ids = project_ids[:threshold] #analyze first n projects only
for project_id in project_ids:
# print "{}, {}".format(user_id, project_id)
active, orphan = get_blocks(get_summary(user_id, project_id))
active = {val:1 for val in active} #1 per project
orphan = {val:1 for val in orphan}
counter_active += Counter(active)
counter_orphan += Counter(orphan)
return dict(counter_active), dict(counter_orphan)
"""
get list of all block types
"""
def get_all_block_types(active_blocks, orphan_blocks):
return list(set(active_blocks.keys() + orphan_blocks.keys()))
"""
load pickled block types
"""
def load_block_types(fname):
block_types = open(fname, 'rb')
output = pickle.load(block_types)
block_types.close()
return output
"""
save pickled block types
"""
def save_block_types(block_types_list, fname):
block_types = open(fname, 'wb')
pickle.dump(block_types_list, block_types)
block_types.close()
"""
Calculating trajectory matrix (CWUBC)
"""
# order of this is important
block_types_fname = 'jeff_types.pkl'
BLOCK_TYPES = load_block_types(NB_DIR+block_types_fname)
def get_all_trajectories(threshold=THRESHOLD):
user_ids = get_users(threshold)
user_traj_vectors = [] #list of user trajectory vectors
for uid in user_ids:
V_u = get_trajectory(uid)
user_traj_vectors.append(V_u)
return np.vstack(user_traj_vectors)
# given a user_id, return trajectory as vector of # of blocks used at each project
# BXX TODO: Add weighting
def get_trajectory(user_id, threshold=THRESHOLD):
P_b = get_binary_matrix(user_id, threshold)
V_u = np.sum(P_b, axis=1)
return V_u
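# The "weighted" part of CWUBC is still a TODO above. As a minimal sketch (an
# assumption, not the original implementation), a hypothetical weight vector
# aligned with BLOCK_TYPES could replace the plain sum of unique block types:
def get_weighted_trajectory(user_id, block_weights, threshold=THRESHOLD):
    P_b = get_binary_matrix(user_id, threshold)  # cumulative binary projects x block types
    weights = np.asarray(block_weights, dtype=float)  # one weight per entry of BLOCK_TYPES
    return P_b.astype(float).dot(weights)  # weighted cumulative block count per project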
"""
given user id, get CUMULATIVE binary matrix of users x blocks
"""
def get_binary_matrix(user_id, threshold=THRESHOLD):
P_u = get_freq_matrix(user_id, threshold)
# print P_u[:, BLOCK_TYPES.index('color_make_color')]
P_c = np.cumsum(P_u, axis = 0)
# print P_c[:, BLOCK_TYPES.index('color_make_color')]
return P_c>0
"""
given user id, get non-cumulative binary matrix of users x blocks
"""
def get_binary_matrix_non_cum(user_id, threshold=THRESHOLD):
P_u = get_freq_matrix(user_id, threshold)
# print P_u[:, BLOCK_TYPES.index('color_make_color')]
P_u[P_u>0]=1 #binary matrix
# print P_c[:, BLOCK_TYPES.index('color_make_color')]
return P_u
# given user_id, return matrix of frequency of blocks of each project
# output: n x d matrix where n=# of projects (THRESHOLD), d = # of block types
def get_freq_matrix(user_id, threshold=THRESHOLD):
output = np.zeros((threshold, len(BLOCK_TYPES)))
project_ids = get_all_projects(user_id)[:threshold] # getting first n projects from user
for i in range(threshold):
pid = project_ids[i]
summary = get_summary(user_id, pid)
blocks = get_blocks(summary)[0]
for blk, count in blocks.items():
output[i, BLOCK_TYPES.index(blk)] = count
return output
#normalize traj_matrix by max (if by_max) or by sum
def normalize_trajectory(traj_matrix, by_max=True):
if by_max:
user_norm = traj_matrix[:,-1] #final UBC for each user
else:
user_norm = traj_matrix.sum(axis=1)
output = traj_matrix.astype(float) / user_norm[:, np.newaxis]
return np.nan_to_num(output) #NaN from divide by zero error
def difference_trajectory(traj_matrix):
return np.diff(traj_matrix, axis=1)
"""
Calculating POPPS
To get block type, from browser console: bs = Blocklies['5066549580791808_Screen1']; bs.selected.type
"""
#mapping programming skill to block types
POPPS_MAP = {
'cond': [
'controls_if', #conditional
'controls_choose'
],
'list': [
'lists_create_with', #list
'lists_create_with',
'lists_add_items',
'lists_is_in',
'lists_length',
'lists_is_empty',
'lists_pick_random_item',
'lists_position_in',
'lists_select_item',
'lists_insert_item',
'lists_replace_item',
'lists_remove_item',
'lists_append_list',
'lists_copy',
'lists_is_list',
'lists_to_csv_row',
'lists_to_csv_table',
'lists_from_csv_row',
'lists_from_csv_table',
'lists_lookup_in_pairs'
],
'loop': [
'controls_forEach', #loop
'controls_forRange',
'controls_while'
],
'logic': [
'logic_negate', #operator
'logic_or',
'logic_boolean',
'logic_false',
'logic_operation',
'logic_compare'
],
'var': [
'lexical_variable_set',
'lexical_variable_get',
'local_declaration_expression',
'local_declaration_statement'], #variable
'proc': [
'procedures_defnoreturn', #procedure
'procedures_callreturn',
'procedures_defreturn',
'procedures_callnoreturn'
],
'proc_def': [
'procedures_defnoreturn', #procedure
'procedures_defreturn',
],
}
# flat list of all CC blocks (formerly known as POPPS blocks)
POPPS_ALL_BLOCKS = []
"""
given binary matrix to show POPPS,
return vector for average proportion of users who have not learned skill by project i
"""
def get_average_survival(popps_matrix):
return np.average(popps_matrix, axis=0)
"""
given specific programming skill (string) from POPPS_MAP.keys(),
optional list of user ids (select_users),
and optional threshold for min number of projects users must have
return binary matrix to show POPPS (row: user, column: 1 if used skill by that project)
"""
def get_popps_all_users(prog_skill, select_users=[], threshold=THRESHOLD, block_types=BLOCK_TYPES):
user_ids = get_users(threshold) if len(select_users)==0 else select_users
user_popps_vectors = [] #list of user trajectory vectors
for uid in user_ids:
P_b = get_specific_popps_binary(uid, prog_skill, threshold, block_types)
user_popps_vectors.append(P_b)
return np.vstack(user_popps_vectors)
"""
given user id (string),
specific programming skill (string) from POPPS_MAP.keys(),
optional threshold for number of projects to analyze,
and optional block types matrix (block_types_matrix),
return binary matrix for all POPPS that is 1 if user has skill by project i
"""
def get_specific_popps_binary(user_id, prog_skill, threshold=THRESHOLD, block_types= BLOCK_TYPES):
if prog_skill not in POPPS_MAP:
raise Exception("{} not in POPPS_MAP. Select from {}".format(prog_skill, POPPS_MAP.keys()))
popps_binary = np.zeros([1, threshold])
popps_binary[:] = 1
P_b = get_binary_matrix(user_id, threshold)
block_inds = get_block_indices(POPPS_MAP[prog_skill], block_types)
found_proj_ind = np.argwhere(P_b[:,block_inds]==True) #locations in P_b that show block in project
if len(found_proj_ind):
#get first project that contains a block pertaining to prog_skill
first_proj_ind = np.min(np.argwhere(P_b[:,block_inds]==True)[:,0])
popps_binary[0, first_proj_ind:] = 0
return popps_binary
"""
given user id (int as string),
optional thershold for number of projects to analyze,
and optional block types matrix (block_types_matrix),
return binary matrix for all POPPS that is 1 if user has skill by project i
"""
def get_all_popps_binary(user_id, threshold=THRESHOLD, block_types= BLOCK_TYPES):
num_popps = len(POPPS_MAP)
popps_binary = np.zeros([num_popps, threshold])
popps_binary[:,:] = 1
for i in range(num_popps):
prog_skill = POPPS_MAP.keys()[i]
if prog_skill not in POPPS_MAP:
raise Exception("{} not in POPPS_MAP. Select from {}".format(prog_skill, POPPS_MAP.keys()))
P_b = get_binary_matrix(user_id, threshold)
block_inds = get_block_indices(POPPS_MAP[prog_skill], block_types)
found_proj_ind = np.argwhere(P_b[:,block_inds]==True) #locations in P_b that show block in project
if len(found_proj_ind):
#get first project that contains a block pertaining to prog_skill
first_proj_ind = np.min(np.argwhere(P_b[:,block_inds]==True)[:,0])
popps_binary[i, first_proj_ind:] = 0
return popps_binary
""" HELPER FUNCTIONS """
"""
Given list of block types to identify (selected_blocks)
and optional blocks types matrix (block_types)
return list of indices in matrix for given types
"""
def get_block_indices(selected_blocks, block_types = BLOCK_TYPES):
indices = []
for blk_type in selected_blocks:
indices.append(block_types.index(unicode(blk_type)))
return list(set(indices))
for key in POPPS_MAP.keys():
POPPS_ALL_BLOCKS += POPPS_MAP[key]
POPPS_ALL_BLOCKS_INDS = get_block_indices(POPPS_ALL_BLOCKS)
OTHER_BLOCKS_INDS = list(set(range(0,len(BLOCK_TYPES))) - set(POPPS_ALL_BLOCKS_INDS))
#ensure disjoint sets
len(BLOCK_TYPES) == len(POPPS_ALL_BLOCKS_INDS) + len(OTHER_BLOCKS_INDS)
"""
isolating CC blocks
"""
"""
given user_id,
return 1D array (vector) of cumulative trajectory of # of block types in each project
"""
def get_cc_trajectory(user_id, filter_blocks=True, block_inds=POPPS_ALL_BLOCKS_INDS):
mat_f = get_binary_matrix(user_id)
if filter_blocks:
mat_f = mat_f[: , block_inds] #select only relevant blocks
mat_f = np.sum(mat_f, axis=1)
return mat_f
def get_all_cc_trajectories(block_inds=POPPS_ALL_BLOCKS_INDS, threshold=THRESHOLD):
user_ids = get_users(threshold)
user_traj_vectors = [] #list of user trajectory vectors
for uid in user_ids:
V_u = get_cc_trajectory(uid, block_inds=block_inds)
user_traj_vectors.append(V_u)
return np.vstack(user_traj_vectors)
"""
UNUSED
"""
"""
CC block count in each project
"""
def get_cc_trajectory_repeats(user_id, block_inds=POPPS_ALL_BLOCKS_INDS, threshold=THRESHOLD):
mat_f = get_freq_matrix(user_id)
mat_f[mat_f>0] = 1 #binary matrix
mat_f = mat_f[: , block_inds] #select only relevant blocks
mat_f = np.cumsum(mat_f, axis=0)
mat_f = np.sum(mat_f, axis=1)
return mat_f
###Output
_____no_output_____
###Markdown
Sophistication of Projects
Learning rate of users decreases with time => breadth learning decreases. Is there an emphasis on depth learning? Or does learning stagnate?
###Code
"""
given a user id,
count_types: True to count block types, false to count frequency
optionally list of block indices to select
return the vector representing number of block types used in each block (not cumulative)
"""
def get_counts_by_project(user_id, count_types=False, filter_blocks=False, block_inds=POPPS_ALL_BLOCKS_INDS):
if count_types:
mat_f = get_binary_matrix_non_cum(user_id)
else:
mat_f = get_freq_matrix(user_id)
if filter_blocks:
mat_f = mat_f[: , block_inds] #select only relevant blocks
return np.sum(mat_f, axis = 1)
def get_all_avg_counts_by_project(threshold=THRESHOLD, count_types=False, filter_blocks=False, block_inds=POPPS_ALL_BLOCKS_INDS):
uids = get_users(threshold)
count_vectors = []
for user_id in uids:
count_vec = get_counts_by_project(user_id, count_types=count_types, filter_blocks=filter_blocks, block_inds=block_inds)
count_vectors.append(count_vec)
return np.expand_dims(np.average(count_vectors, axis=0), axis=1)
# sandbox
counts = get_all_avg_counts_by_project(filter_blocks=True, block_inds=OTHER_BLOCKS_INDS)
counts_cc = get_all_avg_counts_by_project(filter_blocks=True)
# print counts_cc
# print counts
plt.plot(counts)
plt.plot(counts_cc)
# plot_trajectory(counts_list, title="Average Cumulative Sum of Block Types",
# ylabel="Cum. Number of Block Types",
# legend = ['Comp. Concepts Blocks', 'Non-CC Blocks'], legend_loc=2, percent=False)
print counts_cc
print counts_cc.shape
x = np.expand_dims(counts_cc, axis=1)
def plot_counts_by_project(counts_list, title="Frequency of Blocks by Project",
ylabel="Number of Blocks",
legend = ['Comp. Concepts Blocks', 'Non-CC Blocks'], legend_loc=2, percent=False):
# fig = plt.figure(figsize=(11.3,7))
fig = plt.figure(figsize=(11.3,5))
ax = fig.add_subplot(1,1,1)
plt.xlabel('Project Number', fontsize=20, color='black')
plt.ylabel(ylabel, fontsize=20, color='black')
plt.title(title, fontsize=24, color='black')
my_legend = legend
for i in range(len(counts_list)):
t_mat = counts_list[i]
if percent:
t_mat = t_mat * 100 #for percent
if i==1:
plt.plot(t_mat, linewidth=5, linestyle='dashed')
else:
plt.plot(t_mat, linewidth=5)
plt.legend(my_legend, loc=legend_loc, fontsize=16)
if percent:
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
yticks = mtick.FormatStrFormatter(fmt)
ax.yaxis.set_major_formatter(yticks)
return fig, ax
counts_list = [counts_cc, counts]
plot_counts_by_project(counts_list)
np.array([0.0]) + counts_cc
"""
get all block types
shouldn't need to run if have saved pickled block types (blocks_type_fname)
"""
# active, orphan = get_all_blocks(0)
# all_types = get_all_block_types(active, orphan)
# flat list of all CC blocks (formerly known as POPPS blocks)
all_cc_blocks = []
for key in POPPS_MAP.keys():
all_cc_blocks += POPPS_MAP[key]
cc_block_inds = get_block_indices(all_cc_blocks)
len(BLOCK_TYPES)
# save block types
# save_blocks_to_csv(active, NB_DIR + "jeff_0_active")
# save_blocks_to_csv(orphan, NB_DIR + "jeff_0_orphan")
# save_block_types(all_types, NB_DIR + "jeff_types.pkl")
# print 'all saved!'
#get trajectories
T_all = get_all_trajectories()
#NB: associating blocks and block types now less trivial
T_all_cc = get_all_cc_trajectories(block_inds=POPPS_ALL_BLOCKS_INDS)
T_all_not = get_all_cc_trajectories(block_inds=OTHER_BLOCKS_INDS)
# save_block_types(T_all, 'traj.pkl')
print 'done'
# np.shape(np.average(T_all, axis=0))
###Output
done
###Markdown
Block Frequency Analysis
###Code
#blocks counts
#THIS TAKES TIME
active_total, orphan_total = get_all_blocks(threshold=20) # total # of blocks used in all projects
active_proj, orphan_proj = get_blocks_project_count(threshold=20) # num of projects to have specific block.
len(active_total)
c = Counter(active_proj)
# filter(lambda k: k in all_cc_blocks, active.keys())
cc_dict = {k: active_proj[k] for k in all_cc_blocks}
cc_count = Counter(cc_dict)
# print cc_count.most_common(10)
# print
cc_count_sorted = cc_count.most_common()
cc_count_sorted
btypes = []
counts = []
for key, count in cc_count_sorted:
btypes.append(key)
counts.append(count)
counts_reversed = counts[::-1]
btypes_reversed = btypes[::-1]
for key, count in cc_count_sorted:
print "{}\t{}".format(key, float(count))
# print "{}\t{}".format(key, float(count)/sum(counts)*100)
print len(counts)
index = np.arange(len(counts))
yticks = ["{}. {}".format(len(btypes_reversed)-i, btypes_reversed[i]) for i in range(len(btypes_reversed))]
print yticks
#Histogram of CC block frequency
fig, ax = plt.subplots()
rects1 = ax.barh(np.arange(0, len(counts))-0.4, counts_reversed)
# rects1 = ax.bar(np.arange(0, len(counts))+0.5, counts, width=1)
fig.set_size_inches(4.5,10)
# plt.xticks(index, ['']+btypes, rotation='vertical', fontsize=16)
plt.yticks(index, btypes_reversed, fontsize=14, color='black')
# plt.yticks(index, yticks, fontsize=14, color='black') #adds number to ytick
ax.set_ylabel('Block Type', fontsize=14, color='black')
ax.set_xlabel('Number of Projects', fontsize=14, color='black')
plt.tick_params(axis='x', which='major', labelsize=11, color='black')
# plt.tick_params(axis='both', which='minor', labelsize=8)
plt.title("Frequency of Computational Concept Blocks . \n", fontsize=18, color='black')
plt.autoscale()
plt.show()
counts_reversed
"""
plotting
"""
linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
"""
Given list of trajectory matrices (list of ndarray)
and text describing grouping methods
return plot of average trajectory of each matrix in list
"""
def plot_cwubc_avg(traj_matrix_list, add_zero=True, grouped_by_text=""):
plt.figure(figsize=(11.3,7))
plt.xlabel('Project Number', fontsize=20, color='black')
plt.ylabel('Cum. Number of Block Types', fontsize=20, color='black')
plt.title("Average Cumulative Sum of Block Types \n for Users Clustered By {}".format(grouped_by_text), fontsize=24)
my_legend = []
for i in range(len(traj_matrix_list)):
t_mat = traj_matrix_list[i]
num_users = np.shape(t_mat)[0]
if add_zero:
t_mat = np.insert(t_mat, 0, 0, axis=1) #0 added in beginning for plot
T_avg = np.average(t_mat, axis=0) #avg of each column/at each project
plt.plot(T_avg, linewidth=5, linestyle = linestyles[i % len(linestyles)])
my_legend.append("Cluster {} ({} users)".format(i, num_users))
plt.legend(my_legend, loc=2, fontsize=16)
"""
Given trajectory matrix (ndarray),
plot trajectory of all users (rows) separately
"""
def plot_cwubc_all_users(traj_matrix, add_zero=True):
plt.figure(figsize=(12,8))
num_users = np.shape(traj_matrix)[0]
if add_zero:
T_all_plot = np.insert(traj_matrix, 0, 0, axis=1) #0 added in beginning for plot
else:
T_all_plot = traj_matrix
# plt.plot(T_all_mean, linestyle='dotted', linewidth=5)
plt.xlabel('Project Number')
plt.ylabel('Number of Unique Blocks')
plt.title("Cummulative Number of Blocks Used by AI User")
for i in range(T_all_plot.shape[0]):
plt.plot(T_all_plot[i,:])
#TODO: figure out return
""" HELPER FUNCTIONS """
"""
Given number of groups, return list of human-readable strings
to be element of POPPS plot that splits 100% into num_groups groups
ex: num_groups = 4 => ['<25%','25-50%','50-75%','>75%']
"""
def create_legend(num_groups):
vals = range(0,101,100/num_groups)
output = []
output.append("<{}%".format(vals[1]))
for i in range(1,len(vals)-2):
output.append("{}-{}%".format(vals[i], vals[i+1]))
output.append(">{}%".format(vals[-2]))
return output
# given 1D numpy array, return same array w/ 0.0
# added to first term
def add_zero(vector):
return np.insert(0.0, 1, vector)
"""
Grouping Users by final CWUBC (AKA dumb clustering)
"""
"""
given CWUBC trajectory matrix (traj_matrix, ndarray)
and number of desired groups (n, as int),
split traj_matrix according to final CWUBC.
return list of n matrices representing traj_matrix split n ways and
list of lists of indices in matrix that correspond to each split
"""
def split_by_end_cwubc(traj_matrix, n):
end_cwubc = traj_matrix[:,-1]
thresholds = np.percentile(traj_matrix[:,-1], range(0,101,100/n)) #get thresholds for spliting n ways
# indices = []
output = []
indices = []
for i in range(len(thresholds)-1):
# indices.append(np.argwhere(np.all([end_cwubc>=thresholds[i], end_cwubc<thresholds[i+1]], axis=0)))
inds = np.argwhere(np.all([end_cwubc>=thresholds[i], end_cwubc<thresholds[i+1]], axis=0)).flatten()
indices.append(inds)
output.append(traj_matrix[inds])
return output, indices
### DEPRECATED ###
#Splitting users by CWUBC and comparing POPPS
"""
given list of lists of indicies (list of int),
list of skills (strings corresponding to POPPS_MAP.keys()),
plot POPPS survival curve
"""
def plot_popps(grouped_inds, grouped_by_text="<something>", skills=POPPS_MAP.keys()):
for skill in skills:
user_ids = []
popps = []
all_user_ids = np.array(get_users(THRESHOLD).keys())
legend = []
for i in range(len(grouped_inds)):
indices = grouped_inds[i]
temp_uids = all_user_ids[indices]
user_ids.append(temp_uids)
p = get_popps_all_users(skill, temp_uids)
p_avg = get_average_survival(p)
p_avg = np.insert(p_avg, 0, 1.0)
popps.append(p_avg)
legend.append("{} ({} users)".format(i, len(temp_uids)))
plt.figure(figsize=(12,8))
plt.title("Surirval Curve of Users Grouped by {}: {}".format(grouped_by_text.title(), skill.title()))
plt.xlabel("Project")
plt.ylabel("Proportion of Users Who Have Never Used {}".format(skill.title()))
for p_avg in popps:
plt.plot(p_avg, linewidth=5)
plt.legend(legend)
# n = 3
# mats, inds = split_by_end_cwubc(T_all, n)
# for skill in POPPS_MAP.keys():
# user_ids = []
# popps = []
# all_user_ids = np.array(get_users(THRESHOLD).keys())
# for i in range(len(inds)):
# indices = inds[i]
# temp_uids = all_user_ids[indices]
# user_ids.append(temp_uids)
# p = get_popps_all_users(skill, temp_uids)
# p_avg = get_average_survival(p)
# p_avg = np.insert(p_avg, 0, 1.0)
# popps.append(p_avg)
# plt.figure(figsize=(12,8))
# plt.title("Surirval Curve of Users Grouped by CWUBC: {}".format(skill.title()))
# plt.xlabel("Project")
# plt.ylabel("Proportion of Users Who Have Never Used Concept")
# for p_avg in popps:
# plt.plot(p_avg, linewidth=5)
# plt.legend(create_legend(n))
# plot_cwubc_avg(mats)
# plot_cwubc_all_users(mats[3])
###Output
_____no_output_____
###Markdown
Trajectory Comparison: All Blocks vs Comp. Concepts

TODO:
- function to plot these (input: trajectory list, title, ylabel, legend)
- see if learning rate is exponential decay (a sketch follows below) [See SO Answer](http://stackoverflow.com/questions/3938042/fitting-exponential-decay-with-no-initial-guessing)
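One way to check the exponential-decay idea is a direct curve fit. This is a rough sketch, assuming the curve of interest is the average per-project learning rate from `difference_trajectory(T_all)`; it is not part of the original analysis.

```python
import numpy as np
from scipy.optimize import curve_fit

def exp_decay(x, a, k, c):
    return a * np.exp(-k * x) + c

rate = np.average(difference_trajectory(T_all), axis=0)  # avg new block types per project
x = np.arange(1, len(rate) + 1)
params, cov = curve_fit(exp_decay, x, rate, p0=(rate[0], 0.5, rate[-1]))
print(params)  # a, k, c
```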
###Code
# plot_trajectory(traj_mat_list, title="", ylabel="", percent=False, legend_loc=2)
def plot_trajectory(traj_mat_list, title="Average Cumulative Sum of Block Types",
ylabel="Cum. Number of\nBlock Types",
legend = ['Comp. Concepts Blocks', 'Non-CC Blocks'], legend_loc=2, percent=False):
# fig = plt.figure(figsize=(11.3,7))
fig = plt.figure(figsize=(11.3,5))
ax = fig.add_subplot(1,1,1)
plt.xlabel('Project Number', fontsize=20, color='black')
plt.ylabel(ylabel, fontsize=20, color='black')
plt.title(title, fontsize=24, color='black')
my_legend = legend
for i in range(len(traj_mat_list)):
t_mat = traj_mat_list[i]
if percent:
t_mat = t_mat * 100 #for percent
num_users = np.shape(t_mat)[0]
t_mat = np.insert(t_mat, 0, 0, axis=1) #0 added in beginning for plot
T_avg = np.average(t_mat, axis=0) #avg of each column/at each project
if i==1:
plt.plot(T_avg, linewidth=5, linestyle='dashed')
else:
plt.plot(T_avg, linewidth=5)
my_legend.append("{} ({} users)".format(i, num_users))
plt.legend(my_legend, loc=legend_loc, fontsize=16)
if percent:
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
yticks = mtick.FormatStrFormatter(fmt)
ax.yaxis.set_major_formatter(yticks)
return fig, ax
"""
Plotting UBC of all blocks vs CT blocks
"""
mats, inds = split_by_end_cwubc(T_all, 1)
mats_not, inds_not = split_by_end_cwubc(T_all_not, 1)
mats_cc, inds_cc = split_by_end_cwubc(T_all_cc, 1)
both_mats = [mats_cc[0], mats_not[0]]
#Avg Block Count
plot_trajectory(both_mats)
#Normalized Learning Rate
mats_delta = [
normalize_trajectory(difference_trajectory(mats_cc[0]), by_max=False),
normalize_trajectory(difference_trajectory(mats_not[0]), by_max=False)
]
plot_trajectory(mats_delta, title="Normalized Average Learning Rate",
ylabel="% of Block Types\nIntroduced",
percent=True, legend_loc=1)
#Normalized Block Count
mats_norm = [normalize_trajectory(mats_cc[0]), normalize_trajectory(mats_not[0])]
# both_mats_norm = [normalize_trajectory(mats_cc[0])]
plot_trajectory(mats_norm, title="Normalized Avg. Cum. Sum of Block Types",
ylabel="% of Cum. Number\nof Block Types", percent=True, legend_loc=2)
"""
Clustering
"""
"""
given trajectory matrix (n x d where d is num of projects)
and optional number of clusters,
return list of trajectory matrices for each cluster of users
"""
def k_means(traj_matrix, num_clusters = 3):
estimator = KMeans(n_clusters=num_clusters)
estimator.fit(traj_matrix)
predict = estimator.predict(traj_matrix)
cluster_ind = [] #list of lists of ind in given cluster
T_cluster = []
for i in range(num_clusters):
cluster_ind.append(np.argwhere(predict==i).flatten())
T_cluster.append(traj_matrix[cluster_ind[i]])
print "{} has {} users".format(i, len(cluster_ind[i]))
return T_cluster, cluster_ind
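# The notebook plan lists "Categorize Clusters (elbow)". A minimal sketch of an
# elbow plot for choosing the number of clusters (an assumption about the
# intended approach, not code from the original analysis):
def plot_elbow(traj_matrix, max_clusters=10):
    ks = range(1, max_clusters + 1)
    inertias = [KMeans(n_clusters=k).fit(traj_matrix).inertia_ for k in ks]
    plt.plot(ks, inertias, marker='o')
    plt.xlabel('Number of clusters')
    plt.ylabel('Within-cluster sum of squares')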
T_cluster = k_means(T_all, 3)[0]
print
delta = np.diff(T_all, axis=1)
T_diff_ind = k_means(delta, 3)[1]
T_diff = []
T_diff_not = []
T_diff_cc = []
for i in range(3):
T_diff.append(T_all[T_diff_ind[i]])
T_diff_not.append(T_all_not[T_diff_ind[i]])
T_diff_cc.append(T_all_cc[T_diff_ind[i]])
T_diff[2]
# T_diff_ind[2]
x = get_users(20)
x
mats, inds = split_by_end_cwubc(T_all, 3)
# plot_cwubc_avg(mats, 'end block count')
# plot_cwubc_avg(T_cluster, 'kmeans clustering')
plot_cwubc_avg(T_diff, grouped_by_text='Learning Rate')
traj_mat_list_not = T_diff_not
traj_mat_list_cc = T_diff_cc
for i in range(len(traj_mat_list_not)):
traj_both = [normalize_trajectory(traj_mat_list_cc[i]), normalize_trajectory(traj_mat_list_not[i])]
plot_trajectory(traj_both, title="Avg Distinct Block Count (Normalized), Cluster {}".format(i),
ylabel="% of Cum. Number of Unique Blocks", percent=True, legend_loc=4)
###Output
_____no_output_____
###Markdown
Grouping based on slope of trajectory, K-Means
Smallest cluster (~10 users) has a sharp spike in UBC from the 1st to 2nd project, but the sample is too small to determine if this is relevant.
###Code
# grouped_inds = k_means(T_all, 3)[1]
g_text = "KMeans diff"
delta = np.diff(T_all, axis=1)
grouped_inds = k_means(delta, 3)[1]
T_diff = []
for i in range(3):
T_diff.append(T_all[grouped_inds[i]])
plot_cwubc_avg(T_diff, g_text)
# plot_popps(grouped_inds, grouped_by_text=g_text)
###Output
_____no_output_____
###Markdown
Attempting Fixed Effect Models
x: UBC
y: # of projects using procedures (# of procedure blocks when not binary)
###Code
"""
getting ind where project uses procedures
"""
"""
given user_id (string) and prog_skill (string that is in POPPS_MAP.keys()),
return vector of length threshold that is 1 if prog_skill is used in that project, 0 else
"""
# def get_specific_popps_location(user_id, prog_skill, threshold=THRESHOLD, block_types= BLOCK_TYPES):
# if prog_skill not in POPPS_MAP:
# raise Exception("{} not in POPPS_MAP. Select from {}".format(prog_skill, POPPS_MAP.keys()))
# popps_count = np.zeros(threshold)
# P_f = get_freq_matrix(user_id, threshold)
# block_inds = get_block_indices(POPPS_MAP[prog_skill], block_types)
# found_proj_ind = np.unique(np.argwhere(P_f[:,block_inds]>0)[:,0]) #project inds in P_f that have prog skill in project
# if len(found_proj_ind):
# popps_count[found_proj_ind] = 1
# return popps_count
"""
given user_id (string) and prog_skill (string that is in POPPS_MAP.keys()),
optional binary (boolean) that returns binary/boolean vector instead of true counts if true,
return vector of length threshold that shows number of blocks related to prog_skill used in project
"""
def get_specific_popps_counts(user_id, prog_skill, binary=False, threshold=THRESHOLD, block_types= BLOCK_TYPES):
if prog_skill not in POPPS_MAP:
raise Exception("{} not in POPPS_MAP. Select from {}".format(prog_skill, POPPS_MAP.keys()))
popps_count = np.zeros(threshold)
P_f = get_freq_matrix(user_id, threshold)
block_inds = get_block_indices(POPPS_MAP[prog_skill], block_types)
blk_count = P_f[:,block_inds][:,0] #num of prog skill blocks in each project
# print blk_count
if binary:
return blk_count>0
return blk_count
uid = '000317'
proj_inds = get_specific_popps_counts(uid, 'proc')
# num_proj = np.nonzero(proj_inds>0)[0].shape[0]
# num_proj
np.sum(proj_inds)
#getting UBC and # of projects w/ procedure for each user
#end UBC
ubc = T_all[:,-1]
# print ubc
is_binary = True
data = np.zeros([len(ubc), 2])
data[:,0] = ubc
# print user_ids
user_ids = get_users(20)
for i in range(len(user_ids)):
uid = user_ids.keys()[i]
proj_inds = get_specific_popps_counts(uid, 'proc', binary=is_binary)
if is_binary:
count = np.nonzero(proj_inds>0)[0].shape[0]
else:
count = np.sum(proj_inds)
data[i,1] = count
x = data[:,0]
y = data[:,1]
fit = np.polyfit(x,y,1)
p = np.poly1d(fit)
xp = np.linspace(0,200,1000)
_ = plt.plot(x, y, '.', xp, p(xp), '-')
# plt.scatter(x, y)
# calculating correlation
np.corrcoef(x,y)[0,1]
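# The section heading mentions fixed effect models, while the cells above only
# fit a simple least-squares line. As a rough sketch (an assumption, not the
# original analysis), statsmodels' OLS gives the same slope plus a p-value:
import statsmodels.api as sm
X = sm.add_constant(x)   # intercept + end-of-window UBC
ols_fit = sm.OLS(y, X).fit()
print(ols_fit.params)    # [intercept, slope]
print(ols_fit.pvalues)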
pf = get_freq_matrix('000317', 20)
block_inds = get_block_indices(POPPS_MAP['proc'], BLOCK_TYPES)
found_proj_ind = np.argwhere(pf[:,block_inds]>0) #locations in P_b that show block in project
found_proj_ind
len(np.unique(found_proj_ind[:,0]))
x=np.zeros(10)
x[[2,4,6]]=2
x
pf[:,block_inds]
###Output
_____no_output_____
###Markdown
The wikis concerned reinforce the idea that these are imported revisions. I know dewiki has a policy of importing pages from other wikis before translating them, and it looks like [enwikibooks](https://en.wikibooks.org/wiki/Wikibooks:Requests_for_import) and [simplewiki](https://simple.wikipedia.org/wiki/Wikipedia:Importers) do as well.
###Code
emn_only.groupby("emn_wiki")["emn_wiki"].count().sort_values(ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Looking at 10 random rows, all ten represent imported revisions.
###Code
emn_only.sample(10, random_state=18300941).iloc[:, 8:].reset_index(drop=True)
# Filter with a datetime object because there's a "0001-11-30" month which causes all sorts of weirdness
year_2001 = pd.to_datetime("2001")
emo_only.groupby("emo_month")["emo_wiki"].count()[year_2001:].plot();
emo_only.groupby("emo_wiki")["emo_wiki"].count().sort_values(ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Looking at 10 random rows, all ten represent revisions where the user name was [revision deleted](https://www.mediawiki.org/wiki/Manual:RevisionDelete).
###Code
emo_only.sample(10, random_state=18300941).iloc[:, :8].reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Taken together, these had only a minimal impact on the discrepancy.
###Code
ae_matched_sql = """
select
month,
count(*) as active_editors
from (
select
cast({table_abbrev}.month as date) as month,
{table_abbrev}.user_name,
sum({table_abbrev}.content_edits) as content_edits,
max({table_abbrev}.bot_flag) as bot_flag
from neilpquinn.{table} {table_abbrev}
inner join neilpquinn.{other_table} {other_table_abbrev}
on
emo.month = emn.month and
emo.wiki = emn.wiki and
emo.local_user_id = emn.local_user_id
where
{table_abbrev}.local_user_id != 0
group by {table_abbrev}.month, {table_abbrev}.user_name
) global_edits
where
content_edits >= 5 and
not bot_flag and
user_name not regexp "bot\\b"
group by month
"""
emo_ae_matched_sql = ae_matched_sql.format(
table="editor_month_official",
table_abbrev="emo",
other_table="editor_month_new",
other_table_abbrev="emn"
)
emn_ae_matched_sql = ae_matched_sql.format(
table="editor_month_new",
table_abbrev="emn",
other_table="editor_month_official",
other_table_abbrev="emo"
)
emo_ae_matched = hive.run(emo_ae_matched_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
emn_ae_matched = hive.run(emn_ae_matched_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
(emn_ae_matched - emo_ae_matched)["2001":].plot(
title="Deviation of 'new' active editors from 'official'"
);
###Output
_____no_output_____
###Markdown
Content edit counts
So now let's look at the rows that exist in both datasets. Since this is a much bigger group, we'll only look at the past two years of data.
###Code
matched_rows = hive.run([
"set hive.resultset.use.unique.column.names=true",
"""
select *
from neilpquinn.editor_month_official emo
inner join neilpquinn.editor_month_new emn
on
emo.month = emn.month and
emo.wiki = emn.wiki and
emo.local_user_id = emn.local_user_id
where
emo.month >= "2017-02-01" and
emn.month >= "2017-02-01"
"""
]).rename(columns=lambda x: x.replace(".", "_"))
###Output
_____no_output_____
###Markdown
12% have different numbers of content edits!
###Code
rows = len(matched_rows)
len(matched_rows.query("emo_content_edits != emn_content_edits")) / rows
###Output
_____no_output_____
###Markdown
Of those rows with differing numbers of content edits, `editor_month_official` shows more edits 96% of the time and `editor_month_new` shows more only 4% of the time.
###Code
differing_content_edit_rows = len(matched_rows.query("emo_content_edits != emn_content_edits"))
len(matched_rows.query("emo_content_edits > emn_content_edits")) / differing_content_edit_rows
###Output
_____no_output_____
###Markdown
Oof. There seem to be at least four different problems at work:

Deleted pages where all the revisions have null `page_namespace_historical`, `page_namespace_is_content` and `page_namespace_is_content_historical`:
* nlwiki page 5160672
* commonswiki page 71797356
* wikidatawiki page 9637937
* arwiki page 4970274
* commonswiki page 73916373
* ruwiki page 7054391

Revisions with null `page_namespace_historical` and `page_namespace_is_content_historical`. Most but not all of the revisions to the pages concerned are affected.
* https://en.wikipedia.org/w/index.php?diff=67017781 (page 28408157)
* https://www.wikidata.org/w/index.php?diff=155712677 (page 21524228)
* https://pt.wikipedia.org/w/index.php?diff=1691356 (page 96328)
* https://en.wikipedia.org/w/index.php?diff=820879007 (page 56326900)

Revisions where the join to the page table seems to have failed entirely, because almost all revisions have null `page_title`, `page_namespace`, and `page_namespace_is_content` (including historical fields) and `page_creation_timestamp`. Most but not all of the revisions to the pages concerned are affected.
* https://en.wikipedia.org/w/index.php?diff=859361756 (page 40012938)
* https://pt.wikipedia.org/w/index.php?diff=2692528 (page 3177643)
* https://en.wikipedia.org/w/index.php?diff=269025183 (page 29397754)
* https://en.wikipedia.org/w/index.php?diff=347411263 (page 30865452)

Revisions with null page data (as above) because the referenced pages simply don't exist in the underlying page table. These may be botched deletions.
* https://ru.wikipedia.org/w/index.php?diff=78824747
* https://da.wikipedia.org/w/index.php?diff=1751290
* https://nl.wikipedia.org/w/index.php?diff=1363616
* https://en.wikipedia.org/w/index.php?diff=17967486
###Code
matched_rows.query("emo_content_edits > emn_content_edits").sample(10)[
["emo_month", "emo_wiki", "emo_local_user_id", "emo_user_name", "emo_content_edits", "emn_content_edits"]
]
###Output
_____no_output_____
###Markdown
In the less common case where `editor_month_new` shows more edits, the reasons seem to be:
* content namespaces not included in the list for `editor_month_official`: the Page namespace (104) and Index namespace (106) on enwikisource, the Author namespace (102) and Page namespace (104) on dewikisource, the Page namespace on guwikisource (122), the List namespace of ltwiki (104)
* edits to pages later moved out of content namespaces (e.g. from the main namespace to the user namespace)
* a history merge that moved edits in the Draft namespace to the main namespace (e.g. with the page https://en.wikipedia.org/w/index.php?title=Wyatt_Omsberg&action=history)
###Code
matched_rows.query("emn_content_edits > emo_content_edits").sample(10)[
["emo_month", "emo_wiki", "emo_local_user_id", "emo_user_name", "emo_content_edits", "emn_content_edits"]
]
###Output
_____no_output_____
###Markdown
This, finally, makes a big impact on the discrepancy: at its largest, it goes from about -19 000 to about -1 200, and in the past year, it goes from about -4 000 to about +50.
###Code
ae_all_ns_sql = """
select
month,
count(*) as active_editors
from (
select
cast({table_abbrev}.month as date) as month,
{table_abbrev}.user_name,
sum({table_abbrev}.edits) as edits,
max({table_abbrev}.bot_flag) as bot_flag
from neilpquinn.{table} {table_abbrev}
inner join neilpquinn.{other_table} {other_table_abbrev}
on
emo.month = emn.month and
emo.wiki = emn.wiki and
emo.local_user_id = emn.local_user_id
where
{table_abbrev}.local_user_id != 0
group by {table_abbrev}.month, {table_abbrev}.user_name
) global_edits
where
edits >= 5 and
not bot_flag and
user_name not regexp "bot\\b"
group by month
"""
emo_ae_all_ns_sql = ae_all_ns_sql.format(
table="editor_month_official",
table_abbrev="emo",
other_table="editor_month_new",
other_table_abbrev="emn"
)
emn_ae_all_ns_sql = ae_all_ns_sql.format(
table="editor_month_new",
table_abbrev="emn",
other_table="editor_month_official",
other_table_abbrev="emo"
)
emo_ae_all_ns = hive.run(emo_ae_all_ns_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
emn_ae_all_ns = hive.run(emn_ae_all_ns_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
(emn_ae_all_ns - emo_ae_all_ns)["2001":].plot(
title="Deviation of 'new' active editors from 'official'"
);
###Output
_____no_output_____
###Markdown
Overall edit counts
Only about 0.1% of rows differ in their overall edit count.
###Code
differing_edits = matched_rows.query("emo_edits != emn_edits")
len(differing_edits) / rows
###Output
_____no_output_____
###Markdown
About 63% of these rows show more edits in `editor_month_new`.
###Code
len(differing_edits.query("emn_edits > emo_edits")) / len(differing_edits)
###Output
_____no_output_____
###Markdown
The extra edits in `editor_month_new` all seem to be due to imported revisions: for example, where a user was credited with imported revisions *in addition* to making regular revisions during that month, or had some revisions imported before the row in `editor_month_official` was generated, and others imported between that time and now.
###Code
differing_edits.query(
"emn_edits > emo_edits"
).groupby("emo_wiki")["emo_wiki"].count().sort_values(ascending=False).head(20)
differing_edits.query(
"(emn_edits > emo_edits) & (emo_wiki == 'commonswiki')"
).sample(10)[
["emo_month", "emo_wiki", "emo_local_user_id", "emo_user_name", "emo_edits", "emn_edits"]
]
###Output
_____no_output_____
###Markdown
The extra edits in `editor_month_official` all seem to be due to revision deletion.
###Code
differing_edits.query(
"emo_edits > emn_edits"
).groupby("emo_wiki")["emo_wiki"].count().sort_values(ascending=False).head(20)
differing_edits.query(
"(emo_edits > emn_edits) & (emo_local_user_id != 0)"
).sample(10)[
["emo_month", "emo_wiki", "emo_local_user_id", "emo_user_name", "emo_edits", "emn_edits"]
]
###Output
_____no_output_____
###Markdown
This doesn't make a significant impact on the discrepancy; overall, while revision importing and deletion add an unfortunate instability to our metrics, the impact is not big enough for serious concern.
###Code
ae_equal_edits_sql = """
select
month,
count(*) as active_editors
from (
select
cast({table_abbrev}.month as date) as month,
{table_abbrev}.user_name,
greatest(sum({table_abbrev}.edits), sum({other_table_abbrev}.edits)) as edits,
max({table_abbrev}.bot_flag) as bot_flag
from neilpquinn.{table} {table_abbrev}
inner join neilpquinn.{other_table} {other_table_abbrev}
on
emo.month = emn.month and
emo.wiki = emn.wiki and
emo.local_user_id = emn.local_user_id
where
{table_abbrev}.local_user_id != 0
group by {table_abbrev}.month, {table_abbrev}.user_name
) global_edits
where
edits >= 5 and
not bot_flag and
user_name not regexp "bot\\b"
group by month
"""
emo_ae_equal_edits_sql = ae_equal_edits_sql.format(
table="editor_month_official",
table_abbrev="emo",
other_table="editor_month_new",
other_table_abbrev="emn"
)
emn_ae_equal_edits_sql = ae_equal_edits_sql.format(
table="editor_month_new",
table_abbrev="emn",
other_table="editor_month_official",
other_table_abbrev="emo"
)
emo_ae_equal_edits = hive.run(emo_ae_equal_edits_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
emn_ae_equal_edits = hive.run(emn_ae_equal_edits_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
(emn_ae_equal_edits - emo_ae_equal_edits)["2001":].plot(
title="Deviation of 'new' active editors from 'official'"
);
###Output
_____no_output_____
###Markdown
User names
Let's look at differing user names, filtering out the rows representing anonymous editors because `editor_month_official` gives the user name as an empty string, where `editor_month_new` gives it as null.
###Code
differing_names = matched_rows.query("emn_user_name != emo_user_name & emo_local_user_id !=0")
pct_str(len(differing_names) / len(matched_rows))
###Output
_____no_output_____
###Markdown
These are all cases where the user was renamed after the `editor_month_official` row was generated.
###Code
differing_names.sample(10)[
["emo_wiki", "emo_month", "emo_user_name", "emn_user_name"]
]
###Output
_____no_output_____
###Markdown
Rerunning the active editor numbers, grouping in both cases by the user name in `editor_month_new`, has a significant effect on the discrepancy (although not as significant as the content edits issues).
###Code
ae_new_names_sql = """
select
month,
count(*) as active_editors
from (
select
cast({table_abbrev}.month as date) as month,
emn.user_name,
greatest(sum({table_abbrev}.edits), sum({other_table_abbrev}.edits)) as edits,
max({table_abbrev}.bot_flag) as bot_flag
from neilpquinn.{table} {table_abbrev}
inner join neilpquinn.{other_table} {other_table_abbrev}
on
emo.month = emn.month and
emo.wiki = emn.wiki and
emo.local_user_id = emn.local_user_id
where
{table_abbrev}.local_user_id != 0
group by {table_abbrev}.month, emn.user_name
) global_edits
where
edits >= 5 and
not bot_flag and
user_name not regexp "bot\\b"
group by month
"""
emo_ae_new_names_sql = ae_new_names_sql.format(
table="editor_month_official",
table_abbrev="emo",
other_table="editor_month_new",
other_table_abbrev="emn"
)
emn_ae_new_names_sql = ae_new_names_sql.format(
table="editor_month_new",
table_abbrev="emn",
other_table="editor_month_official",
other_table_abbrev="emo"
)
emo_ae_new_names = hive.run(emo_ae_new_names_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
emn_ae_new_names = hive.run(emn_ae_new_names_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
(emn_ae_new_names - emo_ae_new_names)["2001":].plot(
title="Deviation of 'new' active editors from 'official'"
);
###Output
_____no_output_____
###Markdown
Only the `editor_month_official` active editors number would have changed, since we were already grouping `editor_month_new` by its own `user_name` column. But it looks like very little of that change happened in the past two years (the range covered by the above `differing_names` dataset).
###Code
(emo_ae_equal_edits - emo_ae_new_names)["2001":].plot()
###Output
_____no_output_____
###Markdown
So let's get an older sample of differing-name rows and see what's going on.
###Code
older_matched_rows = hive.run([
"set hive.resultset.use.unique.column.names=true",
"""
select *
from neilpquinn.editor_month_official emo
inner join neilpquinn.editor_month_new emn
on
emo.month = emn.month and
emo.wiki = emn.wiki and
emo.local_user_id = emn.local_user_id
where
emo.month >= "2005-01-01" and
emo.month < "2007-01-01"
"""
]).rename(columns=lambda x: x.replace(".", "_"))
###Output
_____no_output_____
###Markdown
It looks like all of these are cases where `editor_month_new` has a null username, almost certainly cases of [T218463](https://phabricator.wikimedia.org/T218463).
###Code
older_matched_rows.query("emo_user_name != emn_user_name")[
["emo_wiki", "emo_month", "emo_user_name", "emn_user_name"]
].sample(20)
###Output
_____no_output_____
###Markdown
Bot flag
A very small number of rows differ in their categorization as bots.
###Code
differing_bot_flags = matched_rows.query("emo_bot_flag != emn_bot_flag")
len(differing_bot_flags) / len(matched_rows)
###Output
_____no_output_____
###Markdown
This is because `editor_month_official` considers a user a bot if they were *ever* in the "bot" group, whereas `editor_month_new` considers them a bot in a given month if they were in the group during the month or at the time `mediawiki_history` was generated. `editor_month_new` seems to have a better approach, producing the correct result in 17 out of the 20 cases below, mainly because many human users add themselves to the bot group temporarily (sometimes for just 30 minutes or less) for testing or to make a spate of edits without cluttering up the recent changes feed. Moreover, the 3 remaining cases, where real bots were not flagged as such in `editor_month_new`, would have been caught by the user name filter anyway.
###Code
bot_flag_columns = [
"emo_wiki",
"emo_month",
"emo_user_name",
"emo_bot_flag",
"emn_user_name",
"emn_bot_flag"
]
differing_bot_flags.sample(20)[bot_flag_columns]
ae_same_bots_sql = """
select
month,
count(*) as active_editors
from (
select
cast({table_abbrev}.month as date) as month,
emn.user_name,
greatest(sum({table_abbrev}.edits), sum({other_table_abbrev}.edits)) as edits,
max(emn.bot_flag) as bot_flag
from neilpquinn.{table} {table_abbrev}
inner join neilpquinn.{other_table} {other_table_abbrev}
on
emo.month = emn.month and
emo.wiki = emn.wiki and
emo.local_user_id = emn.local_user_id
where
{table_abbrev}.local_user_id != 0
group by {table_abbrev}.month, emn.user_name
) global_edits
where
edits >= 5 and
not bot_flag and
user_name not regexp "bot\\b"
group by month
"""
emo_ae_same_bots_sql = ae_same_bots_sql.format(
table="editor_month_official",
table_abbrev="emo",
other_table="editor_month_new",
other_table_abbrev="emn"
)
emn_ae_same_bots_sql = ae_same_bots_sql.format(
table="editor_month_new",
table_abbrev="emn",
other_table="editor_month_official",
other_table_abbrev="emo"
)
emo_ae_same_bots = hive.run(emo_ae_same_bots_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
emn_ae_same_bots = hive.run(emn_ae_same_bots_sql).assign(month=lambda df: pd.to_datetime(df["month"])).set_index("month")
###Output
_____no_output_____
###Markdown
And with that, we seem to have taken care of the entire discrepancy! It's not terribly surprising, considering that I have now essentially picked a winner in every case where the two datasets differ, but it's still deeply refreshing after so much investigation 😁
###Code
(emn_ae_same_bots - emo_ae_same_bots)["2001":].plot(
title="Deviation of 'new' active editors from 'official'"
);
###Output
_____no_output_____
###Markdown
Mitigation
Based on the findings above, I've regenerated `neilpquinn.editor_month`, working around as many of the issues as possible with [this SQL](https://github.com/wikimedia-research/Editing-movement-metrics/blob/1178702104d1cafd1c003759aa09d9dbd8728d0c/queries/update_editor_month.sql).
###Code
mitigated_ae = (
hive.run("""
select
month,
count(*) as active_editors
from (
select
cast(month as date) as month,
user_name,
sum(content_edits) as content_edits,
max(bot_by_group) as bot -- `bot_by_group` is misnamed and includes the user name regex
from neilpquinn.editor_month
where
month < "2019-02-01" and
user_id != 0
group by month, user_name
) global_edits
where
content_edits >= 5 and
(not bot or user_name in ("Paucabot", "Niabot", "Marbot"))
group by month
""")
.assign(month=lambda df: pd.to_datetime(df["month"]))
.set_index("month")
)
###Output
_____no_output_____
###Markdown
This seems to have worked really well!
###Code
(mitigated_ae - official_ae).plot(title="Deviation of 'mitigated' active editors from 'official'");
ae_comparison = pd.concat([
new_ae.rename(columns=lambda x: "new (mediawiki_history)"),
mitigated_ae.rename(columns=lambda x: "mitigated new (mediawiki_history)"),
official_ae.rename(columns=lambda x: "old (replicas)")
], axis=1)
plt = ae_comparison.plot()
plt.set_title("Comparison of active editor calculations");
###Output
_____no_output_____
###Markdown
We are first going to introduce the DAML catalogue of clusters. The goal is to create an algorithm that can find a cluster and then assign stars in the region to that cluster. A similar idea is employed in https://www.aanda.org/articles/aa/pdf/2002/27/aa2476.pdf
###Code
from astroquery.vizier import Vizier
#Vizier.ROW_LIMIT = -1
catalog_list=Vizier.find_catalogs('Dias+ 2002-2015')
#This is the DAML open cluster catalogue (Dias et al.)
#The warnings need to be dealt with
#An import of all their values
catalogs = Vizier.get_catalogs(catalog_list.values())
###Output
WARNING: UnitsWarning: Unit 'Sun' not supported by the VOUnit standard. Did you mean uN? [astropy.units.format.vounit]
WARNING: UnitsWarning: The unit 'ct' has been deprecated in the VOUnit standard. [astropy.units.format.utils]
WARNING: UnitsWarning: The unit 'pix' has been deprecated in the VOUnit standard. [astropy.units.format.utils]
###Markdown
`catalogs` holds loads of different tables; the second one is the list of clusters.
###Code
cluster_list=catalogs[1] #only has 50 rows
sorted_cluster_list=cluster_list[np.argsort(cluster_list['Dist'])]
sorted_cluster_list
###Output
_____no_output_____
###Markdown
There will be errors on our measurements of the distances to our stars (see Bailer-Jones), so we want to order the clusters by distance and start with the nearest ones. We can look at the Dias paper for the error on the distance measurement to each cluster, since there must be some error involved. The closest clusters will of course have the smallest error when using a simple Bailer-Jones-style parallax inversion, so without doing anything rigorous we will take the closest star cluster and look at the Gaia data, just inverting the parallax to get a distance measurement. We will take a window of twice the diameter, and a depth of twice the diameter.
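As a minimal sketch of that naive first pass (Gaia parallaxes are in milliarcseconds, so dividing 1000 by the parallax gives a distance in parsecs):

```python
# Naive distance estimate by simple parallax inversion (mas -> pc).
# Only a rough first pass; the Bailer-Jones posterior distances used later are preferred.
def parallax_to_distance_pc(parallax_mas: float) -> float:
    return 1000.0 / parallax_mas

parallax_to_distance_pc(3.7)  # roughly 270 pc, about the catalogued distance of the closest cluster
```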
###Code
#Taking the closest cluster
closest_cluster=sorted_cluster_list[0]
closest_cluster
###Output
_____no_output_____
###Markdown
So we can see there is a diameter of 70.0 arcminutes, and we will use a window of size 140.0 arcminutes. We have a distance measurement of 269 pc, and we need to get distance estimates for the Gaia data in that region. Next we are going to call in the Gaia data centred around the catalogued open cluster.
###Code
#Looking at the cone around the point
right_as_center=closest_cluster['RAJ2000']
dec_center=closest_cluster['DEJ2000']
diam=closest_cluster['Diam']
#is the frame right
coord = SkyCoord(right_as_center,dec_center, unit=(u.hourangle, u.deg))
rad = u.Quantity(diam, u.arcminute)
r = Gaia.cone_search_async(coordinate=coord, radius=rad, verbose=True)
gaia_edr3=r.get_results()
#Print the table
gaia_edr3
###Output
_____no_output_____
###Markdown
Right, so we have loads of error columns here and there is going to be a lot of management of that. Can we trust the distance estimates from this? Either way we need to figure out the depth of the selection, i.e. how many sources we are going to accept. Now we are going to bring in the Bailer-Jones data.
###Code
Vizier.ROW_LIMIT = -1
bailer = Vizier.query_region(coord,
radius=rad,
catalog='I/352/gedr3dis')
bjones=bailer[0]
bjones
#see how many matches we have
count=0
for i in range (0, len(gaia_edr3['source_id'])):
if(gaia_edr3['source_id'][i] not in bjones['Source']):
count+=1
count
count + len(bjones['Source'])-len(gaia_edr3['source_id'])
###Output
_____no_output_____
###Markdown
Okay, this gives all of the data that has Bailer-Jones distance estimates.
###Code
bailer[0].columns
gaia_edr3=gaia_edr3[gaia_edr3['parallax']>=-1000] # there is probably a better way of dropping the missing values, but we shouldn't have many of those
# Now I'm getting rid of the sources without a valid parallax measurement
len(gaia_edr3)==len(bjones)
#Great, so it works and now we have catalogues with the same values
###Output
_____no_output_____
###Markdown
Now we need to add on the columns and merge the tables together; we order them by source id, which should make it easier.
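As a sketch of a more robust alternative (using only the column names already present in the two tables), an explicit join on the source identifier avoids relying on both tables being sorted identically:

```python
import pandas as pd

# Hypothetical explicit join on the Gaia source identifier instead of matching by row order.
merged = pd.merge(
    gaia_edr3.to_pandas(),
    bjones.to_pandas(),
    left_on="source_id",
    right_on="Source",
    how="inner",
)
```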
###Code
gaia_edr3=gaia_edr3[np.argsort(gaia_edr3['source_id'])]
bjones=bjones[np.argsort(bjones['Source'])]
False in (gaia_edr3['source_id']==bjones['Source'])
#these dataframes are weird but basically they're the same
#moreover I think I should have a pipeline for converting these into something more readable
###Output
_____no_output_____
###Markdown
I will of course tidy up the code later, but for now we are just doing it this way.
###Code
import pandas as pd
gaia_edr3=gaia_edr3.to_pandas()
bjones=bjones.to_pandas()
#However, information about the units has been lost entirely here
#The next code is a bit of a run-around, but it's going to give us what we want
# We have ordered the dataframes by increasing source id so that we should have matching sources
total_gaia = pd.concat([gaia_edr3,bjones], axis=1, join="inner")
total_gaia
#Now finally let's just make sure again that it's all matching row by row
(total_gaia['source_id']==total_gaia['Source']).index[(total_gaia['source_id']==total_gaia['Source'])==False]
#Right, so this says it all matches up, that's good
###Output
_____no_output_____
###Markdown
Now we are going to apply some of the selection criteria. We have already selected a search radius of twice the radius stated in the literature. We are going to make certain initial cuts:

1) Parallax cut
2) Magnitude cut
3) Star cut
###Code
print('The closest star cluster value is:', closest_cluster['Dist'], 'pc')
###Output
The closest star cluster value is: 269 pc
###Markdown
We want to choose sources such that, given any posterior estimate of their distances including the error, they are within the desired region. Note we haven't used the error in the right ascension or anything we've calculated so far.
###Code
#Let us first plot the parallax vs error
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
ax0=total_gaia['parallax_error']
ax1=total_gaia['parallax']
ax2=total_gaia['parallax_over_error']
fig=plt.figure(figsize=(15,6))
grid=gs.GridSpec(2,2)
a1=plt.subplot(grid[0,0])
a2=plt.subplot(grid[0,1])
a3=plt.subplot(grid[1,0])
a1.hist(1.0/(ax2),density=True,bins=1000,range=[-1,5])
a1.set_title('Fractional parallax uncertainty')
a2.hist(ax2,density=False,bins=1000,range=[-10,50])
a2.set_title('Histogram Plot Parallax/Parallax error')
a3.scatter(ax1,ax0,s=0.5)
a3.set_title('x:parallax vs error')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
We now want to use the Bailer-Jones paper to:

1) Get a parallax measurement corresponding to the cluster distance, using the likelihood function proposed in the paper
2) Use the distance measurements according to it
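For reference, the measurement model behind this approach treats the observed parallax $\varpi$ as Gaussian around the inverse of the true distance $r$ (parallax in arcsec, distance in pc); combined with a distance prior this gives the posterior distance estimates:

$$
p(\varpi \mid r, \sigma_\varpi) \;=\; \frac{1}{\sqrt{2\pi}\,\sigma_\varpi}\,
\exp\!\left[-\frac{\left(\varpi - 1/r\right)^2}{2\sigma_\varpi^2}\right]
$$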
###Code
#The distance measurement to the cluster is
closest_cluster['Dist']
closest_cluster_parallax_est=1.0/closest_cluster['Dist']
closest_cluster_parallax_est
closest_cluster['Dist']
#This is probably too far away. The parallax is tiny
#This is very far away, so what we are going to do is cut using the bailer jones
upper_r_bound=closest_cluster['Dist']+closest_cluster['Diam']
lower_r_bound=closest_cluster['Dist']-closest_cluster['Diam']
selection_region=pd.Interval(left=float(lower_r_bound), right=float(upper_r_bound))
###Output
_____no_output_____
###Markdown
First we will get rid of sources whose BJ distance percentiles are not contained within the region of the cluster distance plus or minus the diameter.
###Code
selection_gaia=total_gaia[(total_gaia['b_rgeo'] <= upper_r_bound)]
selection_gaia=selection_gaia[(selection_gaia['B_rgeo'] <= upper_r_bound)]
selection_gaia=selection_gaia[(selection_gaia['b_rgeo'] >= lower_r_bound)]
selection_gaia=selection_gaia[(selection_gaia['B_rgeo'] >= lower_r_bound)]
selection_gaia
###Output
_____no_output_____
###Markdown
So now we have selected objects such that the BJ percentiles are contained within the region of the cluster distance plus or minus twice the expected radius. Now let us plot and see what our parallax distribution looks like. Plot for the selected region
###Code
ax0=selection_gaia['parallax_error']
ax1=selection_gaia['parallax']
ax2=selection_gaia['parallax_over_error']
fig=plt.figure(figsize=(15,6))
grid=gs.GridSpec(2,2)
a1=plt.subplot(grid[0,0])
a2=plt.subplot(grid[0,1])
a3=plt.subplot(grid[1,0])
a4=plt.subplot(grid[1,1])
a1.hist(1.0/(ax2),density=True,bins=1000,range=[-0.3,0.55])
a1.set_title('Fractional parallax uncertainty')
a2.hist(ax2,density=False,bins=1000,range=[-10,50])
a2.set_title('Histogram Plot Parallax/Parallax error')
a3.scatter(ax1,ax0,s=0.5)
a3.set_title('x:parallax vs error')
a4.hist(ax1,density=False,bins=100,range=[2,6])
a4.set_title('Parallax')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
There is a fairly uniform scatter in the parallax uncertainty; I should analyse this a bit more, to be honest, and I will. We can see there seems to be some sort of peak in the parallax for a lot of the data. Could this correspond to our cluster?
###Code
#Lets check with the distance and do a plot that way
fig=plt.figure(figsize=(15,6))
a=plt.subplot()
def para(x):
return (1.0/x)*1000
ax0=selection_gaia['rgeo'] #median of the posterior density
ax1=selection_gaia['parallax'].apply(para)
a.hist(ax0,density=False,bins=100)
a.hist(ax1,density=False,bins=100,color='orange',)
a.set_title('Distance')
a.vlines(closest_cluster['Dist'],0,70, colors='g')
a.vlines(closest_cluster['Dist']+0.5*closest_cluster['Diam'],0,70, colors='r')
a.vlines(closest_cluster['Dist']-0.5*closest_cluster['Diam'],0,70, colors='r')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
The orange above is the standard parallax inversion, blue is the Bailer-Jones estimates, and the lines correspond to the centre and radius. Right, so it seems that the majority of the stars are clumped around the distance estimate minus the radius of the literature value. We really need to check the errors and analyse the BJ distance estimates and everything. So above, the green line is the centre distance of this star cluster, but all of the values seem to be gathered around the red mark. Weird. Now let's plot the HR diagram and the diagram of kinematic properties.
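For the HR diagram, the absolute magnitude in the next cell is computed from the apparent magnitude $m$ and the BJ distance $d$ (in pc) using the standard distance modulus; the code writes it as $m - 2.5\log_{10}[(d/10)^2]$, which is equivalent:

$$
M \;=\; m - 5\log_{10}\!\left(\frac{d}{10\ \mathrm{pc}}\right)
$$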
###Code
g_band=selection_gaia['phot_bp_mean_mag']-2.5*np.log10((selection_gaia['rgeo']/10.0)**2) #absolute magnitude from the mean BP magnitude and the BJ distance
bp_rp=selection_gaia['bp_rp'] #mean difference
proper_motion=selection_gaia['pm'] #total proper motion
right_asc=selection_gaia['ra']
dec=np.mod(selection_gaia['dec'],350) # position and motion, still with no error bars, but I will add them later
fig=plt.figure(figsize=(15,6))
grid=gs.GridSpec(2,2)
a1=plt.subplot(grid[0,0])
a2=plt.subplot(grid[0,1])
a3=plt.subplot(grid[1,0])
a4=plt.subplot(grid[1,1])
a1.hist(proper_motion,density=True,bins=1000)
a1.set_title('Proper motion density')
a2.scatter(bp_rp,g_band,s=0.5)
a2.invert_yaxis()
a2.set_title('HR diagram')
a3.scatter(right_asc,dec,s=0.5)
a3.set_xlim(-5,10)
a3.set_title('RA vs Dec')
a4.scatter(right_asc,dec,s=0.5)
a4.set_xlim(350,375)
a4.set_title('RA vs Dec around 350 ra')
plt.tight_layout()
#can do something with mod or whatever to get the wrap around
# There is a periodic plot or a better way to do this but leave for now
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set_xlim(-1,3)
ax.scatter(right_asc,dec, selection_gaia['rgeo'], s=0.5 )
###Output
_____no_output_____
###Markdown
Analysis Results
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 16
plt.rcParams['figure.figsize'] = [12, 8]
plt.rcParams['lines.linewidth'] = 2.5
pd.set_option("display.max_rows", 20)
pd.set_option("display.max_columns", 20)
from pathlib import Path
import pandas as pd
results_dir = Path("results")
results_paths = results_dir.glob("*.csv")
results = []
for path in results_paths:
results.append(pd.read_csv(path))
results_df = pd.concat(results, axis=0)
results_df
data_names = results_df["data_name"].unique()
def plot_metric_for_name(data_name, metric_name, ax=None, remove_drop=False):
if ax is None:
fig, ax = plt.subplots()
results_data_name = results_df[results_df["data_name"] == data_name]
info_first = results_data_name.iloc[0]
data_name = info_first['data_name']
results_data_name_sorted = results_data_name.sort_values(f"test_{metric_name}_mean")
null_encoders = ~results_data_name_sorted[f"test_{metric_name}_mean"].isna()
if remove_drop:
null_encoders &= (results_data_name_sorted["encoder"] != "drop")
y_values = np.arange(np.sum(null_encoders))
ax.errorbar(results_data_name_sorted.loc[null_encoders, f"test_{metric_name}_mean"],
y_values,
xerr=results_data_name_sorted.loc[null_encoders, f"test_{metric_name}_std"],
ls='', marker='o')
ax.set_yticks(y_values)
ax.set_yticklabels(results_data_name_sorted.loc[null_encoders, "encoder"])
ax.set_title(f"{data_name}: {metric_name}")
def plot_all_metrics(data_name, remove_drop=False):
results_data_name = results_df[results_df["data_name"] == data_name]
info_first = results_data_name.iloc[0]
non_null_names = info_first.notnull()
test_names = info_first.index.str.startswith("test")
score_names = info_first.index[non_null_names & test_names]
score_means_names = score_names[score_names.str.endswith("_mean")]
metric_names = [name[5:-5] for name in score_means_names]
fig, axes = plt.subplots(1, len(metric_names), figsize=(20, 6), constrained_layout=True)
for metric_name, ax in zip(metric_names, axes.flatten()):
plot_metric_for_name(data_name, metric_name, ax=ax, remove_drop=remove_drop)
return fig
data_names = ["telco", "amazon_access", "kicks", "taxi", "ames", "churn", "adult", "dresses_sales", "phishing_websites"]
fig = plot_all_metrics("telco")
for dataset in data_names:
plot_all_metrics(dataset)
# plt.savefig(f"figures/{dataset}.png")
md_names = [f"![{dataset}](figures/{dataset}.png)" for dataset in data_names]
print("\n".join(md_names))
###Output
![telco](figures/telco.png)
![amazon_access](figures/amazon_access.png)
![kicks](figures/kicks.png)
![taxi](figures/taxi.png)
![ames](figures/ames.png)
![churn](figures/churn.png)
![adult](figures/adult.png)
![dresses_sales](figures/dresses_sales.png)
![phishing_websites](figures/phishing_websites.png)
###Markdown
Get metadata for datasets
###Code
from bench_utils import fetch_openml_and_clean
from benchmark import DATA_INFOS
data_info = DATA_INFOS['kicks']
def get_metadata(data_info):
X, y = fetch_openml_and_clean(data_info)
data_info.is_classification
n_cats = X.select_dtypes(include=['object', 'category']).shape[1]
n_samples, n_features = X.shape
return {'dataset_name': data_info.data_name,
'categorical features': n_cats,
'n_features': n_features,
'n_samples': n_samples,
'is_classification': data_info.is_classification,
'openml_url': f'https://www.openml.org/d/{data_info.data_id}'}
results_df.columns
MD_DATASET_COLUMNS = ["data_name", "categorical features", "n_features", "n_samples", "is_classification", "openml_url"]
md_dataset_meta = results_df.drop_duplicates("data_name")[MD_DATASET_COLUMNS].set_index("data_name").loc[data_names].reset_index()
md_dataset_meta.to_markdown(index=False)
data_names
all_metadata = [get_metadata(data_info) for data_info in DATA_INFOS.values()]
import pandas as pd
metadata_df = pd.DataFrame.from_records(all_metadata)
from pathlib import Path
class BenchmarkResults:
def __init__(self, results_dir):
self.results = {
result_file.with_suffix("").name: result_file
            for result_file in results_dir.glob("*.csv")
}
def write_results(self, name):
self.results_df.to_csv("")
results_path = Path("results").glob("*csv")
hhe = list(results_path)[0]
import pandas as pd
df = pd.read_csv(hhe)
df['encoder'].tolist()
from bench_utils import load_data
from benchmark import DATA_INFOS
meta = load_data(DATA_INFOS["Allstate_Claims_Severity"])
df = meta['X']
df
import openml
datasets_df = openml.datasets.list_datasets(output_format="dataframe")
datasets_df.columns
with_cats_mask = datasets_df['NumberOfSymbolicFeatures'] >= 4.0
binary_or_regression = (datasets_df["NumberOfClasses"] == 0.0) | (datasets_df["NumberOfClasses"] == 2.0)
mid_level_features = (datasets_df["NumberOfFeatures"] <= 2000) & (datasets_df["NumberOfFeatures"] > 8)
mid_samples = datasets_df["NumberOfInstances"] >= 5000
dataset_with_cats = datasets_df[with_cats_mask & binary_or_regression & mid_level_features & mid_samples]
dataset_with_cats = dataset_with_cats.drop_duplicates("name")
dataset_with_cats.sort_values("NumberOfSymbolicFeatures").tail(30)
dataset_with_cats[dataset_with_cats["NumberOfClasses"] == 2.0].sort_values("NumberOfSymbolicFeatures").tail(30)
dataset_with_cats[dataset_with_cats["NumberOfClasses"]== 0.0]
###Output
_____no_output_____
###Markdown
Data and parameters
###Code
# directories with input (pdbbind), results and plots, and training stats
pdbbind_path = '../pdbbind/v2016'
results_path = './results'
results_prefix = '%s/batch5-2017-06-05T07:58:47' % results_path
# network parameters
featurizer = tfbio.data.Featurizer()
max_dist = 10
box_size = 21
columns = {name: i for i, name in enumerate(featurizer.FEATURE_NAMES)}
num_features = len(columns)
# scaling factor for partial charges
charge_std = 0.425896
# colors for subsets
set_colors = {
# PDBbind v. 2016 split
'core': 'r',
'refined': 'g',
'general': 'b',
# our split
'training': 'b',
'validation': 'g',
'test': 'r',
'core2013': 'purple'
}
set_titles = {
'training': 'training set',
'validation': 'validation set',
'test': 'core set v. 2016',
'core2013': 'core set v. 2013'
}
protein_data = pd.read_csv('protein_data.csv')
protein_data.head()
affinity_data = pd.read_csv('affinity_data_cleaned.csv')
affinity_data = affinity_data.rename(columns={'set': 'pdbbind_set'})
affinity_data.head()
dataset_split = []
for set_name in ['training', 'validation', 'test']:
with h5py.File('%s/%s_set.hdf' % (pdbbind_path, set_name), 'r') as f:
dataset_split.append(pd.DataFrame({'set': set_name, 'pdbid': list(f.keys())}))
dataset_split = pd.concat(dataset_split, ignore_index=True)
dataset_split.head()
affinity_data = pd.merge(affinity_data, dataset_split)
affinity_data.head()
affinity_data['set'].value_counts()
# training logs downloaded from tensorboard
training_mse = pd.read_csv('%s-training_set_mse_all.csv' % results_prefix)
validation_mse = pd.read_csv('%s-validation_set_mse_all.csv' % results_prefix)
###Output
_____no_output_____
###Markdown
Create the network
###Code
graph = tf.Graph()
with graph.as_default():
saver = tf.train.import_meta_graph('./%s-best.meta' % results_prefix,
clear_devices=True)
# get tensors we need to get predictions and the error
x = graph.get_tensor_by_name('input/structure:0')
y = graph.get_tensor_by_name('output/prediction:0')
t = graph.get_tensor_by_name('input/affinity:0')
keep_prob = graph.get_tensor_by_name('fully_connected/keep_prob:0')
mse = graph.get_tensor_by_name('training/mse:0')
# get tensors we might need to analyze the network
# activations on hidden layers
hidden_layers = [
graph.get_tensor_by_name('convolution/conv0/h_pool:0'),
graph.get_tensor_by_name('convolution/conv1/h_pool:0'),
graph.get_tensor_by_name('convolution/conv2/h_pool:0'),
graph.get_tensor_by_name('fully_connected/fc0/h_dropout/mul:0'),
graph.get_tensor_by_name('fully_connected/fc1/h_dropout/mul:0'),
graph.get_tensor_by_name('fully_connected/fc2/h_dropout/mul:0')
]
# weights
weights = [
graph.get_tensor_by_name('convolution/conv0/w:0'),
graph.get_tensor_by_name('convolution/conv1/w:0'),
graph.get_tensor_by_name('convolution/conv2/w:0'),
graph.get_tensor_by_name('fully_connected/fc0/w:0'),
graph.get_tensor_by_name('fully_connected/fc1/w:0'),
graph.get_tensor_by_name('fully_connected/fc2/w:0'),
]
###Output
_____no_output_____
###Markdown
Training progress
###Code
best_model = validation_mse[validation_mse['Value'] == validation_mse['Value'].min()]
best_epoch = best_model.index + 1
best_value = best_model['Value']
fig, ax = plt.subplots(figsize=(3.3, 2.5))
# plot rmse instead of mse
ax.plot(range(1, 21), training_mse['Value'] ** 0.5, label=set_titles['training'])
ax.plot(range(1, 21), validation_mse['Value'] ** 0.5, label=set_titles['validation'])
ax.vlines(best_epoch, 0, 2, color='r', linestyles='--', zorder=4, label='selected model')
ax.set_xlabel('Epoch')
ax.set_ylabel('RMSE')
ax.set_xlim(0, 21)
ax.set_xticks(range(0, 21, 2))
ax.grid(True, axis='y')
ax.set_ylim(0.9, 1.9)
ax.set_yticks(np.arange(0.9, 1.9, 0.1))
ax.legend(frameon=True, loc='lower left')
fig.tight_layout()
fig.savefig('%s/rmse.pdf' % results_path);
###Output
_____no_output_____
###Markdown
Predictions Predict on PDBbind v2013 core set
###Code
# load the data
affinity = []
coords = []
features = []
ids = []
with h5py.File('%s/core2013.hdf' % pdbbind_path, 'r') as f:
for pdb_id in f:
ids.append(pdb_id)
dataset = f[pdb_id]
coords.append(dataset[:, :3])
features.append(dataset[:, 3:])
affinity.append(dataset.attrs['affinity'])
affinity = np.reshape(affinity, (-1, 1))
# prepare grids
batch_grid = []
for crd, f in zip(coords, features):
batch_grid.append(tfbio.data.make_grid(crd, f))
batch_grid = np.vstack(batch_grid)
batch_grid[..., columns['partialcharge']] /= charge_std
# restore the trained model and predict affinities
with tf.Session(graph=graph) as session:
saver.restore(session, './%s-best' % results_prefix)
pred_affinity = session.run(y, feed_dict={x: batch_grid, keep_prob: 1.0})
###Output
INFO:tensorflow:Restoring parameters from ././results/batch5-2017-06-05T07:58:47-best
###Markdown
Merge with predictions for v2016 The predictions for PDBbind v 2016 were already computed at the end of the training (see `train.py` script) and saved in <prefix>-predictions.csv file.
###Code
predictions = pd.concat(
[
pd.read_csv('%s-predictions.csv' % results_prefix),
pd.DataFrame({'pdbid': ids, 'predicted': pred_affinity.flatten(),
'real': affinity.flatten(), 'set': 'core2013'})
]
)
predictions.head()
for set_name, table in predictions.groupby('set'):
rmse = ((table['predicted'] - table['real']) ** 2).mean() ** 0.5
mae = (np.abs(table['predicted'] - table['real'])).mean()
corr = scipy.stats.pearsonr(table['predicted'], table['real'])
lr = LinearRegression()
lr.fit(table[['predicted']], table['real'])
y_ = lr.predict(table[['predicted']])
sd = (((table['real'] - y_) ** 2).sum() / (len(table) - 1)) ** 0.5
print('%10s set: RMSE=%.3f, MAE=%.3f, R=%.2f (p=%.2e), SD=%.3f' % (set_name, rmse, mae, *corr, sd))
grid = sns.jointplot('real', 'predicted', data=table, stat_func=None, color=set_colors[set_name],
space=0.0, size=3, s=10, edgecolor='w', ylim=(0, 16), xlim=(0, 16))
grid.ax_joint.text(1, 14, set_titles[set_name])
grid.ax_joint.set_xticks(range(0, 16, 5))
grid.ax_joint.set_yticks(range(0, 16, 5))
grid.fig.savefig('%s/pred_%s.pdf' % (results_path, set_name))
predictions.to_csv('%s-all_predictions.csv' % results_prefix, index=False)
###Output
_____no_output_____
###Markdown
Select examples Get protein with the biggest number of complexes in the v2016 core set (which was used as test set)
###Code
core_pdbids = list(affinity_data[affinity_data['pdbbind_set'] == 'core']['pdbid'])
core_idx = np.in1d(protein_data['pdbid'], core_pdbids)
num_complexes = (protein_data
.loc[core_idx]
.groupby('uniprotid')
['name']
.agg(len)
.sort_values(ascending=False))
num_complexes[:10]
unid = num_complexes.index[0]
unid
complexes = protein_data.loc[(protein_data['uniprotid'] == unid), 'pdbid']
examples = affinity_data.loc[np.in1d(affinity_data['pdbid'], complexes),
['pdbid', 'pdbbind_set', 'set']]
examples = examples.reset_index(drop=True)
num_examples = len(examples)
print(num_examples, 'examples')
examples.head()
# load the input and affinities for selected examples
affinity = []
coords = []
features = []
ids = []
for set_name, table in examples.groupby('set'):
with h5py.File('%s/%s_set.hdf' % (pdbbind_path, set_name), 'r') as f:
for pdb_id in table['pdbid']:
ids.append(pdb_id)
dataset = f[pdb_id]
coords.append(dataset[:, :3])
features.append(dataset[:, 3:])
affinity.append(dataset.attrs['affinity'])
affinity = np.reshape(affinity, (-1, 1))
###Output
_____no_output_____
###Markdown
Results for different orientations of the input Let's check whether we get similar results for differently presented input. Our model is not invariant to input orientation. Each complex was centered at the ligand's geometric center, so we do not need to worry about translations. However, our model might be sensitive to input rotation. We will predict affinity for 24 orientations of each molecular complex and check whether the predictions are stable.
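As a sketch of what such a set of orientations can look like, the 24 proper rotations of a cube are exactly the signed permutation matrices with determinant +1 (`tfbio.data.rotate` is assumed here to index into an equivalent set):

```python
import numpy as np
from itertools import permutations, product

def cube_rotations():
    """Enumerate the 24 proper rotation matrices of a cube."""
    mats = []
    for perm in permutations(range(3)):
        for signs in product((1, -1), repeat=3):
            m = np.zeros((3, 3))
            for row, (col, sign) in enumerate(zip(perm, signs)):
                m[row, col] = sign
            if np.isclose(np.linalg.det(m), 1.0):  # keep proper rotations only
                mats.append(m)
    return mats

assert len(cube_rotations()) == 24
```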
###Code
rot_predictions = []
with tf.Session(graph=graph) as session:
saver.restore(session, './%s-best' % results_prefix)
for rotation in range(24):
print(rotation)
batch_grid = np.zeros((num_examples, box_size, box_size, box_size, num_features))
for i, (crds, ft) in enumerate(zip(coords, features)):
crds = tfbio.data.rotate(crds, rotation)
batch_grid[i] = tfbio.data.make_grid(crds, ft)
batch_grid[..., columns['partialcharge']] /= charge_std
pred_affinity = session.run(y, feed_dict={x: batch_grid,
t: np.reshape(affinity, (num_examples, 1)),
keep_prob: 1.0})
rot_predictions.append(pd.DataFrame({'rotation': rotation, 'pdbid': ids,
'predicted': np.squeeze(pred_affinity)}))
rot_predictions = pd.concat(rot_predictions)
rot_predictions = pd.merge(rot_predictions, affinity_data)
rot_predictions = rot_predictions.sort_values('predicted')
rot_predictions['pdbid'] = rot_predictions['pdbid'].str.upper()
palette = {}
for set_name, idx in rot_predictions.groupby('set')['pdbid'].agg(lambda x: set(x)).items():
for i in idx:
palette[i] = set_colors[set_name]
# plot range of predicted affinities for each complex
# sort by predicted value and color by training/validation/test split
fig, ax = plt.subplots(figsize=(3.3, 8))
sns.boxplot(y='pdbid', x='predicted', data=rot_predictions, ax=ax, palette=palette, linewidth=1, fliersize=2)
ax.set_xlim(3,)
ax.set_xlabel('Predicted affinity')
ax.set_ylabel('PDB ID')
# we need to manually add the legend
handles = []
labels = []
for set_name in ['training', 'validation', 'test']:
handles.append(Rectangle((0, 0), 1, 1, fc=set_colors[set_name], lw=1, ec='k'))
labels.append(set_titles[set_name])
ax.legend(handles, labels, loc='upper right')
fig.savefig('%s/rotations.pdf' % results_path, bbox_inches='tight');
###Output
_____no_output_____
###Markdown
Network properties Feature importance Check the distribution of weights for each of the features. The higher the absolute value of the weight, the more information comes out of this part of the input.
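A minimal sketch of the idea, assuming a hypothetical 3D-convolution kernel whose second-to-last axis indexes the input features (the same reshape is applied to the real kernel in the next cell):

```python
import numpy as np

# Hypothetical kernel of shape (kx, ky, kz, n_features, n_filters).
w = np.random.normal(scale=0.001, size=(5, 5, 5, 19, 64))

# Move the feature axis last and flatten everything else:
# one column of outgoing weights per input feature.
per_feature = np.transpose(w, (0, 1, 2, 4, 3)).reshape(-1, w.shape[3])
rough_importance = np.abs(per_feature).mean(axis=0)
```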
###Code
# get outgoing weights for each feature
with tf.Session(graph=graph) as session:
saver.restore(session, './%s-best' % results_prefix)
w0 = session.run(weights[0])
w0 = pd.DataFrame(np.transpose(w0, [0, 1, 2, 4, 3]).reshape((-1, num_features)),
columns=featurizer.FEATURE_NAMES)
###Output
INFO:tensorflow:Restoring parameters from ././results/batch5-2017-06-05T07:58:47-best
###Markdown
Check how much the distribution of weights for each feature differs from the initial one (a truncated normal with std=0.001). Compute the fraction of weights that are more than 1 std away from the mean.
###Code
diff = (w0.abs() > 0.001).mean()
diff.sort_values(ascending=False)
# range between 25th and 75th percentiles
perc_diff = ((w0.apply(lambda x: np.percentile(x, 75))
- w0.apply(lambda x: np.percentile(x, 25)))
.sort_values(ascending=False))
perc_diff
# plot range of weights, do not show outliers
fig, ax = plt.subplots(figsize=(3.3, 3))
sns.boxplot(data=w0, fliersize=0, orient='h', order=perc_diff.index, ax=ax)
ax.set_xlim(-0.055, 0.055)
ax.set_xticks(np.arange(-0.04, 0.05, 0.02))
ax.set_ylim(19, -1)
fig.tight_layout()
fig.savefig('%s/fi_box.pdf' % results_path)
###Output
_____no_output_____
###Markdown
Find parts of input that are crucial for predicting activity
###Code
# select a single ligand, that was predicted to be active
ligand = '3ws8'
rotation = 2
ligand_idx = ids.index(ligand)
ligand_idx
ligand_grid = tfbio.data.make_grid(coords[ligand_idx], features[ligand_idx])
ligand_rot_grid = tfbio.data.make_grid(tfbio.data.rotate(coords[ligand_idx], rotation),
features[ligand_idx])
for grid in (ligand_grid, ligand_rot_grid):
grid[..., columns['partialcharge']] /= charge_std
###Output
_____no_output_____
###Markdown
Baseline prediction Check what the baseline is: the prediction that our model returns when we do not give it any information about the complex. (We can think of it as analogous to the intercept in a linear model.)
###Code
with tf.Session(graph=graph) as session:
saver.restore(session, './%s-best' % results_prefix)
zero_pred = session.run(y, feed_dict={x: np.zeros_like(ligand_grid), keep_prob: 1.0})
zero_pred
###Output
INFO:tensorflow:Restoring parameters from ././results/batch5-2017-06-05T07:58:47-best
###Markdown
Make sure that our model did not just learn to recognize ligands or proteins, and that it uses both the ligand and the protein to predict binding affinity.
###Code
no_lig = np.vstack((ligand_grid, ligand_rot_grid))
no_lig[no_lig[..., columns['molcode']] == 1.0] = 0.0
with tf.Session(graph=graph) as session:
saver.restore(session, './%s-best' % results_prefix)
no_lig_pred = session.run(y, feed_dict={x: no_lig, keep_prob: 1.0})
no_lig_pred
no_prot = np.vstack((ligand_grid, ligand_rot_grid))
no_prot[no_prot[..., columns['molcode']] == -1.0] = 0.0
with tf.Session(graph=graph) as session:
saver.restore(session, './%s-best' % results_prefix)
no_prot_pred = session.run(y, feed_dict={x: no_prot, keep_prob: 1.0})
no_prot_pred
###Output
INFO:tensorflow:Restoring parameters from ././results/batch5-2017-06-05T07:58:47-best
###Markdown
Interestingly, the ligand with no protein gives higher predictions, so this information is more important for the model. We can also see in the weights' distribution that weights associated with the `molcode` are more often positive. This kind of filter recognizes the ligand, while filters with strongly negative values recognize the protein. Set parts of an input to 0
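A quick size check for the occlusion sweep below: with box_size = 21 and step = 3, steps_in_loop = 21 // 3 = 7, so each grid is scored 7³ = 343 times, each time with a different 5×5×5 box zeroed out.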
###Code
box = 5 # same size as in convolutional layers
step = 3
steps_in_loop = box_size // step
mean_pred = rot_predictions[rot_predictions['pdbid'] == ligand.upper()]['predicted'].mean()
mean_pred
# prepare grids with a single deleted box in each
modified = np.repeat(ligand_grid, steps_in_loop**3, axis=0)
origins = []
num = 0
for i in range(0, box_size-step+1, step):
for j in range(0, box_size-step+1, step):
for k in range(0, box_size-step+1, step):
origins.append((i, j, k))
modified[num, i:i+box, j:j+box, k:k+box] = 0.0
num += 1
assert num == steps_in_loop ** 3
origins = np.array(origins)
with tf.Session(graph=graph) as session:
saver.restore(session, './%s-best' % results_prefix)
modified_pred = session.run(y, feed_dict={x: modified, keep_prob: 1.0})
sns.boxplot(modified_pred);
# get the 10 boxes with the lowest predictions - those are the most important parts of the input
important_idx = np.argsort(modified_pred[:, 0])[:10]
important_idx
def get_box_atoms(indices, grid):
"""Get atoms from the grid that are in the boxes specified with indices"""
important_atoms = []
for idx in indices:
i, j, k = origins[idx]
tmp = grid[0][i:i+box, j:j+box, k:k+box]
atom_table = pd.DataFrame(tmp[tmp[..., columns['molcode']] != 0],
columns=featurizer.FEATURE_NAMES)
atom_x, atom_y, atom_z = np.where(tmp[..., columns['molcode']] != 0)
atom_x += i
atom_y += j
atom_z += k
atom_table['x'] = atom_x
atom_table['y'] = atom_y
atom_table['z'] = atom_z
atom_table['idx'] = idx
important_atoms.append(atom_table)
important_atoms = pd.concat(important_atoms, ignore_index=True)
important_atoms['partialcharge'] *= charge_std
return important_atoms
# columns we want to print
to_print = ['x', 'y', 'z', 'B', 'C', 'N', 'O', 'P', 'S', 'Se', 'hyb', 'partialcharge',
'hydrophobic', 'aromatic', 'acceptor', 'donor', 'ring', 'idx']
def plot_boxes(complex_idx, rot, box_indices):
"""Plot molecular complex in the specified rotation and the boxes specified by indices"""
from mpl_toolkits import mplot3d
fig = plt.figure(figsize=(6, 6))
crds = tfbio.data.rotate(coords[complex_idx], rot)
ft = features[complex_idx]
ax = fig.add_subplot(111, projection='3d')
mx, my, mz = crds[ft[:, columns['molcode']] == 1.0].T + max_dist
ax.scatter(mx, my, mz, label='ligand', c='b', s=20)
mx, my, mz = crds[ft[:, columns['molcode']] == -1.0].T + max_dist
ax.scatter(mx, my, mz, label='protein', c='g', s=5)
for i, j, k in origins[box_indices]:
alpha = 0.05
ax.plot_surface([i, i+box], [[j, j], [j+box, j+box]], k, alpha=alpha, color='gray')
ax.plot_surface([i, i+box], [[j, j], [j+box, j+box]], k+box, alpha=alpha, color='gray')
ax.plot_surface(i, [[j, j], [j+box, j+box]], [k, k+box], alpha=alpha, color='gray')
ax.plot_surface(i+box, [[j, j], [j+box, j+box]], [k, k+box], alpha=alpha, color='gray')
ax.plot_surface([i, i+box], j, [k, k+box], alpha=alpha, color='gray')
ax.plot_surface([i, i+box], j+box, [[k, k], [k+box, k+box]], alpha=alpha, color='gray')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.legend(loc=(0.75, 0.75), frameon=True)
return fig, ax
fig, ax = plot_boxes(ligand_idx, 0, important_idx)
ax.set_xlim(0, 21)
ax.set_ylim(0, 21)
ax.set_zlim(0, 21)
fig.tight_layout()
fig.savefig('%s/changes.pdf' % results_path);
important_atoms = get_box_atoms(important_idx, ligand_grid)
(important_atoms.loc[((important_atoms['molcode'] == -1.0)), to_print]
.sort_values(['x', 'y', 'z'])
.drop_duplicates(subset=['x', 'y', 'z']))
###Output
_____no_output_____
###Markdown
Check what happens when we use different orientation
###Code
modified_rot = np.repeat(ligand_rot_grid, steps_in_loop**3, axis=0)
num = 0
for i in range(0, box_size-step+1, step):
for j in range(0, box_size-step+1, step):
for k in range(0, box_size-step+1, step):
modified_rot[num, i:i+box, j:j+box, k:k+box] = 0.0
num += 1
assert num == steps_in_loop**3
with tf.Session(graph=graph) as session:
saver.restore(session, './%s-best' % results_prefix)
modified_rot_pred = session.run(y, feed_dict={x: modified_rot, keep_prob: 1.0})
sns.boxplot(modified_rot_pred);
important_idx_rot = np.argsort(modified_rot_pred[:, 0])[:10]
important_idx_rot
fig, ax = plot_boxes(ligand_idx, rotation, important_idx_rot)
ax.view_init(330, 60)
ax.set_xlim(0, 21)
ax.set_ylim(-1, 20)
ax.set_zlim(-1, 20)
fig.tight_layout()
fig.savefig('%s/changes_rot.pdf' % results_path);
important_atoms_rot = get_box_atoms(important_idx_rot, ligand_rot_grid)
(important_atoms_rot.loc[((important_atoms_rot['molcode'] == -1.0)), to_print]
.sort_values(['x', 'y', 'z'])
.drop_duplicates(subset=['x', 'y', 'z']))
###Output
_____no_output_____
###Markdown
How the activations differ
###Code
with tf.Session(graph=graph) as session:
saver.restore(session, './%s-best' % results_prefix)
activations = session.run(hidden_layers, feed_dict={x: np.vstack((ligand_grid, ligand_rot_grid)),
keep_prob: 1.0})
fig, axs = plt.subplots(nrows=len(hidden_layers), figsize=(3.3, 4))
axs = axs.flatten()
for i, ax in enumerate(axs):
tmp = activations[i].reshape((2, -1))
d = scipy.spatial.distance.pdist(tmp, metric='cos')
vmin, vmax = np.percentile(tmp, [1, 99])
sns.heatmap(tmp, xticklabels=False, yticklabels=['original', 'rotated'], vmin=vmin, vmax=vmax,
cmap=plt.cm.bone_r, ax=ax, cbar=False);
if d < 1e-2:
ax.set_title('layer %i (d=%.1e)' % (i+1, d))
else:
ax.set_title('layer %i (d=%.4f)' % (i+1, d))
fig.tight_layout()
# save as PNG - each heatmap consists of thousands of tiny rectangles
fig.savefig('%s/activations.png' % results_path, dpi=300);
###Output
_____no_output_____
###Markdown
Data on North American mushrooms from https://archive.ics.uci.edu/ml/datasets/Mushroom. We're answering questions based on the data:

* Can a machine learning model reliably identify poisonous mushrooms based on the data?
* Does any one feature reliably classify mushroom toxicity?
* Can we formulate simple, memorizable rules for reliably classifying mushroom toxicity?
###Code
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.tree import export_graphviz, DecisionTreeClassifier
from functools import reduce
###Output
_____no_output_____
###Markdown
We are importing the "expanded" data file, which contains more samples than the single-character version.
###Code
# Input column names, which aren't included in the expanded data file
names = [
'Toxicity',
'Cap Shape',
'Cap Surface',
'Cap Color',
'Bruises?',
'Odor',
'Gill Attachment',
'Gill Spacing',
'Gill Size',
'Gill Color',
'Stalk Shape',
'Stalk Root',
'Stalk Surface Above Ring',
'Stalk Surface Below Ring',
'Stalk Color Above Ring',
'Stalk Color Below Ring',
'Veil Type',
'Veil Color',
'Ring Number',
'Ring Type',
'Spore Print Color',
'Population',
'Habitat'
]
df = pd.read_csv('data/expanded', skiprows=9, names=names, index_col=None, engine='python', skipfooter=1); df
# Assess data variability
df.describe().loc['unique']
###Output
_____no_output_____
###Markdown
Veil type has only one value, so we can remove that feature later.
###Code
# Assess missing data
df.isna().sum()
###Output
_____no_output_____
###Markdown
There appears to be no missing data.
###Code
# Transform features
# Convert binary bruised state to boolean values
def bool_bruises(dfin):
dfin_no_bruises = dfin.drop(columns='Bruises?')
dfin_bool_bruises = dfin['Bruises?'].apply(lambda x: x == 'BRUISES')
return pd.concat([dfin_no_bruises, dfin_bool_bruises], axis=1)
# Drop veil type, because it has one value
drop_veil_type = lambda dfin: dfin.drop(columns='Veil Type')
# Convert class to boolean values
def bool_toxicity(dfin):
dfin_no_toxicity = dfin.drop(columns='Toxicity')
dfin_toxic = dfin['Toxicity'].apply(lambda x: x == 'POISONOUS')
dfin_toxic.name = 'Toxic?'
return pd.concat([dfin_toxic, dfin_no_toxicity], axis=1)
# One-hot encode
one_hot_encode = lambda dfin: pd.get_dummies(dfin)
fns = [bool_bruises, bool_toxicity, drop_veil_type, one_hot_encode]
df_trans = reduce(lambda res, fn: fn(res), fns, df); df_trans
###Output
_____no_output_____
###Markdown
Although other features besides _Bruises?_ may have only two categories, _Bruises?_ was the only column treated as binary in the data, so we have transformed it into boolean values to reflect that.
###Code
# Assess distribution of class
df_trans['Toxic?'].mean()
###Output
_____no_output_____
###Markdown
We have similarly sized edible and poisonous samples.
###Code
# Create machine learning model
X = df_trans.drop(columns='Toxic?')
y = df_trans['Toxic?']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
random_forest_model = RandomForestClassifier(random_state=0).fit(X_train, y_train); random_forest_model
f1_score(random_forest_model.predict(X_test), y_test)
###Output
_____no_output_____
###Markdown
That's extremely accurate!
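For a fuller picture than the single F1 score, the confusion matrix on the held-out test set can be inspected as well (a small sketch reusing the model and split from above):

```python
from sklearn.metrics import confusion_matrix

# Rows are true classes, columns are predicted classes (False = edible, True = poisonous).
confusion_matrix(y_test, random_forest_model.predict(X_test))
```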
###Code
# Save the DOT data and convert it to PNG
! mkdir dot
! mkdir images
export_graphviz(random_forest_model.estimators_[0], out_file='dot/subestimatortree.dot', feature_names=X.columns, class_names=['Edible', 'Poisonous'])
! dot -Tpng dot/subestimatortree.dot -o images/subestimatortree.png
# Create a decision tree with a depth of 1
decision_tree_model1 = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y); decision_tree_model1
# Save the DOT data and convert it to PNG
export_graphviz(decision_tree_model1, out_file='dot/tree1.dot', feature_names=X.columns, class_names=['Edible', 'Poisonous'])
! dot -Tpng dot/tree1.dot -o images/tree1.png
# Create a decision tree with a depth of 2
decision_tree_model2 = DecisionTreeClassifier(random_state=0, max_depth=2).fit(X, y)
# export_graphviz returns None when out_file is given, so there is nothing to wrap in graphviz.Source;
# the PNG is produced by the `dot` call below, as for the depth-1 tree.
export_graphviz(decision_tree_model2, out_file='dot/tree2.dot', feature_names=X.columns, class_names=['Edible', 'Poisonous'])
! dot -Tpng dot/tree2.dot -o images/tree2.png
# Create a decision tree with a depth of 3
decision_tree_model3 = DecisionTreeClassifier(random_state=0, max_depth=3).fit(X, y)
export_graphviz(decision_tree_model3, out_file='dot/tree3.dot', feature_names=X.columns, class_names=['Edible', 'Poisonous'])
! dot -Tpng dot/tree3.dot -o images/tree3.png
# Since odor is important, what were the different odors?
df['Odor'].value_counts()
###Output
_____no_output_____
###Markdown
finding unique usernames
###Code
df.user_screen_name.value_counts()
c2=df.in_reply_to_screen_name.value_counts()
c3=df.retweet_or_quote_screen_name.value_counts()
pd.unique(df.in_reply_to_screen_name)
s1=pd.DataFrame(c2)
s2=pd.DataFrame(c3)
s2
s1.to_csv('reply_username_list.csv', index=True)
s2.to_csv('retweet_username_list.csv', index=True)
s1.index
s3 = s1[s1.index.isin(s2.index)]
s3
###Output
_____no_output_____
###Markdown
Adding types:

- I = Institution
- OI = Other Institutions
- P = Person/Individual
- No Account = Account deleted

in reply usernames
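A hypothetical, more readable equivalent of the rule-based mapping applied via the lambda in the cells below:

```python
import numpy as np

def classify_reply_username(name: str):
    """Rough heuristic: institutional accounts vs. individuals (matches the lambda below)."""
    lower = name.lower()
    if "tamu" in lower or "aggie" in lower:
        return "I"   # institution
    if "_" in name or any(ch.isdigit() for ch in name):
        return "P"   # person / individual
    return np.nan    # undecided
```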
###Code
# in_reply_to_screen_name
filepath = os.path.join(
"\\".join([os.getcwd(), "reply_username_list.csv"])
)
df_s1 = pd.read_csv(filepath, index_col=False)
df_s1.rename(columns={"Unnamed: 0":"username"},inplace=True)
df_s1.head()
# df_s1[df_s1['username'].str.contains("TAMU")]
###Output
_____no_output_____
###Markdown
TAMU AGGIE _ & numbers
###Code
df_s1['type'] = df_s1.username.map(lambda x: 'I' if x.lower().__contains__("tamu") else 'I' if x.lower().__contains__("aggie") else 'P' if x.__contains__("_") else 'P' if any(chr.isdigit() for chr in x) else np.nan)
df_s1
df_s1=df_s1.sort_values(by=['type'])
df_s1.reset_index(drop=True)
###Output
_____no_output_____
###Markdown
retweet usernames
###Code
# retweet_or_quote_screen_name
filepath = os.path.join(
"\\".join([os.getcwd(), "retweet_username_list.csv"])
)
df_s2 = pd.read_csv(filepath, index_col=False)
df_s2.rename(columns={"Unnamed: 0":"username"},inplace=True)
df_s2.head()
###Output
_____no_output_____
###Markdown
TAMU AGGIE _ & numbers
###Code
# df_s2['type'] = df_s2.username.map(lambda x: 'I' if x.lower().__contains__("tamu") else 'I' if x.lower().__contains__("aggie") else 'P' if x.__contains__("_") else 'P' if any(chr.isdigit() for chr in x) else np.nan)
df_s2['type'] = df_s2.username.map(lambda x: 'I' if x.lower().__contains__("tamu") else 'I' if x.lower().__contains__("aggie") else 'P' if x.__contains__(
"_") else 'P' if any(chr.isdigit() for chr in x) else 'OI' if x.lower().__contains__("texas") else 'OI' if x.lower().__contains__("school") else 'OI' if x.lower().__contains__("tx") else 'OI' if x.lower().__contains__("news") else 'OI' if x.isupper() else np.nan)
###Output
_____no_output_____
###Markdown
numbers & _
###Code
df_s2
df_s2=df_s2.sort_values(by=['type'])
df_s2.reset_index(drop=True)
###Output
_____no_output_____
###Markdown
saving data
###Code
# df_s1.to_csv('reply_username_list.csv', index=True)
# df_s2.to_csv('retweet_username_list.csv', index=True)
###Output
_____no_output_____
###Markdown
Analysis of HAJ Hannover Halfmarathon 2019
###Code
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import nbimporter
from src import scraper
def hour_to_decimal(hour: str) -> float:
    digits = hour.split(':')
    # seconds contribute 1/3600 of an hour
    return int(digits[0]) + int(digits[1]) / 60.0 + int(digits[2]) / 3600.0
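# e.g. hour_to_decimal('1:30:00') == 1.5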
def histogram(columns, header, xlabel, ylabel='Frequency'):
for column in columns:
plt.hist(column, bins=50, rwidth=0.85, alpha=0.4)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(header)
plt.show()
AGE_CLASSES = {
'–': -1,
'JU18': 0,
'JU20': 1,
'HK': 2,
'30': 3,
'35': 4,
'40': 5,
'45': 6,
'50': 7,
'55': 8,
'60': 9,
'65': 10,
'70': 11,
'75': 12,
'80': 13,
'M85': 14,
}
data_m = pd.read_csv(scraper.get_csv(2019, 'M'), delimiter=scraper.DELIMITER)
data_w = pd.read_csv(scraper.get_csv(2019, 'W'), delimiter=scraper.DELIMITER)
data_m_w = pd.concat([data_w, data_m])
data_m_w.head()
data_m['Finish_decimal'] = data_m['Finish'].apply(hour_to_decimal)
data_w['Finish_decimal'] = data_w['Finish'].apply(hour_to_decimal)
###Output
_____no_output_____
###Markdown
Overview: men and women
###Code
histogram([data_m['Finish_decimal'], data_w['Finish_decimal']], 'Distribution of finishing times', 'hours')
main_age_class_m = data_m[data_m['AC'] == 'HK']
main_age_class_w = data_w[data_w['AC'] == 'HK']
histogram([main_age_class_m['Finish_decimal'], main_age_class_w['Finish_decimal']], 'Distribution of finishing times of main age class (HK)', 'Hours')
###Output
_____no_output_____
###Markdown
Top clubs by number
###Code
data_m_w['Club'].value_counts().head(20)
###Output
_____no_output_____
###Markdown
Detailed analysis: men. Top and worst placements
###Code
data_m.head()
data_m.tail()
###Output
_____no_output_____
###Markdown
Average finish times
###Code
data_m.groupby('AC').mean()
###Output
_____no_output_____
###Markdown
Number of people in age class
###Code
data_m['AC'].value_counts().plot.bar()
###Output
_____no_output_____
###Markdown
Distribution of finishing times in age classes
###Code
# add map age classes to integers
data_m['AC_label'] = data_m['AC']
data_m['AC_label'] = data_m.AC.replace(AGE_CLASSES)
data_m.boxplot(by='AC_label', column='Finish_decimal')
plt.xticks(range(1, len(AGE_CLASSES) + 1), list(AGE_CLASSES.keys()))
plt.show()
###Output
_____no_output_____
###Markdown
Correlation of numeric columns
###Code
data_m.corr()
###Output
_____no_output_____
###Markdown
Analysis of business health

For this analysis I have defined the following KPIs:

- Number of sessions per month and company type
- Number of clients lost per month and company type
- Total and percentage profit per month and company type

These KPIs are based on the database created by the Docker container __app__, which runs the script __app.py__ where the tables are created and populated with fake data.

![alt text](img/db_image.png "Database")

The database has 3 tables:

- sessions
- companies
- subscriptions

For the calculation of all the KPIs I only used SQL to handle all the data aggregations and transformations.
###Code
import numpy as np
import pandas as pd
import psycopg2
import matplotlib.pyplot as plt
# Database params
POSTGRES_HOST='localhost'
POSTGRES_PASSWORD='password'
POSTGRES_USER='user'
POSTGRES_DB='db'
# Get the connection to the database
def get_db_conn():
conn = psycopg2.connect(f"dbname='{POSTGRES_DB}' user='{POSTGRES_USER}' host='{POSTGRES_HOST}' password='{POSTGRES_PASSWORD}'")
conn.autocommit = True
return(conn)
# Create a bar plot
def bar_plot(date,col1,label1,col2,label2,title,ylable,type="default"):
width = 0.35
x = np.arange(len(date))
fig, ax = plt.subplots(figsize=(14,5))
if(type == "stacked"):
rects1 = ax.bar(date, col1, 0.35, label=label1)
rects2 = ax.bar(date, col2, 0.35, bottom=col1, label=label2)
ax.bar_label(rects1, padding=3)
ax.bar_label(rects2, padding=3)
else:
rects1 = ax.bar(x - width/2, col1, 0.35, label=label1)
rects2 = ax.bar(x + width/2, col2, 0.35, label=label2)
ax.bar_label(rects1, padding=3)
ax.bar_label(rects2, padding=3)
ax.set_ylabel(ylable)
ax.set_title(title)
ax.legend()
ax.set_xticks(x, date)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Number of sessions per month and company type
###Code
# Connect to the database
conn = get_db_conn()
cur = conn.cursor()
cur.execute("""
SELECT year || '-' || month AS date,
COUNT(*) AS number_companies,
SUM(CASE WHEN company_size = 'large' THEN 1 ELSE 0 END) AS number_companies_large,
SUM(CASE WHEN company_size = 'small' THEN 1 ELSE 0 END) AS number_companies_small,
SUM(number_sessions) AS total_sessions,
SUM(CASE WHEN company_size = 'large' THEN number_sessions ELSE 0 END) AS total_sessions_large,
SUM(CASE WHEN company_size = 'small' THEN number_sessions ELSE 0 END) AS total_sessions_small
FROM (
SELECT session_company_id AS company_id,
EXTRACT(YEAR FROM session_created_at) AS year,
EXTRACT(MONTH FROM session_created_at) AS month,
COUNT(*) AS number_sessions
FROM sessions
GROUP BY session_company_id, year, month
) AS t0
INNER JOIN companies AS t1 ON t0.company_id = t1.company_id
GROUP BY year, month
ORDER BY year, month;
""")
sessions = pd.DataFrame(cur.fetchall(), columns=[i[0] for i in cur.description])
cur.close()
sessions.head(100)
# Nº of sessions by month and company size
bar_plot(sessions.date, sessions.total_sessions_large, 'large', sessions.total_sessions_small, 'small', 'Nº of sessions by month and company size', 'Nº of sessions','stacked')
# Nº of sessions by month and company size
bar_plot(sessions.date, sessions.total_sessions_large, 'large', sessions.total_sessions_small, 'small', 'Nº of sessions by month and company size', 'Nº of sessions')
###Output
_____no_output_____
###Markdown
Total and percentage profit per month and company type
###Code
# Connect to the database
conn = get_db_conn()
cur = conn.cursor()
cur.execute("""
SELECT date,
number_large_clients,
number_small_clients,
profit_large_clients,
profit_small_clients,
ROUND(profit_large_clients::numeric*100/total_profit_clients::numeric,2) AS percentage_profit_large_clients,
ROUND(profit_small_clients::numeric*100/total_profit_clients::numeric,2) AS percentage_profit_small_clients
FROM (
SELECT year || '-' || month AS date,
SUM(CASE WHEN company_size = 'large' THEN 1 ELSE 0 END) AS number_large_clients,
SUM(CASE WHEN company_size = 'small' THEN 1 ELSE 0 END) AS number_small_clients,
SUM(CASE WHEN company_size = 'large' THEN sub_price ELSE 0 END) AS profit_large_clients,
SUM(CASE WHEN company_size = 'small' THEN sub_price ELSE 0 END) AS profit_small_clients,
SUM(sub_price) AS total_profit_clients
FROM (
SELECT session_company_id AS company_id,
EXTRACT(YEAR FROM session_created_at) AS year,
EXTRACT(MONTH FROM session_created_at) AS month
FROM sessions
GROUP BY session_company_id, year, month
) AS t0
INNER JOIN companies AS t1 ON t0.company_id = t1.company_id
INNER JOIN subscriptions AS t2 ON t1.company_size = t2.sub_id
GROUP BY year, month
ORDER BY year, month
) AS final;
""")
profit = pd.DataFrame(cur.fetchall(), columns=[i[0] for i in cur.description])
cur.close()
profit.head(100)
# Total profit per month and company size
bar_plot(profit.date, profit.profit_large_clients, 'large', profit.profit_small_clients, 'small', 'Total profit per month and company size', 'Total profit (€)','stacked')
# Percentage of profit per month and company size
bar_plot(profit.date, profit.percentage_profit_large_clients, 'large', profit.percentage_profit_small_clients, 'small', 'Percentage of profit per month and company size', 'Percentage of profit','stacked')
###Output
_____no_output_____
###Markdown
Number of clients lost per month and company type
###Code
# Connect to the database
conn = get_db_conn()
cur = conn.cursor()
cur.execute("""
WITH number_companies AS (
SELECT ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS number,
date,
number_companies,
number_companies_large,
number_companies_small
FROM (
SELECT year || '-' || month AS date,
COUNT(*) AS number_companies,
SUM(CASE WHEN company_size = 'large' THEN 1 ELSE 0 END) AS number_companies_large,
SUM(CASE WHEN company_size = 'small' THEN 1 ELSE 0 END) AS number_companies_small
FROM (
SELECT session_company_id AS company_id,
EXTRACT(YEAR FROM session_created_at) AS year,
EXTRACT(MONTH FROM session_created_at) AS month
FROM sessions
GROUP BY session_company_id, year, month
) AS t0
INNER JOIN companies AS t1 ON t0.company_id = t1.company_id
GROUP BY year, month
ORDER BY year, month
) AS t1
)
SELECT date,
lost_clients,
SUM(lost_clients) OVER (ORDER BY number ASC) AS total_lost_clients,
lost_clients_large,
SUM(lost_clients_large) OVER (ORDER BY number ASC) AS total_lost_clients_large,
lost_clients_small,
SUM(lost_clients_small) OVER (ORDER BY number ASC) AS total_lost_clients_small
FROM (
SELECT current_month.date AS date,
current_month.number AS number,
CASE WHEN old_month.number_companies IS NULL THEN 0 ELSE old_month.number_companies - current_month.number_companies END AS lost_clients,
CASE WHEN old_month.number_companies_large IS NULL THEN 0 ELSE old_month.number_companies_large - current_month.number_companies_large END AS lost_clients_large,
CASE WHEN old_month.number_companies_small IS NULL THEN 0 ELSE old_month.number_companies_small - current_month.number_companies_small END AS lost_clients_small
FROM number_companies AS current_month
LEFT JOIN number_companies AS old_month ON current_month.number = old_month.number+1
) AS final;
""")
lost_clients = pd.DataFrame(cur.fetchall(), columns=[i[0] for i in cur.description])
cur.close()
lost_clients.head(100)
# Nº of lost clients by month and company size
bar_plot(lost_clients.date, lost_clients.lost_clients_large, 'large', lost_clients.lost_clients_small, 'small', 'Nº of lost clients by month and company size', 'Nº of lost clients')
# Nº of accumulated lost clients by month and company size
bar_plot(lost_clients.date, lost_clients.total_lost_clients_large, 'large', lost_clients.total_lost_clients_small, 'small', 'Nº of accumulated lost clients by month and company size', 'Nº of lost clients')
###Output
_____no_output_____
###Markdown
Formatting BEMS data for evaluation
###Code
import pandas as pd
import numpy as np
import copy
import datetime
import os
df = pd.read_excel('data\src\TREND_76_6904050_20210701_20210807_20210808110542.xlsx')
floors = [5]
# floors = [5]
ac_arr = {
4:["4f0","4f1","4f2","4f3","4f4","4f5","4f6","4f7","4f8","4f9"],
5:["5f0","5f1","5f2","5f3","5f4","5f5","5f6","5f7","5f8","5f9"],
6:["6f0","6f1","6f2","6f3","6f4","6f5","6f6","6f7","6f8","6f9"]
}
key_map_floor_dict = {
4:{
"時間":"信号名称",
"4f0設定温度":"C4F 事務室中ペリ PACG_設定温度",
"4f0運転モード":"C4F 事務室中ペリ PACG_運転モード",
"4f0風速":"C4F 事務室中ペリ PACG_風速",
# "4f0風速":"C4F 事務室中ペリ_風速",
"4f0吸込温度":"C4F 事務室中ペリ PACG_吸込温度",
"4f1設定温度":"C4F 事務室中ペリ PACG_設定温度",
"4f1運転モード":"C4F 事務室中ペリ PACG_運転モード",
"4f1風速":"C4F 事務室中ペリ PACG_風速",
# "4f1風速":"C4F 事務室中ペリ_風速",
"4f1吸込温度":"C4F 事務室中ペリ PACG_吸込温度",
"4f2設定温度":"C4F 事務室中 PACG_設定温度",
"4f2運転モード":"C4F 事務室中 PACG_運転モード",
"4f2風速":"C4F 事務室中 PACG_風速",
"4f2吸込温度":"C4F 事務室中 PACG_吸込温度",
"4f3設定温度":"C4F 事務室中 PACG_設定温度",
"4f3運転モード":"C4F 事務室中 PACG_運転モード",
"4f3風速":"C4F 事務室中 PACG_風速",
"4f3吸込温度":"C4F 事務室中 PACG_吸込温度",
"4f4設定温度":"C4F 事務室南ペリ PACG_設定温度",
"4f4運転モード":"C4F 事務室南ペリ PACG_運転モード",
"4f4風速":"C4F 事務室南ペリ PACG_風速",
"4f4吸込温度":"C4F 事務室南ペリ PACG_吸込温度",
"4f5設定温度":"C4F 事務室南ペリ PACG_設定温度",
"4f5運転モード":"C4F 事務室南ペリ PACG_運転モード",
"4f5風速":"C4F 事務室南ペリ PACG_風速",
"4f5吸込温度":"C4F 事務室南ペリ PACG_吸込温度",
"4f6設定温度":"C4F 事務室南 PACG_設定温度",
"4f6運転モード":"C4F 事務室南 PACG_運転モード",
"4f6風速":"C4F 事務室南 PACG_風速",
"4f6吸込温度":"C4F 事務室南 PACG_吸込温度",
"4f7設定温度":"C4F 事務室南 PACG_設定温度",
"4f7運転モード":"C4F 事務室南 PACG_運転モード",
"4f7風速":"C4F 事務室南 PACG_風速",
"4f7吸込温度":"C4F 事務室南 PACG_吸込温度",
"4f8設定温度":"C4F 事務室南 PACG_設定温度",
"4f8運転モード":"C4F 事務室南 PACG_運転モード",
"4f8風速":"C4F 事務室南 PACG_風速",
"4f8吸込温度":"C4F 事務室南 PACG_吸込温度",
"4f9設定温度":"C4F 事務室東南 PAC_設定温度",
"4f9運転モード":"C4F 事務室東南 PAC_運転モード",
"4f9風速":"C4F 事務室東南 PAC_風速",
"4f9吸込温度":"C4F 事務室東南 PAC_吸込温度",
"外気温":"B館 RF 外気温度"
},
5:{
"時間":"信号名称",
"5f0設定温度":"C5F 事務室中ペリ PACG_設定温度",
"5f0運転モード":"C5F 事務室中ペリ PACG_運転モード",
"5f0風速":"C5F 事務室中ペリ PACG_風速",
"5f0吸込温度":"C5F 事務室中ペリ PACG_吸込温度",
"5f1設定温度":"C5F 事務室中ペリ PACG_設定温度",
"5f1運転モード":"C5F 事務室中ペリ PACG_運転モード",
"5f1風速":"C5F 事務室中ペリ PACG_風速",
"5f1吸込温度":"C5F 事務室中ペリ PACG_吸込温度",
"5f2設定温度":"C5F 事務室中 PACG_設定温度",
"5f2運転モード":"C5F 事務室中 PACG_運転モード",
"5f2風速":"C5F 事務室中 PACG_風速",
"5f2吸込温度":"C5F 事務室中 PACG_吸込温度",
"5f3設定温度":"C5F 事務室中 PACG_設定温度",
"5f3運転モード":"C5F 事務室中 PACG_運転モード",
"5f3風速":"C5F 事務室中 PACG_風速",
"5f3吸込温度":"C5F 事務室中 PACG_吸込温度",
"5f4設定温度":"C5F 事務室南ペリ PACG_設定温度",
"5f4運転モード":"C5F 事務室南ペリ PACG_運転モード",
"5f4風速":"C5F 事務室南ペリ PACG_風速",
"5f4吸込温度":"C5F 事務室南ペリ PACG_吸込温度",
"5f5設定温度":"C5F 事務室南ペリ PACG_設定温度",
"5f5運転モード":"C5F 事務室南ペリ PACG_運転モード",
"5f5風速":"C5F 事務室南ペリ PACG_風速",
"5f5吸込温度":"C5F 事務室南ペリ PACG_吸込温度",
"5f6設定温度":"C5F 事務室南 PACG_設定温度",
"5f6運転モード":"C5F 事務室南 PACG_運転モード",
"5f6風速":"C5F 事務室南 PACG_風速",
"5f6吸込温度":"C5F 事務室南 PACG_吸込温度",
"5f7設定温度":"C5F 事務室南 PACG_設定温度",
"5f7運転モード":"C5F 事務室南 PACG_運転モード",
"5f7風速":"C5F 事務室南 PACG_風速",
"5f7吸込温度":"C5F 事務室南 PACG_吸込温度",
"5f8設定温度":"C5F 事務室南 PACG_設定温度",
"5f8運転モード":"C5F 事務室南 PACG_運転モード",
"5f8風速":"C5F 事務室南 PACG_風速",
"5f8吸込温度":"C5F 事務室南 PACG_吸込温度",
"5f9設定温度":"C5F 事務室東南 PAC_設定温度",
"5f9運転モード":"C5F 事務室東南 PAC_運転モード",
"5f9風速":"C5F 事務室東南 PAC_風速",
"5f9吸込温度":"C5F 事務室東南 PAC_吸込温度",
"5気温":"B館 RF 外気温度"
},
6:{
"時間":"信号名称",
"6f0設定温度":"C6F 事務室中ぺリ PACG_設定温度",
"6f0運転モード":"C6F 事務室中ペリ PACG_運転モード",
"6f0風速":"C6F 事務室中ペリ PACG_風速",
"6f0吸込温度":"C6F 事務室中ぺリ PACG_吸込温度",
"6f1設定温度":"C6F 事務室中ぺリ PACG_設定温度",
"6f1運転モード":"C6F 事務室中ペリ PACG_運転モード",
"6f1風速":"C6F 事務室中ペリ PACG_風速",
"6f1吸込温度":"C6F 事務室中ぺリ PACG_吸込温度",
"6f2設定温度":"C6F 事務室中 PACG_設定温度",
"6f2運転モード":"C6F 事務室中 PACG_運転モード",
"6f2風速":"C6F 事務室中 PACG_風速",
"6f2吸込温度":"C6F 事務室中 PACG_吸込温度",
"6f3設定温度":"C6F 事務室中 PACG_設定温度",
"6f3運転モード":"C6F 事務室中 PACG_運転モード",
"6f3風速":"C6F 事務室中 PACG_風速",
"6f3吸込温度":"C6F 事務室中 PACG_吸込温度",
"6f4設定温度":"C6F 事務室南ペリ PACG_設定温度",
"6f4運転モード":"C6F 事務室南ペリ PACG_運転モード",
"6f4風速":"C6F 事務室南ペリ PACG_風速",
"6f4吸込温度":"C6F 事務室南ペリ PACG_吸込温度",
"6f5設定温度":"C6F 事務室南ペリ PACG_設定温度",
"6f5運転モード":"C6F 事務室南ペリ PACG_運転モード",
"6f5風速":"C6F 事務室南ペリ PACG_風速",
"6f5吸込温度":"C6F 事務室南ペリ PACG_吸込温度",
"6f6設定温度":"C6F 事務室南 PACG_設定温度",
"6f6運転モード":"C6F 事務室南 PACG_運転モード",
#"6f6風速":"C6F 事務室南 PACG_風速",
"6f6吸込温度":"C6F 事務室南 PACG_吸込温度",
"6f7設定温度":"C6F 事務室南 PACG_設定温度",
"6f7運転モード":"C6F 事務室南 PACG_運転モード",
#"6f7風速":"C6F 事務室南 PACG_風速",
"6f7吸込温度":"C6F 事務室南 PACG_吸込温度",
"6f8設定温度":"C6F 事務室南 PACG_設定温度",
"6f8運転モード":"C6F 事務室南 PACG_運転モード",
#"6f8風速":"C6F 事務室南 PACG_風速",
"6f8吸込温度":"C6F 事務室南 PACG_吸込温度",
"6f9設定温度":"C6F 事務室東南 PAC_設定温度",
"6f9運転モード":"C6F 事務室東南 PAC_運転モード",
"6f9風速":"C6F 事務室東南 PAC_風速",
"6f9吸込温度":"C6F 事務室東南 PAC_吸込温度",
"外気温":"B館 RF 外気温度"
},
}
data_all = {}
for floor in floors:
result_df = pd.DataFrame()
data_all[floor] = result_df
def init_cvt(df):
df.columns = df.loc[6]
df = df.drop(df.index[[0,1,2, 3,4, 5,6,7,8,9]])
return df.loc[:,~df.columns.str.contains("ロスナイ|湿度|電力量|電流|ロスナイ")]
def split_floor_data(df,floor_arr):
df = df.reset_index()
start_time = df["信号名称"].loc[0]
end_time = df["信号名称"].loc[len(df)-1]
df_floors = {}
for floor in floor_arr:
df_floors[floor] = df.loc[:,df.columns.str.contains("信号名称|外気温度|{}F".format(floor))]
return df_floors, start_time, end_time
def select_columns(df):
control_columns = []
init_bems_columns = []
for c in df.columns:
if "吸込温度" in c:
init_bems_columns.append(c)
else:
if("時間" in c) or ("外気温" in c):
init_bems_columns.append(c)
control_columns.append(c)
else:
control_columns.append(c)
return init_bems_columns,control_columns
df_cvt = init_cvt(df)
df_cvt_arr,start_time,end_time = split_floor_data(df_cvt,floors)
df_cvt_arr
def adjustment_items(df_arr,season):
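    # season: 0 = summer, 1 = winter, anything else = intermediate season
    # (descriptive comment added here; inferred from the branches below)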
df_result_dic = {}
for floor,df in df_arr.items():
air_con_area = [f'C{floor}F 事務室北ペリ PACG_',f'C{floor}F 事務室北 PACG_',f'C{floor}F 事務室中ペリ PACG_',f'C{floor}F 事務室中 PACG_',f'C{floor}F 事務室南ペリ PACG_',f'C{floor}F 事務室南 PACG_',f'C{floor}F 事務室東南 PAC_']
df_result = copy.deepcopy(df)
for one in air_con_area:
            # If the run state is 0, the unit is powered off, so set the operation mode to 0
df_result.loc[df_result[one+'運転']==0,one+'運転モード'] = 0
            # If running (1) and the energy-saving level is 2 or 3, or the operation mode is 3, set the mode to fan-only (3)
df_result.loc[(df_result[one+'運転']==1) & ((df_result[f'C館 {floor}F G50_省エネレベル'] == 2) | (df_result[f'C館 {floor}F G50_省エネレベル'] == 3) | (df_result[one+'運転モード'] == 3)),one+'運転モード'] = 3
            # Summer case
if season == 0:
                # If running (1) and the energy-saving level is 1, set the mode to cooling (1)
df_result.loc[(df_result[one+'運転']==1) & (df_result[f'C館 {floor}F G50_省エネレベル'] == 1),one+'運転モード'] = 1
            # Winter case
elif season == 1:
                # If running (1), the energy-saving level is 1, and the mode is 2, set the mode to heating (2)
df_result.loc[(df_result[one+'運転']==1) & ((df_result[f'C館 {floor}F G50_省エネレベル'] == 1) & (df_result[one+'運転モード'] == 2)),one+'運転モード'] = 2
                # Interior-zone units in winter
if (one == f'C{floor}F 事務室中 PACG_') or (one == f'C{floor}F 事務室南 PACG_'):
                    # When an interior-zone unit is running in heating mode, apply the +4°C up-control to its intake temperature
df_result.loc[(df_result[one+'運転']==1) & (df_result[one+'運転モード'] == 2),one+'吸込温度'] += 4
            # Intermediate-season case
else:
                # If running (1) and the energy-saving level is 1, set the mode to cooling (1)
df_result.loc[(df_result[one+'運転']==1) & (df_result[f'C館 {floor}F G50_省エネレベル'] == 1),one+'運転モード'] = 1
                # If running (1), the energy-saving level is 1, and the mode is 2, set the mode to heating (2)
df_result.loc[(df_result[one+'運転']==1) & ((df_result[f'C館 {floor}F G50_省エネレベル'] == 1) & (df_result[one+'運転モード'] == 2)),one+'運転モード'] = 2
df_result_dic[floor] = df_result
return df_result_dic
result_df_dic = adjustment_items(df_cvt_arr,1)
for floor in floors:
for key,value in key_map_floor_dict[floor].items():
data_all[floor][key] = result_df_dic[floor][value].values
date_gap = (end_time - start_time).days
data_all[5]
result_arr = []
base_time = start_time
for i in range(1,date_gap+1) :
floors_data = {}
floors_control_data = {}
floors_init_bems_data = {}
for key,value in data_all.items():
next_time = base_time + datetime.timedelta(days=1)
_value = value[(value["時間"] >= datetime.datetime(base_time.year,base_time.month,base_time.day))&(value["時間"] < datetime.datetime(next_time.year,next_time.month,next_time.day))]
floors_data[key] = _value
bems_columns, control_columns = select_columns(_value)
floors_control_data[key] = _value[control_columns]
floors_init_bems_data[key] = _value[bems_columns]
result_arr.append(
{
"time":"{0}_{1}_{2}".format(base_time.year,base_time.month,base_time.day),
"data":floors_data,
"control":floors_control_data,
"init_bems":floors_init_bems_data
})
base_time = next_time
def select_columns(df):
control_columns = []
init_bems_columns = []
for c in df.columns:
if "吸込温度" in c:
init_bems_columns.append(c)
else:
if("時間" in c) or ("外気温" in c):
init_bems_columns.append(c)
control_columns.append(c)
else:
control_columns.append(c)
return init_bems_columns,control_columns
all_data_dir_path = "data/evaluation/base/"
control_data_dir_path = "data/evaluation/control/"
init_bems_data_dir_path = "data/evaluation/init_bems/"
for i in result_arr:
time_dir = i["time"] + "/"
os.makedirs(all_data_dir_path + time_dir,exist_ok=True)
os.makedirs(control_data_dir_path + time_dir,exist_ok=True)
os.makedirs(init_bems_data_dir_path + time_dir,exist_ok=True)
for key in i["data"].keys():
file_all_data_path = all_data_dir_path + time_dir + "all_bems_data{}.csv".format(key)
file_control_path = control_data_dir_path + time_dir + "control_{}.csv".format(key)
file_init_bems_path = init_bems_data_dir_path + time_dir + "init_bems_{}.csv".format(key)
i["data"][key].to_csv(file_all_data_path,encoding='utf_8_sig',index=False)
i["control"][key].to_csv(file_control_path,encoding='utf_8_sig',index=False)
i["init_bems"][key].to_csv(file_init_bems_path,encoding='utf_8_sig',index=False)
###Output
_____no_output_____
###Markdown
Project 1: Quora Question Pairs Description: This notebook uses NLP to generate predictions for the Quora Question Pairs dataset from https://www.kaggle.com/c/quora-question-pairs/data
###Code
from pathlib import Path
import random
import io
import spacy
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from nltk.sentiment.vader import SentimentIntensityAnalyzer
###Output
_____no_output_____
###Markdown
Function definitions, Training Set Import, Preprocessing Define helper functions to calculate cosine similarity
###Code
def parse(nlp, docs):
parsed_docs = []
for doc in nlp.pipe(list(docs), n_threads=10):
parsed_docs.append(doc)
return parsed_docs
def get_similarity(docs):
return docs[0].similarity(docs[1])
def get_sentiment(text):
sid = SentimentIntensityAnalyzer()
polarity = sid.polarity_scores(text)
compound = polarity['compound']
neg = polarity['neg']
neu = polarity['neu']
pos = polarity['pos']
return compound, neg, neu, pos
sentiment_vectorized = np.vectorize(get_sentiment)
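# Note (descriptive comment added): because get_sentiment returns a 4-tuple,
# np.vectorize produces a tuple of four arrays (compound, neg, neu, pos), one
# entry per input string; that is why sentiment1[0]..sentiment1[3] are indexed further below.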
###Output
_____no_output_____
###Markdown
Load in train.csv. For faster computation, only load 2.5% of the full sample, or about 10,000 rows
###Code
random.seed(42)
csv = Path.cwd().joinpath('train.csv')
p = 0.025
df = pd.read_csv(csv,
index_col='id',
skiprows=lambda i: i>0 and random.random() > p)
df['is_duplicate'].value_counts()
###Output
_____no_output_____
###Markdown
Calculate cosine similarity between question 1 and question 2, then concatenate the questions for TFIDF generation
###Code
nlp = spacy.load('en_core_web_lg')
df['q1_parsed'] = parse(nlp, df['question1'].astype(str))
df['q2_parsed'] = parse(nlp, df['question2'].astype(str))
df.head()
df['similarity'] = df[['q1_parsed', 'q2_parsed']].apply(get_similarity, axis=1)
df['q_concat'] = df['question1'].map(str) + ' ' + df['question2']
df.head()
###Output
_____no_output_____
###Markdown
Calculate polarity scores for each question
###Code
sentiment1 = sentiment_vectorized(df['question1'].values)
sentiment2 = sentiment_vectorized(df['question2'].values)
df['compound1'] = sentiment1[0]
df['neg1'] = sentiment1[1]
df['neu1'] = sentiment1[2]
df['pos1'] = sentiment1[3]
df['compound2'] = sentiment2[0]
df['neg2'] = sentiment2[1]
df['neu2'] = sentiment2[2]
df['pos2'] = sentiment2[3]
df.head()
###Output
_____no_output_____
###Markdown
Calculate absolute differences in sentimentality for each question-pair
###Code
df['compound_diff'] = (df['compound1'] - df['compound2']).abs()
df['neg_diff'] = (df['neg1'] - df['neg2']).abs()
df['neu_diff'] = (df['neu1'] - df['neu2']).abs()
df['pos_diff'] = (df['pos1'] - df['pos2']).abs()
df.head(10)
###Output
_____no_output_____
###Markdown
Train-test split
###Code
x = df.drop(['question1',
'question2',
'qid1',
'qid2',
'compound1',
'neg1',
'neu1',
'pos1',
'compound2',
'neg2',
'neu2',
'pos2',
'is_duplicate'], axis=1)
y = df['is_duplicate']
x_train, x_test, y_train, y_test = train_test_split(
x, y, stratify=y, random_state=42
)
x_train.head()
###Output
_____no_output_____
###Markdown
TF-IDF Vectorizer. Generate TF-IDF's for the train and test sets
###Code
vectorizer = TfidfVectorizer()
train_tfidf = vectorizer.fit_transform(
x_train['q_concat'].values.astype('U')
)
test_tfidf = vectorizer.transform(
x_test['q_concat'].values.astype('U')
)
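# Added note: the vectorizer is fit on the training questions only and then applied
# unchanged to the test questions, so no test-set vocabulary leaks into training.
print("TF-IDF shapes (train, test):", train_tfidf.shape, test_tfidf.shape)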
x_train_bow = pd.merge(
x_train.drop('q_concat', axis=1),
pd.DataFrame(train_tfidf.todense(), index=x_train.index),
on=x_train.index
).set_index('key_0')
x_test_bow = pd.merge(
x_test.drop('q_concat', axis=1),
pd.DataFrame(test_tfidf.todense(), index=x_test.index),
on=x_test.index
).set_index('key_0')
x_train_bow.head()
###Output
_____no_output_____
###Markdown
Model 1: Logistic Regression
###Code
logit = LogisticRegression(solver='sag', random_state=42)
logit.fit(x_train_bow, y_train)
preds = logit.predict(x_test_bow)
print(accuracy_score(y_test, preds))
print(confusion_matrix(y_test, preds))
###Output
_____no_output_____
###Markdown
Model 2: Multinomial Naive Bayes. Multinomial Naive Bayes shows a strong bias towards non-duplicate predictions
###Code
mnb = MultinomialNB()
mnb.fit(x_train_bow, y_train)
preds = mnb.predict(x_test_bow)
print(accuracy_score(y_test, preds))
print(confusion_matrix(y_test, preds))
###Output
_____no_output_____
###Markdown
Feature transformation: Singular Value Decomposition. Using sklearn's TruncatedSVD class, reduce the TF-IDF's into a lower feature space of 100 components
###Code
svd = TruncatedSVD(n_components=100, random_state=42)
train_tfidf_lsa = svd.fit_transform(train_tfidf)
test_tfidf_lsa = svd.transform(test_tfidf)
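# Added check (illustrative): total TF-IDF variance captured by the 100 retained components.
print("explained variance ratio (sum):", svd.explained_variance_ratio_.sum())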
x_train_lsa = pd.merge(
x_train.drop('q_concat', axis=1),
pd.DataFrame(train_tfidf_lsa, index=x_train.index),
on=x_train.index
).set_index('key_0')
x_test_lsa = pd.merge(
x_test.drop('q_concat', axis=1),
pd.DataFrame(test_tfidf_lsa, index=x_test.index),
on=x_test.index
).set_index('key_0')
x_train_lsa.head()
###Output
_____no_output_____
###Markdown
Model 1: Logistic Regression. Not much improvement over the non-reduced dataset
###Code
logit = LogisticRegression(C=999999, solver='liblinear', random_state=42)
logit.fit(x_train_lsa, y_train)
preds = logit.predict(x_test_lsa)
print(accuracy_score(y_test, preds))
print(confusion_matrix(y_test, preds))
###Output
_____no_output_____
###Markdown
Model 2: Support Vector Machine. Using cosine similarity, sentiment differences, and the decomposed TF-IDF's as features, the linear Support Vector Machine classifier demonstrates greatly improved performance over Multinomial Naive Bayes, with much less bias toward non-duplicate predictions
###Code
svc = SVC(kernel='linear', random_state=42)
svc.fit(x_train_lsa, y_train)
preds = svc.predict(x_test_lsa)
print(accuracy_score(y_test, preds))
print(confusion_matrix(y_test, preds))
###Output
_____no_output_____
###Markdown
Imports and configuration
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (8, 5)
plt.rcParams['axes.titlesize'] = 15
plt.rcParams['axes.titlepad'] = 20
PARTIES = ['D', 'R']
PARTY_NAME = {'D': 'Democrat', 'R':'Republican', '3':'Third-Party', 'I':'Independent', 'U':'Unknown', 'L': 'Libertarian'}
PARTY_COLORS = {'D': '#3498db', 'R':'#e74c3c', '3':'#9b59b6', 'I':'#2ecc71', 'U':'#34495e', 'L': '#AAAAAA'} #TODO: add colors from the fivethirtyeight palette instead
###Output
_____no_output_____
###Markdown
Loading data
###Code
def _csv_records(filename, all_pipe_sep):
with open(filename) as f:
for line in f:
if all_pipe_sep:
yield [t.strip()[:-1].strip() for t in line[1:].split(',|')]
else:
yield [t.replace('|', '').strip() for t in line.split(',')]
def csv_to_dataframe(filename, cols=None, all_pipe_sep=True):
df = pd.DataFrame(_csv_records(filename, all_pipe_sep), columns=cols or [])
for col in df:
if set(df[col].unique()) == {'', 'Y'}:
df[col] = (df[col] == 'Y')
else:
            df[col] = df[col].replace('', np.nan)  # passing value=None here would trigger pandas' pad-fill behaviour instead of inserting missing values
return df
# Candidates
columns = ['cycle', 'fecc_and_id', 'c_id', 'name', 'party', 'dist_id_run_for', 'dist_id_curr', 'curr_cand', 'cycle_cand', 'crpico', 'recipcode', 'no_pacs']
cands = csv_to_dataframe('data/campaign_finance/cands16.txt', cols=columns)
crpico = dict(I='incumbent', C='challenger', O='open_seat', U='unknown')
cands['crpico'] = cands['crpico'].apply(lambda s : crpico[s] if s in crpico else s or None)
# PACS contributions
columns = ['cycle', 'fec_rec_no', 'pac_id', 'c_id', 'amount', 'date', 'real_code', 'type', 'di', 'fecc_and_id']
pacs = csv_to_dataframe('data/campaign_finance/pacs16.txt', all_pipe_sep=False, cols=columns)
pacs['amount'] = pacs['amount'].astype(pd.np.int)
pacs['date'] = pd.to_datetime(pacs['date'], dayfirst=False, infer_datetime_format=True)
# Removing some unexpected values
pacs = pacs[pacs['amount'] > 0]
pacs = pacs[(pacs['date'] >= pd.datetime(2014, 12, 1)) & (pacs['date'] < pd.datetime(2017, 1, 1))]
# Union of candidates/PACS contributions
df = pacs.merge(cands, on=['c_id', 'cycle']).sort_values('date')
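# Added sanity check (illustrative): row counts going into and coming out of the join.
print(len(pacs), "contributions,", len(cands), "candidates,", len(df), "joined rows")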
###Output
_____no_output_____
###Markdown
Visualization
###Code
cands.sample(3, random_state=0)
pacs.sample(3, random_state=0)
df.sample(3, random_state=0)
t_df = df[df.amount < df.amount.quantile(.95)]
t_df = t_df.pivot_table('amount', t_df.index, 'party')
fig = t_df.plot.hist(stacked=True, bins=30)
_ = plt.legend()
amount_per_week = df.resample('7D', on='date').amount
amount_per_week.sum().plot.line()
_ = plt.title('Sum of the contributions, week by week.')
amount_per_week.mean().plot.line()
_ = plt.title('Mean of the contributions, week by week.')
t_df = df.set_index('date').groupby('party').resample('7D').amount.mean()
for party in df.party.unique():
t_df.loc[party, :].plot.line(label=PARTY_NAME[party], c=PARTY_COLORS[party])
plt.xticks([])
_ = plt.legend()
_ = plt.title('Mean contributions per week, by party.')
t_df = df.set_index('date').groupby('party').resample('7D').amount.sum()
for party in df.party.unique():
t_df.loc[party, :].plot.line(label=PARTY_NAME[party], c=PARTY_COLORS[party], sharex=True)
plt.xticks([])
_ = plt.legend()
_ = plt.title('Sum of the contributions per week, by party.')
###Output
_____no_output_____
###Markdown
Work In Progress:
###Code
t_df = df.set_index('date').groupby('party').resample('7D').amount.sum().reset_index()
pd.pivot_table(t_df, values='amount', columns='party', index='date').plot()
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, RationalQuadratic
import scipy as sp
date_mask = (df['date'] >= pd.datetime(2015, 1, 1))
t_df = df[date_mask].set_index('date').resample('7D').mean().fillna(0)
t_df.amount = (t_df.amount - t_df.amount.mean()) / t_df.amount.std()
kernel = ConstantKernel(1.0, (1e-3, 1e3)) * RBF(1, (1e-2, 1e2))
#kernel = RationalQuadratic(1.0, 1.0, (1e-5, 1e5))
regr = GaussianProcessRegressor(kernel=kernel)
sample_df = t_df.sample(int(len(t_df) * 1.0), random_state=0)
regr.fit(sample_df.index.asi8.reshape(-1, 1), sample_df.amount.values)
x = pd.date_range(start=t_df.index[0], end=t_df.index[-1], freq='15D')
y_pred, sigma = regr.predict(x.asi8.reshape(-1, 1), return_std=True)
confidence = 0.90
conf_interval = sp.stats.norm.interval(confidence)
plt.plot(x, y_pred, 'b--', label=u'Prediction')
plt.scatter(x, y_pred, s=3, c='black')
plt.plot(t_df.index, t_df.amount, 'r-', label=u'Actual', alpha=0.5)
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred + conf_interval[0] * sigma,
(y_pred + conf_interval[1] * sigma)[::-1]]),
alpha=.2, fc='b', ec='None', label='{}% confidence interval'.format(confidence * 100))
plt.legend()
###Output
_____no_output_____
###Markdown
Imports
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
import os
import math
import pickle
plt.rcParams["font.family"] = "Liberation Sans"
plt.rcParams["font.size"] = 12
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
plt.rcParams['lines.linewidth'] = 0.5
plt.rcParams['figure.autolayout'] = True
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.xmargin'] = 0
from preprocess import load_features_labels
features, labels, batch_ind = load_features_labels()
print(features.shape)
GAS_IDENTITIES = {
1: "Acetone",
2: "Acetaldehyde",
3: "Ethanol",
4: "Ethylene",
5: "Ammonia",
6: "Toluene"
}
###Output
_____no_output_____
###Markdown
Basic statistics Number of samples per label per batch
###Code
n_labels = np.unique(labels).shape[0]
df = pd.DataFrame()
for batch_num, (start, end) in enumerate(batch_ind):
print(f"Batch {batch_num+1}: {end-start} samples")
for label in range(n_labels):
n_matching = (labels[start:end]==label).astype(np.long).sum()
df = df.append({
# add one to be consistent with original data indexing
"label": label+1,
"batch": batch_num+1,
"count": n_matching
}, ignore_index=True)
print(f" gas {label+1}: {n_matching} samples")
df.pivot("batch", "label", "count").plot(kind='bar', figsize=(12, 6))
plt.ylabel("Count")
plt.title("Amount of data in each batch")
plt.show()
###Output
Batch 1: 445 samples
gas 1: 90 samples
gas 2: 98 samples
gas 3: 83 samples
gas 4: 30 samples
gas 5: 70 samples
gas 6: 74 samples
Batch 2: 1244 samples
gas 1: 164 samples
gas 2: 334 samples
gas 3: 100 samples
gas 4: 109 samples
gas 5: 532 samples
gas 6: 5 samples
Batch 3: 1586 samples
gas 1: 365 samples
gas 2: 490 samples
gas 3: 216 samples
gas 4: 240 samples
gas 5: 275 samples
gas 6: 0 samples
Batch 4: 161 samples
gas 1: 64 samples
gas 2: 43 samples
gas 3: 12 samples
gas 4: 30 samples
gas 5: 12 samples
gas 6: 0 samples
Batch 5: 197 samples
gas 1: 28 samples
gas 2: 40 samples
gas 3: 20 samples
gas 4: 46 samples
gas 5: 63 samples
gas 6: 0 samples
Batch 6: 2300 samples
gas 1: 514 samples
gas 2: 574 samples
gas 3: 110 samples
gas 4: 29 samples
gas 5: 606 samples
gas 6: 467 samples
Batch 7: 3613 samples
gas 1: 649 samples
gas 2: 662 samples
gas 3: 360 samples
gas 4: 744 samples
gas 5: 630 samples
gas 6: 568 samples
Batch 8: 294 samples
gas 1: 30 samples
gas 2: 30 samples
gas 3: 40 samples
gas 4: 33 samples
gas 5: 143 samples
gas 6: 18 samples
Batch 9: 470 samples
gas 1: 61 samples
gas 2: 55 samples
gas 3: 100 samples
gas 4: 75 samples
gas 5: 78 samples
gas 6: 101 samples
Batch 10: 3600 samples
gas 1: 600 samples
gas 2: 600 samples
gas 3: 600 samples
gas 4: 600 samples
gas 5: 600 samples
gas 6: 600 samples
###Markdown
Readings from a single odor class over time. A basic plot reveals temporal dynamics in the sensor readings: even within a single odor class the readings drift over time and across batches (dashed lines in the figure mark batch boundaries).
###Code
class_1_indices, = np.where(labels==0)
readings = features[class_1_indices]
x = np.arange(readings.shape[0])
feature_ids = [0]
n_plots = len(feature_ids)
fig, axes = plt.subplots(n_plots, 1, figsize=(7, 2.5), sharex=True)
if n_plots == 1:  # plt.subplots returns a single Axes object when only one row is requested
axes = [axes]
for plot_id, feat_id in enumerate(feature_ids):
y = readings[:, feat_id]
ax = axes[plot_id]
ax.plot(x, y, c="black", linewidth=1)
ax.set_ylabel(f"Feature {feat_id+1} Z-score")
for start, _ in batch_ind:
        # Draw a line at the first sample whose index is equal to or greater than the batch start
if start == 0:
continue
for sub_i, i in enumerate(class_1_indices):
if i >= start:
break
ax.axvline(sub_i, linestyle='--', linewidth=1)
ax.set_xlabel("Samples of Acetone")
# plt.tight_layout()
fig.savefig("writeup/figure_sources/fig_1.svg")
from preprocess import _load_raw_data
import matplotlib.pyplot as plt
import numpy as np
features, labels, batch_ind = _load_raw_data(include_gas_6=False)
anomalous_sensor = 1
readings = features[labels==0]
x = np.arange(readings.shape[0])
y = readings[:, anomalous_sensor]
plt.figure(figsize=(12, 4))
plt.plot(x, y)
plt.ylabel("Raw Reading")
plt.xlabel("Samples of Odor 1")
plt.title("Verifying that an outlier in the z-scores is present in the raw data")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Identifying outliers. Print the index of any feature with a standard deviation of more than 5, and also print that feature value.
###Code
(features > 50).nonzero()
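# Added sketch matching the markdown description above (assumes `features` is a
# 2-D NumPy array; the threshold simply mirrors the ad-hoc check on the previous line):
out_rows, out_cols = np.where(np.abs(features) > 50)
for r, c in zip(out_rows, out_cols):
    print(f"sample {r}, feature {c}: value {features[r, c]:.2f}")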
###Output
_____no_output_____
###Markdown
Visualizing features and labels for figures
###Code
# Visualize features and labels from each batch
import numpy as np
import math
import cairo
from mpl_toolkits.axes_grid1 import make_axes_locatable
from batches import features, labels, batch_ind
n_samples = 6
n_feats = 10
batches = list(range(10))
x = []
y = []
for batch in batches:
samp = np.random.choice(np.arange(*batch_ind[batch]), n_samples, replace=False)
x.append(features[samp, :n_feats])
y.append(labels[samp])
x = np.stack(x)
y = np.stack(y)
print("x shape", x.shape, "y shape", y.shape)
fig, axes = plt.subplots(len(batches)+1, 2, figsize=(4, 16))
for row in range(len(batches)+1):
for col in range(2):
ax = axes[row, col]
if row == len(batches):
if col == 0:
plt.colorbar(im_gray, cax=ax, orientation="horizontal")
else:
plt.colorbar(im_color, cax=ax, orientation="horizontal")
continue
if col == 0:
im_gray = ax.imshow(x[row], cmap="gray")
else:
im_color = ax.imshow(y[row].reshape(n_samples, 1), cmap="rainbow")
# divider = make_axes_locatable(ax)
# cax = divider.append_axes('top', size=0.5, pad=0.35)
ax.set_xticks([])
ax.set_yticks([])
# plt.colorbar(im_gray)
fig.savefig("writeup/fig_data_matrix_raw.svg", format="svg")
fig, axes = plt.subplots(len(batches)+1, 2, figsize=(4, 4))
for row in range(len(batches)+1):
for col in range(2):
ax = axes[row, col]
if row == len(batches):
if col == 0:
plt.colorbar(im_gray, cax=ax, orientation="horizontal")
else:
plt.colorbar(im_color, cax=ax, orientation="horizontal")
continue
if col == 0:
im_gray = ax.imshow(x[row], cmap="gray")
else:
im_color = ax.imshow(y[row].reshape(n_samples, 1), cmap="rainbow")
im_color.set_clim(0.0, 4.0)
# divider = make_axes_locatable(ax)
# cax = divider.append_axes('top', size=0.5, pad=0.35)
ax.set_xticks([])
ax.set_yticks([])
# plt.colorbar(im_gray)
fig.savefig("writeup/fig_data_matrix_raw.svg", format="svg")
# Visualize features and labels from each batch
import matplotlib.pyplot as plt
import numpy as np
import math
import cairo
from mpl_toolkits.axes_grid1 import make_axes_locatable
from batches import features, labels, batch_ind, samples_in_batch_by_label, N_ODOR_CLASSES
n_feats = 10
batches = list(range(10))
x = []
y = []
k = 3
for batch in batches:
xb = []
yb = []
for c in range(N_ODOR_CLASSES):
choices = samples_in_batch_by_label[batch][c]
samp = np.random.choice(choices, k, replace=False)
xb.append(features[samp, :n_feats])
yb.append(labels[samp])
x.append(np.stack(xb))
y.append(np.stack(yb))
# x shape (batches, classes, k, features)
# y shape (batches, classes, k,)
x = np.stack(x)
y = np.stack(y)
print("x shape", x.shape, "y shape", y.shape)
fig, axes = plt.subplots(x.shape[0], x.shape[2], figsize=(8, 16))
for batch in range(x.shape[0]):
for samp in range(x.shape[2]):
ax = axes[batch, samp]
data = x[batch, :, samp]
im_gray = ax.imshow(data, cmap="gray")
im_gray.set_clim(-1.0, 1.0)
ax.set_xticks([])
ax.set_yticks([])
# plt.colorbar(im_gray)
fig.savefig("writeup/batch_matrix_raw.svg", format="svg")
###Output
x shape (10, 5, 3, 10) y shape (10, 5, 3)
###Markdown
Basic classification techniques ANOVA. Here we run a one-way ANOVA to test the null hypothesis that the data from the different batches have the same mean. This analysis, however, assumes that the samples are independent, which isn't evidently true looking at the plots above.
###Code
import scipy.stats
features_z = scipy.stats.zscore(features, axis=0)
unique_labels = np.unique(labels)
features_by_label = []
for label in unique_labels:
features_by_label.append(features_z[labels==label])
f_stat, p_val = scipy.stats.f_oneway(*features_by_label)
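# Added summary (illustrative; assumes a SciPy version whose f_oneway accepts
# 2-D samples and tests each feature column separately along axis 0):
print("features with p < 0.05:", int((p_val < 0.05).sum()), "of", p_val.shape[0])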
###Output
_____no_output_____
###Markdown
Principal Components
###Code
import numpy as np
from sklearn.decomposition import PCA
from batches import split_all
pca = PCA(n_components=2)
new_features = pca.fit_transform(split_all.features)
print(pca.explained_variance_ratio_)
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 8))
colors = list(mcolors.TABLEAU_COLORS)
for i, (start, end) in enumerate(split_all.batch_ind):
    batch_num = i + 1
    batch_points = new_features[start:end]
    plt.scatter(batch_points[:, 0], batch_points[:, 1], color=colors[i], label=f"Batch {batch_num}", alpha=100/(end-start))
# plt.legend()
###Output
_____no_output_____
###Markdown
LDA Inference of the chemical from the features, within-batch. An LDA is able to discriminate odors within a batch (no generalization tested).
###Code
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
print(labels.shape, features.shape)
batches_feats = [features[s:t] for s, t in batch_ind]
batches_label = [labels[s:t] for s, t in batch_ind]
batches_accuracy = []
batches_lda_models = []
for feats_batch, labels_batch in zip(batches_feats, batches_label):
X = feats_batch
y = labels_batch
print(X.shape, y.shape)
clf = LinearDiscriminantAnalysis()
batches_lda_models.append(clf)
clf.fit(X, y)
y_pred = clf.predict(X)
accuracy = (y_pred==y).astype(np.long).sum().item() / y.shape[0]
batches_accuracy.append(accuracy)
print("="*80)
for batch_i, accuracy in enumerate(batches_accuracy):
print(f"Accuracy achieved by batch {batch_i}: {accuracy}")
###Output
(12077,) (12077, 128)
(371, 128) (371,)
(1239, 128) (1239,)
(1586, 128) (1586,)
(161, 128) (161,)
(197, 128) (197,)
(1833, 128) (1833,)
(3045, 128) (3045,)
(276, 128) (276,)
(369, 128) (369,)
(3000, 128) (3000,)
================================================================================
Accuracy achieved by batch 0: 1.0
Accuracy achieved by batch 1: 1.0
Accuracy achieved by batch 2: 0.9993694829760403
Accuracy achieved by batch 3: 1.0
Accuracy achieved by batch 4: 1.0
Accuracy achieved by batch 5: 0.9950900163666121
Accuracy achieved by batch 6: 0.9990147783251232
Accuracy achieved by batch 7: 1.0
Accuracy achieved by batch 8: 1.0
Accuracy achieved by batch 9: 0.9956666666666667
###Markdown
Inference of the chemical from the features, within-batch (50/50 train/test split). With a 50/50 train/test split, the LDA classifier generalizes successfully when classifying within a batch.
###Code
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
print(labels.shape, features.shape)
batches_feats = [features[s:t] for s, t in batch_ind]
batches_label = [labels[s:t] for s, t in batch_ind]
batches_sizes = [batch.shape[0] for batch in batches_label]
R_inds = [np.random.choice(np.arange(size), int(size/2), replace=False) for size in batches_sizes]
T_inds = [np.setdiff1d(np.arange(size), ind, assume_unique=True) for ind, size in zip(R_inds, batches_sizes)]
batches_feats_R = [feat[ind] for feat, ind in zip(batches_feats, R_inds)]
batches_label_R = [label[ind] for label, ind in zip(batches_label, R_inds)]
batches_feats_T = [feat[ind] for feat, ind in zip(batches_feats, T_inds)]
batches_label_T = [label[ind] for label, ind in zip(batches_label, T_inds)]
batches_lda_models = []
for feats_batch, labels_batch in zip(batches_feats_R, batches_label_R):
X = feats_batch
y = labels_batch
print(X.shape, y.shape)
clf = LinearDiscriminantAnalysis()
batches_lda_models.append(clf)
clf.fit(X, y)
# test
batches_accuracy = []
for feats_batch, labels_batch, clf in zip(batches_feats_T, batches_label_T, batches_lda_models):
X = feats_batch
y = labels_batch
y_pred = clf.predict(X)
accuracy = (y_pred==y).astype(np.long).sum().item() / y.shape[0]
batches_accuracy.append(accuracy)
for batch_i, accuracy in enumerate(batches_accuracy):
print(f"Accuracy achieved by batch {batch_i}: {accuracy}")
type(np.arange(5)[0].item())
###Output
_____no_output_____
###Markdown
Inference of the chemical from the features, between-batch. Make a matrix of accuracies between batches. This shows that, as you would expect, discriminators trained on one batch perform best on their own batch, followed by their neighbors.
###Code
# Use the models trained in a previous cell
n_batches = len(batches_lda_models)
cross_accuracy = np.empty((n_batches, n_batches))
for i, model_i in enumerate(batches_lda_models):
for j, (feats_batch_j, labels_batch_j) in enumerate(zip(batches_feats, batches_label)):
X = feats_batch_j
y = labels_batch_j
y_pred = model_i.predict(X)
accuracy = (y_pred==y).astype(np.long).sum().item() / y.shape[0]
cross_accuracy[i, j] = accuracy
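# Added summary (illustrative): compare within-batch accuracy (the diagonal of the
# matrix) with transfer accuracy to the other batches (the off-diagonal entries).
diag_acc = np.diag(cross_accuracy)
offdiag_acc = cross_accuracy[~np.eye(n_batches, dtype=bool)]
print(f"mean within-batch accuracy: {diag_acc.mean():.3f}, "
      f"mean cross-batch accuracy: {offdiag_acc.mean():.3f}")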
# plt.figure(figsize=(6, 8))
plt.imshow(cross_accuracy)
# plt.xticks(np.arange(n_batches), labels=np.arange(n_batches)+1)
# plt.yticks(np.arange(n_batches), labels=np.arange(n_batches)+1)
plt.title("Prediction accuracy of LDA (similarity) between batches")
plt.xlabel("Target")
plt.ylabel("Source")
plt.colorbar()
# Use the models trained in a previous cell
n_batches = len(batches_lda_models)
cross_accuracy = np.empty((n_batches, n_batches))
for i, model_i in enumerate(batches_lda_models):
for j, (feats_batch_j, labels_batch_j) in enumerate(zip(batches_feats, batches_label)):
X = feats_batch_j
y = labels_batch_j
y_pred = model_i.predict(X)
accuracy = (y_pred==y).astype(np.long).sum().item() / y.shape[0]
cross_accuracy[i, j] = accuracy
fig, ax = plt.subplots()
x = np.arange(1, 11)
# ax.plot(x, cross_accuracy[0], c="purple")
# ax.axvline(1, c="purple", linestyle="--")
ax.plot(x, cross_accuracy[3], c="blue", marker="x", markersize=10)
ax.axvline(4, c="blue", linestyle="--")
ax.plot(x, cross_accuracy[6], c="black", marker="x", markersize=10)
ax.axvline(7, c="black", linestyle="--")
ax.set_xticks(x)
ax.set_ylim([0.0, 1.0])
ax.set_yticks(np.arange(0, 1.1, 0.1))
ax.grid(axis='y', color='#F7BFBE')
fig.savefig("writeup/lda_centers.png")
###Output
_____no_output_____
###Markdown
Inference of chemical from the features, all the data. Can a single LDA model classify every smell in all the batches? Random 50/50 train/test split.
###Code
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
size = features.shape[0]
R_ind = np.random.choice(np.arange(size), int(size/2), replace=False)
T_ind = np.setdiff1d(np.arange(size), R_ind, assume_unique=True)
batches_feats_R = features[R_ind]
batches_label_R = labels[R_ind]
batches_feats_T = features[T_ind]
batches_label_T = labels[T_ind]
X = batches_feats_R
y = batches_label_R
print(X.shape, y.shape)
clf = LinearDiscriminantAnalysis()
batches_lda_models.append(clf)
clf.fit(X, y)
# test
accuracy = []
X = batches_feats_T
y = batches_label_T
y_pred = clf.predict(X)
accuracy = (y_pred==y).astype(np.long).sum().item() / y.shape[0]
print(f"Accuracy achieved: {accuracy}")
###Output
(6955, 128) (6955,)
Accuracy achieved: 0.948371817643576
###Markdown
Train on 1..T-1, test on T. For each batch number T=1...9 (0-indexed), train an LDA model on batches 0..T-1, then evaluate on batch T
###Code
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from batches import split_all
features, labels, batch_ind = split_all.features, split_all.labels, split_all.batch_ind
print(labels.shape, features.shape)
# batches_feats[T] / batches_label[T] contain all data from batches 0...T (including batch T, 0-indexed)
batches_feats = [features[0:t] for s, t in batch_ind]
batches_label = [labels[0:t] for s, t in batch_ind]
batches_accuracy = []
batches_lda_models = []
for feats_batch, labels_batch in zip(batches_feats, batches_label):
X = feats_batch
y = labels_batch
print(X.shape, y.shape)
clf = LinearDiscriminantAnalysis()
batches_lda_models.append(clf)
clf.fit(X, y)
y_pred = clf.predict(X)
accuracy = (y_pred==y).astype(np.long).sum().item() / y.shape[0]
batches_accuracy.append(accuracy)
print("="*80)
for batch_i, accuracy in enumerate(batches_accuracy):
print(f"Accuracy achieved by batch {batch_i}: {accuracy}")
# batches_lda_models[t] is trained on batches 0...t
# so test on batch t+1=T
batches = []
accuracies = []
for T in range(1, 10):
model = batches_lda_models[T-1]
start, end = batch_ind[T]
X = features[start:end]
y = labels[start:end]
y_pred = model.predict(X)
accuracy = (y_pred==y).astype(np.long).sum().item() / y.shape[0]
accuracies.append(accuracy)
batches.append(T + 1)
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(batches, accuracies, color="black", marker="x", markersize=10)
ax.set_xticks(batches)
ax.set_ylim([0.0, 1.0])
ax.set_yticks(np.arange(0, 1.1, 0.1))
ax.grid(axis='y', color='#F7BFBE')
fig.savefig("writeup/lda_up_to.png")
###Output
_____no_output_____
###Markdown
Neural Network Classifiers Training curves Early stopping pretraining
###Code
# data_folder = "output/backprop_context_earlystop_share2"
data_folder = "output/backprop_context_patientearlystop_epoch1"
# data_folder = "backprop_context_patientearlystop_share_epoch0"
import matplotlib.pyplot as plt
import torch
import os
Ts = list(range(3, 10))
samp_max = 20
fig, axes = plt.subplots(len(Ts), 1, figsize=(12, 2*len(Ts)))
for T, ax in zip(Ts, axes):
lloss = torch.load(os.path.join(data_folder, f"val_lloss_{T}.pt"))
lacc = torch.load(os.path.join(data_folder, f"val_lacc_{T}.pt"))
li = torch.load(os.path.join(data_folder, f"val_li_{T}.pt"))
stop_time = torch.load(os.path.join(data_folder, f"val_stop_time_{T}.pt"))
print(f"Stop time for T={T}: {stop_time}")
ax.set_ylabel(f"Batch={T+1}")
line_loss, = ax.plot(li[:samp_max], lloss[:samp_max])
line_acc, = ax.plot(li[:samp_max], lacc[:samp_max])
ax.hlines(1.0, 0, max(li[:samp_max]), linestyles="dashed")
ax.set_xlabel("n samples seen")
fig.legend([line_loss, line_acc], ["Training Loss", "Testing Accuracy"], loc="upper right")
# [Context is sequences ] [T=4]
for i, acc in enumerate(lacc[0:20]):
print(f"{acc}")
patience = 10
consec = 0
last_best = float('-inf')
last_best_i = 0
for i, k in enumerate(lacc):
if k > last_best:
print(f"better i={i}")
last_best = k
consec = 0
last_best_i = i
else:
consec += 1
if consec >= patience:
print(f"Violated patience at i={i}")
consec = 0
print(last_best_i)
###Output
0 0.7246666666666667
1 0.83
2 0.7806666666666666
3 0.8153333333333334
4 0.7873333333333333
5 0.8513333333333334
6 0.808
7 0.7106666666666667
8 0.8093333333333333
9 0.7953333333333333
10 0.8453333333333334
better i=0
better i=1
better i=5
5
###Markdown
Final training
###Code
data_folder = "output/nocontext_schedule1"
# Accuracy over time graph
import torch
import os
Ts = list(range(2, 10))
fig, axes = plt.subplots(len(Ts), 1, figsize=(4, 16))
for T, ax in zip(Ts, axes):
lloss = torch.load(os.path.join(data_folder, f"lloss_{T}.pt"))
lacc = torch.load(os.path.join(data_folder, f"lacc_{T}.pt"))
li = torch.load(os.path.join(data_folder, f"li_{T}.pt"))
ax.set_ylabel(f"Batch={T+1}")
line_loss, = ax.plot(li, lloss)
line_acc, = ax.plot(li, lacc)
ax.hlines(1.0, 0, max(li), linestyles="dashed")
ax.set_xlabel("n samples seen")
fig.legend([line_loss, line_acc], ["Training Loss", "Testing Accuracy"], loc="upper right")
# [Context is sequences ] [T=4]
# Final accuracies
import matplotlib.pyplot as plt
import torch
import os
vergara_eyeballed = [1.0, 0.74, 0.88, 0.93, 0.95, 0.70, 0.70, 0.92, 0.75, 0.65]
Ts = list(range(2, 10))
batches = [t+1 for t in Ts]
accs = []
for T, ax in zip(Ts, axes):
acc = torch.load(os.path.join(data_folder, f"acc_{T}.pt"))
accs.append(acc)
plt.plot(batches, accs, label="Choose-k neural network")
plt.plot(batches, vergara_eyeballed[2:], label="Weighted SVM ensemble")
plt.title("Generalization performance")
plt.xlabel("Batch")
plt.ylabel("Final testing accuracy")
plt.legend()
###Output
_____no_output_____
###Markdown
Adjustable accuracy plots
###Code
# Default Parameters
show_legend = True
draw_errors = True
draw_plot = False
show_scatter = False
draw_axis = False
show_svm = False
use_offset = False
show_vergara = False
save_figure = False
#data_folders = ([f"output/backprop{n}" for n in range(10)],
# [f"output/backprop_dropout{n}" for n in range(10)])
#dataset_names = ("Backprop", "Backprop+Dropout")
# data_folders = ([f"output/backprop{n}" for n in range(10)],
# [f"output/evolve{n}" for n in range(10)],
# [f"output/backprop_ensemble{n}" for n in range(10)])
# dataset_names = ("With Context", "Evolve With Context", "Ensemble")
# data_folders = ([f"output/backprop{n}" for n in range(10)],
# [f"output/evolve{n}" for n in range(10)],
# [f"output/evolve_uber{n}" for n in range(10)])
# dataset_names = ("Backprop", "Evolve (old)", "Evolve (new)")
# data_folders = ([f"output/backprop{n}" for n in range(10)],)
# dataset_names = ("Context Model",)
# data_folders = ([f"output/ensemble_long{n}" for n in range(3)],
# [f"output/nocontext_long{n}" for n in range(3)],
# [f"/media/jamie/PATRIOT/sensor-drift_bak/output/backprop{n}" for n in range(10)],
# [f"/media/jamie/PATRIOT/sensor-drift_bak/output/backprop_context_earlystop_share{n}" for n in range(3)],
# )
# dataset_names = ("Ensemble",
# "NoContext",
# "Context",
# "ContextShare"
# )
# FIGURE 2B parameters
# data_folders = ([f"output/nocontext_long{n}" for n in range(3)],)
# dataset_names = ("Context Model",)
# figure_path = "writeup/figure_sources/feedforward_accuracy.png"
# FIGURE 3 parametrrs
# data_folders = ([f"output/ensemble_long{n}" for n in range(3)],)
# dataset_names = ("Ensemble Model",)
# figure_path = "writeup/figure_sources/ensemble_accuracy.png"
# show_svm = True
# FIGURE 4B parameters
# data_folders = ([f"output/context_long{n}" for n in range(3)],)
# dataset_names = ("Context",)
# figure_path = "writeup/figure_sources/context_accuracy.png"
# FIGURE 4C parameters
# data_folders = ([f"output/context_share_long{n}" for n in range(3)],)
# dataset_names = ("Context Share")
# figure_path = "writeup/figure_sources/context_share_accuracy.png"
#
# n_trial = 5
# data_folders = (
# [f"output/nocontext_medium{n}" for n in range(n_trial)],
# [f"output/context_medium{n}" for n in range(n_trial)]
# )
# dataset_names = [
# "NoContext",
# "Context",
# ]
n_trial = 30
subselection = None #[2, -1]
data_folders = ([f"output/ensemble_harddecay{n}" for n in range(n_trial)],
[f"output/nocontext_harddecay{n}" for n in range(n_trial)],
[f"output/context_harddecay_k1{n}" for n in range(n_trial)],
[f"output/context_lstm_harddecay{n}" for n in range(n_trial)],
[f"output/context_harddecay_relu{n}" for n in range(n_trial)],
[f"output/nocontext_big_short_harddecay{n}" for n in range(n_trial)]
)
# data_folders = ([f"output/ensemble_short_harddecay{n}" for n in range(n_trial)],
# [f"output/nocontext_short_harddecay{n}" for n in range(n_trial)],
# [f"output/context_short_harddecay{n}" for n in range(n_trial)],
# [f"output/context_lstm_short_harddecay{n}" for n in range(n_trial)],
# [f"output/context_short_harddecay_relu{n}" for n in range(n_trial)],
# )
dataset_names = ["Feedforward NN Ensemble",
"Feedforward NN",
"Feedforward+Context NN",
"LSTM",
"Context relu",
"Feedforward+Context NN Big"
]
if subselection is not None:
data_folders = [data_folders[idx] for idx in subselection]
dataset_names = [dataset_names[idx] for idx in subselection]
# Final accuracies
all_points = []
Ts = list(range(2, 10))
batches = [t+1 for t in Ts]
# For each method, for each batch, for each n, there is an accuracy
df = pd.DataFrame(columns=["method", "batch", "n", "accuracy"])
for method_i, method_folders in enumerate(data_folders):
for T, batch in zip(Ts, batches):
for n, n_folder in enumerate(method_folders):
try:
acc = torch.load(os.path.join(n_folder, f"acc_{T}.pt"))
            except FileNotFoundError:
                print("not found", dataset_names[method_i], "T=", T)
                continue
df = df.append({
"method": method_i,
"batch": batch,
"n": n,
"accuracy": acc
}, ignore_index=True)
if show_svm:
method_i += 1
print("method_i", method_i)
if "SVM Ensemble" not in dataset_names:
dataset_names.append("SVM Ensemble")
dirpath = "svm_ensemble_results/setting2"
for trial in range(n_trial):
try:
with open(os.path.join(dirpath, f"accuracies_trial{trial}.pkl"), "rb") as f:
accs = pickle.load(f)
        except FileNotFoundError:
            print("not found", dataset_names[method_i], "T=", T)
            continue
accs = accs[-len(batches):]
for i, batch in enumerate(batches):
acc = accs[i]
df = df.append({
"method": method_i,
"batch": batch,
"n": trial,
"accuracy": acc
}, ignore_index=True)
# For each method, for each batch, calculate the mean and 95% confidence interval
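# (Added note: the half-width computed below is a normal-approximation 95% confidence
#  interval, 1.96 * sample std / sqrt(n), taken across the independent training runs.)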
df2 = pd.DataFrame(columns=["method", "batch", "mu", "err"])
for method_index, method_name in enumerate(dataset_names):
for T, batch in zip(Ts, batches):
data = df[(df.method==method_index) & (df.batch==batch)].accuracy
try:
err = 1.96 * data.std() / math.sqrt(data.count())
except ZeroDivisionError:
print(method_name, data, "T=", T, data, method_index)
continue
df2 = df2.append({
"method": method_index,
"batch": batch,
"mu": data.mean().item(),
"err": err.item()
}, ignore_index=True)
if use_offset:
OFFSET = 0.15
else:
OFFSET = 0.0
colors = ("black", "b", "r", "y", "c", "m", "k")
markers = ("$\u25EF$", "$\u25EF$", "v", "v", "*", "*")
fig, ax = plt.subplots(1, 1)
for i in sorted(df2.method.unique()):
i = int(i)
off = OFFSET if draw_errors or draw_plot else 0.0
m = df[df.method == i]
# Scatter plot the accuracies
if show_scatter:
ax.scatter(
x=m.batch-off,
y=m.accuracy,
marker=markers[i],
s=100,
label=dataset_names[i],
c=colors[i])
# Error bars
if draw_errors:
ax.errorbar(
x=df2[df2.method == i].batch,
y=df2[df2.method == i].mu,
yerr=df2[df2.method == i].err,
c=colors[i],
label=dataset_names[i],
capsize=10)
# Line
if draw_plot:
ax.plot(
df2[df2.method == i].batch,
df2[df2.method == i].mu,
c=colors[i],
markersize=10)
if show_vergara:
vergara_eyeballed = [1.0, 0.74, 0.88, 0.93, 0.95, 0.70, 0.70, 0.92, 0.75, 0.65]
ax.plot(batches, vergara_eyeballed[2:],
linestyle='--', c="g", marker="$\u25EF$", label="Weighted SVM ensemble")
ax.set_ylim([0.0, 1.1])
if draw_axis:
plt.xlabel("Batch")
plt.ylabel("Test Accuracy")
# Legend
if show_legend:
plt.legend()
ax.set_xticks(np.arange(3, 11))
ax.set_ylim([0.7, 1.0])
ax.set_yticks(np.arange(0.7, 1.0, 0.1))
ax.grid(axis='y', color='#F7BFBE')
plt.show()
if save_figure:
fig.savefig(figure_path)
###Output
_____no_output_____
###Markdown
Results Figure
###Code
n_trial = 30
data_folders = (
[f"output/nocontext_harddecay{n}" for n in range(n_trial)],
[f"output/context_harddecay_k1{n}" for n in range(n_trial)],
[f"output/ensemble_harddecay{n}" for n in range(n_trial)],
[f"output/context_lstm_harddecay{n}" for n in range(n_trial)],
)
dataset_names = [
"Feedforward NN",
"Feedforward+Context NN",
"Feedforward NN Ensemble",
"LSTM",
]
# Default Parameters
show_legend = True
draw_errors = True
draw_plot = False
show_scatter = False
draw_axis = False
show_svm = True
use_offset = False
show_vergara = False
save_figure = False
# Final accuracies
all_points = []
Ts = list(range(2, 10))
batches = [t+1 for t in Ts]
# For each method, for each batch, for each n, there is an accuracy
df = pd.DataFrame(columns=["method", "batch", "n", "accuracy"])
for method_i, method_folders in enumerate(data_folders):
for T, batch in zip(Ts, batches):
for n, n_folder in enumerate(method_folders):
try:
acc = torch.load(os.path.join(n_folder, f"acc_{T}.pt"))
            except FileNotFoundError:
                print("not found", dataset_names[method_i], "T=", T)
                continue
df = df.append({
"method": method_i,
"batch": batch,
"n": n,
"accuracy": acc
}, ignore_index=True)
if show_svm:
method_i += 1
print("method_i", method_i)
if "SVM Ensemble" not in dataset_names:
dataset_names.append("SVM Ensemble")
dirpath = "svm_ensemble_results/setting2"
for trial in range(n_trial):
try:
with open(os.path.join(dirpath, f"accuracies_trial{trial}.pkl"), "rb") as f:
accs = pickle.load(f)
        except FileNotFoundError:
            print("not found", dataset_names[method_i], "T=", T)
            continue
accs = accs[-len(batches):]
for i, batch in enumerate(batches):
acc = accs[i]
df = df.append({
"method": method_i,
"batch": batch,
"n": trial,
"accuracy": acc
}, ignore_index=True)
# For each method, for each batch, calculate the mean and 95% confidence interval
df2 = pd.DataFrame(columns=["method", "batch", "mu", "err"])
for method_index, method_name in enumerate(dataset_names):
for T, batch in zip(Ts, batches):
data = df[(df.method==method_index) & (df.batch==batch)].accuracy
try:
err = 1.96 * data.std() / math.sqrt(data.count())
except ZeroDivisionError:
print(method_name, data, "T=", T, data, method_index)
continue
df2 = df2.append({
"method": method_index,
"batch": batch,
"mu": data.mean().item(),
"err": err.item()
}, ignore_index=True)
if use_offset:
OFFSET = 0.15
else:
OFFSET = 0.0
colors = ("#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00")
markers = ("$\u25EF$", "$\u25EF$", "v", "v", "*", "*")
fig, axes = plt.subplots(1, 2, figsize=(7.05, 3), sharey=True)
highlights = [[0, 1], [2, 3]]
for ax, highlight in zip(axes, highlights):
for i in sorted(df2.method.unique()):
i = int(i)
off = OFFSET if draw_errors or draw_plot else 0.0
m = df[df.method == i]
# Scatter plot the accuracies
if show_scatter:
ax.scatter(
x=m.batch-off,
y=m.accuracy,
marker=markers[i],
s=100,
label=dataset_names[i],
c=colors[i])
# Error bars
if i in highlight:
alpha = 1.0
else:
alpha = 0.2
if draw_errors:
ax.errorbar(
x=df2[df2.method == i].batch,
y=df2[df2.method == i].mu,
yerr=df2[df2.method == i].err,
c=colors[i],
label=dataset_names[i],
capsize=10,
linewidth=1,
elinewidth=1,
capthick=1,
alpha=alpha)
# Line
if draw_plot:
ax.plot(
df2[df2.method == i].batch,
df2[df2.method == i].mu,
c=colors[i],
markersize=10)
if show_vergara:
vergara_eyeballed = [1.0, 0.74, 0.88, 0.93, 0.95, 0.70, 0.70, 0.92, 0.75, 0.65]
ax.plot(batches, vergara_eyeballed[2:],
linestyle='--', c="g", marker="$\u25EF$", label="Weighted SVM ensemble")
ax.set_ylim([0.0, 1.1])
if draw_axis:
plt.xlabel("Batch")
plt.ylabel("Test Accuracy")
# Legend
import matplotlib.patches as patches
legend_rectangles = []
legend_lables = []
for method_i in sorted(df2.method.unique()):
method_i = int(method_i)
legend_lables.append(dataset_names[method_i])
rect = patches.Rectangle((0, 0), 1, 1, facecolor=colors[method_i])
legend_rectangles.append(rect)
axes[0].legend(legend_rectangles[:2], legend_lables[:2], loc=(0.02, 0.02))
axes[1].legend(legend_rectangles[2:], legend_lables[2:], loc=(0.02, 0.02))
# Axes and ticks
axes[0].set_xticks(np.arange(3, 11))
axes[1].set_xticks(np.arange(3, 11))
ax.set_ylim([0.4, 1.0])
ax.set_yticks(np.arange(0.4, 1.05, 0.1))
axes[0].set_ylabel("Accuracy")
axes[0].grid(axis='y', color='#bbbbbb')
axes[1].grid(axis='y', color='#bbbbbb')
axes[0].set_xlabel("Batch")
axes[1].set_xlabel("Batch")
plt.show()
figure_path = "writeup/figure_sources/fig_3.svg"
fig.savefig(figure_path)
pd.__version__
###Output
_____no_output_____
###Markdown
Statistical tests. These require the "df" and "df2" variables to be present from the previous cells.
###Code
import pingouin as pg
# First test: For each batch, run an ANOVA for methods considered
# This will be used to selectively bold results in the table
if False:
print("#" * 80)
print("### ANOVA, pairwise t-tests for each batch")
print("#" * 80)
for batch in range(3, 11):
df_batch = df[df.batch == batch]
print()
print(f"### batch {batch}:")
aov = pg.welch_anova(
dv='accuracy',
between='method',
data=df_batch
)
print("ANOVA:")
print(aov)
pgs = pg.pairwise_ttests(
data=df_batch,
dv='accuracy',
between='method',
correction=True
)
print("Pairwise TTests:")
print(pgs)
print()
# Second test: Run an ANOVA for the grand means
print("#" * 80)
print("### Second test")
print("#" * 80)
# Third test: Run an ANOVA for the grand means, blocked by batch
print("#" * 80)
print("### Significance test for context, no context")
print("#" * 80)
context_index = dataset_names.index("Feedforward+Context NN")
nocontext_index = dataset_names.index("Feedforward NN")
df_context = df[(df.method == context_index) | (df.method == nocontext_index)]
aov = pg.anova(
dv='accuracy',
between=['method', 'batch'],
data=df_context
)
print("ANOVA:")
print(aov)
if False:
print("#" * 80)
print("### Significance test for context, no context, by batch")
print("#" * 80)
for batch in range(3, 11):
print()
print(f"### batch {batch}:")
df_batch = df_context[df.batch == batch]
pgs = pg.pairwise_ttests(
data=df_batch,
dv='accuracy',
between='method',
correction=True
)
print("Pairwise TTests:")
print(pgs)
print()
pgs = pg.pairwise_ttests(
data=df,
dv='accuracy',
between=['method'],
correction=True
)
print("Pairwise TTests:")
print(pgs)
# Did LSTM outperform RNN?
print("#" * 80)
print("### Significance test for context, LSTM")
print("#" * 80)
context_index = dataset_names.index("Feedforward+Context NN")
lstm_index = dataset_names.index("LSTM")
df_context = df[(df.method == context_index) | (df.method == lstm_index)]
aov = pg.anova(
dv='accuracy',
between=['method', 'batch'],
data=df_context
)
print("ANOVA:")
print(aov)
# Did the large RNN outperform the other one?
print("#" * 80)
print("### Significance test for context, big context")
print("#" * 80)
context_index = dataset_names.index("Feedforward+Context NN")
large_context_index = dataset_names.index("Feedforward+Context NN Big")
df_context = df[(df.method == context_index) | (df.method == large_context_index)]
aov = pg.anova(
dv='accuracy',
between=['method', 'batch'],
data=df_context
)
print("ANOVA:")
print(aov)
print("#" * 80)
print("### Significance test for SVM ensemble, NN ensemble")
print("#" * 80)
id_1 = dataset_names.index("Feedforward NN Ensemble")
id_2 = dataset_names.index("SVM Ensemble")
df_context = df[(df.method == id_1) | (df.method == id_2)]
aov = pg.anova(
dv='accuracy',
between=['method', 'batch'],
data=df_context
)
print("ANOVA:")
print(aov)
# Third test: Run an ANOVA for the grand means, blocked by batch
print("#" * 80)
print("### Significance test for context, no context")
print("#" * 80)
###Output
################################################################################
### Second test
################################################################################
################################################################################
### Significance test for context, no context
################################################################################
ANOVA:
Source SS DF MS F p-unc np2
0 method 0.021 1 0.021 5.294861 2.183088e-02 0.011283
1 batch 2.334 7 0.333 84.069429 1.781386e-78 0.559139
2 method * batch 0.042 7 0.006 1.529066 1.551741e-01 0.022548
3 Residual 1.840 464 0.004 NaN NaN NaN
Pairwise TTests:
Contrast A B Paired Parametric T dof Tail p-unc \
0 method 0.0 1.0 False True -2.922 464.78 two-sided 0.003644
1 method 0.0 2.0 False True -4.443 442.42 two-sided 0.000011
2 method 0.0 3.0 False True -3.276 438.39 two-sided 0.001138
3 method 0.0 4.0 False True -4.644 454.88 two-sided 0.000004
4 method 0.0 5.0 False True -3.613 461.17 two-sided 0.000336
5 method 1.0 2.0 False True -1.529 471.13 two-sided 0.126912
6 method 1.0 3.0 False True -0.214 468.94 two-sided 0.830592
7 method 1.0 4.0 False True -1.820 476.34 two-sided 0.069352
8 method 1.0 5.0 False True -0.728 477.74 two-sided 0.466948
9 method 2.0 3.0 False True 1.417 477.84 two-sided 0.157073
10 method 2.0 4.0 False True -0.355 476.16 two-sided 0.723015
11 method 2.0 5.0 False True 0.777 473.47 two-sided 0.437516
12 method 3.0 4.0 False True -1.729 474.92 two-sided 0.084386
13 method 3.0 5.0 False True -0.561 471.63 two-sided 0.574822
14 method 4.0 5.0 False True 1.093 477.38 two-sided 0.275076
BF10 hedges
0 6.218 -0.266
1 1252.874 -0.405
2 17.7 -0.299
3 2947.857 -0.423
4 53.384 -0.329
5 0.315 -0.139
6 0.104 -0.020
7 0.504 -0.166
8 0.131 -0.066
9 0.268 0.129
10 0.108 -0.032
11 0.136 0.071
12 0.432 -0.158
13 0.118 -0.051
14 0.181 0.100
################################################################################
### Significance test for context, LSTM
################################################################################
ANOVA:
Source SS DF MS F p-unc np2
0 method 0.015 1 0.015 3.617390 5.779753e-02 0.007736
1 batch 1.666 7 0.238 57.395916 5.330224e-59 0.464062
2 method * batch 0.051 7 0.007 1.747027 9.618675e-02 0.025679
3 Residual 1.924 464 0.004 NaN NaN NaN
################################################################################
### Significance test for context, big context
################################################################################
ANOVA:
Source SS DF MS F p-unc np2
0 method 0.005 1 0.005 1.257770 2.626536e-01 0.002703
1 batch 2.233 7 0.319 80.245739 6.665702e-76 0.547635
2 method * batch 0.033 7 0.005 1.179373 3.130049e-01 0.017481
3 Residual 1.845 464 0.004 NaN NaN NaN
################################################################################
### Significance test for SVM ensemble, NN ensemble
################################################################################
###Markdown
Print Table 1
###Code
# Final accuracies
import pandas as pd
import matplotlib.pyplot as plt
import torch
import os
import math
from tabulate import tabulate
all_points = []
Ts = list(range(2, 10))
batches = [t+1 for t in Ts]
# For each method, for each batch, for each n, there is an accuracy
df = pd.DataFrame(columns=["method", "batch", "n", "accuracy"])
for method_i, method_folders in enumerate(data_folders):
for T, batch in zip(Ts, batches):
for n, n_folder in enumerate(method_folders):
try:
acc = torch.load(os.path.join(n_folder, f"acc_{T}.pt"))
            except FileNotFoundError:
                print("not found", dataset_names[method_i], "T=", T)
                continue
df = df.append({
"method": method_i,
"batch": batch,
"n": n,
"accuracy": acc
}, ignore_index=True)
if show_svm:
method_i += 1
if "SVM Ensemble" not in dataset_names:
dataset_names.append("SVM Ensemble")
dirpath = "svm_ensemble_results/setting2"
for trial in range(n_trial):
try:
with open(os.path.join(dirpath, f"accuracies_trial{trial}.pkl"), "rb") as f:
accs = pickle.load(f)
        except FileNotFoundError:
            print("not found", dataset_names[method_i], "T=", T)
            continue
accs = accs[-len(batches):]
for i, batch in enumerate(batches):
acc = accs[i]
df = df.append({
"method": method_i,
"batch": batch,
"n": trial,
"accuracy": acc
}, ignore_index=True)
# For each method, for each batch, calculate the mean and 95% confidence interval
df2 = pd.DataFrame(columns=["method", "batch", "mu", "err"])
for method_index, method_name in enumerate(dataset_names):
for T, batch in zip(Ts, batches):
data = df[(df.method==method_index) & (df.batch==batch)].accuracy
try:
err = 1.96 * data.std() / math.sqrt(data.count())
except ZeroDivisionError:
print(method_name, data, "T=", T)
continue
df2 = df2.append({
"method": method_index,
"batch": batch,
"mu": data.mean().item(),
"err": err.item()
}, ignore_index=True)
# Compute a grand total mean and 95% CI
for method_index, method_name in enumerate(dataset_names):
data = df[(df.method==method_index) & (df.batch>=3)].accuracy
try:
err = 1.96 * data.std() / math.sqrt(data.count())
except ZeroDivisionError:
print(method_name, data)
continue
df2 = df2.append({
"method": method_index,
"batch": "$\mu",
"mu": data.mean().item(),
"err": err.item()
}, ignore_index=True)
draw_errors = False
headers = list(range(3, 11)) + ["$\mu$"]
table = []
for i in sorted(df2.method.unique()):
i = int(i)
off = OFFSET if draw_errors else 0.0
means = df2[df2.method == i].mu.tolist()
errs = df2[df2.method == i].err.tolist()
# means.append(np.mean(means))
if draw_errors:
row = [f"{means[j]:.3f} $\pm$ {errs[j]:.3f}" for j in range(len(means))]
else:
row = [f"{means[j]:.3f}" for j in range(len(means))]
row.insert(0, dataset_names[i])
table.append(row)
headers.insert(0, "Batch")
# bold the largest value in each column
for column_id, _ in enumerate(table[0]):
if column_id == 0:
continue
max_row_id = -1
max_row_val = -np.inf
for row_id, _ in enumerate(table):
val = float(table[row_id][column_id])
if val > max_row_val:
max_row_id = row_id
max_row_val = val
table[max_row_id][column_id] = f"\\textbf{{{max_row_val:.3f}}}"
print(tabulate(table, headers, tablefmt="latex_raw", floatfmt=".3f"))
# How much improvement did the Context model have over NoContext?
print()
print("Average accuracy over all batches:")
indices = [0, 1, 2, 3, 4]
for i in indices:
means = df2[df2.method == i].mu.tolist()
avg = np.mean(means)
print(f"{dataset_names[i]}: {avg}")
###Output
\begin{tabular}{llllllllll}
\hline
Batch & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & $\mu$ \\
\hline
Feedforward NN Ensemble & 0.921 & \textbf{0.904} & 0.979 & 0.903 & 0.777 & 0.679 & 0.864 & 0.693 & 0.840 \\
Feedforward NN & 0.881 & 0.875 & 0.974 & 0.959 & 0.792 & 0.839 & 0.896 & 0.737 & 0.869 \\
Feedforward+Context NN & 0.882 & 0.869 & 0.975 & 0.947 & \textbf{0.820} & 0.864 & \textbf{0.939} & 0.763 & 0.882 \\
LSTM & 0.891 & 0.877 & 0.923 & 0.913 & 0.809 & 0.849 & 0.932 & \textbf{0.773} & 0.871 \\
Context relu & \textbf{0.924} & 0.878 & 0.949 & 0.955 & 0.786 & \textbf{0.880} & 0.939 & 0.769 & \textbf{0.885} \\
No Context & 0.864 & 0.885 & \textbf{0.981} & \textbf{0.960} & 0.790 & 0.851 & 0.913 & 0.761 & 0.876 \\
\hline
\end{tabular}
Average accuracy over all batches:
Feedforward NN Ensemble: 0.8399901791138339
Feedforward NN: 0.8690827237475173
Feedforward+Context NN: 0.8821937662788277
LSTM: 0.8709034082551901
Context relu: 0.8851380506704699
###Markdown
Print Table 2
###Code
from main_backprop_context import ContextModel
from main_backprop_nocontext import NoContextModel
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Number of parameters:")
context_model = ContextModel(k=1)
print(f"Context Model: {count_parameters(context_model)}")
no_context_model = NoContextModel()
print(f"No Context Model: {count_parameters(no_context_model)}")
# Find the size of a No Context model with an equal # of parameters to the Context Model
n_context_parameters = count_parameters(ContextModel(k=1))
n_nocontext_parameters = 0
skill_size = 20
while n_nocontext_parameters < n_context_parameters:
skill_size += 1
n_nocontext_parameters = count_parameters(NoContextModel(skill_size=skill_size))
print(f"NoContext skill_size={skill_size}: {n_nocontext_parameters} parameters")
skill_size -= 1
n_nocontext_parameters = count_parameters(NoContextModel(skill_size=skill_size))
print(f"NoContext skill_size={skill_size}: {n_nocontext_parameters} parameters")
###Output
NoContext skill_size=96: 14429 parameters
NoContext skill_size=95: 14280 parameters
###Markdown
Single-Network Models
###Code
# Evaluate the accuracy of the single-network models on all batches
from main_backprop_ensemble import NoContextModel, test_network
from batches import split_all
import pickle
import numpy as np
n_trials = 30
def evaluate_accuracies(T: int, trial: int):
"""Load the network trained using batch T, and evaluate it on every batch 0..9
:return: Array with 10 entries"""
data_folder = f"output/ensemble_harddecay{trial}"
net = NoContextModel()
net.load_state_dict(torch.load(os.path.join(data_folder, f"model_{T}.pt")))
accuracies = []
for t in range(10):
acc = test_network(net, t, split_all)
accuracies.append(acc)
accuracies = np.array(accuracies)
return accuracies
def evaluate_accuracies_all_trials(T: int):
""":return: Accuracies shape (trials, batches)"""
trials = []
for trial in range(n_trials):
accuracies = evaluate_accuracies(T, trial)
trials.append(accuracies)
return np.stack(trials)
all_accuracies = []
for batch in range(10):
accuracies = evaluate_accuracies_all_trials(batch) # batches 0-indexed
all_accuracies.append(accuracies)
with open("output/single_network_accuracies.pkl", "wb") as f:
pickle.dump(all_accuracies, f)
with open("output/single_network_accuracies.pkl", "rb") as f:
all_accuracies = pickle.load(f)
colors = ['#d7191c', '#d98330', '#7bad74', '#2b83ba']
# Left plot
selected_batches = [1, 3, 5, 7]
fig, axes = plt.subplots(1, 2, figsize=(7.05, 3), sharey=True)
ax = axes[0]
for i, train_batch in enumerate(selected_batches):
accuracies = all_accuracies[train_batch]
errs = []
means = []
for batch in range(10):
data = accuracies[:, batch]
err = 1.96 * data.std() / math.sqrt(data.shape[0])
mean = data.mean()
errs.append(err)
means.append(mean)
ax.axvline(train_batch+1, c=colors[i], linestyle="--", linewidth=1)
ax.errorbar(
x=list(range(1, 11)),
y=means,
yerr=errs,
c=colors[i],
# label=dataset_names[i],
capsize=10,
linewidth=1,
elinewidth=1,
capthick=1,
)
ax.set_ylabel("Accuracy")
ax.set_xlabel("Batch")
ax.set_ylim([0.0, 1.0])
ax.set_yticks(np.arange(0, 1.1, 0.1))
ax.set_xticks(np.arange(1, 11))
ax.grid(axis='y', color='#bbbbbb')
def flatten_and_index(arr):
flat = []
ind = []
for i, sub in enumerate(arr):
flat += sub
ind += [i] * len(sub)
return ind, flat
ax = axes[1]
accuracies_by_difference = [[] for _ in range(10)]
for train_batch in range(10):
accuracies = all_accuracies[train_batch]
for test_batch in range(10):
data = accuracies[:, test_batch]
difference = abs(train_batch-test_batch)
accuracies_by_difference[difference] += data.tolist()
errs = []
means = []
for difference, accuracies in enumerate(accuracies_by_difference):
err = 1.96 * np.std(accuracies) / math.sqrt(len(accuracies))
mean = np.mean(accuracies)
errs.append(err)
means.append(mean)
ax.errorbar(
x=list(range(10)),
y=means,
yerr=errs,
c='black',
# label=dataset_names[i],
capsize=10,
linewidth=1,
elinewidth=1,
capthick=1,
)
# x, y = flatten_and_index(accuracies_by_difference)
# ax.scatter(x, y, alpha=0.2)
ax.set_xticks(np.arange(10))
ax.grid(axis='y', color='#bbbbbb')
ax.set_xlabel("Difference of Batches")
fig.savefig("writeup/figure_sources/feedforward_centers.svg")
###Output
_____no_output_____
###Markdown
Analyse launches whose `spec` contains "corona" or "covid"
###Code
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from urllib.parse import unquote
%matplotlib inline
df = pd.read_csv("covid_binder_launches_2019_12_01_2020_09_10.csv")
# convert timestamp to datetime
df["date"] = pd.to_datetime(df["timestamp"])
# select only the columns that are needed for analysis
df = df[["date", "provider", "spec"]]
# set date as index
df.set_index('date',inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Launch analysis Number of launches
###Code
len(df)
###Output
_____no_output_____
###Markdown
Number of launches per day
###Code
# .size() returns Series, so convert it into dataframe
df_launch = df.groupby([df.index.date]).size().to_frame(name="launches")
ax = df_launch.plot(y="launches", kind="bar", use_index=True, figsize=(20, 5))
# show x labels only for the beginning of each week, otherwise they are not readable
x = [i.strftime('%b %d') if i.isoweekday() == 1 else "" for i in df_launch.index]
# set_xticklabels returns the list; assign it to a variable so it is not echoed as output
_ = ax.set_xticklabels(x, rotation=0)
###Output
_____no_output_____
###Markdown
Repo analysis
###Code
df.provider.unique()
def unique_repo_info(provider, spec):
"""
Strips out the ref info and returns the unique repo info from provider and spec.
"""
prefix = {
'GitHub': 'gh',
'Gist': 'gist',
'GitLab': 'gl',
'Git': 'git',
'Zenodo': 'zenodo',
'Figshare': 'figshare',
'Hydroshare': 'hydroshare',
'Dataverse': 'dataverse',
}
if provider == 'GitHub':
org, repo_name, _ = spec.split('/', 2)
namespace = f"{org}/{repo_name}"
elif provider == 'GitLab':
quoted_namespace, _ = spec.split('/', 1)
namespace = unquote(quoted_namespace)
elif provider == 'Git':
quoted_repo_url, _ = spec.rsplit('/', 1)
namespace = unquote(quoted_repo_url)
else:
raise Exception(f"parsing {provider} is not implemented")
if namespace.endswith(".git"):
namespace = namespace[:-(len(".git"))]
repo = f'{prefix[provider]}/{namespace}'
return repo
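# A couple of illustrative calls (hypothetical spec values, not taken from the dataset):
#   unique_repo_info('GitHub', 'jupyterhub/binderhub/master')            -> 'gh/jupyterhub/binderhub'
#   unique_repo_info('Git', 'https%3A%2F%2Fexample.org%2Frepo.git/HEAD') -> 'git/https://example.org/repo'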
df["repo"] = df.apply(lambda row: unique_repo_info(row["provider"], row["spec"]), axis=1)
df_repo = df[["repo"]]
df_repo.head()
###Output
_____no_output_____
###Markdown
Number of unique repos
###Code
len(df_repo.repo.unique())
###Output
_____no_output_____
###Markdown
Popular repos
###Code
df_repo.groupby(["repo"]).size().reset_index(name="launches").sort_values("launches", ascending=False).head(5)
###Output
_____no_output_____
###Markdown
Number of launched repos per day
###Code
# nunique: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.core.groupby.SeriesGroupBy.nunique.html
df_repo_unique = df_repo.groupby([df_repo.index.date]).nunique()
df_repo_unique.columns = ["repos"]
ax = df_repo_unique.plot(y="repos", kind="bar", use_index=True, figsize=(20, 5))
# show x labels only for the beginning of each week, otherwise they are not readable
x = [i.strftime('%b %d') if i.isoweekday() == 1 else "" for i in df_repo_unique.index]
# set_xticklabels returns the list; assign it to a variable so it is not echoed as output
_ = ax.set_xticklabels(x, rotation=90)
###Output
_____no_output_____
###Markdown
Plot the graphs in the database
###Code
for g in gs.graphs.values():
g.plot()
from gspan import gSpan
min_support = 30
alt = False
gs = gSpan(
database_file_name='/data/experiments/DocRED/DataViewer/results/docred_train.data',
min_support=min_support,
min_num_vertices=2,
# max_num_vertices=3,
min_num_edges=1,
# max_ngraphs=5,
is_undirected=False,
verbose=False,
visualize=False,
where=True,
alternative_support=alt
)
"""Run the gSpan algorithm."""
# gathered = gs.run(gather=True)
# print(len(gathered))
# print(list(pdfs.edge for pdfs in gathered[0]))
gs.run()
df1 = gs._report_df
df1.to_pickle(f'all_patterns_support_{min_support}{"_alt" if alt else ""}.pkl')
gathered = gs.run(gather=True)
print(list(sorted(set(pdfs.gid for pdfs in gathered[0]))))
from perspective import PerspectiveWidget, Table
import pandas as pd
df1 = pd.read_pickle(f'all_patterns_support_{min_support}{"_alt" if alt else ""}.pkl')
PerspectiveWidget(df1)
print(df1['description'][58])
import itertools
with open('/data/experiments/DocRED/DataViewer/results/docred_train.data', 'r', encoding='utf8') as graph:
transactions = []
for line in graph:
line = line.strip()
t, *rest = line.split()
if t == 't':
c = int(rest[-1])
# if c >= 5:
# break
transactions.append({'v':[], 'e':[]})
elif t == 'v':
transactions[c][t].append(rest[-1])
else: # t == 'e':
transactions[c][t].append([int(rest[0]), int(rest[1]), rest[2]])
# for t in transactions:
# print()
# print(t)
# for i, e in enumerate(t['e']):
# print(f'#{i}: {t["v"][e[0]]}_{e[0]} {t["v"][e[1]]}_{e[1]} {e[2]}')
# print()
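# For reference, the .data file parsed above is assumed to follow the usual gSpan transaction
# format; an illustrative (made-up) snippet would look like:
#   t # 0
#   v 0 PER
#   v 1 ORG
#   e 0 1 works_for
# where 't' starts a new graph, 'v <id> <label>' adds a vertex, and 'e <src> <dst> <label>'
# adds a directed, labelled edge -- which is exactly what the parsing loop above expects.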
def get_edge_patterns(pattern):
pattern_dict = {'v':[], 'e':[]}
for line in pattern.split('\n'):
line = line.strip()
t, *rest = line.split()
if t == 't':
continue
elif t == 'v':
pattern_dict[t].append(rest[-1])
else: # t == 'e':
pattern_dict[t].append([int(rest[0]), int(rest[1]), rest[2]])
return [(pattern_dict['v'][start], pattern_dict['v'][end], label) for start, end, label in pattern_dict['e']], pattern_dict
def get_candidates(graph, edge_pattern):
vs = graph['v']
es = graph['e']
candidate_edges = []
for i, (start, end, label) in enumerate(es):
if (vs[start], vs[end], label) == edge_pattern:
candidate_edges.append(i)
return candidate_edges
def trace(graph2, pattern2, steps, ignore_permutations=False):
# vs2 = pattern2['v']
es2 = pattern2
candidate_paths = itertools.product(*steps)
candidate_paths = [p for p in candidate_paths if len(p) == len(set(p))]
gvs = graph2['v']
ges = graph2['e']
out = []
# print("Candidates:", candidate_paths)
varsets = list()
for cp2 in candidate_paths:
# Check if it's valid. We trust the labels and the types.
# This is basically a type of variable resolution, heh.
variables = dict()
okay = True
# print(cp2)
# print(es2)
if len(cp2) == len(es2):
for step, e in zip(cp2, es2):
# print(step, e)
v0, v1, _ = e
start, end, _ = ges[step]
if v0 in variables:
# print(f"{variables[v0]}!={start} ? (start): {variables}")
if variables[v0] != start:
okay = False
break
if v1 in variables:
# print(f"{variables[v1]}!={end} ? (end): {variables}")
if variables[v1] != end:
okay = False
break
variables[v0] = start
variables[v1] = end
if not okay:
continue
varset = set(variables.values())
# If we didn't assign the same value to different variables
if len(varset) == len(variables):
# If we haven't used exactly this set of values before.
# That is, if we change the order of values and it's still the same pattern,
# then it's just one instance of that pattern. Permutation invariance.
if varset in varsets and ignore_permutations:
continue
varsets.append(varset)
out.append(cp2)
return out
def debug_one(df, index, verbose=True):
tot = 0
pattern = df['description'][index]
support = df['support'][index]
edge_patterns, pattern_dict = get_edge_patterns(pattern)
if verbose:
print(pattern)
for tr in transactions:
possible_steps = [get_candidates(tr, ep) for ep in edge_patterns]
# possible_steps = [[10], [3], [5], [9]]
founds = trace(tr, pattern_dict['e'], possible_steps)
if verbose:
print(founds)
tot += len(founds)
if verbose:
print(index, tot, support)
def debug_all(df):
for i, (pattern, support) in enumerate(zip(df['description'], df['support'])):
tot = 0
edge_patterns, pattern_dict = get_edge_patterns(pattern)
for tr in transactions:
possible_steps = [get_candidates(tr, ep) for ep in edge_patterns]
founds = trace(tr, pattern_dict['e'], possible_steps)
# print(founds)
tot += len(founds)
if tot != support:
print(i, tot, support)
# debug_one(df1, 19)
# debug_all(df1)
# from multiprocessing.dummy import Pool, Lock
# def make_corrections(df_in):
# counts = [0 for _ in range(len(df_in['support']))]
# lock = Lock()
# def count_(inp):
# # print(inp)
# i, pattern = inp
# tot = 0
# edge_patterns, pattern_dict = get_edge_patterns(pattern)
# for tr in transactions:
# possible_steps = [get_candidates(tr, ep) for ep in edge_patterns]
# founds = trace(tr, pattern_dict['e'], possible_steps)
# # print(founds)
# tot += len(founds)
# lock.acquire()
# print(i, tot)
# counts[i] = tot
# lock.release()
# pool = Pool(23)
# # print(list(zip(*enumerate(df_in['description']))))
# pool.map(count_, enumerate(df_in['description']))
# df_in["counts"] = counts
# return df_in
# df2 = pd.read_pickle(f'all_patterns_support_{min_support}_wcounts.pkl')
import multiprocessing as mp
import numpy as np
def count_(inp):
# print(inp)
counts = np.zeros(len(df1['support']), dtype=int)
j, tr = inp
tot = 0
for i, pattern in enumerate(df1['description']):
edge_patterns, pattern_dict = get_edge_patterns(pattern)
possible_steps = [get_candidates(tr, ep) for ep in edge_patterns]
founds = trace(tr, pattern_dict['e'], possible_steps, ignore_permutations=True)
counts[i] += len(founds)
print(f'{j},', end='')
return counts
def make_corrections_2(df_in):
    lock = mp.Lock()
p = mp.Pool(mp.cpu_count())
res = p.map_async(count_, enumerate(transactions))
p.close()
p.join()
print()
# print(np.sum(res.get(), axis=0))
df_in["counts"] = np.sum(res.get(), axis=0)
return df_in
# df2 = pd.read_pickle(f'all_patterns_support_{min_support}_wcounts_2.pkl')
df2 = make_corrections_2(df1)
df2.to_pickle(f'all_patterns_support_{min_support}_wcounts_noperms_gids.pkl')
PerspectiveWidget(df2)
# Now we want to calculate the coverage the patterns have of the dev/eval set.
# Then after we want to do the same for predictions (which will need to be formatted...)
# Document coverage:
# Basically do the counting but mark triples which are included.
def trace_coverage(graph2, pattern2, steps, coverage, ignore_permutations=True):
# vs2 = pattern2['v']
es2 = pattern2
candidate_paths = itertools.product(*steps)
candidate_paths = [p for p in candidate_paths if len(p) == len(set(p))]
gvs = graph2['v']
ges = graph2['e']
if coverage is None:
coverage = np.zeros(len(ges), dtype=int)
# print("Candidates:", candidate_paths)
varsets = list()
for cp2 in candidate_paths:
# Check if it's valid. We trust the labels and the types.
# This is basically a type of variable resolution, heh.
variables = dict()
okay = True
# print(cp2)
# print(es2)
if len(cp2) == len(es2):
for step, e in zip(cp2, es2):
# print(step, e)
v0, v1, _ = e
start, end, _ = ges[step]
if v0 in variables:
# print(f"{variables[v0]}!={start} ? (start): {variables}")
if variables[v0] != start:
okay = False
break
if v1 in variables:
# print(f"{variables[v1]}!={end} ? (end): {variables}")
if variables[v1] != end:
okay = False
break
variables[v0] = start
variables[v1] = end
if not okay:
continue
varset = set(variables.values())
# If we didn't assign the same value to different variables
if len(varset) == len(variables):
# If we haven't used exactly this set of values before.
# That is, if we change the order of values and it's still the same pattern,
# then it's just one instance of that pattern. Permutation invariance.
if varset in varsets and ignore_permutations:
continue
varsets.append(varset)
coverage[list(cp2)] = 1
return coverage
def map_coverage(inp):
# print(inp)
# counts = np.zeros(len(df2['support']), dtype=int)
j, tr = inp
tot = 0
coverage = None
for i, pattern in enumerate(df2['description']):
edge_patterns, pattern_dict = get_edge_patterns(pattern)
possible_steps = [get_candidates(tr, ep) for ep in edge_patterns]
coverage = trace_coverage(tr, pattern_dict['e'], possible_steps, coverage)
if sum(coverage) == len(coverage):
break
# counts[i] += len(founds)
# print(f'{j},')
return np.sum(coverage), len(coverage)
# return counts
def find_coverage(trs):
    lock = mp.Lock()
p = mp.Pool(mp.cpu_count())
res = p.map_async(map_coverage, enumerate(trs))
p.close()
p.join()
covered, total = zip(*res.get())
print(f'{sum(covered)}/{sum(total)}: {sum(covered)/sum(total)}%')
# print(sum(covered), sum(total))
# print(np.sum(res.get(), axis=0))
# df_in["counts"] = np.sum(res.get(), axis=0)
# return df_in
find_coverage(transactions)
# map_coverage((0, transactions[0]))
with open('/data/experiments/DocRED/DataViewer/results/docred_dev.data', 'r', encoding='utf8') as graph:
transactions_dev = []
for line in graph:
line = line.strip()
t, *rest = line.split()
if t == 't':
c = int(rest[-1])
# if c >= 5:
# break
transactions_dev.append({'v':[], 'e':[]})
elif t == 'v':
transactions_dev[c][t].append(rest[-1])
else: # t == 'e':
transactions_dev[c][t].append([int(rest[0]), int(rest[1]), rest[2]])
find_coverage(transactions_dev)
# When I come back from vacation.
# I need to make sure to redo a lot of this.
###Output
_____no_output_____
###Markdown
Loading the data collected from the first page and cleaning the results. The location field ('local') was split into two: city ('cidade') and state ('uf'). In the revenue field ('receita'), the symbols were removed and the format was converted to numeric (it was previously text).
###Code
def load_main_data(file_path):
'''
    Function that loads the data collected at the first level of the crawler:
    company names, the link to the second level, the company location and
    the revenue in millions for the year.
    The data is loaded from a pickle file containing a list of dictionaries,
    where the keys are the variable names and the values are the contents.
    The data is loaded into a pandas DataFrame and then cleaned:
    - the revenue is converted to a number (float), with the '$' symbol and the letter 'M'
      removed from every record
    - the city is extracted from the 'local' field (the first part, before the comma)
    - the state (UF) is extracted from 'local' as the middle part between the commas
    - the company names are reformatted to title case (first letter of each word
      capitalized)
'''
with open(file_path, "rb") as file:
regs_data = pickle.load(file)
df = pd.DataFrame(regs_data)
df['receita_milhao'] = df['receita'].str.strip('$|M').astype('float')
df['cidade'] = df['local'].str.split(',').apply(lambda x: x[0]).str.strip()
df['uf'] = df['local'].str.split(',').apply(lambda x: x[1]).str.strip()
df['empresa_nome'] = df['empresa_nome'].str.title()
return df[['empresa_nome', 'receita_milhao', 'cidade', 'uf', 'empresa_href']]
file_path = r'data\test_scrap.pickle'
main_df = load_main_data(file_path)
###Output
_____no_output_____
###Markdown
Testing whether all links (empresa_href) start with a valid address. A valid address would start with: 'https://www.dnb.com/business-directory/company-profiles'
###Code
main_df['empresa_href'].str.startswith('https://www.dnb.com/business-directory/company-profiles').all()
def load_desc_data(file_path):
with open(file_path, "rb") as file:
regs_data = pickle.load(file)
df = pd.DataFrame(regs_data)
df['descricao'] = (df['descricao'].
str.strip('<span class="company_summary">').
str.split('\n<br><br>\n', expand=True)[0].
str.replace('&', '&').str.strip())
df['industria'] = df['industria'].str.join('; ')
df.rename(columns={'url': 'empresa_href'}, inplace=True)
return df[['empresa_href', 'descricao', 'industria']]
file_path = r'data\inner_scrap.pickle'
desc_df = load_desc_data(file_path)
desc_df
df = pd.merge(main_df, desc_df)
df
def ecdf(data):
x = data.sort_values()
y = np.arange(1, len(data) + 1) / len(data)
return x, y
threshold = 2000
data = df.loc[df['receita_milhao'] < threshold, 'receita_milhao']
x, y = ecdf(data)
plt.plot(x, y, marker='.', linestyle='none')
plt.show()
df['receita_milhao'].sort_values()
df.to_csv('cafe_data.csv', sep=';', encoding='UTF-8')
df['industria'].str.split('; ').explode().reset_index().drop_duplicates()['industria'].value_counts()
uf_df = df.groupby('uf').agg({'receita_milhao': 'sum', 'empresa_nome': 'count'})
uf_df.rename(columns={'empresa_nome': 'n_empresas'}, inplace=True)
uf_df['receita_media'] = uf_df['receita_milhao'] / uf_df['n_empresas']
uf_df.sort_values(by='receita_milhao', ascending=False, inplace=True)
uf_df
###Output
_____no_output_____
###Markdown
Analysis of the Freewar statistics Imports and Setup
###Code
from datetime import datetime
from pathlib import Path
import pandas as pd
from matplotlib import dates as mdates
from matplotlib import pyplot as plt
from matplotlib import ticker
from tqdm import tqdm
csv_path = Path.cwd() / 'FreewarStatistics.csv'
###Output
_____no_output_____
###Markdown
Read the Data
###Code
csv_date_parser = lambda d: datetime.strptime(d, '%d.%m.%y %H:%M')
df = pd.read_csv(csv_path, parse_dates=['date'], date_parser=csv_date_parser)
(df.head(5))
###Output
_____no_output_____
###Markdown
Plot XP
###Code
fig, ax = plt.subplots(figsize=(16, 9))
ax.plot_date(df['date'], df['xp'], 'k-')
# grid
ax.grid()
ax.minorticks_on()
ax.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
# text
ax.set_title('All Data', fontsize=32, fontweight='bold')
ax.set_xlabel('Time', fontsize=24, fontweight='bold')
ax.set_ylabel('Experience [xp]', fontsize=24, fontweight='bold')
# Add second y-axis
ax2 = ax.twinx()
ax2.plot_date(df['date'], df['total'], 'r-')
ax2.set_ylabel('Total Assets [gm]', color='red', fontsize=24, fontweight='bold')
ax2.tick_params(axis='y', labelcolor='red')
# format x-axis
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=6))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))
ax.xaxis.set_tick_params(rotation=45)
ax.xaxis.set_tick_params(labelsize=16)
# format y-axis
y_formatter = ticker.EngFormatter('')
ax.yaxis.set_major_formatter(y_formatter)
ax2.yaxis.set_major_formatter(y_formatter)
ax.yaxis.set_tick_params(labelsize=16)
ax2.yaxis.set_tick_params(labelsize=16)
# white background for title and axes
fig.patch.set_facecolor('white')
fig.patch.set_alpha(0.7)
###Output
_____no_output_____
###Markdown
Save the Figure
###Code
fig.savefig('FreewarStatistics.pdf', bbox_inches='tight')
fig.savefig('FreewarStatistics.svg', bbox_inches='tight')
fig.savefig('FreewarStatistics.png', dpi=600, bbox_inches='tight')
fig.savefig('FreewarStatistics_small.png', dpi=40, bbox_inches='tight')
print('Figure saved! ' + datetime.now().strftime('%d.%m.%Y %H:%M'))
###Output
Figure saved! 12.01.2022 20:14
###Markdown
Plot years
###Code
years = range(2016, datetime.now().year + 1)
fig, axs = plt.subplots(len(years), 1, figsize=(16, len(years) * 7))
i = -1
for year in tqdm(reversed(years), desc='plot years'):
i += 1
filtered = df[df['date'].dt.strftime('%Y') == str(year)]
axs[i].plot_date(filtered['date'], filtered['xp'], 'k-')
# grid
axs[i].grid()
axs[i].minorticks_on()
axs[i].grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
# text
axs[i].set_title(f'Data for {year}', fontsize=32, fontweight='bold')
if year == years[-1]:
axs[i].xaxis.tick_top()
axs[i].xaxis.set_label_position('top')
axs[i].set_ylabel('Experience [xp]', fontsize=24, fontweight='bold')
# Add second y-axis
ax2 = axs[i].twinx()
ax2.plot_date(filtered['date'], filtered['total'], 'r-')
ax2.set_ylabel('Total Assets [gm]', color='red', fontsize=24, fontweight='bold')
ax2.tick_params(axis='y', labelcolor='red')
# format x-axis
axs[i].set_xlim([datetime(year, 1, 1), datetime(year, 12, 31)])
axs[i].xaxis.set_major_locator(mdates.MonthLocator(interval=1))
axs[i].xaxis.set_major_formatter(mdates.DateFormatter('%b'))
if year not in [years[0], years[-1]]:
axs[i].xaxis.set_ticklabels([])
axs[i].xaxis.set_tick_params(rotation=45)
axs[i].xaxis.set_tick_params(labelsize=16)
# format y-axis
y_formatter = ticker.EngFormatter('')
axs[i].yaxis.set_major_formatter(y_formatter)
ax2.yaxis.set_major_formatter(y_formatter)
axs[i].yaxis.set_tick_params(labelsize=16)
ax2.yaxis.set_tick_params(labelsize=16)
# white background for title and axes
fig.patch.set_facecolor('white')
fig.patch.set_alpha(0.7)
###Output
plot years: 7it [00:00, 43.74it/s]
###Markdown
Save Figure
###Code
fig.savefig('FreewarStatistics_years.pdf', bbox_inches='tight')
fig.savefig('FreewarStatistics_years.svg', bbox_inches='tight')
fig.savefig('FreewarStatistics_years.png', dpi=600, bbox_inches='tight')
fig.savefig('FreewarStatistics_years_small.png', dpi=40, bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Profile of RMSD
###Code
for i in range(1,11):
log = log_dict['ts_%d'%i]
x = [0] + [1 + i * 5 for i in range(len(log)-1)]
plt.plot(x, log, linewidth = 1.6)
pacs_ave = np.mean([log_dict['ts_%d'%i] for i in range(1,11)], axis = 0)
plt.plot(x, pacs_ave, color = 'red', linewidth=3)
plt.figure(figsize=(7,2))
sns.boxplot(best_dict['ts'])
###Output
/anaconda/lib/python3.6/site-packages/seaborn/categorical.py:454: FutureWarning: remove_na is deprecated and is a private function. Do not use.
box_data = remove_na(group_data)
###Markdown
This is an investigatory analysis of the sales data of a restaurant... It has been done mainly in SQL, running on top of PySpark... So let's get started... First we import the pyspark and findspark modules... and also initialize the findspark module...
###Code
import findspark
findspark.init('/Users/nishantuzir/spark-2.3.0-bin-hadoop2.7')
import pyspark
###Output
_____no_output_____
###Markdown
Now we initialize the SparkContext object...
###Code
sc = pyspark.SparkContext()
###Output
_____no_output_____
###Markdown
Here we read both JSON files using the wholeTextFiles method. One thing to know here is that wholeTextFiles produces an RDD of tuples whose first element is the filename and whose second element is the file content, with lines separated by whitespace. We use map to build a new RDD from the second element of each tuple.
###Code
data1 = sc.wholeTextFiles('/data/orders.json').map(lambda x: x[1])
data2 = sc.wholeTextFiles('/data/order_items.json').map(lambda x: x[1])
###Output
_____no_output_____
###Markdown
As mentioned earlier, the data comes as lines separated by whitespace, so we need to remove this unnecessary whitespace. We will do that using the re package...
###Code
import re
data1 = data1.map(lambda x: re.sub('\s+','',x))
data2 = data2.map(lambda x: re.sub('\s+','',x))
###Output
_____no_output_____
###Markdown
After that we import SQLContext and initialize it...
###Code
from pyspark.sql import SQLContext
sqlcontext = SQLContext(sc)
###Output
_____no_output_____
###Markdown
Now it's time to consume the RDDs using the SQLContext object named sqlcontext... After that, we create temporary tables using registerTempTable, passing in the table names...
###Code
orders = sqlcontext.read.json(data1)
order_items = sqlcontext.read.json(data2)
orders.registerTempTable('orders')
order_items.registerTempTable('order_items')
###Output
_____no_output_____
###Markdown
So we are done with the preparation part... Now let's do some analysis using good old SQL running on top of Spark!!
###Code
sqlcontext.sql('select * from orders' ).show(5)
sqlcontext.sql('select * from order_items').show(5)
###Output
+-----------+---+--------------------+--------+
|amount_paid| id| name|order_id|
+-----------+---+--------------------+--------+
| 205| 0| chicken-burger| 114|
| 225| 1|chicken-tikka-san...| 2825|
| 185| 2|almond-choco-dip-...| 4717|
| 105| 3| juice| 1035|
| 185| 4|grilled-cheese-sa...| 1023|
+-----------+---+--------------------+--------+
only showing top 5 rows
###Markdown
So, everything is working just fine!! We are good to go... Let's see how many orders are placed per day...
###Code
sqlcontext.sql('select ordered_at,count(1) as total_orders from orders group by 1 order by 1').show(10)
###Output
+----------+------------+
|ordered_at|total_orders|
+----------+------------+
|2015-08-09| 1|
|2015-08-11| 5|
|2015-08-12| 1|
|2015-08-13| 2|
|2015-08-14| 9|
|2015-08-15| 4|
|2015-08-16| 5|
|2015-08-17| 6|
|2015-08-18| 4|
|2015-08-19| 10|
+----------+------------+
only showing top 10 rows
###Markdown
Now let's see the revenue collected per day from kale-smoothie orders... To do that, we will have to join the two tables...
###Code
sqlcontext.sql('select ordered_at, round(sum(amount_paid),2) as revenue_collected from orders join order_items on orders.id = order_items.order_id where name = "kale-smoothie" group by ordered_at order by ordered_at').show(20)
###Output
+----------+-----------------+
|ordered_at|revenue_collected|
+----------+-----------------+
|2015-08-23| 175|
|2015-08-26| 175|
|2015-08-27| 175|
|2015-09-01| 175|
|2015-09-03| 175|
|2015-09-05| 175|
|2015-09-10| 175|
|2015-09-11| 175|
|2015-09-12| 175|
|2015-09-13| 175|
|2015-09-20| 350|
|2015-09-23| 175|
|2015-09-26| 525|
|2015-09-28| 175|
|2015-09-29| 175|
|2015-09-30| 175|
|2015-10-04| 175|
|2015-10-06| 175|
|2015-10-09| 175|
|2015-10-10| 350|
+----------+-----------------+
only showing top 20 rows
###Markdown
Well, we can't say much from this... Let's break it down: we will look at the total revenue collected per food item over the entire period covered by the dataset, arranged in descending order...
###Code
sqlcontext.sql('select name, round(sum(amount_paid), 2) as total_revenue from order_items group by name order by 2 desc').show()
###Output
+--------------------+-------------+
| name|total_revenue|
+--------------------+-------------+
|chicken-tikka-san...| 1130400|
|grilled-cheese-sa...| 770155|
| chicken-burger| 711350|
|almond-choco-dip-...| 525955|
| soda| 195525|
| juice| 104685|
| cake| 96660|
| banana-smoothie| 18900|
| kale-smoothie| 12600|
+--------------------+-------------+
###Markdown
It seems somewhat more comprehensible now... Let's now see the percentage of revenue each of the food items represents... This will give a better idea...
###Code
sqlcontext.sql('select name, round(sum(amount_paid) /(select sum(amount_paid) from order_items) * 100.0, 2) as pct from order_items group by 1 order by 2 desc').show()
###Output
+--------------------+-----+
| name| pct|
+--------------------+-----+
|chicken-tikka-san...| 31.7|
|grilled-cheese-sa...| 21.6|
| chicken-burger|19.95|
|almond-choco-dip-...|14.75|
| soda| 5.48|
| juice| 2.94|
| cake| 2.71|
| banana-smoothie| 0.53|
| kale-smoothie| 0.35|
+--------------------+-----+
###Markdown
Whoa!! It looks like smoothies are not bringing in much revenue! Let's be absolutely sure about this... To do that we need to group the food items into categories such as sandwich, burger, juice, etc...
###Code
sqlcontext.sql('select *,case name when "kale-smoothie" then "smoothie" when "banana-smoothie" then "smoothie" when "orange-juice" then "drink" when "soda" then "drink" when "almond-choco-dip-biscotti" then "desert" when "grilled-cheese-sandwich" then "sandwich" when "chicken-tikka-sandwich" then "sandwich" when "chicken-burger" then "burger" else "desert" end as category from order_items order by id limit 100').show()
###Output
+-----------+---+--------------------+--------+--------+
|amount_paid| id| name|order_id|category|
+-----------+---+--------------------+--------+--------+
| 205| 0| chicken-burger| 114| burger|
| 225| 1|chicken-tikka-san...| 2825|sandwich|
| 185| 2|almond-choco-dip-...| 4717| desert|
| 105| 3| juice| 1035| desert|
| 185| 4|grilled-cheese-sa...| 1023|sandwich|
| 205| 5| chicken-burger| 4359| burger|
| 225| 6|chicken-tikka-san...| 3929|sandwich|
| 205| 7| chicken-burger| 3704| burger|
| 185| 8|grilled-cheese-sa...| 1666|sandwich|
| 225| 9|chicken-tikka-san...| 1477|sandwich|
| 225| 10|chicken-tikka-san...| 4369|sandwich|
| 185| 11|grilled-cheese-sa...| 998|sandwich|
| 205| 12| chicken-burger| 2730| burger|
| 225| 13|chicken-tikka-san...| 3038|sandwich|
| 205| 14| chicken-burger| 3602| burger|
| 225| 15|chicken-tikka-san...| 1484|sandwich|
| 225| 16|chicken-tikka-san...| 4382|sandwich|
| 225| 17|chicken-tikka-san...| 4778|sandwich|
| 185| 18|grilled-cheese-sa...| 646|sandwich|
| 205| 19| chicken-burger| 4692| burger|
+-----------+---+--------------------+--------+--------+
only showing top 20 rows
###Markdown
Now we will see the percentage of sales for each of the food categories that we prepared in the last command...
###Code
sqlcontext.sql('select case name when "kale-smoothie" then "smoothie" when "banana-smoothie" then "smoothie" when "orange-juice" then "drink" when "soda" then "drink" when "almond-choco-dip-biscotti" then "desert" when "grilled-cheese-sandwich" then "sandwich" when "chicken-tikka-sandwich" then "sandwich" when "chicken-burger" then "burger" else "desert" end as category, round(1.0 * sum(amount_paid) /(select sum(amount_paid) from order_items) * 100, 2) as pct from order_items group by 1 order by 2 desc').show(20)
###Output
+--------+-----+
|category| pct|
+--------+-----+
|sandwich|53.29|
| desert|20.39|
| burger|19.95|
| drink| 5.48|
|smoothie| 0.88|
+--------+-----+
###Markdown
So it looks like smoothies are really not bringing in the big bucks for the restaurant... So should they remove the items altogether??? Well, let's take a closer look... In fact, before taking them off the menu, we need to figure out how many customers ordered them...
###Code
sqlcontext.sql('select name, count(distinct order_id) as distinct_order_ids from order_items group by 1 order by 2 desc').show()
###Output
+--------------------+------------------+
| name|distinct_order_ids|
+--------------------+------------------+
|chicken-tikka-san...| 3168|
|grilled-cheese-sa...| 2832|
| chicken-burger| 2487|
|almond-choco-dip-...| 2175|
| soda| 2041|
| juice| 905|
| cake| 669|
| banana-smoothie| 105|
| kale-smoothie| 72|
+--------------------+------------------+
###Markdown
Well, it looks like smoothies are not ordered by many people, especially kale-smoothies! What might be the reason? Don't they like it? And what about the 72 people who ordered the smoothie over the course of 5 months? Let's have a look at the reorder rate of kale-smoothie... The reorder rate can be defined as the ratio of the total number of distinct orders for a food item to the total number of customers purchasing it; if the ratio is high, that means a high reorder rate, and vice versa...
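As a quick worked example using the numbers above and below: kale-smoothie appears in 72 rows of the join, spread over 72 distinct orders, so its reorder rate comes out to 72 / 72 = 1.00.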
###Code
sqlcontext.sql('select name, round(1.0 * count(distinct order_id) / count(delivered_to), 2) as reorder_rate from order_items join orders on orders.id = order_items.order_id group by 1 order by 2 desc').show()
###Output
+--------------------+------------+
| name|reorder_rate|
+--------------------+------------+
| kale-smoothie| 1.00|
| banana-smoothie| 0.97|
| cake| 0.93|
| juice| 0.91|
| soda| 0.78|
|almond-choco-dip-...| 0.76|
| chicken-burger| 0.72|
|grilled-cheese-sa...| 0.68|
|chicken-tikka-san...| 0.63|
+--------------------+------------+
###Markdown
The distributions of breweries in the US
###Code
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_theme(style="ticks")
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams["figure.figsize"] = (12, 8)
file = './breweries.csv'
df = pd.read_csv(file)
display(df.head(2))
#select only the (open) breweries in the lower 48 states
df = df[(df['country'] == 'United States') & (df['state'] != 'Alaska') & (df['state'] != 'Hawaii') \
& (df['brewery_type']!='closed') & (df['brewery_type']!='planning')]
#drop the NaN values for those without location information
df['longitude'].dropna(axis=0, inplace=True)
df.dropna(subset=['longitude', 'latitude'], inplace=True)
#print (df.isnull().sum())
#plot the spatial distribution of each brewery type
longitude = df['longitude'].to_numpy()
latitude = df['latitude'].to_numpy()
brewery_types = df['brewery_type'].unique()
brewery_types_summary = {}
for brewery_type in brewery_types:
#print(brewery_type)
sel = df['brewery_type'] == brewery_type
print (brewery_type, ":", sum (sel))
brewery_types_summary[brewery_type] = sum (sel)
plt.plot(df[sel].longitude, df[sel].latitude, 'o', alpha=1, label=brewery_type)
#print(df['brewery_type'].unique())
plt.legend(loc='best', fontsize='x-large')
plt.title("brewery types across lower 48 states", fontsize='xx-large')
plt.show()
#print(brewery_types_summary)
myList = brewery_types_summary.items()
myList = sorted(myList)
labels, counts = zip(*myList)
sorted_labels = np.arange(len(labels))
plt.bar(sorted_labels, counts)
plt.xticks(sorted_labels, labels, rotation='vertical', fontsize='xx-large')
plt.ylabel("numbers", fontsize='xx-large')
plt.show()
df = df.sort_values(by='state')
brewery_state = df['state'].unique()
brewpub_stat_per_state = {}
micro_stat_per_state = {}
for state in brewery_state:
sel = (df['brewery_type'] == 'brewpub') & (df['state'] == state)
brewpub = sum(sel)
sel = (df['brewery_type'] == 'micro') & (df['state'] == state)
micro = sum(sel)
sel = (df['state'] == state)
total = sum(sel)
brewpub_stat_per_state[state] = brewpub / total
micro_stat_per_state[state] = micro / total
bar_width = 1
brewpubList = brewpub_stat_per_state.items()
brewpub = sorted(brewpubList)
states, brewpub = zip(*brewpub)
sorted_states = np.arange(len(states))
plt.bar(sorted_states, brewpub, bar_width, label='brew pub')
microList = micro_stat_per_state.items()
microList = sorted(microList)
states, micro = zip(*microList)
sorted_states = np.arange(len(states))
plt.bar(sorted_states, micro, bar_width, bottom = brewpub, label='micro brewery')
plt.xticks(sorted_states, states, rotation='vertical', fontsize='x-large')
plt.ylabel("brew pub vs microbrewery", fontsize='xx-large')
plt.legend(fontsize='xx-large')
plt.ylim([0, 1.25])
plt.show()
###Output
_____no_output_____
###Markdown
###Code
import pandas as pd
data = pd.read_json("/Users/pbhagwat/DEV/UnivAi/Assignment3/RestroRecommender/Data/yelp_dataset/yelp_academic_dataset_review.json")
print(data.head())
###Output
_____no_output_____
###Markdown
Road User Classification
by Kuanchieh Peng

Problem Statement
Build the best feasible model that will later be used to classify road users in real time. **The top priorities are: maximizing classification accuracy on unseen data and minimizing prediction time**.

Background
The automatic emergency braking (AEB) system on cars brakes automatically when sensing a possible collision with another road user. AEB is designed to reduce vehicle speed the most when sensing a collision with cars, then bikers, and finally reduces vehicle speed the least when sensing a collision with pedestrians. Therefore, **in this project, we especially don't want to misclassify an actual car as another type of road user**.

Steps
- **EDA & Preprocessing**
  - Cleaning
    - Handling outliers
    - Converting independent and dependent variables into desired data types
  - Transformation
    - Univariate distplots and boxplots to spot skewness
  - Scaling
  - Checking Multicollinearity
    - Bivariate correlation heatmaps and pairplots to understand pairwise correlations
  - Checking Linear Separability
    - Training a hard margin linear SVC to test linear separability
- **Model Selection**
- **Evaluation Metric Selection**
- **Modeling**
  - Logistic Regression
  - XGBoost
- **Evaluation**
  - F-Beta Score
  - Prediction Speed
  - ROC curve (AUC)

Sources
- Dataset is "Mobile Accelerometer Car 12K" from Kaggle.
- Background information derived from "City Safety, 2020 XC90 owner's manual" from Volvo.

Imports
###Code
!pip install hyperopt
# for preprocessing
import pandas as pd
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
# for visualizations
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# for modeling
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
import statsmodels.api as sm
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
from hyperopt import hp, STATUS_OK
from sklearn.model_selection import RepeatedStratifiedKFold
# for evaluation
from sklearn.metrics import fbeta_score, f1_score, accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import time
# for suppressing system warnings
import warnings
warnings.simplefilter(action = 'ignore', category = FutureWarning)
warnings.simplefilter(action = 'ignore', category = RuntimeWarning)
df = pd.read_csv('vrudata.csv')
original_df = df.copy()
###Output
_____no_output_____
###Markdown
Pipeline
###Code
class bold:
start = '\033[1m'
end = '\033[0m'
def information(df):
# Prints typically useful statistical information about given dataframe.
print("This dataframe consists of ", df.shape[1], " columns and", df.shape[0], " rows.")
print("This dataframe consists of ", df.isnull().sum().sum(), " null entires.")
print("This dataframe consists of ", df[df.duplicated()].shape[0], " duplicate rows.")
print(df[df['target'] == 1].shape[0], " rows belong to class target = 1.")
print(df[df['target'] == 0].shape[0], " rows belong to class target = 0.")
print("")
print(bold.start, "Notable statistics of numeric features in this dataset:", bold.end)
print("")
print(df.describe())
print("")
print(bold.start, "Object type of features in this dataset:", bold.end)
print("")
df.info()
def dist_box(x, title = ''):
fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize = (14.4, 7.2))
for ax in axes:
sns.kdeplot(x, shade = False, ax = ax)
kdeline = ax.lines[0]
xs = kdeline.get_xdata()
ys = kdeline.get_ydata()
if ax == axes[0]:
middle = x.mean()
sdev = x.std()
left = middle - sdev
right = middle + sdev
ax.set_title('Mean and SD')
else:
left, middle, right = np.percentile(x, [25, 50, 75])
ax.set_title('Median and Quartiles')
ax.vlines(middle, 0, np.interp(middle, xs, ys), ls = ':')
ax.fill_between(xs, 0, ys, alpha = 0.2)
ax.fill_between(xs, 0, ys, where = (left <= xs) & (xs <= right), interpolate = True, alpha = 0.2)
fig.suptitle(title, fontsize = 16)
plt.show()
def three_d_scatter(df, target = 'target'):
fig = plt.figure(figsize = (14.4, 10.8))
ax = fig.add_subplot(111, projection = '3d')
df_target1 = df[df[target] == 1]
df_target0 = df[df[target] == 0]
legend_properties = {}
ax.scatter(df_target1['acc_x'], df_target1['acc_y'], df_target1['acc_z'], marker = 'x', label = 'Cars')
ax.scatter(df_target0['acc_x'], df_target0['acc_y'], df_target0['acc_z'], marker = 'o', label = 'Non Cars')
plt.legend(loc = 'best', prop = legend_properties)
plt.show()
space = {'learning_rate' : hp.uniform('learning_rate', 0, 1),
'max_depth' : hp.uniform('max_depth', 4, 10),
'n_estimators' : hp.uniform('n_estimators', 100, 200),
'gamma': hp.uniform ('gamma', 1, 9),
'colsample_bytree' : hp.choice('colsample_bytree', [1]),
'seed' : 60}
def logit_objective(params):
clf = LogisticRegression(**params, random_state = 60, verbose = True)
cv = RepeatedStratifiedKFold(n_splits = 10, n_repeats = 3, random_state = 60)
score = cross_val_score(clf, X_train, y_train, cv = cv, scoring = 'f1_macro')
best_score = max(score)
loss = 1 - best_score
return {'loss': loss, 'params': params, 'status': STATUS_OK}
def xgb_objective(space):
clf = xgb.XGBClassifier(
learning_rate = space['learning_rate'],
max_depth = int(space['max_depth']),
n_estimators = space['n_estimators'],
objective = space['objective'],
gamma = space['gamma'],
reg_alpha = int(space['reg_alpha']),
min_child_weight = int(space['min_child_weight']),
)
evaluation = [(X_train, y_train), (X_test, y_test)]
clf.fit(X_train, y_train,
eval_set = evaluation, eval_metric = "auc",
early_stopping_rounds = 10, verbose = True)
pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, pred > 0.5)
return {'loss': -accuracy, 'status': STATUS_OK }
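# Note: the search space and the objective functions above are defined but never actually run in
# this notebook. As a minimal sketch (an assumption, not something executed here), they would
# typically be wired together with hyperopt's fmin as below, keeping in mind that `space` and
# `xgb_objective` would first need to agree on which keys are present:
#
#   from hyperopt import fmin, tpe, Trials
#   trials = Trials()
#   best_hyperparams = fmin(fn = xgb_objective, space = space, algo = tpe.suggest,
#                           max_evals = 50, trials = trials)
#   print(best_hyperparams)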
def f_score(y_pred, dtrain):
y_true = dtrain.get_label()
err = 1 - f1_score(y_true, np.round(y_pred))
return 'f1_err', err
def evaluate(ytest, y_pred, speed):
confusion = confusion_matrix(y_test, y_pred)
fbeta = fbeta_score(y_test, y_pred, average = 'binary', beta = 1.2)
print(bold.start, "Classification Report:", bold.end)
print("")
print(classification_report(y_test, y_pred))
print(bold.start, "F - 1.2 Score:", bold.end)
print("")
print("{:.6f}".format(fbeta))
print("")
print(bold.start, "Prediction speed:", bold.end)
print("")
print("{:.6f} seconds".format(speed))
def roc(model_string, y_test, y_pred):
roc_auc = roc_auc_score(y_test, y_pred)
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
plt.figure(figsize = (9.6, 7.2))
plt.grid()
plt.plot(fpr, tpr, label = model_string + ' (AUC = {:.2f})'.format(roc_auc))
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc = "best")
plt.show()
###Output
_____no_output_____
###Markdown
EDA
###Code
df.head(3)
###Output
_____no_output_____
###Markdown
Cleaning
###Code
information(df)
###Output
This dataframe consists of 4 columns and 120000 rows.
This dataframe consists of 0 null entires.
This dataframe consists of 8186 duplicate rows.
0 rows belong to class target = 1.
0 rows belong to class target = 0.
[1m Notable statistics of numeric features in this dataset: [0m
acc_x acc_y acc_z
count 120000.000000 120000.000000 120000.000000
mean -0.354549 5.367115 6.729311
std 1.931744 3.420114 2.588606
min -12.509735 -19.051361 -19.093689
25% -1.116619 1.902695 4.829160
50% -0.529119 6.922834 6.459327
75% -0.092177 8.182184 9.212952
max 36.782090 13.737244 60.973206
[1m Object type of features in this dataset: [0m
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 120000 entries, 0 to 119999
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 acc_x 120000 non-null float64
1 acc_y 120000 non-null float64
2 acc_z 120000 non-null float64
3 target 120000 non-null object
dtypes: float64(3), object(1)
memory usage: 3.7+ MB
###Markdown
- There are no missing values in the dataframe.
- The three features acc_x, acc_y, acc_z are of the desired data type (float, instead of a string-like object).
- Notable statistic: the data has perfectly balanced classes.
- Our target variable is categorical; map not_car, car to the values 0, 1.
###Code
df = pd.read_csv('vrudata.csv')
encoder = {'not car' : 0, 'car' : 1}
df['target'].replace(encoder, inplace = True)
df.head(3)
###Output
_____no_output_____
###Markdown
Transformation
- Use df.skew() to determine whether our features are skewed, i.e. whether transformation is needed.
- Visualization using distplot-boxplot combined plots for better interpretability.
###Code
df.skew(axis = 0)
###Output
_____no_output_____
###Markdown
- acc_x has large skewness and therefore needs transformation. The skewness of acc_y and acc_z is fine.
###Code
dist_box(df['acc_x'], 'acc_x')
###Output
_____no_output_____
###Markdown
- acc_x is right skewed. Perform log transformation on the data.
###Code
df['acc_x'] = np.log(abs(original_df['acc_x']))
dist_box(df['acc_x'], 'acc_x')
df.skew(axis = 0)
###Output
_____no_output_____
###Markdown
- The skewness of acc_x is now acceptable and much better than before.
###Code
dist_box(df['acc_y'], 'acc_y')
###Output
_____no_output_____
###Markdown
- acc_y is only slightly (skew coefficient < 1) but clearly left skewed (apparently mean < median < mode).
###Code
dist_box(df['acc_z'], 'acc_z')
###Output
_____no_output_____
###Markdown
Scaling
- Our features are not normally distributed but have no outliers. Therefore, use the min-max scaler.
###Code
scaler = MinMaxScaler()
df = pd.DataFrame(scaler.fit_transform(df), columns = df.columns)
df.head(3)
###Output
_____no_output_____
###Markdown
Checking Multicollinearity
###Code
corr = df.corr()
plt.figure(figsize = (9.6, 7.2))
sns.heatmap(corr, xticklabels = corr.columns, yticklabels = corr.columns, annot = True)
plt.title("Correlation Heatmap")
plt.show()
###Output
_____no_output_____
###Markdown
- The correlation between acc_y and acc_z is -0.6, rather strong, meaning there is some multicollinearity in the dataset. Consider removing one of the two variables if using logistic regression.
- acc_y is highly correlated with the target variable. A reasonable explanation: cars can achieve much larger accelerations in forward-backward motion (which is the y-axis of the phone's accelerometer, determined by how road users place the phone) compared to pedestrians or bikers.
###Code
sns.pairplot(df, hue = 'target')
plt.show()
###Output
_____no_output_____
###Markdown
Checking Linear Separability
###Code
three_d_scatter(df, target = 'target')
###Output
_____no_output_____
###Markdown
- According to the 3-D graph above, the data of the two classes might be linearly separable.
- In the following block, to determine whether the data is linearly separable, I trained a hard margin SVC on the data. If the training is able to find a margin for the hard margin SVM, the data is linearly separable; otherwise it is not.
- I used a linear SVC with the regularization parameter C set to infinity to obtain the desired hard margin SVC. For large values of C, the optimization will choose a smaller-margin hyperplane if that hyperplane does a better job of getting all the training points classified correctly. When C is set to infinity, we get a hard margin SVM.
###Code
X_train, X_test, y_train, y_test = train_test_split(df[{'acc_x', 'acc_y', 'acc_z'}],
df['target'],
test_size = 0.2,
random_state = 42)
svc = make_pipeline(StandardScaler(), LinearSVC(C = float('inf'), max_iter = 1000))
svc.fit(X_train, y_train)
print("Training accuracy:", svc.score(X_train, y_train))
print("Accuracy:", svc.score(X_test, y_test))
###Output
Accuracy: 0.8060416666666667
###Markdown
- The hard margin SVC found a margin and did not overfit. As a result, the data is linearly separable. Model Selection>We want to prioritize getting the best **classification accuracy on unknown data** and **classification speed**. We can sacrifice training speed and interpretability of the model since the model will be used by vehicles on roads in real time.>The entire dataset consists of 3 continuous feature columns, 1 categorical target column, and 120,000 rows. That is, we have a **small feature set** and a **large dataset**. There are no missing or questionable values in the dataset. The distribution of the transformed three variables are still slightly **skewed** left, right, and right in the order acc_x, acc_y, and acc_z. There are **no outliers** in any of the three. Data of the two classes are **linearly separable.**I will use **logistic regression** as my baseline model. The reasons I think logistic regression would serve as a great baseline model in this project are:1. It is efficient to train.2. It tends to work well andnot overfit with low dimensional datasets like ours.3. Works since our dataset is linearly separable.I will use **XGBoost** as my expected best performing model. The reasons I think XGBoost is the better choice over other boosting and bagging algorithms such as random forest are:1. Much better prediction speed compared to bagging algorithms.2. Great accuracy performance as it pushes the limit of computation resources for boosted tree algorithms.3. Sophisticated but not prone to overfitting as long as parameters are tuned properly.4. Handles large datasets well.5. Difficult to interpret but we can sacrifice interpretability thanks to the background of this project.Then, I will use SGD for optimization because:1. We have a large amount of data. Evaluation Metric selectionAccuracy is one of our two main concerns. According to our background information, **avoiding type II errors is more important** than avoiding type I error, while we want to avoid both. Therefore, I will use F-beta score with beta = 1.2 to evaluate the accuracy performance of our model.Prediction speed is the other main concern. I will use pandas library "time" to assess the prediction speed performance of our model. Logistic Regression Feature Selection- Reminder: The correlation between acc_y and acc_z is -0.6. Might have to remove one of the two variables to make sure the model has little to no multicollinearity (one of the assumptions of logistic regression).
###Code
X_train, X_test, y_train, y_test = train_test_split(df[{'acc_x', 'acc_y', 'acc_z'}],
df['target'],
test_size = 0.2,
random_state = 60)
X_train['intercept'] = 1
logit = sm.Logit(y_train, X_train)
result = logit.fit()
X_train = X_train.drop('intercept', axis = 1)
result.summary2()
###Output
_____no_output_____
###Markdown
- acc_x has a p-value of 0.276 > 0.05. Remove acc_x from our feature set.
###Code
X_train_logit = X_train.drop('acc_x', axis = 1)
X_test_logit = X_test.drop('acc_x', axis = 1)
###Output
_____no_output_____
###Markdown
Fitting the Model
###Code
logit = LogisticRegression()
logit.fit(X_train_logit, y_train)
print("Training accuracy: {:.4f}".format(logit.score(X_train_logit, y_train)))
temp = X_test.drop('acc_x', axis = 1)
#########
start_time = time.time()
logit_y_pred = logit.predict(temp)
logit_speed = time.time() - start_time
#########
del temp
print("Prediction Accuracy: {:.4f}".format(logit.score(X_test_logit, y_test)))
###Output
Prediction Accuracy: 0.9297
###Markdown
Evaluation
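For reference, the F-beta score computed in evaluate() is F_beta = (1 + beta^2) * precision * recall / (beta^2 * precision + recall); with beta = 1.2 (> 1), recall is weighted slightly more than precision, so missed cars (type II errors) cost more than false alarms.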
###Code
evaluate(y_test, logit_y_pred, logit_speed)
roc('Logistic Regression', y_test, logit_y_pred)
###Output
_____no_output_____
###Markdown
XGBoost Fitting the Model
###Code
space = {'learning_rate' : hp.uniform('learning_rate', 0, 1),
'max_depth' : hp.uniform('max_depth', 4, 10),
         'n_estimators' : hp.quniform('n_estimators', 100, 200, 10),  # upper bound and step size are assumed here
'gamma': hp.uniform ('gamma', 1, 9),
'colsample_bytree' : hp.choice('colsample_bytree', [1]),
'seed' : 60}
xgboost = xgb.XGBClassifier(learning_rate = 0.1,
n_estimators = 200,
seed = 60)
xgboost.fit(X_train, y_train)
print("Training accuracy: {:.4f}".format(xgboost.score(X_train, y_train)))
#########
start_time = time.time()
xgboost_y_pred = xgboost.predict(X_test)
xgboost_speed = time.time() - start_time
#########
print("Prediction Accuracy: {:.4f}".format(xgboost.score(X_test, y_test)))
def xgb_objective(space):
clf = xgb.XGBClassifier(
max_depth = int(space['max_depth']),
n_estimators = space['n_estimators'],
gamma = space['gamma'],
reg_alpha = int(space['reg_alpha']),
min_child_weight = int(space['min_child_weight']),
)
evaluation = [(X_train, y_train), (X_test, y_test)]
clf.fit(X_train, y_train,
eval_set = evaluation, eval_metric = "auc",
early_stopping_rounds = 10, verbose = True)
pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, pred > 0.5)
return {'loss': -accuracy, 'status': STATUS_OK }
###Output
_____no_output_____
###Markdown
Looking at the demographics: the age composition of the members belonging to the group
###Code
data[data.single == 10][data.belong == 1]
ages = {}
for i in range(16):
ages[i+1] = data[data.single == i+1][data.belong == 1].age.values
f,a = plt.subplots(4,4,figsize=(15,15))
a = a.ravel()
for idx, ax in enumerate(a):
ax.hist(ages[idx+1])
ax.set_title("member age when %s single" %(idx+1))
ax.set_xlim([11, 26])
ax.set_ylim([0,10])
plt.tight_layout()
###Output
/Users/susu/anaconda/lib/python3.5/site-packages/ipykernel/__main__.py:1: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
if __name__ == '__main__':
/Users/susu/anaconda/lib/python3.5/site-packages/ipykernel/__main__.py:4: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
###Markdown
Assuming that, after the 3rd-generation members join, every member leaves the group once they turn 26, look at the age composition each year for the next 6 years
###Code
age_16 = data[data.single == 16][data.belong == 1].age.values
age_3rd = data[data.term == 3][data.single == 16].age.values
age_all = np.r_[age_16,age_3rd]
ages = np.ones((6, len(age_all)))
ages[0,:] = age_all
for i in range(5):
ages[i+1, :] = ages[i, :] + 1
f,a = plt.subplots(2,3,figsize=(10,8))
a = a.ravel()
years = [2016, 2017, 2018, 2019, 2020, 2021]
for idx, ax in enumerate(a):
ax.hist(ages[idx, :][ages[idx, :]<26])
ax.set_title("%s" %years[idx])
ax.set_xlim([11, 26])
ax.set_ylim([0,14])
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Filter all data
This takes the raw results for both experiments and removes the following:
* Empty columns
* Privacy-sensitive columns
* Rejected workers
* Contradicting answers that were not from rejected workers
* Workers that did the first experiment twice
* iPhone and Android users
* Time spent on scenarios must be > 2
###Code
import os
import pandas as pd
inputFolder = folder+"/Data/1.raw/"
outputFolder = folder+"/Data/2.filtered/"
for f in os.listdir(inputFolder):
# ignore non csv files
if not f.endswith('csv'):
continue
print f
# load data
df = pd.read_csv(inputFolder+f)
# remove empty columns
print 'Columns:',df.shape[1]
df.dropna(axis=1, how='all', inplace=True)
print 'Columns:',df.shape[1],"Empty"
# remove rejected workers
print 'Rows:',df.shape[0]
df = df[df['_tainted'] == False]
print 'Rows:',df.shape[0],"Rejected"
# remove contradicting answers that were not rejected workers
if 'spam' not in df.columns:
df['spam'] = 0
df = df[df['spam'] == 0]
print 'Rows:',df.shape[0],"Contradicting"
# remove workers that did the first experiment twice
# take their first answer as the true data
if f == 'exp1_f1232791.csv':
workers = df['_worker_id']
if f == 'exp1_f1233325.csv':
df = df[~df['_worker_id'].isin(workers)]
print 'Rows:',df.shape[0],"Doubles"
# remove iphone and android users
df = df[df.apply(lambda row: 'iPhone' not in row['browser'], axis=1)]
print 'Rows:',df.shape[0],"iPhone"
df = df[df.apply(lambda row: 'Android' not in row['browser'], axis=1)]
print 'Rows:',df.shape[0],"Android"
# remove if time is not > 2
if 'time_none' in df.columns:
df = df[df['time_none'] > 2]
df = df[df['time_warning'] > 2]
df = df[df['time_danger'] > 2]
else:
df = df[df['time_suggestion'] > 2]
df = df[df['time_hours'] > 2]
df = df[df['time_numerical'] > 2]
print 'Rows:',df.shape[0],"time"
# overwrite privacy sensitive columns
df['_ip'] = 0
df['browser'] = 0
df['_city'] = 0
df['_region'] = 0
df['naam'] = 0
print 'Columns:',df.shape[1],"Privacy"
print 'Rows:',df.shape[0],"time"
df.to_csv(outputFolder+f, index=False)
#df.head()
###Output
exp1_f1232791.csv
Columns: 62
Columns: 37 Empty
Rows: 200
Rows: 178 Rejected
Rows: 178 Contradicting
Rows: 178 Doubles
Rows: 176 iPhone
Rows: 176 Android
Rows: 175 time
Columns: 38 Privacy
Rows: 175 time
exp1_f1233325.csv
Columns: 64
Columns: 38 Empty
Rows: 725
Rows: 613 Rejected
Rows: 610 Contradicting
Rows: 471 Doubles
Rows: 470 iPhone
Rows: 462 Android
Rows: 449 time
Columns: 38 Privacy
Rows: 449 time
exp2_f1233802.csv
Columns: 74
Columns: 43 Empty
Rows: 628
Rows: 535 Rejected
Rows: 535 Contradicting
Rows: 535 Doubles
Rows: 533 iPhone
Rows: 522 Android
Rows: 501 time
Columns: 43 Privacy
Rows: 501 time
###Markdown
Combined results of both experiments
###Code
import crowdtruth
class config():
inputColumns = ['a']
outputColumns = [
'experiment1','experiment2',
'alert_suggestion', 'alert_numerical',
'feeling_danger', 'feeling_hours',
'feeling_none', 'feeling_numerical',
'feeling_suggestion', 'feeling_warning',
'imageorder', 'income', 'indebt',
'nobuyreason',
'product', 'regret', 's_danger',
's_hours', 's_none', 's_numerical',
's_suggestion', 's_warning',
'time_danger', 'time_hours', 'time_none',
'time_numerical', 'time_pre', 'time_suggestion',
'time_warning', 'warnings']
# processing of a closed task
open_ended_task = False
annotation_vector = []#['s_none','s_warning','s_danger']
def processJudgments(self, judgments):
if 's_none' not in judgments.columns:
judgments['experiment1'] = 0
judgments['experiment2'] = 1
judgments['s_suggestion'] = judgments['s_suggestion'].map(lambda x: str(x)[:-1])
judgments['s_hours'] = judgments['s_hours'].map(lambda x: str(x)[:-1])
judgments['s_numerical'] = judgments['s_numerical'].map(lambda x: str(x)[:-1])
judgments['time_suggestion'] = judgments['time_suggestion'].astype('int')
judgments['time_hours'] = judgments['time_hours'].astype('int')
judgments['time_numerical'] = judgments['time_numerical'].astype('int')
judgments['s_none'] = 0
judgments['s_warning'] = 0
judgments['s_danger'] = 0
judgments['time_none'] = 0
judgments['time_warning'] = 0
judgments['time_danger'] = 0
judgments['feeling_none'] = 0
judgments['feeling_warning'] = 0
judgments['feeling_danger'] = 0
else :
judgments['experiment1'] = 1
judgments['experiment2'] = 0
judgments['alert_suggestion'] = -1
judgments['alert_numerical'] = -1
judgments['s_none'] = judgments['s_none'].map(lambda x: str(x)[:-1])
judgments['s_warning'] = judgments['s_warning'].map(lambda x: str(x)[:-1])
judgments['s_danger'] = judgments['s_danger'].map(lambda x: str(x)[:-1])
judgments['time_none'] = judgments['time_none'].astype('int')
judgments['time_warning'] = judgments['time_warning'].astype('int')
judgments['time_danger'] = judgments['time_danger'].astype('int')
judgments['s_suggestion'] = 0
judgments['s_hours'] = 0
judgments['s_numerical'] = 0
judgments['time_suggestion'] = 0
judgments['time_hours'] = 0
judgments['time_numerical'] = 0
judgments['feeling_suggestion'] = 0
judgments['feeling_hours'] = 0
judgments['feeling_numerical'] = 0
judgments['spam'] = '0'
#judgments['time_pre'] = judgments['time_pre'].astype('int')
#judgments.fillna(0, inplace=True)
#print judgments.head()
return judgments
data, config = crowdtruth.load(
directory = "/Users/benjamin/Box Sync/TFI Research/Data/2.filtered/",
config = config()
)
for p in config.output:
#print p
#print data['judgments']['output.'+p]
data['judgments']['output.'+p] = data['judgments']['output.'+p].apply(lambda x: ','.join(x))
#print data['judgments'].head()
import pandas as pd
# aggregate post questions
posts = {
'income' : 'income',
'nobuyreason' : 'nobuyreason',
'timing_suggestion' : 'alert_suggestion',
'timing_numerical' : 'alert_numerical',
'warnings' : 'warnings',
'affordcheck' : 'indebt',
'payontime' : 'regret'
}
data['judgments']['output.experiment1'] = data['judgments']['output.experiment1'].astype('int')
data['judgments']['output.experiment2'] = data['judgments']['output.experiment2'].astype('int')
for p in posts:
data[p] = data['judgments'].copy()
data[p] = data[p][['output.'+posts[p],'output.experiment1','output.experiment2','output.s_none','output.s_warning','output.s_danger','output.s_suggestion','output.s_hours','output.s_numerical']]
data[p].columns = [p,'experiment1','experiment2','none','warning','danger','suggestion','hours','numerical']
data[p]['none'] = data[p]['none'].apply(lambda x: 1 if x == 'submit' else 0)
data[p]['warning'] = data[p]['warning'].apply(lambda x: 1 if x == 'submit' else 0)
data[p]['danger'] = data[p]['danger'].apply(lambda x: 1 if x == 'submit' else 0)
data[p]['suggestion'] = data[p]['suggestion'].apply(lambda x: 1 if x == 'submit' else 0)
data[p]['hours'] = data[p]['hours'].apply(lambda x: 1 if x == 'submit' else 0)
data[p]['numerical'] = data[p]['numerical'].apply(lambda x: 1 if x == 'submit' else 0)
agg = {
p : 'count',
'experiment1' : 'sum',
'experiment2' : 'sum',
'none' : 'sum',
'warning' : 'sum',
'danger' : 'sum',
'suggestion' : 'sum',
'hours' : 'sum',
'numerical' : 'sum',
}
data[p] = data[p].groupby(p).agg(agg)
data[p]['none'] = data[p].apply(lambda row: row['none'] / float(row['experiment1']), axis = 1)
data[p]['warning'] = data[p].apply(lambda row: row['warning'] / float(row['experiment1']), axis = 1)
data[p]['danger'] = data[p].apply(lambda row: row['danger'] / float(row['experiment1']), axis = 1)
data[p]['suggestion'] = data[p].apply(lambda row: row['suggestion'] / float(row['experiment2']), axis = 1)
data[p]['hours'] = data[p].apply(lambda row: row['hours'] / float(row['experiment2']), axis = 1)
data[p]['numerical'] = data[p].apply(lambda row: row['numerical'] / float(row['experiment2']), axis = 1)
data[p] = data[p].T
#print data[p]
# financial responsibility
data['responsibility'] = data['judgments'].copy()
data['responsibility'] = data['responsibility'][['output.experiment1','output.experiment2','output.indebt','output.regret','output.s_none','output.s_warning','output.s_danger','output.s_suggestion','output.s_hours','output.s_numerical']]
data['responsibility'].columns = ['experiment1','experiment2','affordcheck','payontime','none','warning','danger','suggestion','hours','numerical']
data['responsibility']['affordcheck'] = data['responsibility']['affordcheck'].apply(lambda x: 1 if x == 'eens' else 0)
data['responsibility']['payontime'] = data['responsibility']['payontime'].apply(lambda x: 1 if x == 'eens' else 0)
data['responsibility']['responsibility'] = data['responsibility']['affordcheck'] + data['responsibility']['payontime']
data['responsibility']['none'] = data['responsibility']['none'].apply(lambda x: 1 if x == 'submit' else 0)
data['responsibility']['warning'] = data['responsibility']['warning'].apply(lambda x: 1 if x == 'submit' else 0)
data['responsibility']['danger'] = data['responsibility']['danger'].apply(lambda x: 1 if x == 'submit' else 0)
data['responsibility']['suggestion'] = data['responsibility']['suggestion'].apply(lambda x: 1 if x == 'submit' else 0)
data['responsibility']['hours'] = data['responsibility']['hours'].apply(lambda x: 1 if x == 'submit' else 0)
data['responsibility']['numerical'] = data['responsibility']['numerical'].apply(lambda x: 1 if x == 'submit' else 0)
agg = {
'experiment1' : 'sum',
'experiment2' : 'sum',
'none' : 'sum',
'warning' : 'sum',
'danger' : 'sum',
'suggestion' : 'sum',
'hours' : 'sum',
'numerical' : 'sum',
}
data['responsibility'] = data['responsibility'].groupby(['responsibility']).agg(agg)
data['responsibility']['none'] = data['responsibility'].apply(lambda row: row['none'] / float(row['experiment1']), axis = 1)
data['responsibility']['warning'] = data['responsibility'].apply(lambda row: row['warning'] / float(row['experiment1']), axis = 1)
data['responsibility']['danger'] = data['responsibility'].apply(lambda row: row['danger'] / float(row['experiment1']), axis = 1)
data['responsibility']['suggestion'] = data['responsibility'].apply(lambda row: row['suggestion'] / float(row['experiment2']), axis = 1)
data['responsibility']['hours'] = data['responsibility'].apply(lambda row: row['hours'] / float(row['experiment2']), axis = 1)
data['responsibility']['numerical'] = data['responsibility'].apply(lambda row: row['numerical'] / float(row['experiment2']), axis = 1)
data['responsibility'] = data['responsibility'].T
#print data['responsibility']
import pandas as pd
import numpy as np
#
# aggregate by time exposure
#
data['scenarios'] = data['judgments'][['output.s_none','output.s_warning','output.s_danger','output.s_suggestion','output.s_hours','output.s_numerical']].apply(pd.Series.value_counts).T
data['scenarios'].index = ['none','warning','danger','suggestion','hours','numerical']
rows = data['judgments'].index.size
#data['scenarios']['cancel_ratio'] = data['scenarios']['cancel'].apply(lambda x: float(x) / rows)
#data['scenarios']['submit_ratio'] = 0
data['scenarios']['submit_ratio'] = data['scenarios'].apply(lambda row: row['submit'] / (float(row['cancel']) + float(row['submit'])), axis=1)
#print data['scenarios']
data['scenarios']['duration_avg'] = 0
data['scenarios'].loc['none','duration_avg'] = np.asarray(data['judgments']['output.time_none'], dtype=np.float).mean()
data['scenarios'].loc['warning','duration_avg'] = np.asarray(data['judgments']['output.time_warning'], dtype=np.float).mean()
data['scenarios'].loc['danger','duration_avg'] = np.asarray(data['judgments']['output.time_danger'], dtype=np.float).mean()
data['scenarios'].loc['suggestion','duration_avg'] = np.asarray(data['judgments']['output.time_suggestion'], dtype=np.float).mean()
data['scenarios'].loc['hours','duration_avg'] = np.asarray(data['judgments']['output.time_hours'], dtype=np.float).mean()
data['scenarios'].loc['numerical','duration_avg'] = np.asarray(data['judgments']['output.time_numerical'], dtype=np.float).mean()
# scenarios
data['scenarios'] = data['judgments'].copy()
data['scenarios'] = data['scenarios'][['output.experiment1','output.experiment2','output.s_none','output.s_warning','output.s_danger','output.s_suggestion','output.s_hours','output.s_numerical']]
data['scenarios'].columns = ['experiment1','experiment2','none','warning','danger','suggestion','hours','numerical']
data['scenarios']['none'] = data['scenarios']['none'].apply(lambda x: 1 if x == 'submit' else 0)
data['scenarios']['warning'] = data['scenarios']['warning'].apply(lambda x: 1 if x == 'submit' else 0)
data['scenarios']['danger'] = data['scenarios']['danger'].apply(lambda x: 1 if x == 'submit' else 0)
data['scenarios']['suggestion'] = data['scenarios']['suggestion'].apply(lambda x: 1 if x == 'submit' else 0)
data['scenarios']['hours'] = data['scenarios']['hours'].apply(lambda x: 1 if x == 'submit' else 0)
data['scenarios']['numerical'] = data['scenarios']['numerical'].apply(lambda x: 1 if x == 'submit' else 0)
# t.tests
import scipy.stats
exp1 = data['scenarios'][data['scenarios']['experiment1'] == 1]
print 'none-warning t-test',scipy.stats.ttest_rel(exp1['none'],exp1['warning'])
agg = {
'experiment1' : 'sum',
'experiment2' : 'sum',
'none' : 'sum',
'warning' : 'sum',
'danger' : 'sum',
'suggestion' : 'sum',
'hours' : 'sum',
'numerical' : 'sum',
}
data['scenarios'] = data['scenarios'].groupby(['experiment1']).agg(agg)
data['scenarios']['none'] = data['scenarios'].apply(lambda row: row['none'] / float(row['experiment1']), axis = 1)
data['scenarios']['warning'] = data['scenarios'].apply(lambda row: row['warning'] / float(row['experiment1']), axis = 1)
data['scenarios']['danger'] = data['scenarios'].apply(lambda row: row['danger'] / float(row['experiment1']), axis = 1)
data['scenarios']['suggestion'] = data['scenarios'].apply(lambda row: row['suggestion'] / float(row['experiment2']), axis = 1)
data['scenarios']['hours'] = data['scenarios'].apply(lambda row: row['hours'] / float(row['experiment2']), axis = 1)
data['scenarios']['numerical'] = data['scenarios'].apply(lambda row: row['numerical'] / float(row['experiment2']), axis = 1)
#data['scenarios'] = data['scenarios'].T
#print data['scenarios'].head()
from scipy import stats
anova = data['judgments'].copy()
anova[['output.s_none','output.s_warning','output.s_danger']] = anova[['output.s_none','output.s_warning','output.s_danger']].apply(lambda x: x.replace('cancel',1))
anova[['output.s_none','output.s_warning','output.s_danger']] = anova[['output.s_none','output.s_warning','output.s_danger']].apply(lambda x: x.replace('submit',0))
F, p = stats.f_oneway(anova['output.s_none'], anova['output.s_warning'], anova['output.s_danger'])
print F,p
anova = data['judgments'].copy()
anova[['output.s_suggestion','output.s_hours','output.s_numerical']] = anova[['output.s_suggestion','output.s_hours','output.s_numerical']].apply(lambda x: x.replace('cancel',1))
anova[['output.s_suggestion','output.s_hours','output.s_numerical']] = anova[['output.s_suggestion','output.s_hours','output.s_numerical']].apply(lambda x: x.replace('submit',0))
F, p = stats.f_oneway(anova['output.s_suggestion'], anova['output.s_hours'], anova['output.s_numerical'])
print F,p
# feelings
def pos(feelings):
for f in feelings.split(','):
if f in ['tevreden','blij','opgewonden','opgelucht']:
return 1
return 0
def neg(feelings):
for f in feelings.split(','):
if f in ['bezorgd','schuldig','verdrietig','boos','beschaamd','ontevreden']:
return 1
return 0
def neutral(feelings):
for f in feelings.split(','):
if f in ['weetniet']:
return 1
return 0
feelings = data['judgments'].copy()
feelings = feelings[['output.s_none','output.s_warning','output.s_danger','output.feeling_none','output.feeling_warning','output.feeling_danger','output.s_suggestion','output.s_hours','output.s_numerical','output.feeling_suggestion','output.feeling_hours','output.feeling_numerical']]
feelings['s_none_pos'] = feelings['output.feeling_none'].apply(lambda x: pos(x))
feelings['s_none_neg'] = feelings['output.feeling_none'].apply(lambda x: neg(x))
feelings['s_none_neutral'] = feelings['output.feeling_none'].apply(lambda x: neutral(x))
feelings['s_warning_pos'] = feelings['output.feeling_warning'].apply(lambda x: pos(x))
feelings['s_warning_neg'] = feelings['output.feeling_warning'].apply(lambda x: neg(x))
feelings['s_warning_neutral'] = feelings['output.feeling_warning'].apply(lambda x: neutral(x))
feelings['s_danger_pos'] = feelings['output.feeling_danger'].apply(lambda x: pos(x))
feelings['s_danger_neg'] = feelings['output.feeling_danger'].apply(lambda x: neg(x))
feelings['s_danger_neutral'] = feelings['output.feeling_danger'].apply(lambda x: neutral(x))
feelings['s_suggestion_pos'] = feelings['output.feeling_suggestion'].apply(lambda x: pos(x))
feelings['s_suggestion_neg'] = feelings['output.feeling_suggestion'].apply(lambda x: neg(x))
feelings['s_suggestion_neutral'] = feelings['output.feeling_suggestion'].apply(lambda x: neutral(x))
feelings['s_hours_pos'] = feelings['output.feeling_hours'].apply(lambda x: pos(x))
feelings['s_hours_neg'] = feelings['output.feeling_hours'].apply(lambda x: neg(x))
feelings['s_hours_neutral'] = feelings['output.feeling_hours'].apply(lambda x: neutral(x))
feelings['s_numerical_pos'] = feelings['output.feeling_numerical'].apply(lambda x: pos(x))
feelings['s_numerical_neg'] = feelings['output.feeling_numerical'].apply(lambda x: neg(x))
feelings['s_numerical_neutral'] = feelings['output.feeling_numerical'].apply(lambda x: neutral(x))
#print feelings.head()
data['feelings'] = feelings
data['feeling_count'] = pd.DataFrame()
data['feeling_count']['none'] = pd.DataFrame([i for f in data['judgments']['output.feeling_none'].tolist() for i in f.split(',')]).loc[:,0].value_counts()
data['feeling_count']['warning'] = pd.DataFrame([i for f in data['judgments']['output.feeling_warning'].tolist() for i in f.split(',')]).loc[:,0].value_counts()
data['feeling_count']['danger'] = pd.DataFrame([i for f in data['judgments']['output.feeling_danger'].tolist() for i in f.split(',')]).loc[:,0].value_counts()
data['feeling_count']['suggestion'] = pd.DataFrame([i for f in data['judgments']['output.feeling_suggestion'].tolist() for i in f.split(',')]).loc[:,0].value_counts()
data['feeling_count']['hours'] = pd.DataFrame([i for f in data['judgments']['output.feeling_hours'].tolist() for i in f.split(',')]).loc[:,0].value_counts()
data['feeling_count']['numerical'] = pd.DataFrame([i for f in data['judgments']['output.feeling_numerical'].tolist() for i in f.split(',')]).loc[:,0].value_counts()
#data['feeling_count']['none'] = .value_counts()
#data['feeling_count']['warning'] = data['judgments']['output.feeling_warning'].value_counts()
print data['feeling_count']
crowdtruth.save(data, config, folder+'/Data/3.aggregated/')
###Output
_____no_output_____
###Markdown
Load Data
###Code
import os
import re
import json
import math
import numpy as np
import scipy.stats as stats
workdir = './'
pathdata = os.path.join(workdir, 'data.json')
pathqrels = os.path.join(workdir, 'judgments.json')
pathtests = os.path.join(workdir, 'tests.json')
data = {}
qrels = {}
tests = {}
with open(pathdata, 'r', encoding='utf-8') as f:
data = json.load(f)
with open(pathqrels, 'r', encoding='utf-8') as f:
qrels = json.load(f)
with open(pathtests, 'r', encoding='utf-8') as f:
tests = json.load(f)
###Output
_____no_output_____
###Markdown
Query Measures
###Code
# number of queries
def numq(data, qrels, tests, sid):
return [ len(data[sid]['searches']) ]
# unique number of queries
def numq_unique(data, qrels, tests, sid):
return [ len(set([s['q'] for s in data[sid]['searches']])) ]
# number of queries w/o any clicks
def numq_noclick(data, qrels, tests, sid):
    return [ len([1 for s in data[sid]['searches'] if len([1 for r in s['results'] if len(r['click'])>0])==0]) ]
# number of characters per query
def qlen(data, qrels, tests, sid):
return [ len(s['q']) for s in data[sid]['searches'] ]
# collapse consecutive duplicate queries (keep the first of each run)
def removeDups(queries):
nodups = []
for query in queries:
if len(nodups)==0 or query!=nodups[-1]:
nodups.append(query)
return nodups
def qsim(q1, q2, n):
ngrams1 = set( [ q1[i:i+n] for i in range(len(q1)-n+1) ] )
ngrams2 = set( [ q2[i:i+n] for i in range(len(q2)-n+1) ] )
return len(ngrams1.intersection(ngrams2)) / len(ngrams1.union(ngrams2)) if len(ngrams1.union(ngrams2))>0 else np.nan
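# Worked example for qsim: with n=1, q1='abc' yields {'a','b','c'} and q2='abd' yields
# {'a','b','d'}; the intersection has 2 elements and the union has 4, so the Jaccard
# similarity is 2/4 = 0.5.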
# unigram similarity between query pairs
def qsim_unigram(data, qrels, tests, sid):
queries = removeDups( [ s['q'] for s in data[sid]['searches'] ] )
return [ qsim(queries[i], queries[i+1], 1) for i in range(len(queries)-1) ]
# bigram similarity between query pairs
def qsim_bigram(data, qrels, tests, sid):
queries = removeDups( [ s['q'] for s in data[sid]['searches'] ] )
return [ qsim(queries[i], queries[i+1], 2) for i in range(len(queries)-1) ]
DVs_query = {
'numq': numq,
'numq_unique': numq_unique,
'numq_noclick': numq_noclick,
'qlen': qlen,
'qsim_unigram': qsim_unigram,
'qsim_bigram': qsim_bigram,
}
###Output
_____no_output_____
###Markdown
Click Measures
###Code
# number of clicks
def numclicks(data, qrels, tests, sid):
return [ np.sum( [ len(r['click']) for s in data[sid]['searches'] for r in s['results'] ] ) ]
# number of clicks by result category
def numclicks_misinfo(data, qrels, tests, sid):
tqrel = qrels[data[sid]['task']]
return [ np.sum( [ len(r['click']) for s in data[sid]['searches'] for r in s['results'] if tqrel[r['url']]=='misinfo' ] ) ]
# number of clicks by result category
def numclicks_correct(data, qrels, tests, sid):
tqrel = qrels[data[sid]['task']]
return [ np.sum( [ len(r['click']) for s in data[sid]['searches'] for r in s['results'] if tqrel[r['url']]=='correct' ] ) ]
# number of clicks by result category
def numclicks_nrel(data, qrels, tests, sid):
tqrel = qrels[data[sid]['task']]
return [ np.sum( [ len(r['click']) for s in data[sid]['searches'] for r in s['results'] if tqrel[r['url']]=='nrel' ] ) ]
# percentage of clicks by result category
def ratioclicks_misinfo(data, qrels, tests, sid):
num = numclicks_misinfo(data, qrels, tests, sid)[0]
total = numclicks(data, qrels, tests, sid)[0]
return [ num/total ]
# percentage of clicks by result category
def ratioclicks_correct(data, qrels, tests, sid):
num = numclicks_correct(data, qrels, tests, sid)[0]
total = numclicks(data, qrels, tests, sid)[0]
return [ num/total ]
# percentage of clicks by result category
def ratioclicks_nrel(data, qrels, tests, sid):
num = numclicks_nrel(data, qrels, tests, sid)[0]
total = numclicks(data, qrels, tests, sid)[0]
return [ num/total ]
# percentage of displayed results by category
def ratioresults_misinfo(data, qrels, tests, sid):
tqrel = qrels[data[sid]['task']]
return [ np.sum( [ 1 for s in data[sid]['searches'] for r in s['results'] if tqrel[r['url']]=='misinfo' ] ) / np.sum( [ 1 for s in data[sid]['searches'] for r in s['results'] ] ) ]
# percentage of displayed results by category
def ratioresults_correct(data, qrels, tests, sid):
tqrel = qrels[data[sid]['task']]
return [ np.sum( [ 1 for s in data[sid]['searches'] for r in s['results'] if tqrel[r['url']]=='correct' ] ) / np.sum( [ 1 for s in data[sid]['searches'] for r in s['results'] ] ) ]
# percentage of displayed results by category
def ratioresults_nrel(data, qrels, tests, sid):
tqrel = qrels[data[sid]['task']]
return [ np.sum( [ 1 for s in data[sid]['searches'] for r in s['results'] if tqrel[r['url']]=='nrel' ] ) / np.sum( [ 1 for s in data[sid]['searches'] for r in s['results'] ] ) ]
# percentage of displayed results by category
def misinfo_density(data, qrels, tests, sid):
tqrel = qrels[data[sid]['task']]
num_misinfo = len([r for s in data[sid]['searches'] for r in s['results'] if tqrel[r['url']]=='misinfo'])
num_correct = len([r for s in data[sid]['searches'] for r in s['results'] if tqrel[r['url']]=='correct'])
density = 0 if num_misinfo+num_correct==0 else num_misinfo/(num_misinfo+num_correct)
return [ density ]
# misinfo NDCG
def misinfo_ndcg(data, qrels, tests, sid):
tqrel = qrels[data[sid]['task']]
return [ rank_misinfo_ndcg(s['results'], tqrel) for s in data[sid]['searches'] ]
def rank_misinfo_ndcg(results, tqrel):
sum_dcg = 0
sum_idcg = 0
for i in range(len(results)):
discount = 1 / math.log2(i+2)
rel_r = 1 if tqrel[results[i]['url']]=='misinfo' else 0
rel_ir = 1
sum_dcg += discount*(2**rel_r-1)
sum_idcg += discount*(2**rel_ir-1)
return sum_dcg/sum_idcg
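# Example: for a displayed ranking ['misinfo', 'correct', 'misinfo'] the DCG is
# 1/log2(2) + 0 + 1/log2(4) = 1.5, the "ideal" DCG (every result misinfo) is
# 1 + 1/log2(3) + 0.5 ~= 2.131, so the misinfo NDCG is roughly 0.70.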
DVs_click = {
'numclicks': numclicks,
'numclicks_misinfo': numclicks_misinfo,
'numclicks_correct': numclicks_correct,
'numclicks_nrel': numclicks_nrel,
'ratioclicks_misinfo': ratioclicks_misinfo,
'ratioclicks_correct': ratioclicks_correct,
'ratioclicks_nrel': ratioclicks_nrel,
'ratioresults_misinfo': ratioresults_misinfo,
'ratioresults_correct': ratioresults_correct,
'ratioresults_nrel': ratioresults_nrel,
'misinfo_density': misinfo_density,
'misinfo_ndcg': misinfo_ndcg
}
###Output
_____no_output_____
###Markdown
Pre-task and Post-task Survey
###Code
def pre_familiar(data, qrels, tests, sid):
return [ data[sid]['presurvey']['familiar'] ]
def pre_explore(data, qrels, tests, sid):
return [ data[sid]['presurvey']['explore'] ]
def pre_interest(data, qrels, tests, sid):
return [ data[sid]['presurvey']['interest'] ]
def pre_credible(data, qrels, tests, sid):
return [ data[sid]['presurvey']['credible'] ]
def pre_expdiff(data, qrels, tests, sid):
return [ data[sid]['presurvey']['expdiff'] ]
def pre_capable(data, qrels, tests, sid):
return [ data[sid]['presurvey']['capable'] ]
def post_sufficient(data, qrels, tests, sid):
return [ data[sid]['postsurvey']['sufficient'] ]
def post_explore(data, qrels, tests, sid):
return [ data[sid]['postsurvey']['explore'] ]
def post_effort(data, qrels, tests, sid):
return [ data[sid]['postsurvey']['effort'] ]
def post_useful(data, qrels, tests, sid):
return [ data[sid]['postsurvey']['useful'] ]
def post_credible(data, qrels, tests, sid):
return [ data[sid]['postsurvey']['credible'] ]
def post_confidence(data, qrels, tests, sid):
return [ data[sid]['postsurvey']['confidence'] ]
DVs_survey = {
'pre_familiar': pre_familiar,
'pre_explore': pre_explore,
'pre_interest': pre_interest,
'pre_credible': pre_credible,
'pre_expdiff': pre_expdiff,
'pre_capable': pre_capable,
'post_sufficient': post_sufficient,
'post_explore': post_explore,
'post_effort': post_effort,
'post_useful': post_useful,
'post_credible': post_credible,
'post_confidence': post_confidence,
}
###Output
_____no_output_____
###Markdown
Pre-task and Post-task Tests
###Code
pretest_answers = {}
for testid in [x+1 for x in range(20)]:
pretest_answers[testid] = lambda data, qrels, tests, sid, testid=testid:[ data[sid]['pretest'][str(testid)] ]
posttest_answers = {}
for testid in [x+1 for x in range(20)]:
posttest_answers[testid] = lambda data, qrels, tests, sid, testid=testid:[ data[sid]['posttest'][str(testid)] ]
deltatest_answers = {}
for testid in [x+1 for x in range(20)]:
deltatest_answers[testid] = lambda data, qrels, tests, sid, testid=testid:[ data[sid]['posttest'][str(testid)] - data[sid]['pretest'][str(testid)] ]
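# Note: the testid=testid default argument in the lambdas above binds the current loop
# value at definition time; without it, every lambda would see only the final testid.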
def pre_correctness(data, qrels, tests, sid):
correct = 0
total = 0
s = data[sid]
for testid in s['pretest']:
g = tests[s['task']][testid]['answer']
u = s['pretest'][testid]
total += 1
if ( g==1 and u>3 ) or ( g==0 and u<3 ):
correct += 1
return [ correct/total ]
def post_correctness(data, qrels, tests, sid):
correct = 0
total = 0
s = data[sid]
for testid in s['posttest']:
g = tests[s['task']][testid]['answer']
u = s['posttest'][testid]
total += 1
if ( g==1 and u>3 ) or ( g==0 and u<3 ):
correct += 1
return [ correct/total ]
def diff_correctness(data, qrels, tests, sid):
return [ post_correctness(data,qrels,tests,sid)[0] - pre_correctness(data,qrels,tests,sid)[0] ]
def pre_deviation(data, qrels, tests, sid):
deviation = []
s = data[sid]
for testid in s['pretest']:
g = 5 if tests[s['task']][testid]['answer']==1 else 1
u = s['pretest'][testid]
deviation.append(np.abs(u-g))
return [np.mean(deviation)]
def post_deviation(data, qrels, tests, sid):
deviation = []
s = data[sid]
for testid in s['posttest']:
g = 5 if tests[s['task']][testid]['answer']==1 else 1
u = s['posttest'][testid]
deviation.append(np.abs(u-g))
return [np.mean(deviation)]
def diff_deviation(data, qrels, tests, sid):
return [ pre_deviation(data,qrels,tests,sid)[0] - post_deviation(data,qrels,tests,sid)[0] ]
DVs_test = {
'pre_correctness': pre_correctness,
'post_correctness': post_correctness,
'diff_correctness': diff_correctness,
'pre_deviation': pre_deviation,
'post_deviation': post_deviation,
'diff_deviation': diff_deviation
}
DVs_test_task1 = { 'pretest_answers_Q%d'%(x+1):pretest_answers[x+1] for x in range(10) }
DVs_test_task1.update( { 'posttest_answers_Q%d'%(x+1):posttest_answers[x+1] for x in range(10) } )
DVs_test_task1.update( { 'deltatest_answers_Q%d'%(x+1):deltatest_answers[x+1] for x in range(10) } )
DVs_test_task2 = { 'pretest_answers_Q%d'%(x+11):pretest_answers[x+11] for x in range(10) }
DVs_test_task2.update( { 'posttest_answers_Q%d'%(x+11):posttest_answers[x+11] for x in range(10) } )
DVs_test_task2.update( { 'deltatest_answers_Q%d'%(x+11):deltatest_answers[x+11] for x in range(10) } )
###Output
_____no_output_____
###Markdown
ANOVA Tests
###Code
from scipy.stats import f_oneway
def getDVvalues( data, qrels, tests, DV, setting, tasks ):
values = []
for sid in data:
s = data[sid]
if s['setting']==setting and s['task'] in tasks:
for v in DV(data, qrels, tests, sid):
if not np.isnan(v):
values.append(v)
return values
def star(pval):
if pval<0.001:
return '***'
if pval<0.01:
return '**'
if pval<0.05:
return '*'
return ''
def anovaDVs( data, qrels, tests, DVs, tasks ):
print(
'%-40s%10s %7s%10s %7s%10s %7s %s' % (
'DV',
'Low', '',
'Med', '',
'High', '',
'p (ANOVA)'
)
)
for DV in DVs:
values_low = getDVvalues( data, qrels, tests, DVs[DV], 'Low', tasks )
values_med = getDVvalues( data, qrels, tests, DVs[DV], 'Med', tasks )
values_high = getDVvalues( data, qrels, tests, DVs[DV], 'High', tasks )
mean_low = np.mean( values_low )
mean_med = np.mean( values_med )
mean_high = np.mean( values_high )
sem_low = stats.sem( values_low )
sem_med = stats.sem( values_med )
sem_high = stats.sem( values_high )
f, p = f_oneway( values_low, values_med, values_high )
print(
'%-40s%10.3f (%.3f)%10.3f (%.3f)%10.3f (%.3f) p=%.4f %s' % (
DV,
mean_low, sem_low,
mean_med, sem_med,
mean_high, sem_high,
p, star(p)
)
)
from scipy.stats import f_oneway
def anovaDVsLatex( data, qrels, tests, DVs, tasks ):
for DV in DVs:
values_low = getDVvalues( data, qrels, tests, DVs[DV], 'Low', tasks )
values_med = getDVvalues( data, qrels, tests, DVs[DV], 'Med', tasks )
values_high = getDVvalues( data, qrels, tests, DVs[DV], 'High', tasks )
mean_low = np.mean( values_low )
mean_med = np.mean( values_med )
mean_high = np.mean( values_high )
sem_low = stats.sem( values_low )
sem_med = stats.sem( values_med )
sem_high = stats.sem( values_high )
f, p = f_oneway( values_low, values_med, values_high )
print(
'%s & %.2f (%.2f) & %.2f (%.2f) & %.2f (%.2f) & $p=%.3f$ %s \\\\ \hline' % (
DV,
mean_low, sem_low,
mean_med, sem_med,
mean_high, sem_high,
p, star(p)
)
)
anovaDVs( data, qrels, tests, DVs_query, ['1','2'] )
anovaDVs( data, qrels, tests, DVs_click, ['1','2'] )
anovaDVs( data, qrels, tests, DVs_survey, ['1','2'] )
anovaDVsLatex( data, qrels, tests, DVs_survey, ['1','2'] )
anovaDVs( data, qrels, tests, DVs_test, ['1','2'] )
anovaDVs( data, qrels, tests, DVs_test_task1, ['1'] )
anovaDVs( data, qrels, tests, DVs_test_task2, ['2'] )
###Output
DV Low Med High p (ANOVA)
pretest_answers_Q11 4.150 (0.131) 4.150 (0.182) 4.150 (0.244) p=1.0000
pretest_answers_Q12 2.300 (0.219) 2.800 (0.258) 2.850 (0.284) p=0.2487
pretest_answers_Q13 4.200 (0.172) 3.500 (0.276) 3.900 (0.216) p=0.0976
pretest_answers_Q14 2.150 (0.233) 1.900 (0.191) 1.950 (0.185) p=0.6579
pretest_answers_Q15 3.550 (0.246) 3.750 (0.160) 3.550 (0.256) p=0.7692
pretest_answers_Q16 3.600 (0.266) 3.850 (0.196) 3.950 (0.153) p=0.4832
pretest_answers_Q17 4.450 (0.223) 4.600 (0.112) 4.300 (0.179) p=0.4943
pretest_answers_Q18 2.500 (0.295) 2.450 (0.211) 2.850 (0.302) p=0.5306
pretest_answers_Q19 3.100 (0.216) 3.000 (0.192) 3.450 (0.198) p=0.2645
pretest_answers_Q20 3.800 (0.200) 4.000 (0.178) 3.700 (0.179) p=0.5131
posttest_answers_Q11 2.400 (0.294) 2.200 (0.345) 3.350 (0.372) p=0.0442 *
posttest_answers_Q12 1.300 (0.164) 1.250 (0.160) 1.950 (0.303) p=0.0496 *
posttest_answers_Q13 3.500 (0.286) 3.150 (0.302) 3.250 (0.362) p=0.7265
posttest_answers_Q14 2.700 (0.291) 2.300 (0.231) 2.350 (0.274) p=0.5162
posttest_answers_Q15 2.550 (0.285) 2.300 (0.309) 2.650 (0.327) p=0.7104
posttest_answers_Q16 3.600 (0.255) 3.900 (0.143) 3.850 (0.221) p=0.5655
posttest_answers_Q17 4.650 (0.131) 4.650 (0.109) 4.500 (0.224) p=0.7538
posttest_answers_Q18 1.500 (0.170) 1.850 (0.264) 2.600 (0.255) p=0.0052 **
posttest_answers_Q19 4.600 (0.112) 4.200 (0.236) 3.900 (0.270) p=0.0821
posttest_answers_Q20 2.750 (0.280) 2.450 (0.294) 3.150 (0.293) p=0.2371
deltatest_answers_Q11 -1.750 (0.339) -1.950 (0.336) -0.800 (0.304) p=0.0359 *
deltatest_answers_Q12 -1.000 (0.262) -1.550 (0.320) -0.900 (0.383) p=0.3218
deltatest_answers_Q13 -0.700 (0.317) -0.350 (0.357) -0.650 (0.302) p=0.7155
deltatest_answers_Q14 0.550 (0.366) 0.400 (0.255) 0.400 (0.197) p=0.9100
deltatest_answers_Q15 -1.000 (0.308) -1.450 (0.336) -0.900 (0.315) p=0.4378
deltatest_answers_Q16 0.000 (0.218) 0.050 (0.170) -0.100 (0.191) p=0.8563
deltatest_answers_Q17 0.200 (0.172) 0.050 (0.088) 0.200 (0.213) p=0.7623
deltatest_answers_Q18 -1.000 (0.262) -0.600 (0.275) -0.250 (0.347) p=0.2115
deltatest_answers_Q19 1.500 (0.276) 1.200 (0.277) 0.450 (0.344) p=0.0468 *
deltatest_answers_Q20 -1.050 (0.328) -1.550 (0.303) -0.550 (0.235) p=0.0608
###Markdown
Plots Query Behavior
###Code
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
labels = [ 'Num. Queries **', 'Num. Queries (unique) **', 'Query Length *' ]
means_low = [
np.mean( getDVvalues( data, qrels, tests, numq, 'Low', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numq_unique, 'Low', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, qlen, 'Low', ['1','2'] ) )
]
means_med = [
np.mean( getDVvalues( data, qrels, tests, numq, 'Med', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numq_unique, 'Med', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, qlen, 'Med', ['1','2'] ) )
]
means_high = [
np.mean( getDVvalues( data, qrels, tests, numq, 'High', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numq_unique, 'High', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, qlen, 'High', ['1','2'] ) )
]
sem_low = [
stats.sem( getDVvalues( data, qrels, tests, numq, 'Low', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numq_unique, 'Low', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, qlen, 'Low', ['1','2'] ) )
]
sem_med = [
stats.sem( getDVvalues( data, qrels, tests, numq, 'Med', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numq_unique, 'Med', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, qlen, 'Med', ['1','2'] ) )
]
sem_high = [
stats.sem( getDVvalues( data, qrels, tests, numq, 'High', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numq_unique, 'High', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, qlen, 'High', ['1','2'] ) )
]
x = np.arange( len(labels) )
fig, ax = plt.subplots( 1, 1, figsize=(7,3), dpi=300 )
rects1 = ax.bar( x - 0.25, means_low, 0.23, label='Low', color=(0.8, 0.8, 0.8), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_low )
rects2 = ax.bar( x , means_med, 0.23, label='Med', color=(0.6, 0.6, 0.6), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_med )
rects3 = ax.bar( x + 0.25, means_high, 0.23, label='High', color=(0.4, 0.4, 0.4), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_high )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylim([0,16])
# ax.set_ylabel('Scores')
ax.set_title('Number of Queries and Query Length')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc='upper center')
ax.bar_label(rects1, padding=3, fmt='%.2f')
ax.bar_label(rects2, padding=3, fmt='%.2f')
ax.bar_label(rects3, padding=3, fmt='%.2f')
plt.savefig( os.path.join(workdir, 'query.png'), dpi=300 )
plt.show()
###Output
_____no_output_____
###Markdown
Query Similarity
###Code
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
labels = [ 'Query Sim (Unigram) *', 'Query Sim (Bigram) *' ]
means_low = [
np.mean( getDVvalues( data, qrels, tests, qsim_unigram, 'Low', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, qsim_bigram, 'Low', ['1','2'] ) )
]
means_med = [
np.mean( getDVvalues( data, qrels, tests, qsim_unigram, 'Med', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, qsim_bigram, 'Med', ['1','2'] ) )
]
means_high = [
np.mean( getDVvalues( data, qrels, tests, qsim_unigram, 'High', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, qsim_bigram, 'High', ['1','2'] ) )
]
sem_low = [
stats.sem( getDVvalues( data, qrels, tests, qsim_unigram, 'Low', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, qsim_bigram, 'Low', ['1','2'] ) )
]
sem_med = [
stats.sem( getDVvalues( data, qrels, tests, qsim_unigram, 'Med', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, qsim_bigram, 'Med', ['1','2'] ) )
]
sem_high = [
stats.sem( getDVvalues( data, qrels, tests, qsim_unigram, 'High', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, qsim_bigram, 'High', ['1','2'] ) )
]
x = np.arange( len(labels) )
fig, ax = plt.subplots( 1, 1, figsize=(7,3), dpi=300 )
rects1 = ax.bar( x - 0.25, means_low, 0.23, label='Low', color=(0.8, 0.8, 0.8), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_low )
rects2 = ax.bar( x , means_med, 0.23, label='Med', color=(0.6, 0.6, 0.6), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_med )
rects3 = ax.bar( x + 0.25, means_high, 0.23, label='High', color=(0.4, 0.4, 0.4), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_high )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylim([0,0.6])
# ax.set_ylabel('Scores')
ax.set_title('Similarities between Query Reformulations')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc='upper center')
ax.bar_label(rects1, padding=3, fmt='%.2f')
ax.bar_label(rects2, padding=3, fmt='%.2f')
ax.bar_label(rects3, padding=3, fmt='%.2f')
plt.savefig( os.path.join(workdir, 'qsim.png'), dpi=300 )
plt.show()
###Output
_____no_output_____
###Markdown
Click
###Code
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
labels = [ 'Overall', 'Misinfo. ***', 'Correct ***', 'Irrelevant' ]
means_low = [
np.mean( getDVvalues( data, qrels, tests, numclicks, 'Low', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numclicks_misinfo, 'Low', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numclicks_correct, 'Low', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numclicks_nrel, 'Low', ['1','2'] ) )
]
means_med = [
np.mean( getDVvalues( data, qrels, tests, numclicks, 'Med', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numclicks_misinfo, 'Med', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numclicks_correct, 'Med', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numclicks_nrel, 'Med', ['1','2'] ) )
]
means_high = [
np.mean( getDVvalues( data, qrels, tests, numclicks, 'High', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numclicks_misinfo, 'High', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numclicks_correct, 'High', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, numclicks_nrel, 'High', ['1','2'] ) )
]
sem_low = [
stats.sem( getDVvalues( data, qrels, tests, numclicks, 'Low', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numclicks_misinfo, 'Low', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numclicks_correct, 'Low', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numclicks_nrel, 'Low', ['1','2'] ) )
]
sem_med = [
stats.sem( getDVvalues( data, qrels, tests, numclicks, 'Med', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numclicks_misinfo, 'Med', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numclicks_correct, 'Med', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numclicks_nrel, 'Med', ['1','2'] ) )
]
sem_high = [
stats.sem( getDVvalues( data, qrels, tests, numclicks, 'High', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numclicks_misinfo, 'High', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numclicks_correct, 'High', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, numclicks_nrel, 'High', ['1','2'] ) )
]
x = np.arange( len(labels) )
fig, ax = plt.subplots( 1, 1, figsize=(7,3), dpi=300 )
rects1 = ax.bar( x - 0.25, means_low, 0.23, label='Low', color=(0.8, 0.8, 0.8), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_low )
rects2 = ax.bar( x , means_med, 0.23, label='Med', color=(0.6, 0.6, 0.6), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_med )
rects3 = ax.bar( x + 0.25, means_high, 0.23, label='High', color=(0.4, 0.4, 0.4), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_high )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylim([0,11])
# ax.set_ylabel('Scores')
ax.set_title('Number of Clicks by Result Categories')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc='upper right')
ax.bar_label(rects1, padding=3, fmt='%.2f')
ax.bar_label(rects2, padding=3, fmt='%.2f')
ax.bar_label(rects3, padding=3, fmt='%.2f')
plt.savefig( os.path.join(workdir, 'numclicks.png'), dpi=300 )
plt.show()
###Output
_____no_output_____
###Markdown
Click Ratio
###Code
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
labels = [ 'Misinfo. ***', 'Correct ***', 'Irrelevant' ]
means_low = [
100*np.mean( getDVvalues( data, qrels, tests, ratioclicks_misinfo, 'Low', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioclicks_correct, 'Low', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioclicks_nrel, 'Low', ['1','2'] ) )
]
means_med = [
100*np.mean( getDVvalues( data, qrels, tests, ratioclicks_misinfo, 'Med', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioclicks_correct, 'Med', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioclicks_nrel, 'Med', ['1','2'] ) )
]
means_high = [
100*np.mean( getDVvalues( data, qrels, tests, ratioclicks_misinfo, 'High', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioclicks_correct, 'High', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioclicks_nrel, 'High', ['1','2'] ) )
]
sem_low = [
100*stats.sem( getDVvalues( data, qrels, tests, ratioclicks_misinfo, 'Low', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioclicks_correct, 'Low', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioclicks_nrel, 'Low', ['1','2'] ) )
]
sem_med = [
100*stats.sem( getDVvalues( data, qrels, tests, ratioclicks_misinfo, 'Med', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioclicks_correct, 'Med', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioclicks_nrel, 'Med', ['1','2'] ) )
]
sem_high = [
100*stats.sem( getDVvalues( data, qrels, tests, ratioclicks_misinfo, 'High', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioclicks_correct, 'High', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioclicks_nrel, 'High', ['1','2'] ) )
]
x = np.arange( len(labels) )
fig, ax = plt.subplots( 1, 1, figsize=(7,3), dpi=300 )
rects1 = ax.bar( x - 0.25, means_low, 0.23, label='Low', color=(0.8, 0.8, 0.8), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_low )
rects2 = ax.bar( x , means_med, 0.23, label='Med', color=(0.6, 0.6, 0.6), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_med )
rects3 = ax.bar( x + 0.25, means_high, 0.23, label='High', color=(0.4, 0.4, 0.4), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_high )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylim([0,80])
plt.gca().set_yticklabels(['%d%%'%x for x in plt.gca().get_yticks()])
ax.set_title('Percentage of Clicks by Result Categories')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc='upper left')
ax.bar_label(rects1, padding=3, fmt='%.1f%%')
ax.bar_label(rects2, padding=3, fmt='%.1f%%')
ax.bar_label(rects3, padding=3, fmt='%.1f%%')
plt.savefig( os.path.join(workdir, 'ratioclicks.png'), dpi=300 )
plt.show()
###Output
/tmp/ipykernel_8079/114190323.py:49: UserWarning: FixedFormatter should only be used together with FixedLocator
plt.gca().set_yticklabels(['%d%%'%x for x in plt.gca().get_yticks()])
###Markdown
Displayed Results Ratio
###Code
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
labels = [ 'Misinfo. ***', 'Correct ***', 'Irrelevant **' ]
means_low = [
100*np.mean( getDVvalues( data, qrels, tests, ratioresults_misinfo, 'Low', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioresults_correct, 'Low', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioresults_nrel, 'Low', ['1','2'] ) )
]
means_med = [
100*np.mean( getDVvalues( data, qrels, tests, ratioresults_misinfo, 'Med', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioresults_correct, 'Med', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioresults_nrel, 'Med', ['1','2'] ) )
]
means_high = [
100*np.mean( getDVvalues( data, qrels, tests, ratioresults_misinfo, 'High', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioresults_correct, 'High', ['1','2'] ) ),
100*np.mean( getDVvalues( data, qrels, tests, ratioresults_nrel, 'High', ['1','2'] ) )
]
sem_low = [
100*stats.sem( getDVvalues( data, qrels, tests, ratioresults_misinfo, 'Low', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioresults_correct, 'Low', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioresults_nrel, 'Low', ['1','2'] ) )
]
sem_med = [
100*stats.sem( getDVvalues( data, qrels, tests, ratioresults_misinfo, 'Med', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioresults_correct, 'Med', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioresults_nrel, 'Med', ['1','2'] ) )
]
sem_high = [
100*stats.sem( getDVvalues( data, qrels, tests, ratioresults_misinfo, 'High', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioresults_correct, 'High', ['1','2'] ) ),
100*stats.sem( getDVvalues( data, qrels, tests, ratioresults_nrel, 'High', ['1','2'] ) )
]
x = np.arange( len(labels) )
fig, ax = plt.subplots( 1, 1, figsize=(7,3), dpi=300 )
rects1 = ax.bar( x - 0.25, means_low, 0.23, label='Low', color=(0.8, 0.8, 0.8), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_low )
rects2 = ax.bar( x , means_med, 0.23, label='Med', color=(0.6, 0.6, 0.6), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_med )
rects3 = ax.bar( x + 0.25, means_high, 0.23, label='High', color=(0.4, 0.4, 0.4), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_high )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylim([0,90])
plt.gca().set_yticklabels(['%d%%'%x for x in plt.gca().get_yticks()])
ax.set_title('Percentage of Displayed SERP Results by Categories')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc='upper left')
ax.bar_label(rects1, padding=3, fmt='%.1f%%')
ax.bar_label(rects2, padding=3, fmt='%.1f%%')
ax.bar_label(rects3, padding=3, fmt='%.1f%%')
plt.savefig( os.path.join(workdir, 'ratioresults.png'), dpi=300 )
plt.show()
###Output
/tmp/ipykernel_8079/2810928405.py:49: UserWarning: FixedFormatter should only be used together with FixedLocator
plt.gca().set_yticklabels(['%d%%'%x for x in plt.gca().get_yticks()])
###Markdown
Factual Questions
###Code
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
labels = [ 'Pre-task ($p=0.274$)', 'Post-task **', '$\Delta$(Pre-task, Post-task) ***' ]
means_low = [
100 * np.mean( getDVvalues( data, qrels, tests, pre_correctness, 'Low', ['1','2'] ) ),
100 * np.mean( getDVvalues( data, qrels, tests, post_correctness, 'Low', ['1','2'] ) ),
100 * np.mean( getDVvalues( data, qrels, tests, diff_correctness, 'Low', ['1','2'] ) )
]
means_med = [
100 * np.mean( getDVvalues( data, qrels, tests, pre_correctness, 'Med', ['1','2'] ) ),
100 * np.mean( getDVvalues( data, qrels, tests, post_correctness, 'Med', ['1','2'] ) ),
100 * np.mean( getDVvalues( data, qrels, tests, diff_correctness, 'Med', ['1','2'] ) )
]
means_high = [
100 * np.mean( getDVvalues( data, qrels, tests, pre_correctness, 'High', ['1','2'] ) ),
100 * np.mean( getDVvalues( data, qrels, tests, post_correctness, 'High', ['1','2'] ) ),
100 * np.mean( getDVvalues( data, qrels, tests, diff_correctness, 'High', ['1','2'] ) )
]
sem_low = [
100 * stats.sem( getDVvalues( data, qrels, tests, pre_correctness, 'Low', ['1','2'] ) ),
100 * stats.sem( getDVvalues( data, qrels, tests, post_correctness, 'Low', ['1','2'] ) ),
100 * stats.sem( getDVvalues( data, qrels, tests, diff_correctness, 'Low', ['1','2'] ) )
]
sem_med = [
100 * stats.sem( getDVvalues( data, qrels, tests, pre_correctness, 'Med', ['1','2'] ) ),
100 * stats.sem( getDVvalues( data, qrels, tests, post_correctness, 'Med', ['1','2'] ) ),
100 * stats.sem( getDVvalues( data, qrels, tests, diff_correctness, 'Med', ['1','2'] ) )
]
sem_high = [
100 * stats.sem( getDVvalues( data, qrels, tests, pre_correctness, 'High', ['1','2'] ) ),
100 * stats.sem( getDVvalues( data, qrels, tests, post_correctness, 'High', ['1','2'] ) ),
100 * stats.sem( getDVvalues( data, qrels, tests, diff_correctness, 'High', ['1','2'] ) )
]
x = np.arange( len(labels) )
fig, ax = plt.subplots( 1, 1, figsize=(7,3), dpi=300 )
rects1 = ax.bar( x - 0.25, means_low, 0.23, label='Low', color=(0.8, 0.8, 0.8), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_low )
rects2 = ax.bar( x , means_med, 0.23, label='Med', color=(0.6, 0.6, 0.6), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_med )
rects3 = ax.bar( x + 0.25, means_high, 0.23, label='High', color=(0.4, 0.4, 0.4), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_high )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylim([0,100])
plt.gca().set_yticklabels(['%d%%'%x for x in plt.gca().get_yticks()])
ax.set_title('Factual Question: Correct Rate')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc='upper right')
ax.bar_label(rects1, padding=3, fmt='%.1f%%')
ax.bar_label(rects2, padding=3, fmt='%.1f%%')
ax.bar_label(rects3, padding=3, fmt='%.1f%%')
plt.savefig( os.path.join(workdir, 'factq_correct.png'), dpi=300 )
plt.show()
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
labels = [ 'Pre-task ($p=0.091$)', 'Post-task **', '$\Delta$(Pre-task, Post-task) **' ]
means_low = [
np.mean( getDVvalues( data, qrels, tests, pre_deviation, 'Low', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, post_deviation, 'Low', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, diff_deviation, 'Low', ['1','2'] ) )
]
means_med = [
np.mean( getDVvalues( data, qrels, tests, pre_deviation, 'Med', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, post_deviation, 'Med', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, diff_deviation, 'Med', ['1','2'] ) )
]
means_high = [
np.mean( getDVvalues( data, qrels, tests, pre_deviation, 'High', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, post_deviation, 'High', ['1','2'] ) ),
np.mean( getDVvalues( data, qrels, tests, diff_deviation, 'High', ['1','2'] ) )
]
sem_low = [
stats.sem( getDVvalues( data, qrels, tests, pre_deviation, 'Low', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, post_deviation, 'Low', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, diff_deviation, 'Low', ['1','2'] ) )
]
sem_med = [
stats.sem( getDVvalues( data, qrels, tests, pre_deviation, 'Med', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, post_deviation, 'Med', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, diff_deviation, 'Med', ['1','2'] ) )
]
sem_high = [
stats.sem( getDVvalues( data, qrels, tests, pre_deviation, 'High', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, post_deviation, 'High', ['1','2'] ) ),
stats.sem( getDVvalues( data, qrels, tests, diff_deviation, 'High', ['1','2'] ) )
]
x = np.arange( len(labels) )
fig, ax = plt.subplots( 1, 1, figsize=(7,3), dpi=300 )
rects1 = ax.bar( x - 0.25, means_low, 0.23, label='Low', color=(0.8, 0.8, 0.8), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_low )
rects2 = ax.bar( x , means_med, 0.23, label='Med', color=(0.6, 0.6, 0.6), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_med )
rects3 = ax.bar( x + 0.25, means_high, 0.23, label='High', color=(0.4, 0.4, 0.4), edgecolor=(0.1,0.1,0.1), capsize=5, yerr=sem_high )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylim([0,2])
# ax.set_ylabel('Scores')
ax.set_title('Factual Question: Deviation from Correct Answer')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc='upper right')
ax.bar_label(rects1, padding=3, fmt='%.2f')
ax.bar_label(rects2, padding=3, fmt='%.2f')
ax.bar_label(rects3, padding=3, fmt='%.2f')
plt.savefig( os.path.join(workdir, 'factq_dev.png'), dpi=300 )
plt.show()
###Output
_____no_output_____
###Markdown
Data Analysis
###Code
df
###Output
_____no_output_____
###Markdown
The size of the dataset is quite small, and neural networks tend to overfit on small datasets. While a small MLP would perhaps fare ok, I prefer using XGBoost due to the tabular nature of the data.
Idea 1: Create an extra column from the car's manufacturer
We are going to assume that the first word of the `car name` column is the manufacturer. This will give us another datapoint. While physical characteristics like weight and horsepower should be much more indicative of the value of the target variable, one could assume that each manufacturer has their own proprietary technology that affects the mpg or something along those lines. Under the mantra of **_more data cannot hurt_**, we will evaluate the model with and without this added feature.
###Code
df_mfct = df.copy()
df_mfct['mfct'] = df_mfct['car name'].transform(lambda x: x.split(' ')[0])
df_mfct
Counter(df_mfct.mfct), f'NUMBER OF MANUFACTURERS: {len(set(Counter(df_mfct.mfct)))}'
###Output
_____no_output_____
###Markdown
Our assumption seems to hold pretty well, although there is some minor noise in the data: `volkswagen` and `vw` likely refer to the same manufacturer. The same goes for `mercedes-benz` and `mercedes`, `maxda` and `mazda` (cute typo!), and `toyota` and `toyouta`. Let's quickly solve this issue.
###Code
replacement_dict = {
'vw': 'volkswagen',
'vokswagen': 'volkswagen',
'mercedes': 'mercedes-benz',
'maxda': 'mazda',
'toyouta': 'toyota',
'chevroelt': 'chevrolet',
'chevy': 'chevrolet',
'capri': 'ford'
}
df_mfct['mfct'] = df_mfct['mfct'].transform(lambda x: replacement_dict[x] if x in replacement_dict else x)
Counter(df_mfct.mfct)
###Output
_____no_output_____
###Markdown
I will quickly drop exactly 1 row for which I cannot find any extra information out in the wild. It is for the greater good and won't hurt us too much.
###Code
df_mfct = df_mfct[df_mfct['mfct'] != 'hi']
###Output
_____no_output_____
###Markdown
Idea 2: Use the geographical position of the producer as another data point
I will admit, this might inch towards overthinking. But different manufacturers design cars for different consumer realities, e.g. USA-based producers might not have fuel efficiency (i.e. mpg) in mind when building a car, since gasoline is quite cheap in the country. Let us introduce a new categorical feature for where the manufacturer is located. On the other hand, big manufacturers tend to ship worldwide, and the dataset seems to be compiled from USA-based cars according to Kaggle's dataset metadata. This feature might be thoroughly useless, but it is worth investigating. We will look up the data on the internet for this task.
Nota bene: The origin feature might be doing this already. It is categorically encoded, with a domain of [1, 2, 3]. These might be the USA, Europe, and Asia, i.e. continents of origin, but the metadata says nothing about this. We will proceed with both, although some duplication is possible.
###Code
geo_dict = {
'chevrolet': 'USA',
'buick': 'USA',
'plymouth': 'USA',
'amc': 'USA',
'ford': 'USA',
'pontiac': 'USA',
'dodge': 'USA',
'toyota': 'Japan',
'datsun': 'Japan',
'volkswagen': 'Germany',
'peugeot': 'France',
'audi': 'Germany',
'saab': 'Sweden',
'bmw': 'Germany',
'mercury': 'USA',
'opel': 'Germany',
'fiat': 'Italy',
'oldsmobile': 'USA',
'chrysler': 'USA',
'mazda': 'Japan',
'volvo': 'Sweden',
'renault': 'France',
'honda': 'Japan',
'subaru': 'Japan',
'mercedes-benz': 'Germany',
'cadillac': 'USA',
'triumph': 'UK',
'nissan': 'Japan'
}
###Output
_____no_output_____
###Markdown
Pretty nice! Most companies are clustered around `USA`, `Japan` and `Germany`, which raises my hopes.
###Code
df_mfct_geo = df_mfct.copy()
df_mfct_geo['geo'] = df_mfct_geo['mfct'].apply(lambda x: geo_dict[x].lower())
df_mfct_geo
###Output
_____no_output_____
###Markdown
Idea 3: Change the model year from XX to XXXX representation
It seems the model year implicitly assumes the 20th century, and thus values are 19XX. Again, this is paranoid me, but I'd prefer to use the full representation in case someone tries to use this model on 21st-century cars, thus making predictions more robust. Also, I find the use of whitespace in column names deeply offensive.
###Code
df_mfct_geo_year = df_mfct_geo.copy()
df_mfct_geo_year['model_year'] = df_mfct_geo_year['model year'].apply(lambda x: int(f"19{x}"))
del df_mfct_geo_year['model year']
df_mfct_geo_year
###Output
_____no_output_____
###Markdown
Preparing the data
Dealing with missing horsepower values
6 rows have a missing horsepower value. These rows are marked with '?', turning the whole column into a string column. We replace '?' with NaN, turn the column into floats, and use the linear interpolation provided by pandas. **Update: interpolating increases MAE, just drop the rows**
###Code
df_mfct_geo_year_hp = df_mfct_geo_year.copy()
# df_mfct_geo_year_hp['horsepower'] = df_mfct_geo_year_hp['horsepower'].apply(lambda x: int(x) if x != '?' else np.nan)
# df_mfct_geo_year_hp['horsepower'] = df_mfct_geo_year_hp['horsepower'].interpolate()
###Output
_____no_output_____
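###Markdown
A minimal sketch of the "just drop the rows" alternative mentioned above. Hedged: apply it before the one-hot encoding below so the row indices stay aligned, and only if the '?' markers are still present at this point (they may already have been filtered out when the raw file was loaded).
###Code
# hedged sketch: drop the rows whose horsepower is still marked '?' and cast the column to float
mask = df_mfct_geo_year['horsepower'] != '?'
df_mfct_geo_year = df_mfct_geo_year[mask].copy()
df_mfct_geo_year['horsepower'] = df_mfct_geo_year['horsepower'].astype(float)
df_mfct_geo_year_hp = df_mfct_geo_year.copy()
###Output
_____no_output_____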
###Markdown
Hot encoding the `geo` feature and the `mfct` feature
###Code
mfct_ohe = pd.get_dummies(df_mfct_geo_year['mfct'], dummy_na=True)
mfct_ohe = mfct_ohe.rename(columns={np.nan: 'mfct_nan'})
mfct_ohe
geo_ohe = pd.get_dummies(df_mfct_geo_year['geo'], dummy_na=True)
geo_ohe = geo_ohe.rename(columns={np.nan: 'geo_nan'})
geo_ohe
###Output
_____no_output_____
###Markdown
Putting it all together Let's append the OHE columns together and drop the ones that they are replacing
###Code
df_concat = pd.concat([df_mfct_geo_year_hp, mfct_ohe, geo_ohe], axis=1)
df_concat
df_final = df_concat.drop(columns=['car name', 'mfct', 'geo'])
# for col in ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model_year']:
# scaler = StandardScaler()
# df_final[col] = scaler.fit_transform(df_final[col].values.reshape(-1, 1))
df_final
y, X = df_final[['mpg']].to_numpy().reshape(-1, ), df_final.drop(columns=['mpg']).to_numpy()
y.shape, X.shape
###Output
_____no_output_____
###Markdown
Model evaluation We will use cross-validation and the MAE to evaluate the model. We will test the effect of the hypotheses mentioned above regarding features. We should be careful about setting all random states in cross-validation and models to fixed values in order to get reproducible results. First hypothesis - enhanced columns
###Code
model = XGBRegressor(seed=42, random_state=42, colsample_bytree=1)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=42)
scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
scores = np.absolute(scores)
print('Mean MAE %.3f STD MAE %.3f' % (scores.mean(), scores.std()) )
###Output
/Users/bratu/Desktop/dsp/venv/lib/python3.9/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.
from pandas import MultiIndex, Int64Index
/Users/bratu/Desktop/dsp/venv/lib/python3.9/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.
from pandas import MultiIndex, Int64Index
/Users/bratu/Desktop/dsp/venv/lib/python3.9/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.
from pandas import MultiIndex, Int64Index
/Users/bratu/Desktop/dsp/venv/lib/python3.9/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.
from pandas import MultiIndex, Int64Index
/Users/bratu/Desktop/dsp/venv/lib/python3.9/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.
from pandas import MultiIndex, Int64Index
/Users/bratu/Desktop/dsp/venv/lib/python3.9/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.
from pandas import MultiIndex, Int64Index
/Users/bratu/Desktop/dsp/venv/lib/python3.9/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.
from pandas import MultiIndex, Int64Index
/Users/bratu/Desktop/dsp/venv/lib/python3.9/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.
from pandas import MultiIndex, Int64Index
###Markdown
Second hypothesis - plain columns By plain columns we understand columns that do not carry any information about manufacturer or geolocation.
###Code
list(geo_ohe.columns)
df_final_second_h = df_final.drop(columns=list(geo_ohe.columns) + list(mfct_ohe.columns))
df_final_second_h
y_sec_hyp, X_sec_hyp = df_final_second_h[['mpg']].to_numpy().reshape(-1, ), df_final_second_h.drop(columns=['mpg']).to_numpy()
y_sec_hyp.shape, X_sec_hyp.shape
model_sec_hyp = XGBRegressor(seed=42, random_state=42, colsample_bytree=1)
cv_sec_hyp = RepeatedKFold(n_splits=10, n_repeats=3, random_state=42)
scores_sec_hyp = cross_val_score(model_sec_hyp, X_sec_hyp, y_sec_hyp, scoring='neg_mean_absolute_error', cv=cv_sec_hyp, n_jobs=-1)
scores_sec_hyp = np.absolute(scores_sec_hyp)
print('Mean MAE %.3f STD MAE %.3f' % (scores_sec_hyp.mean(), scores_sec_hyp.std()) )
###Output
Mean MAE 2.049 STD MAE 0.298
###Markdown
Third hypothesis - plain columns minus origins Quick test to determine whether the origin column is relevant. It might already encode all the geolocation information I was speculating about above.
###Code
df_final_third_h = df_final.drop(columns=list(geo_ohe.columns) + list(mfct_ohe.columns) + ['origin'])
df_final_third_h
y_third_hyp, X_third_hyp = df_final_third_h[['mpg']].to_numpy().reshape(-1, ), df_final_third_h.drop(columns=['mpg']).to_numpy()
y_third_hyp.shape, X_third_hyp.shape
model_third_hyp = XGBRegressor(seed=42, random_state=42, colsample_bytree=1)
cv_third_hyp = RepeatedKFold(n_splits=10, n_repeats=3, random_state=42)
scores_third_hyp = cross_val_score(model_third_hyp, X_third_hyp, y_third_hyp, scoring='neg_mean_absolute_error', cv=cv_third_hyp, n_jobs=-1)  # use the third-hypothesis features (origin dropped)
scores_third_hyp = np.absolute(scores_third_hyp)
print('Mean MAE %.3f STD MAE %.3f' % (scores_third_hyp.mean(), scores_third_hyp.std()) )
###Output
Mean MAE 2.049 STD MAE 0.298
###Markdown
Fourth hypothesis: Only the `manufacturer` enhanced column matters
###Code
df_final_fourth_h = df_final.drop(columns=list(geo_ohe.columns))
df_final_fourth_h
y_fourth_h, X_fourth_h = df_final_fourth_h[['mpg']].to_numpy().reshape(-1, ), df_final_fourth_h.drop(columns=['mpg']).to_numpy()
y_fourth_h.shape, X_fourth_h.shape
model_fourth_hyp = XGBRegressor(seed=42, random_state=42, colsample_bytree=1)
cv_fourth_hyp = RepeatedKFold(n_splits=10, n_repeats=3, random_state=42)
scores_fourth_hyp = cross_val_score(model_fourth_hyp, X_fourth_h, y_fourth_h, scoring='neg_mean_absolute_error', cv=cv_fourth_hyp, n_jobs=-1)
scores_fourth_hyp = np.absolute(scores_fourth_hyp)
print('Mean MAE %.3f STD MAE %.3f' % (scores_fourth_hyp.mean(), scores_fourth_hyp.std()) )
###Output
Mean MAE 2.013 STD MAE 0.314
###Markdown
Fifth hypothesis: Only the `geo` enhanced column matters
###Code
df_final_fifth_h = df_final.drop(columns=list(mfct_ohe.columns))
df_final_fifth_h
y_fifth_h, X_fifth_h = df_final_fifth_h[['mpg']].to_numpy().reshape(-1, ), df_final_fifth_h.drop(columns=['mpg']).to_numpy()
y_fifth_h.shape, X_fifth_h.shape
model_fifth_hyp = XGBRegressor(seed=42, random_state=42, colsample_bytree=1)
cv_fifth_hyp = RepeatedKFold(n_splits=10, n_repeats=3, random_state=42)
scores_fifth_hyp = cross_val_score(model_fifth_hyp, X_fifth_h, y_fifth_h, scoring='neg_mean_absolute_error', cv=cv_fifth_hyp, n_jobs=-1)
scores_fifth_hyp = np.absolute(scores_fifth_hyp)
print('Mean MAE %.3f STD MAE %.3f' % (scores_fifth_hyp.mean(), scores_fifth_hyp.std()) )
###Output
Mean MAE 2.021 STD MAE 0.303
###Markdown
Conclusion We conclude that our enhanced columns indeed improve the performance of the model (hypotheses 1 + 4). Adding only the manufacturer feature fares slightly better (4) than adding both, although the scores are quite close and 1 has lower variation. These things considered, we will move forward with the engineered manufacturer feature into training, where we will leverage Bayesian search for hyperparameter optimization.
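As a hedged sketch of that training stage (assuming scikit-optimize is available; the search space below is illustrative only, not the one actually used), a Bayesian hyperparameter search over the fourth-hypothesis features could look like this:
###Code
# hedged sketch: Bayesian hyperparameter search with scikit-optimize (illustrative search space)
from skopt import BayesSearchCV
from skopt.space import Integer, Real

search = BayesSearchCV(
    estimator=XGBRegressor(seed=42, random_state=42),
    search_spaces={
        'n_estimators': Integer(50, 500),
        'max_depth': Integer(2, 10),
        'learning_rate': Real(1e-3, 0.3, prior='log-uniform'),
        'subsample': Real(0.5, 1.0),
    },
    n_iter=32,
    cv=RepeatedKFold(n_splits=10, n_repeats=3, random_state=42),
    scoring='neg_mean_absolute_error',
    random_state=42,
    n_jobs=-1,
)
# search.fit(X_fourth_h, y_fourth_h)
# print(search.best_params_, search.best_score_)
###Output
_____no_output_____
###Markdown
The cell below optionally saves the selected feature set to disk for the training stage.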
###Code
# with open('best.csv', 'w+') as fp:
# df_final_fourth_h.to_csv(fp)
###Output
_____no_output_____
###Markdown
__XDF latency analysis of LSL data streams: Unity (triggered) vs EEG (measured)__ __Situation__ Every 500ms a beep sound is played and the background color changes one frame from black to white. __Unity (90 FPS):__- Color change (black or white background)- Beep sound (audio playing or not) __EEG (1024 Hz):__- Photodiode (light sensor)- Microphone (audio sensor) __TODO__* [x] Read XDF file and header and select the right data (timestamps and values)* [x] Compute the timestamps from 0* [x] Visualize the data: unity audio vs microphone and unity color vs photodiode* [x] Compare the timestamps (length, duration, sample count..): Original vs Calculated vs FileInfo* [x] Descriptive statistics of timestamps distribution and plot* [x] Actual latency test: select the microphone and photodiode peaks (starting points) and compare with the unity ones* [x] Test all recordings* [x] Make and test long recordings (half an hour) and check with two computers (local network setup)* [ ] Find out why sometimes Unity timestamps start before the EEG ones* [ ] Find out why sometimes there are two Diode spikes during one colour change* [ ] ... __Dependencies__
###Code
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pyxdf
from scipy.signal import find_peaks
import seaborn as sns
###Output
_____no_output_____
###Markdown
__Files (recordings)__
###Code
files = os.listdir("data")  # get all files from the folder "data"
files.sort()  # sort them alphabetically
# filter out hidden/config files (removing items from a list while iterating over it can skip entries)
files = [file for file in files if not file.startswith(".")]
recordings = []
for i, file in enumerate(files):  # store and display all files
    recordings.append(file)
    print(f"recordings[{i}] = {file}")
###Output
recordings[0] = final_test.xdf
recordings[1] = ftest1.xdf
recordings[2] = ftest2.xdf
recordings[3] = ftest3.xdf
recordings[4] = ftest_build1.xdf
recordings[5] = ftest_build2.xdf
recordings[6] = ftest_build3.xdf
recordings[7] = ftest_lsl12.xdf
recordings[8] = long2.xdf
recordings[9] = long3.xdf
recordings[10] = long4.xdf
recordings[11] = short_new.xdf
recordings[12] = short_test.xdf
recordings[13] = short_test_old1.xdf
recordings[14] = test.xdf
###Markdown
__Helper functions__
###Code
a_ch_name = "Audio"
c_ch_name = "Diode"
e_ch_name = "openvibeSignal"
def select_streams(data):
global s_channels
s_channels = {data[i]["info"]["name"][0]: i for i in range(len(data))}
# Time values
a = s_channels[a_ch_name] # unity audio stream channel
c = s_channels[c_ch_name] # unity color stream channel
e = s_channels[e_ch_name] # eeg stream channel (diode and microphone)
return a, c, e
###Output
_____no_output_____
###Markdown
__Checking if EEG data was received before Unity data for all recordings__
###Code
print("EEG received first (✔/✗):")
for file in recordings: # check all files
streams, fileheader = pyxdf.load_xdf(f"data/{file}") # load a XDF file
a_ch, c_ch, e_ch = select_streams(streams) # select the data stream channels
a_t = streams[a_ch]["time_stamps"][0] # get the first unity timestamp
e_t = streams[e_ch]["time_stamps"][0] # get the first eeg timestamp
if a_t - e_t < 0: # unity received first (negative difference)
print(f"✗ {file}")
else: # eeg received first (positive difference)
print(f"✔ {file}")
###Output
EEG received first (✔/✗):
✔ final_test.xdf
✔ ftest1.xdf
✔ ftest2.xdf
✔ ftest3.xdf
✔ ftest_build1.xdf
✔ ftest_build2.xdf
✔ ftest_build3.xdf
✔ ftest_lsl12.xdf
✔ long2.xdf
✗ long3.xdf
✔ long4.xdf
✗ short_new.xdf
✔ short_test.xdf
✔ short_test_old1.xdf
✔ test.xdf
###Markdown
__Read XDF data__
###Code
file = recordings[11] # select a file
print(f"File: {file}") # display the file name
streams, fileheader = pyxdf.load_xdf(f"data/{file}") # load the XDF file
fileheader # just a dict describing the version and format of the XDF file
###Output
File: short_new.xdf
###Markdown
__Automatically select the stream channels__
###Code
a_ch, c_ch, e_ch = select_streams(streams)
s_channels
###Output
_____no_output_____
###Markdown
__Read EEG and Unity timestamps and sensor data__
###Code
u_ts = streams[a_ch]["time_stamps"] # unity timestamps
e_ts = streams[e_ch]["time_stamps"] # eeg timestamps
# Diode values
eeg = np.transpose(streams[e_ch]["time_series"])
# select the photodiode and microphone sensor information
# there's recordings with diode data on channels 65 and 66
# so we check which is the right one for this recording
if max(eeg[64]) != 0.0:
e_color = eeg[64] # channel 65 of the ANT amplifier
else:
e_color = eeg[65] # channel 66 of the ANT amplifier
e_audio = eeg[69] # channel 70 of the ANT amplifier
# select unity audio and background color change markers
# format: [currentFrame, value, timestamp]
u_color = np.transpose(streams[c_ch]["time_series"])
u_audio = np.transpose(streams[a_ch]["time_series"])
e_color = -e_color # invert diode data polarity, easier to visualize
###Output
_____no_output_____
###Markdown
__Preprocess data: calculate meaningful timestamps__
###Code
# calculate time values for unity and eeg relative to their first samples
e_time = [t - e_ts[0] for t in e_ts]
u_time = [t - u_ts[0] for t in u_ts]
# calculate the diff and shift the values left (negative) or right (positive)
diff = u_ts[0] - e_ts[0]
u_time = [i + diff for i in u_time]
# if diff is negative unity data was received before eeg
if diff < 0:
print("Unity data received first ✗")
if diff < -0.98: #so if the difference cannot be explained by normal EEG sampling
print("Something went wrong with this recording")
else:
print("EEG data received first ✔")
###Output
Unity data received first ✗
###Markdown
__Data preview__
###Code
# interactive: widget, not interactive: inline
%matplotlib inline
sns.set(rc={"figure.figsize": (14, 5)}) # set figure size
sns.set_style("darkgrid") # set seaborn plotting style
f_n = -0.2 # starting point (s)
s_n = 0.1 # ending point (s)
start_e = 1024 * f_n # eeg sampling rate = 1024
start_u = 90 * f_n # unity sampling rate = 90
five_sec = 1024 * s_n # number of eeg samples in s_n seconds
f_sec = 90 * s_n # number of unity samples in s_n seconds
u_height = 3500 # factor to improve unity (true/1) values visualization
e_t = np.array(e_time)
u_t = np.array(u_time)
# select range of timestamps, diode and microphone values (eeg)
e_time_selection = e_t[(e_t > f_n) & (e_t < s_n)]
e_color_selection = e_color[(e_t > f_n) & (e_t < s_n)]
e_audio_selection = e_audio[(e_t > f_n) & (e_t < s_n)]
# select a range of timestamps, color and audio values (unity)
u_time_selection = u_t[(u_t > f_n) & (u_t < s_n)]
u_color_selection = u_color[(u_t > f_n) & (u_t < s_n)]
u_audio_selection = u_audio[1][(u_t > f_n) & (u_t < s_n)]
# plot the selected range to compare eeg vs unity values
plt.plot(e_time_selection, e_color_selection * 0.05)
plt.plot(e_time_selection, e_audio_selection)
plt.plot(u_time_selection, u_color_selection * u_height, marker="o")
plt.plot(u_time_selection, u_audio_selection * u_height, marker="x")
plt.title(f"Sample: N = {five_sec}")
plt.ylabel("Sensor value")
plt.xlabel("Time (s)")
plt.xticks(np.arange(f_n, s_n, step=0.5))
labels = ["photosensor", "microphone", "color", "audio"]
plt.legend(labels, loc="upper right") # set the legend
plt.show()
###Output
_____no_output_____
###Markdown
__Timestamps comparison (original vs computed vs file info)__
###Code
# store unity and eeg timestamps as pandas series
# dataframe is not needed since it's 1D array
eeg_t = pd.Series(streams[e_ch]["time_stamps"])
unity_t = pd.Series(streams[a_ch]["time_stamps"])
print("Original timestamps")
print("===================")
u_start = u_ts[0]
u_end = u_ts[-1]
e_start = e_ts[0]
e_end = e_ts[-1]
u_length = u_end - u_start
e_length = e_end - e_start
print(f"EEG first timestamp: {e_start}")
print(f"EEG last timestamp: {e_end}")
print(f"EEG length: {e_length}")
print(f"EEG sample count: {len(e_ts)}")
print(f"Unity first timestamp: {u_start}")
print(f"Unity last timestamp: {u_end}")
print(f"Unity length: {u_length}")
print(f"Unity sample count: {len(u_ts)}")
print(f"Start difference: {abs(u_start - e_start)}")
print(f"Length difference: {abs(u_length - e_length)}")
print("")
print("Computed timestamps")
print("====================")
u_start = u_time[0]
# [-1:] returns the index and the type as well but [-1:].values[0] also works
u_end = u_time[-1]
e_start = e_time[0]
e_end = e_time[-1]
u_length = u_end - u_start
e_length = e_end - e_start
print(f"EEG first timestamp: {e_start}")
print(f"EEG last timestamp: {e_end}")
print(f"EEG length: {e_length}")
print(f"EEG sample count: {len(e_time)}")
print(f"Unity first timestamp: {u_start}")
print(f"Unity last timestamp: {u_end}")
print(f"Unity length: {u_length}")
print(f"Unity sample count: {len(u_time)}")
print(f"Start difference: {abs(u_start - e_start)}")
print(f"Length difference: {abs(u_length - e_length)}")
print("")
print("File info")
print("========")
e_info = streams[e_ch]["info"]
e_footer = streams[e_ch]["footer"]["info"]
u_info = streams[a_ch]["info"]
u_footer = streams[a_ch]["footer"]["info"]
print(f"EEG stream created at: {e_info['created_at'][0]}")
print(f"Unity stream created at: {u_info['created_at'][0]}")
print(f"EEG first timestamp: {e_footer['first_timestamp'][0]}")
print(f"EEG last timestamp: {e_footer['last_timestamp'][0]}")
print(f"EEG sample count: {e_footer['sample_count'][0]}")
print(f"Unity first timestamp: {u_footer['first_timestamp'][0]}")
print(f"Unity last timestamp: {u_footer['last_timestamp'][0]}")
print(f"Unity sample count: {u_footer['sample_count'][0]}")
###Output
Original timestamps
===================
EEG first timestamp: 2896.829572491063
EEG last timestamp: 2961.329951709267
EEG length: 64.50037921820376
EEG sample count: 66048
Unity first timestamp: 2896.824495070672
Unity last timestamp: 2961.3178294674435
Unity length: 64.49333439677139
Unity sample count: 5806
Start difference: 0.005077420390989573
Length difference: 0.007044821432373283
Computed timestamps
====================
EEG first timestamp: 0
EEG last timestamp: 64.50037921820376
EEG length: 64.50037921820376
EEG sample count: 66048
Unity first timestamp: -0.005077420390989573
Unity last timestamp: 64.4882569763804
Unity length: 64.49333439677139
Unity sample count: 5806
Start difference: 0.005077420390989573
Length difference: 0.007044821432373283
File info
========
EEG stream created at: 2865.822203900000
Unity stream created at: 74999.42783250001
EEG first timestamp: 2896.8412703
EEG last timestamp: 2961.3132359
EEG sample count: 66047
Unity first timestamp: 75012.3227705
Unity last timestamp: 75076.8157208
Unity sample count: 5805
###Markdown
__Descriptive statistics: EEG timestamps__
###Code
e_time_dist = [e_ts[i + 1] - e_ts[i] for i in range(len(e_ts) - 1)]
u_time_dist = [u_ts[i + 1] - u_ts[i] for i in range(len(u_ts) - 1)]
e_time_dist = pd.DataFrame(np.array(e_time_dist), columns=["eeg"])
u_time_dist = pd.DataFrame(np.array(u_time_dist), columns=["unity"])
e_time_dist.describe()
###Output
_____no_output_____
###Markdown
The EEG sampling intervals look very constant over time. __Descriptive statistics: Unity timestamps__
###Code
u_time_dist.describe()
###Output
_____no_output_____
###Markdown
That does not seem to be the case for the Unity samples. __Time sampling plot comparison__
###Code
%matplotlib inline
sns.set(rc={"figure.figsize": (3, 9)}) # set figure size
sns.set_style("whitegrid") # set seaborn plotting style
p = sns.boxplot(x=u_time_dist, orient="v")
p.set_title("Time distribution (s)")
plt.show()
###Output
_____no_output_____
###Markdown
__Calculating the Latencies__ __Diode__
###Code
# get all the diode peaks of the recording
e_col_peaks = find_peaks(e_color, height=10000, distance=400)
# here the number of unity peaks can be one larger than the number of eeg peaks
u_col_peaks = find_peaks(u_color)
# since we are only interested in the position of the peaks, not their height, let's only take the first element (the peak indices)
ec_peak = e_col_peaks[0]
uc_peak = u_col_peaks[0]
# now we have the index where each peak occurs; next we need the corresponding timestamp
ec_time = [e_time[e] for e in ec_peak]
uc_time = [u_time[e] for e in uc_peak]
# calculate the differences between EEG and unity
c_diff = []
length = len(uc_time)
# to make sure we do not start with j = 0 if EEG starts before Unity
if np.array(uc_time)[0] > 0.25:
j = 1
else:
j = 0
for i in range(length):
if (uc_time[i] - ec_time[j] > -0.25) and (uc_time[i] - ec_time[j] < 0):
# add the difference between EEG and unity peak
c_diff.append(uc_time[i] - ec_time[j])
if j < len(ec_time):
j = j + 1
else:
# add nan if there is no EEG peak
c_diff.append(np.nan)
# check the nan values (and compare them to the graph)
nan_val = []
# get the indices of all nan values so we can check whether a diode peak is actually missing
nan_val.append(np.argwhere(np.isnan(c_diff)))
n = np.ravel(nan_val) # to make it look nicer
# contains the unity timestamps where the diode peak is missing --> to check in the graph
time_st = np.array(uc_time)[np.array(n)]
print(time_st)
###Output
[0.36199287]
###Markdown
__Speaker__
###Code
# get all the microphone peaks of the recording
e_audio_peaks = find_peaks(e_audio, height=2100, distance=400)
# here the number of unity peaks can be one larger than the number of eeg peaks
u_audio_peaks = find_peaks(u_audio[1])
# since we are only interested in the position of the peaks, not their height, let's only take the first element (the peak indices)
ea_peak = e_audio_peaks[0]
ua_peak = u_audio_peaks[0]
# now we have the index where each peak occurs; next we need the corresponding timestamp
ea_time = [e_time[e] for e in ea_peak]
ua_time = [u_time[e] for e in ua_peak]
# calculate the differences between EEG and unity
a_diff = []
length = len(ua_time)
# to make sure we do not start with j = 0 if EEG starts before Unity (use the audio peak times here)
if np.array(ua_time)[0] > 0.25:
    j = 1
else:
    j = 0
for i in range(length):
if (ua_time[i] - ea_time[j] > -0.3) and (ua_time[i] - ea_time[j] < 0):
# print(uc_time[i] - ec_time[j])
a_diff.append(ua_time[i] - ea_time[j])
if j < len(ea_time):
j = j + 1
else:
a_diff.append(np.nan)
nan_val = []
# get the indices of all nan values so we can check whether an audio peak is actually missing
nan_val.append(np.argwhere(np.isnan(a_diff)))
n = np.ravel(nan_val) # to make it look nicer
time_st = np.array(ua_time)[np.array(n)] # contains the unity timestamps where the audio peak is missing --> to check in the graph
print(time_st)
###Output
[]
###Markdown
__Data Preview__
###Code
# interactive: widget, not interactive: inline
%matplotlib inline
sns.set(rc={"figure.figsize": (14, 5)}) # set figure size
sns.set_style("darkgrid") # set seaborn plotting style
f_n = 0.2 # starting point (s)
s_n = 0.5 # ending point (s)
start_e = 1024 * f_n # eeg sampling rate = 1024
start_u = 90 * f_n # unity sampling rate = 90
five_sec = 1024 * s_n # number of eeg samples in s_n seconds
f_sec = 90 * s_n # number of unity samples in s_n seconds
u_height = 3500 # factor to improve unity (true/1) values visualization
e_t = np.array(e_time)
u_t = np.array(u_time)
# select range of timestamps, diode and microphone values (eeg)
e_time_selection = e_t[(e_t > f_n) & (e_t < s_n)]
e_color_selection = e_color[(e_t > f_n) & (e_t < s_n)]
e_audio_selection = e_audio[(e_t > f_n) & (e_t < s_n)]
# select a range of timestamps, color and audio values (unity)
u_time_selection = u_t[(u_t > f_n) & (u_t < s_n)]
u_color_selection = u_color[(u_t > f_n) & (u_t < s_n)]
u_audio_selection = u_audio[1][(u_t > f_n) & (u_t < s_n)]
# plot the selected range to compare eeg vs unity values
plt.plot(e_time_selection, e_color_selection * 0.05)
plt.plot(e_time_selection, e_audio_selection)
plt.plot(u_time_selection, u_color_selection * u_height, marker="o")
plt.plot(u_time_selection, u_audio_selection * u_height, marker="x")
plt.title(f"Sample: N = {five_sec}")
plt.ylabel("Sensor value")
plt.xlabel("Time (s)")
plt.xticks(np.arange(f_n, s_n, step=0.5))
labels = ["photosensor", "microphone", "color", "audio"]
plt.legend(labels, loc="upper right") # set the legend
plt.show()
###Output
_____no_output_____
###Markdown
__Descriptive Statistics__
###Code
# Descriptive Statistics of colour peak diff
c_diff_data = pd.DataFrame(c_diff)
c_diff_data.describe()
###Output
_____no_output_____
###Markdown
* ftest1: -0.080 till -0.073* ftest2: -0.078 till -0.073* ftest3: -0.080 till -0.074* test: -0.100 till -0.072* ftest_build1: -0.077 till -0.074* ftest_build2: -0.080 till -0.074* ftest_build3: -0.080 till -0.074* ftest_lsl12: -* final test: -0.076 till -0.074
###Code
# Descriptive Statistics of audio peak diff
a_diff_data = pd.DataFrame(a_diff)
a_diff_data.describe()
###Output
_____no_output_____
###Markdown
Analysis of neural tangent kernel performance Given the pre-generated neural tangent kernel (NTK) output from the main code (by default in the directory `'./kernel_output'`), we examine the classification performance on the MNIST dataset of the exact, sparsified, and diagonal NTKs. Additionally, for the quantum algorithms of sparsified and diagonal NTKs, the condition number and the number of measurements required for post-selection/readout are verified to be bounded by $O(\log n)$.
###Code
import numpy as np
import glob
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('pdf', 'svg')
import matplotlib
import seaborn as sns
sns.set(font_scale=1.3)
sns.set_style("whitegrid", {"axes.facecolor": ".97"})
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Sparsity pattern First, a sparsity pattern is constructed in $\tilde O(n)$ time. In the proposed quantum algorithm, this is performed once when the data is stored in a binary QRAM data structure (also in $\tilde O(n)$ time). Given a sparsity pattern with at most $s = O(\log n)$ nonzero elements in any row or column, multiple neural networks (of different architectures) can be efficiently trained in logarithmic time using the same sparsity pattern.
###Code
def get_target_sparsity(m):
"""
Get expected matrix sparsity, chosen to be O(log n).
"""
return np.log(m.shape[0])
def block_diagonal(m):
"""
Prepare a block diagonal matrix [[1, 0], [0, 1]] corresponding to the two data classes
in the NTK matrix.
"""
class_size = m.shape[0]//2
ones_class = np.ones((class_size, class_size))
zeros_class = np.zeros((class_size, class_size))
class_0 = np.block([[ones_class, zeros_class], [zeros_class, zeros_class]])
class_1 = np.block([[zeros_class, zeros_class], [zeros_class, ones_class]])
return class_0, class_1
def get_sparsity_pattern(m):
"""
Prepare in O(n log n) time a sparsity pattern over the n x n matrix with a
pseudorandom generator.
"""
target_sparsity = get_target_sparsity(m)
# procedure produces an equivalent distribution of 1s and 0s as sampling individual
# matrix elements i.i.d. from binomial distribution
# since we'll take half of the generated indices, we set the probability of a nonzero
# element to be double the target sparsity
p_one = min(2*target_sparsity/m.shape[0], 1.0)
# for each row, sample the binomial distribution to get the number of nonzero indices
# matches in expectation get_target_sparsity(m), i.e. O(log n)
# reference the upper triangular indices according to the lower triangular indices
# can be done efficiently by mapping indices instead of copying matrix elements
one_filter = np.zeros(m.shape)
for i in range(m.shape[0]):
# find O(log n) indices
num_nonzero = np.random.randint(m.shape[0],
size=np.random.binomial(m.shape[0], p_one))
one_filter[i][num_nonzero] = 1
one_filter = np.tril(one_filter) + np.tril(one_filter, -1).T
# set all NTK matrix elements from opposite classes to be zero
# since the NTK is larger for more similar data examples, this biases the sparse
# matrix towards selecting more important examples
class_0, class_1 = block_diagonal(m)
one_filter = one_filter * (class_0 + class_1)
# make sure the diagonal is ones
np.fill_diagonal(one_filter, 1)
return one_filter
def sparsify_unbiased(m, sparsity_pattern):
"""
Sparsify NTK matrix `m` using a given sparsity pattern.
Used for the fully-connected network.
"""
return m * sparsity_pattern
def sparsify_biased(m, sparsity_pattern, t0, t1):
"""
Sparsify NTK matrix `m` using a given sparsity pattern, then additionally sparsify by
setting elements below `t0` and `t1` in classes 0 and 1 respectively to 0.
Used for the convolutional network.
"""
class_0, class_1 = block_diagonal(m)
one_filter = sparsity_pattern * ((m > t0) * class_0 + (m > t1) * class_1)
np.fill_diagonal(one_filter, 1)
kernel_train_sparse = m * one_filter
# we expect a factor of ~target_sparsity by Gershgorin's theorem
# empirically, the well-conditioning of the kernel makes it scale better than this
f = 0.76 * get_target_sparsity(m)**0.9
conditioning = f * np.diag(kernel_train_sparse)*np.eye(kernel_train_sparse.shape[0])
kernel_train_conditioned = kernel_train_sparse + conditioning
return kernel_train_conditioned
def compute_class_percentiles(m, percentile):
"""
Compute the truncation thresholds for `sparsify_biased`. This is evaluated over a
small subset (n = 16) of the training set to efficiently bias the sparsification
towards large off-diagonal elements.
"""
class_size = m.shape[0]//2
ones_class = np.ones((class_size, class_size))
zeros_class = np.zeros((class_size, class_size))
class_0 = np.block([[ones_class - np.eye(class_size), zeros_class],
[zeros_class, zeros_class]])
class_1 = np.block([[zeros_class, zeros_class],
[zeros_class, ones_class - np.eye(class_size)]])
t0 = np.percentile(np.abs(m * class_0), percentile)
t1 = np.percentile(np.abs(m * class_1), percentile)
return t0, t1
def get_sparsity(m):
"""
Get maximum number of nonzero elements in any row or column.
"""
return np.amax(np.sum(m != 0, axis=0))
###Output
_____no_output_____
###Markdown
We verify that the sparsity pattern does indeed scale like $O(\log n)$.
###Code
Ns = [16, 32, 64, 128, 256, 512]
sparsity_trials = 100
sparsities = np.zeros(len(Ns))
sparsities_std = np.zeros(len(Ns))
for i in range(len(Ns)):
N = Ns[i]
sparsities_N = []
for t in range(sparsity_trials):
sparsity_pattern = get_sparsity_pattern(np.zeros((N, N)))
s = get_sparsity(sparsity_pattern)
sparsities_N.append(s)
sparsities[i] = np.mean(sparsities_N)
sparsities_std[i] = np.std(sparsities_N)/np.sqrt(len(sparsities_N))
plt.figure(figsize=(5, 4))
plt.errorbar(Ns, sparsities, yerr=2*sparsities_std, fmt='o', c='C1')
plt.xlabel('Training set size')
plt.ylabel('Sparsity')
plt.xscale('log')
plt.xticks(Ns)
plt.minorticks_off()
plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Neural network performanceFour quantities characterize the infinite-width neural network and its sparsified and diagonal approximations:* Binary classification accuracy: all three networks are evaluated on a balanced sample of the MNIST test set (separate from the training set).* Condition number: to invert the sparsified NTK $\tilde K$ efficiently with a quantum linear systems algorithm, the condition number $\kappa(\tilde K)$ (defined to be the ratio of the largest to smallest singular values) must be bounded by $O(\log n)$.* Post-selection measurements: to prepare the quantum state $|k_*\rangle = \frac{1}{\sqrt{P}} \sum_{i=0}^{n-1} k_i |i\rangle$ of the NTK evaluated between test data $\mathbf x_*$ and the training data $\{\mathbf x_i\}$, we require $O(1/P)$ measurements for $P = \sum_i k_i^2$. Here, $k_i$ corresponds to the kernel $k(\mathbf x_*, \mathbf x_i)$ normalized and clipped to lie within $-1 \leq k_i \leq 1$. To efficiently prepare the state, the number of measurements must be bounded by $O(\log n)$.* Readout measurements: to perform the final readout, we estimate the sign of state overlap $o = \langle k_* | y \rangle$ (for the diagonal approximation) or $o = \langle k_* | \tilde K^{-1} | y\rangle$ (for the sparsified approximation). This requires $O(1/|o|^2)$ measurements, which must be bounded by $O(\log n)$ for efficient readout.
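As a toy illustration of the last two quantities (hypothetical values, not taken from the recorded kernels), the measurement counts can be estimated from a normalized kernel vector as follows:
###Code
# toy sketch (hypothetical values): post-selection and readout measurement estimates
np.random.seed(0)
n = 512
k = np.clip(np.random.normal(0.5, 0.3, size=n), -1, 1)  # normalized, clipped kernel values k_i
P = np.sum(k**2)
post_selection_measurements = n / P                      # repetitions for post-selection, following the N/p estimate used below
y = np.random.choice([-1.0, 1.0], size=n) / np.sqrt(n)   # amplitudes of |y>
k_state = k / np.sqrt(P)                                 # amplitudes of |k_*>
o = k_state @ y                                          # overlap o = <k_*|y>
readout_measurements = 1 / o**2 - 1                      # 1/o^2 - 1 measurements to resolve sign(o), as below
print(post_selection_measurements, readout_measurements)
###Output
_____no_output_____
###Markdown
The full analysis below computes these quantities from the stored kernel outputs.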
###Code
def classify(ntk_mean):
"""
Classify raw output of the NTK on the test dataset, assuming the test data is sampled
i.i.d. from the underlying data distribution (i.e. balanced).
"""
thresh = np.median(ntk_mean)
out = np.sign(ntk_mean - thresh)
return out
def get_file_prefix(fp, seed, N, trial):
"""
NTK output filename
"""
return fp + '_seed' + str(seed) + '_data' + str(N) + '_trial' + str(trial) + '_'
def analyze(file_prefix, Ns, sparsify_fnc, sparsify_args=(), sparsity_bootstraps=3,
plot_margin=0):
"""
Plot the accuracy, condition number, number of measurements for post-selection, and
number of measurements for readout.
"""
Ns = np.array(Ns)
accs_mean = []
accs_std = []
measurements = []
post_selections = []
measurements_std = []
post_selections_std = []
all_kappas = []
for n_ind in range(len(Ns)):
N = Ns[n_ind]
# load data
prefix = get_file_prefix(file_prefix, '*', N, '*')
suffixes = ['kernel_train.npy', 'kernel_test.npy', 'kernel_test_normalized.npy',
'train_label.npy', 'test_label.npy']
files = []
for suffix in suffixes:
files.append(sorted(glob.glob(prefix + '*' + suffix)))
all_dense = []
all_sparse = []
all_identity = []
all_scale = []
trial_p = []
trial_overlaps_diag = []
trial_overlaps_sparse = []
kappas = []
for i in range(len(files[0])):
# load files
kernel_train = np.load(files[0][i])
kernel_test = np.load(files[1][i])
kernel_test_normalized = np.load(files[2][i])
train_label = np.load(files[3][i])
test_label = np.load(files[4][i])
# bootstrap over different sparsity patterns
for s in range(sparsity_bootstraps):
# randomize sparsity pattern
sparsity_pattern = get_sparsity_pattern(kernel_train)
# sparsify kernel
kernel_train_sparse = sparsify_fnc(kernel_train, sparsity_pattern,
*sparsify_args)
kernel_train_identity = np.diag(kernel_train)*np.eye(kernel_train.shape[0])
# calculate condition number
eigs = np.linalg.eigvals(kernel_train_sparse)
kappa = np.amax(np.abs(eigs))/np.amin(np.abs(eigs))
kappas.append(kappa)
# solve A^{-1}y for A being the exact NTK, sparsified NTK, and diagonal NTK
inv_y_dense = np.linalg.inv(kernel_train) @ train_label
inv_y_dense /= np.sqrt(np.sum(inv_y_dense**2))
inv_y_sparse = np.linalg.inv(kernel_train_sparse) @ train_label
inv_y_sparse /= np.sqrt(np.sum(inv_y_sparse**2))
inv_y_diag = np.linalg.inv(kernel_train_identity) @ train_label
inv_y_diag /= np.sqrt(np.sum(inv_y_diag**2))
# prepare |k_*> state
ki = kernel_test_normalized / np.amax(np.abs(kernel_test_normalized))
p = np.sum(ki**2, axis=1)
ki = ki / np.sqrt(p[:, np.newaxis])
# prepare |y> state
ny = len(train_label)
y = train_label / np.sqrt(ny)
trial_p.append(p) # for post-selection measurements
trial_overlaps_diag.append(ki @ y) # <k_*|y>
trial_overlaps_sparse.append(ki @ inv_y_sparse) # <k_*|\tilde K^{-1}|y>
# classify with the exact, sparsified, and diagonal NTKs
mean_dense = kernel_test @ inv_y_dense
mean_sparse = kernel_test_normalized @ inv_y_sparse
mean_identity = kernel_test_normalized @ inv_y_diag
correct_dense = classify(mean_dense) == test_label
correct_sparse = classify(mean_sparse) == test_label
correct_identity = classify(mean_identity) == test_label
all_dense = np.concatenate((all_dense, correct_dense))
all_sparse = np.concatenate((all_sparse, correct_sparse))
all_identity = np.concatenate((all_identity, correct_identity))
all_scale.append([trial_p, trial_overlaps_diag, trial_overlaps_sparse])
# compute the mean and standard deviation of all quantities
all_out = [all_dense, all_sparse, all_identity]
accs_mean_s = []
accs_std_s = []
for i in range(len(all_out)):
correct = all_out[i]
accs_mean_s.append(np.mean(correct))
accs_std_s.append(np.std(correct)/np.sqrt(len(correct)))
accs_mean.append(accs_mean_s)
accs_std.append(accs_std_s)
scale = np.concatenate(all_scale, axis=1)
p = scale[0, :, :].flatten()
post_measurements = N/p
post_selections.append(np.median(post_measurements))
bootstraps = 5 # Poisson bootstrapping
medians = np.zeros(bootstraps)
for b in range(bootstraps):
r = np.random.poisson(size=post_measurements.shape)
pm = r * post_measurements
medians[b] = np.median(pm)
post_selections_std.append(np.std(medians)/np.sqrt(bootstraps))
overlaps = scale[1:, :, :].reshape(2, -1)
# enough measurements for stdev to be O(overlap)
these_measurements = 1/overlaps**2 - 1
measurements.append(np.median(these_measurements, axis=1))
bootstraps = 5 # Poisson bootstrapping
medians = np.zeros((bootstraps, 2))
for b in range(bootstraps):
r = np.random.poisson(size=these_measurements.shape)
pm = r * these_measurements
medians[b] = np.median(pm, axis=1)
measurements_std.append(np.std(medians, axis=0)/np.sqrt(bootstraps))
all_kappas.append(kappas)
accs_mean = np.array(accs_mean)
accs_std = np.array(accs_std)
post_selections = (np.array(post_selections), np.array(post_selections_std))
measurements = (np.array(measurements), np.array(measurements_std))
kappa = []
kappa_std = []
for row in all_kappas:
kappa.append(np.mean(row))
kappa_std.append(np.std(row)/np.sqrt(len(row)))
kappa = np.array(kappa)
kappa_std = np.array(kappa_std)
# plot everything
plt.figure(figsize=(5, 4))
plt.errorbar(Ns - Ns*plot_margin, accs_mean[:, 0], yerr=2*accs_std[:, 0],
label='Exact NTK', fmt='o')
plt.errorbar(Ns, accs_mean[:, 1], yerr=2*accs_std[:, 1], label='Sparse NTK', fmt='o')
plt.errorbar(Ns + Ns*plot_margin, accs_mean[:, 2], yerr=2*accs_std[:, 2],
label='Diagonal NTK', fmt='o')
plt.xlabel('Training set size')
plt.ylabel('Accuracy')
plt.xscale('log')
plt.xticks(Ns)
plt.minorticks_off()
plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
plt.figure(figsize=(5, 4))
plt.errorbar(Ns, kappa, yerr=2*kappa_std, fmt='o', c='C1')
plt.xlabel('Training set size')
plt.ylabel('Condition number')
plt.xscale('log')
plt.xticks(Ns)
plt.minorticks_off()
plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.gca().get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter(
useOffset=False))
plt.tight_layout()
plt.show()
plt.figure(figsize=(5, 4))
plt.errorbar(Ns, post_selections[0], yerr=2*post_selections[1], fmt='o')
plt.xlabel('Training set size')
plt.ylabel('Measurements (post-selection)')
plt.xscale('log')
plt.xticks(Ns)
plt.minorticks_off()
plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.tight_layout()
plt.show()
plt.figure(figsize=(5, 4))
plt.errorbar(Ns - Ns*plot_margin/2, measurements[0][:, 1],
yerr=2*measurements[1][:, 1], label='Sparse NTK', c='C1', fmt='o')
plt.errorbar(Ns + Ns*plot_margin/2, measurements[0][:, 0],
yerr=2*measurements[1][:, 0], label='Diagonal NTK', c='C2', fmt='o')
plt.xlabel('Training set size')
plt.ylabel('Measurements (readout)')
plt.xscale('log')
plt.xticks(Ns)
plt.minorticks_off()
plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.legend()
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Plot the results for the fully-connected neural network.
###Code
analyze('kernel_output/fully-connected', Ns, sparsify_unbiased, plot_margin=1/8)
###Output
_____no_output_____
###Markdown
Estimate the appropriate normalization threshold for preparing $|k_*\rangle$ based on a small subset ($n=16$) of the training set, and then plot the results for the convolutional neural network.
###Code
fp = 'kernel_output/convolutional'
base_n = 16
base_ntk = np.load(sorted(glob.glob(get_file_prefix(fp, '*', base_n, '*') + 'kernel_train.npy'))[0])
sparsify_args = compute_class_percentiles(base_ntk, 90)
analyze(fp, Ns, sparsify_biased, sparsify_args=sparsify_args, plot_margin=1/8)
###Output
_____no_output_____
###Markdown
Analysis of Sales data Dataset The given dataset contains monthly total sales of a company for the period 2013-2016. Objectives 1. To analyse the sales data and understand the performance of the company. 2. Find patterns and construct a model to forecast future sales. Load sales data and create visualization
###Code
from time_series import TimeSeries
# Imports for data visualization
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
from matplotlib.dates import DateFormatter
from matplotlib import dates as mpld
register_matplotlib_converters()
ts = TimeSeries('dataset/monthly_sales.csv', train_size=0.8)
print("Sales Data")
print(ts.data.describe())
print("Head and Tail of the time series")
print(ts.data.head(5).iloc[:,1])
print(ts.data.tail(5).iloc[:,1])
# Plot of raw time series data
plt.plot(ts.data.index,ts.data.sales)
plt.gcf().autofmt_xdate()
date_format = mpld.DateFormatter('%Y-%m')
plt.gca().xaxis.set_major_formatter(date_format)
plt.title("Sales Data Analysis (2013-2016)")
plt.xlabel("Time")
plt.ylabel("Sales")
plt.show()
###Output
Sales Data
sales
count 48.000000
mean 47858.351667
std 25221.124187
min 4519.890000
25% 29790.100000
50% 39339.515000
75% 65833.345000
max 118447.830000
Head and Tail of the time series
date
2013-01-01 14236.90
2013-02-01 4519.89
2013-03-01 55691.01
2013-04-01 28295.35
2013-05-01 23648.29
Name: sales, dtype: float64
date
2016-08-01 63120.89
2016-09-01 87866.65
2016-10-01 77776.92
2016-11-01 118447.83
2016-12-01 83829.32
Name: sales, dtype: float64
###Markdown
Seasonal Decompose of the time series Seasonal decompose is a method used to decompose a time series into the following components: - Level - average value in the series. - Trend - increasing or decreasing value in the series. - Seasonality - repeating short-term cycle in the series. - Noise - random variation in the series. Analysing the components individually provides better insights for model selection.
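In the additive model the observed series is decomposed as $y_t = T_t + S_t + R_t$ (trend plus seasonal plus residual components), while in the multiplicative model $y_t = T_t \times S_t \times R_t$; both variants are fitted and compared below.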
###Code
from statsmodels.tsa.seasonal import seasonal_decompose
result_add = seasonal_decompose(ts.data.iloc[:,1],period=12,model='additive')
result_add.plot()
plt.gcf().autofmt_xdate()
date_format = mpld.DateFormatter('%y-%m')
plt.gca().xaxis.set_major_formatter(date_format)
result_mul = seasonal_decompose(ts.data.iloc[:,1],period=12,model='multiplicative')
result_mul.plot()
plt.gcf().autofmt_xdate()
date_format = mpld.DateFormatter('%y-%m')
plt.gca().xaxis.set_major_formatter(date_format)
plt.show()
###Output
_____no_output_____
###Markdown
Observations from Seasonal Decompose 1. The time series seems to roughly have a constant seasonality but has an overall **increasing trend**. 2. A slightly decreasing trend is observed until 2014-07; after that, an increasing trend is observed. Model Selection From the above observations we can evidently conclude that the **Holt-Winters additive model** would be an appropriate choice, as there is a constant seasonality component along with an increasing trend.
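For reference, one standard formulation of the additive Holt-Winters recursions with seasonal period $m = 12$ is: level $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha)(\ell_{t-1} + b_{t-1})$, trend $b_t = \beta (\ell_t - \ell_{t-1}) + (1-\beta) b_{t-1}$, seasonal $s_t = \gamma (y_t - \ell_{t-1} - b_{t-1}) + (1-\gamma) s_{t-m}$, with the $h$-step forecast $\hat{y}_{t+h} = \ell_t + h\,b_t + s_{t+h-m(\lfloor (h-1)/m \rfloor + 1)}$. The `statsmodels` implementation used below additionally supports a damped trend via the `damping_slope` parameter.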
###Code
from statsmodels.tsa.holtwinters import ExponentialSmoothing
# Scaling down the data by a factor of 1000
ts.set_scale(1000)
# Training the model
model = ExponentialSmoothing(ts.train,trend='additive',seasonal='additive',seasonal_periods=12).fit(damping_slope=1)
plt.plot(ts.train.index,ts.train,label="Train")
plt.plot(ts.test.index,ts.test,label="Actual")
# Create a 5 year forecast
plt.plot(model.forecast(60),label="Forecast")
plt.legend(['Train','Actual','Forecast'])
plt.gcf().autofmt_xdate()
date_format = mpld.DateFormatter('%Y-%m')
plt.gca().xaxis.set_major_formatter(date_format)
plt.title("Sales Data Analysis (2013-2016)")
plt.xlabel("Time")
plt.ylabel("Sales (x1000)")
plt.show()
###Output
_____no_output_____
###Markdown
Validation of the model Let's do a brief comparison between the additive and the multiplicative models.
###Code
from statsmodels.tsa.holtwinters import ExponentialSmoothing
ts = TimeSeries('dataset/monthly_sales.csv', train_size=0.8)
# Additive model
model_add = ExponentialSmoothing(ts.data.iloc[:,1],trend='additive',seasonal='additive',seasonal_periods=12,damped=True).fit(damping_slope=0.98)
prediction = model_add.predict(start=ts.data.iloc[:,1].index[0],end=ts.data.iloc[:,1].index[-1])
plt.plot(ts.data.iloc[:,1].index,ts.data.iloc[:,1],label="Train")
plt.plot(ts.data.iloc[:,1].index,prediction,label="Model")
plt.plot(model_add.forecast(60))
plt.legend(['Actual','Model','Forecast'])
plt.gcf().autofmt_xdate()
date_format = mpld.DateFormatter('%Y-%m')
plt.gca().xaxis.set_major_formatter(date_format)
plt.title("Sales Data Analysis (2013-2016)")
plt.xlabel("Time")
plt.ylabel("Sales")
plt.show()
# Multiplicative model
model_mul = ExponentialSmoothing(ts.data.iloc[:,1],trend='additive',seasonal='multiplicative',seasonal_periods=12,damped=True).fit()
prediction = model_mul.predict(start=ts.data.iloc[:,1].index[0],end=ts.data.iloc[:,1].index[-1])
plt.plot(ts.data.iloc[:,1].index,ts.data.iloc[:,1],label="Train")
plt.plot(ts.data.iloc[:,1].index,prediction,label="Model")
plt.plot(model_mul.forecast(60))
plt.legend(['Actual','Model','Forecast'])
plt.gcf().autofmt_xdate()
date_format = mpld.DateFormatter('%Y-%m')
plt.gca().xaxis.set_major_formatter(date_format)
plt.title("Sales Data Analysis (2013-2016)")
plt.xlabel("Time")
plt.ylabel("Sales")
plt.show()
print(model_add.summary())
print(model_mul.summary())
###Output
ExponentialSmoothing Model Results
================================================================================
Dep. Variable: endog No. Observations: 48
Model: ExponentialSmoothing SSE 5088109579.122
Optimized: True AIC 920.991
Trend: Additive BIC 952.801
Seasonal: Additive AICC 948.133
Seasonal Periods: 12 Date: Fri, 27 Mar 2020
Box-Cox: False Time: 16:57:56
Box-Cox Coeff.: None
=================================================================================
coeff code optimized
---------------------------------------------------------------------------------
smoothing_level 0.1052632 alpha True
smoothing_slope 0.1052632 beta True
smoothing_seasonal 0.3684211 gamma True
initial_level 23914.153 l.0 True
initial_slope 0.0098000 b.0 True
damping_slope 0.9800000 phi False
initial_seasons.0 -9677.2525 s.0 True
initial_seasons.1 -19394.263 s.1 True
initial_seasons.2 31776.858 s.2 True
initial_seasons.3 4381.1975 s.3 True
initial_seasons.4 -265.86250 s.4 True
initial_seasons.5 10680.977 s.5 True
initial_seasons.6 10032.237 s.6 True
initial_seasons.7 3995.3175 s.7 True
initial_seasons.8 57863.198 s.8 True
initial_seasons.9 7539.2375 s.9 True
initial_seasons.10 54714.568 s.10 True
initial_seasons.11 45631.467 s.11 True
---------------------------------------------------------------------------------
ExponentialSmoothing Model Results
================================================================================
Dep. Variable: endog No. Observations: 48
Model: ExponentialSmoothing SSE 5235252441.242
Optimized: True AIC 922.359
Trend: Additive BIC 954.169
Seasonal: Multiplicative AICC 949.502
Seasonal Periods: 12 Date: Fri, 27 Mar 2020
Box-Cox: False Time: 16:57:56
Box-Cox Coeff.: None
=================================================================================
coeff code optimized
---------------------------------------------------------------------------------
smoothing_level 0.0526304 alpha True
smoothing_slope 0.0526304 beta True
smoothing_seasonal 0.4739722 gamma True
initial_level 23914.153 l.0 True
initial_slope 0.0103101 b.0 True
damping_slope 0.9781040 phi True
initial_seasons.0 0.8216244 s.0 True
initial_seasons.1 0.4627010 s.1 True
initial_seasons.2 2.1666146 s.2 True
initial_seasons.3 1.3637967 s.3 True
initial_seasons.4 1.3727428 s.4 True
initial_seasons.5 1.4773012 s.5 True
initial_seasons.6 1.4485307 s.6 True
initial_seasons.7 1.4558825 s.7 True
initial_seasons.8 3.2280199 s.8 True
initial_seasons.9 1.7354292 s.9 True
initial_seasons.10 3.4934260 s.10 True
initial_seasons.11 3.1794103 s.11 True
---------------------------------------------------------------------------------
###Markdown
Introduction We will analyze a sample of AIS data from the Danish Maritime Authority. The data has been preprocessed using postgres, postgis, and timescaledb. We performed the following: - Remove positions with incorrect coordinates - Keep one position every thirty minutes using timescaledb - Calculate a fishing score based on the [Global Fish Watch heuristic model](https://github.com/GlobalFishingWatch/vessel-scoring/blob/master/notebooks/Model-Descriptions.ipynb) - Calculate a distance from land using land polygons from [pgosmdata](https://github.com/gma2th/pgosmdata) and the postgis nearest neighbor algorithm - Create fishing zones with the dbscan algorithm In this notebook we will: - Load and explore the data - Find ships with the longest self-reported fishing time - Find ships with the longest fishing time that do not report fishing in their navigational status - Find the longest trip of the day
###Code
%matplotlib inline
import datetime as dt
import geopandas as gpd
import hvplot.pandas  # noqa -- registers the .hvplot accessor used for plotting below
import numpy as np
import movingpandas as mpd
import pandas as pd
from shapely.geometry import Polygon
from fiona.crs import from_epsg
import warnings
warnings.simplefilter("ignore")
###Output
_____no_output_____
###Markdown
Loading sample AIS data
###Code
%%time
SAMPLING_DELTA = dt.timedelta(minutes=30)
_df = gpd.read_file('data/aisdk_30min.gpkg')
df = _df.copy(deep=True)
print("Finished reading {}".format(len(df)))
###Output
_____no_output_____
###Markdown
Let's have a first look at the data:
###Code
df.head()
df.describe()
df.describe(include = ['O'])
df.columns
###Output
_____no_output_____
###Markdown
Preprocessing What type of ships are in our dataset?
###Code
df['ship_type'].value_counts().plot(kind='bar', figsize=(15,3))
###Output
_____no_output_____
###Markdown
Vessels might be spoofing their reported type, but we will only work with vessels whose ship type is Fishing:
###Code
df = df[df.ship_type == 'Fishing']
###Output
_____no_output_____
###Markdown
Most of the navigational statuses are "Engaged in fishing", but there are also a lot of unknown values:
###Code
df.navigational_status.value_counts().plot(kind="bar")
###Output
_____no_output_____
###Markdown
There are a lot of records with speed over ground (SOG) values of zero in this dataframe:
###Code
df['sog'].hist(bins=100, figsize=(15,3))
###Output
_____no_output_____
###Markdown
Let's get rid of the rows with a SOG of zero:
###Code
print("Original size: {} rows".format(len(df)))
df = df[df.sog>0.0]
print("Reduced to {} rows after removing 0 speed records".format(len(df)))
df['sog'].hist(bins=100, figsize=(15,3))
###Output
_____no_output_____
###Markdown
Let's plot the positions:
###Code
df.hvplot(geo=True, tiles="OSM", color='red', alpha=0.2)
###Output
_____no_output_____
###Markdown
Analysis We will use movingpandas to build and plot trajectories.We first need to create a temporal index:
###Code
df['t'] = pd.to_datetime(df['bucket'])
df = df.set_index('t')
%%time
# MIN_LENGTH = 100 # meters
traj_collection = mpd.TrajectoryCollection(df, 'mmsi')
print("Finished creating {} trajectories".format(len(traj_collection)))
###Output
_____no_output_____
###Markdown
Find ships with the longest self-reported fishing time
###Code
df[df["navigational_status"] == "Engaged in fishing"].groupby("mmsi").size().nlargest(10) * SAMPLING_DELTA
traj_collection.get_trajectory(211519000).hvplot(cmap='Dark2', height=300, line_width=5.0)
###Output
_____no_output_____
###Markdown
Find ships with the longest fishing time that does not report fishing in their navigational status
###Code
df[(df["navigational_status"] != "Engaged in fishing") & (df["fishing_score"] > 0.5) & (df["distance_from_land"] > 1000)].groupby("mmsi").size().nlargest(10) * SAMPLING_DELTA
traj_collection.get_trajectory(235007860).hvplot(cmap='Dark2', height=300, line_width=5.0)
###Output
_____no_output_____
###Markdown
Find the longest trip of the day
###Code
traj_collection.df = pd.DataFrame([(traj.id, traj) for traj in traj_collection.trajectories], columns=["id", "trajectory"])
traj_collection.df["length"] = traj_collection.df.trajectory.apply(lambda traj: traj.get_length())
traj_collection.df.sort_values("length", ascending=False).head()
traj_collection.get_trajectory(220141000).hvplot(cmap='Dark2', height=300, line_width=5.0)
###Output
_____no_output_____
###Markdown
feas_error (TP and TD) vs m,k
###Code
fraction = np.round(groupedData3["rows_proj_y"].to_numpy()/groupedData3["rows_y"].to_numpy(),2)
groupedData3["fraction"] = fraction
fraction_vals = sorted(list(set(fraction)))
groupedData3
groupedData3[groupedData3["fraction"]==0.3]["TP_feas_error"]
# graph feas_error (TP and TD) vs m,k
fig,(ax1,ax2) = plt.subplots(1,2,figsize=(15,7),sharey=True)
iter = 0
for fract in fraction_vals:
iter += 1
ax1.plot(list(groupedData3[groupedData3["fraction"]==fract]["rows_y"]),list(groupedData3[groupedData3["fraction"]==fract]["TP_feas_error"]),color=(1.0-np.round(iter/len(fraction_vals),1),0.0,np.round(iter/len(fraction_vals),1)),label=str(fract))
ax1.set_xlabel("n",fontsize=17)
ax1.set_ylabel("feasibility error TP",fontsize=16)
leg = ax1.legend(loc="upper left",fontsize=14)
leg.set_title("k/n",prop={'size':14})
iter = 0
for fract in fraction_vals:
iter += 1
ax2.plot(list(groupedData3[groupedData3["fraction"]==fract]["rows_y"]),list(groupedData3[groupedData3["fraction"]==fract]["TD_feas_error"]),color=(1.0-np.round(iter/len(fraction_vals),1),0.0,np.round(iter/len(fraction_vals),1)),label=str(fract))
ax2.set_xlabel("n",fontsize=17)
ax2.set_ylabel("feasibility error TDP",fontsize=16)
leg = ax2.legend(loc="upper left",fontsize=14)
leg.set_title("k/n",prop={'size':14})
plt.subplots_adjust(wspace=0.1)
plt.savefig("feasibility_error.png",dpi=600)
plt.show()
###Output
_____no_output_____
###Markdown
obj_val (P, TP, TDP) vs m,k
###Code
# graph obj_val (P, TP and TDP) vs m,k
fig,(ax1,ax2,ax3) = plt.subplots(1,3,figsize=(15,7),sharey=True)
iter = 0
for fract in fraction_vals:
iter += 1
ax1.plot(list(groupedData3[groupedData3["fraction"]==fract]["rows_y"]),list(groupedData3[groupedData3["fraction"]==fract]["obj_val_P"]),color=(1.0-np.round(iter/len(fraction_vals),1),0.0,np.round(iter/len(fraction_vals),1)),label=str(fract))
ax1.set_xlabel("n",fontsize=17)
ax1.set_ylabel("objective value P",fontsize=16)
leg = ax1.legend(loc="upper left",fontsize=14)
leg.set_title("k/n",prop={'size':14})
iter = 0
for fract in fraction_vals:
iter += 1
ax2.plot(list(groupedData3[groupedData3["fraction"]==fract]["rows_y"]),list(groupedData3[groupedData3["fraction"]==fract]["obj_val_TP"]),color=(1.0-np.round(iter/len(fraction_vals),1),0.0,np.round(iter/len(fraction_vals),1)),label=str(fract))
ax2.set_xlabel("n",fontsize=17)
ax2.set_ylabel("objective value TP",fontsize=16)
leg = ax2.legend(loc="upper left",fontsize=14)
leg.set_title("k/n",prop={'size':14})
iter = 0
for fract in fraction_vals:
iter += 1
ax3.plot(list(groupedData3[groupedData3["fraction"]==fract]["rows_y"]),list(groupedData3[groupedData3["fraction"]==fract]["obj_val_TDP"]),color=(1.0-np.round(iter/len(fraction_vals),1),0.0,np.round(iter/len(fraction_vals),1)),label=str(fract))
ax3.set_xlabel("n",fontsize=17)
ax3.set_ylabel("objective value TDP",fontsize=16)
leg = ax3.legend(loc="upper left",fontsize=14)
leg.set_title("k/n",prop={'size':14})
plt.subplots_adjust(wspace=0.1)
plt.savefig("objective_val.png",dpi=600)
plt.show()
###Output
_____no_output_____
###Markdown
Table of Contents Part I: Data Overview 1.) [Setup](setup) 1.1.) [Standard Imports](standard_imports) 1.2.) [Visualization Imports](vis_imports) 1.3.) [Helpers](helpers) 1.4.) [Load data](load) 2.) [General Overview](general) 2.1.) [Timezone](timezone) 2.2.) [Oldest Transcript](oldest) 2.3.) [5 Oldest Stories](old_5) 2.4.) [Date spread](date_spread) 2.5.) [Earliest interview](earliest_interview) 2.6.) [Total words spoken](speaker_total_words) 3.) [Trends](trends) 3.1.) [Topic Popularity](topic_popularity) Part II: Is News a Bad Movie?1.) [Setup](movies_setup) 1.1.) [Load data](movies_load) 1.2.) [Process Data](movies_process) 2.) [Model Training](training) 2.1.) [Clean Movie Reviews](movies_clean) 2.2.) [Vectorizing words](vectorize_words) 2.3.) [Split into train, test](split_train_test) 2.4.) [Basic model](basic_model) 2.5.) [LGBM](lgb_model) 2.6.) [Score of LGBM model](lgb_score) 2.7.) [Distribution of predictions](pred_dist) 3.) [Sentiment Analysis](sent_analysis) 3.1.) [Sentiment by Speaker](speaker_sentiment) 3.2.) [Extreme Sentiments](speaker_sentiment_extreme) 3.3.) [KDE Plots](speaker_sentiment_kde) 3.4.) [Positive examples](speaker_sentiment_pos) 3.5.) [Negative Examples](speaker_sentiment_neg) 3.6.) [Topic Sentiment](topic_sentiment) 3.7.) [PBS Sentiment](pbs_sentiment) --- [^](toc) Setup [^](toc) Standard imports
###Code
### Standard imports
import pandas as pd
import numpy as np
pd.options.display.max_columns = 50
### Time imports
import datetime
import time
# Counter
from collections import Counter
# Operator
import operator
# Regular Expressions
import re
# Directory helper
import glob
# Language processing import
import nltk
# Random
import random
# Progress bar
from tqdm import tqdm
### Removes warnings that occassionally show in imports
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
[^](toc) Visualization imports
###Code
### Standard imports
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set()
### Altair
import altair as alt
alt.renderers.enable('notebook')
### Plotly
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
import plotly.plotly as py
from plotly import tools
init_notebook_mode(connected=True)
# WordCloud
from wordcloud import WordCloud
# Folium
import folium
###Output
_____no_output_____
###Markdown
[^](toc) Helpers
###Code
# A short hand way to plot most bar graphs
def pretty_bar(data, ax, xlabel=None, ylabel=None, title=None, int_text=False, x=None, y=None):
if x is None:
x = data.values
if y is None:
y = data.index
# Plots the data
fig = sns.barplot(x, y, ax=ax)
# Places text for each value in data
for i, v in enumerate(x):
# Decides whether the text should be rounded or left as floats
if int_text:
ax.text(0, i, int(v), color='k', fontsize=14)
else:
ax.text(0, i, round(v, 3), color='k', fontsize=14)
### Labels plot
    if ylabel is not None: fig.set(ylabel=ylabel)
    if xlabel is not None: fig.set(xlabel=xlabel)
    if title is not None: fig.set(title=title)
def pretty_transcript(transcript, convert_name=False):
for speaker in transcript:
if convert_name:
speaker[0] = clean_names(speaker[0])
print(color.UNDERLINE, speaker[0] + ":", color.END)
for txt in speaker[1:]:
print("\n\n ".join(txt))
print()
def get_trend(series, ROLLING_WINDOW=16):
    # Smooth the series with three successive centered rolling means
    # (full, half, then quarter window) to pull out the underlying trend
trend = series.rolling(
window=ROLLING_WINDOW,
center=True, min_periods=1).mean()
trend = trend.rolling(
window=ROLLING_WINDOW // 2,
center=True, min_periods=1).mean()
trend = trend.rolling(
window=ROLLING_WINDOW // 4,
center=True, min_periods=1).mean()
return trend
### Used to style Python print statements
class color:
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
###Output
_____no_output_____
###Markdown
[^](toc) Load data
###Code
pbs = pd.read_json("data/PBS-newhour-clean.json")
pbs = pbs.sort_values("Date")
pbs.Story.fillna("", inplace=True)
pbs["Year"] = pbs.Date.map(lambda x: x.year)
pbs["Month"] = pbs.Date.map(lambda x: x.month)
print("Shape of pbs:", pbs.shape)
pbs.head()
###Output
Shape of pbs: (17617, 9)
###Markdown
[^](toc) General Overview [^](toc) Timezone
###Code
pbs.Timezone.value_counts()
###Output
_____no_output_____
###Markdown
[^](toc) Oldest Clip
###Code
temp = pbs.iloc[0]
print(temp.Title)
print(temp.URL)
###Output
Watergate: The NewsHour’s 1973 Special Report
https://www.pbs.org/newshour/show/robert-macneil-and-jim-lehrer-and-the-watergate-hearings
###Markdown
[^](toc) Oldest TranscriptThe oldest complete transcript on PBS's website is an interview with Fidel Castro in February of 1985.
###Code
temp = pbs[pbs.Transcript.map(lambda x: x != [])].iloc[0]
print(f"{color.BOLD}{temp.Date}{color.END}")
print(f"{color.BOLD}{temp.Title}{color.END}")
print()
pretty_transcript(temp.Transcript)
###Output
[1m1985-02-11 06:00:00[0m
[1mRobert MacNeil Interviews Fidel Castro Part I[0m
[4m ROBERT MACNEIL: [0m
Our major focus section tonight is a newsmaker interview with Cuban President Fidel Castro. Last month the U.S. and Cuba successfully negotiated an agreement under which Cuba will take back 2,500 “undesirables” who came in the Mariel boat lift of 1980, and the United States will reopen normal immigration procedures in Havana.
Since then Castro has said he’d be willing to talk further about improving relations. Washington has reacted coldly, saying Castro is saying nothing new, and it wants to see Cuban deeds, not words. How far Castro wishes to push his new effort has not been clear, but in Havana part of his motivation is obvious.
Havana today expresses the weaknesses of the Cuban revolution. Its successes are in the countryside, where better nutrition, health care and education have changed more lives. Havana, the symbol of the decadent past, was neglected, with little new building. But with an economy still unable to meet all Fidel’s goals and an acute need for hard currency, old Havana is getting a facelift to attract tourists. Buildings and streets from the Spanish colonial period are being refurbished as is the square of the old cathedral.
The bulk of the tourists are still people from the Eastern bloc, their presence symbolizing Castro’s dependence on the Communist world for economic survival in the face of the American trade blocade. That’s been in force for a quarter of a century and has been tightened by the Reagan administration. Cuba’s lifeline is a procession of Soviet merchant ships bringing virtually everything, from oil and lumber to light bulbs. They return taking Cuban sugar, citrus and nickel, but recently not enough to meet the planned quotas.
So Cuban consumers have been asked to tighten their belts again, to wait for more attractive consumer goods while a big drive is made to boost exports to the Soviet bloc and to the West, both to meet Cuba’s commitments to her Communist partners and to earn hard currency to pay her Western debts.
This is the context for the growing suggestions that Castro, 26 years after his revolution, would like to patch things up with the U.S. There is no slackening of revolutionary zeal. The spirit that defeated the Bay of Pigs invasion of 1961 is constantly nourished, and the symbols of Castro’s rise to power are a national shrine.
The revolution is still young enough to enjoy tweaking Uncle Sam’s beard. This poster says, “Mr. Imperialist, we are absolutely not afraid of you.” It is located close to the U.S. mission, now called the U.S. Interest Section — because there are no full-scale diplomatic relations — where U.S. officials try to read the signals that Castro is sending.
On Friday night President Castro sat down with me for the first major American television interview in six years. With a Cuban government interpreter we talked for more than four hours, first about relations with the United States.
[4m ROBERT MACNEIL: [0m
Mr. President, every time that you begin to talk about improving relations with the United States, Washington says, “Show us deeds, not words.” What actions or deeds are you prepared to make to improve relations with the United States?
[4m FIDEL CASTRO: [0m
You said every time I speak of improving relations; actually there are not many times. Now then, I have read a few statements in which it is said that they want deeds and not words. I believe that that is a style of speaking. I would say a style of a great power. I understand that it is not easy for the United States to change its style.
We are a small country. We cannot speak in those terms, but we are also a country with a lot of dignity and no one can suppose that we would beg the United States for an improvement of relations. We have never done so, and we shall never do it. My intention is not that they believe what we say but rather simply to analyze our ideas and to go deeper in them and to make objective analyses of events. It is not a matter of faith, of confidence. It is a matter of objectivity.
[4m ROBERT MACNEIL: [0m
Let’s go through an objective analysis. The State Department and the White House always say that there are three obstacles to improving relations between Cuba and the United States. And they are your allegiance to the Soviet Union, what they call subversion in this hemisphere and the large number of your troops in Africa.
Sometimes they also mention human rights in Cuba. The White House mentioned human rights in Cuba this week again. Can we discuss in detail each of these, starting with relations with the Soviet Union? Is there a formula by which you could keep your ties to the Soviet Union and improve relations with the United States?
[4m FIDEL CASTRO: [0m
If the United States believed that there are three obstacles, actually there are quite few, quite little. I thought there were much more. Now, then, if we analyze these three types of obstacles, the first, that is the relations that we have with the Soviet Union, with the socialist countries and with any other country are matters of our sovereignty and that cannot be questioned, or at least we are not ready to discuss that.
And this is always — this is something that I always say in a very frank way. If, in order to improve our relations with the United States, we must give up our convictions and our principles, then relations will not improve on those lines. If we are going to question our sovereignty, then they will not improve either. Relations between Cuba and the Soviet Union are based in the most strict respect for independence and sovereignty of our country.
We have friendly relations, very close relations, and these relations cannot be affected in order to improve relation thing. The countries that do those things simply are not respected, and actually we are not going to change neither our flag nor our ideas. In our relations with the Soviet Union, in our friendship will be maintained intangible. I say this being fully frank and fully sincere. And it is necessary that this be understood.
[4m ROBERT MACNEIL: [0m
The director of Cuban affairs in the State Department, Kenneth Skout, he said in a speech in December what Cuba could not do and still retain Moscow’s favor is to alter its fundamental commitment to unswerving support for Soviet policy. And so my question is, isn’t that unswerving support for Soviet policy the price of the Soviet aid that keeps the Cuban economy going?
[4m FIDEL CASTRO: [0m
Well, we coincide in many things with the Soviet Union because we have a community of political principles. It is a socialist country; we are a socialist country. We do have many things in common with the Soviet Union and in many international problems we have a common stance that is based on political ideas and principles.
It is a friendly country of whose friendship we will not reject and of which we do not feel ashamed of because, actually, we are not going to fight with our friends to become friends of our adversaries. That we shall never do. And the Soviets have never imposed any conditions on us, on their assistance, and they have never attempted to tell us what we should do, what we must do, with which countries we are to trade and with which countries should we have relations.
So I simply can’t understand where these theories come from. But if that our relations with the Soviets are an obstacle and if someone thinks that we are going to sell out or that we are going to give up our banners or our flags or that we are going to change our ideas, that is an error. Cuba is a country that cannot be bought. And countries that are bought are simply not respected.
[4m ROBERT MACNEIL: [0m
I think what the United States government is saying is that your economic dependence on Moscow makes you automatically a part of the Soviet camp in having to agree to policies like the Soviet intervention in Afghanistan. Would you, Fidel Castro, who values the independence and integrity of a small country, would you alone have approved the Soviet intervention in Afghanistan if you had been free to make your own choice? Did you privately and personally approve of the Soviet intervention in Afghanistan?
[4m FIDEL CASTRO: [0m
When it was put forth at the U.N., that is, the question, the issue, we said clearly that in that conflict, in that attack, that tremendous attack against the Soviet Union led by the United States, we were not going to be on the side of the United States. Simply that. And we were then on the side of the Soviet Union. That is, we did not deal or delve on the topic; that is what we said. This is opposition because of this.
[4m ROBERT MACNEIL: [0m
But isn’t that the point? That your friendship and dependence on the Soviet Union makes you part of the camp and therefore take positions which Washington regards as anti-American positions?
[4m FIDEL CASTRO: [0m
You establish this dependency as something that is actual in fact and action. But in today’s world, in the economic arena, no one is absolutely independent, not even the United States nor Japan nor Western Europe. They depend on oil, raw materials, and for many other countries they need markets, they need trade. That is, no country is totally independent economically.
[4m ROBERT MACNEIL: [0m
Is it not true that your role in return for all the aid you get from the Soviet Union is to be a thorn in America’s side?
[4m FIDEL CASTRO: [0m
If that were true, we would not be talking about improving relations with the United States. If our role is to be a thorn, then it would not be convenient for us. Actually it does not bring us great benefits, either. That is, we are based on a conviction and it is the necessity to struggle in our area, in Central America, throughout the world.
It is a duty, actually a duty that we have in order to lower tensions and to achieve relations of peace in the world. And I say this sincerely, although I am a revolutionary. I was a revolutionary, I am a revolutionary, and I shall always be a revolutionary. And I will not change a single of my principles for a thousand relations with a thousand countries like the United States.
[4m ROBERT MACNEIL: [0m
Will the Soviet Union continue to provide you with the aid and support it does, do you believe, if you have good relations with the United States?
[4m FIDEL CASTRO: [0m
Look, our relations with the Soviet Union, with the socialist countries are solid things based on principles and have absolutely nothing to do with our economic and political relations with the United States. I will say one thing, though. The Soviet Union and the Soviet people feel great appreciation and great respect toward Cuba. But it is they respect Cuba because they admire, as other peoples do, the courage of Cuba, Cuba’s staunchness and Cuba’s capability to resist for over 26 years the aggressions, the economic blockade and the brutality of the United States.
[4m ROBERT MACNEIL: [0m
Would the Soviet Union like it if you had better relations with the United States, the blockade perhaps were lifted and the economic burden on the Soviet Union were shared or lessened?
[4m FIDEL CASTRO: [0m
The United States will pay us for our sugar at the price of the Soviets, or will they be buying the nickel and they will be maintaining the type of relations and trade that we have with the socialist countries. But I believe that the idea that we have any needs to trade with the United States should be totally eradicated. Everything we have done during these 26 years, we have done it without trade with the United States.
And our future has been conceived without trade with the United States. Actually, we have not asked for the Soviet Union — generally we don’t ask their opinion on our economic or political relations in an international arena. But I know the Soviet Union very well and I know the policy of the Soviet Union, and the Soviet Union would never be against Cuba’s developing its economic relations with the other capitalist countries, including the United States.
[4m ROBERT MACNEIL: [0m
So, to move on to the second point that Washington says is an obstacle to better relations — what the White House spokesman Larry Speakes called this week your subversion in the hemisphere. Let me quote you again Mr. Skout of the State Department. “It is Cuba’s striving, with Soviet support, to introduce Marxist-Leninist regimes throughout the hemisphere which still lies at the heart of our differences.” Would you comment on that?
[4m FIDEL CASTRO: [0m
Well, I could also accuse the Pope of practicing subversion in Latin America and preaching Christianity and Catholicism. He visited many countries even recently. He has met with natives and said that the land had to be given to the natives and the land properties. And he declared that schools were necessary for the children, jobs for the workers and for the families, medicine and doctors for the ill and also foodstuffs or housing.
What we preach is more or less that. And besides, it is what we have done in our country. So then, we will continue being Marxist and we’ll continue being socialist, and we will always say that our social system is more just. But we have said also, because we are convinced about it, we have said the following, and which is my answer to that. Neither can Cuba export revolution because revolutions cannot be exported, and the economic-social factors, the cultural-historical factors that determine the movement of revolution cannot be exported.
The external, the huge external debt of Latin America cannot be exported. The formula applied by the International Monetary Fund cannot be exported by Cuba. The unequal trade cannot be exported by Cuba. Underdevelopment and poverty cannot be exported by Cuba, and that is why Cuba cannot export revolution. It is absurd. It is ridiculous to say that revolutions can be exported. But the United States cannot, in the event, avoid them either. The United States accuses us maybe of wanting to promote change.
Well then, we would like to see changes occur, but changes will come whether the United States likes it or not, whether or not Cuba likes it. I could answer by saying that the United States wants to maintain an unjust social order that has meant for the peoples of this hemisphere poverty, hunger, underdevelopment, diseases, ignorance — and the United States wants to maintain that.
And we could also say that the United States wants to avoid change. If we are accused of wanting to promote change, we can also accuse the United States of wanting to avoid change and of wanting to maintain an unjust social regime. But actually neither can we export it, nor can the revolution avoid it — nor can the United States avoid it.
[4m ROBERT MACNEIL: [0m
In supporting militarily the Sandinista regime in Nicaragua, is Cuba not helping to sustain and introduce a Marxist-Leninist regime?
[4m FIDEL CASTRO: [0m
In Nicaragua, by offering military cooperation? Well, we are helping an independent country, we are helping a just revolution to defend itself. That’s simply what we are doing. In the same way that, for example, the United States has also sent weapons to this — in this hemisphere to other people.
It sent weapons to Somoza. It sent weapons to Trujillo when Trujillo was there. It sent weapons to Pinochet. It sent weapons to all of the repressive governments of Latin America, governments that murdered, tortured dozens of thousands of people, governments which disappeared tens of thousands of people. They had no moral obstacle in giving any economic, financial and military assistance to these governments.
So, with what moral grounds can it be questioned; that is, can our right be questioned to help Nicaragua and Nicaragua’s right to receive that aid? I ask the following. Can the United States help the counter-revolutionary bands, supply weapons to them — explosives — to fight inside Nicaragua, something that has meant the lives of thousands and thousands of people, and on the other hand question Cuba’s right and Nicaragua’s right for us to give them aid — economic, technical aid, and even some cooperation in the military field?
[4m ROBERT MACNEIL: [0m
So you would not stop giving such aid as a condition of improved relations with the United States?
[4m FIDEL CASTRO: [0m
We shall not make any unilateral decision in our relations and cooperation with Nicaragua. What we have said is that in Central America a politically negotiated solution is possible. What we say is that we support the effort of Contadora to seek solutions of peace in Central America, that we support it staunchly, sincerely, and that we beleive that political solutions exist and peace solutions exist that are convenient for the Nicaraguans, for Central America and for the United States itself, and that we are ready to struggle for that.
And also that the agreements that are reached shall be complied by us in a determined way. That is, any agreement reached between Nicaragua and the Contadora framework shall be complied by us to the very letter.
[4m ROBERT MACNEIL: [0m
How hopeful are you that now that some political settlement can be reached in Central America?
[4m FIDEL CASTRO: [0m
I am absolutely convinced. I have a lot of information about the work of Contadora. I have heard all the discussions, all the burning issues there, the positions of the United States, Nicaragua’s positions. And I am convinced, fully convinced, that it is possible to find formulas that would be acceptable by all parties, or to all parties. I have that conviction. I am convinced about that.
Now, then. For it, it is necessary for the United States to want to really cooperate in finding a political solution. I believe that as long as the United States is convinced that it can destroy the Sandinista revolution from within by combining the effect of the economic measures against Nicaragua with the economic difficulty inside Nicaragua and the actions of the counterrevolutionary bands, as long as they’re convinced that they can destroy the revolution from within, it will not be seriously ready to seek a political solution to the problems of Central America.
Because if it believes that it will destroy the revolution, why negotiate, then? Why reach agreementss? Now, then, Now, when the United States becomes persuaded that it shall not achieve that goal, that the Nicaraguan revolution cannot be destroyed from within, because of the questions I mentioned, the problems I mentioned, I believe that they can face the economic problems with what they produce and with the aid they are receiving, the economic aid they are receiving. If they handle it correctly, efficiently, they can face the economic problems. I’m convinced of that. I am also convinced that they can defeat the bands and that the bands will never be able to defeat —
[4m ROBERT MACNEIL: [0m
Excuse me. By the “bands” you mean what are called in the United States the “contras”?
[4m FIDEL CASTRO: [0m
Yes, the counterrevolutionary bands that will be defeated. They will be defeated. So then a situation will come up before the United States: that is, the United States will have no other alternative but to negotiate seriously to seek a solution or invade Nicaragua. And since, in my view, in my criteria, a U.S. invasion in Nicaragua is inconceivable, since it would mean such a serious mistake, a terrible mistake, that I do not simply think that the United States would really get to the point of making that mistake.
I cannot assure you that it might not do it, but I say that it is inconceivable that under the present circumstances in Latin America, under the present circumstances of crisis with the present feeling on the part of Latin American peoples, at the times we’re living in, the aggression and invasion against a Latin American country would be as catastrophic in political terms, it would mean such a political cost, and not only a political cost but also in terms of U.S. lives —
[4m ROBERT MACNEIL: [0m
Let me turn to Africa. The third of those obstacles that Washington sees to improving relations with you, your troops in Angola. You talked recently about circumstances arising which would cause you to bring them home. What would happen — what would have to happen to start bringing the Cuban troops out of Angola?
[4m FIDEL CASTRO: [0m
What is needed there? Well, discussions have taken place with the participation of the United States. The United States has had dialogues, talks with Angola’s leadership. We are informed through the Angolans about these negotiations or talks that have been held with our support and with our full cooperation. That is, they have carried out these negotiations in close contact with Cuba.
[4m ROBERT MACNEIL: [0m
Could you withdraw any of your troops before there is agreement?
[4m FIDEL CASTRO: [0m
No. No. The Angolans would not agree with that, and from our point of view it would be a mistake. And the Angolan proposal, that is, if those circumstances come up, then Angola commits itself, and Cuba of course would support it, to withdrawal in a period of three years what is called the grouping of troops in the south, which is made up by approximately 20,000 men. And even the figure was given.
This is the bulk of our troops, actually, but there are still troops in the center and to the north of Angola, including Cabinda. The Angolans have not included these troops in the negotiations, these present negotiations, and their position is that to withdraw those troops, it will be something that would have to be discussed between Angola and Cuba whenever it is considered that they can dispense of these troops.
[4m ROBERT MACNEIL: [0m
Do you think that this projected settlement of the Angola situation, does that erase Cuban troops in Angola as an issue between you and the United States?
[4m FIDEL CASTRO: [0m
Before there were no troops in Angola and relations were very bad with the United States. The day where there are not troops in Angola or in some other place or there are no advisers in Central America, maybe the United States might invent something else.
[4m ROBERT MACNEIL: [0m
Just to sum up our conversation about improving relations with the United States, why is this the right time to raise this, and realistically speaking, how hopeful are you that it can happen?
[4m FIDEL CASTRO: [0m
Whether this is the right — best moment? I believe that if the United States is objective, if it is realistic, I would say that it is the best moment for the United States, not for us. Actually, we can go on for five, 10, 15, 20 more years. The only obligation on our part, really, is toward peace.
If there’s peace here and in other areas, we will feel more pleased. If relations are normalized, even more pleased, because it would be then a progressive progress. Peace is convenient for all, but from the political point of view I am convinced — and I’m saying this fully frankly — I think that the United States benefits most than us. We can sit here and wait, calmly, and see what happens in the coming years.
[4m ROBERT MACNEIL: [0m
Tomorrow night Fidel Castro talks candidly about human rights in Cuba, political prisoners, dissent, the controlled press and the mistakes of his revolution. He also discusses what he sees as an explosive economic situation in Latin America.
Continue…
###Markdown
[^](toc) 5 Oldest Stories
###Code
for i in range(5):
print(pbs.iloc[i].Date)
print(pbs.iloc[i].Story)
print()
###Output
1973-05-17 02:26:00
“How high did the scandals reach and was President Nixon himself involved?” That was what the NewsHour’s Robert MacNeil, then co-anchoring with Jim Lehrer, dubbed “the ultimate question” as the program began its gavel-to-gavel coverage of the Watergate hearings on May 17, 1973.
1979-06-29 06:00:00
This MacNeil/Lehrer Report piece highlights the anguish caused by gas shortages at a station in Queens, New York in 1979.
1981-02-27 06:00:00
Robert MacNeil and Jim Lehrer interviewed British Prime Minister Margaret Thatcher for the The MacNeil/Lehrer Report in February of 1981.
1982-10-25 06:00:00
Jim Lehrer and Charlene Hunter Gault report on violence and instability across Guatemala and the actions of Efrain Rios Montt. Gavin Hewitt from the Canadian Broadcasting Corporation reports from Guatemala. Guests are Georges Fauriol of Georgetown University and Dana Martin of the Washington Office on Latin America.
1983-11-30 06:00:00
Robert MacNeil and Charlayne Hunter Gault report on the battles in Washington going on over violence and instability in Guatemala.
###Markdown
[^](toc) Date spreadThe activity starts around April 2011, so we have 7 years of history to analyze
###Code
temp = (pbs
.assign(n=0)
.set_index("Date")
.groupby(pd.Grouper(freq="M"))
.n
.apply(len)
.sort_index()
)
trace = go.Scatter(
x=temp.index,
y=temp.values,
)
layout = go.Layout(
title = "Number of transcripts available over time",
yaxis=dict(title="Number of transcripts"),
xaxis=dict(title="Date"),
)
fig = go.Figure(data=[trace], layout=layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
[^](toc) Earliest interviewI think it's amazing just looking back 7 years. So much has changed, but in another sense, not much has changed.The earliest mention of Donald Trump is in 2011, when he was demanding that Obama release his birth certificate. During that segment he is considering running for office. ([link](https://www.pbs.org/newshour/show/with-birth-certificate-release-obama-urges-shift-in-national-dialogue)). This is tangential, but this [clip](https://www.pbs.org/newshour/show/with-birth-certificate-release-obama-urges-shift-in-national-dialogue) also features PBS' Jim Lehrer 40 years earlier.The earliest mention of Bernie Sanders is him weighing in on the 2011 Debt Ceiling negotiations ([link](https://www.pbs.org/newshour/show/debt-deal-stalemate-spills-into-weekend-for-obama-congress)). He warns that the burden will fall on the working class.
###Code
# {x for x in set.union(*pbs.Speakers) if "BEZOS" in x}
### These are just examples
pois = {0: "BERNIE SANDERS",
1: "VLADIMIR PUTIN",
2: "DONALD TRUMP",
3: "JUDY WOODRUFF",
4: "BEN CARSON",
5: "STEPHEN COLBERT",
6: "HILLARY CLINTON",
7: "JOHN F. KENNEDY",
8: "ANGELA MERKEL",
9: "JEFF BEZOS",
10: "XI JINPING"
}
poi = pois[8]
print("Showing results for:", poi)
pbs[pbs.Speakers.map(lambda x: poi in x)]
# {x for x in set.union(*pbs.Speakers) if "RYAN" in x}
# pbs[pbs.Speakers.map(lambda x: "ELECT MIKE PENCE" in x)].Transcript.iloc[0]
###Output
_____no_output_____
###Markdown
[^](toc) Total words spoken
###Code
pois = ["BERNIE SANDERS", "DONALD TRUMP", "HILLARY CLINTON",
"BARACK OBAMA", "MITT ROMNEY", "ANGELA MERKEL",
"JOSEPH BIDEN", "MIKE PENCE"]
def get_num_articles(df, poi):
num_articles = len(df[df.Speakers.map(lambda x: poi in x)])
return num_articles
def get_num_words(df, poi):
speaker_text = list()
transcripts = df[df.Speakers.map(lambda x: poi in x)].Transcript.values
num_words = 0
for transcript in transcripts:
for person in transcript:
if person[0] == poi:
for txt in person[1]:
num_words += len(txt.split(" "))
return num_words
articles, words = list(), list()
for poi in pois:
num_articles = get_num_articles(pbs, poi)
num_words = get_num_words(pbs, poi)
articles.append(num_articles)
words.append(num_words)
trace1 = go.Bar(
x=pois,
y=articles,
name='Total articles'
)
trace2 = go.Bar(
x=pois,
y=words,
name='Total words'
)
data = [trace1, trace2]
layout = go.Layout(
barmode='group'
)
fig = go.Figure(data=data, layout=layout)
iplot(fig);
###Output
_____no_output_____
###Markdown
[^](toc) Most Popular Speakers
###Code
persons = pbs.Speakers.map(list).sum()
freq = sorted(Counter(persons).items(), key=operator.itemgetter(1), reverse=True)
x, y = list(zip(*freq[:25]))
plt.figure(figsize=(14, 14))
sns.barplot(list(y), list(x));
###Output
_____no_output_____
###Markdown
--- [^](toc) Trends [^](toc) Topic PopularityThis shows the popularity of a word for a given month. For each story I measure the fraction of its words that are the topic word, then take the average value over all stories in a given month (a toy sketch of the metric follows).To look at the trend of a topic, multiple moving averages are performed to smooth out fluctuations.There seems to be an increasing trend in talk about immigration and racism. Interestingly, PBS has no mention of racism until 2013.
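A tiny illustration of the per-story metric (my own made-up sentence, not data from the transcripts): the popularity of a topic within one story is simply the share of its words that match the topic word, expressed as a percent.
###Code
# Sketch of the per-story popularity metric used by topic_popularity below
toy_story = "obama met with congress today to discuss the budget with congress"
words = toy_story.lower().split(" ")
print(round(100 * words.count("congress") / len(words), 2), "percent of the words are 'congress'")
###Output
_____no_output_____
###Markdown
The full computation over the PBS transcripts, with smoothing applied to each monthly series: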
###Code
LIMIT_TIME = True
topics = ["Obama", "Trump", "Clinton", "Bush", "Immigration", "Congress", "Racism"]
def topic_popularity(topic):
def popularity_helper(transcript):
transcript = list(map(lambda x: x[1][0], transcript))
transcript = (" ".join(transcript).lower()).split(" ")
N = len(transcript)
counts = Counter(transcript)
return (counts[topic.lower()] / N) * 100
return popularity_helper
if LIMIT_TIME:
temp = pbs[pbs.Year > 2010]
else:
temp = pbs
datas = []
for topic in tqdm(topics):
temp["Temp"] = (
temp[temp.Transcript.map(lambda x: x != [])]
.Transcript
.map(topic_popularity(topic))
)
data = (temp
.set_index("Date")
.groupby(pd.Grouper(freq="M"))
.Temp
.apply(np.mean)
)
trend = get_trend(data, ROLLING_WINDOW=12)
datas.append((topic, data, trend))
traces = []
for topic, data, _ in datas:
traces.append(go.Scatter(
x=data.index,
y=data.values,
name=f"{topic} - actual"
))
for topic, _, trend in datas:
traces.append(go.Scatter(
x=trend.index,
y=trend.values,
name=f"{topic} - trend"
))
buttons = []
for i, topic in enumerate(topics):
visibility = [i==j for j in range(len(topics))]
button = dict(
label = topic,
method = 'update',
args = [{'visible': visibility},
{'title': f"'{topic}' usage over time" }])
buttons.append(button)
updatemenus = list([
dict(active=-1,
x=-0.15,
buttons=buttons
)
])
layout = dict(title='Topic popularity',
updatemenus=updatemenus,
xaxis=dict(title='Date'),
yaxis=dict(title='Percent of words')
)
fig = dict(data=traces, layout=layout)
fig['layout'].update(height=800, width=800)
iplot(fig)
###Output
100%|██████████| 7/7 [00:15<00:00, 2.29s/it]
###Markdown
--- Part II: Is News a Bad Movie?I want to see how political sentiment changes over time. That's hard to quantify directly, though: the transcripts carry no labels saying whether a remark is positive or negative, so I train a sentiment model on labeled movie reviews and then score the news transcripts with it.It does feel very stupid training a model on movie reviews. In addition, I'm using naive Bayes and word frequency analysis, which is stupid in itself. Models like this don't understand sarcasm, different word meanings, or phrases. However, we should be okay.--- [^](toc) Setup [^](toc) Load data
###Code
train = pd.read_feather("data/movie_train.csv")
test = pd.read_feather("data/movie_test.csv")
# train_dir = "data/large-movie-reviews/train/"
# test_dir = "data/large-movie-reviews/test/"
# train = pd.DataFrame(columns=["Text", "Sentiment"])
# test = pd.DataFrame(columns=["Text", "Sentiment"])
# for df, path in ([train, train_dir], [test, test_dir]):
# for sent in ("pos", "neg"):
# for txt in tqdm(glob.glob(path + sent + "/*")):
# txt = open(txt, "r")
# review = txt.read()
# df.loc[len(df)] = [review, sent]
# txt.close()
### OPTIONAL: Save time and feather the train and test data into a feathered CSV
# # train.to_feather("data/movie_train.csv")
# # test.to_feather("data/movie_test.csv")
# train.head()
###Output
_____no_output_____
###Markdown
[^](toc) Process Data
###Code
train.Sentiment = train.Sentiment.map(lambda x: int(x == "pos"))
test.Sentiment = test.Sentiment.map(lambda x: int(x == "pos"))
# Save memory space
train.Sentiment = train.Sentiment.astype(np.int8)
test.Sentiment = test.Sentiment.astype(np.int8)
###Output
_____no_output_____
###Markdown
--- [^](toc) Model training [^](toc) Clean Movie Reviews
###Code
bad_words = (">AAARGH!<", "<<<<<<<<<<<< <<<<<<<<<<<<<<<<<<<< <<<<<<<<<<<<<<<<<<<<<<<< <<<<<<<",
"<grin>", "(comedy)", "(horror)", "(Mr. Director)", "<<<sigh>>>", ">.<", "(<sp?)",
"<http://rogerebert.suntimes.com/apps/pb,cs.dll/section?category=ANSWERMAN>",
"<3", "-->", "===========>", "</3",
">>>>>>>>>>>>> >>>>>>>>>>>>>>>>> >>>>>>>>>>>>>>>>>>>>>> >>>>>>>> >>>>>>>",
":ZZZZZZZZZZzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz..............", "<=8",
"Yaaaaaaaaaaaaaawwwwwwwwwwwwwwwwwnnnnnnnnnnnnn!",
":=8O", "ZZZZZZZZzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz...........",
">>> youtube.com/watch?v=cNVrMZX2kms",
"<http://rogerebert.suntimes.com/apps/pbcs.dll/section?category=ANSWERMAN>")
html_words = ("<hr>", "<br /><br />", "<i>", "</i>", "<em>", "</em>", "<SPOILER>", "</SPOILER>",)
def clean_txt(txt, words):
for word in words:
txt = txt.replace(word, " ")
txt = txt.replace("_", " ")
txt = txt.strip()
return txt
def clean_reviews(review):
review = clean_txt(review, bad_words)
review = clean_txt(review, html_words)
return review
train.Text = train.Text.map(clean_reviews)
test.Text = test.Text.map(clean_reviews)
###Output
_____no_output_____
###Markdown
[^](toc) Vectorizing wordsDISCLAIMER: I stole a lot of this code from [Anisotropic](https://www.kaggle.com/arthurtok) and his excellent [kernel](https://www.kaggle.com/arthurtok/spooky-nlp-and-topic-modelling-tutorial).
###Code
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem import WordNetLemmatizer
lemm = WordNetLemmatizer()
class LemmaCountVectorizer(CountVectorizer):
def build_analyzer(self):
analyzer = super(LemmaCountVectorizer, self).build_analyzer()
return lambda doc: (lemm.lemmatize(w) for w in analyzer(doc))
# Storing the entire training text in a list
text = list(train.Text.values)
# Calling our overwritten Count vectorizer
tf_vectorizer = LemmaCountVectorizer(max_df=0.6,
min_df=20,
stop_words='english',
decode_error='ignore')
tf = tf_vectorizer.fit_transform(text)
###Output
_____no_output_____
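###Markdown
As a quick sanity check (a minimal sketch of my own, not from the original kernel), the overridden analyzer lowercases the text, drops English stop words, and lemmatizes each remaining token, so plural forms collapse to a single vocabulary entry:
###Code
# Sketch: what the lemmatizing analyzer produces for a made-up review snippet
analyze = tf_vectorizer.build_analyzer()
print(list(analyze("These movies had amazing performances and beautiful scenes")))
print(lemm.lemmatize("movies"), lemm.lemmatize("performances"))
###Output
_____no_output_____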
###Markdown
[^](toc) Split into train, test
###Code
train_x = tf_vectorizer.transform(train.Text).toarray()
train_y = train.Sentiment
test_x = tf_vectorizer.transform(test.Text).toarray()
test_y = test.Sentiment
###Output
_____no_output_____
###Markdown
[^](toc) Basic model
###Code
from sklearn.naive_bayes import GaussianNB
gnb_model = GaussianNB()
gnb_model.fit(train_x, train_y)
score = gnb_model.score(test_x, test_y)
print(f"Naive Bayes score: {round(score * 100, 2)}%")
###Output
Naive Bayes score: 66.89%
###Markdown
[^](toc) LGBMUsually Naive Bayes is used for classification, but I see great results with Light Gradient Boosting. Also, instead of a hard classification I want a spectrum, meaning the predictions will be some float between 0 and 1.I think using a spectrum is more interesting, as it differentiates a really negative text from a slightly negative text.
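Before training, a tiny illustration with toy probabilities of my own (not model output): a prediction near 1 marks a strongly positive text, near 0 a strongly negative one, and later cells rescale the probability to the range -1 to 1 with 2*p - 1.
###Code
# Toy probabilities showing the spectrum rescaling used later in the notebook
for p in (0.95, 0.55, 0.10):
    print(f"probability {p} -> spectrum value {2 * p - 1:+.2f}")
###Output
_____no_output_____
###Markdown
Now the actual LightGBM training: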
###Code
import lightgbm as lgb
from sklearn.model_selection import train_test_split
training_x, val_x, training_y, val_y = train_test_split(train_x, train_y, test_size=0.2, random_state=17)
lgb_train = lgb.Dataset(data=training_x, label=training_y)
lgb_eval = lgb.Dataset(data=val_x, label=val_y)
params = {'task': 'train', 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'auc',
'learning_rate': 0.03, 'num_leaves': 55, 'num_iteration': 2000, 'verbose': 0 ,
'subsample':.9, 'max_depth':7, 'reg_alpha':20, 'reg_lambda':20,
'min_split_gain':.05, 'min_child_weight':1, "min_data_in_leaf": 40,
"feature_fraction":0.5}
start = time.time()
lgb_model = lgb.train(params, lgb_train, valid_sets=lgb_eval, early_stopping_rounds=150, verbose_eval=200)
print("Training took {} seconds".format(round(time.time() - start)))
###Output
Training until validation scores don't improve for 150 rounds.
[200] valid_0's auc: 0.885519
[400] valid_0's auc: 0.904419
[600] valid_0's auc: 0.91233
[800] valid_0's auc: 0.916617
[1000] valid_0's auc: 0.919374
[1200] valid_0's auc: 0.921255
[1400] valid_0's auc: 0.922448
[1600] valid_0's auc: 0.923493
Early stopping, best iteration is:
[1627] valid_0's auc: 0.923625
Training took 80 seconds
###Markdown
[^](toc) Score of LGBM modelThe LGBM model is considerably better than Naive Bayes! More can be done to increase this score, but it's good enough for me!
###Code
# Predict
predictions = lgb_model.predict(test_x)
print("/nFirst 5 valus of predictions")
print(" ".join(predictions[:5].astype(str)))
# Turn probabilities into classification
preds = (predictions > 0.5).astype(int)
# Check if predictions are correct and score
score = (preds == test_y).astype(int)
score = sum(score) / len(score)
print(f"LGBM score: {round(score * 100, 2)}%")
###Output
First 5 values of predictions
0.6823096152655787 0.2563676280349399 0.754158275654756 0.7299133738366266 0.9163257142306617
LGBM score: 84.69%
###Markdown
[^](toc) Distribution of predictionsI want to look at the distribution of predictions to see if it is suitable for our purposes.The most important plot here is the predictions by label.
###Code
plt.figure(figsize=(16, 12))
### Nuanced way of creating subplots
ax1 = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax3 = plt.subplot2grid((2, 2), (1, 0), colspan=2)
preds = (2 * predictions) - 1
# Left plot: KDE plot of predictions
ax1.set_title("Distribution of predictions")
ax1.set_xlabel("Prediction")
sns.kdeplot(preds, ax=ax1)
# Right plot: KDE plot of predictions by label
pos_preds = preds[test_y[test_y == 1].index]
neg_preds = preds[test_y[test_y == 0].index]
ax2.set_title("Predictions by label")
ax2.set_xlabel("Prediction")
sns.kdeplot(pos_preds, label="Positive", ax=ax2)
sns.kdeplot(neg_preds, label="Negative", ax=ax2)
# Bottom plot: Histogram plot
ax3.set_title("Histogram of predictions")
ax3.set_xlabel("Prediction")
pd.DataFrame(preds).plot(kind="hist", bins=30, ax=ax3)
ax3.legend_.remove();
###Output
_____no_output_____
###Markdown
[^](toc) Sentiment Analysis [^](toc) Sentiment by Speaker
###Code
pois = ["BERNIE SANDERS", "DONALD TRUMP", "HILLARY CLINTON", "BARACK OBAMA"] #, "LISA DESJARDINS", "DAVID BROOKS"]
pois_sents = dict()
poi_txts = dict()
def get_speaker_text(df, poi):
speaker_text = list()
transcripts = df[df.Speakers.map(lambda x: poi in x)].Transcript.values
for transcript in transcripts:
total_txt = ""
for person in transcript:
if clean_names(person[0]) == poi:
total_txt += " ".join(person[1]) + " "
speaker_text.append(total_txt)
return speaker_text
for poi in pois:
txts = get_speaker_text(pbs, poi)
poi_txts[poi] = txts
txts = tf_vectorizer.transform(txts).toarray()
sentiments = lgb_model.predict(txts)
sentiments = (2 * sentiments) - 1
pois_sents[poi] = sentiments
sents = [(poi, np.mean(sent)) for poi, sent in pois_sents.items()]
x, y = list(zip(*sents))
plt.figure(figsize=(14, 6))
plt.ylabel("Sentiment (Positive mean positive attitude)")
plt.bar(x, y);
###Output
_____no_output_____
###Markdown
[^](toc) Extreme SentimentsFrom experience, I've seen Trump say very negative things, so it's strange to see him with the same positivity as Obama.I think it will be fruitful to compare the values at the 10th and 90th percentiles.
###Code
positive = [(poi, np.percentile(sent, 90)) for poi, sent in pois_sents.items()]
negative = [(poi, np.percentile(sent, 10)) for poi, sent in pois_sents.items()]
fig, axarr = plt.subplots(2, 1, figsize=(14, 10))
x, y = list(zip(*positive))
axarr[0].set_title("Most positive remarks")
axarr[0].set(ylabel="Sentiment (Positive mean positive attitude)")
axarr[0].bar(x, y)
x, y = list(zip(*negative))
axarr[1].set_title("Most negative remarks")
axarr[1].set_ylabel("Sentiment (Positive mean positive attitude)")
axarr[1].bar(x, y);
###Output
_____no_output_____
###Markdown
[^](toc) KDE PlotsThis is a very interesting plot. Notice how Trump is less likely to say something moderate. Sanders has a small hump on the negative side.
###Code
plt.figure(figsize=(14, 8))
all_sents = [(poi, sent) for poi, sent in pois_sents.items()]
for person, sentiment in all_sents:
sns.kdeplot(sentiment, label=person)
###Output
_____no_output_____
###Markdown
[^](toc) Positive examplesThe model is very accurate with Donald Trump and Barack Obama. Almost every other word in Trump's text is positive. The text from Obama is his speech marking the 10th anniversary of 9/11, which was incredibly positive.
###Code
positive = [(poi, np.argmax(sent)) for poi, sent in pois_sents.items()]
for person, txt_index in positive:
print(color.UNDERLINE, person, color.END)
print(poi_txts[person][txt_index])
print()
###Output
[4m BERNIE SANDERS [0m
Great to be with you. Well, I have been in the Democratic Caucus in the Senate for over 24 years. But, as an independent, my views, in fact, are a little bit different than many of my Democratic colleagues. I worry very much that we have a billionaire class now which has enormous power not only over our economy, but over our political system as well, as a result of Citizens United Supreme Court decision. So, my own view is that we have got to be very, very bold in taking on big money and creating a situation where government begins to work for the middle class and working families of our country, rather than just the wealthy and the powerful. Judy, I’m running for president because, in my view, this country today, our country, faces more serious problems than at any time since the Great Depression. And if you throw in the planetary crisis of climate change, it may well be that the problems today are more severe. Look, for the last 40 years, the great middle class of this country has been disappearing. Median family income today is significantly less than it was in 1999. Millions are working longer hours for lower wages. And, at the same time, we have seen a huge shift of wealth to the top one-tenth of 1 percent. So, today — today, 99 percent of all new income is going to the top 1 percent. The top one-tenth of 1 percent owns almost as much wealth as the bottom 90 percent. That is immoral and unsustainable. Well, I know. Critics are often paid by large corporations or corporate think tanks. The fact of the matter is right now in America we’re losing about $100 billion every single year because very profitable corporations are stashing their money in the Cayman Islands, Bermuda, and other tax havens. And that has got to end. Second of all, we have a situation where hedge fund managers, guys that are making many, many millions of dollars a year, are paying an effective tax rate lower than what nurses or school teachers are paying. And Warren Buffett makes the point that his effective tax rate, as a multibillionaire, is lower than his secretary’s. That’s got to end. The wealthiest people in this country are in fact going to have to start paying their fair share of taxes if I’m elected president. Sure it’s a problem. The problem that we have now is that our political system is increasingly dominated by a billionaire class and by super PACs, who have unbelievable influence over what goes on politically. It is a huge problem. But in terms of this trade agreement, in my view, the Trans-Pacific Partnership trade agreement is a continuation of other disastrous trade agreements, like NAFTA, CAFTA, and permanent normal trade relations with China. These trade agreements, among other things, have contributed to the that we have lost almost 60,000 factories since 2001 and millions of decent-paying jobs. And I think enough is enough. We have got to rebuild our manufacturing base, not send it to China or other countries. Well, I think that’s a very fair question. And I think the American people will have to decide. If you are asking me why it is that the middle class is disappearing and we’re seeing more income and wealth inequality than any time since the 1920s, trade is a very important factor, not the only reason. And it is hard for me to understand how any serious candidate for president, Hillary Clinton or anybody else, can duck this issue. You can’t. You can be for it. You can be against it. But it is being hotly debated right now in Congress. You have got to have a position on it. 
I have spent the better part of my adult life standing up and fighting for working families. I have taken on virtually every element of the big money establishment, whether it’s the Koch brothers, and the big energy companies, whether it’s the industrial complex, whether it’s Wall Street. You’re looking at the guy who has introduced legislation to break up the largest financial institutions in this country. I have taken on the drug companies. I have taken on the insurance companies. I happen to believe that we should move to a Medicare-for-all single-payer system, similar to what other countries around the world have. So, I think if people understand that establishment politics just no longer is working, that we need some bold ideas, that we need a mass movement of people, millions of people to stand up and say, you know what, enough is enough, this great country belongs to all of us and not just to a handful of billionaires, if people believe that, I will win this election. I don’t think she can, yes. No, no, no, I have supported those efforts on the part of the president. I voted against the war in Iraq. And I think, if you go back and you read what I had to say way back when, you know, it will sound pretty prescient in terms of the destabilization that we have seen in the Middle East. So my view is, the United States has got to play an active role in defeating this barbaric organization, but at the end of the day, it’s going to be the Muslim countries themselves, supported by the United States and other Western countries, that will defeat ISIS and bring some degree of stability into the Middle East. It cannot be American troops on the ground. And I will tell you what I worry about. I think too many of my Republican friends are into perpetual warfare in the Middle East. And that scares the bejesus out of me. And I supported the airstrikes as well. But I do not want to see perpetual warfare in the Middle East. I do not want to see American combat troops on the ground in the Middle East. Thank you very much.
[4m DONALD TRUMP [0m
Reince is a good man. John Kelly will do a fantastic job. General Kelly has been a star, done an incredible job thus far, respected by everybody, a great, great American. Reince Priebus, a good man. Thank you very much.
[4m HILLARY CLINTON [0m
Well, we had a great, great time last night. The real point is about temperament and fitness and qualifications to hold the most important, hardest job in the world. And I think people saw last night some very clear differences between us. Anybody who complains about the microphone is not having a good night. He loves beauty contests, supporting them and hanging around them. And he called this woman “Miss Piggy.” Then he called her “Miss Housekeeping,” because she was Latina. Donald, she has a name. Her name is Alicia Machado. And she has become a U.S. citizen, and you can bet… … she’s going to vote this November. At one point, he was kind of digging me for spending time off the campaign trail to get prepared. And I said, yes, you know what, I did prepare. And I will tell you something else I prepared for. I prepared to be president of the United States, and I think that’s good. (CHEERING AND APPLAUSE)
[4m BARACK OBAMA [0m
God is our refuge and strength, a very present help in trouble. Therefore, we will not fear even though the earth be removed and though the mountains be carried into the midst of the sea. Thank you. Thank you. Thank you. The Bible tells us weeping may endure for a night, but joy cometh in the morning. Ten years ago, America confronted one of our darkest nights. Mighty towers crumbled, black smoke billowed up from the Pentagon, airplane wreckage smoldered on a Pennsylvania field. Friends and neighbors, sisters and brothers, mothers and fathers, sons and daughters — they were taken from us with a heartbreaking swiftness and cruelty. And on September 12th, 2001, we awoke to a world in which evil was closer at hand and uncertainty clouded our future. In the decades since, much has changed for Americans. We’ve known war and recession, passionate debates and political divides. We can never get back the lives that were lost on that day or the Americans who made the ultimate sacrifice in the wars that followed. And yet today it is worth remembering what has not changed. Our character as a nation has not changed. Our faith in God and in each other — that has not changed. Our belief in America, born of a timeless ideal that men and women should govern themselves, that all people are created equal and deserve the same freedoms to determine their own destiny — that belief through tests and trials has only been strengthened. These past 10 years have shown that America does not give in to fear. The rescue workers who rushed to the scene, the firefighters who charged up the stairs, the passengers who stormed the cockpit — these patriots define the very nature of courage. Over the years, we’ve also seen a more quiet form of heroism in the ladder company that lost so many men and still suits up and saves lives every day, the businesses that have been rebuilt from nothing, the burn victim who’s bounced back, the families who press on. Last spring, I received a letter from a woman named Suzanne Swain (ph). She had lost her husband and brother in the twin towers and said that she had been robbed of so many would-be proud moments where a father watches their child graduate or tend goal in a lacrosse game or succeed academically. But her daughters are in college, the other doing well in high school. “It has been 10 years of raising these girls on my own,” Suzanne wrote. “I could not be prouder of their strength and resilience.” That spirit typifies our American family, and the hopeful future for those girls is the ultimate rebuke to the hateful killers who took the life of their father. These past 10 years have shown America’s resolve to defend its citizens and our way of life. Diplomats serve in far-off posts and intelligence professionals work tirelessly without recognition. Two million Americans have gone to war since 9/11. They’ve demonstrated that those who do us harm cannot hide from the reach of justice anywhere in the world. America’s been defended not by conscripts but by citizens who choose to serve, young people who signed up straight out of high school, guardsmen and reservists, workers and business people, immigrants and fourth-generation soldiers. They are men and women who left behind lives of comfort for two, three, four, five tours of duty. Too many will never come home. Those that do carry dark memories from distant places and the legacy of fallen friends. 
The sacrifices of these men and women and of our military families reminds us that the wages of war are great and that while service to our nation is full of glory to Kandahar and Kabul, to Mosul and Basra. But our strength is not measured in our ability to stay in these places. It comes from our commitment to leave those lands to free people and sovereign states and our desire to move from a decade of war to a future of peace. These 10 years have shown that we hold fast to our freedoms. Yes, we’re more vigilant against those who threaten us, and there are inconveniences that come with our common defense. Debates about war and peace, about security and civil liberties have often be fierce these last 10 years, but it is precisely the rigor of these debates and our ability to resolve them in a way that honors our values and our democracy that is the measure of our strength. Meanwhile, our open markets still provide innovators a chance to create and succeed. Our citizens are still free to speak their minds. And our souls are enriched in churches and temples, our synagogues and our mosques. These past 10 years underscores the bonds between all Americans. We have not succumbed to suspicion, nor have we succumbed to mistrust. After 9/11, to his great credit, President Bush made clear what we have reaffirmed today. The United States will never wage war against Islam or any other religion. Immigrants come here from all parts of the globe, and in the biggest cities and the smallest towns, in schools and workplaces, you still see people of every conceivable race and religion and ethnicity, all of them pledging allegiance to the flag, all of them reaching for the same American dream. E pluribus unum — out of many we are one. These past 10 years tell us a story of our resilience. The Pentagon is repaired and filled with patriots working in common purpose. Shanksville is the scene of friendships forged between residents of that town and families who lost loved ones there. New York, New York remains the most vibrant of capitals of arts and industry and fashion and commerce. Where the World Trade Center once stood, the sun glistens off a new tower that reaches towards the sky. Our people still work in skyscrapers. Our stadiums are still filled with fans and our parks full of children playing ball. Our airports hum with travel and our buses and subways take millions where they need to go. And families sit down to Sunday dinner and students prepare for school. This land pulses with the optimism of those who set out for distant shores and the courage of those who died for human freedom. Decades from now, Americans will visit the memorials to those who were lost on 9/11. They’ll run their fingers over the places where the names of those we loved are carved into marble and stone, and they may wonder at the lives that they led. And standing before the white headstones in Arlington and in peaceful cemeteries and small town squares in every corner of the country, they will pay respects to those lost in Iraq and Afghanistan. They’ll see the names of the fallen on bridges and statues, in gardens and schools, and they will know that nothing can break the will of a truly United States of America. They will remember that we’ve overcome slavery and civil war. We’ve overcome red lines and fascism and recession and riots and communism, and yes, terrorism. They will be reminded that we are not perfect. 
Our democracy is durable, and that democracy, reflecting as it does the imperfections of man, also gives us the opportunity to perfect our union. That is what we honor on days of national commemoration, those aspects of the American experience that are enduring and the determination to move forward as one people. More than monuments, that will be the legacy of 9/11, a legacy of firefighters who walked into fire and soldiers who signed up to serve, of workers who raised new towers and citizens who faced down their private fears, most of all of children who realized the dreams of their parents. It will be said that we kept the faith, that we took a painful blow and we emerged stronger than before. Weeping may endure for a night, but joy cometh in the morning. With a just God as our guide, let us honor those who have been lost. Let us rededicate ourselves to the ideals that define our nation, and let us look to the future with hearts full of hope. May God bless the memory of those we lost, and may God bless the United States of America. (APPLAUSE)
###Markdown
[^](toc) Negative Examples
Bernie Sanders and Donald Trump certainly sound like they just saw a bad movie. The model seems to perform very well with Donald Trump and not so well with Hillary Clinton. Trump uses a lot of adjectives and is very direct. Hillary Clinton is somewhat less direct and a bit sarcastic, which the model has trouble with.
###Code
negative = [(poi, np.argmin(sent)) for poi, sent in pois_sents.items()]
for person, txt_index in negative:
print(color.UNDERLINE, person, color.END)
print(poi_txts[person][txt_index])
print()
###Output
[4m BERNIE SANDERS [0m
Well, what went wrong, Judy, is they brought forth a disastrous health care bill that had the support of all of 12 percent of the American people, that was opposed by the American Medical Association, the American Hospital Association, the AARP. And virtually every national health care organization understood that, when you throw 22 million people off of health insurance, when you cut Medicaid by $800 billion, when you raise premiums for older workers, when you defund Planned Parenthood, and you make it almost impossible for people with preexisting conditions to get the health care they need and can afford, you know what? You have got a bill that’s a stinker, it shouldn’t go anyplace. And it didn’t go anyplace. And that’s a good thing for the American people. And I thank the millions of people who stood up and fought back and said that that legislation is not what this country is about. Well, if he wants to blame me for helping kill that bill, I accept that responsibility completely. This bill was an absolute disaster. Its goal was primarily to give tax breaks to the rich and to large corporations, rather than to address the needs of the American people. If the president wants to blame me and anyone else for preventing 22 million Americans losing their health insurance, I accept that criticism. Of course. Why not — look, nobody has said, Judy, that the Affordable Care Act is anywhere near perfect. It did add 20 million more people to the ranks of the insured. That’s good. Deductibles, however, are too high. Co-payments are too high. Premiums are too high. And we pay by far the highest in the world for prescription drugs, getting ripped off every day by the pharmaceutical industry. So, if the Republicans want to sit down and say how do we improve the Affordable Care Act, not destroy it, how do we improve it, let’s go forward and do that. I have some very specific ideas on that. Well, I’ll tell you. As I just mentioned, the cost of prescription drugs in this country is far, far higher than in any other country. You may recall that Donald Trump as a candidate for president talked about how he was going to take on the pharmaceutical industry and it was going to lower prescription drug costs. Well, we have some ideas to do that. Republicans may have other ideas. Let’s talk about lowering prescription drug costs, saving the federal government substantial sums of money. Let’s talk about having Medicare negotiate prices with the pharmaceutical industry. That’s number one. Number two, there are areas of this country right now where there are no insurance companies offering the Affordable Care Act. Let us provide a public option in every county in America, so if people don’t like what the private insurance companies are offering or there is no offer, let them have at least a public option. Number three, I believe that the American people would very much like to see lowering the eligibility age of Medicare from 65 to 55. And, lastly, in my view — and I speak only for myself — the United States must join the rest of the industrialized world, guarantee health care to all people as a right. And that is why I will be introducing a Medicare-for-all single-payer program. It will not be passed, believe me, in this session of Congress. I know that. But we have got to begin the discussion as to why we spend so much more per capita on health care than any other nation, why we pay the highest prices in the world, why we do not guarantee health care to all people, as every other major country does. Oh, yes. 
Well, that’s a very good question. And I’m sure that there’s absolutely nobody in the world who knows the answer to it. All that I can say is that we are spending far more per capita than people in any other country, and our health care outcomes are in many cases worse in terms of life expectancy, infant mortality and so forth. So, I think the issue is not necessarily — we may have to spend more money. The issue is to trying to figure out why we end up spending so much more than other countries. And one of the reasons, clearly, high cost of prescription drugs. Second reason, we do very, very badly in terms of primary health care. There are millions of people, even those who have insurance, who can not get to a doctor when they are sick. They end up in the emergency room, very expensive. They end up in the hospital, very, very expensive. If we greatly expanded primary health care, lower the cost of prescription drugs, we take a giant step forward in lowering health care costs in America. Prescription drugs. Thank you, Judy.
[4m DONALD TRUMP [0m
It’s all fake news. It’s phony stuff. It didn’t happen. And it was gotten by opponents of ours. But it should never have been released, but I read what was released. And I think it’s a disgrace. I think it’s an absolute disgrace. I told many people, be careful, because you don’t want to see yourself on television. There are cameras all over the place, and, again, not just Russia, all over. Does anyone really believe that story? I’m also very much of a germaphobe, by the way, believe me. It’s a failing pile of garbage writing it. I think they’re going to suffer the consequences. I think it was disgraceful, disgraceful that the intelligence agencies allowed any information that turned out to be so false and fake out. I think it’s a disgrace, and I say that. And that’s something that Nazi Germany would have done, and did do. The hacking is bad and it shouldn’t be done. But look at the things that were hacked. Look at what was learned from that hacking, that Hillary Clinton got the questions to the debate and didn’t report it? That’s a horrible thing. I think it was Russia, but I think we also get hacked by other countries and other people. If Putin likes Donald Trump, guess what, folks? That’s called an asset, not a liability. Now, Russia will have much greater respect for our country when I’m leading it than when other people have led it. You will see that. Russia will respect our country more. He shouldn’t have done it. I don’t believe he will be doing it more. We could make deals in Russia very easily if we wanted to. I just don’t want to, because I think that would be a conflict. So I have no loans, no dealings and no current pending deals.
[4m HILLARY CLINTON [0m
Let’s do everything we can to win Kentucky in November! Now, some people might say, oh, all anybody wants to hear is just, I’m going to do it, but I’m not telling you what I’m going to do. See, I don’t believe that. Americans take their vote for president seriously. And they’re going to be looking at that TV screen and saying, he still doesn’t have anything to tell us?
[4m BARACK OBAMA [0m
Don’t bet against the American auto industry. Only in politics do people root for bad news, do they greet bad news so enthusiastically. You pay more, they’re licking their chops. And you can bet that since it’s an election year, they’re already dusting off their three-point plan for $2 gas. And I will save you the suspense. Step one is to drill, and step two is to drill and then step three is to keep drilling. Well, the American people aren’t stupid. They know that’s not a plan, especially since we’re already drilling. That’s a bumper sticker.
###Markdown
[^](toc) Topic Sentiment
Are certain words associated with good or bad movies? Look at articles with these words in their summary.
###Code
chr(65)
chr(122)
topics = ["Obama", "Trump", "Clinton", "Bush", "Immigration", "Congress"]
sents = []
Ns = []
def get_speaker_text(df, poi):
speaker_text = list()
transcripts = df[df.Speakers.map(lambda x: poi in x)].Transcript.values
for transcript in transcripts:
total_txt = ""
for person in transcript:
if clean_names(person[0]) == poi:
total_txt += " ".join(person[1]) + " "
speaker_text.append(total_txt)
return speaker_text
for poi in pois:
txts = get_speaker_text(pbs, poi)
poi_txts[poi] = (txts)
txts = tf_vectorizer.transform(txts).toarray()
sentiments = lgb_model.predict(txts)
sentiments = (2 * sentiments) - 1
pois_sents[poi] = sentiments
for topic in topics:
stories = pbs[pbs.Story.map(lambda x: topic in x)]
Ns.append(len(stories))
###Output
1537
1725
501
84
37
566
###Markdown
[^](toc) PBS Sentiment
###Code
pbs_staff = {"JUDY WOODRUFF", "GWEN IFILL", "JOHN YANG",
"RAY SUAREZ", "JIM LEHRER", "JEFFREY BROWN",
"HARI SREENIVASAN", "LISA DEJARDIN"}
def text_sent(transcript):
total_txt = ""
for person in transcript:
if clean_names(person[0]) in pbs_staff:
total_txt += " ".join(person[1]) + " "
if total_txt == "":
return np.nan
txt = tf_vectorizer.transform([total_txt]).astype(np.float64)
sentiment = lgb_model.predict(txt)
return sentiment[0]
temp = pbs[pbs.Speakers.map(lambda x: len(set.union(pbs_staff, x)) > 0)]
temp = temp[temp.Year > 2010]
temp = (temp
.set_index("Date")
.Transcript
.map(text_sent)
.dropna()
.groupby(pd.Grouper(freq="M"))
)
sent = temp.apply(np.mean)
error = temp.apply(np.std)
trace = go.Scatter(
x=sent.index,
y=sent.values,
error_y=dict(
type='data',
array=error.values,
visible=True
)
)
layout = go.Layout(
title = "PBS Newshour Sentiment over time",
yaxis=dict(title="Sentiment"),
xaxis=dict(title="Date"),
)
fig = go.Figure(data=[trace], layout=layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
Most common words on Movie Reviews
This code is copied from [Anisotropic](https://www.kaggle.com/arthurtok) and his excellent [kernel](https://www.kaggle.com/arthurtok/spooky-nlp-and-topic-modelling-tutorial).
###Code
feature_names = tf_vectorizer.get_feature_names()
count_vec = np.asarray(tf.sum(axis=0)).ravel()
zipped = list(zip(feature_names, count_vec))
x, y = (list(x) for x in zip(*sorted(zipped, key=lambda x: x[1], reverse=True)))
# Now I want to extract out on the top 15 and bottom 15 words
Y = np.concatenate([y[0:15], y[-16:-1]])
X = np.concatenate([x[0:15], x[-16:-1]])
# Plotting the Plot.ly plot for the Top 50 word frequencies
data = [go.Bar(
x = x[0:50],
y = y[0:50],
marker= dict(colorscale='Jet',
color = y[0:50]
),
text='Word counts'
)]
layout = go.Layout(
title='Top 50 Word frequencies after Preprocessing'
)
fig = go.Figure(data=data, layout=layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
reliability diagram, expected calibration error

In the multi-class classification setting, the general idea of calibration is that confidence should match accuracy, i.e. when the model is 60% confident, the probability of it being correct should be 60%.

reliability diagram: bin validation examples by predicted probability, then calculate the average accuracy within each bin and plot average accuracy against confidence. Ideal calibration is a diagonal line.

expected calibration error (ECE): the difference in expectation between confidence and accuracy is
$$\mathbb{E}_{\hat{P}}\left[\,\left|\mathbb{P}(\hat{Y}=Y\mid\hat{P}=p)-p\right|\,\right]$$
This can be approximated by a weighted average of the bins' accuracy - confidence differences (the gaps shown as red bars in reliability diagrams):
$$\text{ECE}=\sum_{i}\frac{|B_i|}{n}\,\left|\text{acc}(B_i)-\text{conf}(B_i)\right|$$
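To make the binning concrete, here is a minimal NumPy sketch of the binned ECE computation for a classifier. The array names `confidences`, `predictions` and `labels` are illustrative and not defined elsewhere in this notebook; a reliability diagram is simply the per-bin `acc` plotted against `conf`.

```python
import numpy as np

def expected_calibration_error(confidences, predictions, labels, n_bins=10):
    """Weighted average of |accuracy - confidence| over equal-width confidence bins."""
    bins = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    n = len(confidences)
    for lo, hi in zip(bins[:-1], bins[1:]):
        in_bin = (confidences > lo) & (confidences <= hi)
        if in_bin.sum() == 0:
            continue
        acc = (predictions[in_bin] == labels[in_bin]).mean()  # average accuracy in this bin
        conf = confidences[in_bin].mean()                     # average confidence in this bin
        ece += (in_bin.sum() / n) * abs(acc - conf)
    return ece
```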
###Code
results = pd.read_csv('./results/hlr.settles.acl16.learning_traces.13m.preds', delimiter='\t')
def _bin_prediction(group):
return pd.DataFrame([{'prediction': group.pp.mean()}])
(
ggplot(
results.groupby(
pd.cut(results.p, 20)
).apply(_bin_prediction).reset_index()
)
+ geom_bar(
aes(x='p', y='prediction'),
stat='identity',
fill='blue',
alpha=0.5
)
+ theme_fs()
+ theme(
axis_text_x=element_text(rotation=90)
)
)
def _bin_rmse(group):
return pd.DataFrame([{
'rmse': ((group.pp - group.p) ** 2).mean() ** (1/2)
}])
(
ggplot(
results.groupby(
pd.cut(results.pp, 20)
).apply(_bin_rmse).reset_index()
)
+ geom_bar(
aes(x='pp', y='rmse'),
stat='identity',
fill='blue',
alpha=0.5
)
+ theme_fs()
+ theme(
axis_text_x=element_text(rotation=90)
)
)
###Output
_____no_output_____
###Markdown
When our model directly predicts the probability, instead of using ECE we can directly measure the miscalibration:
$$\text{ECE}=\sum_i\frac{|B_i|}{n}\,\left|\text{prediction}(B_i) - \text{ground\_truth}(B_i)\right|$$
###Code
def _bin_miscalibration(group):
return pd.DataFrame([{
'miscalibration': (group.pp - group.p).abs().mean(),
'prediction': group.pp.mean(),
'ground_truth': group.p.mean()
}])
miscalibration = results.groupby(pd.cut(results.p, 16)).apply(_bin_miscalibration).reset_index()
print('expected calibration error', miscalibration.miscalibration.mean())
(
ggplot(miscalibration)
+ geom_bar(
aes(x='p', y='ground_truth'),
stat='identity',
fill='blue',
alpha=1.0
)
+ geom_bar(
aes(x='p', y='prediction'),
stat='identity',
fill='red',
color='red',
alpha=0.3
)
+ theme_fs()
+ theme(
axis_text_x=element_text(rotation=90)
)
)
###Output
expected calibration error 0.44739960267593976
###Markdown
Half-life regression (HLR)

Short-hand for each record:
\begin{align}
\langle p,\, \Delta,\, x\rangle &= \text{(observed recall rate, lag time since last practice, feature vector)}\\
h &= \frac{-\Delta}{\log_2 p} \quad\text{(half-life back-solved from the forgetting curve } p = 2^{-\Delta/h}\text{)}
\end{align}

Regression against recall probability $$l_\text{recall}(\langle p,\Delta,x\rangle;\theta)=(p-f_\theta(x,\Delta))^2$$
Regression against back-solved half-life $$l_\text{half-life}(\langle p,\Delta,x\rangle;\theta)=\left(\frac{-\Delta}{\log_2{p}}-f_\theta(x,\Delta)\right)^2$$
Binary recall classification $$l_\text{binary}(\langle p,\Delta,x\rangle;\theta)=\text{xent}(f_\theta(x,\Delta),y)$$

Assume that half-life increases exponentially with each repeated exposure; with a linear approximator you get $f_\theta(x,\Delta)=2^{\theta\cdot x}$. Using this parameterization with regression against both recall probability and back-solved half-life, you get Settles' formulation:
$$l(\langle p,\Delta,x\rangle; \theta)=\left(p-2^{\frac{-\Delta}{2^{\theta\cdot x}}}\right)^2+\alpha\left(\frac{-\Delta}{\log_2(p)}-2^{\theta\cdot{x}}\right)^2+\lambda\|\theta\|_2^2$$

Note that this formulation incorporates two heuristics:
1. memory strength follows an exponential forgetting curve, hence the half-life
2. half-life increases exponentially with the number of repetitions

But in their code the `history_seen` and `history_correct` features are square-rooted, essentially throwing away the second heuristic.
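As a concrete illustration of this parameterization, here is a minimal NumPy sketch of the HLR forward pass. The weights, features and clipping bounds below are made up for illustration; the notebook itself only analyses pre-computed predictions loaded earlier.

```python
import numpy as np

def hlr_predict(theta, x, delta_days, min_half_life=15.0 / (24 * 60), max_half_life=274.0):
    """Predict half-life (days) and recall probability for one record.

    theta      -- weight vector (illustrative values only)
    x          -- feature vector, e.g. [sqrt(history_seen), sqrt(history_correct), bias]
    delta_days -- time since the item was last practiced, in days
    """
    half_life = 2.0 ** np.dot(theta, x)                  # h = 2^(theta . x)
    half_life = np.clip(half_life, min_half_life, max_half_life)
    p_recall = 2.0 ** (-delta_days / half_life)          # p = 2^(-delta / h)
    return half_life, p_recall

# toy example: 16 exposures, 9 correct, practiced 3 days ago
theta = np.array([0.5, 1.0, 0.2])                        # illustrative weights
x = np.array([np.sqrt(16), np.sqrt(9), 1.0])
print(hlr_predict(theta, x, delta_days=3.0))
```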
###Code
splitpoint = int(0.9 * len(df))
train_df, test_df = df.iloc[:splitpoint], df.iloc[splitpoint:]
df1 = pd.DataFrame({
'pp': results.pp.tolist(),
'hh': results.hh.tolist(),
'p': results.p.tolist(),
'h': results.h.tolist(),
'history_seen': test_df.history_seen.tolist(),
'history_correct': test_df.history_correct.tolist(),
'session_seen': test_df.session_seen.tolist(),
'session_correct': test_df.session_correct.tolist(),
'delta_days': test_df.delta_days.tolist(),
})
def _bin_delta(group):
return pd.DataFrame([{
'prediction': group.pp.mean(),
'ground_truth': group.p.mean(),
'delta': group.delta_days.mean(),
}])
(
ggplot(
df1.groupby(
pd.cut(df1.delta_days, 16)
).apply(_bin_delta).reset_index()
)
+ geom_bar(
aes(x='delta', y='ground_truth'),
stat='identity',
fill='blue',
alpha=0.3
)
+ geom_bar(
aes(x='delta', y='prediction'),
stat='identity',
fill='red',
color='red',
alpha=0.3
)
+ theme_fs()
+ theme(
axis_text_x=element_text(rotation=90)
)
)
def _bin_history_seen(group):
return pd.DataFrame([{
'prediction': group.pp.mean(),
'ground_truth': group.p.mean(),
'seen': group.history_seen.mean(),
}])
_df1 = df1.loc[df1.history_seen < 100]
(
ggplot(
_df1.groupby(
pd.cut(_df1.history_seen, 30)
).apply(_bin_history_seen).reset_index()
)
+ geom_bar(
aes(x='seen', y='ground_truth'),
stat='identity',
fill='blue',
alpha=0.3
)
+ geom_bar(
aes(x='seen', y='prediction'),
stat='identity',
fill='red',
color='red',
alpha=0.3
)
+ theme_fs()
+ theme(
axis_text_x=element_text(rotation=90)
)
)
###Output
_____no_output_____ |
applications/notebooks/stable/kmeans_model_centroid.ipynb | ###Markdown
Kmeans over a set of GeoTiffs
This notebook loads a set of GeoTiffs into an **RDD** of Tiles, with each Tile being a band in the GeoTiff. Each GeoTiff file contains the **SpringIndex-** or **LastFreeze-** value for one year over the entire USA.

Kmeans takes years as dimensions. Hence, the matrix has cells as rows and the years as columns. To cluster on all years, the matrix needs to be transposed. The notebook has two flavors of matrix transpose, locally by the Spark-driver or distributed using the Spark-workers. Once transposed, the matrix is converted to an **RDD** of dense vectors to be used by the **Kmeans** algorithm from **Spark-MLlib**. The end result is a grid where each cell has a cluster ID, which is then saved into a SingleBand GeoTiff. By saving the result into a GeoTiff, the reader can plot it using a Python notebook such as the one defined in the [python examples](../examples/python).

In this notebook the reader only needs to modify the variables in **Mode of Operation Setup**.

Dependencies
###Code
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}
import geotrellis.proj4.CRS
import geotrellis.raster.io.geotiff.{SinglebandGeoTiff, _}
import geotrellis.raster.io.geotiff.writer.GeoTiffWriter
import geotrellis.raster.{CellType, DoubleArrayTile, MultibandTile, Tile, UByteCellType}
import geotrellis.spark.io.hadoop._
import geotrellis.vector.{Extent, ProjectedExtent}
import org.apache.hadoop.io.SequenceFile.Writer
import org.apache.hadoop.io.{SequenceFile, _}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.mllib.clustering.{KMeans, KMeansModel}
import org.apache.spark.mllib.linalg.distributed._
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import scala.sys.process._
//Spire is a numeric library for Scala which is intended to be generic, fast, and precise.
import spire.syntax.cfor._
###Output
_____no_output_____
###Markdown
Mode of operation
Here the user can define the mode of operation.
* **rdd_offline_mode**: If false, the notebook will create all data from scratch and store grid0, grid0_index, projected_extent and num_cols_rows (from grid0) into HDFS. Otherwise, these data structures are read from HDFS.
* **matrix_offline_mode**: If false, the notebook will create a matrix, transpose it and save it to HDFS. Otherwise, these data structures are read from HDFS.
* **kmeans_offline_mode**: If false, the notebook will train kmeans, run kmeans and store the kmeans model into HDFS. Otherwise, these data structures are read from HDFS.

It is also possible to define which directory of GeoTiffs is to be used and on which **band** to run Kmeans. The options are
* **BloomFinal** or **LeafFinal**, which are multi-band (**4 bands**)
* **DamageIndex** and **LastFreeze**, which are single-band; if **band_num** is set higher, it will be reset to 0

For kmeans the user can define the **number of iterations** and the **number of clusters** as an inclusive range. Such a range is defined using **minClusters**, **maxClusters**, and **stepClusters**. These variables set a loop starting at **minClusters** and stopping at **maxClusters** (inclusive), stepping by **stepClusters** at a time. Note that when using a range, **kmeans offline mode** is not possible and it will be reset to **online mode**.

Mode of Operation setup
###Code
var rdd_offline_mode = true
var matrix_offline_mode = true
var kmeans_offline_mode = true
//GeoTiffs to be read from "hdfs:///user/hadoop/spring-index/"
var dir_path = "hdfs:///user/hadoop/spring-index/"
var offline_dir_path = "hdfs:///user/emma/spring-index/"
var geoTiff_dir = "LeafFinal"
var band_num = 3
//Years between (inclusive) 1980 - 2015
val model_timeseries = (1980, 2015)
var model_first_year = 1989
var model_last_year = 2014
//Mask
val toBeMasked = true
val mask_path = "hdfs:///user/hadoop/usa_mask.tif"
//Kmeans number of iterations and clusters
var numIterations = 75
var minClusters = 70
var maxClusters = 70
var stepClusters = 10
var save_rdds = false
var save_grids = false
var save_kmeans_model = false
###Output
_____no_output_____
###Markdown
DON'T MODIFY ANY PIECE OF CODE FROM HERE ON!!!

Mode of operation validation
###Code
var single_band = false
if (geoTiff_dir == "BloomFinal" || geoTiff_dir == "LeafFinal") {
single_band = false
} else if (geoTiff_dir == "LastFreeze" || geoTiff_dir == "DamageIndex") {
single_band = true
if (band_num > 0) {
println("Since LastFreezze and DamageIndex are single band, we will use band 0!!!")
band_num = 0
}
} else {
println("Directory unknown, please set either BloomFinal, LeafFinal, LastFreeze or DamageIndex!!!")
}
if (minClusters > maxClusters) {
maxClusters = minClusters
stepClusters = 1
}
if (stepClusters < 1) {
stepClusters = 1
}
//Paths to store data structures for Offline runs
var mask_str = ""
if (toBeMasked)
mask_str = "_mask"
var grid0_path = offline_dir_path + geoTiff_dir + "Centroid/grid0" + "_"+ band_num + mask_str
var grid0_index_path = offline_dir_path + geoTiff_dir + "Centroid/grid0_index" + "_"+ band_num + mask_str
var grids_noNaN_path = offline_dir_path + geoTiff_dir + "Centroid/grids_noNaN" + "_"+ band_num + mask_str
var metadata_path = offline_dir_path + geoTiff_dir + "Centroid/metadata" + "_"+ band_num + mask_str
var grids_matrix_path = offline_dir_path + geoTiff_dir + "Centroid/grids_matrix" + "_"+ band_num + mask_str
//Check offline modes
var conf = sc.hadoopConfiguration
var fs = org.apache.hadoop.fs.FileSystem.get(conf)
val rdd_offline_exists = fs.exists(new org.apache.hadoop.fs.Path(grid0_path))
val matrix_offline_exists = fs.exists(new org.apache.hadoop.fs.Path(grids_matrix_path))
if (rdd_offline_mode != rdd_offline_exists) {
println("\"Load GeoTiffs\" offline mode is not set properly, i.e., either it was set to false and the required file does not exist or vice-versa. We will reset it to " + rdd_offline_exists.toString())
rdd_offline_mode = rdd_offline_exists
}
if (matrix_offline_mode != matrix_offline_exists) {
println("\"Matrix\" offline mode is not set properly, i.e., either it was set to false and the required file does not exist or vice-versa. We will reset it to " + matrix_offline_exists.toString())
matrix_offline_mode = matrix_offline_exists
}
if (!fs.exists(new org.apache.hadoop.fs.Path(mask_path))) {
println("The mask path: " + mask_path + " is invalid!!!")
}
//Years
//val model_years = 1980 to 2015
val model_years = model_timeseries._1 to model_timeseries._2
if (!model_years.contains(model_first_year) || !(model_years.contains(model_last_year))) {
println("Invalid range of years for " + geoTiff_dir + ". I should be between " + model_first_year + " and " + model_last_year)
System.exit(0)
}
var model_years_range = (model_years.indexOf(model_first_year), model_years.indexOf(model_last_year))
var num_kmeans :Int = 1
if (minClusters != maxClusters) {
num_kmeans = ((maxClusters - minClusters) / stepClusters) + 1
}
var kmeans_model_paths :Array[String] = Array.fill[String](num_kmeans)("")
var wssse_path :String = offline_dir_path + geoTiff_dir + "Centroid/" + numIterations +"_wssse"
var geotiff_hdfs_paths :Array[String] = Array.fill[String](num_kmeans)("")
var geotiff_tmp_paths :Array[String] = Array.fill[String](num_kmeans)("")
var numClusters_id = 0
if (num_kmeans > 1) {
numClusters_id = 0
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
kmeans_model_paths(numClusters_id) = offline_dir_path + geoTiff_dir + "Centroid/kmeans_model_" + band_num + "_" + numClusters + "_" + numIterations
//Check if the file exists
val kmeans_exist = fs.exists(new org.apache.hadoop.fs.Path(kmeans_model_paths(numClusters_id)))
if (kmeans_exist && !kmeans_offline_mode) {
println("The kmeans model path " + kmeans_model_paths(numClusters_id) + " exists, please remove it.")
} else if (!kmeans_exist && kmeans_offline_mode) {
kmeans_offline_mode = false
}
geotiff_hdfs_paths(numClusters_id) = offline_dir_path + geoTiff_dir + "Centroid/clusters_" + band_num + "_" + numClusters + "_" + numIterations + ".tif"
geotiff_tmp_paths(numClusters_id) = "/tmp/clusters_" + band_num + "_" + geoTiff_dir + "_" + numClusters + "_" + numIterations + ".tif"
if (fs.exists(new org.apache.hadoop.fs.Path(geotiff_hdfs_paths(numClusters_id)))) {
println("There is already a GeoTiff with the path: " + geotiff_hdfs_paths(numClusters_id) + ". Please make either a copy or move it to another location, otherwise, it will be over-written.")
}
numClusters_id += 1
}
kmeans_offline_mode = false
} else {
kmeans_model_paths(0) = offline_dir_path + geoTiff_dir + "Centroid/kmeans_model_" + band_num + "_" + minClusters + "_" + numIterations
val kmeans_offline_exists = fs.exists(new org.apache.hadoop.fs.Path(kmeans_model_paths(0)))
if (kmeans_offline_mode != kmeans_offline_exists) {
println("\"Kmeans\" offline mode is not set properly, i.e., either it was set to false and the required file does not exist or vice-versa. We will reset it to " + kmeans_offline_exists.toString())
kmeans_offline_mode = kmeans_offline_exists
}
geotiff_hdfs_paths(0) = offline_dir_path + geoTiff_dir + "Centroid/clusters_" + band_num + "_" + minClusters + "_" + numIterations + ".tif"
geotiff_tmp_paths(0) = "/tmp/clusters_" + band_num + "_" + geoTiff_dir + "_" + minClusters + "_" + numIterations + ".tif"
if (fs.exists(new org.apache.hadoop.fs.Path(geotiff_hdfs_paths(0)))) {
println("There is already a GeoTiff with the path: " + geotiff_hdfs_paths(0) + ". Please make either a copy or move it to another location, otherwise, it will be over-written.")
}
}
###Output
_____no_output_____
###Markdown
Functions to (de)serialize any structure into Array[Byte]
###Code
def serialize(value: Any): Array[Byte] = {
val out_stream: ByteArrayOutputStream = new ByteArrayOutputStream()
val obj_out_stream = new ObjectOutputStream(out_stream)
obj_out_stream.writeObject(value)
obj_out_stream.close
out_stream.toByteArray
}
def deserialize(bytes: Array[Byte]): Any = {
val obj_in_stream = new ObjectInputStream(new ByteArrayInputStream(bytes))
val value = obj_in_stream.readObject
obj_in_stream.close
value
}
###Output
_____no_output_____
###Markdown
Load GeoTiffs
Using GeoTrellis, all GeoTiffs of a directory are loaded into an RDD. Using the RDD, we extract a grid from the first file to later store the Kmeans cluster IDs, we build an index to populate such a grid, and we filter out all NaN values.
###Code
def hadoopGeoTiffRDD(satellite_filepath :String, pattern :String): RDD[(Int, (ProjectedExtent, Tile))] = {
val listFiles = sc.binaryFiles(satellite_filepath + "/" + pattern).sortBy(_._1).keys.collect()
var prevRDD :RDD[(Int, (ProjectedExtent, Tile))] = sc.emptyRDD
cfor(0)(_ < listFiles.length, _ + 1) { k =>
val filePath :String = listFiles(k)
val kB = sc.broadcast(k)
val currRDD = sc.hadoopGeoTiffRDD(filePath).map(m => (kB.value, m))
prevRDD = currRDD.union(prevRDD)
//kB.destroy()
}
prevRDD.sortBy(_._1)
}
def hadoopMultibandGeoTiffRDD(satellite_filepath :String, pattern :String): RDD[(Int, (ProjectedExtent, MultibandTile))] = {
val listFiles = sc.binaryFiles(satellite_filepath + "/" + pattern).sortBy(_._1).keys.collect()
var prevRDD :RDD[(Int,(ProjectedExtent, MultibandTile))] = sc.emptyRDD
cfor(0)(_ < listFiles.length, _ + 1) { k =>
val filePath :String = listFiles(k)
val kB = sc.broadcast(k)
val currRDD = sc.hadoopMultibandGeoTiffRDD(filePath).map(m => (kB.value,m))
prevRDD = currRDD.union(prevRDD)
//kB.destroy()
}
prevRDD.sortBy(_._1)
}
var t0 = System.nanoTime()
//Global variables
var projected_extent = new ProjectedExtent(new Extent(0,0,0,0), CRS.fromName("EPSG:3857"))
var grid0: RDD[(Long, Double)] = sc.emptyRDD
var grid0_index: RDD[Long] = sc.emptyRDD
var grids_noNaN_RDD: RDD[(Int, Array[Double])] = sc.emptyRDD
var num_cols_rows :(Int, Int) = (0, 0)
var cellT :CellType = UByteCellType
var grids_RDD :RDD[(Int, Array[Double])] = sc.emptyRDD
var mask_tile0 :Tile = new SinglebandGeoTiff(geotrellis.raster.ArrayTile.empty(cellT, num_cols_rows._1, num_cols_rows._2), projected_extent.extent, projected_extent.crs, Tags.empty, GeoTiffOptions.DEFAULT).tile
var grid_cells_size :Long = 0
//Load Mask
if (toBeMasked) {
val mask_tiles_RDD = sc.hadoopGeoTiffRDD(mask_path).values
val mask_tiles_withIndex = mask_tiles_RDD.zipWithIndex().map{case (e,v) => (v,e)}
mask_tile0 = (mask_tiles_withIndex.filter(m => m._1==0).values.collect())(0)
}
//Local variables
val pattern: String = "*.tif"
val filepath: String = dir_path + geoTiff_dir
if (rdd_offline_mode) {
grids_noNaN_RDD = sc.objectFile(grids_noNaN_path)
grid0 = sc.objectFile(grid0_path)
grid0_index = sc.objectFile(grid0_index_path)
val metadata = sc.sequenceFile(metadata_path, classOf[IntWritable], classOf[BytesWritable]).map(_._2.copyBytes()).collect()
projected_extent = deserialize(metadata(0)).asInstanceOf[ProjectedExtent]
num_cols_rows = (deserialize(metadata(1)).asInstanceOf[Int], deserialize(metadata(2)).asInstanceOf[Int])
cellT = deserialize(metadata(3)).asInstanceOf[CellType]
} else {
if (single_band) {
//Lets load a Singleband GeoTiffs and return RDD just with the tiles.
var geos_RDD = hadoopGeoTiffRDD(filepath, pattern)
geos_RDD.cache()
var tiles_RDD :RDD[(Int, Tile)] = geos_RDD.map{ case (i,(p,t)) => (i,t)}
        //Retrieve the number of cols and rows of the Tile's grid
val tiles_withIndex = tiles_RDD//.zipWithIndex().map{case (e,v) => (v,e)}
val tile0 = (tiles_withIndex.filter(m => m._1==0).values.collect())(0)
num_cols_rows = (tile0.cols,tile0.rows)
cellT = tile0.cellType
//Retrieve the ProjectExtent which contains metadata such as CRS and bounding box
val projected_extents_withIndex = geos_RDD.map{ case (i,(p,t)) => (i,p)}//.keys.zipWithIndex().map { case (e, v) => (v, e) }
projected_extent = (projected_extents_withIndex.filter(m => m._1 == 0).values.collect()) (0)
if (toBeMasked) {
val mask_tile_broad :Broadcast[Tile] = sc.broadcast(mask_tile0)
grids_RDD = tiles_RDD.map{ case (i,m) => (i, m.localInverseMask(mask_tile_broad.value, 1, -1000).toArrayDouble().filter(!_.isNaN))}
} else {
grids_RDD = tiles_RDD.map{ case (i,m) => (i, m.toArrayDouble().filter(!_.isNaN))}
}
} else {
//Lets load Multiband GeoTiffs and return RDD just with the tiles.
val geos_RDD = hadoopMultibandGeoTiffRDD(filepath, pattern)
geos_RDD.cache()
val tiles_RDD = geos_RDD.map{ case (i,(p,t)) => (i,t)}
        //Retrieve the number of cols and rows of the Tile's grid
val tiles_withIndex = tiles_RDD//.zipWithIndex().map{case (e,v) => (v,e)}
val tile0 = (tiles_withIndex.filter(m => m._1==0).values.collect())(0)
num_cols_rows = (tile0.cols,tile0.rows)
cellT = tile0.cellType
//Retrieve the ProjectExtent which contains metadata such as CRS and bounding box
val projected_extents_withIndex = geos_RDD.map{ case (i,(p,t)) => (i,p)}//.keys.zipWithIndex().map { case (e, v) => (v, e) }
projected_extent = (projected_extents_withIndex.filter(m => m._1 == 0).values.collect()) (0)
//Lets read the average of the Spring-Index which is stored in the 4th band
val band_numB :Broadcast[Int] = sc.broadcast(band_num)
if (toBeMasked) {
val mask_tile_broad :Broadcast[Tile] = sc.broadcast(mask_tile0)
grids_RDD = tiles_RDD.map{ case (i,m) => (i, m.band(band_numB.value).localInverseMask(mask_tile_broad.value, 1, -1000).toArrayDouble())}
} else {
grids_RDD = tiles_RDD.map{ case (i,m) => (i, m.band(band_numB.value).toArrayDouble())}
}
}
//Get Index for each Cell
val grids_withIndex = grids_RDD
if (toBeMasked) {
grid0_index = grids_withIndex.filter(m => m._1 == 0).values.flatMap(m => m).zipWithIndex.filter(m => m._1 != -1000.0).map { case (v, i) => (i) }
} else {
//Dense vector
//.filter(m => !m._1.isNaN).map { case (v, i) => (i) }
//Sparse Vector
grid0_index = grids_withIndex.filter(m => m._1 == 0).values.flatMap(m => m).zipWithIndex.map { case (v, i) => (i) }
}
//Get the Tile's grid
grid0 = grids_withIndex.filter(m => m._1 == 0).values.flatMap( m => m).zipWithIndex.map{case (v,i) => (i,v)}
//Lets filter out NaN
if (toBeMasked) {
grids_noNaN_RDD = grids_RDD.map{ case (i,m) => (i,m.filter(m => m != -1000.0))}
} else {
//Dense Vector
grids_noNaN_RDD = grids_RDD
//Parse Vector
//grids_noNaN_RDD = grids_RDD.map(m => m.filter(!_.isNaN))
}
//Store data in HDFS
if (save_rdds) {
grid0.saveAsObjectFile(grid0_path)
grid0_index.saveAsObjectFile(grid0_index_path)
grids_noNaN_RDD.saveAsObjectFile(grids_noNaN_path)
}
val grids_noNaN_RDD_withIndex = grids_noNaN_RDD//.zipWithIndex().map { case (e, v) => (v, e) }
val mod_year_diff = model_first_year-model_timeseries._1
val mod_year_diffB = sc.broadcast(mod_year_diff)
grids_noNaN_RDD = grids_noNaN_RDD_withIndex.filterByRange(model_years_range._1, model_years_range._2).map{ case(i,a) => (i-(mod_year_diffB.value),a)}
if (save_rdds) {
val writer: SequenceFile.Writer = SequenceFile.createWriter(conf,
Writer.file(metadata_path),
Writer.keyClass(classOf[IntWritable]),
Writer.valueClass(classOf[BytesWritable])
)
writer.append(new IntWritable(1), new BytesWritable(serialize(projected_extent)))
writer.append(new IntWritable(2), new BytesWritable(serialize(num_cols_rows._1)))
writer.append(new IntWritable(3), new BytesWritable(serialize(num_cols_rows._2)))
writer.append(new IntWritable(4), new BytesWritable(serialize(cellT)))
writer.hflush()
writer.close()
}
}
grid_cells_size = grid0_index.count().toInt
var t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
###Output
Elapsed time: 606837161690ns
###Markdown
Matrix
We need to do a matrix transpose to have clusters per cell and not per year. With a GeoTiff representing a single year, the loaded data looks like this:
```
bands_RDD.map(s => Vectors.dense(s)).cache()

//The vectors are rows and therefore the matrix will look like this:
[Vectors.dense(0.0, 1.0, 2.0),
 Vectors.dense(3.0, 4.0, 5.0),
 Vectors.dense(6.0, 7.0, 8.0),
 Vectors.dense(9.0, 0.0, 1.0)]
```
To achieve that we convert the **RDD[Vector]** into a distributed matrix, a [**CoordinateMatrix**](https://spark.apache.org/docs/latest/mllib-data-types.html#coordinatematrix), which has a **transpose** method.
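As a shape-level illustration of why the transpose is needed, here is a plain NumPy sketch using the small example matrix above; the notebook itself does this distributed with Spark's CoordinateMatrix in the next cell.

```python
import numpy as np

# loaded layout: one row per year, one column per grid cell (years x cells)
years_by_cells = np.array([[0.0, 1.0, 2.0],
                           [3.0, 4.0, 5.0],
                           [6.0, 7.0, 8.0],
                           [9.0, 0.0, 1.0]])

# kmeans needs one observation per grid cell, with the years as features,
# so we transpose to (cells x years): each row is one cell's time series
cells_by_years = years_by_cells.T
print(cells_by_years.shape)  # (3, 4)
```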
###Code
t0 = System.nanoTime()
//Global variables
var grids_matrix: RDD[Vector] = sc.emptyRDD
val grid_cells_sizeB = sc.broadcast(grid_cells_size)
if (matrix_offline_mode) {
grids_matrix = sc.objectFile(grids_matrix_path)
} else {
//Dense Vector
//val mat :RowMatrix = new RowMatrix(grids_noNaN_RDD.map(m => Vectors.dense(m)))
//Sparse Vector
val indRowMat :IndexedRowMatrix = new IndexedRowMatrix(grids_noNaN_RDD.map{ case (i, m) => (i,m.zipWithIndex)}.map{ case (i,m) => (i,m.filter(!_._1.isNaN))}.map{ case (i,m) => new IndexedRow(i.toLong, Vectors.sparse(grid_cells_sizeB.value.toInt, m.map(v => v._2), m.map(v => v._1)))})
grids_matrix = indRowMat.toCoordinateMatrix().transpose().toIndexedRowMatrix().rows.sortBy(_.index).map(_.vector)
if (save_grids) {
grids_matrix.saveAsObjectFile(grids_matrix_path)
}
}
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
###Output
[Stage 25:====================================================>(994 + 6) / 1000]Elapsed time: 268104028665ns
###Markdown
Kmeans
We use Kmeans from Spark-MLlib. The user should only modify the variables in the Kmeans setup.

Kmeans Training
###Code
t0 = System.nanoTime()
//Global variables
var kmeans_models :Array[KMeansModel] = new Array[KMeansModel](num_kmeans)
var wssse_data :List[(Int, Int, Double)] = List.empty
if (kmeans_offline_mode) {
numClusters_id = 0
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
if (!fs.exists(new org.apache.hadoop.fs.Path(kmeans_model_paths(numClusters_id)))) {
println("One of the files does not exist, we will abort!!!")
System.exit(0)
} else {
kmeans_models(numClusters_id) = KMeansModel.load(sc, kmeans_model_paths(numClusters_id))
}
numClusters_id += 1
}
val wssse_data_RDD :RDD[(Int, Int, Double)] = sc.objectFile(wssse_path)
wssse_data = wssse_data_RDD.collect().toList
} else {
numClusters_id = 0
if (fs.exists(new org.apache.hadoop.fs.Path(wssse_path))) {
val wssse_data_RDD :RDD[(Int, Int, Double)] = sc.objectFile(wssse_path)
wssse_data = wssse_data_RDD.collect().toList
}
grids_matrix.cache()
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
println(numClusters)
kmeans_models(numClusters_id) = {
KMeans.train(grids_matrix, numClusters, numIterations)
}
// Evaluate clustering by computing Within Set Sum of Squared Errors
val WSSSE = kmeans_models(numClusters_id).computeCost(grids_matrix)
println("Within Set Sum of Squared Errors = " + WSSSE)
wssse_data = wssse_data :+ (numClusters, numIterations, WSSSE)
//Save kmeans model
if (save_kmeans_model) {
if (!fs.exists(new org.apache.hadoop.fs.Path(kmeans_model_paths(numClusters_id)))) {
kmeans_models(numClusters_id).save(sc, kmeans_model_paths(numClusters_id))
}
}
numClusters_id += 1
if (fs.exists(new org.apache.hadoop.fs.Path(wssse_path))) {
println("We will delete the wssse file")
try { fs.delete(new org.apache.hadoop.fs.Path(wssse_path), true) } catch { case _ : Throwable => { } }
}
println("Lets create it with the new data")
sc.parallelize(wssse_data, 1).saveAsObjectFile(wssse_path)
}
//Un-persist it to save memory
grids_matrix.unpersist()
}
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
###Output
70
Within Set Sum of Squared Errors = 4.774033917427731E9
We will delete the wssse file
Lets create it with the new data
Elapsed time: 619859700965ns
###Markdown
Inspect WSSSE
###Code
t0 = System.nanoTime()
//current
println(wssse_data)
//from disk
if (fs.exists(new org.apache.hadoop.fs.Path(wssse_path))) {
var wssse_data_tmp :RDD[(Int, Int, Double)] = sc.objectFile(wssse_path)//.collect()//.toList
println(wssse_data_tmp.collect().toList)
}
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
###Output
List((70,75,7.124371401253984E9), (70,75,4.733514498278134E9), (70,75,4.737721105803598E9), (70,75,4.774033917427731E9))
List((70,75,7.124371401253984E9), (70,75,4.733514498278134E9), (70,75,4.737721105803598E9), (70,75,4.774033917427731E9))
Elapsed time: 208855881ns
###Markdown
Run Kmeans clustering
Run Kmeans and obtain the cluster for each cell.
###Code
t0 = System.nanoTime()
//Cache it so kmeans is more efficient
grids_matrix.cache()
var kmeans_res: Array[RDD[Int]] = Array.fill(num_kmeans)(sc.emptyRDD)
var kmeans_centroids: Array[Array[Double]] = Array.fill(num_kmeans)(Array.emptyDoubleArray)
numClusters_id = 0
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
kmeans_res(numClusters_id) = kmeans_models(numClusters_id).predict(grids_matrix)
kmeans_centroids(numClusters_id) = kmeans_models(numClusters_id).clusterCenters.map(m => m(0))
numClusters_id += 1
}
//Un-persist it to save memory
grids_matrix.unpersist()
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
###Output
Elapsed time: 48229846ns
###Markdown
Sanity test
It can be skipped; it only shows the cluster IDs for the first 150 cells.
###Code
t0 = System.nanoTime()
val kmeans_res_out = kmeans_res(0).take(150)
kmeans_res_out.foreach(print)
println(kmeans_res_out.size)
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
###Output
424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242150
Elapsed time: 416618559ns
###Markdown
Build GeoTiff with Kmeans cluster_IDs
The grid with the cluster IDs is stored in a SingleBand GeoTiff and uploaded to HDFS.

Assign cluster ID to each grid cell and save the grid as a SingleBand GeoTiff
To assign the cluster ID to each grid cell it is necessary to get the indices of the grid cells they belong to. The process is not straightforward because the ArrayDouble used for the creation of each dense Vector does not contain the NaN values; therefore there is no direct mapping between the indices in the Tile's grid and the ones in **kmeans_res** (kmeans result). To join the two RDDs, the approach was obtained from a stackoverflow post on [how to perform basic joins of two rdd tables in spark using python](https://stackoverflow.com/questions/31257077/how-do-you-perform-basic-joins-of-two-rdd-tables-in-spark-using-python). A small sketch of the index-join idea is shown below.
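A minimal sketch of that index join in plain Python/NumPy, just to show the idea; the Scala cell below does the equivalent at scale with RDD `join`/`leftOuterJoin`, and the arrays here are made up for illustration.

```python
import numpy as np

# compact results: one cluster id per *non-NaN* cell, in compact order
cluster_ids = np.array([2, 0, 1])      # stands in for kmeans_res, illustrative
# grid0_index: position in the full grid for each compact index
grid_positions = np.array([0, 2, 5])   # e.g. only cells 0, 2 and 5 were valid

# full grid of 6 cells, NaN where no cluster was assigned (masked / NaN cells)
full_grid = np.full(6, np.nan)
full_grid[grid_positions] = cluster_ids
print(full_grid)                       # [ 2. nan  0. nan nan  1.]
```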
###Code
t0 = System.nanoTime()
numClusters_id = 0
val grid0_index_I = grid0_index.zipWithIndex().map{ case (v,i) => (i,v)}
grid0_index_I.cache()
grid0.cache()
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
//Merge two RDDs, one containing the clusters_ID indices and the other one the indices of a Tile's grid cells
val cluster_cell_pos = ((kmeans_res(numClusters_id).zipWithIndex().map{ case (v,i) => (i,v)}).join(grid0_index_I)).map{ case (k,(v,i)) => (v,i)}
//Associate a Cluster_IDs to respective Grid_cell
val grid_clusters :RDD[ (Long, (Double, Option[Int]))] = grid0.map { case (i, v) => if (v == 0.0) (i, Double.NaN) else (i, v) }.leftOuterJoin(cluster_cell_pos.map{ case (c,i) => (i.toLong, c)})
//Convert all None to NaN
val grid_clusters_res = grid_clusters.sortByKey(true).map{case (k, (v, c)) => if (c == None) (k, Int.MaxValue) else (k, c.get)}
//Define a Tile
val cluster_cellsID :Array[Int] = grid_clusters_res.values.collect()
var cluster_cells :Array[Double] = Array.fill(cluster_cellsID.length)(Double.NaN)
cfor(0)(_ < cluster_cellsID.length, _ + 1) { cellID =>
if (cluster_cellsID(cellID) != Int.MaxValue) {
cluster_cells(cellID) = kmeans_centroids(numClusters_id)(cluster_cellsID(cellID))
}
}
val cluster_cellsD = DoubleArrayTile(cluster_cells, num_cols_rows._1, num_cols_rows._2)
val geoTif = new SinglebandGeoTiff(cluster_cellsD, projected_extent.extent, projected_extent.crs, Tags.empty, GeoTiffOptions(compression.DeflateCompression))
//Save to /tmp/
GeoTiffWriter.write(geoTif, geotiff_tmp_paths(numClusters_id))
//Upload to HDFS
var cmd = "hadoop dfs -copyFromLocal -f " + geotiff_tmp_paths(numClusters_id) + " " + geotiff_hdfs_paths(numClusters_id)
Process(cmd)!
//Remove from /tmp/
cmd = "rm -fr " + geotiff_tmp_paths(numClusters_id)
Process(cmd)!
numClusters_id += 1
}
grid0_index_I.unpersist()
grid0.unpersist()
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
###Output
[Stage 463:===================================================>(996 + 4) / 1000]]DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.
Elapsed time: 107152762405ns
|
3. Denoising_Autoencoder.ipynb | ###Markdown
Denoising Autoencoder

Importing Libraries
###Code
from keras.models import Model
from keras.layers import Dense, Input
from keras.datasets import mnist
import numpy as np
import warnings
warnings.filterwarnings('ignore')
###Output
C:\Users\hp\Anaconda3\lib\site-packages\h5py\__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
###Markdown
Preparing Dataset
###Code
# Load MNIST Dataset
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
###Output
(60000, 784)
(10000, 784)
###Markdown
Adding noise
###Code
# Add random noise
corruption_level = 0.3
x_train_noisy = x_train + corruption_level * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + corruption_level * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
print(x_train_noisy.shape)
print(x_test_noisy.shape)
###Output
(60000, 784)
(10000, 784)
###Markdown
Autoencoder Model
###Code
# Hyper parameters
batch_size = 128
nb_epoch = 5
# Parameters for MNIST dataset
img_rows, img_cols = 28, 28
# Parameters for denoising autoencoder
nb_visible = img_rows * img_cols
nb_hidden = 32
# Build autoencoder model
input_img = Input(shape=(nb_visible,))
encoded = Dense(nb_hidden, activation='relu')(input_img)
decoded = Dense(nb_visible, activation='sigmoid')(encoded)
autoencoder = Model(input=input_img, output=decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) (None, 784) 0
_________________________________________________________________
dense_3 (Dense) (None, 32) 25120
_________________________________________________________________
dense_4 (Dense) (None, 784) 25872
=================================================================
Total params: 50,992
Trainable params: 50,992
Non-trainable params: 0
_________________________________________________________________
###Markdown
Training
###Code
# Train
autoencoder.fit(x_train_noisy, x_train,
nb_epoch=nb_epoch, batch_size=batch_size, shuffle=True, verbose=1,
validation_data=(x_test_noisy, x_test))
###Output
Train on 60000 samples, validate on 10000 samples
Epoch 1/5
60000/60000 [==============================] - 3s 53us/step - loss: 0.3036 - val_loss: 0.2596
Epoch 2/5
60000/60000 [==============================] - 3s 44us/step - loss: 0.2439 - val_loss: 0.2250
Epoch 3/5
60000/60000 [==============================] - 3s 43us/step - loss: 0.2131 - val_loss: 0.2005
Epoch 4/5
60000/60000 [==============================] - 2s 42us/step - loss: 0.1939 - val_loss: 0.1851
Epoch 5/5
60000/60000 [==============================] - 2s 41us/step - loss: 0.1806 - val_loss: 0.1737
###Markdown
Evaluation
###Code
# Evaluate
evaluation = autoencoder.evaluate(x_test_noisy, x_test, batch_size=batch_size, verbose=1)
print('\nSummary: Loss over the test dataset: %.2f' % (evaluation))
###Output
10000/10000 [==============================] - 0s 16us/step
Summary: Loss over the test dataset: 0.17
###Markdown
Visualize the reconstruction
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# Decode test images
decoded_imgs = autoencoder.predict(x_test_noisy)
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test_noisy[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
###Output
_____no_output_____
###Markdown
Visualize the weights
###Code
w = []
for layer in autoencoder.layers:
weights = layer.get_weights()
w.append(weights)
layer1 = np.array(w[1][0])
print("Shape of Hidden Layer",layer1.shape)
print("Visualization of Hidden Layer")
fig=plt.figure(figsize=(12, 12))
columns = 8
rows = int(nb_hidden/8)
for i in range(1, columns*rows +1):
fig.add_subplot(rows, columns, i)
plt.imshow(layer1[:,i-1].reshape(28,28),cmap='gray')
plt.show()
###Output
Shape of Hidden Layer (784, 32)
Visualization of Hidden Layer
###Markdown
Let's corrupt the data too much and see what happens
###Code
# Add random noise
corruption_level = 0.7
x_train_noisy = x_train + corruption_level * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + corruption_level * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
print(x_train_noisy.shape)
print(x_test_noisy.shape)
###Output
(60000, 784)
(10000, 784)
###Markdown
Model Training and Evaluation
###Code
# Hyper parameters
batch_size = 128
nb_epoch = 5
# Parameters for MNIST dataset
img_rows, img_cols = 28, 28
# Parameters for denoising autoencoder
nb_visible = img_rows * img_cols
nb_hidden = 32
# Build autoencoder model
input_img = Input(shape=(nb_visible,))
encoded = Dense(nb_hidden, activation='relu')(input_img)
decoded = Dense(nb_visible, activation='sigmoid')(encoded)
autoencoder = Model(input=input_img, output=decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.summary()
# Train
autoencoder.fit(x_train_noisy, x_train,
nb_epoch=nb_epoch, batch_size=batch_size, shuffle=True, verbose=1,
validation_data=(x_test_noisy, x_test))
# Evaluate
evaluation = autoencoder.evaluate(x_test_noisy, x_test, batch_size=batch_size, verbose=1)
print('\nSummary: Loss over the test dataset: %.2f' % (evaluation))
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) (None, 784) 0
_________________________________________________________________
dense_5 (Dense) (None, 32) 25120
_________________________________________________________________
dense_6 (Dense) (None, 784) 25872
=================================================================
Total params: 50,992
Trainable params: 50,992
Non-trainable params: 0
_________________________________________________________________
Train on 60000 samples, validate on 10000 samples
Epoch 1/5
60000/60000 [==============================] - 3s 49us/step - loss: 0.2956 - val_loss: 0.2646
Epoch 2/5
60000/60000 [==============================] - 3s 42us/step - loss: 0.2619 - val_loss: 0.2564
Epoch 3/5
60000/60000 [==============================] - 3s 43us/step - loss: 0.2473 - val_loss: 0.2357
Epoch 4/5
60000/60000 [==============================] - 2s 37us/step - loss: 0.2280 - val_loss: 0.2194
Epoch 5/5
60000/60000 [==============================] - 2s 37us/step - loss: 0.2151 - val_loss: 0.2088
10000/10000 [==============================] - 0s 15us/step
Summary: Loss over the test dataset: 0.21
###Markdown
Visualize the reconstruction
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# Decode test images
decoded_imgs = autoencoder.predict(x_test_noisy)
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test_noisy[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
###Output
_____no_output_____
###Markdown
Visualize the weights
###Code
w = []
for layer in autoencoder.layers:
weights = layer.get_weights()
w.append(weights)
layer1 = np.array(w[1][0])
print("Shape of Hidden Layer",layer1.shape)
print("Visualization of Hidden Layer")
fig=plt.figure(figsize=(12, 12))
columns = 8
rows = int(nb_hidden/8)
for i in range(1, columns*rows +1):
fig.add_subplot(rows, columns, i)
plt.imshow(layer1[:,i-1].reshape(28,28),cmap='gray')
plt.show()
###Output
Shape of Hidden Layer (784, 32)
Visualization of Hidden Layer
|
SC0X_Python_Samples.ipynb | ###Markdown
###Code
!pip install ortools
"""Simple travelling salesman problem on a circuit board."""
from __future__ import print_function
import math
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
def create_data_model():
"""Stores the data for the problem."""
data = {}
# Locations in block units
data['locations'] = [
(288, 149), (288, 129), (270, 133), (256, 141), (256, 157), (246, 157),
(236, 169), (228, 169), (228, 161), (220, 169), (212, 169), (204, 169),
(196, 169), (188, 169), (196, 161), (188, 145), (172, 145), (164, 145),
(156, 145), (148, 145), (140, 145), (148, 169), (164, 169), (172, 169),
(156, 169), (140, 169), (132, 169), (124, 169), (116, 161), (104, 153),
(104, 161), (104, 169), (90, 165), (80, 157), (64, 157), (64, 165),
(56, 169), (56, 161), (56, 153), (56, 145), (56, 137), (56, 129),
(56, 121), (40, 121), (40, 129), (40, 137), (40, 145), (40, 153),
(40, 161), (40, 169), (32, 169), (32, 161), (32, 153), (32, 145),
(32, 137), (32, 129), (32, 121), (32, 113), (40, 113), (56, 113),
(56, 105), (48, 99), (40, 99), (32, 97), (32, 89), (24, 89),
(16, 97), (16, 109), (8, 109), (8, 97), (8, 89), (8, 81),
(8, 73), (8, 65), (8, 57), (16, 57), (8, 49), (8, 41),
(24, 45), (32, 41), (32, 49), (32, 57), (32, 65), (32, 73),
(32, 81), (40, 83), (40, 73), (40, 63), (40, 51), (44, 43),
(44, 35), (44, 27), (32, 25), (24, 25), (16, 25), (16, 17),
(24, 17), (32, 17), (44, 11), (56, 9), (56, 17), (56, 25),
(56, 33), (56, 41), (64, 41), (72, 41), (72, 49), (56, 49),
(48, 51), (56, 57), (56, 65), (48, 63), (48, 73), (56, 73),
(56, 81), (48, 83), (56, 89), (56, 97), (104, 97), (104, 105),
(104, 113), (104, 121), (104, 129), (104, 137), (104, 145), (116, 145),
(124, 145), (132, 145), (132, 137), (140, 137), (148, 137), (156, 137),
(164, 137), (172, 125), (172, 117), (172, 109), (172, 101), (172, 93),
(172, 85), (180, 85), (180, 77), (180, 69), (180, 61), (180, 53),
(172, 53), (172, 61), (172, 69), (172, 77), (164, 81), (148, 85),
(124, 85), (124, 93), (124, 109), (124, 125), (124, 117), (124, 101),
(104, 89), (104, 81), (104, 73), (104, 65), (104, 49), (104, 41),
(104, 33), (104, 25), (104, 17), (92, 9), (80, 9), (72, 9),
(64, 21), (72, 25), (80, 25), (80, 25), (80, 41), (88, 49),
(104, 57), (124, 69), (124, 77), (132, 81), (140, 65), (132, 61),
(124, 61), (124, 53), (124, 45), (124, 37), (124, 29), (132, 21),
(124, 21), (120, 9), (128, 9), (136, 9), (148, 9), (162, 9),
(156, 25), (172, 21), (180, 21), (180, 29), (172, 29), (172, 37),
(172, 45), (180, 45), (180, 37), (188, 41), (196, 49), (204, 57),
(212, 65), (220, 73), (228, 69), (228, 77), (236, 77), (236, 69),
(236, 61), (228, 61), (228, 53), (236, 53), (236, 45), (228, 45),
(228, 37), (236, 37), (236, 29), (228, 29), (228, 21), (236, 21),
(252, 21), (260, 29), (260, 37), (260, 45), (260, 53), (260, 61),
(260, 69), (260, 77), (276, 77), (276, 69), (276, 61), (276, 53),
(284, 53), (284, 61), (284, 69), (284, 77), (284, 85), (284, 93),
(284, 101), (288, 109), (280, 109), (276, 101), (276, 93), (276, 85),
(268, 97), (260, 109), (252, 101), (260, 93), (260, 85), (236, 85),
(228, 85), (228, 93), (236, 93), (236, 101), (228, 101), (228, 109),
(228, 117), (228, 125), (220, 125), (212, 117), (204, 109), (196, 101),
(188, 93), (180, 93), (180, 101), (180, 109), (180, 117), (180, 125),
(196, 145), (204, 145), (212, 145), (220, 145), (228, 145), (236, 145),
(246, 141), (252, 125), (260, 129), (280, 133)
] # yapf: disable
data['num_vehicles'] = 1
data['depot'] = 0
return data
def compute_euclidean_distance_matrix(locations):
"""Creates callback to return distance between points."""
distances = {}
for from_counter, from_node in enumerate(locations):
distances[from_counter] = {}
for to_counter, to_node in enumerate(locations):
if from_counter == to_counter:
distances[from_counter][to_counter] = 0
else:
# Euclidean distance
distances[from_counter][to_counter] = (int(
math.hypot((from_node[0] - to_node[0]),
(from_node[1] - to_node[1]))))
return distances
def print_solution(manager, routing, solution):
"""Prints solution on console."""
print('Objective: {}'.format(solution.ObjectiveValue()))
index = routing.Start(0)
plan_output = 'Route:\n'
route_distance = 0
while not routing.IsEnd(index):
plan_output += ' {} ->'.format(manager.IndexToNode(index))
previous_index = index
index = solution.Value(routing.NextVar(index))
route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
plan_output += ' {}\n'.format(manager.IndexToNode(index))
print(plan_output)
plan_output += 'Objective: {}m\n'.format(route_distance)
def main():
"""Entry point of the program."""
# Instantiate the data problem.
data = create_data_model()
# Create the routing index manager.
manager = pywrapcp.RoutingIndexManager(len(data['locations']),
data['num_vehicles'], data['depot'])
# Create Routing Model.
routing = pywrapcp.RoutingModel(manager)
distance_matrix = compute_euclidean_distance_matrix(data['locations'])
def distance_callback(from_index, to_index):
"""Returns the distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return distance_matrix[from_node][to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
# Define cost of each arc.
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# Setting first solution heuristic.
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
# Solve the problem.
solution = routing.SolveWithParameters(search_parameters)
# Print solution on console.
if solution:
print_solution(manager, routing, solution)
if __name__ == '__main__':
main()
#finding shortest path in a graph
import numpy as np
from __future__ import print_function
import math, sys
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
np.set_printoptions(suppress=True,linewidth=sys.maxsize,threshold=sys.maxsize)
inf=-1
distances=np.array([
# [ "CH", "CL", "HB", "SL", "IN", "CO", "MT", "WA", "CI", "CN", "RI", "LV", "LX", "NV", "KV", "GR" ]
[ 000, 362, inf, 300, 201, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf ], #CH
[ inf, 000, 332, inf, inf, 142, 201, inf, inf, 251, inf, inf, inf, inf, inf, inf ], #CL
[ inf, inf, 000, inf, inf, inf, 213, 120, inf, inf, inf, inf, inf, inf, inf, inf ], #HB
[ inf, inf, inf, 000, 245, inf, inf, inf, inf, inf, inf, 263, inf, 312, inf, inf ], #SL
[ inf, inf, inf, inf, 000, 176, inf, inf, 112, inf, inf, 114, inf, inf, inf, inf ], #IN
[ inf, inf, inf, inf, inf, 000, inf, inf, 105, inf, inf, inf, inf, inf, inf, inf ], #CO
[ inf, inf, inf, inf, inf, inf, 000, 209, inf, 157, inf, inf, inf, inf, inf, inf ], #MT
[ inf, inf, inf, inf, inf, inf, inf, 000, inf, inf, 111, inf, inf, inf, inf, inf ], #WA
[ inf, inf, inf, inf, inf, inf, inf, inf, 000, 204, inf, inf, 95, inf, inf, inf ], #CI
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, 318, inf, 177, inf, inf, 244 ], #CN
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, inf, inf, inf, inf, 205 ], #RI
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, 86, 175, inf, inf ], #LV
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, inf, 170, inf ], #LX
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, 180, inf ], #NV
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, 299 ], #KV
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000 ] #GR
])
triinf=np.tril_indices(distances.shape[0], -1)
distances[triinf] = distances.T[triinf] #https://stackoverflow.com/questions/16444930/copy-upper-triangle-to-lower-triangle-in-a-python-matrix
print(distances)
def print_solution(manager, routing, solution):
"""Prints solution on console."""
print('Objective: {}'.format(solution.ObjectiveValue()))
index = routing.Start(0)
plan_output = 'Route:\n'
route_distance = 0
while not routing.IsEnd(index):
plan_output += ' {} ->'.format(manager.IndexToNode(index))
previous_index = index
index = solution.Value(routing.NextVar(index))
route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
plan_output += ' {}\n'.format(manager.IndexToNode(index))
    plan_output += 'Route distance: {}m\n'.format(route_distance)
    print(plan_output)
# Create the routing index manager.
manager = pywrapcp.RoutingIndexManager(distances.shape[0],
1, 1)
# Create Routing Model.
routing = pywrapcp.RoutingModel(manager)
def distance_callback(from_index, to_index):
"""Returns the distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return distances[from_node][to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
# Define cost of each arc.
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# Setting first solution heuristic.
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
#results with AUTOMATIC, LOCAL_CHEAPEST_ARC, PATH_CHEAPEST_ARC, PATH_MOST_CONSTRAINED_ARC, UNSET
#best result with AUTOMATIC, GLOBAL_CHEAPEST_ARC, PATH_CHEAPEST_ARC, PATH_MOST_CONSTRAINED_ARC, UNSET
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.AUTOMATIC)
# Solve the problem.
solution = routing.SolveWithParameters(search_parameters)
# Print solution on console.
if solution:
print_solution(manager, routing, solution)
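# --- Added sketch (not in the original notebook): the block above reuses the routing
# solver, which builds a tour through every city. For a plain point-to-point shortest
# path a graph library is the more direct tool; this assumes networkx is installed and
# treats the -1 entries as missing edges. The CH -> GR pair is only an illustration.
import networkx as nx
labels = ["CH", "CL", "HB", "SL", "IN", "CO", "MT", "WA",
          "CI", "CN", "RI", "LV", "LX", "NV", "KV", "GR"]
G = nx.Graph()
for i in range(distances.shape[0]):
    for j in range(i + 1, distances.shape[1]):
        if distances[i][j] > 0:  # skip the diagonal zeros and the -1 "no edge" markers
            G.add_edge(labels[i], labels[j], weight=int(distances[i][j]))
print(nx.shortest_path(G, 'CH', 'GR', weight='weight'))
print(nx.shortest_path_length(G, 'CH', 'GR', weight='weight'))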
#Christos's Greek Yogurt
#https://learning.edx.org/course/course-v1:MITx+CTL.SC0x+2T2020/block-v1:MITx+CTL.SC0x+2T2020+type@sequential+block@7e84b52028df41cd95b7ffef2872d379/block-v1:MITx+CTL.SC0x+2T2020+type@vertical+block@10256a90e6594e3284d9086fcdb0dd14
from ortools.linear_solver import pywraplp
import numpy as np
def create_data_model():
"""Stores the data for the problem."""
data = {}
# Boston Seattle Tampa
# Chicago
# Atlanta
# Denver
data['obj_coeffs'] = [
[1.04, 1.27, 1.22],
[1.23, 1.93, 0.60],
[1.92, 0.94, 1.03]]
data['constraint_coeffs_min_max'] = [
    # quantities delivered: Dairy Bucket's demand at each warehouse (containers/week)
([[1, 0, 0],
[1, 0, 0],
[1, 0, 0]], 11000, 11000),
([[0, 1, 0],
[0, 1, 0],
[0, 1, 0]], 6300, 6300),
([[0, 0, 1],
[0, 0, 1],
[0, 0, 1]], 7400, 7400),
    # quantities shipped: each plant can ship at most 10000 containers/week
([[1, 1, 1],
[0, 0, 0],
[0, 0, 0]], 0, 10000),
([[0, 0, 0],
[1, 1, 1],
[0, 0, 0]], 0, 10000),
([[0, 0, 0],
[0, 0, 0],
[1, 1, 1]], 0, 10000) ]
return data
data = create_data_model()
# Create the MIP solver with the CBC backend.
solver = pywraplp.Solver.CreateSolver('CBC')
infinity = solver.infinity()
x=[[solver.IntVar(0, infinity, f'x[{j},{i}]') for i in range(len(data['obj_coeffs'][j]))] for j in range(len(data['obj_coeffs']))]
for c in data['constraint_coeffs_min_max']:
constraint = solver.RowConstraint(c[1], c[2], '')
for ji,v in np.ndenumerate(c[0]):
constraint.SetCoefficient(x[ji[0]][ji[1]], v*1.0)
print('Number of constraints =', solver.NumConstraints())
# In Python, you can also set the constraints as follows.
# for i in range(data['num_constraints']):
# constraint_expr = \
# [data['constraint_coeffs'][i][j] * x[j] for j in range(data['num_vars'])]
# solver.Add(sum(constraint_expr) <= data['bounds'][i])
objective = solver.Objective()
for ji,v in np.ndenumerate(data['obj_coeffs']):
objective.SetCoefficient(x[ji[0]][ji[1]], v*1.0)
objective.SetMinimization()
# In Python, you can also set the objective as follows.
# obj_expr = [data['obj_coeffs'][j] * x[j] for j in range(data['num_vars'])]
# solver.Maximize(solver.Sum(obj_expr))
status = solver.Solve()
if status == pywraplp.Solver.OPTIMAL:
print('Objective value =', solver.Objective().Value())
for j in x:
for i in j:
print(i.name(), ' = ', i.solution_value())
print()
print('Problem solved in %f milliseconds' % solver.wall_time())
print('Problem solved in %d iterations' % solver.iterations())
print('Problem solved in %d branch-and-bound nodes' % solver.nodes())
else:
print('The problem does not have an optimal solution.')
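# --- Added sketch (not in the original notebook): the nine variables form a 3x3
# plant/warehouse table, so the shipping plan is easier to read as a small DataFrame.
# Assumes pandas is available; the row/column labels follow the comments above.
import pandas as pd
if status == pywraplp.Solver.OPTIMAL:
    plan = pd.DataFrame([[var.solution_value() for var in row] for row in x],
                        index=['Chicago', 'Atlanta', 'Denver'],
                        columns=['Boston', 'Seattle', 'Tampa'])
    print(plan)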
###Output
_____no_output_____
###Markdown
###Code
!pip install ortools
"""Simple travelling salesman problem on a circuit board."""
from __future__ import print_function
import math
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
def create_data_model():
"""Stores the data for the problem : (X,Y) of each node."""
data = {}
# Locations in block units
data['locations'] = [
(288, 149), (288, 129), (270, 133), (256, 141), (256, 157), (246, 157),
(236, 169), (228, 169), (228, 161), (220, 169), (212, 169), (204, 169),
(196, 169), (188, 169), (196, 161), (188, 145), (172, 145), (164, 145),
(156, 145), (148, 145), (140, 145), (148, 169), (164, 169), (172, 169),
(156, 169), (140, 169), (132, 169), (124, 169), (116, 161), (104, 153),
(104, 161), (104, 169), (90, 165), (80, 157), (64, 157), (64, 165),
(56, 169), (56, 161), (56, 153), (56, 145), (56, 137), (56, 129),
(56, 121), (40, 121), (40, 129), (40, 137), (40, 145), (40, 153),
(40, 161), (40, 169), (32, 169), (32, 161), (32, 153), (32, 145),
(32, 137), (32, 129), (32, 121), (32, 113), (40, 113), (56, 113),
(56, 105), (48, 99), (40, 99), (32, 97), (32, 89), (24, 89),
(16, 97), (16, 109), (8, 109), (8, 97), (8, 89), (8, 81),
(8, 73), (8, 65), (8, 57), (16, 57), (8, 49), (8, 41),
(24, 45), (32, 41), (32, 49), (32, 57), (32, 65), (32, 73),
(32, 81), (40, 83), (40, 73), (40, 63), (40, 51), (44, 43),
(44, 35), (44, 27), (32, 25), (24, 25), (16, 25), (16, 17),
(24, 17), (32, 17), (44, 11), (56, 9), (56, 17), (56, 25),
(56, 33), (56, 41), (64, 41), (72, 41), (72, 49), (56, 49),
(48, 51), (56, 57), (56, 65), (48, 63), (48, 73), (56, 73),
(56, 81), (48, 83), (56, 89), (56, 97), (104, 97), (104, 105),
(104, 113), (104, 121), (104, 129), (104, 137), (104, 145), (116, 145),
(124, 145), (132, 145), (132, 137), (140, 137), (148, 137), (156, 137),
(164, 137), (172, 125), (172, 117), (172, 109), (172, 101), (172, 93),
(172, 85), (180, 85), (180, 77), (180, 69), (180, 61), (180, 53),
(172, 53), (172, 61), (172, 69), (172, 77), (164, 81), (148, 85),
(124, 85), (124, 93), (124, 109), (124, 125), (124, 117), (124, 101),
(104, 89), (104, 81), (104, 73), (104, 65), (104, 49), (104, 41),
(104, 33), (104, 25), (104, 17), (92, 9), (80, 9), (72, 9),
(64, 21), (72, 25), (80, 25), (80, 25), (80, 41), (88, 49),
(104, 57), (124, 69), (124, 77), (132, 81), (140, 65), (132, 61),
(124, 61), (124, 53), (124, 45), (124, 37), (124, 29), (132, 21),
(124, 21), (120, 9), (128, 9), (136, 9), (148, 9), (162, 9),
(156, 25), (172, 21), (180, 21), (180, 29), (172, 29), (172, 37),
(172, 45), (180, 45), (180, 37), (188, 41), (196, 49), (204, 57),
(212, 65), (220, 73), (228, 69), (228, 77), (236, 77), (236, 69),
(236, 61), (228, 61), (228, 53), (236, 53), (236, 45), (228, 45),
(228, 37), (236, 37), (236, 29), (228, 29), (228, 21), (236, 21),
(252, 21), (260, 29), (260, 37), (260, 45), (260, 53), (260, 61),
(260, 69), (260, 77), (276, 77), (276, 69), (276, 61), (276, 53),
(284, 53), (284, 61), (284, 69), (284, 77), (284, 85), (284, 93),
(284, 101), (288, 109), (280, 109), (276, 101), (276, 93), (276, 85),
(268, 97), (260, 109), (252, 101), (260, 93), (260, 85), (236, 85),
(228, 85), (228, 93), (236, 93), (236, 101), (228, 101), (228, 109),
(228, 117), (228, 125), (220, 125), (212, 117), (204, 109), (196, 101),
(188, 93), (180, 93), (180, 101), (180, 109), (180, 117), (180, 125),
(196, 145), (204, 145), (212, 145), (220, 145), (228, 145), (236, 145),
(246, 141), (252, 125), (260, 129), (280, 133)
]
data['num_vehicles'] = 1
data['depot'] = 0
return data
def compute_euclidean_distance_matrix(locations):
"""Creates callback to return distance between points."""
distances = {}
for from_counter, from_node in enumerate(locations):
distances[from_counter] = {}
for to_counter, to_node in enumerate(locations):
if from_counter == to_counter:
distances[from_counter][to_counter] = 0
else:
# Euclidean distance
distances[from_counter][to_counter] = (int(
math.hypot((from_node[0] - to_node[0]),
(from_node[1] - to_node[1]))))
return distances
def print_solution(manager, routing, solution):
"""Prints solution on console."""
print('Objective: {}'.format(solution.ObjectiveValue()))
index = routing.Start(0)
plan_output = 'Route:\n'
route_distance = 0
while not routing.IsEnd(index):
plan_output += ' {} ->'.format(manager.IndexToNode(index))
previous_index = index
index = solution.Value(routing.NextVar(index))
route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
plan_output += ' {}\n'.format(manager.IndexToNode(index))
    plan_output += 'Route distance: {}m\n'.format(route_distance)
    print(plan_output)
data = create_data_model()
# Create the routing index manager.
manager = pywrapcp.RoutingIndexManager(len(data['locations']),
data['num_vehicles'], data['depot'])
# Create Routing Model.
routing = pywrapcp.RoutingModel(manager)
distance_matrix = compute_euclidean_distance_matrix(data['locations'])
def distance_callback(from_index, to_index):
"""Returns the distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return distance_matrix[from_node][to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
# Define cost of each arc.
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# Setting first solution heuristic.
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
# Solve the problem.
solution = routing.SolveWithParameters(search_parameters)
# Print solution on console.
if solution:
print_solution(manager, routing, solution)
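# --- Added sketch (not in the original notebook): PATH_CHEAPEST_ARC only constructs an
# initial tour. OR-Tools can usually shorten it further by running local search under a
# time limit; the two parameters below use the standard names from recent ortools
# releases and are an assumption here, not something this notebook verified.
search_parameters.local_search_metaheuristic = (
    routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
search_parameters.time_limit.FromSeconds(10)
improved = routing.SolveWithParameters(search_parameters)
if improved:
    print_solution(manager, routing, improved)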
#finding shortest path in a graph
import numpy as np
from __future__ import print_function
import math, sys
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
np.set_printoptions(suppress=True,linewidth=sys.maxsize,threshold=sys.maxsize)
inf=-1
distances=np.array([
# [ "CH", "CL", "HB", "SL", "IN", "CO", "MT", "WA", "CI", "CN", "RI", "LV", "LX", "NV", "KV", "GR" ]
[ 000, 362, inf, 300, 201, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf ], #CH
[ inf, 000, 332, inf, inf, 142, 201, inf, inf, 251, inf, inf, inf, inf, inf, inf ], #CL
[ inf, inf, 000, inf, inf, inf, 213, 120, inf, inf, inf, inf, inf, inf, inf, inf ], #HB
[ inf, inf, inf, 000, 245, inf, inf, inf, inf, inf, inf, 263, inf, 312, inf, inf ], #SL
[ inf, inf, inf, inf, 000, 176, inf, inf, 112, inf, inf, 114, inf, inf, inf, inf ], #IN
[ inf, inf, inf, inf, inf, 000, inf, inf, 105, inf, inf, inf, inf, inf, inf, inf ], #CO
[ inf, inf, inf, inf, inf, inf, 000, 209, inf, 157, inf, inf, inf, inf, inf, inf ], #MT
[ inf, inf, inf, inf, inf, inf, inf, 000, inf, inf, 111, inf, inf, inf, inf, inf ], #WA
[ inf, inf, inf, inf, inf, inf, inf, inf, 000, 204, inf, inf, 95, inf, inf, inf ], #CI
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, 318, inf, 177, inf, inf, 244 ], #CN
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, inf, inf, inf, inf, 205 ], #RI
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, 86, 175, inf, inf ], #LV
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, inf, 170, inf ], #LX
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, 180, inf ], #NV
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000, 299 ], #KV
[ inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 000 ] #GR
])
triinf=np.tril_indices(distances.shape[0], -1)
distances[triinf] = distances.T[triinf] #https://stackoverflow.com/questions/16444930/copy-upper-triangle-to-lower-triangle-in-a-python-matrix
print(distances)
def print_solution(manager, routing, solution):
"""Prints solution on console."""
print('Objective: {}'.format(solution.ObjectiveValue()))
index = routing.Start(0)
plan_output = 'Route:\n'
route_distance = 0
while not routing.IsEnd(index):
plan_output += ' {} ->'.format(manager.IndexToNode(index))
previous_index = index
index = solution.Value(routing.NextVar(index))
route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
plan_output += ' {}\n'.format(manager.IndexToNode(index))
    plan_output += 'Route distance: {}m\n'.format(route_distance)
    print(plan_output)
# Create the routing index manager.
manager = pywrapcp.RoutingIndexManager(distances.shape[0],
1, 1)
# Create Routing Model.
routing = pywrapcp.RoutingModel(manager)
def distance_callback(from_index, to_index):
"""Returns the distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return distances[from_node][to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
# Define cost of each arc.
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# Setting first solution heuristic.
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
#results with AUTOMATIC, LOCAL_CHEAPEST_ARC, PATH_CHEAPEST_ARC, PATH_MOST_CONSTRAINED_ARC, UNSET
#best result with AUTOMATIC, GLOBAL_CHEAPEST_ARC, PATH_CHEAPEST_ARC, PATH_MOST_CONSTRAINED_ARC, UNSET
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.AUTOMATIC)
# Solve the problem.
solution = routing.SolveWithParameters(search_parameters)
# Print solution on console.
if solution:
print_solution(manager, routing, solution)
#Use case: Christos's Greek Yogurt
#https://learning.edx.org/course/course-v1:MITx+CTL.SC0x+2T2020/block-v1:MITx+CTL.SC0x+2T2020+type@sequential+block@7e84b52028df41cd95b7ffef2872d379/block-v1:MITx+CTL.SC0x+2T2020+type@vertical+block@10256a90e6594e3284d9086fcdb0dd14
#Christos's Yogurt is a popular Greek yogurt manufacturer in the United States.
#The company has production facilities in Chicago, Atlanta and Denver.
#Each facility can make only 10000 containers of yogurt per week.
#One of Christos's main customers, Dairy Bucket, has placed a large order.
#Dairy Bucket distributes their order over their 3 facilities located in Boston, Seattle and Tampa.
#Christos wants to minimize his transportation costs while satisfying Dairy Bucket's order.
from ortools.linear_solver import pywraplp
import numpy as np
def create_data_model():
"""Stores the data for the problem."""
data = {}
# Shipping Cost (dollars/container)
# Boston Seattle Tampa
# Chicago
# Atlanta
# Denver
data['obj_coeffs'] = [
[1.04, 1.27, 1.22],
[1.23, 1.93, 0.60],
[1.92, 0.94, 1.03]]
data['constraint_coeffs_min_max'] = [
#Dairy Bucket's Demand (containers/week)
([[1, 0, 0],
[1, 0, 0],
[1, 0, 0]], 11000, 11000),
([[0, 1, 0],
[0, 1, 0],
[0, 1, 0]], 6300, 6300),
([[0, 0, 1],
[0, 0, 1],
[0, 0, 1]], 7400, 7400),
    # quantities shipped: each plant can ship at most 10000 containers/week
([[1, 1, 1],
[0, 0, 0],
[0, 0, 0]], 0, 10000),
([[0, 0, 0],
[1, 1, 1],
[0, 0, 0]], 0, 10000),
([[0, 0, 0],
[0, 0, 0],
[1, 1, 1]], 0, 10000) ]
return data
data = create_data_model()
# Create the MIP solver with the CBC backend.
solver = pywraplp.Solver.CreateSolver('CBC')
infinity = solver.infinity()
x=[[solver.IntVar(0, infinity, f'x[{j},{i}]') for i in range(len(data['obj_coeffs'][j]))] for j in range(len(data['obj_coeffs']))]
for c in data['constraint_coeffs_min_max']:
constraint = solver.RowConstraint(c[1], c[2], '')
for ji,v in np.ndenumerate(c[0]):
constraint.SetCoefficient(x[ji[0]][ji[1]], v*1.0)
print('Number of constraints =', solver.NumConstraints())
# In Python, you can also set the constraints as follows.
# for i in range(data['num_constraints']):
# constraint_expr = \
# [data['constraint_coeffs'][i][j] * x[j] for j in range(data['num_vars'])]
# solver.Add(sum(constraint_expr) <= data['bounds'][i])
objective = solver.Objective()
for ji,v in np.ndenumerate(data['obj_coeffs']):
objective.SetCoefficient(x[ji[0]][ji[1]], v*1.0)
objective.SetMinimization()
# In Python, you can also set the objective as follows.
# obj_expr = [data['obj_coeffs'][j] * x[j] for j in range(data['num_vars'])]
# solver.Maximize(solver.Sum(obj_expr))
status = solver.Solve()
if status == pywraplp.Solver.OPTIMAL:
print('Objective value =', solver.Objective().Value())
for j in x:
for i in j:
print(i.name(), ' = ', i.solution_value())
print()
print('Problem solved in %f milliseconds' % solver.wall_time())
print('Problem solved in %d iterations' % solver.iterations())
print('Problem solved in %d branch-and-bound nodes' % solver.nodes())
else:
print('The problem does not have an optimal solution.')
###Output
Number of constraints = 6
Objective value = 21992.0
x[0,0] = 10000.0
x[0,1] = 0.0
x[0,2] = 0.0
x[1,0] = 1000.0
x[1,1] = 0.0
x[1,2] = 7400.0
x[2,0] = 0.0
x[2,1] = 6300.0
x[2,2] = 0.0
Problem solved in 4.000000 milliseconds
Problem solved in 0 iterations
Problem solved in 0 branch-and-bound nodes
|
OSF_SHARE_hacking.ipynb | ###Markdown
Using the OSF APIFor more information, visit the full [OSF API docs](http://developer.osf.io)!We'll be using the staging version of the OSF and API for this tutorial. Because staging is always in active development, if the endpoints fail to work at any point, feel free to switch to production OSF! Just note that you'll have to create a new token, and that any test work you make public will be available to anyone! Simply remove "staging" from the base STAGING_OSF_API URL listed below for production OSF endpoints.Before starting this tutorial, make sure to [create an account on the staging version of the OSF](https://staging.osf.io), log in to that account, and create an API token by [visiting your settings page](https://staging.osf.io/settings/tokens/).Save your token as an environment variable, or replace the environment variable below with the text version of your token for local testing. Create a Project, Upload a FileHere's an example of how to create a project (called a node) on the OSF, and then follow the API relationships to upload a file.This is a Python implementation of a guide found on the OSF [detailing a typical OSF Workflow](https://osf.io/y9jdt/wiki/Typical%20Workflow/)
###Code
import os
import json
import requests
STAGING_OSF_TOKEN = os.environ['STAGING_OSF_TOKEN'] # replace this line with your token instead if you like
STAGING_OSF_API = 'https://staging-api.osf.io/v2/'
# Let's define a few helper functions to make sending credentials easier
def post_request(url, data, auth):
headers = {'Content-Type': 'application/vnd.api+json'}
if auth:
headers['Authorization'] = 'Bearer {}'.format(auth)
data = json.dumps(data)
return requests.post(url, headers=headers, data=data)
def get_request(url, auth=None):
headers = {'Authorization': 'Bearer {}'.format(auth)}
return requests.get(url, headers=headers)
def put_request(url, data, auth):
headers = {
'Content-Type': 'application/vnd.api+json',
'Authorization': 'Bearer {}'.format(auth)
}
data = json.dumps(data)
return requests.put(url, headers=headers, data=data)
# Define the data for the node we'd like to create
node_data = {
"data": {
"type":"nodes",
"attributes": {
"title":"Testing Example",
"description": "This is a node created as an example of how to create a node!",
"public": False,
"category":"project"
}
}
}
# Post the data, get a response back with details about our node
node_response = post_request(STAGING_OSF_API + 'nodes/', node_data, STAGING_OSF_TOKEN)
print(json.dumps(node_response.json(), indent=4))
# Find the files relationship, follow the related -> href link
files_link = node_response.json()['data']['relationships']['files']['links']['related']['href']
files_response = get_request(files_link, STAGING_OSF_TOKEN).json()
print(json.dumps(files_response, indent=4))
# Find the upload link for OSF Storage in that list - should be the first element in the list for new nodes
# A node can have several external storage providers configured
upload_link = files_response['data'][0]['links']['upload']
upload_link
# Upload the file along with the kind and file name
upload_link_with_filename = upload_link + '?kind=file&name=newest_file.txt'
file_data = 'This is the entirety of the contents of the file I am uploading. It could have been more, but for an example, a small file seems like a better idea.'
put_response = put_request(upload_link_with_filename, file_data, STAGING_OSF_TOKEN)
print(json.dumps(put_response.json(), indent=4))
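# --- Added sketch (not in the original notebook): a cheap sanity check is to list the
# node's OSF Storage contents again and confirm the new file is there. The key path into
# the provider's own "files" relationship mirrors the JSON API responses seen above, but
# treat it as an assumption rather than something this notebook verified.
provider = get_request(files_link, STAGING_OSF_TOKEN).json()['data'][0]
provider_files_link = provider['relationships']['files']['links']['related']['href']
node_files = get_request(provider_files_link, STAGING_OSF_TOKEN).json()
print([item['attributes']['name'] for item in node_files['data']])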
###Output
_____no_output_____
###Markdown
You did it!Visit your project on the OSF and see your newly uploaded file!
###Code
# Check our your project on the OSF by visiting the project's link
node_response.json()['data']['links']['html']
###Output
_____no_output_____
###Markdown
Querying the SHARE API
###Code
SHARE_API_BASE = 'https://share.osf.io/api/v2/'
# Get the total number of SHARE sources
sources_query = requests.get(SHARE_API_BASE + 'sources').json()
count = sources_query['meta']['pagination']['count']
print('There are {} sources in SHARE'.format(count))
# Get the total number of creativeworks in SHARE
creativeworks_search = 'search/creativeworks/_search'
base_search = requests.get(SHARE_API_BASE + creativeworks_search).json()
total_creativeworks = base_search['hits']['total']
print('There are {} works in SHARE'.format(total_creativeworks))
# Print out the first 10 titles
results = base_search['hits']['hits']
for result in results:
print(result['_source']['title'])
def post_query(url, query):
headers = {'Content-Type': 'application/json'}
data = json.dumps(query)
return requests.post(url, headers=headers, data=data)
# Get query forming hints by searching https://share.osf.io/discover
search_query = {
"query": {
"bool": {
"must": {
"query_string": {
"query": "climate change"
}
},
"filter": [
{
"term": {
"types": "software"
}
}
]
}
}
}
software_results = post_query(SHARE_API_BASE + creativeworks_search, search_query).json()
# Let's check out the details of the first result
print(json.dumps(software_results['hits']['hits'][0]['_source'], indent=4))
# Iterate through the first page of results, print each title
for result in software_results['hits']['hits']:
print(result['_source']['title'])
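# --- Added sketch (not in the original notebook): the SHARE search endpoint speaks the
# Elasticsearch query DSL, so the standard "from"/"size" keys should page through results.
# Treat that as an assumption about the index rather than a documented guarantee.
page_size = 10
next_page_query = dict(search_query, **{'from': page_size, 'size': page_size})
next_page = post_query(SHARE_API_BASE + creativeworks_search, next_page_query).json()
for result in next_page['hits']['hits']:
    print(result['_source']['title'])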
###Output
_____no_output_____ |
ChurnQuestion.ipynb | ###Markdown
This model has an accuracy of 89.83%. In the confusion matrix we can see that the majority of the errors are type 1, while type 2 errors are quite low. The model is fairly decent; it could be improved, but that is not the current goal. We now need to check the model's coefficients to see which variables are most related to the churn action.
###Code
coef = pd.DataFrame(clf.coef_[0], index = X_indexes, columns = ['Coefficients'])
coef
no_dummie_coef = coef.iloc[0:13, 0]
dummie_coef = coef.iloc[13:len(coef), 0]
dummie_coef.sort_values()
###Output
_____no_output_____
###Markdown
It looks like the most important feature among the dummy variables is Gender. We can see that being male has a positive impact on the Existing client outcome, which means that females are more prone to churn, something we had already measured. We can also see that people with lower income are more prone to stay in the bank, as are people with Married status and people who do not declare their income.
###Code
no_dummie_coef.sort_values()
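# --- Added sketch (not in the original notebook): logistic-regression coefficients are
# log-odds, so exponentiating them gives odds ratios that are often easier to read.
# Uses the `coef` DataFrame built above; which class is "positive" depends on how the
# target was encoded, so read the direction with that in mind.
import numpy as np
odds_ratios = np.exp(coef['Coefficients']).sort_values()
odds_ratios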
###Output
_____no_output_____ |
site/en/r2/tutorials/estimators/boosted_trees.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from IPython.display import clear_output
from matplotlib import pyplot as plt
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tf-nightly-2.0-preview
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: Feature Name Description sex Gender of passenger age Age of passenger n_siblings_spouses siblings and partners aboard parch of parents and children aboard fare Fare passenger paid. class Passenger's class on ship deck Which deck passenger was on embark_town Which town passenger embarked from alone If passenger was alone Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')
plt.show()
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
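# --- Added sketch (not part of the original tutorial): the text above also mentions
# bucketization. tf.feature_column.bucketized_column turns a numeric column into one-hot
# buckets; the age boundaries below are illustrative, not values from the tutorial.
age_buckets = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column('age', dtype=tf.float32),
    boundaries=[10, 20, 30, 40, 50, 60, 70])
# It could be appended to feature_columns like any other column:
# feature_columns.append(age_buckets)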
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle thru dataset as many times as need (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters.2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set (in this example, the `dfeval` DataFrame). You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show()
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
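# --- Added sketch (not part of the original tutorial): the area under this curve
# summarizes the same trade-off in one number and should roughly match the `auc`
# metric reported by est.evaluate above.
from sklearn.metrics import roc_auc_score
print('AUC:', roc_auc_score(y_eval, probs))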
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
!pip install tf-nightly-2.0-preview
from IPython.display import clear_output
import tensorflow as tf
tf.random.set_seed(123)
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: Feature Name Description sex Gender of passenger age Age of passenger n_siblings_spouses siblings and partners aboard parch of parents and children aboard fare Fare passenger paid. class Passenger's class on ship deck Which deck passenger was on embark_town Which town passenger embarked from alone If passenger was alone Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20);
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive');
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
_____no_output_____
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle thru dataset as many times as need (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set (in this example, the `dfeval` DataFrame). You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities');
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,);
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from IPython.display import clear_output
from matplotlib import pyplot as plt
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
try:
!pip install tf-nightly-2.0-preview
except Exception:
pass
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: Feature Name Description sex Gender of passenger age Age of passenger n_siblings_spouses siblings and partners aboard parch of parents and children aboard fare Fare passenger paid. class Passenger's class on ship deck Which deck passenger was on embark_town Which town passenger embarked from alone If passenger was alone Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')
plt.show()
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle thru dataset as many times as need (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters.2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set (in this example, the `dfeval` DataFrame). You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show()
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from IPython.display import clear_output
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: sex (gender of passenger), age (age of passenger), n_siblings_spouses (number of siblings and partners aboard), parch (number of parents and children aboard), fare (fare the passenger paid), class (passenger's class on ship), deck (which deck the passenger was on), embark_town (which town the passenger embarked from), alone (whether the passenger was alone). Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20);
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive');
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
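###Markdown
(Illustrative aside, not part of the original tutorial.) The `from_tensor_slices` input function above works because the titanic data fits comfortably in memory. As a rough sketch, the same kind of input function could instead stream records straight from a CSV file with `tf.data.experimental.make_csv_dataset`; the local file path and batch size below are assumptions for illustration only.
###Code
# Sketch only: an input function that streams batches from a CSV file on disk.
# Assumes the training CSV has been downloaded locally as 'titanic_train.csv'.
def make_csv_input_fn(csv_path, batch_size=32, n_epochs=None, shuffle=True):
  def input_fn():
    return tf.data.experimental.make_csv_dataset(
        csv_path,
        batch_size=batch_size,
        label_name='survived',  # label column in the titanic CSV
        num_epochs=n_epochs,
        shuffle=shuffle)
  return input_fn
###Output
_____no_output_____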
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above, one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
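###Markdown
(Illustrative aside, not part of the original tutorial.) The constructor also exposes tree-specific hyperparameters such as `n_trees`, `max_depth`, `learning_rate` and the regularization terms. A hypothetical, lightly regularized variant of the estimator above might look like this; the specific values are placeholders, not tuned settings.
###Code
# Sketch only: the same estimator with a few tree hyperparameters set explicitly.
tuned_params = {
    'n_trees': 50,           # stop after 50 trees instead of the default
    'max_depth': 3,          # shallower trees
    'learning_rate': 0.1,
    'l2_regularization': 0.01,
}
tuned_est = tf.estimator.BoostedTreesClassifier(
    feature_columns, n_batches_per_layer=n_batches, **tuned_params)
tuned_est.train(train_input_fn, max_steps=100)
print(pd.Series(tuned_est.evaluate(eval_input_fn)))
###Output
_____no_output_____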
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities');
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,);
###Output
_____no_output_____
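###Markdown
(Illustrative aside, not part of the original tutorial.) If you only want the area under that ROC curve as a single number, scikit-learn can compute it directly from the labels and the predicted probabilities:
###Code
# Sketch only: summarize the ROC curve above as a single AUC value.
from sklearn.metrics import roc_auc_score
print('AUC:', roc_auc_score(y_eval, probs))
###Output
_____no_output_____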
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from IPython.display import clear_output
from matplotlib import pyplot as plt
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tf-nightly-2.0-preview
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: sex (gender of passenger), age (age of passenger), n_siblings_spouses (number of siblings and partners aboard), parch (number of parents and children aboard), fare (fare the passenger paid), class (passenger's class on ship), deck (which deck the passenger was on), embark_town (which town the passenger embarked from), alone (whether the passenger was alone). Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')
plt.show()
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
_____no_output_____
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above, one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show()
###Output
_____no_output_____
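###Markdown
(Illustrative aside, not part of the original tutorial.) The probabilities above can also be turned into hard class predictions by thresholding at 0.5, which makes it easy to sanity-check them against the labels in `y_eval`:
###Code
# Sketch only: threshold the predicted probabilities and compare with the labels.
hard_preds = (probs > 0.5).astype(int)
print('accuracy from thresholded probabilities:',
      (hard_preds.values == y_eval.values).mean())
###Output
_____no_output_____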
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from IPython.display import clear_output
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: sex (gender of passenger), age (age of passenger), n_siblings_spouses (number of siblings and partners aboard), parch (number of parents and children aboard), fare (fare the passenger paid), class (passenger's class on ship), deck (which deck the passenger was on), embark_town (which town the passenger embarked from), alone (whether the passenger was alone). Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20);
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive');
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
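###Markdown
(Illustrative aside, not part of the original tutorial.) The bucketization capability mentioned above could be applied to the numeric `age` column as well; the bucket boundaries below are arbitrary placeholders and this column is not used by the rest of the tutorial.
###Code
# Sketch only: a bucketized version of the numeric 'age' feature.
age_fc = tf.feature_column.numeric_column('age', dtype=tf.float32)
age_buckets_fc = tf.feature_column.bucketized_column(
    age_fc, boundaries=[10, 20, 30, 40, 50, 60, 70])
###Output
_____no_output_____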
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above, one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities');
###Output
_____no_output_____
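###Markdown
(Illustrative aside, not part of the original tutorial.) Each element of `pred_dicts` describes one passenger from the evaluation set; assuming the standard fields produced by a binary classifier's `predict`, the first passenger's predicted class and survival probability can be read out like this:
###Code
# Sketch only: inspect the prediction for the first passenger in the eval set.
first_pred = pred_dicts[0]
print('survival probability:', first_pred['probabilities'][1])
print('predicted class id:  ', first_pred['class_ids'][0])
###Output
_____no_output_____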
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,);
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from IPython.display import clear_output
from matplotlib import pyplot as plt  # needed for the plt.show() calls below
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: sex (gender of passenger), age (age of passenger), n_siblings_spouses (number of siblings and partners aboard), parch (number of parents and children aboard), fare (fare the passenger paid), class (passenger's class on ship), deck (which deck the passenger was on), embark_town (which town the passenger embarked from), alone (whether the passenger was alone). Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')
plt.show()
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
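###Markdown
(Illustrative aside, not part of the original tutorial.) Because eager execution is on, the dataset returned by one of these input functions can be iterated directly, which is a convenient way to check what a batch actually looks like:
###Code
# Sketch only: pull a single batch from the training input function and inspect it.
inspect_ds = make_input_fn(dftrain, y_train, n_epochs=1)()
for feature_batch, label_batch in inspect_ds.take(1):
  print('feature keys:', list(feature_batch.keys())[:5])
  print('first labels:', label_batch.numpy()[:5])
###Output
_____no_output_____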
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
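###Markdown
(Illustrative aside, not part of the original tutorial.) The `result` returned by `evaluate` is a plain dict, so individual metrics can also be pulled out directly:
###Code
# Sketch only: read a couple of headline metrics from the evaluation result dict.
print('linear model accuracy:', result['accuracy'])
print('linear model AUC:     ', result['auc'])
###Output
_____no_output_____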
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above, one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show()
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from IPython.display import clear_output
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tf-nightly-2.0-preview
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: sex (gender of passenger), age (age of passenger), n_siblings_spouses (number of siblings and partners aboard), parch (number of parents and children aboard), fare (fare the passenger paid), class (passenger's class on ship), deck (which deck the passenger was on), embark_town (which town the passenger embarked from), alone (whether the passenger was alone). Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20);
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive');
###Output
_____no_output_____
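###Markdown
(Illustrative aside, not part of the original tutorial.) The same survival rates can be printed as numbers rather than plotted, which makes the gap between the two groups explicit:
###Code
# Sketch only: survival rate by sex as plain numbers instead of a bar chart.
print(pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean())
###Output
_____no_output_____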
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above, one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities');
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,);
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from IPython.display import clear_output
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: sex (gender of passenger), age (age of passenger), n_siblings_spouses (number of siblings and partners aboard), parch (number of parents and children aboard), fare (fare the passenger paid), class (passenger's class on ship), deck (which deck the passenger was on), embark_town (which town the passenger embarked from), alone (whether the passenger was alone). Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20);
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive');
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities');
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,);
###Output
_____no_output_____
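###Markdown
The area under this ROC curve can also be computed directly from the predicted probabilities with scikit-learn's `roc_auc_score`, shown here as a brief sketch:
###Code
from sklearn.metrics import roc_auc_score
# AUC of the boosted-trees classifier on the evaluation set, using the probabilities above.
print('AUC: {:.3f}'.format(roc_auc_score(y_eval, probs)))
###Output
_____no_output_____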
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from IPython.display import clear_output
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: Feature Name Description sex Gender of passenger age Age of passenger n_siblings_spouses Number of siblings and partners aboard parch Number of parents and children aboard fare Fare passenger paid. class Passenger's class on ship deck Which deck passenger was on embark_town Which town passenger embarked from alone If passenger was alone Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20);
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive');
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
    # For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities');
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,);
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
!pip install tf-nightly-2.0-preview
from IPython.display import clear_output
import tensorflow as tf
tf.random.set_seed(123)
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: Feature Name Description sex Gender of passenger age Age of passenger n_siblings_spouses Number of siblings and partners aboard parch Number of parents and children aboard fare Fare passenger paid. class Passenger's class on ship deck Which deck passenger was on embark_town Which town passenger embarked from alone If passenger was alone Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20);
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive');
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
_____no_output_____
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
    # For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities');
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,);
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from IPython.display import clear_output
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: Feature Name Description sex Gender of passenger age Age of passenger n_siblings_spouses Number of siblings and partners aboard parch Number of parents and children aboard fare Fare passenger paid. class Passenger's class on ship deck Which deck passenger was on embark_town Which town passenger embarked from alone If passenger was alone Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20);
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive');
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
Feature value: "Third"
One-hot encoded: [[ 0. 0. 1.]]
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
    # For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.765152
accuracy_baseline 0.625000
auc 0.832844
auc_precision_recall 0.789631
average_loss 0.478908
global_step 100.000000
label/mean 0.375000
loss 0.478908
precision 0.703297
prediction/mean 0.350790
recall 0.646465
dtype: float64
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
accuracy 0.829545
accuracy_baseline 0.625000
auc 0.872788
auc_precision_recall 0.857807
average_loss 0.411839
global_step 100.000000
label/mean 0.375000
loss 0.411839
precision 0.793478
prediction/mean 0.381942
recall 0.737374
dtype: float64
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities');
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,);
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from IPython.display import clear_output
from matplotlib import pyplot as plt
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tf-nightly-2.0-preview
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: Feature Name Description sex Gender of passenger age Age of passenger n_siblings_spouses Number of siblings and partners aboard parch Number of parents and children aboard fare Fare passenger paid. class Passenger's class on ship deck Which deck passenger was on embark_town Which town passenger embarked from alone If passenger was alone Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')
plt.show()
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
_____no_output_____
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
    # For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters.2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show()
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
How to train Boosted Trees models in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning. Load the titanic datasetYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from IPython.display import clear_output
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
!pip install tf-nightly-2.0-preview
import tensorflow as tf
tf.random.set_seed(123)
###Output
_____no_output_____
###Markdown
The dataset consists of a training set and an evaluation set:* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.* The model is tested against the *eval set*, `dfeval`, and `y_eval`.For training you will use the following features: Feature Name Description sex Gender of passenger age Age of passenger n_siblings_spouses Number of siblings and partners aboard parch Number of parents and children aboard fare Fare passenger paid. class Passenger's class on ship deck Which deck passenger was on embark_town Which town passenger embarked from alone If passenger was alone Explore the data Let's first preview some of the data and create summary statistics on the training set.
###Code
dftrain.head()
dftrain.describe()
###Output
_____no_output_____
###Markdown
There are 627 and 264 examples in the training and evaluation sets, respectively.
###Code
dftrain.shape[0], dfeval.shape[0]
###Output
_____no_output_____
###Markdown
The majority of passengers are in their 20's and 30's.
###Code
dftrain.age.hist(bins=20);
###Output
_____no_output_____
###Markdown
There are approximately twice as many male passengers as female passengers aboard.
###Code
dftrain.sex.value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
The majority of passengers were in the "third" class.
###Code
dftrain['class'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Most passengers embarked from Southampton.
###Code
dftrain['embark_town'].value_counts().plot(kind='barh');
###Output
_____no_output_____
###Markdown
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
###Code
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive');
###Output
_____no_output_____
###Markdown
Create feature columns and input functionsThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
###Code
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
###Output
_____no_output_____
###Markdown
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
###Code
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
###Output
_____no_output_____
###Markdown
Additionally, you can view all of the feature column transformations together:
###Code
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
###Output
_____no_output_____
###Markdown
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
###Code
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
    # For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
###Output
_____no_output_____
###Markdown
Train and evaluate the modelBelow you will do the following steps:1. Initialize the model, specifying the features and hyperparameters. 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
###Code
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported. Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
###Code
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
###Output
_____no_output_____
###Markdown
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
###Code
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities');
###Output
_____no_output_____
###Markdown
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
###Code
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,);
###Output
_____no_output_____ |
adaboost-101/adaboost_tutorial.ipynb | ###Markdown
AdaBoostIn this tutorial, we'll build a stump classifier and apply the AdaBoost algorithm. Our goal is to transform a weak classifier into something useful. This lecture covers the first part of chapter 7 in Peter Harrington's book (Harrington, P. (2012). Machine Learning in Action. Shelter Island, NY: Manning) with some added commentary. ImportsRunning the code below will be comprensive for the tutorial.
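For orientation, the code below implements the standard AdaBoost bookkeeping: each weak classifier receives a weight $\alpha = \frac{1}{2}\ln\left(\frac{1-\varepsilon}{\varepsilon}\right)$, where $\varepsilon$ is its weighted error rate, and the per-example weight vector $D$ is then renormalized as $D_i \leftarrow \frac{D_i\,e^{-\alpha y_i h(x_i)}}{\sum_j D_j\,e^{-\alpha y_j h(x_j)}}$, so that misclassified examples carry more weight on the next round.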
###Code
# base requirements
from IPython.display import Image
from IPython.display import display
from datetime import *
import json
from copy import *
from pprint import *
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
import rpy2
%load_ext rpy2.ipython
%R require("ggplot2")
% matplotlib inline
from ggplot import *
randn = np.random.randn
# optional
import warnings
warnings.filterwarnings('ignore')
# tutorial requirements
#bokeh - http://bokeh.pydata.org/en/latest/docs/installation.html
from bokeh.io import output_notebook
from bokeh.plotting import figure, output_file, show
output_notebook() # inline graphs
#import bokeh.sampledata # this download is commented out b/c it's optional
# bokeh.sampledata.download() # this download is commented out b/c it's optional
###Output
_____no_output_____
###Markdown
FunctionsWe'll dump the major code base into this section.
###Code
def stumpClassify(dataMatrix,dimen,threshVal,threshIneq):#just classify the data
"""
Performs a threshold comparison to classify data.
Everything on one side of the threshold is thrown into class -1,
and everything on the other side is thrown into class +1.
"""
retArray = np.ones((np.shape(dataMatrix)[0],1))
#print "retArray"
#display(retArray)
if threshIneq == 'lt':
retArray[dataMatrix[:,dimen] <= threshVal] = -1.0
else:
        retArray[dataMatrix[:,dimen] > threshVal] = -1.0
return retArray
def buildStump(dataArr,classLabels,D):
"""
Iterates over all of the possible inputs to stumpClassify() and finds
the best decision stump for our dataset. Best here will be with respect
to the data weight vector D.
"""
dataMatrix = np.mat(dataArr); labelMat = np.mat(classLabels).T
#print "dataMatrix:"
#display(dataMatrix)
#print "labelMat:"
#display(labelMat)
m,n = np.shape(dataMatrix)
#print ("m:{}, n:{}".format(m,n))
numSteps = 10.0; bestStump = {}; bestClasEst = np.mat(np.zeros((m,1)))
#print "bestClasEst:"
#display(bestClasEst)
minError = np.inf #init error sum, to +infinity
#print "minError:"
#display(minError)
#The first one goes over all the features in our dataset. You’re
# considering numeric values, and you calculate the minimum and
# maximum to see how large your step size should be.
for i in range(n):#loop over all dimensions
rangeMin = dataMatrix[:,i].min(); rangeMax = dataMatrix[:,i].max();
stepSize = (rangeMax-rangeMin)/numSteps
#print "stepSize:{}".format(stepSize)
# The next for loops loop over these values.
        for j in range(-1,int(numSteps)+1):#loop over every threshold step in the current dimension's range
#The last for loop toggles your inequality between greater than and less than
for inequal in ['lt', 'gt']: #go over less than and greater than
threshVal = (rangeMin + float(j) * stepSize) #value at which we make our decision to classify one way or another
predictedVals = stumpClassify(dataMatrix,i,threshVal,inequal) #returns labels for each element
errArr = np.mat(np.ones((m,1)))
errArr[predictedVals == labelMat] = 0
#print "\n\nerrArr:"
#display(errArr)
#display(D.T)
weightedError = D.T*errArr #calc total error multiplied by D <---------D is constant in this function but varied inside AdaBoost
#print "weightedError:"
#display(weightedError)
#####
##### uncomment line below for 1st pass
#####
#print "split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError)
                if weightedError < minError: #finds the best stump
minError = weightedError
bestClasEst = predictedVals.copy()
bestStump['feature'] = i+1
bestStump['thresh'] = threshVal
bestStump['ineq'] = inequal
return bestStump,minError,bestClasEst
def alpha(error):
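    # Classifier weight used by AdaBoost: alpha = 0.5 * ln((1 - error) / error).
    # The max(error, 1e-16) guard below avoids division by zero when the error is 0.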
return float(0.5*np.log((1.0-error)/max(error,1e-16)))
def adaBoostTrainDS(dataArr,classLabels,numIt=40):
"""
The implementation of AdaBoost. We get back a set of weak \
classifiers and weights (the signs of which we use as labels).
"""
weakClassArr = []
m = np.shape(dataArr)[0]
D = np.mat(np.ones((m,1))/m) #init D to all weights being equal
aggClassEst = np.mat(np.zeros((m,1))) #init to zero
for i in range(numIt):
bestStump,error,classEst = buildStump(dataArr,classLabels,D)# note: D varies to improve the classifier
alpha = float(0.5*np.log((1.0-error)/max(error,1e-16)))#calc alpha; note: max(error,eps) accounts for error=0
bestStump['alpha'] = alpha
weakClassArr.append(bestStump) #store Stump Params in Array
#print "classEst: ",classEst.T
expon = np.multiply(-1*alpha*np.mat(classLabels).T,classEst) #exponent for D calc, notice that multiplying \
# np.mat(classLabels).T & classEst is for sign \
# that drives D values to 0 or 1
D = np.multiply(D,np.exp(expon)) #Calc New D for next iteration
D = D/D.sum() # D.sum() normalizes the values as probabilities that all sum to 1
#calc training error of all classifiers, if this is 0 quit for loop early (use break)
aggClassEst += alpha*classEst # <----- the magic; this allows the signs (labels) to be pushed around
aggErrors = np.multiply(np.sign(aggClassEst) != np.mat(classLabels).T,np.ones((m,1))) # 1's when error
errorRate = aggErrors.sum()/m # percent error
        print("total error: ", errorRate)
if errorRate == 0.0: break
return weakClassArr,aggClassEst
def adaClassify(datToClass,classifierArr):
"""
Given an unknown datum, we label it from training data.
"""
dataMatrix = np.mat(datToClass)
m = np.shape(dataMatrix)[0]
#print "m:{}".format(m)
aggClassEst = np.mat(np.zeros((m,1))) # predicted values
#print "initial aggClassEst:{}".format(aggClassEst)
for i in range(len(classifierArr)):
classEst = stumpClassify(dataMatrix,classifierArr[i]['feature']-1\
, classifierArr[i]['thresh']\
, classifierArr[i]['ineq'])#call stump classify
aggClassEst += classifierArr[i]['alpha']*classEst
        print("set{}:{}".format(i, aggClassEst))
return np.sign(aggClassEst)
def loadData():
"""
Loads sample dataset as arrays.
"""
datMat = np.array([[ 1. , 2.1],
[2., 1.1], [1.3, 1.], [1., 1.], [2., 1.]])
classLabels = np.array([1.0, 1.0, -1.0, -1.0, 1.0])
return datMat,classLabels
def loadSimpData():
"""
Loads dataset as matrix.
"""
datMat = np.matrix([[ 1. , 2.1],
[2., 1.1], [1.3, 1.], [1., 1.], [2., 1.]])
classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
return datMat,classLabels
def build_simple_bokeh_graph():
data,labels = loadData()
    print("data:")
    display(data)
    print("labels:")
    display(labels)
    print("Feature 1 data:")
    d1 = data[(labels==1)]
    display(d1)
    print("Feature 2 data:")
    d2 = data[(labels==-1)]
    display(d2)
## set up Bokeh figure
p = figure(
tools="pan,box_zoom,reset,save"
, title="Data: Two Features & Two Classes"
#y_axis_type="log", y_range=[0.001, 10**11]
, x_axis_label='Feature 1'
, y_axis_label='Feature 2'
)
## add data to Bokeh figure
p.scatter(d1[:,0], d1[:,1], legend="class1", fill_color="red", size=20,marker="circle")
p.scatter(d2[:,0], d2[:,1], legend="class2", fill_color="blue", size=20,marker="square")
# display Bokeh figure
show(p)
def run_stump():
# run stump classifier without adaboost
datMat,classLabels=loadSimpData()
    print("Data:")
    display(datMat)
    D = np.mat( np.ones((5,1)) / 5 )
    print("initial D:")
    display(D)
    numSteps = 10.0
    print("TEST:")
    x,y,z=buildStump(datMat,classLabels,D) # note: D is constant here, but this is the value that we will vary with adaboost.
    print("\n\nRESULTS:")
    print(" bestStump:{}".format(x))
    print(" smallest error:{}".format(y))
    print(" predicted labels:")
display(z)
def graph_alpha():
# Create graph of alpha values
x = np.arange(0.01,1,0.01)
alpha_calc = np.vectorize(alpha)
y = alpha_calc(x)
## Bokeh output inline
#output_notebook()
## set up Bokeh figure
p = figure(
tools="pan,box_zoom,reset,save"
, title="How are the classifiers scored?"
#y_axis_type="log", y_range=[0.001, 10**11]
, x_axis_label='Error'
, y_axis_label='Alpha'
)
## add data to Bokeh figure
p.line(x, y, legend="alpha curve", color="blue", line_width=2)
# guide line
a = np.array([.5,.5])
b = np.array([-1,1])
p.line(a,b, legend="50% error", color="red",line_width = 1, alpha=0.6, line_dash="4 4")
# display Bokeh figure
show(p)
def simple_application():
datArr,labelArr=loadSimpData()
    print("Building training set.")
    classifierArr = adaBoostTrainDS(datArr,labelArr,30)
    print("\nclassifierArr:")
    display(classifierArr[0])
    print("Classification of unknown point:")
display(adaClassify([0, 0],classifierArr[0]))
###Output
_____no_output_____
###Markdown
What is a decision stump?Decision trees typically create a path that uses several features to label a dataset. With a stump, we try to pick a single feature in a dataset and use it to label every element. Let's start with an example. We'll create some labeled data.
###Code
build_simple_bokeh_graph()
###Output
_____no_output_____
###Markdown
Which individual feature best helps us classify this dataset? As you might note, we'll always have an error. As such, we could call this method a weak classifier. Let's first see how to build a decision stump, test whether any of the values are less than or greater than the threshold value we’re testing, and then loop over a weighted version of the dataset to find the stump that yields the lowest error. __One important distinction at this point is that we're using equal weights across all elements in the dataset.__ Later, we'll use the AdaBoost algorithm to change these weights to optimize the accuracy of the labels. We now have the ability to choose which point on a specific continuous feature we'll use as the threshold value to label our data. Let's see which value and dimension are selected to choose the best stump. Use stump classifier w/out AdaBoost
###Code
run_stump()
###Output
_____no_output_____
###Markdown
Implement AdaBoostAfter building our stump classifier, we'll try to improve it using AdaBoost. We're going to change one set of values: `D`, which is a vector of weights. We'll change `D` through an iterative process. This weight vector adjusts for incorrect labels. So we'll change `D` by evaluating those labels that we classified incorrectly and increasing their weight while simultaneously decreasing the weight on those values that we classify correctly. Initially, all of these weights will be equal, but at each iteration we'll re-evaluate the weights to adjust for failure/success. Hence, each point in the dataset will receive a custom weight depending on how well we classified it in the last iteration. To calculate alpha, $\alpha$, we then sum up the weighted errors for each stump. __In short, the vector `D` is varied per stump - each of which is scored with an alpha value.__ Before we move on to understand how AdaBoost uses our sets of alpha values, let's look a little more deeply at what this score means.We calculate our error rate with \begin{equation*}\epsilon = \frac{number\ of\ incorrectly\ classified\ examples}{total\ number\ of\ examples}\\ \end{equation*}These errors are multiplied by the weights and then the alpha value is calculated as follows:\begin{equation*}\alpha = \frac{1}{2}ln(\frac{1-\epsilon}{\epsilon})\end{equation*}Let's look at a graph of alpha values.
###Code
graph_alpha()
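# Quick numeric check of the alpha formula above (an added illustration, not in the original notebook):
# errors below 0.5 give positive alpha, 0.5 gives zero, and errors above 0.5 give negative alpha.
for err in (0.1, 0.3, 0.5, 0.7):
    print("error = {:.1f} -> alpha = {:+.3f}".format(err, alpha(err)))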
###Output
_____no_output_____
###Markdown
What can we learn from this graph?(see Chris McCormick's discussion https://chrisjmccormick.wordpress.com/2013/12/13/adaboost-tutorial/)1. The classifier weight grows exponentially as the error approaches 0. Better classifiers are given exponentially more weight.2. The classifier weight is zero if the error rate is 0.5. A classifier with 50% accuracy is no better than random guessing, so we ignore it.3. The classifier weight grows exponentially negative as the error approaches 1. We give a negative weight to classifiers with worse than 50% accuracy. “Whatever that classifier says, do the opposite!”.We end up using alpha through a series of iterations that drive the labeling error closer to zero. The way this works is that we sum together the product of alpha and each stump's predicted values, which provides a vector of floats whose signs indicate our labels. We now understand that alpha relates to the sum of errors and is in some way associated with how much to weight each stump. Now we just need to understand how alpha ($\alpha$) relates to the individualized weights in vector `D`:Correctly predicted,\begin{equation*}D_{i}^{(t+1)}= \frac{D_{i}^{(t)}e^{-\alpha}}{Sum(D)}\\ \end{equation*}Incorrectly predicted,\begin{equation*}D_{i}^{(t+1)}= \frac{D_{i}^{(t)}e^{\alpha}}{Sum(D)}\\ \end{equation*}D is a probability distribution, so the sum of all the elements in D must be 1.0. Let's consider the entire AdaBoost process: Create a set of weak classifiers using AdaBoostIn this section, we'll apply the AdaBoost algorithm to labeled data. As we evaluate each of the classifiers, we will score them with an alpha value. Finally, we sum the product of the predicted labels and alpha for each point to create a matrix of floats. Each value in this matrix has a sign, which should correspond to the correct labels if our error went to zero.
###Code
datMat,classLabels=loadSimpData()
adaBoostTrainDS(datMat,classLabels,9)
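# Added illustration (not part of the original notebook): one boosting round "by hand",
# mirroring the alpha and D-update formulas described above, to show how the weights shift.
D = np.mat(np.ones((5, 1)) / 5)                                # start with equal weights
bestStump, error, classEst = buildStump(datMat, classLabels, D)
alpha_val = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))
expon = np.multiply(-1 * alpha_val * np.mat(classLabels).T, classEst)
D_next = np.multiply(D, np.exp(expon))                         # up-weight misclassified points
D_next = D_next / D_next.sum()                                 # renormalize so the weights sum to 1
print("alpha for the first stump: {:.3f}".format(alpha_val))
print("updated weight vector D:")
display(D_next)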
###Output
_____no_output_____
###Markdown
Application of AdaBoostWith the code that we've already written, we have a list of weak classifiers with their corresponding alpha scores: [ {'alpha': 0.6931471805599453, 'feature': 1, 'ineq': 'lt', 'thresh': 1.3} , {'alpha': 0.9729550745276565, 'feature': 2, 'ineq': 'lt', 'thresh': 1.0} , {'alpha': 0.8958797346140273, 'feature': 1, 'ineq': 'lt', 'thresh': 0.9} ]So we can reuse the threshold value of the corresponding features in each of these weak classifiers as a stump to label the unknown data. We'll recycle `stumpClassify()` with this training data, which means that we can rate each classifier's label using the previously assigned alpha value. See `adaClassify()`.
###Code
display(simple_application())
###Output
_____no_output_____
###Markdown
Pros/Cons of AdaBoost(Pro/Con notes below borrowed from Eric Emer's [presentation](http://math.mit.edu/~rothvoss/18.304.3PM/Presentations/1-Eric-Boosting304FinalRpdf.pdf))Pros* Fast * Simple and easy to program* No parameters to tune* No prior knowledge needed about the weak learner* Provably effective given the Weak Learning Assumption* VersatileCons* Weak classifiers that are too complex can lead to overfitting.* Weak classifiers that are too weak can lead to low margins, and can also lead to overfitting.* From empirical evidence, AdaBoost is particularly vulnerable to uniform noise. SummaryHow does AdaBoost optimize weights?The data points that have been misclassified most by the previous weak classifier are pinpointed and become the focus for the next iteration. By pinpointed, we see these regularly misclassified elements receiving a larger weight and associated larger error. How does AdaBoost aggregate many weak classifiers into a single prediction?With the score (alpha value) applied to the prediction set for each classifier, we aggregate the scores by their index value. The aggregated vector provides an optimally weighted majority vote of weak classifiers! See Robert Schapire's [Explaining Adaboost](http://rob.schapire.net/papers/explaining-adaboost.pdf) for a good discussion on AdaBoost. Appendix Random notes: Bagging* Reshuffle your training data to create k different training sets and learn * Combine the k different classifiers by majority votingBoosting* Assign different weights to training samples in a “smart” way so that different classifiers pay more attention to different samples* Weighted majority voting, where the weight of an individual classifier is proportional to its accuracy* Ada-boost (1996) was influenced by bagging, and it is superior to bagging Non linearly separable examplehttp://scikit-learn.org/stable/auto_examples/ensemble/plot_adaboost_twoclass.html
###Code
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
###Output
_____no_output_____ |
Introduction to Data Science in Python/Assignment+4.ipynb | ###Markdown
---_You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._---
###Code
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
###Output
_____no_output_____
###Markdown
Assignment 4 - Hypothesis TestingThis assignment requires more individual learning than previous assignments - you are encouraged to check out the [pandas documentation](http://pandas.pydata.org/pandas-docs/stable/) to find functions or methods you might not have used yet, or ask questions on [Stack Overflow](http://stackoverflow.com/) and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff.Definitions:* A _quarter_ is a specific three month period, Q1 is January through March, Q2 is April through June, Q3 is July through September, Q4 is October through December.* A _recession_ is defined as starting with two consecutive quarters of GDP decline, and ending with two consecutive quarters of GDP growth.* A _recession bottom_ is the quarter within a recession which had the lowest GDP.* A _university town_ is a city which has a high percentage of university students compared to the total population of the city.**Hypothesis**: University towns have their mean housing prices less effected by recessions. Run a t-test to compare the ratio of the mean price of houses in university towns the quarter before the recession starts compared to the recession bottom. (`price_ratio=quarter_before_recession/recession_bottom`)The following data files are available for this assignment:* From the [Zillow research data site](http://www.zillow.com/research/data/) there is housing data for the United States. In particular the datafile for [all homes at a city level](http://files.zillowstatic.com/research/public/City/City_Zhvi_AllHomes.csv), ```City_Zhvi_AllHomes.csv```, has median home sale prices at a fine grained level.* From the Wikipedia page on college towns is a list of [university towns in the United States](https://en.wikipedia.org/wiki/List_of_college_townsCollege_towns_in_the_United_States) which has been copy and pasted into the file ```university_towns.txt```.* From Bureau of Economic Analysis, US Department of Commerce, the [GDP over time](http://www.bea.gov/national/index.htmgdp) of the United States in current dollars (use the chained value in 2009 dollars), in quarterly intervals, in the file ```gdplev.xls```. For this assignment, only look at GDP data from the first quarter of 2000 onward.Each function in this assignment below is worth 10%, with the exception of ```run_ttest()```, which is worth 50%.
###Code
# Use this dictionary to map state names to two letter acronyms
states = {'OH': 'Ohio', 'KY': 'Kentucky', 'AS': 'American Samoa', 'NV': 'Nevada', 'WY': 'Wyoming', 'NA': 'National', 'AL': 'Alabama', 'MD': 'Maryland', 'AK': 'Alaska', 'UT': 'Utah', 'OR': 'Oregon', 'MT': 'Montana', 'IL': 'Illinois', 'TN': 'Tennessee', 'DC': 'District of Columbia', 'VT': 'Vermont', 'ID': 'Idaho', 'AR': 'Arkansas', 'ME': 'Maine', 'WA': 'Washington', 'HI': 'Hawaii', 'WI': 'Wisconsin', 'MI': 'Michigan', 'IN': 'Indiana', 'NJ': 'New Jersey', 'AZ': 'Arizona', 'GU': 'Guam', 'MS': 'Mississippi', 'PR': 'Puerto Rico', 'NC': 'North Carolina', 'TX': 'Texas', 'SD': 'South Dakota', 'MP': 'Northern Mariana Islands', 'IA': 'Iowa', 'MO': 'Missouri', 'CT': 'Connecticut', 'WV': 'West Virginia', 'SC': 'South Carolina', 'LA': 'Louisiana', 'KS': 'Kansas', 'NY': 'New York', 'NE': 'Nebraska', 'OK': 'Oklahoma', 'FL': 'Florida', 'CA': 'California', 'CO': 'Colorado', 'PA': 'Pennsylvania', 'DE': 'Delaware', 'NM': 'New Mexico', 'RI': 'Rhode Island', 'MN': 'Minnesota', 'VI': 'Virgin Islands', 'NH': 'New Hampshire', 'MA': 'Massachusetts', 'GA': 'Georgia', 'ND': 'North Dakota', 'VA': 'Virginia'}
def get_list_of_university_towns():
'''Returns a DataFrame of towns and the states they are in from the
university_towns.txt list. The format of the DataFrame should be:
DataFrame( [ ["Michigan", "Ann Arbor"], ["Michigan", "Yipsilanti"] ],
columns=["State", "RegionName"] )
The following cleaning needs to be done:
1. For "State", removing characters from "[" to the end.
2. For "RegionName", when applicable, removing every character from " (" to the end.
3. Depending on how you read the data, you may need to remove newline character '\n'. '''
state = None
state_towns = []
data = []
with open('university_towns.txt') as file :
for line in file :
thisLine = line[:-1]
if (thisLine[-6:] == '[edit]'):
state = thisLine[:-6]
continue
if ('(' in line):
town = thisLine[:thisLine.index('(')-1]
state_towns.append([state,town])
else:
town = thisLine
state_towns.append([state,town])
data.append(thisLine)
df = pd.DataFrame(state_towns,columns = ['State','RegionName'])
return df
print(get_list_of_university_towns())
def get_recession_start():
'''Returns the year and quarter of the recession start time as a
string value in a format such as 2005q3'''
gdp = pd.ExcelFile('gdplev.xls')
gdp = gdp.parse("Sheet1", skiprows=219)
gdp = gdp[['1999q4', 9926.1]]
gdp.columns = ['Quarter', 'GDP']
for i in range (2,len(gdp)) :
if (gdp.iloc[i-2][1] > gdp.iloc[i-1][1]) and (gdp.iloc[i-1][1] > gdp.iloc[i][1]):
return gdp.iloc[i-2][0]
print(get_recession_start())
def get_recession_end():
'''Returns the year and quarter of the recession end time as a
string value in a format such as 2005q3'''
gdplev = pd.ExcelFile('gdplev.xls')
gdplev = gdplev.parse("Sheet1", skiprows=219)
gdplev = gdplev[['1999q4', 9926.1]]
gdplev.columns = ['Quarter','GDP']
start = get_recession_start()
start_index = gdplev[gdplev['Quarter'] == start].index.tolist()[0]
gdplev=gdplev.iloc[start_index:]
for i in range(2, len(gdplev)):
if (gdplev.iloc[i-2][1] < gdplev.iloc[i-1][1]) and (gdplev.iloc[i-1][1] < gdplev.iloc[i][1]):
return gdplev.iloc[i][0]
print(get_recession_end())
def get_recession_bottom():
'''Returns the year and quarter of the recession bottom time as a
string value in a format such as 2005q3'''
gdplev = pd.ExcelFile('gdplev.xls')
gdplev = gdplev.parse("Sheet1", skiprows=219)
gdplev = gdplev[['1999q4', 9926.1]]
gdplev.columns = ['Quarter','GDP']
start = get_recession_start()
start_index = gdplev[gdplev['Quarter'] == start].index.tolist()[0]
end = get_recession_end()
end_index = gdplev[gdplev['Quarter'] == end].index.tolist()[0]
gdplev=gdplev.iloc[start_index:end_index+1]
bottom = gdplev['GDP'].min()
bottom_index = gdplev[gdplev['GDP'] == bottom].index.tolist()[0]-start_index
return gdplev.iloc[bottom_index]['Quarter']
print(get_recession_bottom())
def convert_housing_data_to_quarters():
'''Converts the housing data to quarters and returns it as mean
values in a dataframe. This dataframe should be a dataframe with
columns for 2000q1 through 2016q3, and should have a multi-index
in the shape of ["State","RegionName"].
Note: Quarters are defined in the assignment description, they are
not arbitrary three month periods.
The resulting dataframe should have 67 columns, and 10,730 rows.
'''
hdata = pd.read_csv('City_Zhvi_AllHomes.csv')
hdata = hdata.drop(hdata.columns[[0]+list(range(3,51))],axis=1)
hdata2 = pd.DataFrame(hdata[['State','RegionName']])
for year in range(2000,2016):
hdata2[str(year)+'q1'] = hdata[[str(year)+'-01',str(year)+'-02',str(year)+'-03']].mean(axis=1)
hdata2[str(year)+'q2'] = hdata[[str(year)+'-04',str(year)+'-05',str(year)+'-06']].mean(axis=1)
hdata2[str(year)+'q3'] = hdata[[str(year)+'-07',str(year)+'-08',str(year)+'-09']].mean(axis=1)
hdata2[str(year)+'q4'] = hdata[[str(year)+'-10',str(year)+'-11',str(year)+'-12']].mean(axis=1)
year = 2016
hdata2[str(year)+'q1'] = hdata[[str(year)+'-01',str(year)+'-02',str(year)+'-03']].mean(axis=1)
hdata2[str(year)+'q2'] = hdata[[str(year)+'-04',str(year)+'-05',str(year)+'-06']].mean(axis=1)
hdata2[str(year)+'q3'] = hdata[[str(year)+'-07',str(year)+'-08']].mean(axis=1)
hdata2 = hdata2.replace({'State':states})
hdata2 = hdata2.set_index(['State','RegionName'])
return hdata2
convert_housing_data_to_quarters()
def run_ttest():
'''First creates new data showing the decline or growth of housing prices
between the recession start and the recession bottom. Then runs a ttest
comparing the university town values to the non-university towns values,
return whether the alternative hypothesis (that the two groups are the same)
is true or not as well as the p-value of the confidence.
Return the tuple (different, p, better) where different=True if the t-test is
True at a p<0.01 (we reject the null hypothesis), or different=False if
otherwise (we cannot reject the null hypothesis). The variable p should
be equal to the exact p value returned from scipy.stats.ttest_ind(). The
value for better should be either "university town" or "non-university town"
    depending on which has a lower mean price ratio (which is equivalent to a
reduced market loss).'''
unitowns = get_list_of_university_towns()
bottom = get_recession_bottom()
start = get_recession_start()
hdata = convert_housing_data_to_quarters()
bstart = hdata.columns[hdata.columns.get_loc(start) -1]
hdata['ratio'] = hdata[bottom] - hdata[bstart]
hdata = hdata[[bottom,bstart,'ratio']]
hdata = hdata.reset_index()
unitowns_hdata = pd.merge(hdata,unitowns,how='inner',on=['State','RegionName'])
unitowns_hdata['uni'] = True
hdata2 = pd.merge(hdata,unitowns_hdata,how='outer',on=['State','RegionName',bottom,bstart,'ratio'])
hdata2['uni'] = hdata2['uni'].fillna(False)
ut = hdata2[hdata2['uni'] == True]
nut = hdata2[hdata2['uni'] == False]
t,p = ttest_ind(ut['ratio'].dropna(),nut['ratio'].dropna())
different = True if p < 0.01 else False
better = "non-university town" if ut['ratio'].mean() < nut['ratio'].mean() else "university town"
return different, p, better
run_ttest()
###Output
_____no_output_____
###Markdown
---_You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._---
###Code
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
###Output
_____no_output_____
###Markdown
Assignment 4 - Hypothesis TestingThis assignment requires more individual learning than previous assignments - you are encouraged to check out the [pandas documentation](http://pandas.pydata.org/pandas-docs/stable/) to find functions or methods you might not have used yet, or ask questions on [Stack Overflow](http://stackoverflow.com/) and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff.Definitions:* A _quarter_ is a specific three month period, Q1 is January through March, Q2 is April through June, Q3 is July through September, Q4 is October through December.* A _recession_ is defined as starting with two consecutive quarters of GDP decline, and ending with two consecutive quarters of GDP growth.* A _recession bottom_ is the quarter within a recession which had the lowest GDP.* A _university town_ is a city which has a high percentage of university students compared to the total population of the city.**Hypothesis**: University towns have their mean housing prices less effected by recessions. Run a t-test to compare the ratio of the mean price of houses in university towns the quarter before the recession starts compared to the recession bottom. (`price_ratio=quarter_before_recession/recession_bottom`)The following data files are available for this assignment:* From the [Zillow research data site](http://www.zillow.com/research/data/) there is housing data for the United States. In particular the datafile for [all homes at a city level](http://files.zillowstatic.com/research/public/City/City_Zhvi_AllHomes.csv), ```City_Zhvi_AllHomes.csv```, has median home sale prices at a fine grained level.* From the Wikipedia page on college towns is a list of [university towns in the United States](https://en.wikipedia.org/wiki/List_of_college_townsCollege_towns_in_the_United_States) which has been copy and pasted into the file ```university_towns.txt```.* From Bureau of Economic Analysis, US Department of Commerce, the [GDP over time](http://www.bea.gov/national/index.htmgdp) of the United States in current dollars (use the chained value in 2009 dollars), in quarterly intervals, in the file ```gdplev.xls```. For this assignment, only look at GDP data from the first quarter of 2000 onward.Each function in this assignment below is worth 10%, with the exception of ```run_ttest()```, which is worth 50%.
###Code
CityLvl = pd.read_csv('City_Zhvi_AllHomes.csv')
CityLvl.head()
GDP = pd.read_excel('gdplev.xls',skiprows=8, header=None).iloc[:,[4,6]]
GDP.columns = ['Quarter','value']
GDP = GDP[GDP['Quarter'] >= '2000q1'].reset_index(drop=True)
GDP.head()
# Use this dictionary to map state names to two letter acronyms
states = {'OH': 'Ohio', 'KY': 'Kentucky', 'AS': 'American Samoa', 'NV': 'Nevada', 'WY': 'Wyoming', 'NA': 'National', 'AL': 'Alabama', 'MD': 'Maryland', 'AK': 'Alaska', 'UT': 'Utah', 'OR': 'Oregon', 'MT': 'Montana', 'IL': 'Illinois', 'TN': 'Tennessee', 'DC': 'District of Columbia', 'VT': 'Vermont', 'ID': 'Idaho', 'AR': 'Arkansas', 'ME': 'Maine', 'WA': 'Washington', 'HI': 'Hawaii', 'WI': 'Wisconsin', 'MI': 'Michigan', 'IN': 'Indiana', 'NJ': 'New Jersey', 'AZ': 'Arizona', 'GU': 'Guam', 'MS': 'Mississippi', 'PR': 'Puerto Rico', 'NC': 'North Carolina', 'TX': 'Texas', 'SD': 'South Dakota', 'MP': 'Northern Mariana Islands', 'IA': 'Iowa', 'MO': 'Missouri', 'CT': 'Connecticut', 'WV': 'West Virginia', 'SC': 'South Carolina', 'LA': 'Louisiana', 'KS': 'Kansas', 'NY': 'New York', 'NE': 'Nebraska', 'OK': 'Oklahoma', 'FL': 'Florida', 'CA': 'California', 'CO': 'Colorado', 'PA': 'Pennsylvania', 'DE': 'Delaware', 'NM': 'New Mexico', 'RI': 'Rhode Island', 'MN': 'Minnesota', 'VI': 'Virgin Islands', 'NH': 'New Hampshire', 'MA': 'Massachusetts', 'GA': 'Georgia', 'ND': 'North Dakota', 'VA': 'Virginia'}
CityLvl['State'] = CityLvl['State'].replace(states)
CityLvl.head()
def get_list_of_university_towns():
'''Returns a DataFrame of towns and the states they are in from the
university_towns.txt list. The format of the DataFrame should be:
DataFrame( [ ["Michigan", "Ann Arbor"], ["Michigan", "Yipsilanti"] ],
columns=["State", "RegionName"] )
The following cleaning needs to be done:
1. For "State", removing characters from "[" to the end.
2. For "RegionName", when applicable, removing every character from " (" to the end.
3. Depending on how you read the data, you may need to remove newline character '\n'. '''
with open("university_towns.txt", "r") as f:
lines = f.readlines()
unitown = []
for line in lines:
if line[-7:]=="[edit]\n":
state = line[:-7]
continue
unitown.append([state, line[:line.find(" (")]])
f.close()
UTown = pd.DataFrame(unitown, columns=['State','RegionName'])
#print(UTown)
return UTown
get_list_of_university_towns()
def get_recession_start():
'''Returns the year and quarter of the recession start time as a
string value in a format such as 2005q3'''
values = GDP['value'].values
res1 = (values[1:] - values[:-1])<0
res2 = res1[:-1].astype(int) + res1[1:].astype(int)
q = (np.where(res2==2)[0][0])+1
#print(q)
#print(GDP[q-5:q+5])
return GDP['Quarter'][q]
get_recession_start()
def get_recession_end():
'''Returns the year and quarter of the recession end time as a
string value in a format such as 2005q3'''
values = GDP['value'].values
res1 = (values[1:] - values[:-1])<0
res2 = res1[:-1].astype(int) + res1[1:].astype(int)
grow1 = (values[1:] - values[:-1])>0
grow2 = grow1[:-1].astype(int) + grow1[1:].astype(int)
q = (np.where(grow2==2)[0])+1
GDP_res = GDP.iloc[q]
GDP_res = GDP_res[GDP_res['Quarter'] >= get_recession_start()].reset_index(drop=True)
return GDP_res['Quarter'][1]
get_recession_end()
def get_recession_bottom():
'''Returns the year and quarter of the recession bottom time as a
string value in a format such as 2005q3'''
GDP_res = GDP[(GDP['Quarter']>=get_recession_start()) & (GDP['Quarter']<=get_recession_end())].set_index('Quarter')
return (GDP_res.idxmin().values[0])
#return GDP_res
get_recession_bottom()
def convert_housing_data_to_quarters():
'''Converts the housing data to quarters and returns it as mean
values in a dataframe. This dataframe should be a dataframe with
columns for 2000q1 through 2016q3, and should have a multi-index
in the shape of ["State","RegionName"].
Note: Quarters are defined in the assignment description, they are
not arbitrary three month periods.
The resulting dataframe should have 67 columns, and 10,730 rows.
'''
y_range = [str(i) for i in range(2000,2017)]
q_begin = ['01','04','07','10']
q_end = ['03','06','09','12']
yq_begin = []
yq_end = []
for y in y_range:
for q1, q2 in zip(q_begin,q_end):
yq_begin.append(y+'-'+q1)
yq_end.append(y+'-'+q2)
yq_begin.pop(-1)
yq_end.pop(-1)
#print(yq_begin)
CityLvlDate = CityLvl.set_index(['State','RegionName'])
#print(CityLvlDate)
date_col = (CityLvlDate.columns>='2000-01') & (CityLvlDate.columns<='2016-12')
CityLvlDate = (CityLvlDate.loc[:,date_col])
for q, (start, ending) in enumerate(zip(yq_begin, yq_end)):
quarter = (start[:4]+'q'+str(q%4+1))
date_col = (CityLvlDate.columns>=start) & (CityLvlDate.columns<=ending)
#print(quarter)
CityLvlDate[quarter] = CityLvlDate.loc[:,date_col].apply(np.nanmean,axis=1)
used_col = CityLvlDate.loc[:,date_col].columns.values
for col in used_col:
del CityLvlDate[col]
return CityLvlDate
convert_housing_data_to_quarters()
housingQ = convert_housing_data_to_quarters()
from scipy import stats
def run_ttest():
'''First creates new data showing the decline or growth of housing prices
between the recession start and the recession bottom. Then runs a ttest
comparing the university town values to the non-university towns values,
return whether the alternative hypothesis (that the two groups are the same)
is true or not as well as the p-value of the confidence.
Return the tuple (different, p, better) where different=True if the t-test is
True at a p<0.01 (we reject the null hypothesis), or different=False if
otherwise (we cannot reject the null hypothesis). The variable p should
be equal to the exact p value returned from scipy.stats.ttest_ind(). The
value for better should be either "university town" or "non-university town"
    depending on which has a lower mean price ratio (which is equivalent to a
reduced market loss).'''
housingQ_res = housingQ[get_recession_bottom()] - housingQ[get_recession_start()]
housingQ_res = housingQ_res.dropna().to_frame()
utown_list = get_list_of_university_towns()
utown_list['U'] = True
utown_list = utown_list.set_index(['State','RegionName'])
housingQ_utown = pd.merge(housingQ_res, utown_list, how='left', left_index=True, right_index=True)
housingQ_utown = housingQ_utown.rename(columns={0: "price", 'U': "is U"})
#print(housingQ_utown['is U'] is True)
housingQ_utown['is U'] = housingQ_utown['is U'].apply(lambda x: x is True)
housingQ_utown['is not U'] = housingQ_utown['is U'].apply(lambda x: x is not True)
U_price = housingQ_utown[housingQ_utown['is U']]
notU_price = housingQ_utown[housingQ_utown['is not U']]
U_price = U_price['price'].values
NU_price = notU_price['price'].values
tstat, pval = stats.ttest_ind(U_price, NU_price)
#print(stats.ttest_ind(U_price, NU_price))
#print(tstat)
better = 'university town' if tstat > 0 else 'non-university town'
return (pval<.01, pval, better)
run_ttest()
###Output
_____no_output_____ |
module2-regression-2/LS_DS_212.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '/Users/keila/Documents/Lambda/Units_Git/DS-Unit-2-Linear-Models/data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after. How many observations (rows) are in the train set? In the test set? Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
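# The time-based split described above isn't shown in this notebook's cells; here is a minimal
# sketch (an added example, assuming the df loaded earlier with its 'Year' column):
train = df[df['Year'] < 2008]
test = df[df['Year'] >= 2008]
print('train:', train.shape, 'test:', test.shape)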
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
# TODO: Fit the model
# TODO: Apply the model to new data
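# One possible completion of the TODOs above (a sketch mirroring the single-feature cell earlier;
# assumes the train/test split and y_train/y_test vectors defined in the Follow Along section):
X_train = train[features]
X_test = test[features]
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
print(f'Train Error: {mean_absolute_error(y_train, y_pred):.2f} percentage points')
y_pred = model.predict(X_test)
print(f'Test Error: {mean_absolute_error(y_test, y_pred):.2f} percentage points')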
###Output
_____no_output_____
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
_____no_output_____
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in one coefficient's worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demostrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
    b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
_____no_output_____
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
_____no_output_____
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
###Markdown
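As a side note (not part of the original lesson), NumPy's least-squares solver gives the same answer without explicitly inverting $X^{T}X$, and it is more numerically stable; it also still returns a (minimum-norm) solution when $X^{T}X$ is not invertible:
###Code
# A sketch: the same beta hat via np.linalg.lstsq, assuming the X and y arrays built above
beta_hat_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
print(beta_hat_lstsq)
###Output
_____no_output_____
###Markdown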
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
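For comparison (an aside, not part of the original lesson), scikit-learn also ships a `validation_curve` helper that computes the same kind of curve. A sketch, assuming the `PolynomialRegression` pipeline and training data defined above; note it uses cross-validation on the training set, so the scores won't exactly match the single train/test split:
###Code
# A sketch using sklearn's validation_curve (cross-validated, so numbers will differ)
from sklearn.model_selection import validation_curve

train_scores, val_scores = validation_curve(
    PolynomialRegression(), X_train, y_train,
    param_name='polynomialfeatures__degree',  # step name follows make_pipeline's convention
    param_range=polynomial_degrees,
    cv=3, scoring='r2')

plt.plot(polynomial_degrees, train_scores.mean(axis=1), color='blue', label='Train (CV)')
plt.plot(polynomial_degrees, val_scores.mean(axis=1), color='red', label='Validation (CV)')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown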
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after. How many observations (rows) are in the train set? In the test set? Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
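Before answering that, here is one way to do the date-based split described above (a sketch, assuming the `df` loaded earlier and its `Year` column):
###Code
# A sketch of the date-based split: train on elections before 2008, test on 2008 and after
train = df[df['Year'] < 2008]
test = df[df['Year'] >= 2008]
print('Train:', train.shape)
print('Test:', test.shape)
###Output
_____no_output_____
###Markdown
With `train` and `test` in hand, back to the baseline question: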
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
# TODO: Fit the model
# TODO: Apply the model to new data
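# One possible completion of the TODOs above (a sketch, following the same
# 5-step pattern as the simple-regression cell earlier in this notebook):
X_train = train[features]
X_test = test[features]

model = LinearRegression()
model.fit(X_train, y_train)

y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')

y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')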
###Output
_____no_output_____
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
_____no_output_____
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
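A quick check (an aside, assuming the two-feature model fitted above): each of the differences computed in the scenario cells is exactly the corresponding coefficient.
###Code
# A sketch: one unit of change in a feature moves the prediction by that feature's coefficient
import numpy as np
assert np.isclose(model.predict([[1, 0]]) - model.predict([[0, 0]]), model.coef_[0])
assert np.isclose(model.predict([[2, 100]]) - model.predict([[2, 0]]), 100 * model.coef_[1])
print('Each 1-unit change in a feature moves the prediction by that coefficient.')
###Output
_____no_output_____
###Markdown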
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
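For reference, the metrics this function prints are (using the usual definitions): $MSE = \frac{1}{n}\sum_{i}(y_i - \hat{y}_i)^2$, $RMSE = \sqrt{MSE}$, $MAE = \frac{1}{n}\sum_{i}|y_i - \hat{y}_i|$, and $R^2 = 1 - \frac{\sum_{i}(y_i - \hat{y}_i)^2}{\sum_{i}(y_i - \bar{y})^2}$.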
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
_____no_output_____
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
_____no_output_____
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
###Markdown
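As an aside (not part of the original lesson), here is what the "guess & check" approach looks like when it's automated: a tiny gradient-descent sketch for the one-feature line, assuming the `train` DataFrame and column names used above.
###Code
# A gradient-descent sketch (illustration only): minimize MSE for y = m*x + b
import numpy as np

x = train['Average Recent Growth in Personal Incomes'].values
y = train['Incumbent Party Vote Share'].values

m, b = 0.0, y.mean()   # start from the mean baseline
lr = 0.01              # learning rate
for _ in range(10_000):
    error = (m * x + b) - y
    m -= lr * 2 * (error * x).mean()   # d(MSE)/dm
    b -= lr * 2 * error.mean()         # d(MSE)/db

print(m, b)  # should approach the OLS slope & intercept found above
###Output
_____no_output_____
###Markdown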
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
# Split on the Year column: train on elections before 2008, test on 2008 and after
train = df[df.Year < 2008]
test = df[df.Year >= 2008]
print(test)
train
###Output
Year ... Incumbent Party Vote Share
14 2008 ... 46.32
15 2012 ... 52.00
16 2016 ... 48.20
[3 rows x 6 columns]
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
print(len(train))
print(len(test))
###Output
14
3
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
print(df.shape)
df
def wrangle(filepath):
# Read in the data, rename columns and set index as 'year'
col_names = ['year', 'incumbent', 'challenger', 'income', 'fatalities', 'incumbent_vote_share']
df = pd.read_csv(filepath,
header=0,
names=col_names,
index_col='year')
return df
df = wrangle(DATA_PATH + 'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
y = df['incumbent_vote_share']
X = df[['income']]
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
print(y.shape, X.shape)
mask = X.index < 2008
X_train, y_train = X.loc[mask], y.loc[mask]
X_test, y_test = X.loc[~mask], y.loc[~mask]
print(X_train.tail(), X_train.shape, y_train.shape)
X_test.head()
mask
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
            'US Military Fatalities per Million']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# Fit the model
model.fit(X_train, y_train)
mae = mean_absolute_error(y_train, model.predict(X_train))
print(f'Train Error: {mae:.2f} percentage points')
# Apply the model to new data
mae = mean_absolute_error(y_test, model.predict(X_test))
print(f'Test Error: {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$. Let's review this objective, but now for multiple regression. What's the equation for the plane of best fit? $y = \beta_0 + \beta_1x_1 + \beta_2x_2$. Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
_____no_output_____
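###Markdown
(A quick check, assuming the two-feature model and `X_test` from the completed cell above: plugging the intercept and coefficients into the plane equation reproduces `model.predict` exactly.)
###Code
# y = beta0 + beta1*x1 + beta2*x2, applied to the test features by hand
manual_pred = model.intercept_ + X_test @ model.coef_
print(np.allclose(manual_pred, model.predict(X_test)))
###Output
_____no_output_____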
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
Challenge: In your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors Overview: So far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems. However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & Check: This function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize. Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
    b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
_____no_output_____
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
_____no_output_____
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
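###Markdown
Another aside (also not something you'll be tested on): NumPy can solve the same least squares problem directly with `np.linalg.lstsq`, which avoids forming $(X^{T}X)^{-1}$ explicitly and is more numerically stable. A minimal sketch, reusing the `X` and `y` arrays built above:
###Code
# Solve min ||X @ beta - y||^2 without explicitly inverting X^T X.
# rcond=None opts into NumPy's newer default cutoff for small singular values.
beta_hat_lstsq, residuals, rank, singular_values = np.linalg.lstsq(X, y, rcond=None)
print('Beta Hat (np.linalg.lstsq)')
print(beta_hat_lstsq)
###Output
_____no_output_____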
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
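###Markdown
(One more aside: scikit-learn has a helper that automates this loop. A minimal sketch, reusing the `PolynomialRegression` pipeline and `polynomial_degrees` range defined above. Note it scores with cross-validation folds rather than our single train/test split, so the numbers won't match exactly.)
###Code
from sklearn.model_selection import validation_curve
# 'polynomialfeatures__degree' is the degree parameter of the PolynomialFeatures
# step inside the pipeline created by make_pipeline.
train_scores, val_scores = validation_curve(
    PolynomialRegression(), X, y,
    param_name='polynomialfeatures__degree',
    param_range=polynomial_degrees,
    cv=3, scoring='r2',
)
print('Mean train R^2 by degree:     ', train_scores.mean(axis=1))
print('Mean validation R^2 by degree:', val_scores.mean(axis=1))
###Output
_____no_output_____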
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
train = df[df['Year'] < 2008]
test = df[df['Year'] >= 2008]
test
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
train.shape, test.shape
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
y_test
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
guess
y_train
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
(y_test - y_pred).abs().mean()
###Output
_____no_output_____
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
model
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred_train = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred_train)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_train
X_test = test[features]
X_test
# TODO: Fit the model
model.fit(X_train, y_train)
# TODO: Apply the model to new data
y_pred_train = model.predict(X_train)
mean_absolute_error(y_pred_train, y_train)
y_pred = model.predict(X_test)
mean_absolute_error(y_pred, y_test)
###Output
_____no_output_____
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$. Let's review this objective, but now for multiple regression. What's the equation for the plane of best fit? With two features it is $y = \beta_0 + \beta_1x_1 + \beta_2x_2$, and more generally, $y = \beta_0 + \beta_1x_1 + \beta_2x_2 + \beta_3x_3 + ... + \beta_nx_n$. Can you relate the intercept and coefficients to what you see in the plot above?
###Code
df.head()
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0:.2f} + {beta1:.2f}*x1 + {beta2:.2f}*x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
Intercept 46.25489966153873
Average Recent Growth in Personal Incomes 3.590047
US Military Fatalities per Million -0.053157
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
Challenge: In your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors Overview: So far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems. However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & Check: This function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize. Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
    b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
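###Markdown
(Optional: if the `ipywidgets` package is available in your environment, sliders make this guess & check loop much faster. A sketch:)
###Code
# Interactively adjust m & b and watch the squared errors and metrics change.
from ipywidgets import interact
interact(
    lambda m, b: squared_errors(train, feature, target, m, b),
    m=(-5.0, 5.0, 0.5),
    b=(40.0, 60.0, 0.5),
);
###Output
_____no_output_____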
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after. How many observations (rows) are in the train set? In the test set? Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
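The code for this split isn't included in this copy of the notebook, so here is a minimal sketch that mirrors the split described above (train on elections before 2008, test on 2008 and after), so that `train` and `test` are defined for the cells below:
###Code
# Split on the Year column: everything before 2008 is training data,
# 2008 and later is the held-out test set.
train = df[df['Year'] < 2008]
test = df[df['Year'] >= 2008]
train.shape, test.shape
###Output
_____no_output_____
###Markdown
With the split in place, the next cell computes the mean Incumbent Party Vote Share for the 1952-2004 training elections.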
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
_____no_output_____
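###Markdown
A minimal sketch, assuming the `train`/`test` split and the `y_train`/`y_test` vectors from above: the same mean baseline can also be expressed with scikit-learn's `DummyRegressor`, so it plugs into the usual fit/predict workflow. The placeholder feature column below is an arbitrary choice, since the baseline ignores it.
###Code
# Sketch: the mean baseline via scikit-learn's DummyRegressor (ignores the features)
from sklearn.dummy import DummyRegressor
from sklearn.metrics import mean_absolute_error
X_placeholder_train = train[['Year']]  # any column works as a placeholder X
X_placeholder_test = test[['Year']]
baseline = DummyRegressor(strategy='mean')
baseline.fit(X_placeholder_train, y_train)
print('Train Error:', mean_absolute_error(y_train, baseline.predict(X_placeholder_train)))
print('Test Error:', mean_absolute_error(y_test, baseline.predict(X_placeholder_test)))
###Output
_____no_output_____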
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
# TODO: Fit the model
# TODO: Apply the model to new data
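# One possible completion (sketch), following the same steps as the single-feature model:
X_train = train[features]
X_test = test[features]
model.fit(X_train, y_train)
print(f'Train Error: {mean_absolute_error(y_train, model.predict(X_train)):.2f} percentage points')
print(f'Test Error: {mean_absolute_error(y_test, model.predict(X_test)):.2f} percentage points')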
###Output
_____no_output_____
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
_____no_output_____
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
    b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
_____no_output_____
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
_____no_output_____
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
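###Markdown
As an aside, the notes above mention gradient descent as an automated form of "guess & check." Here's a tiny, self-contained sketch on made-up data; the learning rate and iteration count are arbitrary, purely illustrative choices.
###Code
# Toy sketch of gradient descent for simple linear regression (illustrative only)
import numpy as np
np.random.seed(0)
x_toy = np.random.uniform(0, 5, size=50)
y_toy = 46 + 3 * x_toy + np.random.normal(0, 1, size=50)  # hypothetical "true" line plus noise
m, b = 0.0, 0.0
learning_rate = 0.01
for _ in range(5000):
    y_hat = m * x_toy + b
    # Gradients of mean squared error with respect to m and b
    grad_m = -2 * np.mean(x_toy * (y_toy - y_hat))
    grad_b = -2 * np.mean(y_toy - y_hat)
    m -= learning_rate * grad_m
    b -= learning_rate * grad_b
print(m, b)  # should end up near the slope (3) and intercept (46) used to generate the data
###Output
_____no_output_____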
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters."This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
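###Markdown
To make the "model complexity" knob concrete, here's a minimal sketch of what `PolynomialFeatures` does to a single column; the two input rows are made up.
###Code
# Sketch: PolynomialFeatures expands one column into bias, x, and x^2 terms
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
toy = np.array([[2.0], [3.0]])
PolynomialFeatures(degree=2).fit_transform(toy)
# -> [[1., 2., 4.],
#     [1., 3., 9.]]
###Output
_____no_output_____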
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
train = df[df['Year'] < 2008]
test = df[df['Year'] >= 2008]
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
train.shape, test.shape
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Test Error (2008-16 elections): 3.63 percentage points
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning:
pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
x_train = train[features]
x_test = test[features]
# TODO: Fit the model
model.fit(x_train, y_train)
# TODO: Apply the model to new data
y_pred = model.predict(x_train)
(y_pred - y_train).abs().mean()  # the same MAE computed by hand (value not displayed mid-cell)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
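# A possible next step (sketch): evaluate the same fitted model on the held-out test set
y_pred_test = model.predict(x_test)
mae_test = mean_absolute_error(y_test, y_pred_test)
print(f'Test Error: {mae_test:.2f} percentage points')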
###Output
Train Error: 1.33 percentage points
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
Intercept 46.25489966153873
Average Recent Growth in Personal Incomes 3.590047
US Military Fatalities per Million -0.053157
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
    b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
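###Markdown
Before interpreting these metrics, here's a minimal, self-contained sketch of how each one is computed from its definition; the numbers are made up.
###Code
# Toy example of the regression metrics' definitions (illustrative numbers only)
import numpy as np
y_true = np.array([50.0, 55.0, 60.0])
y_hat = np.array([52.0, 54.0, 57.0])
mse = np.mean((y_true - y_hat) ** 2)            # mean squared error
rmse = np.sqrt(mse)                             # back in the target's units
mae = np.mean(np.abs(y_true - y_hat))           # mean absolute error
ss_res = np.sum((y_true - y_hat) ** 2)          # sum of squared residuals
ss_tot = np.sum((y_true - y_true.mean()) ** 2)  # total sum of squares around the mean
r2 = 1 - ss_res / ss_tot                        # proportion of variance explained
print(mse, rmse, mae, r2)
###Output
_____no_output_____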
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters."This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
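###Markdown
A cross-validated alternative (sketch): scikit-learn's `validation_curve` can sweep the same degree hyperparameter and average scores over several folds instead of one fixed split. This assumes the `PolynomialRegression` helper and the condo `X`, `y` defined above; the `cv=3` and `scoring` choices are arbitrary.
###Code
# Sketch: cross-validated validation curve over polynomial degree
from sklearn.model_selection import validation_curve
import numpy as np
degrees = np.arange(1, 10, 2)
train_scores, val_scores = validation_curve(
    PolynomialRegression(), X, y,
    param_name='polynomialfeatures__degree',  # default step name from make_pipeline
    param_range=degrees, cv=3, scoring='r2')
for d, tr, va in zip(degrees, train_scores.mean(axis=1), val_scores.mean(axis=1)):
    print(f'degree={d}: mean train R^2 {tr:.2f}, mean validation R^2 {va:.2f}')
###Output
_____no_output_____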
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
df['Year']
df['Year'] < 2008
df[df['Year'] < 2008]
df['Year'] >= 2008
df[df['Year'] >= 2008]
df_train = df[df['Year'] < 2008].copy()
df_test = df[df['Year'] >= 2008].copy()
df_train.shape, df_test.shape, df.shape
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
print(f'Observations in train: {len(df_train)}')
print(f'Observations in test: {len(df_test)}')
###Output
Observations in train: 14
Observations in test: 3
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
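As an aside (this sketch is not part of the lesson), here is roughly what those alternatives could look like with scikit-learn. TimeSeriesSplit and LeaveOneOut are standard scikit-learn classes; the number of splits and the assumption that the rows are already in chronological order are ours.
###Code
# Aside / sketch only (not part of the lesson):
# time-series and leave-one-out splitters from scikit-learn.
# Assumes df is sorted by Year, as it appears to be in this CSV.
from sklearn.model_selection import TimeSeriesSplit, LeaveOneOut
tscv = TimeSeriesSplit(n_splits=3)
for train_idx, test_idx in tscv.split(df):
    print('Train years:', df['Year'].iloc[train_idx].tolist())
    print('Test years: ', df['Year'].iloc[test_idx].tolist())
# Leave-one-out cross-validation uses one fold per row
print('Leave-one-out folds:', LeaveOneOut().get_n_splits(df))
###Output
_____no_output_____
###Markdown
Now, back to the lesson's baseline question above.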
###Code
df_train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = df_train[target]
y_test = df_test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
guess
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Test Error (2008-16 elections): 3.63 percentage points
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target. We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
df_train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = df_train[features]
X_test = df_test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
###Markdown
How does the error compare to the baseline? The simple regression roughly halves the baseline's error. Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target. We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.) Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
df_train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = df_train[features]
X_test = df_test[features]
# TODO: Fit the model
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# TODO: Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.63 percentage points
###Markdown
How does the error compare to the prior model? The train error is roughly halved again; the test error is smaller too, but not by much. Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.) For a regression with 2 features, we can plot the plane of best fit in 3D! (Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
df_train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$ Let's review this objective, but now for multiple regression. What's the equation for the plane of best fit? $y = \beta_0 + \beta_1x_1 + \beta_2x_2$ Or, with any number of features: $y = \beta_0 + \beta_1x_1 + \beta_2x_2 + \dots + \beta_nx_n$ Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
#values
model.coef_
#index
features
pd.Series(data=model.coef_, index=features)
###Output
_____no_output_____
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that a one-unit change in an independent variable results in a coefficient's worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
# that is intercept
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
# holding one feature constant
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
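# Aside (not part of the lesson): each difference above equals the changed
# feature's coefficient times the size of the change (1 point of income
# growth, or 100 fatalities per million), because the model is linear.
print(pd.Series(model.coef_, index=features))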
###Output
_____no_output_____
###Markdown
Challenge In your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors Overview So far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems. However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error - $R^2$ We'll demonstrate two possible methods to minimize squared error: - Guess & check - Linear Algebra Follow Along Guess & Check This function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize. Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
    b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(df_train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
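For reference (a standard definition, not spelled out in the lesson): with $\hat{y}_i$ the prediction for row $i$ and $\bar{y}$ the mean of $y$, \begin{align}R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2}\end{align} With the mean baseline, $\hat{y}_i = \bar{y}$ for every row, so the numerator equals the denominator and $R^2 = 0$.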
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(df_train, feature, target, m=3, b=46)
squared_errors(df_train, feature, target, m=4, b=46)
squared_errors(df_train, feature, target, m=4, b=44)
###Output
Mean Squared Error: 14.727814285714283
Root Mean Squared Error: 3.837683453037038
Mean Absolute Error: 2.797142857142856
R^2: 0.5277570066120691
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(df_train[features].values)
print('X')
print(X)
# y is a column vector
y = df_train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
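# Aside (not part of the lesson): rather than forming the inverse explicitly,
# NumPy can solve the same least-squares problem directly, which is more
# numerically stable. np.linalg.lstsq is a standard NumPy function.
beta_hat_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
print('Beta Hat via np.linalg.lstsq')
print(beta_hat_lstsq)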
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
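# Aside / sketch (not part of the lesson): scikit-learn can compute these
# train/validation scores across a parameter range for you, using
# cross-validation instead of a single split. validation_curve is a real
# scikit-learn function; cv=3 is an arbitrary choice here.
from sklearn.model_selection import validation_curve
train_scores, val_scores = validation_curve(
    PolynomialRegression(), X, y,
    param_name='polynomialfeatures__degree',
    param_range=polynomial_degrees,
    cv=3, scoring='r2')
print('Mean train R^2 by degree:     ', train_scores.mean(axis=1).round(2))
print('Mean validation R^2 by degree:', val_scores.mean(axis=1).round(2))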
###Output
_____no_output_____
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
# filtering into train and test subsets
train = df[df['Year'] < 2007]
test = df[df['Year'] > 2007]
# features and target declaration
features = df.columns[:-1]
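# Note (aside): df.columns[:-1] keeps every column except the last one,
# including non-numeric columns like the candidate names, assuming the
# vote-share target is the final column. The cells below narrow `features`
# to the numeric predictors before actually fitting a model.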
target = "Incumbent Party Vote Share"
# splitting train and test into X and y
train_X = train[features]
train_y = train[target]
test_X = test[features]
test_y = test[target]
# showing shape of resulting subsets
print("Train shape:", train.shape)
print("Test shape:", test.shape)
# ensuring columns of train_X and test_X are correct
train_X.columns
###Output
Train shape: (14, 6)
Test shape: (3, 6)
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
# Train shape: 14
# Test shape: 3
# if we were using k-fold cross validation
k = 4
fold_size = df.shape[0] // k
folds = []
for i in range(k):
    curr_fold = df.iloc[i*fold_size:(i+1)*fold_size, :]
if i == k-1:
curr_fold = df.iloc[i*fold_size:, :]
folds.append(curr_fold)
folds[2].head()
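# Aside / sketch (assumption, not part of the lesson): scikit-learn's KFold
# does the same index bookkeeping. For time-ordered data like elections, a
# time-aware splitter is usually the safer choice.
from sklearn.model_selection import KFold
kf = KFold(n_splits=k)
for i, (tr_idx, te_idx) in enumerate(kf.split(df)):
    print(f'Fold {i}: {len(tr_idx)} train rows, {len(te_idx)} test rows')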
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
# target = 'Incumbent Party Vote Share'
# y_train = train[target]
# y_test = test[target]
# did this above
y_train = train_y
y_test = test_y
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Test Error (2008-16 elections): 3.63 percentage points
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target. We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
###Markdown
How does the error compare to the baseline? It's about 1.8 percentage points better than the baseline, roughly half the error! Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target. We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.) Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
model = LinearRegression()
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# TODO: Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_pred, y_train)
print("MAE of train dataset:", mae)
# TODO: Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_pred, y_test)
print("MAE of test dataset:", mae)
###Output
MAE of test dataset: 1.6341722692537293
###Markdown
How does the error compare to the prior model? The train error improves by more than a percentage point, but the test error improves by only about 0.2 percentage points. Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.) For a regression with 2 features, we can plot the plane of best fit in 3D! (Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$ Let's review this objective, but now for multiple regression. What's the equation for the plane of best fit? $y = \beta_0 + \beta_1x_1 + \beta_2x_2$ Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
Intercept 46.25489966153873
Average Recent Growth in Personal Incomes 3.590047
US Military Fatalities per Million -0.053157
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? For every percentage point of growth in personal income, the predicted incumbent vote share increases by about 3.59 percentage points. For each additional US military fatality per million population, the model decreases the prediction by about 0.05 percentage points. Let's look at some scenarios. We'll see that a one-unit change in an independent variable results in a coefficient's worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
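Plugging the fitted values from the output above into the plane equation (rounded): \begin{align}\hat{y} \approx 46.25 + 3.59\,x_1 - 0.053\,x_2\end{align} For example, 2% income growth with zero fatalities predicts roughly $46.25 + 2(3.59) \approx 53.4$ percent of the two-party vote. The scenarios below check this one step at a time, starting from the intercept.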
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
Challenge In your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors Overview So far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems. However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error - $R^2$ We'll demonstrate two possible methods to minimize squared error: - Guess & check - Linear Algebra Follow Along Guess & Check This function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize. Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
    b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
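# Aside (not part of the lesson): the Moore-Penrose pseudoinverse gives the
# same least-squares solution in one call. np.linalg.pinv is a standard
# NumPy function.
print('Beta Hat via np.linalg.pinv')
print(np.linalg.pinv(X) @ y)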
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.) For a regression with 2 features, we can plot the plane of best fit in 3D! (Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$ Let's review this objective, but now for multiple regression. What's the equation for the plane of best fit? $y = \beta_0 + \beta_1x_1 + \beta_2x_2$ Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
# Stretch goal: Use scikit-learn's StandardScaler to standardize the data and fit the multiple regression model.
df
###Output
_____no_output_____
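###Markdown
A minimal sketch of the stretch goal above, assuming the `train`, `features`, and `target` names defined earlier in this notebook. Standardizing first puts the coefficients on a per-standard-deviation scale, which makes them easier to compare across features.
###Code
# Hedged sketch (stretch goal): standardize the features, then refit the multiple regression.
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
scaled_model = make_pipeline(StandardScaler(), LinearRegression())
scaled_model.fit(train[features], train[target])
# Coefficients are now "change in vote share per 1 standard deviation of the feature"
print(pd.Series(scaled_model.named_steps['linearregression'].coef_, index=features))
###Output
_____no_output_____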
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that a one-unit change in an independent variable results in a coefficient's worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0?
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
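###Markdown
The scenario-by-scenario differences above can be summarized in one cell. A small sketch (using the fitted `model` from this section) checking that a one-unit change in each feature moves the prediction by exactly that feature's coefficient:
###Code
# Hedged check: prediction differences should equal the coefficients.
print(model.predict([[1, 0]]) - model.predict([[0, 0]]), '== beta1 =', model.coef_[0])
print(model.predict([[0, 1]]) - model.predict([[0, 0]]), '== beta2 =', model.coef_[1])
###Output
_____no_output_____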
###Markdown
Challenge In your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors Overview So far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems. However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error - $R^2$ We'll demonstrate two possible methods to minimize squared error: - Guess & check - Linear Algebra Follow Along Guess & Check This function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize. Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear requation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination) The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero. The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0. In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world. However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. --- Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
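###Markdown
The printed metrics are related to each other. A quick sketch (assuming `train`, `feature`, and `y_train` as used in the cell above) recomputing RMSE and $R^2$ from the mean squared error:
###Code
# Hedged check of the metric relationships for the m=3, b=46 line:
# RMSE is the square root of MSE, and R^2 is 1 - MSE / variance of y.
y_pred = 3 * train[feature] + 46
mse = mean_squared_error(y_train, y_pred)
print('RMSE:', np.sqrt(mse))
print('R^2 :', 1 - mse / np.mean((y_train - y_train.mean())**2))
###Output
_____no_output_____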
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
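###Markdown
For completeness: NumPy also ships a least-squares solver that avoids forming the matrix inverse explicitly, which is more numerically stable. A minimal sketch using the same `X` and `y` from the cell above:
###Code
# np.linalg.lstsq solves the same least squares problem without an explicit inverse.
beta_hat_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
print(beta_hat_lstsq)
###Output
_____no_output_____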
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
from sklearn.metrics import mean_absolute_error
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Mean Baseline (using 0 features)
Train Error (1952-2004 elections): 4.85 percentage points
Test Error (2008-16 elections): 3.63 percentage points
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target. We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes', #the bread
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature. Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html#Basics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Linear Regression, dependent on: ['Average Recent Growth in Personal Incomes']
Train Error: 2.65 percentage points
Test Error: 1.80 percentage points
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target. We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.) Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_test = test[features]
# Fit the model
model.fit(X_train,y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train,y_pred)
print(f'Train error {mae:,.2f}')
# Apply the model to new data
y_pred_test = model.predict(X_test)
mae = mean_absolute_error(y_test,y_pred_test)
print(f'Test Error {mae:,.2f}')
coeffs = model.coef_
print(f'The model coefficients are: {list(coeffs)}')
print(f'This means that the coeff for Bread = {coeffs[0]:,.2f}\n'
f'And the coeff for War = {coeffs[1]:,.2f}')
###Output
Linear Regression, dependent on: ['Average Recent Growth in Personal Incomes', 'US Military Fatalities per Million']
Train error 1.33
Test Error 1.63
The model coefficients are: [3.5900473494560536, -0.05315709351049324]
This means that the coeff for Bread = 3.59
And the coeff for War = -0.05
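###Markdown
The raw coefficients printed above are in different units (percentage points of income growth vs. fatalities per million), so their magnitudes aren't directly comparable. A hedged sketch of comparing them after standardization, reusing `X_train`, `y_train`, and `features` from the cell above:
###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Refit on standardized features so each coefficient is "per standard deviation" of its feature.
scaled_model = make_pipeline(StandardScaler(), LinearRegression())
scaled_model.fit(X_train, y_train)
print(pd.Series(scaled_model.named_steps['linearregression'].coef_, index=features))
###Output
_____no_output_____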
###Markdown
The coefficients tell us that, per unit, the "War" feature has a much smaller impact on the predicted vote share than the "Bread" feature (keeping in mind that the two features are on different scales). How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.) For a regression with 2 features, we can plot the plane of best fit in 3D! (Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
xmin,xmax = -5,5
ymin,ymax = xmin,xmax
num = 10
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# This gives us the set of grid coordinates that exist on the x,y plane
print(list(itertools.product(xcoords,ycoords)))
print(xcoords,'\n',ycoords,'\n',coords)
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$ Let's review this objective, but now for multiple regression. What's the equation for the plane of best fit? $y = \beta_0 + \beta_1x_1 + \beta_2x_2$ Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
Intercept 46.25489966153873
Average Recent Growth in Personal Incomes 3.590047
US Military Fatalities per Million -0.053157
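###Markdown
A quick sketch (using the intercept and coefficients printed above) that checks the plane equation by hand against `model.predict`:
###Code
# Hedged check: y = beta0 + beta1*x1 + beta2*x2 should match model.predict for any point,
# e.g. 2% income growth and 50 fatalities per million.
x1, x2 = 2, 50
manual = model.intercept_ + model.coef_[0] * x1 + model.coef_[1] * x2
print(manual, model.predict([[x1, x2]]))
###Output
_____no_output_____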
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? As fatalities increase, the incumbent party's vote share decreases (our model is really Bread & War, not Bread & Peace). What does the model predict if income growth=0%, fatalities=0?
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
#the beta 1 value (proven)
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
#still beta1
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
# output = beta0 + 3*beta1 + 100*beta2
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
Challenge In your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors Overview So far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems. However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error - $R^2$ We'll demonstrate two possible methods to minimize squared error: - Guess & check - Linear Algebra Follow Along Guess & Check This function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize. Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear requation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination) The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero. The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0. In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world. However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. --- Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
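###Markdown
The "sophisticated form of guess & check" mentioned earlier, gradient descent, can also find these parameters. A rough sketch for the simple 1-feature regression, assuming the `train`, `feature`, and `target` names from this section; the learning rate and iteration count are illustrative assumptions:
###Code
# Hedged sketch: gradient descent on mean squared error for y = m*x + b.
x_gd = train[feature].values
y_gd = train[target].values
m_gd, b_gd = 0.0, y_gd.mean()
learning_rate = 0.01
for _ in range(20000):
    residual = (m_gd * x_gd + b_gd) - y_gd
    m_gd -= learning_rate * 2 * (residual * x_gd).mean()  # d(MSE)/dm
    b_gd -= learning_rate * 2 * residual.mean()           # d(MSE)/db
print(m_gd, b_gd)  # should be close to the slope & intercept found by OLS
###Output
_____no_output_____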
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
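###Markdown
Scikit-learn can also build a validation curve with cross-validation instead of a single train/test split. A minimal sketch, assuming the `PolynomialRegression`, `X`, and `y` defined above:
###Code
from sklearn.model_selection import validation_curve
# Cross-validated train & validation R^2 scores for each polynomial degree.
train_scores, val_scores = validation_curve(
    PolynomialRegression(), X, y,
    param_name='polynomialfeatures__degree',
    param_range=range(1, 10, 2),
    cv=3)
print(train_scores.mean(axis=1))
print(val_scores.mean(axis=1))
###Output
_____no_output_____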
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after. How many observations (rows) are in the train set? In the test set? Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
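(Before computing baselines, we need the train/test split described above. A minimal sketch appears below, assuming `df` is the Bread & Peace DataFrame loaded earlier, with its `Year` column.)
###Code
# Hedged sketch of the time-based split described above:
# train on elections before 2008, test on 2008 and after.
train = df[df['Year'] < 2008]
test = df[df['Year'] >= 2008]
train.shape, test.shape
###Output
_____no_output_____
###Markdown
With `train` (1952-2004) and `test` (2008-16) defined, back to the baseline question above: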
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target. We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature. Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html#Basics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target. We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.) Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
# TODO: Fit the model
# TODO: Apply the model to new data
###Output
_____no_output_____
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.) For a regression with 2 features, we can plot the plane of best fit in 3D! (Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$ Let's review this objective, but now for multiple regression. What's the equation for the plane of best fit? $y = \beta_0 + \beta_1x_1 + \beta_2x_2$ Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
_____no_output_____
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems. However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize. Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
_____no_output_____
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
_____no_output_____
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
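###Markdown
As an illustrative aside (not part of the original lesson), here is a minimal sketch of the "gradient descent" idea mentioned above, applied to the 1-feature model so you can compare its result with the least squares solution:
###Code
# Hedged sketch: gradient descent on mean squared error for y = m*x + b (1 feature)
import numpy as np
x = train['Average Recent Growth in Personal Incomes'].values
y = train['Incumbent Party Vote Share'].values
m, b = 0.0, y.mean()  # start from the mean baseline
learning_rate = 0.01
for _ in range(100000):
    error = (m*x + b) - y                          # residuals for the current guess
    m -= learning_rate * 2 * (error * x).mean()    # d(MSE)/dm
    b -= learning_rate * 2 * error.mean()          # d(MSE)/db
print('m, b from gradient descent:', m, b)
###Output
_____no_output_____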
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
train = df[df['Year'] < 2008]
train.head()
train.dtypes
test = df[df['Year'] >= 2008]
test.head()
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
print(train.shape)
print(test.shape)
###Output
(14, 6)
(3, 6)
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
guess
train['Incumbent Party Vote Share'].mean()
train['Average Recent Growth in Personal Incomes'].mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
len(y_train)
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Test Error (2008-16 elections): 3.63 percentage points
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_test = test[features]
# TODO: Fit the model
model.fit(X_train, y_train)
# TODO: Apply the model to new data
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.63 percentage points
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$. Let's review this objective, but now for multiple regression. What's the equation for the plane of best fit? $y = \beta_0 + \beta_1x_1 + \beta_2x_2$. Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
Intercept 46.25489966153873
Average Recent Growth in Personal Incomes 3.590047
US Military Fatalities per Million -0.053157
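###Markdown
Plugging the rounded values printed above into the plane-of-best-fit equation: $\hat{y} \approx 46.25 + 3.59\,x_1 - 0.053\,x_2$. So, holding fatalities constant, each additional percentage point of income growth adds about 3.6 points of predicted vote share, and holding income growth constant, each additional military fatality per million subtracts about 0.05 points.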
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems. However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize. Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
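###Markdown
As a sanity check (not part of the original lesson), here is a minimal sketch that recomputes $R^2$ and RMSE directly from their definitions for the same guess (m=3, b=46), so you can confirm the numbers printed above:
###Code
# Hedged sketch: R^2 = 1 - SS_res/SS_tot and RMSE = sqrt(MSE), computed by hand
import numpy as np
x = train['Average Recent Growth in Personal Incomes']
y = train['Incumbent Party Vote Share']
y_pred = 3*x + 46
ss_res = ((y - y_pred) ** 2).sum()    # squared error of the guessed line
ss_tot = ((y - y.mean()) ** 2).sum()  # squared error of the mean baseline
print('R^2 by hand:', 1 - ss_res/ss_tot)
print('RMSE by hand:', np.sqrt(ss_res / len(y)))
###Output
_____no_output_____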
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after. How many observations (rows) are in the train set? In the test set? Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
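Before computing that, here is a minimal sketch of the date-based split described above (producing the same `train` and `test` DataFrames the following cells use), which also answers the row-count question:
###Code
# Hedged sketch: time-based train/test split on the Year column
train = df[df['Year'] < 2008]   # 1952-2004 elections
test = df[df['Year'] >= 2008]   # 2008, 2012, 2016 elections
print('train:', train.shape, 'test:', test.shape)
###Output
_____no_output_____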
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
from sklearn.metrics import mean_absolute_error
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
            'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_test = test[features]
# Fit the model
model.fit(X_train, y_train)
mae = mean_absolute_error(y_train, model.predict(X_train))
print(f'Train Error: {mae:.2f} percentage points')
# Apply the model to new data
mae = mean_absolute_error(y_test, model.predict(X_test))
print(f'Test Error: {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
_____no_output_____
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
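The pattern above is no coincidence: a one-unit change in a feature changes the prediction by exactly that feature's coefficient. A minimal check, assuming the two-feature `model` fit earlier in this notebook:
###Code
# +1% income growth moves the prediction by the first coefficient;
# +100 fatalities per million moves it by 100 times the second coefficient
print(model.predict([[3, 100]]) - model.predict([[2, 100]]), model.coef_[0])
print(model.predict([[2, 100]]) - model.predict([[2, 0]]), 100 * model.coef_[1])
###Output
_____no_output_____
###Markdown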
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
    b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
_____no_output_____
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
_____no_output_____
###Markdown
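As a cross-check on the metrics printed above, here is a minimal sketch that recomputes them directly from the m=3, b=46 predictions, assuming the `train`, `feature`, and `target` variables from the previous cells:
###Code
# Recompute the m=3, b=46 metrics "by hand" to see what each one measures
import numpy as np
y_true = train[target]
y_guess = 3 * train[feature] + 46
errors = y_true - y_guess
print('Mean Squared Error:', (errors ** 2).mean())
print('Root Mean Squared Error:', np.sqrt((errors ** 2).mean()))
print('Mean Absolute Error:', errors.abs().mean())
print('R^2:', 1 - (errors ** 2).sum() / ((y_true - y_true.mean()) ** 2).sum())
###Output
_____no_output_____
###Markdown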
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
###Markdown
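As a cross-check on the matrix algebra above (not part of the original lesson), NumPy's built-in least-squares solver should return the same estimates, assuming the `X` and `y` arrays from the previous cell:
###Code
# np.linalg.lstsq solves min ||X @ beta - y||^2 directly
beta_hat_lstsq, residuals, rank, singular_values = np.linalg.lstsq(X, y, rcond=None)
print(beta_hat_lstsq)
###Output
_____no_output_____
###Markdown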
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters."This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
train = df[df['Year'] < 2008]
# train = df.query('Year < 2008')
test = df[df['Year'] >= 2008]
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
len(train), len(test)
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
from sklearn.metrics import mean_absolute_error
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
(guess - y_train).abs().mean()
###Output
_____no_output_____
###Markdown
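As a side note (not part of the original lesson), scikit-learn's `DummyRegressor` reproduces this mean baseline with the same estimator API we'll use for the real models — a minimal sketch, assuming the `train`, `test`, `y_train`, and `y_test` variables above:
###Code
# DummyRegressor(strategy='mean') always predicts the training mean
from sklearn.dummy import DummyRegressor
baseline = DummyRegressor(strategy='mean')
baseline.fit(train[['Average Recent Growth in Personal Incomes']], y_train)  # features are ignored
mean_absolute_error(y_test, baseline.predict(test[['Average Recent Growth in Personal Incomes']]))
###Output
_____no_output_____
###Markdown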
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Linear Regression, dependent on: ['Average Recent Growth in Personal Incomes']
Train Error: 2.65 percentage points
Test Error: 1.80 percentage points
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_test = test[features]
# Fit the model
model.fit(X_train, y_train)
y_pred_train = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred_train)
print(f'Train Error: {mae:.2f} percentage points')
# Apply the model to new data
y_pred_test = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred_test)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Linear Regression, dependent on: ['Average Recent Growth in Personal Incomes', 'US Military Fatalities per Million']
Train Error: 1.33 percentage points
Test Error: 1.63 percentage points
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept:', '\t'*4 + ' '*4, f'{model.intercept_:.6f}')
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
Intercept: 46.254900
Average Recent Growth in Personal Incomes 3.590047
US Military Fatalities per Million -0.053157
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
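A minimal check that the plane-of-best-fit equation above reproduces `model.predict`, assuming the two-feature `model` fit earlier in this notebook (the helper function here is purely illustrative):
###Code
def predict_by_hand(income_growth, fatalities_per_million):
    # y = beta0 + beta1*x1 + beta2*x2, using the fitted intercept and coefficients
    return (model.intercept_
            + model.coef_[0] * income_growth
            + model.coef_[1] * fatalities_per_million)
print(predict_by_hand(3, 100), model.predict([[3, 100]]))
###Output
_____no_output_____
###Markdown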
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
    b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4, alongside hyperparameter search tools such as GridSearchCV and RandomizedSearchCV.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can also be found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Let's calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
###Markdown
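The markdown above describes gradient descent as a "sophisticated form of guess & check." Here is a minimal sketch of it (not part of the original lesson) fitting m and b for the one-feature regression, assuming the `train`, `feature`, and `target` variables from earlier cells:
###Code
# Gradient descent on mean squared error for y ≈ m*x + b
import numpy as np
xs = train[feature].values
ys = train[target].values
m, b = 0.0, ys.mean()                 # start from the mean baseline
learning_rate = 0.01
for _ in range(10_000):
    errors = (m * xs + b) - ys
    m -= learning_rate * 2 * (errors * xs).mean()   # d(MSE)/dm
    b -= learning_rate * 2 * errors.mean()          # d(MSE)/db
print(m, b)  # should land close to the OLS slope & intercept for this feature
###Output
_____no_output_____
###Markdown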
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters."This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
# Splitting with slicing syntax
train = df[:14]
test = df[14:]
# Splitting with dataframe filtering
train = df[df['Year'] < 2008]
test = df[df['Year'] >= 2008]
###Output
_____no_output_____
###Markdown
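A quick sanity check on the split above (a hedged sketch, not part of the original lesson): the two dataframes should partition `df` with nothing dropped, and the 2008 cutoff should hold.
###Code
# No rows lost, and no overlap across the 2008 cutoff
assert len(train) + len(test) == len(df)
assert train['Year'].max() < 2008 <= test['Year'].min()
###Output
_____no_output_____
###Markdown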
How many observations (rows) are in the train set? In the test set?
###Code
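# How many observations (rows) in each set?
len(train), len(test)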
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
print(guess)
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Test Error (2008-16 elections): 3.63 percentage points
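###Markdown
As a hedged aside (not part of the original lesson): scikit-learn ships a built-in estimator for exactly this kind of baseline. `DummyRegressor(strategy='mean')` predicts the training mean for every row, so it should reproduce the numbers above.
###Code
# Sketch: the same mean baseline, expressed as a scikit-learn estimator
from sklearn.dummy import DummyRegressor
X_any = train[['Year']]  # the dummy ignores X, but fit/predict still need one
baseline = DummyRegressor(strategy='mean').fit(X_any, y_train)
print('Train Error:', mean_absolute_error(y_train, baseline.predict(train[['Year']])))
print('Test Error:', mean_absolute_error(y_test, baseline.predict(test[['Year']])))
###Output
_____no_output_____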
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html#Basics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
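###Markdown
We're about to repeat these exact fit/evaluate steps for a two-feature model, so here's a small helper that wraps them. This is my own sketch, not part of the lesson; it assumes the `train`, `test`, `target`, `LinearRegression`, and `mean_absolute_error` names defined above.
###Code
# Sketch: fit a LinearRegression on the given features and report train/test MAE
def fit_and_report(feature_list):
    m = LinearRegression()
    m.fit(train[feature_list], train[target])
    train_mae = mean_absolute_error(train[target], m.predict(train[feature_list]))
    test_mae = mean_absolute_error(test[target], m.predict(test[feature_list]))
    print(f'{feature_list}: Train MAE {train_mae:.2f}, Test MAE {test_mae:.2f}')
    return m

fit_and_report(['Average Recent Growth in Personal Incomes'])
###Output
_____no_output_____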
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_test = test[features]
# Fit the model
model.fit(X_train, y_train)
# Check train error
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.63 percentage points
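###Markdown
The note above mentioned leave-one-out cross-validation as an option for tiny datasets. As a hedged sketch (not part of the lesson), here's what that looks like with scikit-learn on the 1952-2004 training elections: each election is held out once and predicted by a model fit on the rest.
###Code
# Sketch: leave-one-out cross-validation on the training data
from sklearn.model_selection import LeaveOneOut, cross_val_score
scores = cross_val_score(
    LinearRegression(), X_train, y_train,
    cv=LeaveOneOut(), scoring='neg_mean_absolute_error'
)
print(f'Leave-one-out MAE: {-scores.mean():.2f} percentage points')
###Output
_____no_output_____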
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
# This does not exactly match correlation
df.corr()['Incumbent Party Vote Share']
###Output
_____no_output_____
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient's worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
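###Markdown
Those pairwise differences should match the fitted coefficients exactly: holding one feature fixed, a one-unit change in the other moves the prediction by that feature's coefficient. Here's a small check (my own sketch, reusing the fitted `model` above).
###Code
# Sketch: a one-unit change in a feature moves the prediction by its coefficient
income_effect = model.predict([[3, 100]]) - model.predict([[2, 100]])          # +1% income growth
fatality_effect = (model.predict([[2, 100]]) - model.predict([[2, 0]])) / 100  # +1 fatality per million
print('Income coefficient:    ', model.coef_[0], 'vs observed effect', income_effect[0])
print('Fatalities coefficient:', model.coef_[1], 'vs observed effect', fatality_effect[0])
###Output
_____no_output_____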
###Markdown
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
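###Markdown
Rather than guessing forever, we can ask NumPy directly for the slope and intercept that minimize squared error for this one feature (a quick sketch, not part of the lesson), and feed them back into `squared_errors` to see the best achievable fit.
###Code
# Sketch: the m & b that actually minimize squared error, via numpy.polyfit
best_m, best_b = np.polyfit(train[feature], y_train, deg=1)
print(f'Optimal m = {best_m:.3f}, b = {best_b:.3f}')
squared_errors(train, feature, target, m=best_m, b=best_b)
###Output
_____no_output_____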
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
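###Markdown
In practice you rarely form $(X^{T}X)^{-1}$ explicitly, because inverting the matrix can be numerically unstable. As a hedged sketch, NumPy's least-squares solver returns the same $\hat{\beta}$ more robustly, reusing the `X` and `y` built above.
###Code
# Sketch: the same least-squares solution via numpy's solver, without an explicit inverse
beta_hat_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
print(beta_hat_lstsq)
###Output
_____no_output_____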
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters."This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
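###Markdown
Scikit-learn can compute this kind of curve for us too. The cell below is a hedged sketch (not part of the lesson) using `validation_curve`, which refits the pipeline at each polynomial degree with cross-validation; the degree is addressed as `polynomialfeatures__degree` because `make_pipeline` names steps after their (lowercased) classes.
###Code
# Sketch: the same idea with sklearn.model_selection.validation_curve
from sklearn.model_selection import validation_curve
train_scores, val_scores = validation_curve(
    PolynomialRegression(), X, y,
    param_name='polynomialfeatures__degree',
    param_range=polynomial_degrees,
    cv=3, scoring='r2'
)
plt.plot(polynomial_degrees, train_scores.mean(axis=1), color='blue', label='Train')
plt.plot(polynomial_degrees, val_scores.mean(axis=1), color='red', label='Validation')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____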
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
# train = df[df['Year'] < 2008]
train = df.query('Year < 2008')
test = df.query('Year >= 2008')
train
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
train.shape, test.shape
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
[guess] * len(y_train)
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Test Error (2008-16 elections): 3.63 percentage points
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_test = test[features]
print(X_train.shape, X_test.shape)
# TODO: Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# TODO: Apply the model to new data
y_test_pred = model.predict(X_test)
mae_test = mean_absolute_error(y_test, y_test_pred)
print(f'Test Error: {mae_test:.2f} percentage points')
###Output
Test Error: 1.63 percentage points
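###Markdown
To see where the two-feature model lands for each held-out election, it can help to line up actual and predicted vote shares by year (a hedged sketch, reusing the fitted `model` above).
###Code
# Sketch: actual vs. predicted incumbent vote share for the 2008-2016 test elections
pd.DataFrame({
    'Year': test['Year'],
    'Actual': y_test,
    'Predicted': model.predict(X_test).round(2),
}).reset_index(drop=True)
###Output
_____no_output_____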
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
#good for large data sets
#train = df[df['Year'] < 2008]
#this is more sql like, and a little bit easier to read
train = df.query('Year < 2008')
test = df.query('Year >= 2008')
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
train.shape, test.shape
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Test Error (2008-16 elections): 3.63 percentage points
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_test = test[features]
print(X_train.shape, X_test.shape)
# TODO: Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# TODO: Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.63 percentage points
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
#STRETCH GOAL: Use the scikit-learn Standard Scaler to standardize the data and
#fit the multiple regression model.
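# One possible sketch for the stretch goal (my own, hedged): scale the features, then refit.
# Standardizing doesn't change the predictions of a plain linear regression, but it puts the
# coefficients on a comparable "per standard deviation" scale.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
scaled_model = make_pipeline(StandardScaler(), LinearRegression())
scaled_model.fit(X_train, y_train)
print('Scaled coefficients:', scaled_model.named_steps['linearregression'].coef_)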
###Output
_____no_output_____
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient's worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
_____no_output_____
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
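###Markdown
As a side note (a sketch, not from the lesson): NumPy's `np.linalg.lstsq` solves the same least-squares problem without explicitly forming and inverting $X^{T}X$, which is generally the more numerically stable route. It should reproduce the beta hat estimates above.
###Code
# Sketch: same estimate via np.linalg.lstsq, which avoids explicitly inverting X^T X
beta_hat_lstsq, residuals, rank, singular_values = np.linalg.lstsq(X, y, rcond=None)
print('Beta Hat (lstsq)')
print(beta_hat_lstsq)
###Output
_____no_output_____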
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters."This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
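###Markdown
A quick check (a sketch, not part of the lesson): when `test_size` isn't specified, scikit-learn's `train_test_split` holds out 25% of the rows by default.
###Code
# Sanity check on the split sizes (default hold-out is 25% of the rows)
print('Train:', X_train.shape, '| Test:', X_test.shape)
print('Total rows:', len(X_train) + len(X_test))
###Output
_____no_output_____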
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
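###Markdown
As an aside (a sketch, assuming you'd rather use cross-validation than a single split), scikit-learn's `validation_curve` utility — the one used in the VanderPlas chapter linked above — computes the same kind of curve from cross-validated scores.
###Code
# Sketch: cross-validated version of the curve above, using sklearn's validation_curve
from sklearn.model_selection import validation_curve
degrees = range(1, 10, 2)
train_scores, val_scores = validation_curve(
    PolynomialRegression(), X, y,
    param_name='polynomialfeatures__degree',  # the PolynomialFeatures step inside the pipeline
    param_range=degrees,
    cv=3,
    scoring='r2',
)
plt.plot(degrees, train_scores.mean(axis=1), color='blue', label='Train (CV mean)')
plt.plot(degrees, val_scores.mean(axis=1), color='red', label='Validation (CV mean)')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____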
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
train = df[df['Year']<2008]
train
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
test = df[df['Year']>=2008]
test
# 14 Rows in the training set, 3 in the test set
###Output
_____no_output_____
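###Markdown
A quick sanity check on the split (a sketch, not part of the original lesson): the two sets should cover every row exactly once, with no overlapping years.
###Code
# Sanity check: train and test together cover the whole dataset, with no overlap
assert len(train) + len(test) == len(df)
assert set(train['Year']).isdisjoint(test['Year'])
print(len(train), 'training rows +', len(test), 'test rows =', len(df), 'total rows')
###Output
_____no_output_____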
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Test Error (2008-16 elections): 3.63 percentage points
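###Markdown
Scikit-learn also has a built-in way to express this baseline. Here's a sketch using `DummyRegressor`, which simply predicts the training mean for every row (the feature column passed in is ignored by the dummy model).
###Code
# Sketch: the same mean baseline via sklearn's DummyRegressor
from sklearn.dummy import DummyRegressor
baseline = DummyRegressor(strategy='mean')
baseline.fit(train[['Year']], y_train)          # features are ignored; only the mean of y_train is stored
dummy_pred = baseline.predict(test[['Year']])   # predicts the training mean for every test row
print(f'Test Error: {mean_absolute_error(y_test, dummy_pred):.2f} percentage points')
###Output
_____no_output_____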
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
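###Markdown
Before moving on, here's a small sketch (not part of the original lesson) that pulls out the fitted slope and intercept of this one-feature model. We'll interpret coefficients more carefully after fitting the multiple regression.
###Code
# Sketch: the fitted line for the one-feature model, y = m*x + b
m = model.coef_[0]
b = model.intercept_
print(f'Incumbent Party Vote Share = {b:.2f} + {m:.2f} * Average Recent Growth in Personal Incomes')
###Output
_____no_output_____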
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
target = 'Incumbent Party Vote Share'
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_test = test[features]
y_train = train[target]
# TODO: Fit the model
model.fit(X_train, y_train)
# Training error
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# TODO: Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.63 percentage points
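###Markdown
To line the results up, here's a small recap sketch; the numbers are simply copied from the test errors reported in the outputs above.
###Code
# Recap sketch: test errors copied from the outputs above
recap = pd.DataFrame({
    'Model': ['Mean baseline (0 features)',
              'Simple regression (1 feature)',
              'Multiple regression (2 features)'],
    'Test MAE (percentage points)': [3.63, 1.80, 1.63],
})
print(recap.to_string(index=False))
###Output
_____no_output_____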
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
Intercept 46.25489966153873
Average Recent Growth in Personal Incomes 3.590047
US Military Fatalities per Million -0.053157
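###Markdown
As a check (a sketch, not part of the original lesson), we can plug the fitted intercept and coefficients into the plane equation $y = \beta_0 + \beta_1x_1 + \beta_2x_2$ by hand and confirm it matches `model.predict`. The example values below (2% income growth, 100 fatalities per million) are just illustrative.
###Code
# Sketch: manual plane equation vs. model.predict (x1, x2 are illustrative values)
x1, x2 = 2.0, 100.0
manual = model.intercept_ + model.coef_[0] * x1 + model.coef_[1] * x2
print('By hand       :', manual)
print('model.predict :', model.predict([[x1, x2]])[0])
###Output
_____no_output_____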
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that a one-unit change in an independent variable results in a coefficient's worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
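###Markdown
Here's a compact sketch (not part of the original lesson) that repeats the scenarios above in a single loop, to show that each difference is just the matching coefficient times the change in that feature.
###Code
# Sketch: consecutive scenario predictions differ by (coefficient * change in that feature)
scenarios = [[0, 0], [1, 0], [2, 0], [2, 100], [3, 100], [3, 200]]
preds = model.predict(scenarios)
for i in range(1, len(scenarios)):
    change = preds[i] - preds[i - 1]
    print(f'{scenarios[i - 1]} -> {scenarios[i]}: prediction changes by {change:+.2f} points')
###Output
_____no_output_____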
###Markdown
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
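###Markdown
As a sanity check on the printed metrics, $R^2$ can be computed by hand as $1 - SS_{res}/SS_{tot}$. This sketch reuses the same feature, target, and the manually chosen line (m=3, b=46) from the cell above.
###Code
# R^2 = 1 - (sum of squared residuals) / (total sum of squares)
x = train[feature]
y = train[target]
y_pred = 3 * x + 46  # the manually "fitted" line from above

ss_res = ((y - y_pred) ** 2).sum()
ss_tot = ((y - y.mean()) ** 2).sum()
print('R^2 by hand:', 1 - ss_res / ss_tot)  # should match the r2_score printed above
###Output
_____no_output_____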
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
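###Markdown
Explicitly inverting $X^{T}X$ works fine on a dataset this small, but in practice a least-squares solver is preferred because it avoids forming the inverse. A sketch using the same X and y as above:
###Code
# Same least-squares solution, computed without an explicit matrix inverse
beta_hat_lstsq, residuals, rank, singular_values = np.linalg.lstsq(X, y, rcond=None)
print(beta_hat_lstsq)  # should match beta_hat and scikit-learn's intercept/coefficients
###Output
_____no_output_____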
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters."This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
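###Markdown
Scikit-learn also ships a utility that automates this kind of sweep, scoring each degree with cross-validation instead of a single train/test split. A sketch of one possible way to use it with the same pipeline (not part of the original lesson):
###Code
from sklearn.model_selection import validation_curve

degrees = np.array([1, 2, 3, 4, 5])
train_scores, val_scores = validation_curve(
    PolynomialRegression(), X, y,
    param_name='polynomialfeatures__degree',
    param_range=degrees, cv=3, scoring='r2')

plt.plot(degrees, train_scores.mean(axis=1), color='blue', label='Train')
plt.plot(degrees, val_scores.mean(axis=1), color='red', label='Cross-validation')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____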
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
df['Year']<2008
train=df[df['Year']<2008]
print(train.shape)
train
test=df[df['Year']>=2008]
print(test.shape)
test
###Output
(3, 6)
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
_____no_output_____
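###Markdown
The same mean baseline can be expressed as a scikit-learn estimator, which makes it easy to swap against real models later. A sketch using `DummyRegressor` (one possible equivalent of the code above):
###Code
from sklearn.dummy import DummyRegressor

# DummyRegressor ignores the features and always predicts the training mean
baseline = DummyRegressor(strategy='mean')
baseline.fit(train[['Average Recent Growth in Personal Incomes']], y_train)
y_pred = baseline.predict(test[['Average Recent Growth in Personal Incomes']])
print(f'Test Error: {mean_absolute_error(y_test, y_pred):.2f} percentage points')
###Output
_____no_output_____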
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
_____no_output_____
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
# TODO: Fit the model
# TODO: Apply the model to new data
###Output
_____no_output_____
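###Markdown
If you want to check your work on the TODO cell above, here is one possible completion (it mirrors the simple-regression cell earlier in the notebook):
###Code
# One possible completion of the TODO cell above
X_train = train[features]
X_test = test[features]

model.fit(X_train, y_train)

y_pred = model.predict(X_train)
print(f'Train Error: {mean_absolute_error(y_train, y_pred):.2f} percentage points')

y_pred = model.predict(X_test)
print(f'Test Error: {mean_absolute_error(y_test, y_pred):.2f} percentage points')
###Output
_____no_output_____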
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0} + {beta1}x1 + {beta2}x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
_____no_output_____
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
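###Markdown
Each of these predictions is just the plane equation evaluated at a point: the intercept plus the dot product of the coefficients with the feature values. A small check, assuming `model` is the two-feature regression fit above:
###Code
# predict([[growth, fatalities]]) equals intercept + coefficients . features
point = np.array([3, 100])
by_hand = model.intercept_ + np.dot(model.coef_, point)
print(by_hand, model.predict([point])[0])  # the two numbers should match
###Output
_____no_output_____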
###Markdown
ChallengeIn your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors OverviewSo far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems.However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error- $R^2$ We'll demonstrate two possible methods to minimize squared error:- Guess & check- Linear Algebra Follow Along Guess & CheckThis function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize.Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
_____no_output_____
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination)The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero.The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0.In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world.However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. ---Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46)
###Output
_____no_output_____
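###Markdown
Rather than guessing m & b, the least-squares slope and intercept for a single feature can be computed directly; `np.polyfit` with degree 1 is one quick way to do it (a sketch, not part of the original lesson):
###Code
# A degree-1 polynomial fit returns the least-squares slope and intercept
m_best, b_best = np.polyfit(train[feature], train[target], deg=1)
print('m:', m_best, ' b:', b_best)
squared_errors(train, feature, target, m=m_best, b=b_best)
###Output
_____no_output_____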
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
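###Markdown
One way to sanity-check this solution: the normal equations imply the residuals are orthogonal to every column of X, so $X^{T}(y - X\hat{\beta})$ should be numerically zero. A quick sketch:
###Code
# Residuals from the hand-computed parameters should be orthogonal to X
residuals = y - X @ beta_hat
print(X.T @ residuals)  # each entry should be (numerically) zero
###Output
_____no_output_____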
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters."This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____
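###Markdown
One way to read the validation curve programmatically is to pick the degree with the best test score, using the lists built in the loop above (a small sketch):
###Code
# Pick the polynomial degree with the highest test R^2
best_index = int(np.argmax(test_r2s))
best_degree = list(polynomial_degrees)[best_index]
print('Best degree:', best_degree, ' Test R^2:', test_r2s[best_index])
###Output
_____no_output_____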
###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2- Do train/test split- Use scikit-learn to fit a multiple regression- Understand how ordinary least squares regression minimizes the sum of squared errors- Define overfitting/underfitting and the bias/variance tradeoff SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- matplotlib- numpy- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Do train/test split Overview Predict Elections! 🇺🇸🗳️ How could we try to predict the 2020 US Presidential election? According to Douglas Hibbs, a political science and economics professor, you can [explain elections with just two features, "Bread and Peace":](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:>> (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. Let's look at the data that Hibbs collected and analyzed:
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'elections/bread_peace_voting.csv')
df
###Output
_____no_output_____
###Markdown
Data Sources & Definitions- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12> Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 Here we have data from the 1952-2016 elections. We could make a model to predict 1952-2016 election outcomes — but do we really care about that? No, not really. We already know what happened, we don't need to predict it. This is explained in [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/), Chapter 2.2, Assessing Model Accuracy:> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about? >> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price. >> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes. So, we're really interested in the 2020 election — but we probably don't want to wait until then to evaluate our model.There is a way we can estimate now how well our model will generalize in the future. We can't fast-forward time, but we can rewind it...We can split our data in **two sets.** For example: 1. **Train** a model on elections before 2008.2. **Test** the model on 2008, 2012, 2016. This "backtesting" helps us estimate how well the model will predict the next elections going forward, starting in 2020. 
This is explained in [_Forecasting,_ Chapter 3.4,](https://otexts.com/fpp2/accuracy.html) Evaluating forecast accuracy:> The accuracy of forecasts can only be determined by considering how well a model performs on new data that were not used when fitting the model.>>When choosing models, it is common practice to separate the available data into two portions, training and test data, where the training data is used to estimate any parameters of a forecasting method and the test data is used to evaluate its accuracy. Because the test data is not used in determining the forecasts, it should provide a reliable indication of how well the model is likely to forecast on new data.>>![](https://otexts.com/fpp2/fpp_files/figure-html/traintest-1.png)>>The size of the test set is typically about 20% of the total sample, although this value depends on how long the sample is and how far ahead you want to forecast. The following points should be noted.>>- A model which fits the training data well will not necessarily forecast well.>- A perfect fit can always be obtained by using a model with enough parameters.>- Over-fitting a model to data is just as bad as failing to identify a systematic pattern in the data.>>Some references describe the test set as the “hold-out set” because these data are “held out” of the data used for fitting. Other references call the training set the “in-sample data” and the test set the “out-of-sample data”. We prefer to use “training data” and “test data” in this book. **How should we split: Randomly? Before/after a given date?**I recommend you all read a great blog post, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/), by fast.ai cofounder Rachel Thomas.She gives great examples to answer the question “When is a random subset not good enough?” I’m not as opposed to random splits as Rachel Thomas seems to be. But it’s worth thinking about the trade-offs!Time-based and random splits can both be useful, and you’ll get repeated hands-on practice with both during this unit! (She also talks about the distinction between validation & test sets, which we’ll introduce in the last lesson of this Sprint.) Follow AlongSplit the data in two sets:1. Train on elections before 2008.2. Test on 2008 and after.
###Code
train = df[df['Year'] < 2008] #a pythonic way to write a condition
test = df[df['Year'] >= 2008]
###Output
_____no_output_____
###Markdown
How many observations (rows) are in the train set? In the test set?
###Code
train.shape, test.shape
###Output
_____no_output_____
###Markdown
Note that this volume of data is at least two orders of magnitude smaller than we usually want to work with for predictive modeling.There are other validation techniques that could be used here, such as [time series cross-validation](https://scikit-learn.org/stable/modules/cross_validation.htmltime-series-split), or [leave-one-out cross validation](https://scikit-learn.org/stable/modules/cross_validation.htmlleave-one-out-loo) for small datasets. However, for this module, let's start simpler, with train/test split. Using a tiny dataset is intentional here. It's good for learning because we can see all the data at once. ChallengeIn your assignment, you will do train/test split, based on date. Use scikit-learn to fit a multiple regression OverviewWe've done train/test split, and we're ready to fit a model. We'll proceed in 3 steps. The first 2 are review from the previous module. The 3rd is new.- Begin with baselines (0 features) - Simple regression (1 feature)- Multiple regression (2 features) Follow Along Begin with baselines (0 features) What was the average Incumbent Party Vote Share, in the 1952-2004 elections?
###Code
train['Incumbent Party Vote Share'].mean()
###Output
_____no_output_____
###Markdown
What if we guessed this number for every election? How far off would this be on average?
###Code
# Arrange y target vectors
target = 'Incumbent Party Vote Share'
y_train = train[target]
y_test = test[target]
# Get mean baseline
print('Mean Baseline (using 0 features)')
guess = y_train.mean()
guess
# Train Error
from sklearn.metrics import mean_absolute_error
y_pred = [guess] * len(y_train) #creates a list of repeating guess values with a length of y_train
mae = mean_absolute_error(y_train, y_pred)
print(f'Train Error (1952-2004 elections): {mae:.2f} percentage points')
# Test Error
y_pred = [guess] * len(y_test) #creates a list of repeating guess values with a length of y_test
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error (2008-16 elections): {mae:.2f} percentage points')
###Output
Test Error (2008-16 elections): 3.63 percentage points
###Markdown
Simple regression (1 feature) Make a scatterplot of the relationship between 1 feature and the target.We'll use an economic feature: Average Recent Growth in Personal Incomes. ("Bread")
###Code
import pandas as pd
import plotly.express as px
px.scatter(
train,
x='Average Recent Growth in Personal Incomes',
y='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004',
trendline='ols', # Ordinary Least Squares
)
###Output
_____no_output_____
###Markdown
1952 & 1968 are outliers: The incumbent party got fewer votes than predicted by the regression. What do you think could explain those years? We'll come back to this soon, but first... Use scikit-learn to fit the simple regression with one feature.Follow the [5 step process](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression(fit_intercept=True) #True means that the y intercept will be taken into consideration. It will not automatically 0,0 as the origin
# 3. Arrange X features matrices (already did y target vectors)
features = ['Average Recent Growth in Personal Incomes']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# 4. Fit the model
model.fit(X_train, y_train)
y_pred_train = model.predict(X_train)
mae = mean_absolute_error(y_train, y_pred_train)
print(f'Train Error: {mae:.2f} percentage points')
# 5. Apply the model to new data
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Error: {mae:.2f} percentage points')
###Output
Test Error: 1.80 percentage points
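###Markdown
Because this fit-then-score pattern repeats for every model in the notebook, it can help to wrap it in a small helper. A sketch (the `fit_and_report` name is just an example, not part of the lesson):
###Code
# Hypothetical helper: fit a model and report train/test mean absolute error
def fit_and_report(model, X_train, y_train, X_test, y_test):
    model.fit(X_train, y_train)
    train_mae = mean_absolute_error(y_train, model.predict(X_train))
    test_mae = mean_absolute_error(y_test, model.predict(X_test))
    print(f'Train Error: {train_mae:.2f} percentage points')
    print(f'Test Error: {test_mae:.2f} percentage points')
    return model

fit_and_report(LinearRegression(), X_train, y_train, X_test, y_test)
###Output
_____no_output_____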
###Markdown
How does the error compare to the baseline? Multiple regression (2 features) Make a scatterplot of the relationship between 2 features and the target.We'll add another feature: US Military Fatalities per Million. ("Peace" or the lack thereof.)Rotate the scatterplot to explore the data. What's different about 1952 & 1968?
###Code
px.scatter_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Use scikit-learn to fit a multiple regression with two features.
###Code
# TODO: Complete this cell
# Re-arrange X features matrices
features = ['Average Recent Growth in Personal Incomes',
'US Military Fatalities per Million']
print(f'Linear Regression, dependent on: {features}')
X_train = train[features]
X_test = test[features]
# TODO: Fit the model
model.fit(X_train, y_train)
# TODO: Apply the model to new data
y_pred_train = model.predict(X_train)
mean_absolute_error(y_train, y_pred_train) #Train Data (y_true first, per scikit-learn convention; MAE is symmetric so the value is unchanged)
y_pred = model.predict(X_test)
mean_absolute_error(y_test, y_pred) #Test Data
###Output
_____no_output_____
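###Markdown
The two numbers above are easier to compare with the baseline and the one-feature model if they are labeled. A small sketch reusing the two-feature fit from the cell above:
###Code
# Label the train/test errors of the two-feature model
train_mae = mean_absolute_error(y_train, model.predict(X_train))
test_mae = mean_absolute_error(y_test, model.predict(X_test))
print(f'Train Error: {train_mae:.2f} percentage points')
print(f'Test Error: {test_mae:.2f} percentage points')
###Output
_____no_output_____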
###Markdown
How does the error compare to the prior model? Plot the plane of best fit For a regression with 1 feature, we plotted the line of best fit in 2D. (There are many ways to do this. Plotly Express's `scatter` function makes it convenient with its `trendline='ols'` parameter.)For a regression with 2 features, we can plot the plane of best fit in 3D!(Plotly Express has a `scatter_3d` function but it won't plot the plane of best fit for us. But, we can write our own function, with the same "function signature" as the Plotly Express API.)
###Code
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='Average Recent Growth in Personal Incomes',
y='US Military Fatalities per Million',
z='Incumbent Party Vote Share',
text='Year',
title='US Presidential Elections, 1952-2004'
)
###Output
_____no_output_____
###Markdown
Where are 1952 & 1968 in relation to the plane? Which elections are the biggest outliers now? Roll over points on the plane to see predicted incumbent party vote share (z axis), dependent on personal income growth (x axis) and military fatalities per capita (y axis). Get and interpret coefficients During the previous module, we got the simple regression's coefficient and intercept. We plugged these numbers into an equation for the line of best fit, in slope-intercept form: $y = mx + b$Let's review this objective, but now for multiple regression.What's the equation for the plane of best fit?$y = \beta_0 + \beta_1x_1 + \beta_2x_2$Can you relate the intercept and coefficients to what you see in the plot above?
###Code
model.intercept_, model.coef_
beta0 = model.intercept_
beta1, beta2 = model.coef_
print(f'y = {beta0:.2f} + {beta1:.2f}*x1 + {beta2:.2f}*x2')
# This is easier to read
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
###Output
Intercept 46.25489966153873
Average Recent Growth in Personal Incomes 3.590047
US Military Fatalities per Million -0.053157
###Markdown
One of the coefficients is positive, and the other is negative. What does this mean? Let's look at some scenarios. We'll see that one unit's change in an independent variable results in a coefficient worth of change in the dependent variable. What does the model predict if income growth=0%, fatalities=0
###Code
model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
Income growth = 1% (fatalities = 0)
###Code
model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[1, 0]]) - model.predict([[0, 0]])
###Output
_____no_output_____
###Markdown
What if... income growth = 2% (fatalities = 0)
###Code
model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 0]]) - model.predict([[1, 0]])
###Output
_____no_output_____
###Markdown
What if... (income growth=2%) fatalities = 100
###Code
model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[2, 100]]) - model.predict([[2, 0]])
###Output
_____no_output_____
###Markdown
What if income growth = 3% (fatalities = 100)
###Code
model.predict([[3, 100]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 100]]) - model.predict([[2, 100]])
###Output
_____no_output_____
###Markdown
What if (income growth = 3%) fatalities = 200
###Code
model.predict([[3, 200]])
###Output
_____no_output_____
###Markdown
The difference between these predictions = ?
###Code
model.predict([[3, 200]]) - model.predict([[3, 100]])
###Output
_____no_output_____
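###Markdown
Tying the scenarios together (a small sketch reusing the fitted `model` from above): the differences between adjacent predictions recover the coefficients, one unit at a time.
###Code
# Sketch: scenario differences vs. the fitted coefficients
print(model.predict([[1, 0]]) - model.predict([[0, 0]]))    # ~ income growth coefficient
print(model.predict([[2, 100]]) - model.predict([[2, 0]]))  # ~ 100 * fatalities coefficient
print(model.coef_)
###Output
_____no_output_____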
###Markdown
Challenge: In your assignment, you'll fit a Linear Regression with at least 2 features. Understand how ordinary least squares regression minimizes the sum of squared errors. Overview: So far, we've evaluated our models by their absolute error. It's an intuitive metric for regression problems. However, ordinary least squares doesn't directly minimize absolute error. Instead, it minimizes squared error. In this section, we'll introduce two new regression metrics: - Squared error - $R^2$ We'll demonstrate two possible methods to minimize squared error: - Guess & check - Linear Algebra. Follow Along: Guess & Check. This function visualizes squared errors. We'll go back to simple regression with 1 feature, because it's much easier to visualize. Use the function's m & b parameters to "fit the model" manually. Guess & check what values of m & b minimize squared error.
###Code
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def squared_errors(df, feature, target, m, b):
"""
Visualize linear regression, with squared errors,
in 2D: 1 feature + 1 target.
Use the m & b parameters to "fit the model" manually.
df : Pandas DataFrame
feature : string, feature column in df
target : string, target column in df
m : numeric, slope for linear equation
b : numeric, intercept for linear equation
"""
# Plot data
fig = plt.figure(figsize=(7,7))
ax = plt.axes()
df.plot.scatter(feature, target, ax=ax)
# Make predictions
x = df[feature]
y = df[target]
y_pred = m*x + b
# Plot predictions
ax.plot(x, y_pred)
# Plot squared errors
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
scale = (xmax-xmin)/(ymax-ymin)
for x, y1, y2 in zip(x, y, y_pred):
bottom_left = (x, min(y1, y2))
height = abs(y1 - y2)
width = height * scale
ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1))
# Print regression metrics
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print('Mean Squared Error:', mse)
print('Root Mean Squared Error:', rmse)
print('Mean Absolute Error:', mae)
print('R^2:', r2)
###Output
_____no_output_____
###Markdown
Here's what the mean baseline looks like:
###Code
feature = 'Average Recent Growth in Personal Incomes'
squared_errors(train, feature, target, m=0, b=y_train.mean())
###Output
Mean Squared Error: 31.186940816326533
Root Mean Squared Error: 5.584526910699467
Mean Absolute Error: 4.846938775510204
R^2: 0.0
###Markdown
Notice that $R^2$ is exactly zero. [$R^2$ represents the proportion of the variance for a dependent variable that is explained by the independent variable(s).](https://en.wikipedia.org/wiki/Coefficient_of_determination) The mean baseline uses zero independent variables and explains none of the variance in the dependent variable, so its $R^2$ score is zero. The highest possible $R^2$ score is 1. The lowest possible *Train* $R^2$ score with ordinary least squares regression is 0. In this demo, it's possible to get a negative Train $R^2$, if you manually set values of m & b that are worse than the mean baseline. But that wouldn't happen in the real world. However, in the real world, it _is_ possible to get a negative *Test/Validation* $R^2$. It means that your *Test/Validation* predictions are worse than if you'd constantly predicted the mean of the *Test/Validation* set. --- Now that we've visualized the squared errors for the mean baseline, let's guess & check some better values for the m & b parameters:
###Code
squared_errors(train, feature, target, m=3, b=46) #m is the coefficient, b is the intercept
###Output
Mean Squared Error: 13.611378571428576
Root Mean Squared Error: 3.6893601845616235
Mean Absolute Error: 2.742142857142858
R^2: 0.5635551863970272
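###Markdown
As a quick check on the $R^2$ definition (a sketch, reusing `train`, `y_train`, and the m & b guessed above): $R^2 = 1 - \frac{\sum(y - \hat{y})^2}{\sum(y - \bar{y})^2}$, i.e. one minus the model's squared error relative to the mean baseline's.
###Code
# Sketch: compute R^2 by hand and compare with sklearn's r2_score
y_hat = 3 * train[feature] + 46   # the guessed m & b from the cell above
ss_res = ((y_train - y_hat) ** 2).sum()
ss_tot = ((y_train - y_train.mean()) ** 2).sum()
print('Manual R^2:', 1 - ss_res / ss_tot)
print('sklearn R^2:', r2_score(y_train, y_hat))
###Output
_____no_output_____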
###Markdown
You can run the function repeatedly, with different values for m & b.How do you interpret each metric you see?- Mean Squared Error- Root Mean Squared Error- Mean Absolute Error- $R^2$Does guess & check really get used in machine learning? Sometimes! Some complex functions are hard to minimize, so we use a sophisticated form of guess & check called "gradient descent", which you'll learn about in Unit 4.Fortunately, we don't need to use guess & check for ordinary least squares regression. We have a solution, using linear algebra! Linear AlgebraThe same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:"\begin{align}\hat{\beta} = (X^{T}X)^{-1}X^{T}y\end{align}Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. The $\beta$ vectorThe $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$Now that we have all of the necessary parts we can set them up in the following equation:\begin{align}y = X \beta + \epsilon\end{align}Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average.\begin{align}y = X \beta\end{align}The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$.\begin{align}X^{T}y = X^{T}X \beta\end{align}Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.)\begin{align}(X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta\end{align}Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side:\begin{align}(X^{T}X)^{-1}X^{T}y = \hat{\beta}\end{align}We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ Lets calculate our $\beta$ parameters with numpy!
###Code
# This is NOT something you'll be tested on. It's just a demo.
# X is a matrix. Add column of constants for fitting the intercept.
def add_constant(X):
constant = np.ones(shape=(len(X),1))
return np.hstack((constant, X))
X = add_constant(train[features].values)
print('X')
print(X)
# y is a column vector
y = train[target].values[:, np.newaxis]
print('y')
print(y)
# Least squares solution in code
X_transpose = X.T
X_transpose_X = X_transpose @ X
X_transpose_X_inverse = np.linalg.inv(X_transpose_X)
X_transpose_y = X_transpose @ y
beta_hat = X_transpose_X_inverse @ X_transpose_y
print('Beta Hat')
print(beta_hat)
# Scikit-learn gave the exact same results!
model.intercept_, model.coef_
###Output
_____no_output_____
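###Markdown
A side note (not part of the lesson): NumPy can solve the same least-squares problem without explicitly forming $(X^{T}X)^{-1}$, which is numerically more stable.
###Code
# Sketch: the same beta-hat via np.linalg.lstsq, using X and y from the cell above
beta_hat_lstsq, residuals, rank, singular_values = np.linalg.lstsq(X, y, rcond=None)
print(beta_hat_lstsq)
###Output
_____no_output_____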
###Markdown
Define overfitting/underfitting and the bias/variance tradeoff Overview Read [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.htmlThe-Bias-variance-trade-off). Jake VanderPlas explains overfitting & underfitting:> Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:> >![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-bias-variance-2.png)>> The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.>> The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. VanderPlas goes on to connect these concepts to the "bias/variance tradeoff":> From the scores associated with these two models, we can make an observation that holds more generally:>>- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.>>- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.>> If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:>>![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png)>> The diagram shown here is often called a validation curve, and we see the following essential features:>>- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.>- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.>- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.>- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.>>The means of tuning the model complexity varies from model to model. So far, our only "means of tuning the model complexity" has been selecting one feature or two features for our linear regression models. 
But we'll quickly start to select more features, and more complex models, with more "hyperparameters." This is just a first introduction to underfitting & overfitting. We'll continue to learn about this topic all throughout this unit. Follow Along: Let's make our own Validation Curve, by tuning a new type of model complexity: polynomial degrees in a linear regression. Go back to the NYC Tribeca condo sales data
###Code
# Read NYC Tribeca condo sales data, from first 4 months of 2019.
# Dataset has 90 rows, 9 columns.
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
assert df.shape == (90, 9)
# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
###Output
_____no_output_____
###Markdown
Do random [train/test split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
###Output
_____no_output_____
###Markdown
Repeatedly fit increasingly complex models, and keep track of the scores
###Code
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Credit for PolynomialRegression: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
polynomial_degrees = range(1, 10, 2)
train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
model = PolynomialRegression(degree)
display(HTML(f'Polynomial degree={degree}'))
model.fit(X_train, y_train)
train_r2 = model.score(X_train, y_train)
test_r2 = model.score(X_test, y_test)
display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))
plt.scatter(X_train, y_train, color='blue', alpha=0.5)
plt.scatter(X_test, y_test, color='red', alpha=0.5)
plt.xlabel(features)
plt.ylabel(target)
x_domain = np.linspace(X.min(), X.max())
curve = model.predict(x_domain)
plt.plot(x_domain, curve, color='blue')
plt.show()
display(HTML('<hr/>'))
train_r2s.append(train_r2)
test_r2s.append(test_r2)
display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
###Output
_____no_output_____ |
src/2017-03-27.ipynb | ###Markdown
2017-03-27
###Code
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
###Output
_____no_output_____
###Markdown
Nonrigorous Simulation We first establish a working resolution
###Code
res = 0.01
dt = res
###Output
_____no_output_____
###Markdown
Plotting our solutions We consider the following set of parameters:
###Code
default_lambda_1, default_lambda_2, default_lambda_3 = 0.086, 0.141, 0.773
###Output
_____no_output_____
###Markdown
We simulate the path of a solution
###Code
def quad2(x_1, y_1, x_2, y_2,
lambda_1 = default_lambda_1,
lambda_2 = default_lambda_2,
lambda_3 = default_lambda_3):
"""
dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
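Here (x_1, y_1) and (x_2, y_2) are the real and imaginary parts of z1 and z2, so the four equations below are the real/imaginary expansion of the two complex ODEs above.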
"""
x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 +lambda_3) * (x_1*y_2 + y_1*x_2)
return x_1_dot, y_1_dot, x_2_dot, y_2_dot
###Output
_____no_output_____
###Markdown
We have three methods of plotting
###Code
def plot_quad(ws, xs, ys, zs, plot_type = 0, txt = ""):
if plot_type == 0:
print("Plotting Double Plot Quad Viz")
plt.figure(1)
plt.subplot(2, 1, 1)
plt.subplots_adjust(top=0.85)
plt.plot(xs, ws)
#plt.yscale('linear')
plt.title('xy')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.subplot(2, 1, 2)
plt.plot(ys, zs)
#plt.yscale('linear')
plt.title('wz')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.suptitle(txt, fontsize=14)
plt.show()
elif plot_type == 1:
print("Plotting Overlain Double Plot Quad Viz")
plt.figure(1)
plt.plot(xs, ws)
plt.plot(ys, zs)
#plt.yscale('linear')
plt.title('x-w, y-z')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.suptitle(txt, fontsize=14)
plt.show()
elif plot_type == 2:
print("Plotting Sphere Plot Quad Viz")
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.subplots_adjust(top=0.85)
plt.suptitle(txt, fontsize=14)
qdist = np.sqrt(quad_dist(ws, xs, ys, zs))  # quad_dist (defined below) returns squared norms; sqrt projects points onto the unit sphere
ws = np.divide(ws, qdist)
xs = np.divide(xs, qdist)
ys = np.divide(ys, qdist)
zs = np.divide(zs, qdist)
ax.plot(xs, ys, zs)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title("Nonrigorous Solution")
plt.show()
else:
print("Invalid Plot Type")
###Output
_____no_output_____
###Markdown
Here we step through our simulation
###Code
stepCnt = 100000
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
ws[0], xs[0], ys[0], zs[0] = ( 0.372854105052,
0.393518965248,
-0.0359026080443,
-0.216701666067 )
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
w_dot, x_dot, y_dot, z_dot = quad2(ws[i], xs[i], ys[i], zs[i])
ws[i + 1] = ws[i] + (w_dot * dt)
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
#plot_quad(ws, xs, ys, zs, float(1))
###Output
_____no_output_____
###Markdown
Seeking a periodic orbitWe will leverage the homoegeneity of the system to find a hypothetical solution (nonrigorously still, of course). We do this by fixing a period T and seeking to minimize the distance between f(x_0 + T) and f(x_0). We will vary x_0, seeking improvements via Newton's method. Random restarts may be necessary.
###Code
def f(x_1, y_1, x_2, y_2):
"""Just a clone of quad2"""
return quad2(x_1, y_1, x_2, y_2)
def F(x_1, y_1, x_2, y_2, T):
"""Find f(x + T)"""
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
w_dot, x_dot, y_dot, z_dot = f(ws[i], xs[i], ys[i], zs[i])
ws[i + 1] = ws[i] + (w_dot * dt)
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
return ws[-1], xs[-1], ys[-1], zs[-1]
def quad_dist(w, x, y, z):
"""Computes the Euclidian distance"""
return [w[i]**2 + x[i]**2 + y[i]**2 + z[i]**2 for i in range(len(w))]
def quad_sq_distance(x, y):
"""Computes the squared distance"""
dists = [ x[i] - y[i] for i in range(len(x) )]
dists = [ dists[i]**2 for i in range(len(x) )]
return sum(dists)
default_start = (0.372854105052, 0.393518965248, -0.0359026080443, -0.216701666067)
## break up tuple, test
testf = f(*default_start)
start_point = F(*default_start, 0)
end_point = F(*default_start, 1)
## testing
print(testf)
print(start_point)
print(end_point)
print(quad_sq_distance(start_point, end_point))
###Output
(-0.06794027539678509, 0.12813928269344135, -0.06568099441507005, 0.08288001831502431)
(0.37285410505200001, 0.39351896524800001, -0.035902608044299997, -0.216701666067)
(0.30628826341289472, 0.51958905370904163, -0.08841827178792637, -0.13720689546493597)
0.0294019919692
###Markdown
We define g to be ( F(x_0) - F(x_0 + T) )^2
###Code
def g(x_1, y_1, x_2, y_2, T = 1):
return quad_sq_distance( F(x_1, y_1, x_2, y_2, T), F(x_1, y_1, x_2, y_2, 0) )
## Testing
print(g(*default_start))
###Output
0.0294019919692
###Markdown
We try minimizing g while varying only T.
###Code
current_T = 105.9
current_dist = 0.7339
current_radius = 5
print(g(*default_start, current_T))
while current_dist > 0.0725:
## compute distances at edges
d_up = g(*default_start, current_T + current_radius)
d_down = g(*default_start, current_T - current_radius)
## halve the radius
current_radius = current_radius / 2
## determine whether or not to move
d_swap = min(d_up, d_down)
if current_dist > d_swap:
if d_up > d_down:
current_T = current_T - current_radius
else:
current_T = current_T + current_radius
current_dist = min(current_dist, d_swap)
print("d_down: " + str(d_down) + " d_current: " + str(current_dist) + " d_up: " + str(d_up))
print("T: " + str(current_T) + " radius: " + str(current_radius) + " dist: " + str(current_dist))
print(g(*default_start, current_T))
###Output
0.357088556366
d_down: 0.806794673054 d_current: 0.150002516851 d_up: 0.150002516851
T: 108.4 radius: 2.5 dist: 0.150002516851
d_down: 0.357088556366 d_current: 0.150002516851 d_up: 0.150002516851
T: 108.4 radius: 1.25 dist: 0.150002516851
d_down: 0.183891097251 d_current: 0.0817850721512 d_up: 0.0817850721512
T: 109.025 radius: 0.625 dist: 0.0817850721512
d_down: 0.086199590712 d_current: 0.0817850721512 d_up: 0.0817850721512
T: 109.025 radius: 0.3125 dist: 0.0817850721512
d_down: 0.0762484007108 d_current: 0.074520993286 d_up: 0.074520993286
T: 109.18125 radius: 0.15625 dist: 0.074520993286
d_down: 0.072553750155 d_current: 0.072553750155 d_up: 0.074520993286
T: 109.103125 radius: 0.078125 dist: 0.072553750155
d_down: 0.072553750155 d_current: 0.072553750155 d_up: 0.0728805925208
T: 109.103125 radius: 0.0390625 dist: 0.072553750155
d_down: 0.0724952627042 d_current: 0.0724952627042 d_up: 0.0726593604254
T: 109.08359375 radius: 0.01953125 dist: 0.0724952627042
0.0725012738654
###Markdown
We see that it is easy to get stuck in infinite loops while in troughs.
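One way to avoid the hand-rolled halving loop (a sketch, not run here, and slow since every evaluation of g simulates the full path): scipy.optimize.minimize_scalar can search over T directly within a bounded interval.
###Code
# Sketch: search for a candidate period T with a bounded scalar minimizer
from scipy.optimize import minimize_scalar

result = minimize_scalar(lambda T: g(*default_start, T), bounds=(100, 115), method='bounded')
print(result.x, result.fun)
###Output
_____no_output_____
###Markdown
Back to varying the initial condition x_0 with Newton's method: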
###Code
iteration_count = 1000
x = default_start
import operator
def tuple_add(a, b):
return tuple(map(operator.add, a, b) )
def tuple_subtract(a, b):
b_neg = tuple([-k for k in b])
return tuple(map(operator.add, a, b_neg) )
def list_subtract(a, b):
return list(map(operator.sub, a, b))
def approx_derivs(x):
"""Approximate partial deritatives of x"""
gx0 = g(*x)
x_1_dot = ( g(*tuple_add(x, (dt, 0, 0 , 0 ) ) ) - gx0 ) / dt
x_2_dot = ( g(*tuple_add(x, (0, dt, 0 , 0 ) ) ) - gx0 ) / dt
y_1_dot = ( g(*tuple_add(x, (0, 0, dt, 0 ) ) ) - gx0 ) / dt
y_2_dot = ( g(*tuple_add(x, (0, 0, 0 , dt) ) ) - gx0 ) / dt
return (x_1_dot, x_2_dot, y_1_dot, y_2_dot)
def newton_iterate(x):
gx0 = g(*x)
x_1_dot = ( g(*tuple_add(x, (dt, 0, 0 , 0 ) ) ) - gx0 ) / dt
x_2_dot = ( g(*tuple_add(x, (0, dt, 0 , 0 ) ) ) - gx0 ) / dt
y_1_dot = ( g(*tuple_add(x, (0, 0, dt, 0 ) ) ) - gx0 ) / dt
y_2_dot = ( g(*tuple_add(x, (0, 0, 0 , dt) ) ) - gx0 ) / dt
# for i in range(iteration_count):
# ## perform Newton iteration
# x_dot = approx_derivs(x)
# x = tuple_substract(x, x_dot)
import numdifftools as nd
def g_nd(x):
return g(*tuple(x))
g_hessian = nd.core.Hessian(g_nd)
g_jacobian = nd.core.Jacobian(g_nd)
x_0 = default_start
print(g_hessian(x_0))
print("---")
print(g_jacobian(x_0))
print("---")
print(np.linalg.inv(g_hessian(x_0)))
print("---")
print(np.matmul(np.linalg.inv(g_hessian(x_0)), np.transpose(g_jacobian(x_0))))
###Output
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/scipy/linalg/basic.py:884: RuntimeWarning: internal gelsd driver lwork query error, required iwork dimension not returned. This is likely the result of LAPACK bug 0038, fixed in LAPACK 3.2.2 (released July 21, 2010). Falling back to 'gelss' driver.
warnings.warn(mesg, RuntimeWarning)
###Markdown
We now begin Newton iterations
###Code
x_0 = list(default_start)
x = x_0
hessian = nd.core.Hessian(g_nd)
jacobian = nd.core.Jacobian(g_nd)
for i in range(10):
adjust = np.matmul(np.linalg.inv(hessian(x)), np.transpose( jacobian(x)))
adjust = np.transpose(adjust)[0]
#print(x)
#print(adjust)
x = list_subtract(x, adjust)
print(x)
print(g_nd(x))
print(x)
print(default_start)
print(default_lambda_1, default_lambda_2, default_lambda_3)
def newton_search(x_0, T = 1):
x = x_0
hessian = nd.core.Hessian(g_nd)
jacobian = nd.core.Jacobian(g_nd)
for i in range(100):
adjust = np.matmul(np.linalg.inv(hessian(x)), np.transpose( jacobian(x)))
adjust = np.transpose(adjust)[0]
#print(x)
#print(adjust)
x = list_subtract(x, adjust)
print(x)
print(g_nd(x))
print(x)
x_0 = list([3, 2, 3, 2])
#newton_search(x_0)
start_plot_1 = (0, 5, -40, -40)
start_plot_1 = default_start
stepCnt = 100000
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
ws[0], xs[0], ys[0], zs[0] = start_plot_1
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
w_dot, x_dot, y_dot, z_dot = quad2(ws[i], xs[i], ys[i], zs[i])
ws[i + 1] = ws[i] + (w_dot * dt)
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
# print(w_dot, x_dot, y_dot, z_dot)
# print(ws[i], xs[i], ys[i], zs[i])
#plot_quad(ws, xs, ys, zs, 0)
#plot_quad(ws, ys, xs, zs, 0)
#plot_quad(*start_plot_1)
def experiment_1(start_pt = default_start,
T = 1,
lmbda = [default_lambda_1, default_lambda_2, default_lambda_3],
res = 0.001,
expmt = "search"):
## define evaluation function
def dots(x_0, lmbda):
"""
dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
"""
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
# print(lmbda)
lambda_1 = lmbda[0]
lambda_2 = lmbda[1]
lambda_3 = lmbda[2]
x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 +lambda_3) * (x_1*y_2 + y_1*x_2)
return [x_1_dot, y_1_dot, x_2_dot, y_2_dot]
#return [-x_1_dot, -y_1_dot, -x_2_dot, -y_2_dot]
def f(x_0, lmbda, T = 1):
"""Find f(x_0 + T)"""
### TODO: refactor, make into array, then transpose
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1, ))
xs = np.empty((stepCnt + 1, ))
ys = np.empty((stepCnt + 1, ))
zs = np.empty((stepCnt + 1, ))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
return [ ws[-1], xs[-1], ys[-1], zs[-1] ]
def g(x_0, lmbda, T = 1):
"""objective function"""
return quad_sq_distance( f(x_0, lmbda, T), f(x_0, lmbda, 0) )
def g_T(x_0):
"""g instantiated with a fixed period"""
return g(x_0, lmbda, T)
def newton_search(x_0, T = 1, N = 25):
x = x_0
hessian = nd.core.Hessian(g_T)
jacobian = nd.core.Jacobian(g_T)
for i in range(N):
adjust = np.matmul(np.linalg.inv(hessian(x)), np.transpose( jacobian(x)))
adjust = np.transpose(adjust)[0]
#print(x)
#print(adjust)
x = list_subtract(x, adjust)
print(g_T(x))
print(x)
def plot_sim_path(x_0, T):
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
plot_quad(ws, xs, ys, zs, 0)
if expmt == 'search':
newton_search(start_pt)
if expmt == 'plot':
plot_sim_path(start_pt, T)
# experiment_1((10.2,
# 9.3,
# 14.4,
# 12.2) , expmt = 'plot')
# experiment_1((4.2, 3.3, 4.4, 2.2),
# T = 10000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'plot')
# experiment_1(default_start,
# T = 1000000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'plot')
# experiment_1((4.2, 3.3, 4.4, 2.2),
# T = 1000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
# experiment_1(default_start,
# T = 1000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
###Output
_____no_output_____
###Markdown
Call on 3/31/2017 - Time reversal might not work: not all directions are good or bad. If you have some in either direction it's not clear what to do. - Paper by Cvitanovic, reference 8, from Royal Society paper: http://rsta.royalsocietypublishing.org/content/369/1944/2345 - Very similar to what we've tried: https://journals.aps.org/pre/abstract/10.1103/PhysRevE.69.016217 So far: minimize $|F(T) - F(0)|^2$ with $T = 1$, s.t. $F(0) = f_0$. - Now I'm waiting on another paper he's looking for, to minimize instead. Question: how do I look for start points? Call of 4/8 - Try minimizing instead: $\phi = \int_0^T \lVert f(t + T) - f(t) \rVert^2 \, dt$, so that if there were a periodic orbit of period $T$, $\phi = 0$
###Code
import scipy.integrate as integrate
import scipy.special as special
from scipy.integrate import quad
def test_integrand(t, a = 0, b = 0):
return t**2 + a - b
result = quad(test_integrand, 2, 4, args=(0.13, 1.03))
#result_simps = integrate.simps(integrand, 2, 4, args=(0.13, 1.03))
print("Result: " + str(result[0]) + ",\nError Bound: " + str(result[1]))
from scipy.optimize import newton
def experiment_2(start_pt = default_start,
T = 1,
lmbda = [default_lambda_1, default_lambda_2, default_lambda_3],
res = 0.001,
expmt = "search"):
## define evaluation function
def dots(x_0, lmbda):
"""
dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
"""
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
# print(lmbda)
lambda_1 = lmbda[0]
lambda_2 = lmbda[1]
lambda_3 = lmbda[2]
x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 +lambda_3) * (x_1*y_2 + y_1*x_2)
return [x_1_dot, y_1_dot, x_2_dot, y_2_dot]
#return [-x_1_dot, -y_1_dot, -x_2_dot, -y_2_dot]
def f(x_0, lmbda, T = 1):
"""Find f(x_0 + T)"""
### TODO: refactor, make into array, then transpose
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1, ))
xs = np.empty((stepCnt + 1, ))
ys = np.empty((stepCnt + 1, ))
zs = np.empty((stepCnt + 1, ))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
return [ ws[-1], xs[-1], ys[-1], zs[-1] ]
def f_integrand(t, x_0, lmbda, T = 1):
return quad_sq_distance(f(x_0, lmbda, t + T), f(x_0, lmbda, t))
def phi(t, x_0, lmbda):
"""What we want to minimize"""
return quad(f_integrand, 0, T, args=(x_0, lmbda))[0]
def phi_instance(t):
return phi(t, start_pt, lmbda)
def newton_search(t, T = 1, N = 25):
newton(phi_instance, t)
def plot_sim_path(x_0, T):
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
plot_quad(ws, xs, ys, zs, 0)
if expmt == 'search':
newton_search(t = 100)
if expmt == 'plot':
plot_sim_path(start_pt, T)
# experiment_2((4.2, 3.3, 4.4, 2.2),
# T = 1,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
# experiment_2(default_start,
# T = 1,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
###Output
_____no_output_____
###Markdown
It appears that the computation takes too long or is too expensive to be interesting.
###Code
# experiment_2((-4.2, 3.3, -4.4, 2.2),
# T = 100,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
###Output
_____no_output_____
###Markdown
Same here: It appears that the computation takes too long or is too expensive to be interesting. Can we consider other optimization algorithms, such as: - Nelder-Mead: derivative-free simplex search - Powell's Conjugate Direction Method - Conjugate Gradient: for systems of linear equations - Broyden–Fletcher–Goldfarb–Shanno: quasi-Newton method - Newton-Conjugate Gradient - Limited Memory BFGS - Truncated Newton - COBYLA: for constrained problems. Licensed, in Fortran. - Sequential Least Squares Programming: quasi-Newton method - Trust Region, dogleg: https://optimization.mccormick.northwestern.edu/index.php/Trust-region_methods - Newton conjugate gradient trust-region algorithm. Questions for 4/12/2017: - How do I choose starting points? - What can I do with random restarts? - What can we do with trust regions? Can we explore that? Next Steps from 4/12/2017 - Poincare sections: plot it in 3-D. Try a few. - Simulate a path - Write a method that sees which side of the section you're on - Record a point if it swaps sections
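Several of the methods listed above are available behind one interface in scipy.optimize.minimize, so they could be tried on the objective g_nd defined earlier with very little code (a sketch, not run here; each evaluation simulates the full path, so this is expensive):
###Code
# Sketch: try derivative-free and quasi-Newton methods on g_nd instead of hand-rolled Newton steps
from scipy.optimize import minimize

for method in ['Nelder-Mead', 'Powell', 'BFGS']:
    result = minimize(g_nd, list(default_start), method=method)
    print(method, result.fun, result.x)
###Output
_____no_output_____
###Markdown
First, though, the Poincare-section plan: a hyperplane class that reports which side of the section a point is on.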
###Code
class HyperPlane:
def __init__(self, a, b, c, d, e):
self.a = a
self.b = b
self.c = c
self.d = d
self.e = e
def __call__(self, xywz):
"""Determines which side of the hyperplane that xywz is on"""
return self.a * xywz[0] + self.b * xywz[1] + self.c * xywz[2] + self.d * xywz[3] - self.e
def whichSide(self, pt):
val = self.__call__(pt)
if val > 0: return 1
elif val < 0: return -1
else: return 0
def __str__(self):
return "Hyperplane: " + str(self.a) + "*x_1 + " + \
str(self.b) + "*y_1 + " + \
str(self.c) + "*x_2 + " + \
str(self.d) + "*y_2" + \
" = " + str(self.e)
## Testing the HyperPlane class
testplane = HyperPlane(3,2,1,3,-4)
print(testplane([2,2,2,4,1]))
print(testplane([-10,-10,-10,-10,-10]))
print(testplane.whichSide([2,2,2.4,4,1]))
print(testplane.whichSide([0, 0.0, -4.0, 0, 0]))
print(testplane.whichSide([-10.3,-10,-10,-10,-10]))
print(testplane)
class IntersectChecker:
def __init__(self, hyperplane = HyperPlane(1, 1, 1, 1, 4) ):
self.hyperplane = hyperplane
self.flip = 0
def __call__(self, xywz):
"""
Checks if we crossed the hyperplane given by abcde.
Returns 0 if no crossing.
Return -1 if crossed from positive to negative.
Return 1 if crossed from negative to positive
"""
val = self.hyperplane.whichSide(xywz)
if self.flip == 0:
## on first pass
self.flip = val
return 0
elif val != self.flip:
## changed
self.flip = val
return val
else:
## unchanged
return 0
def poincarePlot(ws, xs, ys, zs, crossings, txt = " "):
## Plot setup
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.subplots_adjust(top=0.85)
plt.suptitle(txt, fontsize=14)
## slice
crossings_array = np.array(crossings)
indices = list(np.where(crossings_array < 0)[0])
ws = list(np.array(ws)[indices])
xs = list(np.array(xs)[indices])
ys = list(np.array(ys)[indices])
## execute
ax.plot(ws, xs, ys)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title(txt)
plt.show()
def poincareExtract(ws, xs, ys, zs, crossings):
## slice
crossings_array = np.array(crossings)
indices = list(np.where(crossings_array < 0)[0])
print("crossings: " + str(len(indices)))
ws = list(np.array(ws)[indices])
xs = list(np.array(xs)[indices])
ys = list(np.array(ys)[indices])
zs = list(np.array(zs)[indices])
return ws, xs, ys, zs
def experiment_3(start_pt = default_start,
T = 1,
lmbda = [default_lambda_1, default_lambda_2, default_lambda_3],
res = 0.001,
hyperplane = HyperPlane(0.2, 0.4, 1.2, -2.1, -1.1),
expmt = "search"):
## define evaluation function
def dots(x_0, lmbda):
"""
dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
"""
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
# print(lmbda)
lambda_1 = lmbda[0]
lambda_2 = lmbda[1]
lambda_3 = lmbda[2]
x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 +lambda_3) * (x_1*y_2 + y_1*x_2)
return [x_1_dot, y_1_dot, x_2_dot, y_2_dot]
## if reversing time
#return [-x_1_dot, -y_1_dot, -x_2_dot, -y_2_dot]
def f(x_0, lmbda, T = 1):
"""Find f(x_0 + T)"""
### TODO: refactor, make into array, then transpose
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1, ))
xs = np.empty((stepCnt + 1, ))
ys = np.empty((stepCnt + 1, ))
zs = np.empty((stepCnt + 1, ))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
return [ ws[-1], xs[-1], ys[-1], zs[-1] ]
def f_integrand(t, x_0, lmbda, T = 1):
return quad_sq_distance(f(x_0, lmbda, t + T), f(x_0, lmbda, t))
def phi(t, x_0, lmbda):
"""What we want to minimize"""
return quad(f_integrand, 0, T, args=(x_0, lmbda))[0]
def phi_instance(t):
return phi(t, start_pt, lmbda)
def newton_search(t, T = 1, N = 25):
newton(phi_instance, t)
def plot_sim_path(x_0, T):
"""Simulate path, collecting Poincare crossings"""
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
crossings = np.empty((stepCnt + 1,))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
crossings[0] = 0
intersect_checker = IntersectChecker(hyperplane)
## for tracking min/max/mean of path, relative to hyperplane
pts = np.empty((stepCnt,))
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
pt = (ws[i + 1], xs[i + 1], ys[i + 1], zs[i + 1])
pts[i] = hyperplane(pt)
# print(hyperplane(pt))
crossings[i + 1] = intersect_checker((ws[i + 1], xs[i + 1], ys[i + 1], zs[i + 1]))
print(max(pts))
print(min(pts))
print(sum(pts) / len(pts))
poincareExtract(ws, xs, ys, zs, crossings)
poincarePlot(ws, xs, ys, zs, crossings, str(hyperplane))
if expmt == 'print':
print("not yet implemented")
if expmt == 'plot':
plot_sim_path(start_pt, T)
# experiment_3((4.2, 3.3, 4.4, 2.2),
# T = 1000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
experiment_1( (0.032, 0.308, -0.1, -0.5) ,
T = 10000,
lmbda = [0.086, 0.141, 0.773],
expmt = 'plot')
# experiment_3(default_start,
# T = 10000,
# lmbda = [0.086, 0.141, 0.773],
# hyperplane = HyperPlane(4, -3, -1, -4, 0),
# expmt = 'plot')
###Output
Plotting Double Plot Quad Viz
|
Data Warehouse/Amazon United Kingdom/.ipynb_checkpoints/Amazon_UK - Food - Coffee --ns-checkpoint.ipynb | ###Markdown
List of Products
###Code
amazon_usa = {'health_and_beauty':{'hair_products':{'shampoo':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A17911764011%2Cn%3A11057651&dc&',
'conditioner':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A17911764011%2Cn%3A11057251&dc&',
'hair_scalp_treatment':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A11057431&dc&',
'treatment_oil':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A10666439011&dc&',
'hair_loss':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11057241%2Cn%3A10898755011&dc&'},
'skin_care':{'body':{'cleansers':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060521%2Cn%3A11056281&dc&',
'moisturizers':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060521%2Cn%3A11060661&dc&',
'treatments':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060521%2Cn%3A11056421&dc&'},
'eyes':{'creams':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11061941%2Cn%3A7730090011&dc&',
'gels':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11061941%2Cn%3A7730092011&dc&',
'serums':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11061941%2Cn%3A7730098011&dc&'},
'face':{'f_cleansers':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11060901&dc&',
'f_moisturizers':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11060901&dc&',
'scrubs':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11061091&dc&',
'toners':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11061931&dc&',
'f_treatments':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A11060711%2Cn%3A11061931&dc&'},
'lipcare':'https://www.amazon.com/s?i=beauty-intl-ship&bbn=16225006011&rh=n%3A%2116225006011%2Cn%3A11060451%2Cn%3A3761351&dc&'}},
'food':{'tea':{'herbal':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318401%2Cn%3A16318511&dc&',
'green':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318401%2Cn%3A16318471&dc&',
'black':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318401%2Cn%3A16318411&dc&',
'chai':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318401%2Cn%3A348022011&dc&'},
'coffee':'https://www.amazon.com/s?k=tea&i=grocery&rh=n%3A16310101%2Cn%3A16310231%2Cn%3A16521305011%2Cn%3A16318031%2Cn%3A2251593011&dc&',
'dried_fruits':{'mixed':'https://www.amazon.com/s?k=dried+fruits&i=grocery&rh=n%3A16310101%2Cn%3A6506977011%2Cn%3A9865332011%2Cn%3A9865334011%2Cn%3A9865348011&dc&',
'mangoes':'https://www.amazon.com/s?k=dried+fruits&rh=n%3A16310101%2Cn%3A9865346011&dc&'},
'nuts':{'mixed':'https://www.amazon.com/s?k=nuts&rh=n%3A16310101%2Cn%3A16322931&dc&',
'peanuts':'https://www.amazon.com/s?k=nuts&i=grocery&rh=n%3A16310101%2Cn%3A18787303011%2Cn%3A16310221%2Cn%3A16322881%2Cn%3A16322941&dc&',
'cashews':'https://www.amazon.com/s?k=nuts&i=grocery&rh=n%3A16310101%2Cn%3A18787303011%2Cn%3A16310221%2Cn%3A16322881%2Cn%3A16322901&dc&'}},
'supplements':{'sports':{'pre_workout':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A6973663011%2Cn%3A6973697011&dc&',
'protein':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A6973663011%2Cn%3A6973704011&dc&',
'fat_burner':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A6973663011%2Cn%3A6973679011&dc&',
'weight_gainer':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A6973663011%2Cn%3A6973725011&dc&'},
'vitamins_dietary':{'supplements':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A3764441%2Cn%3A6939426011&dc&',
'multivitamins':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A3774861&dc&'}},
'wellness':{'ayurveda':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A10079996011%2Cn%3A13052911%2Cn%3A13052941&dc&',
'essential_oil_set':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A10079996011%2Cn%3A13052911%2Cn%3A18502613011&dc&',
'massage_oil':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A10079996011%2Cn%3A14442631&dc&'},
'personal_accessories':{'bags':{'women':{'clutches':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A17037745011&dc&',
'crossbody':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A2475899011&dc&',
'fashion':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A16977745011&dc&',
'hobo':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A16977747011&dc&'}},
'jewelry':{'anklets':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454897011&dc&',
'bracelets':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454898011&dc&',
'earrings':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454917011&dc&',
'necklaces':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454917011&dc&',
'rings':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454939011&dc&'},
'artisan_fabrics':'https://www.amazon.com/s?k=fabrics&rh=n%3A2617941011%2Cn%3A12899121&dc&'}}
amazon_uk = {'health_and_beauty':{'hair_products':{'shampoo':'https://www.amazon.co.uk/b/ref=amb_link_5?ie=UTF8&node=74094031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031',
'conditioner':'https://www.amazon.co.uk/b/ref=amb_link_6?ie=UTF8&node=2867976031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031',
'hair_loss':'https://www.amazon.co.uk/b/ref=amb_link_11?ie=UTF8&node=2867979031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031',
'hair_scalp_treatment':'https://www.amazon.co.uk/b/ref=amb_link_7?ie=UTF8&node=2867977031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031',
'treatment_oil':'https://www.amazon.co.uk/hair-oil-argan/b/ref=amb_link_8?ie=UTF8&node=2867981031&pf_rd_m=A3P5ROKL5A1OLE&pf_rd_s=merchandised-search-leftnav&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_r=KF9SM53J2HXHP4EJD3AH&pf_rd_t=101&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_p=aaaa7182-fdd6-4b35-8f0b-993e78880b69&pf_rd_i=66469031'},
'skin_care':{'body':{'cleanser':'https://www.amazon.co.uk/s/ref=lp_344269031_nr_n_3?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A344269031%2Cn%3A344282031&bbn=344269031&ie=UTF8&qid=1581612722&rnid=344269031',
'moisturizers':'https://www.amazon.co.uk/s/ref=lp_344269031_nr_n_1?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A344269031%2Cn%3A2805272031&bbn=344269031&ie=UTF8&qid=1581612722&rnid=344269031'},
'eyes':{'creams':'https://www.amazon.co.uk/s/ref=lp_118465031_nr_n_0?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118465031%2Cn%3A344259031&bbn=118465031&ie=UTF8&qid=1581612984&rnid=118465031',
'gels':'https://www.amazon.co.uk/s/ref=lp_118465031_nr_n_1?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118465031%2Cn%3A344258031&bbn=118465031&ie=UTF8&qid=1581613044&rnid=118465031',
'serums':'https://www.amazon.co.uk/s/ref=lp_118465031_nr_n_3?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118465031%2Cn%3A344257031&bbn=118465031&ie=UTF8&qid=1581613044&rnid=118465031'},
'face':{'cleansers':'https://www.amazon.co.uk/s/ref=lp_118466031_nr_n_1?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118466031%2Cn%3A344265031&bbn=118466031&ie=UTF8&qid=1581613120&rnid=118466031',
'moisturizers':'https://www.amazon.co.uk/s/ref=lp_118466031_nr_n_3?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118466031%2Cn%3A2805291031&bbn=118466031&ie=UTF8&qid=1581613120&rnid=118466031',
'toners':'https://www.amazon.co.uk/s/ref=lp_118466031_nr_n_0?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118466031%2Cn%3A344267031&bbn=118466031&ie=UTF8&qid=1581613120&rnid=118466031',
'treatments':'https://www.amazon.co.uk/s?bbn=118466031&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118466031%2Cn%3A18918424031&dc&fst=as%3Aoff&qid=1581613120&rnid=118466031&ref=lp_118466031_nr_n_7'},
'lipcare':'https://www.amazon.co.uk/s/ref=lp_118464031_nr_n_4?fst=as%3Aoff&rh=n%3A117332031%2Cn%3A%21117333031%2Cn%3A118464031%2Cn%3A118467031&bbn=118464031&ie=UTF8&qid=1581613357&rnid=118464031'}},
'food':{'tea':{'herbal':'https://www.amazon.co.uk/s?k=tea&i=grocery&rh=n%3A340834031%2Cn%3A358584031%2Cn%3A11711401%2Cn%3A406567031&dc&qid=1581613483&rnid=344155031&ref=sr_nr_n_1',
'green':'https://www.amazon.co.uk/s?k=tea&i=grocery&rh=n%3A340834031%2Cn%3A358584031%2Cn%3A11711401%2Cn%3A406566031&dc&qid=1581613483&rnid=344155031&ref=sr_nr_n_3',
'black':'https://www.amazon.co.uk/s?k=tea&i=grocery&rh=n%3A340834031%2Cn%3A358584031%2Cn%3A11711401%2Cn%3A406564031&dc&qid=1581613483&rnid=344155031&ref=sr_nr_n_2'},
'coffee':'https://www.amazon.co.uk/s?k=coffee&rh=n%3A340834031%2Cn%3A11711391&dc&qid=1581613715&rnid=1642204031&ref=sr_nr_n_2',
'dried_fruits':{'mixed':'https://www.amazon.co.uk/s?k=dried+fruits&rh=n%3A340834031%2Cn%3A9733163031&dc&qid=1581613770&rnid=1642204031&ref=sr_nr_n_2'},
'nuts':{'mixed':'https://www.amazon.co.uk/s?k=mixed&rh=n%3A359964031&ref=nb_sb_noss',
'peanuts':'https://www.amazon.co.uk/s?k=peanuts&rh=n%3A359964031&ref=nb_sb_noss',
'cashews':'https://www.amazon.co.uk/s?k=cashew&rh=n%3A359964031&ref=nb_sb_noss'}},
'supplements':{'sports':{'pre_workout':'https://www.amazon.co.uk/b/?node=5977685031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hc3L_1&pf_rd_r=C5MZHH5TH5F868B6FQWD&pf_rd_p=8086b6c9-ae16-5c3c-a879-030afa4ee08f&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826478031',
'protein':'https://www.amazon.co.uk/b/?node=2826510031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hc3L_0&pf_rd_r=C5MZHH5TH5F868B6FQWD&pf_rd_p=8086b6c9-ae16-5c3c-a879-030afa4ee08f&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826478031',
'fat_burner':'https://www.amazon.co.uk/b/?node=5977737031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hc3L_2&pf_rd_r=C5MZHH5TH5F868B6FQWD&pf_rd_p=8086b6c9-ae16-5c3c-a879-030afa4ee08f&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826478031'},
'vitamins_dietary':{'supplements':'https://www.amazon.co.uk/b/?_encoding=UTF8&node=2826534031&bbn=65801031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hdc7_2&pf_rd_r=AY01DQVCB4SE7VVE7MTK&pf_rd_p=1ecdbf02-af23-502a-b7ab-9916ddd6690c&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826484031',
'multivitamins':'https://www.amazon.co.uk/b/?_encoding=UTF8&node=2826506031&bbn=65801031&ref_=Oct_s9_apbd_odnav_hd_bw_b35Hdc7_1&pf_rd_r=AY01DQVCB4SE7VVE7MTK&pf_rd_p=1ecdbf02-af23-502a-b7ab-9916ddd6690c&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=2826484031'}},
'wellness':{'massage_oil':'https://www.amazon.co.uk/b/?node=3360479031&ref_=Oct_s9_apbd_odnav_hd_bw_b50nmJ_4&pf_rd_r=GYVYF52HT2004EDTY67W&pf_rd_p=3f8e4361-c00b-588b-a07d-ff259bf98bbc&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=74073031',
'ayurveda':'https://www.amazon.co.uk/s?k=ayurveda&rh=n%3A65801031%2Cn%3A2826449031&dc&qid=1581686978&rnid=1642204031&ref=sr_nr_n_22'},
'personal_accessories':{'bags':{'women':{'clutches':'https://www.amazon.co.uk/b/?node=1769563031&ref_=Oct_s9_apbd_odnav_hd_bw_b1vkt8h_3&pf_rd_r=VC8RX89R4V4JJ5TEBANF&pf_rd_p=cefca17f-8dac-5c80-848f-812aff1bfdd7&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=1769559031',
'crossbody':'https://www.amazon.co.uk/b/?node=1769564031&ref_=Oct_s9_apbd_odnav_hd_bw_b1vkt8h_1&pf_rd_r=VC8RX89R4V4JJ5TEBANF&pf_rd_p=cefca17f-8dac-5c80-848f-812aff1bfdd7&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=1769559031',
'fashion':'https://www.amazon.co.uk/b/?node=1769560031&ref_=Oct_s9_apbd_odnav_hd_bw_b1vkt8h_5&pf_rd_r=VC8RX89R4V4JJ5TEBANF&pf_rd_p=cefca17f-8dac-5c80-848f-812aff1bfdd7&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=1769559031',
'hobo':'https://www.amazon.co.uk/b/?node=1769565031&ref_=Oct_s9_apbd_odnav_hd_bw_b1vkt8h_4&pf_rd_r=VC8RX89R4V4JJ5TEBANF&pf_rd_p=cefca17f-8dac-5c80-848f-812aff1bfdd7&pf_rd_s=merchandised-search-11&pf_rd_t=BROWSE&pf_rd_i=1769559031'}},
'jewelry':{'anklets':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_0?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382860031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031',
'bracelets':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_1?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382861031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031',
'earrings':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_4?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382865031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031',
'necklaces':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_7?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382868031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031',
'rings':'https://www.amazon.co.uk/s/ref=lp_10382835031_nr_n_10?fst=as%3Aoff&rh=n%3A193716031%2Cn%3A%21193717031%2Cn%3A10382835031%2Cn%3A10382871031&bbn=10382835031&ie=UTF8&qid=1581687575&rnid=10382835031'},
'artisan_fabrics':'https://www.amazon.co.uk/s?k=fabric&rh=n%3A11052681%2Cn%3A3063518031&dc&qid=1581687726&rnid=1642204031&ref=a9_sc_1'}}
amazon_india = {'health_and_beauty':{'hair_products':{'shampoo':'https://www.amazon.in/b/ref=s9_acss_bw_cg_btyH1_2a1_w?ie=UTF8&node=1374334031&pf_rd_m=A1K21FY43GMZF8&pf_rd_s=merchandised-search-5&pf_rd_r=JHDJ4QHM0APVS05NGF4G&pf_rd_t=101&pf_rd_p=41b9c06b-1514-47de-a1c6-f4f13fb55ffe&pf_rd_i=1374305031',
'conditioner':'https://www.amazon.in/b/ref=s9_acss_bw_cg_btyH1_2b1_w?ie=UTF8&node=1374306031&pf_rd_m=A1K21FY43GMZF8&pf_rd_s=merchandised-search-5&pf_rd_r=CBABMCW6C69JRBGZNWWP&pf_rd_t=101&pf_rd_p=41b9c06b-1514-47de-a1c6-f4f13fb55ffe&pf_rd_i=1374305031',
'treatment_oil':''},
'skin_care':[],
'wellness_product':[]},
'food':{'tea':[],
'coffee':[],
'dried_fruits':[],
'nuts':[],
'supplements':[]},
'personal_accessories':{'bags':[],
'jewelry':[],
'artisan_fabrics':[]}}
amazon_aus = {'health_and_beauty':{'hair_products':{'shampoo':'https://www.amazon.com.au/b/?_encoding=UTF8&node=5150253051&bbn=4851917051&ref_=Oct_s9_apbd_odnav_hd_bw_b5cXATz&pf_rd_r=6SEM7GFDN7CQ2W4KXM9M&pf_rd_p=9dd4b462-1094-5e36-890d-bb1b694c8b53&pf_rd_s=merchandised-search-12&pf_rd_t=BROWSE&pf_rd_i=5150070051',
'conditioner':'https://www.amazon.com.au/b/?_encoding=UTF8&node=5150226051&bbn=4851917051&ref_=Oct_s9_apbd_odnav_hd_bw_b5cXATz&pf_rd_r=6SEM7GFDN7CQ2W4KXM9M&pf_rd_p=9dd4b462-1094-5e36-890d-bb1b694c8b53&pf_rd_s=merchandised-search-12&pf_rd_t=BROWSE&pf_rd_i=5150070051'},
'skin_care':[],
'wellness_product':[]},
'food':{'tea':{'herbal':'',
'green':'',
'black':'',
'chai':''},
'coffee':'https://www.amazon.com.au/s/ref=lp_5555314051_nr_n_0?fst=as%3Aoff&rh=n%3A5547635051%2Cn%3A%215547636051%2Cn%3A5555314051%2Cn%3A5555382051&bbn=5555314051&ie=UTF8&qid=1584207291&rnid=5555314051',
'dried_fruits':{'mixed':'',
'mangoes':''},
'nuts':{'mixed':'',
'peanuts':'',
'cashews':''}},
'supplements':{'sports':{'pre_workout':'',
'protein':'',
'fat_burner':'',
'weight_gainer':''},
'vitamins_dietary':{'supplements':'',
'multivitamins':''}},
########
'wellness':{'ayurveda':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A10079996011%2Cn%3A13052911%2Cn%3A13052941&dc&',
'essential_oil_set':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A10079996011%2Cn%3A13052911%2Cn%3A18502613011&dc&',
'massage_oil':'https://www.amazon.com/s?k=supplements&i=hpc&rh=n%3A3760901%2Cn%3A10079996011%2Cn%3A14442631&dc&'},
'personal_accessories':{'bags':{'women':{'clutches':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A17037745011&dc&',
'crossbody':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A2475899011&dc&',
'fashion':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A16977745011&dc&',
'hobo':'https://www.amazon.com/s?k=bags&i=fashion-womens-handbags&bbn=15743631&rh=n%3A7141123011%2Cn%3A%217141124011%2Cn%3A7147440011%2Cn%3A15743631%2Cn%3A16977747011&dc&'}},
'jewelry':{'anklets':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454897011&dc&',
'bracelets':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454898011&dc&',
'earrings':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454917011&dc&',
'necklaces':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454917011&dc&',
'rings':'https://www.amazon.com/s?i=fashion-womens-intl-ship&bbn=16225018011&rh=n%3A16225018011%2Cn%3A7192394011%2Cn%3A7454939011&dc&'},
'artisan_fabrics':'https://www.amazon.com/s?k=fabrics&rh=n%3A2617941011%2Cn%3A12899121&dc&'}}
amazon = {'USA':amazon_usa,
'UK':amazon_uk,
'India':amazon_india,
'Australia':amazon_aus}
def hover(browser, xpath):
'''
This function makes an automated mouse hovering in the selenium webdriver
element based on its xpath.
PARAMETER
---------
browser: Selenium based webbrowser
xpath: str
xpath of the element in the webpage where hover operation has to be
performed.
'''
element_to_hover_over = browser.find_element_by_xpath(xpath)
hover = ActionChains(browser).move_to_element(element_to_hover_over)
hover.perform()
element_to_hover_over.click()
def browser(link):
    '''This function opens a Selenium-based Chrome browser specifically tuned
    to work for Amazon product (singular item) webpages. Its functionality
    includes translation of the webpage, clicking the initial popups, and hovering
    over the product images so that the images can be scraped.
PARAMETER
---------
link: str
Amazon Product item link
RETURN
------
driver: Selenium web browser with operated functions
'''
options = Options()
prefs = {
"translate_whitelists": {"ja":"en","de":'en'},
"translate":{"enabled":"true"}
}
# helium = r'C:\Users\Dell-pc\AppData\Local\Google\Chrome\User Data\Default\Extensions\njmehopjdpcckochcggncklnlmikcbnb\4.2.12_0'
# options.add_argument(helium)
options.add_experimental_option("prefs", prefs)
options.headless = True
driver = webdriver.Chrome(chrome_options=options)
driver.get(link)
try:
driver.find_element_by_xpath('//*[@id="nav-main"]/div[1]/div[2]/div/div[3]/span[1]/span/input').click()
except:
pass
    # Hover over each alternate-image thumbnail (li[3] .. li[9]) and close the
    # image popover after each hover so every image URL is loaded into the
    # page source; failures are ignored for products with fewer images.
    for i in range(3, 10):
        try:
            hover(driver, '//*[@id="altImages"]/ul/li[{}]'.format(i))
        except:
            pass
        try:
            driver.find_element_by_xpath('//*[@id="a-popover-6"]/div/header/button/i').click()
        except:
            pass
return driver
def scroll_temp(driver):
'''
Automated Scroller in Selenium Webbrowser
PARAMETER
---------
driver: Selenium Webbrowser
'''
pre_scroll_height = driver.execute_script('return document.body.scrollHeight;')
run_time, max_run_time = 0, 2
while True:
iteration_start = time.time()
        # Scroll the webpage; 0.6*scrollHeight scrolls to 60% of the current page height
driver.execute_script('window.scrollTo(0,0.6*document.body.scrollHeight);')
post_scroll_height = driver.execute_script('return document.body.scrollHeight;')
scrolled = post_scroll_height != pre_scroll_height
timed_out = run_time >= max_run_time
if scrolled:
run_time = 0
pre_scroll_height = post_scroll_height
elif not scrolled and not timed_out:
run_time += time.time() - iteration_start
elif not scrolled and timed_out:
break
# def scroll(driver):
# scroll_temp(driver)
# from selenium.common.exceptions import NoSuchElementException
# try:
# element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]/div/div[1]')
# except NoSuchElementException:
# try:
# element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]')
# except NoSuchElementException:
# element = driver.find_element_by_xpath('//*[@id="detail-bullets_feature_div"]')
# actions = ActionChains(driver)
# actions.move_to_element(element).perform()
def scroll(driver):
scroll_temp(driver)
from selenium.common.exceptions import NoSuchElementException
try:
try:
element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]/div/div[1]')
except NoSuchElementException:
try:
element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]')
except NoSuchElementException:
element = driver.find_element_by_xpath('//*[@id="detail-bullets_feature_div"]')
actions = ActionChains(driver)
actions.move_to_element(element).perform()
except NoSuchElementException:
pass
def browser_link(product_link,country):
'''Returns all the web link of the products based on the first
page of the product category. It captures product link of all the pages for
that specific product.
PARAMETER
---------
link: str
The initial web link of the product page. This is generally the
        first page of all the items for that specific product
RETURN
------
links: list
It is a list of strings which contains all the links of the items
for the specific product
'''
driver = browser(product_link)
soup = BeautifulSoup(driver.page_source, 'lxml')
try:
pages_soup = soup.findAll("ul",{"class":"a-pagination"})
pages = int(pages_soup[0].findAll("li",{'class':'a-disabled'})[1].text)
except:
pass
try:
pages_soup = soup.findAll("div",{"id":"pagn"})
pages = int(pages_soup[0].findAll("span",{'class':'pagnDisabled'})[0].text)
except:
try:
pages_soup = soup.findAll("div",{"id":"pagn"})
pages = int(pages_soup[0].findAll("span",{'class':'pagnDisabled'})[1].text)
except:
pass
print(pages)
links = []
for page in range(1,pages+1):
print(page)
link_page = product_link + '&page=' + str(page)
driver_temp = browser(link_page)
time.sleep(2)
soup_temp = BeautifulSoup(driver_temp.page_source, 'lxml')
try:
search = soup_temp.findAll("div",{"id":"mainResults"})
temp_search = search[1].findAll("a",{'class':'a-link-normal s-access-detail-page s-color-twister-title-link a-text-normal'})
for i in range(len(temp_search)):
if country == 'Australia':
link = temp_search[i].get('href')
else:
link = countries_link[country] + temp_search[i].get('href')
links.append(link)
print(len(links))
except:
try:
search = soup_temp.findAll("div",{"class":"s-result-list s-search-results sg-row"})
temp_search = search[1].findAll("h2")
if len(temp_search) < 2:
for i in range(len(search[0].findAll("h2"))):
temp = search[0].findAll("h2")[i]
for j in range(len(temp.findAll('a'))):
link = countries_link[country]+temp.findAll('a')[j].get('href')
links.append(link)
print(len(links))
else:
for i in range(len(search[1].findAll("h2"))):
temp = search[1].findAll("h2")[i]
for j in range(len(temp.findAll('a'))):
link = countries_link[country]+temp.findAll('a')[j].get('href')
links.append(link)
print(len(links))
except:
pass
try:
search = soup_temp.findAll("div",{"id":"mainResults"})
temp_search = search[0].findAll("a",{'class':'a-link-normal s-access-detail-page s-color-twister-title-link a-text-normal'})
for i in range(len(temp_search)):
if country == 'Australia':
link = temp_search[i].get('href')
else:
link = countries_link[country] + temp_search[i].get('href')
links.append(link)
print(len(links))
except:
try:
search = soup_temp.findAll("div",{"class":"s-result-list s-search-results sg-row"})
temp_search = search[1].findAll("h2")
if len(temp_search) < 2:
for i in range(len(search[0].findAll("h2"))):
temp = search[0].findAll("h2")[i]
for j in range(len(temp.findAll('a'))):
link = countries_link[country]+temp.findAll('a')[j].get('href')
links.append(link)
print(len(links))
else:
for i in range(len(search[1].findAll("h2"))):
temp = search[1].findAll("h2")[i]
for j in range(len(temp.findAll('a'))):
link = countries_link[country]+temp.findAll('a')[j].get('href')
links.append(link)
print(len(links))
except:
print('Not Scrapable')
return links
def indexes(amazon_links,link_list):
amazon_dict = amazon_links
if len(link_list) == 5:
return amazon_dict[link_list[0]][link_list[1]][link_list[2]][link_list[3]][link_list[4]]
elif len(link_list) == 4:
return amazon_dict[link_list[0]][link_list[1]][link_list[2]][link_list[3]]
elif len(link_list) == 3:
return amazon_dict[link_list[0]][link_list[1]][link_list[2]]
elif len(link_list) == 2:
return amazon_dict[link_list[0]][link_list[1]]
elif len(link_list) == 1:
return amazon_dict[link_list[0]]
else:
return print("Invalid Product")
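# Note: a more general sketch (not used by this pipeline) would walk the nested
# dictionary at any depth by reducing over the key list, e.g.:
#   from functools import reduce
#   reduce(lambda d, k: d[k], link_list, amazon_links)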
def products_links(country, **kwargs):
amazon_links = amazon[country]
directory_temp = []
for key, value in kwargs.items():
directory_temp.append(value)
directory = '/'.join(directory_temp)
print(directory)
product_link = indexes(amazon_links,directory_temp)
main_links = browser_link(product_link,country=country)
return main_links,directory
###Output
_____no_output_____
###Markdown
Product Scraper Function
###Code
def delete_images(filename):
import os
file_path = '/home/jishu/Amazon_AU/'
os.remove(file_path + filename)
def upload_s3(filename,key):
key_id = 'AKIAWR6YW7N5ZKW35OJI'
access_key = 'h/xrcI9A2SRU0ds+zts4EClKAqbzU+/iXdiDcgzm'
bucket_name = 'amazon-data-ecfullfill'
s3 = boto3.client('s3',aws_access_key_id=key_id,
aws_secret_access_key=access_key)
try:
s3.upload_file(filename,bucket_name,key)
except FileNotFoundError:
pass
def product_info(link,directory,country):
'''Get all the product information of an Amazon Product'''
#Opening Selenium Webdrive with Amazon product
driver = browser(link)
time.sleep(4)
scroll(driver)
time.sleep(2)
#Initializing BeautifulSoup operation in selenium browser
selenium_soup = BeautifulSoup(driver.page_source, 'lxml')
time.sleep(2)
#Product Title
try:
product_title = driver.find_element_by_xpath('//*[@id="productTitle"]').text
except:
product_title = 'Not Scrapable'
print(product_title)
#Ratings - Star
try:
rating_star = float(selenium_soup.findAll('span',{'class':'a-icon-alt'})[0].text.split()[0])
except:
rating_star = 'Not Scrapable'
print(rating_star)
#Rating - Overall
try:
overall_rating = int(selenium_soup.findAll('span',{'id':'acrCustomerReviewText'})[0].text.split()[0].replace(',',''))
except:
overall_rating = 'Not Scrapable'
print(overall_rating)
#Company
try:
company = selenium_soup.findAll('a',{'id':'bylineInfo'})[0].text
except:
company = 'Not Scrapable'
print(country)
#Price
try:
denomination = '$'
if country=='UAE':
denomination = selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[:3]
price = float(selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[3:])
else:
denomination = selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[0]
price = float(selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[1:])
except:
try:
if country=='UAE':
try:
price = float(selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[3:].replace(',',''))
except:
price = float(selenium_soup.findAll('span',{'id':'priceblock_dealprice'})[0].text[3:].replace(',',''))
else:
try:
price = float(selenium_soup.findAll('span',{'id':'priceblock_ourprice'})[0].text[3:].replace(',',''))
except:
price = float(selenium_soup.findAll('span',{'id':'priceblock_dealprice'})[0].text[3:].replace(',',''))
except:
denomination = 'Not Scrapable'
price = 'Not Scrapable'
print(denomination,price)
#Product Highlights
try:
temp_ph = selenium_soup.findAll('ul',{'class':'a-unordered-list a-vertical a-spacing-none'})[0].findAll('li')
counter_ph = len(temp_ph)
product_highlights = []
for i in range(counter_ph):
raw = temp_ph[i].text
clean = raw.strip()
product_highlights.append(clean)
product_highlights = '<CPT14>'.join(product_highlights)
except:
try:
temp_ph = selenium_soup.findAll('div',{'id':'rich-product-description'})[0].findAll('p')
counter_ph = len(temp_ph)
product_highlights = []
for i in range(counter_ph):
raw = temp_ph[i].text
clean = raw.strip()
product_highlights.append(clean)
product_highlights = '<CPT14>'.join(product_highlights)
except:
product_highlights = 'Not Available'
print(product_highlights)
#Product Details/Dimensions:
#USA
try:
temp_pd = selenium_soup.findAll('div',{'class':'content'})[0].findAll('ul')[0].findAll('li')
counter_pd = len(temp_pd)
for i in range(counter_pd):
try:
if re.findall('ASIN',temp_pd[i].text)[0]:
try:
asin = temp_pd[i].text.split(' ')[1]
except:
pass
except IndexError:
pass
try:
if re.findall('Product Dimensions|Product Dimension|Product dimensions',temp_pd[i].text)[0]:
pd_temp = temp_pd[i].text.strip().split('\n')[2].strip().split(';')
try:
product_length = float(pd_temp[0].split('x')[0])
except IndexError:
pass
try:
product_width = float(pd_temp[0].split('x')[1])
except IndexError:
pass
try:
product_height = float(pd_temp[0].split('x')[2].split(' ')[1])
except IndexError:
pass
try:
pd_unit = pd_temp[0].split('x')[2].split(' ')[2]
except IndexError:
pass
try:
product_weight = float(pd_temp[1].split(' ')[1])
except IndexError:
pass
try:
weight_unit = pd_temp[1].split(' ')[2]
except IndexError:
pass
except:
pass
try:
if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text)[0]:
sweight_temp = temp_pd[i].text.split(':')[1].strip().split(' ')
shipping_weight = float(sweight_temp[0])
shipping_weight_unit = sweight_temp[1]
except IndexError:
pass
try:
if re.findall('Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text)[0]:
x = temp_pd[i].text.replace('\n','').split(' ')
indexes = []
for j,k in enumerate(x):
if re.findall('#',k):
indexes.append(j)
try:
best_seller_cat = int(temp_pd[i].text.strip().replace('\n','').split(' ')[3].replace(',',''))
best_seller_prod = int(x[indexes[0]].split('#')[1].split('in')[0])
except:
try:
best_seller_cat = x[indexes[0]].split('#')[1]
except:
pass
try:
best_seller_prod = x[indexes[1]].split('#')[1].split('in')[0]
except:
pass
except IndexError:
pass
print(asin)
except:
pass
try:
temp_pd = selenium_soup.findAll('div',{'class':'content'})[1].findAll('ul')[0].findAll('li')
counter_pd = len(temp_pd)
for i in range(counter_pd):
try:
if re.findall('ASIN',temp_pd[i].text)[0]:
try:
asin = temp_pd[i].text.split(' ')[1]
except:
pass
except IndexError:
pass
try:
if re.findall('Product Dimensions|Product Dimension|Product dimensions',temp_pd[i].text)[0]:
pd_temp = temp_pd[i].text.strip().split('\n')[2].strip().split(';')
try:
product_length = float(pd_temp[0].split('x')[0])
except IndexError:
pass
try:
product_width = float(pd_temp[0].split('x')[1])
except IndexError:
pass
try:
product_height = float(pd_temp[0].split('x')[2].split(' ')[1])
except IndexError:
pass
try:
pd_unit = pd_temp[0].split('x')[2].split(' ')[2]
except IndexError:
pass
try:
product_weight = float(pd_temp[1].split(' ')[1])
except IndexError:
pass
try:
weight_unit = pd_temp[1].split(' ')[2]
except IndexError:
pass
except:
pass
try:
if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text)[0]:
sweight_temp = temp_pd[i].text.split(':')[1].strip().split(' ')
shipping_weight = float(sweight_temp[0])
shipping_weight_unit = sweight_temp[1]
except IndexError:
pass
try:
if re.findall('Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text)[0]:
x = temp_pd[i].text.replace('\n','').split(' ')
indexes = []
for j,k in enumerate(x):
if re.findall('#',k):
indexes.append(j)
try:
best_seller_cat = int(temp_pd[i].text.strip().replace('\n','').split(' ')[3].replace(',',''))
best_seller_prod = int(x[indexes[0]].split('#')[1].split('in')[0])
except:
try:
best_seller_cat = x[indexes[0]].split('#')[1]
except:
pass
try:
best_seller_prod = x[indexes[1]].split('#')[1].split('in')[0]
except:
pass
except IndexError:
pass
print(asin)
except:
pass
#India
try:
temp_pd = selenium_soup.findAll('div',{'class':'content'})[0].findAll('ul')[0].findAll('li')
counter_pd = len(temp_pd)
for i in range(counter_pd):
try:
if re.findall('ASIN',temp_pd[i].text)[0]:
asin = temp_pd[i].text.split(' ')[1]
except:
pass
try:
if re.findall('Product Dimensions|Product Dimension|Product dimensions',temp_pd[i].text)[0]:
pd_temp = temp_pd[i].text.strip().split('\n')[2].strip().split(' ')
try:
product_length = float(pd_temp[0])
except:
pass
try:
product_width = float(pd_temp[2])
except:
pass
try:
product_height = float(pd_temp[4])
except:
pass
try:
pd_unit = pd_temp[5]
except:
pass
try:
product_weight = float(pd_temp[1].split(' ')[1])
except:
pass
try:
weight_unit = pd_temp[1].split(' ')[2]
except:
pass
print(asin)
except IndexError:
pass
try:
if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text)[0]:
sweight_temp = temp_pd[i].text.split(':')[1].strip().split(' ')
shipping_weight = float(sweight_temp[0])
shipping_weight_unit = sweight_temp[1]
except IndexError:
pass
try:
if re.findall('Item Weight|Product Weight|Item weight|Product weight|Boxed-product Weight',temp_pd[i].text)[0]:
pd_weight_temp = temp_pd[i].text.replace('\n','').strip().split(' ')[1].strip()
product_weight = float(pd_weight_temp.split(' ')[0])
weight_unit = pd_weight_temp.split(' ')[1]
except IndexError:
pass
try:
if re.findall('Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text)[0]:
x = temp_pd[i].text.strip().replace('\n','').split(' ')
indexes = []
for j,k in enumerate(x):
if re.findall('#',k):
indexes.append(j)
try:
best_seller_cat = int(temp_pd[i].text.strip().replace('\n','').split(' ')[3].replace(',',''))
best_seller_prod = int(x[indexes[0]].split('#')[1].split('in')[0])
except:
try:
best_seller_cat = x[indexes[0]].split('#')[1]
except:
pass
try:
best_seller_prod = x[indexes[1]].split('#')[1].split('in')[0]
except:
pass
except IndexError:
pass
print(asin)
except:
pass
try:
try:
asin = list(selenium_soup.findAll('div',{'class':'pdTab'})[1].findAll('tr')[0].findAll('td')[1])[0]
except:
pass
try:
dimensions = list(selenium_soup.findAll('div',{'class':'pdTab'})[0].findAll('tr')[0].findAll('td')[1])[0]
except:
pass
try:
weight_temp = list(selenium_soup.findAll('div',{'class':'pdTab'})[1].findAll('tr')[1].findAll('td')[1])[0]
except:
pass
try:
best_seller_cat = float(list(selenium_soup.findAll('div',{'class':'pdTab'})[1].findAll('tr')[5].findAll('td')[1])[0].split('\n')[-1].split(' ')[0].replace(',',''))
except:
pass
try:
best_seller_prod = int(list(list(list(list(selenium_soup.findAll('div',{'class':'pdTab'})[1].findAll('tr')[5].findAll('td')[1])[5])[1])[1])[0].replace('#',''))
except:
pass
try:
product_length = float(dimensions.split('x')[0])
except:
pass
try:
product_width = float(dimensions.split('x')[1])
except:
pass
try:
product_height = float(dimensions.split('x')[2].split(' ')[1])
except:
pass
try:
product_weight = weight_temp.split(' ')[0]
except:
pass
try:
weight_unit = weight_temp.split(' ')[1]
except:
pass
try:
pd_unit = dimensions.split(' ')[-1]
except:
pass
print(asin)
except:
try:
for j in [0,1]:
temp_pd = selenium_soup.findAll('table',{'class':'a-keyvalue prodDetTable'})[j].findAll('tr')
for i in range(len(temp_pd)):
if re.findall('ASIN',temp_pd[i].text):
asin = temp_pd[i].text.strip().split('\n')[3].strip()
if re.findall('Item Model Number|Item model number',temp_pd[i].text):
bait = temp_pd[i].text.strip().split('\n')[3].strip()
if re.findall('Best Sellers Rank|Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text):
x = temp_pd[i].text.strip().replace('\n','').split(' ')
indexes = []
for j,k in enumerate(x):
if re.findall('#',k):
indexes.append(j)
best_seller_cat = int(x[indexes[0]].split('#')[1])
best_seller_prod = int(x[indexes[1]].split('#')[1].split('in')[0])
if re.findall('Product Dimensions|Product dimension|Product Dimension',temp_pd[i].text):
dimensions = temp_pd[i].text.strip().split('\n')[3].strip().split('x')
product_length = float(dimensions[0].strip())
product_width = float(dimensions[1].strip())
product_height = float(dimensions[2].strip().split(' ')[0])
pd_unit = dimensions[2].strip().split(' ')[1]
if re.findall('Item Weight|Product Weight|Item weight|Boxed-product Weight',temp_pd[i].text):
weight_temp = temp_pd[i].text.strip().split('\n')[3].strip()
product_weight = float(weight_temp.split(' ')[0])
weight_unit = weight_temp.split(' ')[1]
if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text):
sweight_temp = temp_pd[i].text.replace('\n','').strip().split(' ')[1].lstrip().split(' ')
shipping_weight = float(sweight_temp[0])
shipping_weight_unit = sweight_temp[1]
print(asin,bait)
except:
try:
temp_pd = selenium_soup.findAll('div',{'id':'prodDetails'})[0].findAll('tr')
for i in range(len(temp_pd)):
if re.findall('ASIN',temp_pd[i].text):
asin = temp_pd[i].text.strip().split('\n')[3].strip()
if re.findall('Best Sellers Rank|Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text):
x = temp_pd[i].text.strip().replace('\n','').split(' ')
indexes = []
for j,k in enumerate(x):
if re.findall('#',k):
indexes.append(j)
best_seller_cat = int(x[indexes[0]].split('#')[1])
best_seller_prod = int(x[indexes[1]].split('#')[1].split('in')[0])
if re.findall('Product Dimensions|Product dimension|Product Dimension',temp_pd[i].text):
dimensions = temp_pd[i].text.strip().split('\n')[3].strip().split('x')
product_length = float(dimensions[0].strip())
product_width = float(dimensions[1].strip())
product_height = float(dimensions[2].strip().split(' ')[0])
pd_unit = dimensions[2].strip().split(' ')[1]
if re.findall('Item Weight|Product Weight|Item weight|Boxed-product Weight',temp_pd[i].text):
weight_temp = temp_pd[i].text.strip().split('\n')[3].strip()
product_weight = float(weight_temp.split(' ')[0])
weight_unit = weight_temp.split(' ')[1]
if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text):
sweight_temp = temp_pd[i].text.replace('\n','').strip().split(' ')[1].lstrip().split(' ')
shipping_weight = float(sweight_temp[0])
shipping_weight_unit = sweight_temp[1]
except:
try:
temp_pd = selenium_soup.findAll('div',{'id':'detail_bullets_id'})[0].findAll('tr')[0].findAll('li')
for i in range(len(temp_pd)):
if re.findall('ASIN',temp_pd[i].text):
asin = temp_pd[i].text.strip().split(':')[1].strip()
if re.findall('Best Sellers Rank|Amazon Best Sellers Rank|Amazon Bestsellers Rank',temp_pd[i].text):
x = temp_pd[i].text.strip().replace('\n','').split(' ')
indexes = []
for j,k in enumerate(x):
if re.findall('#',k):
indexes.append(j)
best_seller_cat = int(x[indexes[0]].split('#')[1])
best_seller_prod = int(x[indexes[1]].split('#')[1].split('in')[0])
if re.findall('Product Dimensions|Product dimension|Product Dimension',temp_pd[i].text):
dimensions = temp_pd[i].text.strip().split('\n')[2].strip().split('x')
product_length = float(dimensions[0].strip())
product_width = float(dimensions[1].strip())
product_height = float(dimensions[2].strip().split(' ')[0])
pd_unit = dimensions[2].strip().split(' ')[1]
if re.findall('Item Weight|Product Weight|Item weight|Boxed-product Weight',temp_pd[i].text):
weight_temp = temp_pd[i].text.strip().split('\n')[2].strip()
product_weight = float(weight_temp.split(' ')[0])
weight_unit = weight_temp.split(' ')[1]
if re.findall('Shipping Weight|Shipping weight|shipping weight',temp_pd[i].text):
sweight_temp = temp_pd[i].text.replace('\n','').strip().split(' ')[1].lstrip().split(' ')
shipping_weight = float(sweight_temp[0])
shipping_weight_unit = sweight_temp[1]
except:
pass
try:
print(asin)
except NameError:
asin = 'Not Scrapable'
try:
print(best_seller_cat)
except NameError:
best_seller_cat = 'Not Scrapable'
try:
print(best_seller_prod)
except NameError:
best_seller_prod = 'Not Scrapable'
try:
print(product_length)
except NameError:
product_length = 'Not Scrapable'
try:
print(product_width)
except NameError:
product_width = 'Not Scrapable'
try:
print(product_height)
except NameError:
product_height = 'Not Scrapable'
try:
print(product_weight)
except NameError:
product_weight = 'Not Scrapable'
try:
print(weight_unit)
except NameError:
weight_unit = 'Not Scrapable'
try:
print(pd_unit)
except NameError:
pd_unit = 'Not Scrapable'
try:
print(shipping_weight_unit)
except NameError:
shipping_weight_unit = 'Not Scrapable'
try:
print(shipping_weight)
except NameError:
shipping_weight = 'Not Scrapable'
print(product_length,product_width,product_height,product_weight,asin,pd_unit,
best_seller_cat,best_seller_prod,weight_unit,shipping_weight,shipping_weight_unit)
#Customer Review Ratings - Overall
time.sleep(0.5)
try:
temp_crr = selenium_soup.findAll('table',{'id':'histogramTable'})[1].findAll('a')
crr_main = {}
crr_temp = []
counter_crr = len(temp_crr)
for i in range(counter_crr):
crr_temp.append(temp_crr[i]['title'])
crr_temp = list(set(crr_temp))
for j in range(len(crr_temp)):
crr_temp[j] = crr_temp[j].split(' ')
stopwords = ['stars','represent','of','rating','reviews','have']
for word in list(crr_temp[j]):
if word in stopwords:
crr_temp[j].remove(word)
print(crr_temp[j])
try:
if re.findall(r'%',crr_temp[j][1])[0]:
crr_main.update({int(crr_temp[j][0]): int(crr_temp[j][1].replace('%',''))})
except:
crr_main.update({int(crr_temp[j][1]): int(crr_temp[j][0].replace('%',''))})
except:
try:
temp_crr = selenium_soup.findAll('table',{'id':'histogramTable'})[1].findAll('span',{'class':'a-offscreen'})
crr_main = {}
counter_crr = len(temp_crr)
star = counter_crr
for i in range(counter_crr):
crr_main.update({star:int(temp_crr[i].text.strip().split('/n')[0].split(' ')[0].replace('%',''))})
star -= 1
except:
pass
try:
crr_5 = crr_main[5]
except:
crr_5 = 0
try:
crr_4 = crr_main[4]
except:
crr_4 = 0
try:
crr_3 = crr_main[3]
except:
crr_3 = 0
try:
crr_2 = crr_main[2]
except:
crr_2 = 0
try:
crr_1 = crr_main[1]
except:
crr_1 = 0
#Customer Review Ratings - By Feature
time.sleep(1)
try:
driver.find_element_by_xpath('//*[@id="cr-summarization-attributes-list"]/div[4]/a/span').click()
temp_fr = driver.find_element_by_xpath('//*[@id="cr-summarization-attributes-list"]').text
temp_fr = temp_fr.split('\n')
crr_feature_title = []
crr_feature_rating = []
for i in [0,2,4]:
crr_feature_title.append(temp_fr[i])
for j in [1,3,5]:
crr_feature_rating.append(temp_fr[j])
crr_feature = dict(zip(crr_feature_title,crr_feature_rating))
except:
try:
temp_fr = driver.find_element_by_xpath('//*[@id="cr-summarization-attributes-list"]').text
temp_fr = temp_fr.split('\n')
crr_feature_title = []
crr_feature_rating = []
for i in [0,2,4]:
crr_feature_title.append(temp_fr[i])
for j in [1,3,5]:
crr_feature_rating.append(temp_fr[j])
crr_feature = dict(zip(crr_feature_title,crr_feature_rating))
except:
crr_feature = 'Not Defined'
try:
crr_feature_key = list(crr_feature.keys())
except:
pass
try:
crr_fr_1 = crr_feature[crr_feature_key[0]]
except:
crr_fr_1 = 0
try:
crr_fr_2 = crr_feature[crr_feature_key[1]]
except:
crr_fr_2 = 0
try:
crr_fr_3 = crr_feature[crr_feature_key[2]]
except:
crr_fr_3 = 0
#Tags:
time.sleep(1)
try:
temp_tags = selenium_soup.findAll('div',{'class':'cr-lighthouse-terms'})[0]
counter_tags = len(temp_tags)
print('Counter Tags:',counter_tags)
tags = []
for i in range(counter_tags):
tags.append(temp_tags.findAll('span')[i].text.strip())
print(tags[i])
except:
tags = ['None']
try:
for feature in crr_feature_key:
tags.append(feature)
except:
pass
tags = list(set(tags))
tags = '<CPT14>'.join(tags)
print(tags)
#Images
images = []
for i in [0,3,4,5,6,7,8,9]:
try:
images.append(selenium_soup.findAll('div',{'class':'imgTagWrapper'})[i].find('img')['src'])
except:
pass
import urllib.request
for i in range(len(images)):
if asin =='Not Scrapable':
product_image = "{}_{}.jpg".format(product_title,i)
product_image = product_image.replace('/','')
urllib.request.urlretrieve(images[i],product_image)
upload_s3("{}_{}.jpg".format(product_title,i),
directory+"/images/" + product_image)
delete_images(product_image)
else:
product_image = "{}_{}.jpg".format(asin,i)
product_image = product_image.replace('/','')
urllib.request.urlretrieve(images[i],product_image)
upload_s3("{}_{}.jpg".format(asin,i),
directory+"/images/" + product_image)
delete_images(product_image)
return [product_title,rating_star,overall_rating,company,price,
product_highlights,product_length,product_width,product_height,
product_weight,asin,pd_unit,best_seller_cat,best_seller_prod,
weight_unit,shipping_weight,shipping_weight_unit,crr_5,crr_4,
crr_3,crr_2,crr_1,crr_fr_1,crr_fr_2,crr_fr_3,tags,directory]
###Output
_____no_output_____
###Markdown
Data Wrangling
###Code
def database(product_data,**kwargs):
try:
try:
link = kwargs['link']
except KeyError:
print('Error in Link')
try:
country = kwargs['country']
except KeyError:
print("Enter Country Name")
try:
cat1 = kwargs['cat1']
except KeyError:
pass
try:
cat2 = kwargs['cat2']
except KeyError:
pass
try:
cat3 = kwargs['cat3']
except KeyError:
pass
try:
cat4 = kwargs['cat4']
except KeyError:
pass
try:
product = kwargs['product']
except KeyError:
print("Enter Product Name")
metadata = [link,country,cat1,cat2,cat3,cat4,product]
except NameError:
try:
cat4 = None
metadata = [link,country,cat1,cat2,cat3,cat4,product]
except NameError:
try:
cat4 = None
cat3 = None
metadata = [link,country,cat1,cat2,cat3,cat4,product]
except NameError:
cat4 = None
cat3 = None
cat2 = None
metadata = [link,country,cat1,cat2,cat3,cat4,product]
conn = sqlite3.connect('{}.db'.format(product))
headers = ['link','country','cat1','cat2','cat3','cat4','product','product_title',
'rating_star','overall_rating','company','price',
'product_highlights','product_length','product_width','product_height',
'product_weight','asin','pd_unit','best_seller_cat','best_seller_prod',
'weight_unit','shipping_weight','shipping_weight_unit','crr_5','crr_4',
'crr_3','crr_2','crr_1','crr_fr_1','crr_fr_2','crr_fr_3','tags','images_link']
product_data.append(metadata)
product_data = product_data[-1] + product_data[:len(product_data)-1]
temp = pd.DataFrame(data= [product_data],columns=headers)
temp.to_sql('Product',conn,if_exists='append')
upload_s3(product+'.db',directory+'/'+product+'.db')
conn.close()
def checkpoint(link_list,directory,product):
BUCKET_NAME = 'amazon-data-ecfullfill'
key_id = 'AKIAWR6YW7N5ZKW35OJI'
access_key = 'h/xrcI9A2SRU0ds+zts4EClKAqbzU+/iXdiDcgzm'
KEY = '{}/{}.db'.format(directory,product)
s3 = boto3.resource('s3',aws_access_key_id=key_id,
aws_secret_access_key=access_key)
try:
s3.Bucket(BUCKET_NAME).download_file(KEY, 'test.db')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
conn = sqlite3.connect('test.db')
try:
df = pd.read_sql('''SELECT * FROM Product''', conn)
product_link = df['link'].unique()
new_list = []
for i in link_list:
if i in product_link:
pass
else:
new_list.append(i)
except:
new_list = link_list
return new_list
###Output
_____no_output_____
###Markdown
Execution
###Code
#Initializing the product per Jupyter Notebook
country = 'Australia'
cat1 = 'food'
# cat2='tea'
# cat3='None'
# cat4 = 'None'
product='coffee'
# links,directory = products_links(country=country,category=cat1,product=product)
# test_1 = {'links':links,'directory':directory}
# import pickle
# with open('au_food_coffee.pkl', 'wb') as f:
# pickle.dump(test_1, f)
import pickle
with open('au_food_coffee.pkl', 'rb') as f:
file = pickle.load(f)
links = file['links']
directory = 'Amazon_AU/food/coffee'
# On a fresh run iterate over `links`; after an interruption, run the
# checkpoint cell below first and iterate over `new_links` instead.
for link in new_links:
data = product_info(link=link,directory=directory,country=country)
conn = sqlite3.connect('{}.db'.format(product))
database(product_data=data,link=link,country=country,
cat1=cat1,product=product)
# Run if there is an interruption
new_links = checkpoint(links,directory,product)
len(new_links)
len(links)
###Output
_____no_output_____
###Markdown
Testing the datasets in S3
###Code
BUCKET_NAME = 'amazon-data-ecfullfill' # replace with your bucket name
key_id = 'AKIAWR6YW7N5ZKW35OJI'
access_key = 'h/xrcI9A2SRU0ds+zts4EClKAqbzU+/iXdiDcgzm'
KEY = 'Amazon_USA/health_and_beauty/hair_products/shampoo/shampoo.db' # replace with your object key
s3 = boto3.resource('s3',aws_access_key_id=key_id,
aws_secret_access_key=access_key)
try:
s3.Bucket(BUCKET_NAME).download_file(KEY, 'test.db')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
conn = sqlite3.connect('shampoo.db')
df_USA = pd.read_sql("SELECT * FROM Product",conn)
df_USA.iloc[:,:15]
df_USA.iloc[:,15:]
len(link_db)
# def upload_s3(filename,key):
# key_id = 'AKIAWR6YW7N5ZKW35OJI'
# access_key = 'h/xrcI9A2SRU0ds+zts4EClKAqbzU+/iXdiDcgzm'
# bucket_name = 'amazon-data-ecfullfill'
# s3 = boto3.client('s3',aws_access_key_id=key_id,
# aws_secret_access_key=access_key)
# # s3.put_object(Bucket=bucket_name, Key='Amazon/health_and_beauty/hair_product/shampoo')
# s3.upload_file(filename,bucket_name,key)
###Output
_____no_output_____ |
notebooks/09_FPR_CNN_Training.ipynb | ###Markdown
Importing Modules
###Code
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Dropout, Input, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from IPython.display import clear_output
###Output
_____no_output_____
###Markdown
Mounting Google Drive to access Training Data
###Code
from google.colab import drive
drive.mount("drive")
###Output
Mounted at drive
###Markdown
Unzip Data
###Code
!unzip drive/MyDrive/Datasets/lc/FPR/FPRDataset.zip
clear_output()
print("Train Nodule:",len(os.listdir("FPRDataset/train/nodule")))
print("Train Non-Nodule:",len(os.listdir("FPRDataset/train/non-nodule")))
print("Test Nodule:",len(os.listdir("FPRDataset/test/nodule")))
print("Test Non-Nodule:",len(os.listdir("FPRDataset/test/non-nodule")))
###Output
Train Nodule: 5126
Train Non-Nodule: 7500
Test Nodule: 1709
Test Non-Nodule: 2500
###Markdown
Creating train & test data generators
###Code
BATCH_SIZE = 96
generator = ImageDataGenerator(rescale=1./255)
trainData = generator.flow_from_directory(
"FPRDataset/train",
target_size=(50,50),
batch_size=BATCH_SIZE,
color_mode='grayscale',
class_mode='binary'
)
testData = generator.flow_from_directory(
"FPRDataset/test",
target_size=(50,50),
batch_size=BATCH_SIZE,
color_mode='grayscale',
class_mode='binary'
)
print(trainData.class_indices)
print(testData.class_indices)
###Output
{'nodule': 0, 'non-nodule': 1}
{'nodule': 0, 'non-nodule': 1}
###Markdown
Callback function for training
###Code
weight_path="checkpoint-{epoch:03d}-{val_loss:.3f}.hdf5"
modelcheckpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True, mode='min')
###Output
_____no_output_____
###Markdown
Defining & creating CNN model
###Code
def get_model():
input = Input(shape=(50,50,1))
x = Conv2D(50, (3,3), activation='relu')(input)
x = MaxPool2D((2,2))(x)
x = Conv2D(64, (3,3), activation='relu')(x)
x = Conv2D(64, (3,3), activation='relu')(x)
x = MaxPool2D((2,2))(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.4)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(inputs=[input], outputs=[x])
return model
model = get_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
###Output
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 50, 50, 1)] 0
conv2d (Conv2D) (None, 48, 48, 50) 500
max_pooling2d (MaxPooling2D (None, 24, 24, 50) 0
)
conv2d_1 (Conv2D) (None, 22, 22, 64) 28864
conv2d_2 (Conv2D) (None, 20, 20, 64) 36928
max_pooling2d_1 (MaxPooling (None, 10, 10, 64) 0
2D)
flatten (Flatten) (None, 6400) 0
dense (Dense) (None, 512) 3277312
dropout (Dropout) (None, 512) 0
dense_1 (Dense) (None, 1) 513
=================================================================
Total params: 3,344,117
Trainable params: 3,344,117
Non-trainable params: 0
_________________________________________________________________
###Markdown
Train model
###Code
with tf.device("/device:GPU:0"):
history = model.fit_generator(
trainData,
epochs=20,
validation_data=testData,
verbose=1,
callbacks=[modelcheckpoint]
)
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:7: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
import sys
###Markdown
Plot the history of training
###Code
plt.figure(figsize=(20,6))
for i, met in enumerate(['accuracy', 'loss']):
plt.subplot(1,2,i+1)
plt.plot(history.history[met], color="b")
plt.plot(history.history["val_"+met], color="g")
plt.title('Model '+met.capitalize())
plt.xlabel('epochs')
plt.ylabel(met)
plt.legend(['train', 'val'])
###Output
_____no_output_____
###Markdown
Save model
###Code
model.save("drive/MyDrive/Datasets/lc/FPR/Training_Logs/2nd_Trial/model.h5")
###Output
_____no_output_____
###Markdown
Load checkpoint model
###Code
model2 = get_model()
model2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model2.load_weights("checkpoint-008-0.257.hdf5")
model2.save("checkpoint_model.h5")
###Output
_____no_output_____
###Markdown
Copy saved model to google drive for future use
###Code
!cp checkpoint_model.h5 drive/MyDrive/Datasets/lc/FPR/Training_Logs/2nd_Trial/best_checkpoint_model.h5
###Output
_____no_output_____ |
06.3.astar-8-puzzle.ipynb | ###Markdown
Solving 8-puzzle using A* Search Algorithmhttps://gist.github.com/flatline/838202
###Code
import random
import math
_goal_state = [[1,2,3],
[4,5,6],
[7,8,0]]
def index(item, seq):
"""Helper function that returns -1 for non-found index value of a seq"""
if item in seq:
return seq.index(item)
else:
return -1
class EightPuzzle:
def __init__(self):
# heuristic value
self._hval = 0
# search depth of current instance
self._depth = 0
# parent node in search path
self._parent = None
self.adj_matrix = []
for i in range(3):
self.adj_matrix.append(_goal_state[i][:])
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
else:
return self.adj_matrix == other.adj_matrix
def __str__(self):
res = ''
for row in range(3):
res += ' '.join(map(str, self.adj_matrix[row]))
res += '\r\n'
return res
def _clone(self):
p = EightPuzzle()
for i in range(3):
p.adj_matrix[i] = self.adj_matrix[i][:]
return p
def _get_legal_moves(self):
"""Returns list of tuples with which the free space may
be swapped"""
# get row and column of the empty piece
row, col = self.find(0)
free = []
# find which pieces can move there
if row > 0:
free.append((row - 1, col))
if col > 0:
free.append((row, col - 1))
if row < 2:
free.append((row + 1, col))
if col < 2:
free.append((row, col + 1))
return free
def _generate_moves(self):
free = self._get_legal_moves()
zero = self.find(0)
def swap_and_clone(a, b):
p = self._clone()
p.swap(a,b)
p._depth = self._depth + 1
p._parent = self
return p
return map(lambda pair: swap_and_clone(zero, pair), free)
def _generate_solution_path(self, path):
if self._parent == None:
return path
else:
path.append(self)
return self._parent._generate_solution_path(path)
def set_board(self, start_str):
self.adj_matrix = []
id = 0
for row in range(3):
vector = []
for col in range(3):
ch = start_str[row * 3 + col]
vector.append(int(ch))
self.adj_matrix.append(vector)
def solve(self, h):
"""Performs A* search for goal state.
h(puzzle) - heuristic function, returns an integer
"""
def is_solved(puzzle):
return puzzle.adj_matrix == _goal_state
openl = [self]
closedl = []
move_count = 0
while len(openl) > 0:
x = openl.pop(0)
move_count += 1
if (is_solved(x)):
if len(closedl) > 0:
return x._generate_solution_path([]), move_count
else:
return [x]
succ = x._generate_moves()
idx_open = idx_closed = -1
for move in succ:
# have we already seen this node?
idx_open = index(move, openl)
idx_closed = index(move, closedl)
hval = h(move)
fval = hval + move._depth
if idx_closed == -1 and idx_open == -1:
move._hval = hval
openl.append(move)
elif idx_open > -1:
copy = openl[idx_open]
if fval < copy._hval + copy._depth:
# copy move's values over existing
copy._hval = hval
copy._parent = move._parent
copy._depth = move._depth
elif idx_closed > -1:
copy = closedl[idx_closed]
if fval < copy._hval + copy._depth:
move._hval = hval
closedl.remove(copy)
openl.append(move)
closedl.append(x)
openl = sorted(openl, key=lambda p: p._hval + p._depth)
# if finished state not found, return failure
return [], 0
def shuffle(self, step_count):
for i in range(step_count):
row, col = self.find(0)
free = self._get_legal_moves()
target = random.choice(free)
self.swap((row, col), target)
row, col = target
def find(self, value):
"""returns the row, col coordinates of the specified value
in the graph"""
if value < 0 or value > 8:
raise Exception("value out of range")
for row in range(3):
for col in range(3):
if self.adj_matrix[row][col] == value:
return row, col
def peek(self, row, col):
"""returns the value at the specified row and column"""
return self.adj_matrix[row][col]
def poke(self, row, col, value):
"""sets the value at the specified row and column"""
self.adj_matrix[row][col] = value
def swap(self, pos_a, pos_b):
"""swaps values at the specified coordinates"""
temp = self.peek(*pos_a)
self.poke(pos_a[0], pos_a[1], self.peek(*pos_b))
self.poke(pos_b[0], pos_b[1], temp)
def heur(puzzle, item_total_calc, total_calc):
"""
Heuristic template that provides the current and target position for each number and the
total function.
Parameters:
puzzle - the puzzle
item_total_calc - takes 4 parameters: current row, target row, current col, target col.
Returns int.
total_calc - takes 1 parameter, the sum of item_total_calc over all entries, and returns int.
This is the value of the heuristic function
"""
t = 0
for row in range(3):
for col in range(3):
val = puzzle.peek(row, col) - 1
target_col = val % 3
            target_row = val // 3  # integer division (Python 3); val is -1 for the blank
# account for 0 as blank
if target_row < 0:
target_row = 2
t += item_total_calc(row, target_row, col, target_col)
return total_calc(t)
#some heuristic functions, the best being the standard manhattan distance in this case, as it comes
#closest to maximizing the estimated distance while still being admissible.
def h_manhattan(puzzle):
return heur(puzzle,
lambda r, tr, c, tc: abs(tr - r) + abs(tc - c),
lambda t : t)
def h_manhattan_lsq(puzzle):
return heur(puzzle,
lambda r, tr, c, tc: (abs(tr - r) + abs(tc - c))**2,
lambda t: math.sqrt(t))
def h_linear(puzzle):
return heur(puzzle,
lambda r, tr, c, tc: math.sqrt(math.sqrt((tr - r)**2 + (tc - c)**2)),
lambda t: t)
def h_linear_lsq(puzzle):
return heur(puzzle,
lambda r, tr, c, tc: (tr - r)**2 + (tc - c)**2,
lambda t: math.sqrt(t))
def h_default(puzzle):
return 0
def main(start_str):
p = EightPuzzle()
print(p)
if start_str is None:
p.shuffle(20)
else:
p.set_board(start_str)
print(p)
path, count = p.solve(h_manhattan)
path.reverse()
for i in path:
print(i)
print("Solved with Manhattan distance exploring", count, "states")
path, count = p.solve(h_manhattan_lsq)
print("Solved with Manhattan least squares exploring", count, "states")
path, count = p.solve(h_linear)
print("Solved with linear distance exploring", count, "states")
path, count = p.solve(h_linear_lsq)
print("Solved with linear least squares exploring", count, "states")
# path, count = p.solve(heur_default)
# print "Solved with BFS-equivalent in", count, "moves"
main("134702865")
###Output
_____no_output_____ |
classes/SQL1_subprocesses.ipynb | ###Markdown
Subprocesses One of the biggest strengths of Python is that it can be used as a *glue* language. It can 'glue' together a series of programs into a flexible and highly extensible pipline. Why subprocessesOne of the most common, yet complicated, tasks that most programming languages need to do is creating new processes. This could be as simple as seeing what files are present in the current working directory (`ls`) or as complicated as creating a program workflow that *pipes* output from one program into another program's input. Many such tasks are easily taken care of through the use of Python libraries and modules (`import`) that *wrap* the programs into Python code, effectively creating Application Programming Interfaces (API). However, there are many use cases that require the user to make calls to the terminal from ***within*** a Python program. Operating System Conundrum As many in this class have found out, while Python can be installed on most operating systems; doing the same thing in one operating system (Unix) may not always yield the same results in another (Windows).The very first step to making a program **"OS-agnostic"** is through the use of the `os` module.
###Code
import os
###Output
_____no_output_____
###Markdown
https://docs.python.org/3/library/os.html
###Code
#dir(os)
help(os.getcwd)
os.getcwd()
help(os.chdir)
# The name of the operating system dependent module imported.
# The following names have currently been registered: 'posix', 'nt', 'java'
# Portable Operating System Interface - IEEE standard designed to facilitate application portability
# (Windows) New Technology - a 32-bit operating system that supports preemptive multitasking
#
os.name
# A list of strings that specifies the search path for modules.
import sys
sys.path
# A mapping object that contains environment variables and their values.
os.environ
# A mapping object representing the string environment.
print(os.environ['HOME'])
#Return the value of the environment variable key if it exists,
#or default if it doesn’t. key, default and the result are str.
print(os.getenv("HOME"))
print(os.getenv("PATH"))
# Returns the list of directories that will be searched for a named executable,
#similar to a shell, when launching a process.
# env, when specified, should be an environment variable dictionary to lookup the PATH in.
# By default, when env is None, environ is used.
os.get_exec_path()
###Output
_____no_output_____
###Markdown
The `os` module wraps OS-specific operations into a set of standardized commands. For instance, the Linux end-of-line (EOL) character is a `\n`, but `\r\n` in Windows. In Python, we can just use the following:
###Code
# EOL - for the current (detected) environment
'''
The string used to separate (or, rather, terminate) lines on the current platform.
This may be a single character, such as '\n' for POSIX, or multiple characters,
for example, '\r\n' for Windows.
Do not use os.linesep as a line terminator when writing files opened in text mode (the default);
use a single '\n' instead, on all platforms.
'''
os.linesep
###Output
_____no_output_____
###Markdown
Another example, in a Linux environment, one must use the following command to list the contents of a given directory:```ls -alh ```In Windows, the equivalent is as follows:```dir```Python allows users to do a single command, in spite of the OS:
###Code
# List directory contents
os.listdir("ProjectCM")
###Output
_____no_output_____
###Markdown
However, the biggest issue for creating an OS-agnostic program is ***paths*** Windows: `"C:\\Users\\MDS\\Documents"`Linux: `/mnt/c/Users/MDS/Documents/`Enter Python:
###Code
# path joining from pwd
pwd = os.getcwd()
print(pwd)
print(os.path.dirname(pwd))
os.path.join(pwd,"ProjectCM","demoCM","test.py")
###Output
_____no_output_____
###Markdown
`subprocess` If you Google anything on how to run shell commands, but don't specify Python 3.x, you will likely get an answer that includes `popen`, `popen2`, or `popen3`. These were the most prolific ways to *open* a new *p*rocess. In Python 3.x, they encapsulated these functions into a new one called `run` available through the `subprocess` library.
###Code
# Import and alias
import subprocess as sp
###Output
_____no_output_____
###Markdown
`check_output`
###Code
help(sp.check_output)
# check_output returns a bytestring by default, so I set encoding to convert it to strings.
# [command, command line arguments]
# change from bytes to string using encoding
sp.check_output(["echo","test"],encoding='utf_8')
# demonstration, might not work if test.py does not have the parsing code
sp.check_output([os.path.join(pwd,"test.py"),"[1,2,3]"],encoding='utf_8')
###Output
_____no_output_____
###Markdown
The first things we will look at are trivial examples that demonstrate just capturing the *output* (stdout) of a program. However, while the `check_output` function is still in the `subprocess` module, it can easily be converted into the more specific and/or flexible `run` function signature. `run`
###Code
help(sp.run)
sub = sp.run(
[
'echo', # The command we want to run
'test' # Arguments for the command
],
encoding='utf_8', # Converting byte code
stdout=sp.PIPE, # Where to send the output
check=True # Whether to raise an error if the process fails
)
sub
[elem for elem in dir(sub) if not elem.startswith("__")]
print(sub.stdout)
###Output
_____no_output_____
###Markdown
The main utility of `check_output` was to capture the output (stdout) of a program. By using the `stdout=subprocess.PIPE` argument, the output can easily be captured, along with its return code. A return code signifies the program's exit status: 0 for success, anything else otherwise
###Code
sub.returncode
###Output
_____no_output_____
###Markdown
With our `run` code above, our program ran to completion, exiting with status 0. The next example shows a different status.
###Code
sp.run(
'exit 1', # Command & arguments
shell = True # Run from the shell
)
###Output
_____no_output_____
###Markdown
However, if the `check=True` argument is used, it will raise a `CalledProcessError` if your program exits with anything different than 0. This is helpful for detecting a pipeline failure, and exiting or correcting before attempting to continue computation.
###Code
sp.run(
'exit 1', # Command & arguments
shell = True, # Run from the shell
check = True # Check exit status
)
sub = sp.run(
'exit 1', # Command & arguments
shell = True, # Run from the shell
# check = True # Check exit status
)
if (sub.returncode != 0):
print(f"Exit code {sub.returncode}. Expected 0 when there is no error.")
###Output
_____no_output_____
###Markdown
Syntax when using `run`:1. A list of arguments: `subprocess.run(['echo', 'test', ...], ...)` 2. A string and `shell`: `subprocess.run('exit 1', shell = True, ...)` The preferred way of using `run` is the first way. This preference is mainly due to security purposes (to prevent shell injection attacks). It also allows the module to take care of any required escaping and quoting of arguments for a pseudo-OS-agnostic approach. There are some guidelines though:1. Sequence (list) of arguments is generally preferred2. A str is appropriate if the user is just calling a program with no arguments3. The user should use a str to pass argument if `shell` is `True`Your next questions should be, "What is `shell`?" `shell` is just your terminal/command prompt. This is the environment where you call `ls/dir` in. It is also where users can define variables. More importantly, this is where your *environmental variables* are set...like `PATH`.By using `shell = True`, the user can now use shell-based environmental variable expansion from within a Python program.
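For quick reference, the two call styles side by side (both run the same `echo` command; `sp` is the `subprocess` alias used above):
```python
# 1. Sequence of arguments (preferred)
sp.run(['echo', 'test'], stdout=sp.PIPE, encoding='utf_8')

# 2. Single string, interpreted by the shell
sp.run('echo test', shell=True, stdout=sp.PIPE, encoding='utf_8')
```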
###Code
sp.run(
'echo $PATH', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
) # Look at the output
p1 = sp.run(
'sleep 5; echo done1', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
)
print(p1)
p2 = sp.run(
'echo done2', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
)
print(p2)
###Output
_____no_output_____
###Markdown
For the most part, you shouldn't need to use `shell` simply because Python has modules in the standard library that can do most of the shell commands. For example `mkdir` can be done with `os.mkdir()`, and `$PATH` can be retrieved using os.getenv("PATH") or os.get_exec_path() as shown above. Blocking vs Non-blocking The last topic of this lecture is "blocking". This is computer science lingo/jargon for whether or not a program ***waits*** until something is complete before moving on. Think of this like a really bad website that takes forever to load because it is waiting until it has rendered all its images first, versus the website that sets the formatting and text while it works on the images. 1. `subprocess.run()` is blocking (it waits until the process is complete)2. `subprocess.Popen()` is non-blocking (it will run the command, then move on) ***Most*** use cases can be handled through the use of `run()`. `run()` is just a *wrapped* version of `Popen()` that simplifies use. However, `Popen()` allows the user a more flexible control of the subprocess call. `Popen()` can be used similar way as run (with more optional parameters). An example use case for `Popen()` is if the user has some intermediate data that needs to get processed, but the output of that data doesn't necessarily affect the rest of the pipeline. `Popen`
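If a `Popen` process does eventually need to be waited on, `Popen.poll()` checks (without blocking) whether it has finished and `Popen.wait()` blocks until it does; a minimal sketch, assuming the `sp` alias from above:
```python
proc = sp.Popen(['sleep', '2'])   # returns immediately (non-blocking)
print(proc.poll())                # None -> still running
print(proc.wait())                # blocks until finished, returns the exit code (0)
```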
###Code
p1 = sp.Popen(
'sleep 5; echo done1', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
)
print(p1)
p2 = sp.Popen(
'echo done2', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
)
print(p2)
print("processes ran")
print(p1.stdout.read())
print(p2.stdout.read())
print("processes completed")
# Use context manager to handle process while it is running,
# and gracefully close it
with sp.Popen(
[
'echo', # Command
'here we are' # Command line arguments
],
encoding='utf_8', # Convert from byte to string
stdout=sp.PIPE # Where to send it
) as proc: # Enclose and alias the context manager
print(
proc.stdout.read() # Look at the output
)
for elem in dir(proc):
if not elem.startswith('_'):
print(elem)
###Output
_____no_output_____
###Markdown
***NOTE***: From here on out, there might be different commands used for **Linux** / **MacOS** or **Windows**
###Code
#test_pipe.txt - a file to be used to demonstrate pipe of cat and sort
!echo testing > test_pipe.txt
!echo the >> test_pipe.txt
!echo subprocess >> test_pipe.txt
!echo pipe >> test_pipe.txt
# mac OS
p1 = sp.Popen(['cat','test_pipe.txt'], stdout=sp.PIPE, encoding='utf_8')
# windows OS
# p1 = sp.Popen(['type','test_pipe.txt'], stdout=sp.PIPE, encoding='utf_8')
print(p1.stdout.read())
# mac OS
p1 = sp.Popen(['cat','test_pipe.txt'], stdout=sp.PIPE, encoding='utf_8')
# windows OS
# p1 = sp.Popen(['type','test_pipe.txt'], stdout=sp.PIPE, encoding='utf_8')
p2 = sp.Popen(['sort'], stdin=p1.stdout, stdout=sp.PIPE, encoding='utf_8')
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits
output = p2.communicate()[0]
print(output)
###Output
_____no_output_____
###Markdown
`Popen` can create background processes; this shell-background-like behavior means it does not block. `Popen` has a lot more functionality than `run`.
###Code
sub_popen = sp.Popen(
[
'echo', # Command
'test', # Command line arguments
],
encoding='utf_8', # Convert from byte to string
stdout=sp.PIPE # Where to send it
)
for j in dir(sub_popen):
if not j.startswith('_'):
print(j)
# sub - returned by run
for j in dir(sub):
if not j.startswith('_'):
print(j)
sub_popen.kill() # Close the process
###Output
_____no_output_____
###Markdown
Example creating child process.https://pymotw.com/3/subprocess/A collection of `Popen` examples: https://www.programcreek.com/python/example/50/subprocess.Popen SQL What is a database? * Is an organized collection of data (files)* A way to store and retrieve that information* A relational database is structured to recognize relations between the data elementsE.g. NCBI Gene https://www.ncbi.nlm.nih.gov/gene/statistics https://www.researchgate.net/profile/Adam_Richards3/publication/282134102/figure/fig3/AS:289128232046602@1445944950296/Database-entity-diagram-Data-collected-from-NCBI-the-Gene-Ontology-and-UniProt-are.png More database examples: * The Python dictionary qualifies* A spreadsheet is a type of database – a table* A fasta file could be considered a database Why use databases?* Databases can handle very large data sets * Databases scale well* Databases are concurrent * Databases are fault-tolerant* Your data has a built-in structure to it* Information of a given type is typically stored only once* You can query the data in a database and easily create meaningful reports* You can relate data from different tables What is the Structured Query Language (SQL) ?* SQL is the standard language for relational database management systems (ANSI)* SQL is used to communicate with a database* SQL can be used to: add, remove, modify, request data * SQL is a declarative language - you describe what you want Relational Database Management Systems* Software programs such as Oracle, MySQL, SQLServer, DB2, postgreSQL are the backbone on which a specific database can be built * They are called RDBMS (relational database management systems)* They handle the data storage, indexing, logging, tracking and security * They have a very fine-grained way of granting permissions to users at the level of commands that may be used * Create a database * Create a table * Update or insert data * View certain tables ... and many more * An important part of learning databases is to understand the type of data which is stored in columns and rows. * Likewise when we get to the database design section, it is critically important to know what type of data you will be modeling and storing (and roughly how much, in traditional systems) * Exactly which types are available depends on the database system SQLite * SQLite is a software library that implements a self-contained, serverless, zero-configuration, embedded high-reliability, full-featured, public-domain SQL database engine. SQLite is the most widely deployed database engine in the world (https://sqlite.org/)* A SQLite database is a single file that is transportable* Check-out bioconductor (annotation) packages that come with sqlite databases * hgu133a.db * https://bioconductor.org/packages/release/data/annotation/html/hgu133a.db.html * org.Hs.eg.db - Genome wide annotation for Human, primarily based on mapping using Entrez Gene identifiers * https://bioconductor.org/packages/release/data/annotation/html/org.Hs.eg.db.html SQLite uses a greatly simplified set of data types:* INTEGER - numeric* REAL - numeric* TEXT – text of any length * Dates are held as text* BLOB – binary large objects * Such as images
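As a quick, throwaway illustration of these storage classes, the sketch below builds a small table in an in-memory database (the table and column names are invented for this example and are not part of the class database used next):
###Code
from sqlite3 import connect

# An in-memory database that disappears when the connection is closed
demo_conn = connect(':memory:')
demo_curs = demo_conn.cursor()

# One column per common SQLite storage class; dates are stored as TEXT
demo_curs.execute('''
CREATE TABLE demo (
    id INTEGER,
    score REAL,
    name TEXT,
    collected TEXT,
    image BLOB
);
''')
demo_curs.execute("INSERT INTO demo VALUES (1, 0.5, 'gene A', '2020-01-01', NULL)")
for row in demo_curs.execute('SELECT * FROM demo;'):
    print(row)
demo_curs.close()
demo_conn.close()
###Output
_____no_output_____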
###Code
from sqlite3 import connect
# the file org.Hs.eg.sqlite should be in the datasets folder
# if you pulled the info from the class github repo
# otherwise retrieve from the class github repo or canvas
conn = connect('../datasets/org.Hs.eg.sqlite')
curs = conn.cursor()
# close cursor and connection
curs.close()
conn.close()
conn = connect('org.Hs.eg.sqlite')
curs = conn.cursor()
###Output
_____no_output_____
###Markdown
There is a special sqlite_master table that describes the contents of the database Major SQL commands: SELECT, INSERT, DELETE, UPDATE SELECT - Retrieves data from one or more tables and doesn’t change the data at all * SELECT * (means all columns), or the comma separated names of the columns of data you wish to return * They will return (left to right) in the order received. * FROM is the table source or sources (comma separated)* WHERE (optional) is the predicate clause: conditions for the query * Evaluates to True or False for each row * This clause almost always includes Column-Value pairs. * Omitting the Where clause returns ALL the records in that table. * Note: the match is case sensitive* ORDER BY (optional) indicates a sort order for the output data * default is row_id, which can be very non-intuitive * ASCending or DESCending can be appended to change the sort order. (ASC is default)* In most SQL clients, the ";" indicates the end of a statement and requests execution SELECT - which columns to include in the result, use * for all columns FROM - which tables to use WHERE (optional) - predicate clause, which rows to include '*' selects ALL rows and ALL columns and returns them by column order and row_id
###Code
sql = '''SELECT * FROM sqlite_master;'''
curs.execute(sql)
###Output
_____no_output_____
###Markdown
See result header
###Code
curs.description
###Output
_____no_output_____
###Markdown
See result
###Code
for row in curs: print(row)
###Output
_____no_output_____
###Markdown
WHERE clause example
###Code
sql = '''
SELECT name
FROM sqlite_master
WHERE type= "table";
'''
curs.execute(sql)
for row in curs: print(row)
def get_header(cursor):
'''Makes a header row from the cursor description. Its tab
delimited.
Arguments:
cursor: a cursor after a select query
Returns:
string: A string consisting of the column names separated by tabs, no new line
'''
return '\t'.join([row[0] for row in cursor.description])
# colNames = []
# for row in cursor.description:
# colNames.append(row[0])
# return '\t'.join(colNames)
print(get_header(curs))
sql = '''
SELECT *
FROM go_bp LIMIT 10;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
###Output
_____no_output_____
###Markdown
http://geneontology.org/docs/guide-go-evidence-codes/* Inferred from Experiment (EXP)* Inferred from Direct Assay (IDA)* Inferred from Physical Interaction (IPI)* Inferred from Mutant Phenotype (IMP)* Inferred from Genetic Interaction (IGI)* Inferred from Expression Pattern (IEP) Aliasing column names to make them easier to understand
###Code
sql = '''
SELECT * FROM gene_info LIMIT 5;
'''
curs.execute(sql)
for i in curs.description: print(i[0])
for row in curs: print(row)
sql = '''
SELECT _id 'Gene Identifier', symbol "Gene Symbol"
FROM gene_info LIMIT 5;
'''
curs.execute(sql)
curs.description
curs.fetchall()
sql = '''
SELECT _id 'ID', symbol "Symbol"
FROM gene_info LIMIT 10;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
#select all from go_bp
###Output
_____no_output_____
###Markdown
http://geneontology.org/docs/guide-go-evidence-codes/* Inferred from Experiment (EXP)* Inferred from Direct Assay (IDA)* Inferred from Physical Interaction (IPI)* Inferred from Mutant Phenotype (IMP)* Inferred from Genetic Interaction (IGI)* Inferred from Expression Pattern (IEP)* Inferred from High Throughput Experiment (HTP)* Inferred from High Throughput Direct Assay (HDA)* Inferred from High Throughput Mutant Phenotype (HMP)* Inferred from High Throughput Genetic Interaction (HGI)* Inferred from High Throughput Expression Pattern (HEP)* Inferred from Biological aspect of Ancestor (IBA)* Inferred from Biological aspect of Descendant (IBD)* Inferred from Key Residues (IKR)* Inferred from Rapid Divergence (IRD)* Inferred from Sequence or structural Similarity (ISS)* Inferred from Sequence Orthology (ISO)* Inferred from Sequence Alignment (ISA)* Inferred from Sequence Model (ISM)* Inferred from Genomic Context (IGC)* Inferred from Reviewed Computational Analysis (RCA)* Traceable Author Statement (TAS)* Non-traceable Author Statement (NAS)* Inferred by Curator (IC)* No biological Data available (ND)* Inferred from Electronic Annotation (IEA) SELECT - which columns to include in the result FROM - which tables to use WHERE (optional) - predicate clause, which rows to include ORDER BY (optional) - indicates a sort order for the output data
###Code
sql = '''
SELECT _id, go_id
FROM go_bp
WHERE evidence="ND"
ORDER BY _id DESC
LIMIT 20;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
#curs.fetchall()
#for row in curs: print(row)
###Output
_____no_output_____
###Markdown
COUNT returns a single number, which is the count of all rows in the table
###Code
sql = '''
SELECT count(*) FROM genes;
'''
curs.execute(sql)
curs.fetchall()
sql = '''
SELECT count(_id) AS 'Number of genes'
FROM genes;
'''
curs.execute(sql)
print(get_header(curs))
curs.fetchall()[0][0]
###Output
_____no_output_____
###Markdown
DISTINCT selects non-duplicated elements (rows)
###Code
sql = '''
SELECT _id FROM go_bp LIMIT 20;
'''
curs.execute(sql)
curs.fetchall()
sql = '''
SELECT DISTINCT _id FROM go_bp LIMIT 10;
'''
curs.execute(sql)
curs.fetchall()
#count the number of rows on go_bp
sql = '''
SELECT DISTINCT _id FROM go_bp;
'''
curs.execute(sql)
result = curs.fetchall()
len(result)
###Output
_____no_output_____
###Markdown
WHERE clause operators https://www.sqlite.org/lang_expr.html <>, != inequality = equal > greater than >= greater than or equal BETWEEN v1 AND v2 tests that a value lies in a given range EXISTS tests for existence of rows matching query IN tests if a value falls within a given set or query IS [ NOT ] NULL is or is not null [ NOT ] LIKE tests value to see if like or not like another % is the wildcard in SQL, used in conjunction with LIKE
###Code
sql = '''
SELECT * FROM go_bp
WHERE _id = '1';
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
sql = '''
SELECT * FROM go_bp
WHERE _id IN (1,5,7);
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
sql = '''
SELECT * FROM go_bp
WHERE evidence = 'ND' AND _id BETWEEN 20 AND 2000
LIMIT 10
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
sql = '''
SELECT *
FROM go_bp
WHERE go_id LIKE '%0081%'
LIMIT 10;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
# Retrieve rows from go_bp where the go_id is GO:0008104 and evidence is IEA or IDA
###Output
_____no_output_____
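###Markdown
One possible answer to the exercise above, combining the operators just shown (it assumes the same go_bp columns used in the earlier queries):
###Code
sql = '''
SELECT *
FROM go_bp
WHERE go_id = 'GO:0008104' AND evidence IN ('IEA', 'IDA')
LIMIT 10;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
    print('\t'.join([str(elem) for elem in row ]))
###Output
_____no_output_____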
###Markdown
Sqlite3 also has some PRAGMA methods, an SQL extension specific to SQLite used to modify the operation of the SQLite library or to query the SQLite library for internal (non-table) data https://www.sqlite.org/pragma.html The code below shows how to get the schema (column names and column information)
###Code
sql = 'PRAGMA table_info("go_bp")'
curs.execute(sql)
curs.fetchall()
sql = '''SELECT * FROM pragma_table_info("go_bp") '''
curs.execute(sql)
curs.fetchall()
sql = '''
SELECT _id, symbol, gene_name
FROM gene_info
WHERE _id IN
(SELECT DISTINCT _id
FROM go_bp
WHERE go_id == 'GO:0008104');
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
###Output
_____no_output_____
###Markdown
GROUP BY groups by a column and creates summary data for a different column
###Code
sql = '''
SELECT go_id, count(*) FROM go_bp GROUP BY go_id LIMIT 10;
'''
curs.execute(sql)
curs.fetchall()
sql = '''
SELECT go_id, count(_id) as gene_no FROM go_bp GROUP BY go_id LIMIT 10;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
###Output
_____no_output_____
###Markdown
HAVING allows restrictions on the groups created by GROUP BY (it filters grouped results, whereas WHERE filters individual rows)
###Code
sql = '''
SELECT go_id, count(_id) as gene_no FROM go_bp GROUP BY go_id
HAVING gene_no>500;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
# Select gene ids with more than 100 biological processes associated
###Output
_____no_output_____
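###Markdown
A possible answer to the exercise above; it mirrors the HAVING query but groups by gene (_id) instead of GO term:
###Code
sql = '''
SELECT _id, count(go_id) AS bp_no
FROM go_bp
GROUP BY _id
HAVING bp_no > 100;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
    print('\t'.join([str(elem) for elem in row ]))
###Output
_____no_output_____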
###Markdown
See the create table statement
###Code
sql = '''
SELECT name,sql
FROM sqlite_master
WHERE type= "table" and name == "go_bp"
LIMIT 2;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
print(row[1])
curs.close()
conn.close()
###Output
_____no_output_____ |
model/Reff_estimator.ipynb | ###Markdown
EpiEstim (python) Configure the data
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-poster')
from datetime import datetime as dt
from datetime import timedelta
import glob
from Reff_functions import *
from Reff_constants import *
from scipy.stats import gamma
#Code taken from read_in_cases from Reff_functions. Preprocessing was not helpful for this situation.
def read_cases_lambda(case_file_date):
path = "../data/COVID-19 UoM "+case_file_date+"*.xlsx"
for file in glob.glob(path):
df_NNDSS = pd.read_excel(file,
parse_dates=['SPECIMEN_DATE','NOTIFICATION_DATE','NOTIFICATION_RECEIVE_DATE','TRUE_ONSET_DATE'],
dtype= {'PLACE_OF_ACQUISITION':str})
df_NNDSS.PLACE_OF_ACQUISITION.fillna('00038888',inplace=True) #Fill blanks with simply unknown
# df_NNDSS['date_inferred'] = df_NNDSS.TRUE_ONSET_DATE
# df_NNDSS.loc[df_NNDSS.TRUE_ONSET_DATE.isna(),'date_inferred'] = df_NNDSS.loc[df_NNDSS.TRUE_ONSET_DATE.isna()].NOTIFICATION_DATE - timedelta(days=5)
# df_NNDSS.loc[df_NNDSS.date_inferred.isna(),'date_inferred'] = df_NNDSS.loc[df_NNDSS.date_inferred.isna()].NOTIFICATION_RECEIVE_DATE - timedelta(days=6)
df_NNDSS['imported'] = df_NNDSS.PLACE_OF_ACQUISITION.apply(lambda x: 1 if x[-4:]=='8888' and x != '00038888' else 0)
df_NNDSS['local'] = 1 - df_NNDSS.imported
df_interim = df_NNDSS[['NOTIFICATION_RECEIVE_DATE','STATE','imported','local']]
#df_interim = df_interim[~np.isnat(df_interim.NOTIFICATION_DATE)] #Get rid of non-existent dates.
# Importantly, imported and local are indicator variables in df_interim.
#df_state = df_NNDSS[['NOTIFICATION_DATE','STATE','imported','local']].groupby(['STATE','NOTIFICATION_DATE']).sum()
return(df_interim)
def tidy_cases_lambda(interim_data, remove_territories=True):
#Remove non-existent notification dates
interim_data = interim_data[~np.isnat(interim_data.NOTIFICATION_RECEIVE_DATE)]
#Filter out territories
if(remove_territories):
df_linel = interim_data[(interim_data['STATE']!='NT') & (interim_data['STATE']!='ACT')]
#Melt down so that imported and local are no longer columns. Allows multiple draws for infection date.
#i.e. create linelist data
df_linel = df_linel.melt(id_vars = ['NOTIFICATION_RECEIVE_DATE','STATE'], var_name = 'SOURCE',value_name='n_cases')
#Reset index or the joining doesn't work
df_linel = df_linel[df_linel.n_cases!=0]
df_linel = df_linel.reset_index(drop=True)
return(df_linel)
date = '10Aug'
df_interim = read_cases_lambda(date)
df_linel = tidy_cases_lambda(df_interim)
###Output
_____no_output_____
###Markdown
Part 1: Inferring infection dates$\Lambda$ depends on the infection date (ID), while the data contains the notification date (ND). We obtain ID through the following relationship:$$ID = ND - reporting\_delay - incubation\_period.$$A gamma distribution was fitted to case data using the MLE algorithm to produce distributions for reporting delay and incubation period.
###Code
##uncomment for debugging
# notification_dates = df_linel['NOTIFICATION_DATE']
# mean_rd = 5.47
# sd_rd = 4.04
# mean_inc = 2.0
# sd_inc = 1.41
# nreplicates = 3
##gamma draws take arguments (shape, scale)
def draw_inf_dates(df_linelist, shape_rd=2.77, scale_rd=3.17, offset_rd=0,
shape_inc=2.0/1.5, scale_inc=1.5, offset_inc=1,nreplicates=1):
notification_dates = df_linelist['NOTIFICATION_RECEIVE_DATE']
nsamples = notification_dates.shape[0]
# DEFINE DELAY DISTRIBUTION
# mean_rd = 5.47
# sd_rd = 4.04
#scale_rd = shape_rd/(scale_rd)**2
#shape_rd = shape_rd/scale_rd
# DEFINE INCUBATION PERIOD DISTRIBUTION
# mean_inc = 2.0
# sd_inc = 1.41
#scale_inc = (scale_inc)**2/shape_inc #scale**2 = var / shape
#shape_inc =(scale_inc)**2/scale_inc**2
#Draw from distributions - these are long vectors
inc_period = offset_inc+np.random.gamma(shape_inc, scale_inc, size = (nsamples*nreplicates))
rep_delay = offset_rd+np.random.gamma(shape_rd, scale_rd, size = (nsamples*nreplicates))
#infection date is id_nd_diff days before notification date. This is also a long vector.
id_nd_diff = inc_period + rep_delay
#Minutes aren't included in df. Take the ceiling because the day runs from 0000 to 2359. This can still be a long vector.
whole_day_diff = np.ceil(id_nd_diff)
time_day_diffmat = whole_day_diff.astype('timedelta64[D]').reshape((nsamples, nreplicates))
#Vector must be coerced into a nsamples by nreplicates array. Then each column must be subtracted from notification_dates.
#Subtract days off of notification dates.
notification_mat = np.tile(notification_dates, (nreplicates,1)).T #notification_dates is repeated as a column nreplicates times.
infection_dates = notification_mat - time_day_diffmat
#Make infection dates into a dataframe
datecolnames = [*map(str,range(nreplicates))]
infdates_df = pd.DataFrame(infection_dates,columns = datecolnames)
    #Uncomment this if there are errors
#print([df_linelist.shape, infdates_df.shape])
#Combine infection dates and original dataframe
df_inf = pd.concat([df_linelist, infdates_df], axis=1, verify_integrity=True)
return(df_inf)
df_inf = draw_inf_dates(df_linel, nreplicates=1000)
df_inf.head()
def index_by_infection_date(infections_wide):
datecolnames = [*infections_wide.columns[4:]]
df_combined = infections_wide[['STATE','SOURCE',datecolnames[0],'n_cases']].groupby(['STATE', datecolnames[0],'SOURCE']).sum()
#For each column (cn=column number): concatenate each sample as a column.
for cn in range(1,len(datecolnames)):
df_addin = infections_wide[['STATE','SOURCE',datecolnames[cn],'n_cases']].groupby(['STATE', datecolnames[cn],'SOURCE']).sum()
df_combined = pd.concat([df_combined,df_addin], axis=1, ignore_index = True)
#NaNs are inserted for missing values when concatenating. If it's missing, there were zero infections
df_combined[np.isnan(df_combined)]=0
#Rename the index.
df_combined.index.set_names(["STATE","INFECTION_DATE","SOURCE"], inplace=True)
#return(df_combined)
##INCLUDE ALL DAYS WITH ZERO INFECTIONS IN THE INDEX AS WELL.
# Reindex to include days with zero total infections.
local_infs = df_combined.xs('local',level='SOURCE')
imported_infs = df_combined.xs('imported',level='SOURCE')
statelist = [*df_combined.index.get_level_values('STATE').unique()]
#Should all states have the same start date? Current code starts from the first case in each state.
#For the same start date:
local_statedict = dict(zip(statelist, np.repeat(None, len(statelist))))
imported_statedict = dict(zip(statelist, np.repeat(None, len(statelist))))
#Determine start date as the first infection date for all.
#start_date = np.datetime64("2020-02-01")
start_date = df_combined.index.get_level_values('INFECTION_DATE').min()
#Determine end dates as the last infected date by state.
index_only = df_combined.index.to_frame()
index_only = index_only.reset_index(drop=True)
maxdates = index_only.groupby(['STATE'])['INFECTION_DATE'].max()
for aus_state in statelist:
state_data = local_infs.xs(aus_state, level='STATE')
#start_date = state_data.index.min()
#dftest.index=dftest.reindex(alldates, fill_value=0)
alldates = pd.date_range(start_date, maxdates[aus_state]) #All days from start_date to the last infection day.
local_statedict[aus_state] = state_data.reindex(alldates, fill_value=0)
for aus_state in statelist:
state_data = imported_infs.xs(aus_state, level='STATE')
alldates = pd.date_range(start_date, maxdates[aus_state])
imported_statedict[aus_state] = state_data.reindex(alldates, fill_value=0)
#Convert dictionaries to data frames
df_local_inc_zeros = pd.concat(local_statedict)
df_local_inc_zeros['SOURCE']='local'
df_imp_inc_zeros = pd.concat(imported_statedict)
df_imp_inc_zeros['SOURCE']='imported'
#Merge dataframes and reindex.
df_inc_zeros = pd.concat([df_local_inc_zeros, df_imp_inc_zeros])
df_inc_zeros = df_inc_zeros.reset_index()
df_inc_zeros= df_inc_zeros.groupby(['level_0',"level_1","SOURCE"]).sum()
df_inc_zeros.index = df_inc_zeros.index.rename(['STATE','INFECTION_DATE',"SOURCE"])
return(df_inc_zeros)
df_inc_zeros = index_by_infection_date(df_inf)
df_inc_zeros.head()
# unit test to ensure min and max dates captures all imputations of
# infection dates
summary = np.sum(df_inc_zeros, axis=0).describe() #Differences in numbers: start date?
assert summary.loc['max'] == summary.loc['min'], "Min number of cases does not match max number, dates have truncated cases"
###Output
_____no_output_____
###Markdown
Part 2: Calculating Lambda$$\Lambda_t(w_s) = \sum_{s=1}^t (I_{t-s}^{local} + I_{t-s}^{imported})w_s = \sum_{s=1}^t I_{t-s}w_s,$$where $w_s$ is the probability that the generation interval is $s$ and $I_t$ is the number of infected individuals at time $t$. Part 2a: Discretizing the gamma generation interval distributionIn the formula for $\Lambda_t$, we sum over $w$. We should consider generation interval as a discrete random variable here.
###Code
#Define gamma distribution for generation interval
#mean_gen = 2
#sd_gen = 1.74
scale_gen = 1#mean_gen/(sd_gen)**2
shape_gen = 2#mean_gen/scale_gen
trunc_days = 21
shift=0
xmids = [x+shift for x in range(trunc_days+1)] #Find midpoints for discretisation
#scipy uses scale in the compsci sense
gamma_vals = gamma.pdf(xmids,
a=shape_gen,
scale=scale_gen)
disc_gamma = gamma_vals/sum(gamma_vals)
#Discretisation error check (should sum to 1)
print("Sum of gamma values is " + str(sum(gamma_vals))+"; \n Sum of discretised gamma values is " + str(sum(disc_gamma)))
xrange = np.linspace(0,trunc_days,150)
fig,ax = plt.subplots(figsize=(12,9))
w = ax.bar(xmids,height=disc_gamma, width=1)
ax.set_title("Generation time distribution")
ax.plot(xrange, gamma.pdf(xrange, a=shape_gen, scale=scale_gen), linewidth=4,alpha=0.8, color="orange")
ax.set_xlabel('Days')
plt.show()
###Output
_____no_output_____
###Markdown
Part 2b: Calculating $\Lambda$
###Code
def generate_lambda(infection_dates, shape_gen=2, scale_gen=1,
trunc_day=21,shift=0, offset=1):
"""
Given array of infection_dates (N_dates by N_samples), where values are possible
number of cases infected on this day, generate the force of infection Lambda_t,
a N_dates-tau by N_samples array.
"""
from scipy.stats import gamma
#scale_gen = mean_gen/(sd_gen)**2
#shape_gen = mean_gen/scale_gen
    xmids = [x+shift for x in range(trunc_day+1)] #Find midpoints for discretisation (use the trunc_day argument, not the global)
gamma_vals = gamma.pdf(xmids, a=shape_gen, scale=scale_gen) #double check parameterisation of scipy
#renormalise the pdf
disc_gamma = gamma_vals/sum(gamma_vals)
ws = disc_gamma[:trunc_day]
#offset
ws[offset:] = disc_gamma[:trunc_day-offset]
ws[:offset] = 0
lambda_t = np.zeros(shape=(infection_dates.shape[0]-trunc_day+1, infection_dates.shape[1]))
for n in range(infection_dates.shape[1]):
lambda_t[:,n] = np.convolve(infection_dates[:,n], ws, mode='valid')
return lambda_t
def lambda_all_states(df_infection, **kwargs):
"""
    Use generate_lambda on every state
"""
statelist = [*df_infection.index.get_level_values('STATE').unique()]
lambda_dict ={}
for state in statelist:
df_total_infections = df_infection.groupby(['STATE','INFECTION_DATE']).agg(sum)
lambda_dict[state] = generate_lambda(
df_total_infections.loc[state].values,
**kwargs
)
return lambda_dict
trunc_day = 21
#get all lambdas
lambda_dict = lambda_all_states(df_inc_zeros,
shape_gen=2,scale_gen=1,offset=1,
trunc_day=trunc_day)
lambda_dict['VIC']
###Output
_____no_output_____
###Markdown
3. Sample from the posteriorUsing Cori et al. 2013, the posterior distribution of $R_{t,\tau}$ is a Gamma distribution with parameters shape and scale\begin{equation}\left( a + \sum^t_{s = t - \tau +1} I_s , \frac{1}{\frac{1}{b} + \sum^t_{ s = t-\tau + 1} \Lambda_s } \right)\end{equation}
###Code
def Reff_from_case(cases_by_infection, lamb, prior_a=1, prior_b=5, tau=7, samples=1000):
"""
    Using Cori et al. 2013, given case incidence by date of infection, and the force
of infection \Lambda_t on day t, estimate the effective reproduction number at time
t with smoothing parameter \tau.
cases_by_infection: A T by N array, for T days and N samples
lamb : A T by N array, for T days and N samples
"""
csum_incidence = np.cumsum(cases_by_infection, axis = 0)
#remove first few incidences to align with size of lambda
# Generation interval length 20
csum_incidence = csum_incidence[20:,:]
csum_lambda = np.cumsum(lamb, axis =0)
roll_sum_incidence = csum_incidence[tau:, :] - csum_incidence[:-tau, :]
roll_sum_lambda = csum_lambda[tau:,:] - csum_lambda[:-tau,:]
a = prior_a + roll_sum_incidence
b = 1/(1/prior_b + roll_sum_lambda)
R = np.random.gamma(a,b) #shape, scale
    #Need to empty R when there are too few cases...
#Use array inputs to output to same size
#inputs are T-tau by N, output will be T-tau by N
#
return a,b, R
def generate_summary(samples, dates_by='rows'):
"""
Given an array of samples (T by N) where rows index the dates,
generate summary statistics and quantiles
"""
if dates_by=='rows':
#quantiles of the columns
ax = 1
else:
#quantiles of the rows
ax = 0
mean = np.mean(samples, axis = ax)
bottom, lower, median, upper, top = np.quantile(samples,
(0.05, 0.25, 0.5, 0.75, 0.95),
axis =ax)
std = np.std(samples, axis = ax)
output = {
'mean':mean,
'std':std,
'bottom':bottom,
'lower':lower,
'median':median,
'upper':upper,
'top': top,
}
return output
def plot_Reff(Reff:dict, dates=None, ax_arg=None, **kwargs):
"""
Given summary statistics of Reff as a dictionary, plot the distribution over time
"""
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
from datetime import datetime as dt
if ax_arg is None:
fig, ax = plt.subplots(figsize=(12,9))
else:
fig, ax = ax_arg
color_cycle = ax._get_lines.prop_cycler
curr_color = next(color_cycle)['color']
if dates is None:
dates = range(len(Reff['mean']))
ax.plot(dates, Reff['mean'], color= curr_color, **kwargs)
ax.fill_between(dates, Reff['lower'],Reff['upper'], alpha=0.4, color = curr_color)
ax.fill_between(dates, Reff['bottom'],Reff['top'], alpha=0.4, color= curr_color)
#grid line at R_eff =1
ax.set_yticks([1],minor=True,)
ax.set_yticks([0,2,3],minor=False)
ax.set_yticklabels([0,2,3],minor=False)
ax.yaxis.grid(which='minor',linestyle='--',color='black',linewidth=2)
ax.tick_params(axis='x', rotation = 45)
return fig, ax
###Output
_____no_output_____
###Markdown
4. Plot the estimates
###Code
tau = 14
prior_a=1
prior_b=5
#get all lambdas
lambda_DL = lambda_all_states(df_inc_zeros)
states = [initial[1] for initial in sorted(list(states_initials.items()))]
states.remove('NT')
states.remove('ACT')
#read in old LSHTM estimates
df_L_R = read_in_LSHTM()
date_filter = pd.date_range(start='2020-03-01',end='2020-08-01')
#prepare NNDSS cases
df_cases = df_interim.groupby(['NOTIFICATION_RECEIVE_DATE','STATE']).agg(sum)
df_cases = df_cases.reset_index()
fig, ax = plt.subplots(nrows=2, ncols=3,
sharex=True, sharey=True,
figsize=(15,12)
)
for i,state in enumerate(states):
row = i//3
col = i%3
lambda_state = lambda_DL[state]
df_state_I = df_inc_zeros.xs((state,'local'),level=('STATE','SOURCE'))
a,b,R = Reff_from_case(df_state_I.values,lambda_state,prior_a=1, prior_b=2, tau=tau)
R_summary = generate_summary(R)
fig, ax[row,col] = plot_Reff(R_summary,
dates=df_state_I.index.values[trunc_days-1+tau:],
ax_arg=(fig, ax[row,col]),
label='Our Model')
#plot formatting
ax[row,col].set_title(state)
ax[row,col].set_ylim((0,4))
ax[row,col].set_xlim((pd.to_datetime('2020-03-01'),pd.to_datetime('2020-08-10')))
#plot cases behind
ax2 = ax[row,col].twinx()
ax2.bar(df_cases.loc[df_cases.STATE==state,'NOTIFICATION_RECEIVE_DATE'],
df_cases.loc[df_cases.STATE==state,'local']+df_cases.loc[df_cases.STATE==state,'imported'],
color='grey',
alpha=0.3
)
ax2.bar(df_cases.loc[df_cases.STATE==state,'NOTIFICATION_RECEIVE_DATE'],
df_cases.loc[df_cases.STATE==state,'local'],
color='grey',
alpha=0.8
)
#plot old LSHTM estimates
df_june = df_L_R.loc[(df_L_R.date_of_analysis=='2020-07-27')&(df_L_R.state==state)]
df = df_june.loc[(df_june.date.isin(date_filter))]
ax[row,col].plot(df.date, df['median'], label='Old LSHTM',color='C1')
ax[row,col].fill_between(df.date, df['bottom'], df['top'],color='C1', alpha=0.3)
ax[row,col].fill_between(df.date, df['lower'], df['upper'],color='C1', alpha=0.3)
plt.legend()
plt.savefig("../figs/EpyEstim_tau_"+str(tau)+"_"+date+".png",dpi=300)
plt.show()
###Output
No handles with labels found to put in legend.
###Markdown
Sophie's implementationUse this as a unit test for changes to the estimator
###Code
#ws is the discretised gamma distribution; reversed due to the formula for lambda t.
ws = [*reversed(disc_gamma[:(trunc_days+1)])] #was taken from 1 before.
#Calculate lambda t for a given t in one state.
def calculate_lambda_t(state_df, t, trunc_days = 21, ws=ws):
#t = predict_date_range[30]
#state_df = input_state
tstart= t-np.timedelta64(trunc_days,'D')
relevant_dates = pd.date_range(tstart, t-np.timedelta64(1,'D'))
reldates_df = state_df.loc[relevant_dates]
#Dates don't matter, since we're calculating lambda t for t = t.
reldates_df = reldates_df.reset_index(drop=True)
ws_mat = pd.DataFrame(np.tile(ws, (reldates_df.shape[1],1)).T)
#lambda_t=sum(reldates*ws)
lambda_t = np.sum(reldates_df.mul(ws_mat), axis=0)
return(lambda_t)
#Loop over states and times to calculate all lambda t's
#Input: imported/local counts of infections by date and state. Each column should be a different sample.
#Output: Lambda t by date and state. Each column corresponds to a different original sample.
def calculate_all_lambdas(infection_df):
#Depending on data format, flatten if necessary
if type(infection_df.index)!=pd.RangeIndex:
infection_df = infection_df.reset_index()
#Create data frame with the total number of infections.
I_total = infection_df.groupby(['STATE',"INFECTION_DATE"]).sum()
#Find states and preallocate to dict
statelist = [*I_total.index.get_level_values('STATE').unique()]
state_dict = dict(zip(statelist, np.repeat(None, len(statelist))))
predict_reff_from = np.datetime64('2020-02-01')
#Calculate Reff for each state.
for state in statelist:
#print(state)
input_state_df = I_total.xs(state, level='STATE')
tmax = input_state_df.index.get_level_values('INFECTION_DATE').max()
predict_date_range = pd.date_range(predict_reff_from, tmax)
date_dict = dict(zip(predict_date_range, np.repeat(None, len(predict_date_range))))
#Find lambda t for every day.
for t in predict_date_range:
#print(t)
date_dict[t] = calculate_lambda_t(input_state_df, t).to_numpy()
state_dict[state]=date_dict
#Convert dict to a dataframe
lambda_df = pd.DataFrame.from_dict({(i,j): state_dict[i][j]
for i in state_dict.keys()
for j in state_dict[i].keys()},
orient='index')
lambda_df.index = pd.MultiIndex.from_tuples(lambda_df.index,names = ['STATE','INFECTION_DATE'])
return(lambda_df)
lambdas = calculate_all_lambdas(df_inc_zeros)
#test run on a state
state='SA'
tau = 14
#df_VIC = df_inc_zeros.xs(('VIC','local'),level=('STATE','SOURCE'))
#lambda_VIC = generate_lambda(df_VIC.values )
#get all lambdas
lambda_DL = lambda_all_states(df_inc_zeros)
#select lambda for the right state
lambda_VIC = lambda_DL[state]
df_VIC = df_inc_zeros.xs((state,'local'),level=('STATE','SOURCE'))
a,b,R = Reff_from_case(df_VIC.values,lambda_VIC,prior_a=1, prior_b=2, tau = tau)
R_summary = generate_summary(R)
fig, ax = plot_Reff(R_summary, dates=df_VIC.index.values[20+tau:])
a,b,R = Reff_from_case(df_VIC.values[7:],lambdas.loc[state].values,prior_a=1, prior_b=2, tau = tau)
R_summary = generate_summary(R)
fig, ax = plot_Reff(R_summary, dates=df_VIC.index.values[27+tau:],ax_arg =(fig,ax))
#grid line at R_eff =1
ax2 = ax.twinx()
df_cases = df_interim.groupby(['NOTIFICATION_RECEIVE_DATE','STATE']).agg(sum)
df_cases = df_cases.reset_index()
ax2.bar(df_cases.loc[df_cases.STATE==state,'NOTIFICATION_RECEIVE_DATE'],
df_cases.loc[df_cases.STATE==state,'local']+df_cases.loc[df_cases.STATE==state,'imported'],
color='grey',
alpha=0.3
)
ax2.bar(df_cases.loc[df_cases.STATE==state,'NOTIFICATION_RECEIVE_DATE'],
df_cases.loc[df_cases.STATE==state,'local'],
color='grey',
alpha=0.8
)
plt.show()
###Output
_____no_output_____ |
notebooks/het04_linear_fits.ipynb | ###Markdown
Leveraging the dataframe generated in het03, we are going to look at the best summary statistics.First with some plots, then with standard linear models, last with more flexible bayesian modeling.
###Code
%run ../scripts/notebook_settings_lean.py
###Output
_____no_output_____
###Markdown
Reading in metadata and stats
###Code
metadata = pd.read_csv("/home/eriks/primatediversity/people/erik/data/Primate_data_Erik - FROH.csv")
stats_df = pd.read_csv("../steps/het_dataframe_het03.txt")
###Output
_____no_output_____
###Markdown
Calculating ratios. x autosomes ratio both based on means and medians.
###Code
total_df = pd.merge(stats_df, metadata, on = "PDGP_ID")
total_df["x_a_ratio"] = total_df.x_het_mean/total_df.aut_het_mean
total_df["x_a_ratio_median"] = total_df.x_het_median/total_df.aut_het_median
total_df["FROH"] = total_df.FROH.astype(float)
###Output
_____no_output_____
###Markdown
Plotting the relationship. I am using a FacetGrid to distinguish between the various genera.
###Code
g = sns.FacetGrid(data=total_df, col="GENUS", col_wrap = 10)
g.map_dataframe(sns.scatterplot, x="aut_het_mean", y="x_a_ratio", hue="SPECIES")
###Output
_____no_output_____
###Markdown
Check on the very low ratios.They seem to be males, as their heterozygosity on autosomes is drastically higher.
###Code
total_df.loc[total_df.x_a_ratio < 0.1]
g = sns.FacetGrid(data=total_df, col="GENUS", col_wrap = 10)
g.map_dataframe(sns.scatterplot, x="FROH", y="x_a_ratio", hue="SPECIES")
###Output
_____no_output_____
###Markdown
Distribution without very low individuals.
###Code
ss = total_df.loc[total_df.x_a_ratio > 0.05]
g = sns.histplot(data=total_df, x="x_a_ratio")
g.set_xlabel("X autosome ratio")
g.set_title("Based on {} females".format(len(ss)))
total_df.loc[total_df.x_a_ratio < 0.1]
total_df.loc[total_df.GENUS == "Macaca"]
###Output
_____no_output_____
###Markdown
Checking the low outliers
###Code
ss = total_df.loc[total_df.x_a_ratio < 0.2]
g = sns.FacetGrid(data=ss, col="GENUS", col_wrap = 10)
g.map_dataframe(sns.scatterplot, x="FROH", y="x_a_ratio", hue="SPECIES")
###Output
_____no_output_____
###Markdown
Additional visualizations inspired by the earlier notebooks (het01/02).
###Code
sns.scatterplot(data=total_df, x="aut_het_mean", y="x_a_ratio")
###Output
_____no_output_____
###Markdown
Some quick regressions
###Code
from sklearn import linear_model
from sklearn.feature_selection import RFE
from sklearn.svm import SVR
###Output
_____no_output_____
###Markdown
I am going to see which columns are best at predicting x_a_ratio (excepting the x_het).
###Code
total_df.columns
# This is simply too many features for this time.
X = total_df[['aut_het_std', 'aut_het_mean', 'aut_het_median',
'aut_std', 'aut_mean', 'aut_median', 'aut_cons_windows',
'aut_q0.05', 'aut_q0.1', 'aut_q0.2', 'aut_q0.3', 'aut_q0.5', 'aut_q0.7', 'aut_q0.9',
'x_q0.05', 'x_q0.1', 'x_q0.2', 'x_q0.3', 'x_q0.5', 'x_q0.7', 'x_q0.9',
'FROH']]
X = total_df[['aut_het_std', 'aut_het_mean', 'aut_het_median',
'aut_q0.05', 'aut_q0.1',
'x_q0.05',
'FROH']]
y = total_df['x_a_ratio']
lm = linear_model.LinearRegression()
model = lm.fit(X,y)
predictions = lm.predict(X)
print(lm.score(X,y), lm.coef_)
estimator = SVR(kernel="linear")
selector = RFE(estimator, n_features_to_select=5, step=1)
selector = selector.fit(X, y)
selector.ranking_
estimator = SVR(kernel="linear")
selector = RFE(estimator, n_features_to_select=2, step=1)
selector = selector.fit(X, y)
selector.ranking_
X = total_df[['aut_het_mean',
'aut_q0.05', 'aut_q0.1',
'x_q0.05',
'FROH']]
y = total_df['x_a_ratio']
lm = linear_model.LinearRegression()
model = lm.fit(X,y)
predictions = lm.predict(X)
print(lm.score(X,y), lm.coef_)
###Output
0.1930423565490368 [-2.56051534e-05 -6.54025939e-02 4.53543921e-01 -1.49078831e-03
3.47566926e-01]
###Markdown
Normalization check.
###Code
normalized_df=(total_df-total_df.mean())/total_df.std()
X = normalized_df[['aut_het_mean', 'aut_het_median',
'aut_q0.05', 'aut_q0.1',
'x_q0.05', 'x_q0.1',
'FROH']]
y = total_df['x_a_ratio']
lm = linear_model.LinearRegression()
model = lm.fit(X,y)
predictions = lm.predict(X)
print(lm.score(X,y), lm.coef_)
###Output
0.3088028603937024 [-2.82103383e-01 2.82139734e-01 -7.35570037e-02 8.87751310e-02
3.92638873e+00 -3.95338742e+00 -3.87331324e-04]
|
notebooks/1.0-Data_exploration.ipynb | ###Markdown
Data exploration
###Code
from scipy.io import wavfile
import matplotlib.pyplot as plt
rate, data = wavfile.read('../data/samples/birds1.wav')
data = np.mean(data, axis=1)
plt.rc('font', size=12)
fig, axs = plt.subplots(2,1, figsize=(20, 10))
axs[0].plot(data)
axs[0].set_ylabel('Amplitude')
axs[0].set_xlim(0, len(data))
axs[0].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
axs[1].specgram(data, Fs=rate, cmap=plt.get_cmap('magma'))
axs[1].set_xlabel('Time')
axs[1].set_ylabel('Frequency')
plt.tight_layout()
plt.show()
# fig.savefig('../reports/figures/demo.png', dpi=100)
###Output
/home/ignacio/anaconda3/lib/python3.6/site-packages/scipy/io/wavfile.py:273: WavFileWarning: Chunk (non-data) not understood, skipping it.
WavFileWarning)
|
3_AlanineDipeptide.ipynb | ###Markdown
AlanineDipeptide 1. Training & Loading
###Code
import numpy as np
import torch
from torch import nn
import flow
import train
import utils
import math
import h5py
# Set gobal variables.
rootFolder = "./demo/Model_CC(=O)NC(C)C(=O)NC_Batch_200_T_300_depthLevel_1_l8_M2_H128/"
device = torch.device("cpu")
dtype = torch.float32
smile = "CC(=O)NC(C)C(=O)NC"
dataset = "./database/alanine-dipeptide-3x250ns-heavy-atom-positions.npz"
# Load paremeters
with h5py.File(rootFolder+"/parameter.hdf5","r") as f:
n = int(np.array(f["n"]))
numFlow = int(np.array(f["numFlow"]))
lossPlotStep = int(np.array(f["lossPlotStep"]))
hidden = int(np.array(f["hidden"]))
nlayers = int(np.array(f["nlayers"]))
nmlp = int(np.array(f["nmlp"]))
lr = int(np.array(f["lr"]))
batchSize = int(np.array(f["batchSize"]))
Nepochs = int(np.array(f["Nepochs"]))
K = int(np.array(f["K"]))
fix = np.array(f["fix"])
scaling = float(np.array(f["scaling"]))
# Rebuild the model.
def innerBuilder(num):
maskList = []
for i in range(nlayers):
if i %2==0:
b = torch.zeros(num)
i = torch.randperm(b.numel()).narrow(0, 0, b.numel() // 2)
b.zero_()[i] = 1
b=b.reshape(1,num)
else:
b = 1-b
maskList.append(b)
maskList = torch.cat(maskList,0).to(torch.float32)
fl = flow.RNVP(maskList, [utils.SimpleMLPreshape([num]+[hidden]*nmlp+[num],[nn.Softplus()]*nmlp+[None]) for _ in range(nlayers)], [utils.SimpleMLPreshape([num]+[hidden]*nmlp+[num],[nn.Softplus()]*nmlp+[utils.ScalableTanh(num)]) for _ in range(nlayers)])
return fl
from utils import flowBuilder
f = flowBuilder(n,numFlow,innerBuilder,1).to(device).to(dtype)
# Load saving.
import os
import glob
name = max(glob.iglob(rootFolder+"savings/"+'*.saving'), key=os.path.getctime)
print("load saving at "+name)
saved = torch.load(name,map_location=device)
f.load(saved);
###Output
/Users/lili/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
###Markdown
2. Analysis
###Code
# Calculate modes in the latent space.
d0 = f.layerList[0].elements[:n]
d1 = f.layerList[0].elements[n:]
omega = (1/(torch.exp(d0+d1))).detach()
omega, idx = torch.sort(omega)
from matplotlib import pyplot as plt
klist = np.arange(len(omega)) +1
plt.figure()
plt.plot(klist, omega.detach().cpu().numpy(), 'o', markerfacecolor='none', markeredgewidth=2)
plt.xlabel('$k$')
plt.ylabel('$\omega_k$')
plt.yscale('log')
plt.legend(loc='lower right')
from thirdparty import kraskov_mi
Nsamples = 5
Npersample = 1000
loadrange = ['arr_0','arr_1','arr_2']
from utils import loadmd, variance, smile2mass
SMILE = smile2mass(smile)
pVariance = torch.tensor([variance(torch.tensor(item),K) for item in SMILE]).reshape(1,-1).repeat(3,1).permute(1,0).reshape(-1).to(dtype)
theta = loadmd("./database/alanine-dipeptide-3x250ns-backbone-dihedrals.npz",loadrange,1,[0,0,0]).to(dtype)
data = loadmd("./database/alanine-dipeptide-3x250ns-heavy-atom-positions.npz",loadrange,scaling,fix).to(dtype)
perm = np.arange(data.shape[0])
np.random.shuffle(perm)
data = data[perm][:Nsamples* Npersample, :]
theta = theta[perm][:Nsamples* Npersample, :]
batchsize, halfdim = data.shape[0], data.shape[1]
p = torch.randn(batchsize,data.shape[-1]).to(data)*pVariance
data = torch.cat([data,p], dim=1)
z = f.forward(data)[0]
z = z.detach().cpu().numpy()
mi_phi = []
mi_psi = []
Nk = 6
for k in range(Nk):
for sample in range(Nsamples):
mi_phi.append(kraskov_mi(theta[sample*Npersample:(sample+1)*Npersample, 0].reshape(-1, 1), z[sample*Npersample:(sample+1)*Npersample, idx[k]].reshape(-1, 1) ))
mi_psi.append( kraskov_mi(theta[sample*Npersample:(sample+1)*Npersample, 1].reshape(-1, 1), z[sample*Npersample:(sample+1)*Npersample, idx[k]].reshape(-1, 1) ))
mi_phi = np.array(mi_phi)
mi_phi = mi_phi.reshape(Nk, Nsamples)
mi_psi = np.array(mi_psi)
mi_psi = mi_psi.reshape(Nk, Nsamples)
plt.figure()
plt.errorbar(np.arange(Nk)+1, mi_phi.mean(axis=1), yerr=mi_phi.std(axis=1)/np.sqrt(Nsamples), fmt='o-', label='$I(Q_k:\Phi)$', markerfacecolor='none', markeredgewidth=2, capsize=8, lw=2)
plt.errorbar(np.arange(Nk)+1, mi_psi.mean(axis=1), yerr=mi_psi.std(axis=1)/np.sqrt(Nsamples), fmt='o-', label='$I(Q_k:\Psi)$', markerfacecolor='none', markeredgewidth=2, capsize=8, lw=2)
plt.xlabel('$k$')
plt.ylabel('$mutual information$')
plt.legend(loc='upper right')
plt.show()
###Output
/Users/lili/anaconda3/lib/python3.6/site-packages/matplotlib/axes/_axes.py:545: UserWarning: No labelled objects found. Use label='...' kwarg on individual plots.
warnings.warn("No labelled objects found. "
###Markdown
3. InterpolationInterpolations of the slowest and the second slowest mode, to plot this to video, check [xyzFile2Animation](https://github.com/li012589/xyzFile2Animation)
###Code
sample = data[0].reshape(1,-1)
latent = f.forward(sample)[0].detach()
from copy import deepcopy
lat1 = deepcopy(latent)
lat2 = deepcopy(latent)
omega, idx = torch.sort(omega)
omega0 = 1/torch.exp(-f.layerList[0].elements[idx[0]])
omega1 = 1/torch.exp(-f.layerList[0].elements[idx[1]])
lats1 = lat1.repeat(100,1)
for i in range(100):
Q0 = -omega0 + i/(100-1) * 2*omega0 - f.layerList[0].shift[idx[0]]
lats1[i,idx[0]]=Q0
x1 = f.inverse(lats1)[0].detach().numpy()[:,:n]
np.savez(smile+'_idx0.npz', x1)
print("Generated mode 0 interpolation data:",smile+"_idx0.npz")
lats2 = lat2.repeat(100,1)
for i in range(100):
Q1 = -omega1 + i/(100-1) * 2*omega1 - f.layerList[0].shift[idx[1]]
lats2[i,idx[1]]=Q1
x2 = f.inverse(lats2)[0].detach().numpy()[:,:n]
np.savez(smile+'_idx1.npz', x2)
print("Generated mode 1 interpolation data:",smile+"_idx1.npz")
###Output
Generated mode 0 interpolation data: CC(=O)NC(C)C(=O)NC_idx0.npz
Generated mode 1 interpolation data: CC(=O)NC(C)C(=O)NC_idx1.npz
|
Chapter 1 - Getting Started with TensorFlow 2.x/Declaring operations.ipynb | ###Markdown
How to do it...
###Code
print(tf.math.divide(3, 4))
print(tf.math.truediv(3, 4))
print(tf.math.floordiv(3.0, 4.0))
print(tf.math.mod(22.0, 5.0))
print(tf.linalg.cross([1., 0., 0.], [0., 1., 0.]))
###Output
tf.Tensor([0. 0. 1.], shape=(3,), dtype=float32)
###Markdown
How it works...
###Code
# Tangent function (tan(pi/4)=1)
def pi_tan(x):
return tf.tan(3.14159/x)
print(pi_tan(4))
###Output
tf.Tensor(0.99999875, shape=(), dtype=float32)
###Markdown
We can also create a custom polynomial function...
###Code
def custom_polynomial(value):
return tf.math.subtract(3 * tf.math.square(value), value) + 10
print(custom_polynomial(11))
###Output
tf.Tensor(362, shape=(), dtype=int32)
|
notebooks/nlp/raw/ex2.ipynb | ###Markdown
Natural Language ClassificationYou did such a great job for DeFalco's restaurant in the previous exercise that the chef has hired you for a new project.The restaurant's menu includes an email address where visitors can give feedback about their food. The manager wants you to create a tool that automatically sends him all the negative reviews so he can fix them, while automatically sending all the positive reviews to the owner, so the manager can ask for a raise. You will first build a model to distinguish positive reviews from negative reviews using Yelp reviews because these reviews include a rating with each review. Your data consists of the text body of each review along with the star rating. Ratings with 1-2 stars count as "negative", and ratings with 4-5 stars are "positive". Ratings with 3 stars are "neutral" and have been dropped from the data.Let's get started. First, run the next code cell.
###Code
import pandas as pd
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.nlp.ex2 import *
print("\nSetup complete")
###Output
_____no_output_____
###Markdown
Step 1: Evaluate the ApproachIs there anything about this approach that concerns you? After you've thought about it, run the function below to see one point of view.
###Code
# Check your answer (Run this code cell to receive credit!)
step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Review Data and Create the modelMoving forward with your plan, you'll need to load the data. Here's some basic code to load data and split it into a training and validation set. Run this code.
###Code
def load_data(csv_file, split=0.9):
data = pd.read_csv(csv_file)
# Shuffle data
train_data = data.sample(frac=1, random_state=7)
texts = train_data.text.values
labels = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)}
for y in train_data.sentiment.values]
split = int(len(train_data) * split)
train_labels = [{"cats": labels} for labels in labels[:split]]
val_labels = [{"cats": labels} for labels in labels[split:]]
return texts[:split], train_labels, texts[split:], val_labels
train_texts, train_labels, val_texts, val_labels = load_data('../input/nlp-course/yelp_ratings.csv')
###Output
_____no_output_____
###Markdown
You will use this training data to build a model. The code to build the model is the same as what you saw in the tutorial. So that is copied below for you.But because your data is different, there are **two lines in the modeling code cell that you'll need to change.** Can you figure out what they are? First, run the cell below to look at a couple elements from your training data.
###Code
print('Texts from training data\n------')
print(train_texts[:2])
print('\nLabels from training data\n------')
print(train_labels[:2])
###Output
_____no_output_____
###Markdown
Now, having seen this data, find the two lines that need to be changed.
###Code
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
# Add the TextCategorizer to the empty model
nlp.add_pipe(textcat)
# Add labels to text classifier
textcat.add_label("ham")
textcat.add_label("spam")
# Check your answer
step_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_2.hint()
#_COMMENT_IF(PROD)_
step_2.solution()
#%%RM_IF(PROD)%%
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
nlp.add_pipe(textcat)
# Add NEGATIVE and POSITIVE labels to text classifier
textcat.add_label("NEGATIVE")
textcat.add_label("POSITIVE")
step_2.assert_check_passed()
###Output
_____no_output_____
###Markdown
Step 3: Train FunctionImplement a function `train` that updates a model with training data. Most of this is general data munging, which we've filled in for you. Just add the one line of code necessary to update your model.
###Code
from spacy.util import minibatch
import random
def train(model, train_data, optimizer):
losses = {}
random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=8)
for batch in batches:
# train_data is a list of tuples [(text0, label0), (text1, label1), ...]
# Split batch into texts and labels
texts, labels = zip(*batch)
# Update model with texts and labels
____
return losses
# Check your answer
step_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_3.hint()
#_COMMENT_IF(PROD)_
step_3.solution()
#%%RM_IF(PROD)%%
from spacy.util import minibatch
import random
def train(model, train_data, optimizer, batch_size=8):
losses = {}
#random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=batch_size)
for batch in batches:
texts, labels = zip(*batch)
model.update(texts, labels, sgd=optimizer, losses=losses)
return losses
step_3.assert_check_passed()
# Fix seed for reproducibility
spacy.util.fix_random_seed(1)
random.seed(1)
# This may take a while to run!
optimizer = nlp.begin_training()
train_data = list(zip(train_texts, train_labels))
losses = train(nlp, train_data, optimizer)
print(losses['textcat'])
###Output
_____no_output_____
###Markdown
We can try this slightly trained model on some example text and look at the probabilities assigned to each label.
###Code
text = "This tea cup was full of holes. Do not recommend."
doc = nlp(text)
print(doc.cats)
###Output
_____no_output_____
###Markdown
These probabilities look reasonable. Now you should turn them into an actual prediction. Step 4: Making PredictionsImplement a function `predict` that predicts the sentiment of text examples. - First, tokenize the texts using `nlp.tokenizer()`. - Then, pass those docs to the TextCategorizer which you can get from `nlp.get_pipe()`. - Use the `textcat.predict()` method to get scores for each document, then choose the class with the highest score (probability) as the predicted class.
###Code
def predict(nlp, texts):
# Use the model's tokenizer to tokenize each input text
docs = ____
# Use textcat to get the scores for each doc
____
# From the scores, find the class with the highest score/probability
predicted_class = ____
return predicted_class
# Check your answer
step_4.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.hint()
#_COMMENT_IF(PROD)_
step_4.solution()
#%%RM_IF(PROD)%%
def predict(nlp, texts):
# Use the tokenizer to tokenize each input text example
docs = [nlp.tokenizer(text) for text in texts]
# Use textcat to get the scores for each doc
textcat = nlp.get_pipe('textcat')
scores, _ = textcat.predict(docs)
# From the scores, find the class with the highest score/probability
predicted_class = scores.argmax(axis=1)
return predicted_class
step_4.assert_check_passed()
texts = val_texts[34:38]
predictions = predict(nlp, texts)
for p, t in zip(predictions, texts):
print(f"{textcat.labels[p]}: {t} \n")
###Output
_____no_output_____
###Markdown
It looks like your model is working well after going through the data just once. However you need to calculate some metric for the model's performance on the hold-out validation data. Step 5: Evaluate The ModelImplement a function that evaluates a `TextCategorizer` model. This function `evaluate` takes a model along with texts and labels. It returns the accuracy of the model, which is the number of correct predictions divided by all predictions.First, use the `predict` method you wrote earlier to get the predicted class for each text in `texts`. Then, find where the predicted labels match the true "gold-standard" labels and calculate the accuracy.
###Code
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model (using your predict method)
predicted_class = ____
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = ____
# A boolean or int array indicating correct predictions
correct_predictions = ____
# The accuracy, number of correct predictions divided by all predictions
accuracy = ____
return accuracy
step_5.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_5.hint()
#_COMMENT_IF(PROD)_
step_5.solution()
#%%RM_IF(PROD)%%
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model
predicted_class = predict(model, texts)
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = [int(each['cats']['POSITIVE']) for each in labels]
# A boolean or int array indicating correct predictions
correct_predictions = predicted_class == true_class
# The accuracy, number of correct predictions divided by all predictions
accuracy = correct_predictions.mean()
return accuracy
# just changed this. not sure ...
step_5.assert_check_passed()
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Accuracy: {accuracy:.4f}")
###Output
_____no_output_____
###Markdown
With the functions implemented, you can train and evaluate in a loop.
###Code
# This may take a while to run!
n_iters = 5
for i in range(n_iters):
losses = train(nlp, train_data, optimizer)
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Loss: {losses['textcat']:.3f} \t Accuracy: {accuracy:.3f}")
###Output
_____no_output_____
###Markdown
Step 6: Keep ImprovingYou've built the necessary components to train a text classifier with spaCy. What could you do further to optimize the model?Run the next line to check your answer.
###Code
# Check your answer (Run this code cell to receive credit!)
step_6.solution()
###Output
_____no_output_____
###Markdown
Natural Language ClassificationYou did such a great job for DeFalco's restaurant in the previous exercise that the chef has hired you for a new project.The restaurant's menu includes an email address where visitors can give feedback about their food. The manager wants you to create a tool that automatically sends him all the negative reviews so he can fix them, while automatically sending all the positive reviews to the owner, so the manager can ask for a raise. You will first build a model to distinguish positive reviews from negative reviews using Yelp reviews because these reviews include a rating with each review. Your data consists of the text body of each review along with the star rating. Ratings with 1-2 stars count as "negative", and ratings with 4-5 stars are "positive". Ratings with 3 stars are "neutral" and have been dropped from the data.Let's get started. First, run the next code cell.
###Code
import pandas as pd
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.nlp.ex2 import *
print("\nSetup complete")
###Output
_____no_output_____
###Markdown
Step 1: Evaluate the ApproachIs there anything about this approach that concerns you? After you've thought about it, run the function below to see one point of view.
###Code
# Check your answer (Run this code cell to receive credit!)
step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Review Data and Create the modelMoving forward with your plan, you'll need to load the data. Here's some basic code to load data and split it into a training and validation set. Run this code.
###Code
def load_data(csv_file, split=0.9):
data = pd.read_csv(csv_file)
# Shuffle data
train_data = data.sample(frac=1, random_state=7)
texts = train_data.text.values
labels = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)}
for y in train_data.sentiment.values]
split = int(len(train_data) * split)
train_labels = [{"cats": labels} for labels in labels[:split]]
val_labels = [{"cats": labels} for labels in labels[split:]]
return texts[:split], train_labels, texts[split:], val_labels
train_texts, train_labels, val_texts, val_labels = load_data('../input/nlp-course/yelp_ratings.csv')
###Output
_____no_output_____
###Markdown
You will use this training data to build a model. The code to build the model is the same as what you saw in the tutorial. So that is copied below for you.But because your data is different, there are **two lines in the modeling code cell that you'll need to change.** Can you figure out what they are? First, run the cell below to look at a couple elements from your training data.
###Code
print('Texts from training data\n------')
print(train_texts[:2])
print('\nLabels from training data\n------')
train_labels[:2]
###Output
_____no_output_____
###Markdown
Now, having seen this data, find the two lines that need to be changed.
###Code
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
# Add the TextCategorizer to the empty model
nlp.add_pipe(textcat)
# Add labels to text classifier
textcat.add_label("ham")
textcat.add_label("spam")
# Check your answer
step_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_2.hint()
#_COMMENT_IF(PROD)_
step_2.solution()
#%%RM_IF(PROD)%%
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
nlp.add_pipe(textcat)
# Add NEGATIVE and POSITIVE labels to text classifier
textcat.add_label("NEGATIVE")
textcat.add_label("POSITIVE")
step_2.assert_check_passed()
###Output
_____no_output_____
###Markdown
Step 3: Train FunctionImplement a function `train` that updates a model with training data. Most of this is general data munging, which we've filled in for you. Just add the one line of code necessary to update your model.
###Code
from spacy.util import minibatch
import random
def train(model, train_data, optimizer):
losses = {}
random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=8)
for batch in batches:
# train_data is a list of tuples [(text0, label0), (text1, label1), ...]
# Split batch into texts and labels
texts, labels = zip(*batch)
# Update model with texts and labels
____
return losses
# Check your answer
step_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_3.hint()
#_COMMENT_IF(PROD)_
step_3.solution()
#%%RM_IF(PROD)%%
from spacy.util import minibatch
import random
def train(model, train_data, optimizer, batch_size=8):
losses = {}
#random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=batch_size)
for batch in batches:
texts, labels = zip(*batch)
model.update(texts, labels, sgd=optimizer, losses=losses)
return losses
step_3.assert_check_passed()
# Fix seed for reproducibility
spacy.util.fix_random_seed(1)
random.seed(1)
# This may take a while to run!
optimizer = nlp.begin_training()
train_data = list(zip(train_texts, train_labels))
losses = train(nlp, train_data, optimizer)
print(losses['textcat'])
###Output
_____no_output_____
###Markdown
We can try this slightly trained model on some example text and look at the probabilities assigned to each label.
###Code
text = "This tea cup was full of holes. Do not recommend."
doc = nlp(text)
print(doc.cats)
###Output
_____no_output_____
###Markdown
These probabilities look reasonable. Now you should turn them into an actual prediction. Step 4: Making PredictionsImplement a function `predict` that uses a model to predict the sentiment of text examples. The function takes a spaCy model (with a `TextCategorizer`) and a list of texts. First, tokenize the texts using `model.tokenizer`. Then, pass those docs to the TextCategorizer which you can get from `model.get_pipe`. Use the `textcat.predict` method to get scores for each document, then choose the class with the highest score (probability) as the predicted class.
###Code
def predict(model, texts):
# Use the model's tokenizer to tokenize each input text
docs = ____
# Use textcat to get the scores for each doc
____
# From the scores, find the class with the highest score/probability
predicted_class = ____
return predicted_class
# Check your answer
step_4.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.hint()
#_COMMENT_IF(PROD)_
step_4.solution()
#%%RM_IF(PROD)%%
def predict(model, texts):
# Use the tokenizer to tokenize each input text example
docs = [model.tokenizer(text) for text in texts]
# Use textcat to get the scores for each doc
textcat = model.get_pipe('textcat')
scores, _ = textcat.predict(docs)
# From the scores, find the class with the highest score/probability
predicted_class = scores.argmax(axis=1)
return predicted_class
step_4.assert_check_passed()
texts = val_texts[34:38]
predictions = predict(nlp, texts)
for p, t in zip(predictions, texts):
print(f"{textcat.labels[p]}: {t} \n")
###Output
_____no_output_____
###Markdown
It looks like your model is working well after going through the data just once. However you need to calculate some metric for the model's performance on the hold-out validation data. Step 5: Evaluate The ModelImplement a function that evaluates a `TextCategorizer` model. This function `evaluate` takes a model along with texts and labels. It returns the accuracy of the model, which is the number of correct predictions divided by all predictions.First, use the `predict` method you wrote earlier to get the predicted class for each text in `texts`. Then, find where the predicted labels match the true "gold-standard" labels and calculate the accuracy.
###Code
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model (using your predict method)
predicted_class = ____
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = ____
# A boolean or int array indicating correct predictions
correct_predictions = ____
# The accuracy, number of correct predictions divided by all predictions
accuracy = ____
return accuracy
step_5.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_5.hint()
#_COMMENT_IF(PROD)_
step_5.solution()
#%%RM_IF(PROD)%%
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model
predicted_class = predict(model, texts)
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = [int(each['cats']['POSITIVE']) for each in labels]
# A boolean or int array indicating correct predictions
correct_predictions = predicted_class == true_class
# The accuracy, number of correct predictions divided by all predictions
accuracy = correct_predictions.mean()
return accuracy
# just changed this. not sure ...
step_5.assert_check_passed()
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Accuracy: {accuracy:.4f}")
###Output
_____no_output_____
###Markdown
With the functions implemented, you can train and evaluate in a loop.
###Code
# This may take a while to run!
n_iters = 5
for i in range(n_iters):
losses = train(nlp, train_data, optimizer)
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Loss: {losses['textcat']:.3f} \t Accuracy: {accuracy:.3f}")
###Output
_____no_output_____
###Markdown
Step 6: Keep ImprovingYou've built the necessary components to train a text classifier with spaCy. What could you do further to optimize the model?Run the next line to check your answer.
###Code
# Check your answer (Run this code cell to receive credit!)
step_6.solution()
###Output
_____no_output_____
###Markdown
 Natural Language ClassificationYou did such a great job for DeFalco's restaurant in the previous exercise that the chef has hired you for a new project.The restaurant's menu includes an email address where visitors can give feedback about their food. The manager wants you to create a tool that automatically sends him all the negative reviews so he can fix them, while automatically sending all the positive reviews to the owner, so the manager can ask for a raise. You will first build a model to distinguish positive reviews from negative reviews using Yelp reviews because these reviews include a rating with each review. Your data consists of the text body of each review along with the star rating. Ratings with 1-2 stars count as "negative", and ratings with 4-5 stars are "positive". Ratings with 3 stars are "neutral" and have been dropped from the data.Let's get started. First, run the next code cell.
###Code
import pandas as pd
# Set up code checking
!pip install -U -t /kaggle/working/ git+https://github.com/Kaggle/learntools.git
from learntools.core import binder
binder.bind(globals())
from learntools.nlp.ex2 import *
print("\nSetup complete")
###Output
_____no_output_____
###Markdown
Step 1: Evaluate the ApproachIs there anything about this approach that concerns you? After you've thought about it, run the function below to see one point of view.
###Code
# Check your answer (Run this code cell to receive credit!)
step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Review Data and Create the modelMoving forward with your plan, you'll need to load the data. Here's some basic code to load data and split it into a training and validation set. Run this code.
###Code
def load_data(csv_file, split=0.9):
data = pd.read_csv(csv_file)
# Shuffle data
train_data = data.sample(frac=1, random_state=7)
texts = train_data.text.values
labels = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)}
for y in train_data.sentiment.values]
split = int(len(train_data) * split)
train_labels = [{"cats": labels} for labels in labels[:split]]
val_labels = [{"cats": labels} for labels in labels[split:]]
return texts[:split], train_labels, texts[split:], val_labels
train_texts, train_labels, val_texts, val_labels = load_data('../input/nlp-course/yelp_ratings.csv')
###Output
_____no_output_____
###Markdown
You will use this training data to build a model. The code to build the model is the same as what you saw in the tutorial. So that is copied below for you.But because your data is different, there are **two lines in the modeling code cell that you'll need to change.** Can you figure out what they are? First, run the cell below to look at a couple elements from your training data.
###Code
print('Texts from training data\n------')
print(train_texts[:2])
print('\nLabels from training data\n------')
print(train_labels[:2])
###Output
_____no_output_____
###Markdown
Now, having seen this data, find the two lines that need to be changed.
###Code
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
# Add the TextCategorizer to the empty model
nlp.add_pipe(textcat)
# Add labels to text classifier
textcat.add_label("ham")
textcat.add_label("spam")
# Check your answer
step_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_2.hint()
#_COMMENT_IF(PROD)_
step_2.solution()
#%%RM_IF(PROD)%%
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
nlp.add_pipe(textcat)
# Add NEGATIVE and POSITIVE labels to text classifier
textcat.add_label("NEGATIVE")
textcat.add_label("POSITIVE")
step_2.assert_check_passed()
###Output
_____no_output_____
###Markdown
Step 3: Train FunctionImplement a function `train` that updates a model with training data. Most of this is general data munging, which we've filled in for you. Just add the one line of code necessary to update your model.
###Code
from spacy.util import minibatch
import random
def train(model, train_data, optimizer):
losses = {}
random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=8)
for batch in batches:
# train_data is a list of tuples [(text0, label0), (text1, label1), ...]
# Split batch into texts and labels
texts, labels = zip(*batch)
# Update model with texts and labels
____
return losses
# Check your answer
step_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_3.hint()
#_COMMENT_IF(PROD)_
step_3.solution()
#%%RM_IF(PROD)%%
from spacy.util import minibatch
import random
def train(model, train_data, optimizer, batch_size=8):
losses = {}
#random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=batch_size)
for batch in batches:
texts, labels = zip(*batch)
model.update(texts, labels, sgd=optimizer, losses=losses)
return losses
step_3.assert_check_passed()
# Fix seed for reproducibility
spacy.util.fix_random_seed(1)
random.seed(1)
# This may take a while to run!
optimizer = nlp.begin_training()
train_data = list(zip(train_texts, train_labels))
losses = train(nlp, train_data, optimizer)
print(losses['textcat'])
###Output
_____no_output_____
###Markdown
We can try this slightly trained model on some example text and look at the probabilities assigned to each label.
###Code
text = "This tea cup was full of holes. Do not recommend."
doc = nlp(text)
print(doc.cats)
###Output
_____no_output_____
###Markdown
These probabilities look reasonable. Now you should turn them into an actual prediction. Step 4: Making PredictionsImplement a function `predict` that predicts the sentiment of text examples. - First, tokenize the texts using `nlp.tokenizer()`. - Then, pass those docs to the TextCategorizer which you can get from `nlp.get_pipe()`. - Use the `textcat.predict()` method to get scores for each document, then choose the class with the highest score (probability) as the predicted class.
###Code
def predict(nlp, texts):
# Use the model's tokenizer to tokenize each input text
docs = ____
# Use textcat to get the scores for each doc
____
# From the scores, find the class with the highest score/probability
predicted_class = ____
return predicted_class
# Check your answer
step_4.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.hint()
#_COMMENT_IF(PROD)_
step_4.solution()
#%%RM_IF(PROD)%%
def predict(nlp, texts):
# Use the tokenizer to tokenize each input text example
docs = [nlp.tokenizer(text) for text in texts]
# Use textcat to get the scores for each doc
textcat = nlp.get_pipe('textcat')
scores, _ = textcat.predict(docs)
# From the scores, find the class with the highest score/probability
predicted_class = scores.argmax(axis=1)
return predicted_class
step_4.assert_check_passed()
texts = val_texts[34:38]
predictions = predict(nlp, texts)
for p, t in zip(predictions, texts):
print(f"{textcat.labels[p]}: {t} \n")
###Output
_____no_output_____
###Markdown
It looks like your model is working well after going through the data just once. However you need to calculate some metric for the model's performance on the hold-out validation data. Step 5: Evaluate The ModelImplement a function that evaluates a `TextCategorizer` model. This function `evaluate` takes a model along with texts and labels. It returns the accuracy of the model, which is the number of correct predictions divided by all predictions.First, use the `predict` method you wrote earlier to get the predicted class for each text in `texts`. Then, find where the predicted labels match the true "gold-standard" labels and calculate the accuracy.
###Code
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model (using your predict method)
predicted_class = ____
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = ____
# A boolean or int array indicating correct predictions
correct_predictions = ____
# The accuracy, number of correct predictions divided by all predictions
accuracy = ____
return accuracy
step_5.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_5.hint()
#_COMMENT_IF(PROD)_
step_5.solution()
#%%RM_IF(PROD)%%
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model
predicted_class = predict(model, texts)
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = [int(each['cats']['POSITIVE']) for each in labels]
# A boolean or int array indicating correct predictions
correct_predictions = predicted_class == true_class
# The accuracy, number of correct predictions divided by all predictions
accuracy = correct_predictions.mean()
return accuracy
# just changed this. not sure ...
step_5.assert_check_passed()
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Accuracy: {accuracy:.4f}")
###Output
_____no_output_____
###Markdown
With the functions implemented, you can train and evaluate in a loop.
###Code
# This may take a while to run!
n_iters = 5
for i in range(n_iters):
losses = train(nlp, train_data, optimizer)
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Loss: {losses['textcat']:.3f} \t Accuracy: {accuracy:.3f}")
###Output
_____no_output_____
###Markdown
Step 6: Keep ImprovingYou've built the necessary components to train a text classifier with spaCy. What could you do further to optimize the model?Run the next line to check your answer.
###Code
# Check your answer (Run this code cell to receive credit!)
step_6.solution()
###Output
_____no_output_____
###Markdown
 Natural Language ClassificationYou did such a great job for DeFalco's restaurant in the previous exercise that the chef has hired you for a new project.The restaurant's menu includes an email address where visitors can give feedback about their food. The manager wants you to create a tool that automatically sends him all the negative reviews so he can fix them, while automatically sending all the positive reviews to the owner, so the manager can ask for a raise. You will first build a model to distinguish positive reviews from negative reviews using Yelp reviews because these reviews include a rating with each review. Your data consists of the text body of each review along with the star rating. Ratings with 1-2 stars count as "negative", and ratings with 4-5 stars are "positive". Ratings with 3 stars are "neutral" and have been dropped from the data.Let's get started. First, run the next code cell.
###Code
import pandas as pd
# Set up code checking
!pip install -U -t /kaggle/working/ git+https://github.com/Kaggle/learntools.git
from learntools.core import binder
binder.bind(globals())
from learntools.nlp.ex2 import *
print("\nSetup complete")
###Output
Setup complete
###Markdown
Step 1: Evaluate the ApproachIs there anything about this approach that concerns you? After you've thought about it, run the function below to see one point of view.
###Code
# Check your answer (Run this code cell to receive credit!)
step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Review Data and Create the modelMoving forward with your plan, you'll need to load the data. Here's some basic code to load data and split it into a training and validation set. Run this code.
###Code
def load_data(csv_file, split=0.9):
data = pd.read_csv(csv_file)
# Shuffle data
train_data = data.sample(frac=1, random_state=7)
texts = train_data.text.values
labels = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)}
for y in train_data.sentiment.values]
split = int(len(train_data) * split)
train_labels = [{"cats": labels} for labels in labels[:split]]
val_labels = [{"cats": labels} for labels in labels[split:]]
return texts[:split], train_labels, texts[split:], val_labels
train_texts, train_labels, val_texts, val_labels = load_data('../input/nlp-course/yelp_ratings.csv')
###Output
_____no_output_____
###Markdown
You will use this training data to build a model. The code to build the model is the same as what you saw in the tutorial. So that is copied below for you.But because your data is different, there are **two lines in the modeling code cell that you'll need to change.** Can you figure out what they are? First, run the cell below to look at a couple elements from your training data.
###Code
print('Texts from training data\n------')
print(train_texts[:2])
print('\nLabels from training data\n------')
print(train_labels[:2])
###Output
_____no_output_____
###Markdown
Now, having seen this data, find the two lines that need to be changed.
###Code
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
# Add the TextCategorizer to the empty model
nlp.add_pipe(textcat)
# Add labels to text classifier
textcat.add_label("ham")
textcat.add_label("spam")
# Check your answer
step_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_2.hint()
#_COMMENT_IF(PROD)_
step_2.solution()
#%%RM_IF(PROD)%%
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
nlp.add_pipe(textcat)
# Add NEGATIVE and POSITIVE labels to text classifier
textcat.add_label("NEGATIVE")
textcat.add_label("POSITIVE")
step_2.assert_check_passed()
###Output
_____no_output_____
###Markdown
Step 3: Train FunctionImplement a function `train` that updates a model with training data. Most of this is general data munging, which we've filled in for you. Just add the one line of code necessary to update your model.
###Code
from spacy.util import minibatch
import random
def train(model, train_data, optimizer):
losses = {}
random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=8)
for batch in batches:
# train_data is a list of tuples [(text0, label0), (text1, label1), ...]
# Split batch into texts and labels
texts, labels = zip(*batch)
# Update model with texts and labels
____
return losses
# Check your answer
step_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_3.hint()
#_COMMENT_IF(PROD)_
step_3.solution()
#%%RM_IF(PROD)%%
from spacy.util import minibatch
import random
def train(model, train_data, optimizer, batch_size=8):
losses = {}
#random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=batch_size)
for batch in batches:
texts, labels = zip(*batch)
model.update(texts, labels, sgd=optimizer, losses=losses)
return losses
step_3.assert_check_passed()
# Fix seed for reproducibility
spacy.util.fix_random_seed(1)
random.seed(1)
# This may take a while to run!
optimizer = nlp.begin_training()
train_data = list(zip(train_texts, train_labels))
losses = train(nlp, train_data, optimizer)
print(losses['textcat'])
###Output
_____no_output_____
###Markdown
We can try this slightly trained model on some example text and look at the probabilities assigned to each label.
###Code
text = "This tea cup was full of holes. Do not recommend."
doc = nlp(text)
print(doc.cats)
###Output
_____no_output_____
###Markdown
These probabilities look reasonable. Now you should turn them into an actual prediction. Step 4: Making PredictionsImplement a function `predict` that predicts the sentiment of text examples. - First, tokenize the texts using `nlp.tokenizer()`. - Then, pass those docs to the TextCategorizer which you can get from `nlp.get_pipe()`. - Use the `textcat.predict()` method to get scores for each document, then choose the class with the highest score (probability) as the predicted class.
###Code
def predict(nlp, texts):
# Use the model's tokenizer to tokenize each input text
docs = ____
# Use textcat to get the scores for each doc
____
# From the scores, find the class with the highest score/probability
predicted_class = ____
return predicted_class
# Check your answer
step_4.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.hint()
#_COMMENT_IF(PROD)_
step_4.solution()
#%%RM_IF(PROD)%%
def predict(nlp, texts):
# Use the tokenizer to tokenize each input text example
docs = [nlp.tokenizer(text) for text in texts]
# Use textcat to get the scores for each doc
textcat = nlp.get_pipe('textcat')
scores, _ = textcat.predict(docs)
# From the scores, find the class with the highest score/probability
predicted_class = scores.argmax(axis=1)
return predicted_class
step_4.assert_check_passed()
texts = val_texts[34:38]
predictions = predict(nlp, texts)
for p, t in zip(predictions, texts):
print(f"{textcat.labels[p]}: {t} \n")
###Output
_____no_output_____
###Markdown
It looks like your model is working well after going through the data just once. However you need to calculate some metric for the model's performance on the hold-out validation data. Step 5: Evaluate The ModelImplement a function that evaluates a `TextCategorizer` model. This function `evaluate` takes a model along with texts and labels. It returns the accuracy of the model, which is the number of correct predictions divided by all predictions.First, use the `predict` method you wrote earlier to get the predicted class for each text in `texts`. Then, find where the predicted labels match the true "gold-standard" labels and calculate the accuracy.
###Code
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model (using your predict method)
predicted_class = ____
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = ____
# A boolean or int array indicating correct predictions
correct_predictions = ____
# The accuracy, number of correct predictions divided by all predictions
accuracy = ____
return accuracy
step_5.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_5.hint()
#_COMMENT_IF(PROD)_
step_5.solution()
#%%RM_IF(PROD)%%
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model
predicted_class = predict(model, texts)
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = [int(each['cats']['POSITIVE']) for each in labels]
# A boolean or int array indicating correct predictions
correct_predictions = predicted_class == true_class
# The accuracy, number of correct predictions divided by all predictions
accuracy = correct_predictions.mean()
return accuracy
# just changed this. not sure ...
step_5.assert_check_passed()
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Accuracy: {accuracy:.4f}")
###Output
_____no_output_____
###Markdown
With the functions implemented, you can train and evaluate in a loop.
###Code
# This may take a while to run!
n_iters = 5
for i in range(n_iters):
losses = train(nlp, train_data, optimizer)
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Loss: {losses['textcat']:.3f} \t Accuracy: {accuracy:.3f}")
###Output
_____no_output_____
###Markdown
Step 6: Keep ImprovingYou've built the necessary components to train a text classifier with spaCy. What could you do further to optimize the model?Run the next line to check your answer.
###Code
# Check your answer (Run this code cell to receive credit!)
step_6.solution()
###Output
_____no_output_____
###Markdown
Natural Language ClassificationYou did such a great job for DeFalco's restaurant in the previous exercise that the chef has hired you for a new project.The restaurant's menu includes an email address where visitors can give feedback about their food. The manager wants you to create a tool that automatically sends him all the negative reviews so he can fix them, while automatically sending all the positive reviews to the owner, so the manager can ask for a raise. You will first build a model to distinguish positive reviews from negative reviews using Yelp reviews because these reviews include a rating with each review. Your data consists of the text body of each review along with the star rating. Ratings with 1-2 stars count as "negative", and ratings with 4-5 stars are "positive". Ratings with 3 stars are "neutral" and have been dropped from the data.Let's get started. First, run the next code cell.
###Code
import pandas as pd
# Set up code checking
!pip install -U -t /kaggle/working/ git+https://github.com/Kaggle/learntools.git
from learntools.core import binder
binder.bind(globals())
from learntools.nlp.ex2 import *
print("\nSetup complete")
###Output
_____no_output_____
###Markdown
Step 1: Evaluate the ApproachIs there anything about this approach that concerns you? After you've thought about it, run the function below to see one point of view.
###Code
# Check your answer (Run this code cell to receive credit!)
step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Review Data and Create the modelMoving forward with your plan, you'll need to load the data. Here's some basic code to load data and split it into a training and validation set. Run this code.
###Code
def load_data(csv_file, split=0.9):
data = pd.read_csv(csv_file)
# Shuffle data
train_data = data.sample(frac=1, random_state=7)
texts = train_data.text.values
labels = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)}
for y in train_data.sentiment.values]
split = int(len(train_data) * split)
train_labels = [{"cats": labels} for labels in labels[:split]]
val_labels = [{"cats": labels} for labels in labels[split:]]
return texts[:split], train_labels, texts[split:], val_labels
train_texts, train_labels, val_texts, val_labels = load_data('../input/nlp-course/yelp_ratings.csv')
###Output
_____no_output_____
###Markdown
You will use this training data to build a model. The code to build the model is the same as what you saw in the tutorial. So that is copied below for you.But because your data is different, there are **two lines in the modeling code cell that you'll need to change.** Can you figure out what they are? First, run the cell below to look at a couple elements from your training data.
###Code
print('Texts from training data\n------')
print(train_texts[:2])
print('\nLabels from training data\n------')
print(train_labels[:2])
###Output
_____no_output_____
###Markdown
Now, having seen this data, find the two lines that need to be changed.
###Code
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
# Add the TextCategorizer to the empty model
nlp.add_pipe(textcat)
# Add labels to text classifier
textcat.add_label("ham")
textcat.add_label("spam")
# Check your answer
step_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_2.hint()
#_COMMENT_IF(PROD)_
step_2.solution()
#%%RM_IF(PROD)%%
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
nlp.add_pipe(textcat)
# Add NEGATIVE and POSITIVE labels to text classifier
textcat.add_label("NEGATIVE")
textcat.add_label("POSITIVE")
step_2.assert_check_passed()
###Output
_____no_output_____
###Markdown
Step 3: Train FunctionImplement a function `train` that updates a model with training data. Most of this is general data munging, which we've filled in for you. Just add the one line of code necessary to update your model.
###Code
from spacy.util import minibatch
import random
def train(model, train_data, optimizer):
losses = {}
random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=8)
for batch in batches:
# train_data is a list of tuples [(text0, label0), (text1, label1), ...]
# Split batch into texts and labels
texts, labels = zip(*batch)
# Update model with texts and labels
____
return losses
# Check your answer
step_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_3.hint()
#_COMMENT_IF(PROD)_
step_3.solution()
#%%RM_IF(PROD)%%
from spacy.util import minibatch
import random
def train(model, train_data, optimizer, batch_size=8):
losses = {}
#random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=batch_size)
for batch in batches:
texts, labels = zip(*batch)
model.update(texts, labels, sgd=optimizer, losses=losses)
return losses
step_3.assert_check_passed()
# Fix seed for reproducibility
spacy.util.fix_random_seed(1)
random.seed(1)
# This may take a while to run!
optimizer = nlp.begin_training()
train_data = list(zip(train_texts, train_labels))
losses = train(nlp, train_data, optimizer)
print(losses['textcat'])
###Output
_____no_output_____
###Markdown
We can try this slightly trained model on some example text and look at the probabilities assigned to each label.
###Code
text = "This tea cup was full of holes. Do not recommend."
doc = nlp(text)
print(doc.cats)
###Output
_____no_output_____
###Markdown
These probabilities look reasonable. Now you should turn them into an actual prediction. Step 4: Making PredictionsImplement a function `predict` that predicts the sentiment of text examples. - First, tokenize the texts using `nlp.tokenizer()`. - Then, pass those docs to the TextCategorizer which you can get from `nlp.get_pipe()`. - Use the `textcat.predict()` method to get scores for each document, then choose the class with the highest score (probability) as the predicted class.
###Code
def predict(nlp, texts):
# Use the model's tokenizer to tokenize each input text
docs = ____
# Use textcat to get the scores for each doc
____
# From the scores, find the class with the highest score/probability
predicted_class = ____
return predicted_class
# Check your answer
step_4.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.hint()
#_COMMENT_IF(PROD)_
step_4.solution()
#%%RM_IF(PROD)%%
def predict(nlp, texts):
# Use the tokenizer to tokenize each input text example
docs = [nlp.tokenizer(text) for text in texts]
# Use textcat to get the scores for each doc
textcat = nlp.get_pipe('textcat')
scores, _ = textcat.predict(docs)
# From the scores, find the class with the highest score/probability
predicted_class = scores.argmax(axis=1)
return predicted_class
step_4.assert_check_passed()
texts = val_texts[34:38]
predictions = predict(nlp, texts)
for p, t in zip(predictions, texts):
print(f"{textcat.labels[p]}: {t} \n")
###Output
_____no_output_____
###Markdown
It looks like your model is working well after going through the data just once. However you need to calculate some metric for the model's performance on the hold-out validation data. Step 5: Evaluate The ModelImplement a function that evaluates a `TextCategorizer` model. This function `evaluate` takes a model along with texts and labels. It returns the accuracy of the model, which is the number of correct predictions divided by all predictions.First, use the `predict` method you wrote earlier to get the predicted class for each text in `texts`. Then, find where the predicted labels match the true "gold-standard" labels and calculate the accuracy.
###Code
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model (using your predict method)
predicted_class = ____
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = ____
# A boolean or int array indicating correct predictions
correct_predictions = ____
# The accuracy, number of correct predictions divided by all predictions
accuracy = ____
return accuracy
step_5.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_5.hint()
#_COMMENT_IF(PROD)_
step_5.solution()
#%%RM_IF(PROD)%%
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model
predicted_class = predict(model, texts)
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = [int(each['cats']['POSITIVE']) for each in labels]
# A boolean or int array indicating correct predictions
correct_predictions = predicted_class == true_class
# The accuracy, number of correct predictions divided by all predictions
accuracy = correct_predictions.mean()
return accuracy
# just changed this. not sure ...
step_5.assert_check_passed()
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Accuracy: {accuracy:.4f}")
###Output
_____no_output_____
###Markdown
With the functions implemented, you can train and evaluate in a loop.
###Code
# This may take a while to run!
n_iters = 5
for i in range(n_iters):
losses = train(nlp, train_data, optimizer)
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Loss: {losses['textcat']:.3f} \t Accuracy: {accuracy:.3f}")
###Output
_____no_output_____
###Markdown
Step 6: Keep ImprovingYou've built the necessary components to train a text classifier with spaCy. What could you do further to optimize the model?Run the next line to check your answer.
###Code
# Check your answer (Run this code cell to receive credit!)
step_6.solution()
###Output
_____no_output_____
###Markdown
Natural Language ClassificationYou did such a great job for DeFalco's restaurant in the previous exercise that the chef has hired you for a new project.The restaurant's menu includes an email address where visitors can give feedback about their food. The manager wants you to create a tool that automatically sends him all the negative reviews so he can fix them, while automatically sending all the positive reviews to the owner, so the manager can ask for a raise. You will first build a model to distinguish positive reviews from negative reviews using Yelp reviews because these reviews include a rating with each review. Your data consists of the text body of each review along with the star rating. Ratings with 1-2 stars count as "negative", and ratings with 4-5 stars are "positive". Ratings with 3 stars are "neutral" and have been dropped from the data.Let's get started. First, run the next code cell.
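###Markdown
 As a toy illustration of the labelling scheme described above (a hedged sketch with made-up reviews, not the course CSV, which already ships with a `sentiment` column), the star-to-sentiment mapping might look like this:
###Code
import pandas as pd

# Hypothetical raw ratings, only to illustrate the star -> sentiment mapping
raw = pd.DataFrame({"text": ["Great food!", "Terrible service", "It was okay", "Loved it"],
                    "stars": [5, 1, 3, 4]})
raw = raw[raw.stars != 3].copy()                  # 3-star reviews are "neutral" and dropped
raw["sentiment"] = (raw.stars >= 4).astype(int)   # 1 = positive (4-5 stars), 0 = negative (1-2 stars)
print(raw)
###Output
_____no_output_____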
###Code
import pandas as pd
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.nlp.ex2 import *
print("\nSetup complete")
###Output
_____no_output_____
###Markdown
Step 1: Evaluate the ApproachIs there anything about this approach that concerns you? After you've thought about it, run the function below to see one point of view.
###Code
# Check your answer (Run this code cell to receive credit!)
step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Review Data and Create the modelMoving forward with your plan, you'll need to load the data. Here's some basic code to load data and split it into a training and validation set. Run this code.
###Code
def load_data(csv_file, split=0.9):
data = pd.read_csv(csv_file)
# Shuffle data
train_data = data.sample(frac=1, random_state=7)
texts = train_data.text.values
labels = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)}
for y in train_data.sentiment.values]
split = int(len(train_data) * split)
train_labels = [{"cats": labels} for labels in labels[:split]]
val_labels = [{"cats": labels} for labels in labels[split:]]
return texts[:split], train_labels, texts[split:], val_labels
train_texts, train_labels, val_texts, val_labels = load_data('../input/nlp-course/yelp_ratings.csv')
###Output
_____no_output_____
###Markdown
You will use this training data to build a model. The code to build the model is the same as what you saw in the tutorial. So that is copied below for you.First, run the cell below to look at a couple elements from your training data.
###Code
print('Texts from training data\n------')
print(train_texts[:2])
print('\nLabels from training data\n------')
print(train_labels[:2])
###Output
_____no_output_____
###Markdown
But because your data is different, there are **two lines in the modeling code cell that you'll need to change.** Can you figure out what they are? If you're not sure, take a second look at the data, and pay particular attention to the labels that should be fed to the text classifier.
###Code
import spacy
# Create an empty model
nlp = spacy.blank('en')
# Add the TextCategorizer to the empty model
textcat = nlp.add_pipe('textcat')
# Add labels to text classifier
textcat.add_label("ham")
textcat.add_label("spam")
# Check your answer
step_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_2.hint()
#_COMMENT_IF(PROD)_
step_2.solution()
#%%RM_IF(PROD)%%
step_2.assert_check_failed()
#%%RM_IF(PROD)%%
import spacy
# Create an empty model
nlp = spacy.blank('en')
# Add the TextCategorizer to the empty model
textcat = nlp.add_pipe('textcat')
# Add NEGATIVE and POSITIVE labels to text classifier
textcat.add_label("NEGATIVE")
textcat.add_label("POSITIVE")
step_2.assert_check_passed()
###Output
_____no_output_____
###Markdown
Step 3: Train FunctionImplement a function `train` that updates a model with training data. Most of this is general data munging, which we've filled in for you. Just add the one line of code necessary to update your model.
###Code
import random
from spacy.util import minibatch
from spacy.training.example import Example
def train(model, train_data, optimizer, batch_size=8):
losses = {}
random.seed(1)
random.shuffle(train_data)
# train_data is a list of tuples [(text0, label0), (text1, label1), ...]
for batch in minibatch(train_data, size=batch_size):
# Split batch into text and labels
for text, labels in batch:
doc = nlp.make_doc(text)
example = Example.from_dict(doc, labels)
# TODO: Update model with texts and labels
____
return losses
# Check your answer
step_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_3.hint()
#_COMMENT_IF(PROD)_
step_3.solution()
#%%RM_IF(PROD)%%
from spacy.util import minibatch
import random
def train(model, train_data, optimizer, batch_size=8):
losses = {}
random.seed(1)
random.shuffle(train_data)
# train_data is a list of tuples [(text0, label0), (text1, label1), ...]
for batch in minibatch(train_data, size=batch_size):
# Split batch into text and labels
for text, labels in batch:
doc = nlp.make_doc(text)
example = Example.from_dict(doc, labels)
# Update model with texts and labels
model.update([example], sgd=optimizer, losses=losses)
return losses
step_3.assert_check_passed()
# Fix seed for reproducibility
spacy.util.fix_random_seed(1)
random.seed(1)
# This may take a while to run!
optimizer = nlp.begin_training()
train_data = list(zip(train_texts, train_labels))
losses = train(nlp, train_data, optimizer)
print(losses['textcat'])
###Output
_____no_output_____
###Markdown
We can try this slightly trained model on some example text and look at the probabilities assigned to each label.
###Code
text = "This tea cup was full of holes. Do not recommend."
doc = nlp(text)
print(doc.cats)
###Output
_____no_output_____
###Markdown
These probabilities look reasonable. Now you should turn them into an actual prediction. Step 4: Making PredictionsImplement a function `predict` that predicts the sentiment of text examples. - First, tokenize the texts using `nlp.tokenizer()`. - Then, pass those docs to the TextCategorizer which you can get from `nlp.get_pipe()`. - Use the `textcat.predict()` method to get scores for each document, then choose the class with the highest score (probability) as the predicted class.
###Code
def predict(nlp, texts):
# Use the model's tokenizer to tokenize each input text
docs = ____
# Use textcat to get the scores for each doc
____
# From the scores, find the class with the highest score/probability
predicted_class = ____
return predicted_class
# Check your answer
step_4.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.hint()
#_COMMENT_IF(PROD)_
step_4.solution()
#%%RM_IF(PROD)%%
def predict(nlp, texts):
# Use the tokenizer to tokenize each input text example
docs = [nlp.tokenizer(text) for text in texts]
# Use textcat to get the scores for each doc
textcat = nlp.get_pipe('textcat')
scores = textcat.predict(docs)
# From the scores, find the class with the highest score/probability
predicted_class = scores.argmax(axis=1)
return predicted_class
step_4.assert_check_passed()
texts = val_texts[34:38]
predictions = predict(nlp, texts)
for p, t in zip(predictions, texts):
print(f"{textcat.labels[p]}: {t} \n")
###Output
_____no_output_____
###Markdown
It looks like your model is working well after going through the data just once. However you need to calculate some metric for the model's performance on the hold-out validation data. Step 5: Evaluate The ModelImplement a function that evaluates a `TextCategorizer` model. This function `evaluate` takes a model along with texts and labels. It returns the accuracy of the model, which is the number of correct predictions divided by all predictions.First, use the `predict` method you wrote earlier to get the predicted class for each text in `texts`. Then, find where the predicted labels match the true "gold-standard" labels and calculate the accuracy.
###Code
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model (using your predict method)
predicted_class = ____
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = ____
# A boolean or int array indicating correct predictions
correct_predictions = ____
# The accuracy, number of correct predictions divided by all predictions
accuracy = ____
return accuracy
# Check your answer
step_5.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_5.hint()
#_COMMENT_IF(PROD)_
step_5.solution()
#%%RM_IF(PROD)%%
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
    model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model
predicted_class = predict(model, texts)
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = [int(each['cats']['POSITIVE']) for each in labels]
# A boolean or int array indicating correct predictions
correct_predictions = predicted_class == true_class
# The accuracy, number of correct predictions divided by all predictions
accuracy = correct_predictions.mean()
return accuracy
# just changed this. not sure ...
step_5.assert_check_passed()
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Accuracy: {accuracy:.4f}")
###Output
_____no_output_____
###Markdown
With the functions implemented, you can train and evaluate in a loop.
###Code
# This may take a while to run!
n_iters = 5
for i in range(n_iters):
losses = train(nlp, train_data, optimizer)
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Loss: {losses['textcat']:.3f} \t Accuracy: {accuracy:.3f}")
###Output
_____no_output_____
###Markdown
Step 6: Keep ImprovingYou've built the necessary components to train a text classifier with spaCy. What could you do further to optimize the model?Run the next line to check your answer.
###Code
# Check your answer (Run this code cell to receive credit!)
step_6.solution()
###Output
_____no_output_____
###Markdown
 Natural Language ClassificationYou did such a great job for DeFalco's restaurant in the previous exercise that the chef has hired you for a new project.The restaurant's menu includes an email address where visitors can give feedback about their food. The manager wants you to create a tool that automatically sends him all the negative reviews so he can fix them, while automatically sending all the positive reviews to the owner, so the manager can ask for a raise. You will first build a model to distinguish positive reviews from negative reviews using Yelp reviews because these reviews include a rating with each review. Your data consists of the text body of each review along with the star rating. Ratings with 1-2 stars count as "negative", and ratings with 4-5 stars are "positive". Ratings with 3 stars are "neutral" and have been dropped from the data.Let's get started. First, run the next code cell.
###Code
import pandas as pd
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.nlp.ex2 import *
print("\nSetup complete")
###Output
_____no_output_____
###Markdown
Step 1: Evaluate the ApproachIs there anything about this approach that concerns you? After you've thought about it, run the function below to see one point of view.
###Code
# Check your answer (Run this code cell to receive credit!)
step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Review Data and Create the modelMoving forward with your plan, you'll need to load the data. Here's some basic code to load data and split it into a training and validation set. Run this code.
###Code
def load_data(csv_file, split=0.9):
data = pd.read_csv(csv_file)
# Shuffle data
train_data = data.sample(frac=1, random_state=7)
texts = train_data.text.values
labels = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)}
for y in train_data.sentiment.values]
split = int(len(train_data) * split)
train_labels = [{"cats": labels} for labels in labels[:split]]
val_labels = [{"cats": labels} for labels in labels[split:]]
return texts[:split], train_labels, texts[split:], val_labels
train_texts, train_labels, val_texts, val_labels = load_data('../input/nlp-course/yelp_ratings.csv')
###Output
_____no_output_____
###Markdown
You will use this training data to build a model. The code to build the model is the same as what you saw in the tutorial. So that is copied below for you.But because your data is different, there are **two lines in the modeling code cell that you'll need to change.** Can you figure out what they are? First, run the cell below to look at a couple elements from your training data.
###Code
print('Texts from training data\n------')
print(train_texts[:2])
print('\nLabels from training data\n------')
print(train_labels[:2])
###Output
_____no_output_____
###Markdown
Now, having seen this data, find the two lines that need to be changed.
###Code
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
# Add the TextCategorizer to the empty model
nlp.add_pipe(textcat)
# Add labels to text classifier
textcat.add_label("ham")
textcat.add_label("spam")
# Check your answer
step_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_2.hint()
#_COMMENT_IF(PROD)_
step_2.solution()
#%%RM_IF(PROD)%%
import spacy
# Create an empty model
nlp = spacy.blank("en")
# Create the TextCategorizer with exclusive classes and "bow" architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"})
nlp.add_pipe(textcat)
# Add NEGATIVE and POSITIVE labels to text classifier
textcat.add_label("NEGATIVE")
textcat.add_label("POSITIVE")
step_2.assert_check_passed()
###Output
_____no_output_____
###Markdown
Step 3: Train FunctionImplement a function `train` that updates a model with training data. Most of this is general data munging, which we've filled in for you. Just add the one line of code necessary to update your model.
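###Markdown
 Before filling in the blank, a small aside on the data munging (using hypothetical pairs, not the real training data): `minibatch` groups the `(text, label)` tuples into small lists, and `zip(*batch)` splits each list back into a tuple of texts and a tuple of labels.
###Code
from spacy.util import minibatch

# Hypothetical (text, label) pairs, only to show the batching/unpacking pattern
pairs = [("text a", 0), ("text b", 1), ("text c", 0), ("text d", 1)]
for batch in minibatch(pairs, size=2):
    texts, labels = zip(*batch)
    print(texts, labels)   # e.g. ('text a', 'text b') (0, 1)
###Output
_____no_output_____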
###Code
from spacy.util import minibatch
import random
def train(model, train_data, optimizer):
losses = {}
random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=8)
for batch in batches:
# train_data is a list of tuples [(text0, label0), (text1, label1), ...]
# Split batch into texts and labels
texts, labels = zip(*batch)
# Update model with texts and labels
____
return losses
# Check your answer
step_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_3.hint()
#_COMMENT_IF(PROD)_
step_3.solution()
#%%RM_IF(PROD)%%
from spacy.util import minibatch
import random
def train(model, train_data, optimizer, batch_size=8):
losses = {}
#random.seed(1)
random.shuffle(train_data)
batches = minibatch(train_data, size=batch_size)
for batch in batches:
texts, labels = zip(*batch)
model.update(texts, labels, sgd=optimizer, losses=losses)
return losses
step_3.assert_check_passed()
# Fix seed for reproducibility
spacy.util.fix_random_seed(1)
random.seed(1)
# This may take a while to run!
optimizer = nlp.begin_training()
train_data = list(zip(train_texts, train_labels))
losses = train(nlp, train_data, optimizer)
print(losses['textcat'])
###Output
_____no_output_____
###Markdown
We can try this slightly trained model on some example text and look at the probabilities assigned to each label.
###Code
text = "This tea cup was full of holes. Do not recommend."
doc = nlp(text)
print(doc.cats)
###Output
_____no_output_____
###Markdown
These probabilities look reasonable. Now you should turn them into an actual prediction. Step 4: Making PredictionsImplement a function `predict` that uses a model to predict the sentiment of text examples. The function takes a spaCy model (with a `TextCategorizer`) and a list of texts. First, tokenize the texts using `model.tokenizer`. Then, pass those docs to the TextCategorizer which you can get from `model.get_pipe`. Use the `textcat.predict` method to get scores for each document, then choose the class with the highest score (probability) as the predicted class.
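###Markdown
 For a single document you can already read a prediction straight off `doc.cats`; a minimal sketch (reusing the `doc` from the cell above) that picks the highest-probability label:
###Code
# The predicted label is simply the key with the largest probability in doc.cats
predicted_label = max(doc.cats, key=doc.cats.get)
print(predicted_label, doc.cats[predicted_label])
###Output
_____no_output_____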
###Code
def predict(model, texts):
# Use the model's tokenizer to tokenize each input text
docs = ____
# Use textcat to get the scores for each doc
____
# From the scores, find the class with the highest score/probability
predicted_class = ____
return predicted_class
# Check your answer
step_4.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.hint()
#_COMMENT_IF(PROD)_
step_4.solution()
#%%RM_IF(PROD)%%
def predict(model, texts):
# Use the tokenizer to tokenize each input text example
docs = [model.tokenizer(text) for text in texts]
# Use textcat to get the scores for each doc
textcat = model.get_pipe('textcat')
scores, _ = textcat.predict(docs)
# From the scores, find the class with the highest score/probability
predicted_class = scores.argmax(axis=1)
return predicted_class
step_4.assert_check_passed()
texts = val_texts[34:38]
predictions = predict(nlp, texts)
for p, t in zip(predictions, texts):
print(f"{textcat.labels[p]}: {t} \n")
###Output
_____no_output_____
###Markdown
It looks like your model is working well after going through the data just once. However you need to calculate some metric for the model's performance on the hold-out validation data. Step 5: Evaluate The ModelImplement a function that evaluates a `TextCategorizer` model. This function `evaluate` takes a model along with texts and labels. It returns the accuracy of the model, which is the number of correct predictions divided by all predictions.First, use the `predict` method you wrote earlier to get the predicted class for each text in `texts`. Then, find where the predicted labels match the true "gold-standard" labels and calculate the accuracy.
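###Markdown
 As a tiny, self-contained illustration of the accuracy arithmetic (made-up class labels, not the Yelp data): comparing a NumPy array of predictions against a list of true classes gives an element-wise boolean array, and its mean is the accuracy.
###Code
import numpy as np

# Hypothetical predictions vs. true classes, just to show the arithmetic
predicted = np.array([1, 0, 1, 1])
true = [1, 0, 0, 1]             # a plain Python list works in the comparison too
correct = predicted == true     # array([ True,  True, False,  True])
print(correct.mean())           # 0.75
###Output
_____no_output_____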
###Code
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model (using your predict method)
predicted_class = ____
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = ____
# A boolean or int array indicating correct predictions
correct_predictions = ____
# The accuracy, number of correct predictions divided by all predictions
accuracy = ____
return accuracy
step_5.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_5.hint()
#_COMMENT_IF(PROD)_
step_5.solution()
#%%RM_IF(PROD)%%
def evaluate(model, texts, labels):
""" Returns the accuracy of a TextCategorizer model.
Arguments
---------
model: spaCy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# Get predictions from textcat model
predicted_class = predict(model, texts)
# From labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = [int(each['cats']['POSITIVE']) for each in labels]
# A boolean or int array indicating correct predictions
correct_predictions = predicted_class == true_class
# The accuracy, number of correct predictions divided by all predictions
accuracy = correct_predictions.mean()
return accuracy
step_5.assert_check_passed()
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Accuracy: {accuracy:.4f}")
###Output
_____no_output_____
###Markdown
With the functions implemented, you can train and evaluate in a loop.
###Code
# This may take a while to run!
n_iters = 5
for i in range(n_iters):
losses = train(nlp, train_data, optimizer)
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Loss: {losses['textcat']:.3f} \t Accuracy: {accuracy:.3f}")
###Output
_____no_output_____
###Markdown
Step 6: Keep ImprovingYou've built the necessary components to train a text classifier with spaCy. What could you do further to optimize the model?Run the next line to check your answer.
###Code
# Check your answer (Run this code cell to receive credit!)
step_6.solution()
###Output
_____no_output_____ |
Drug Discovery.ipynb | ###Markdown
| Name | Date || ---------------------------------------------------| ------------------------------------- || Diaaeldin SHALABY | 14.05.2021 | Hands-on AI IIUnit 3 — Drug Discovery (Assignment) Authors: B. Schäfl, S. Lehner, J. Schimunek, J. BrandstetterDate: 23-04-2021This file is part of the "Hands-on AI II" lecture material. The following copyright statement applies to all code within this file.Copyright statement:This material, no matter whether in printed or electronic form, may be used for personal and non-commercial educational use only. Any reproduction of this manuscript, no matter whether as a whole or in parts, no matter whether in printed or in electronic form, requires explicit prior acceptance of the authors. Table of contents Extracting Information of MOL/SDF Representations Extracting properties Inspecting atom numbers Atomic Properties and Bonds Extracting atomic properties Extracting bond properties Further Applications Molecular substructure matching Molecular fingerprints Molecular clustering How to use this notebookThis notebook is designed to run from start to finish. There are different tasks (displayed in orange boxes) which require your contribution (in form of code, plain text, ...). Most/All of the supplied functions are imported from the file u3_utils.py which can be seen and treated as a black box. However, for further understanding, you can look at the implementations of the helper functions. In order to run this notebook, the packages which are imported at the beginning of u3_utils.py need to be installed.
###Code
# Import pre-defined utilities specific to this notebook.
import u3_utils as u3
# Import additional utilities needed in this notebook.
import numpy as np
import pandas as pd
import seaborn as sns
from copy import deepcopy
from rdkit import Chem
# Setup Jupyter notebook (warning: this may affect all Jupyter notebooks running on the same Jupyter server).
u3.setup_jupyter()
###Output
_____no_output_____
###Markdown
Module versionsAs mentioned in the introductiory slides, specific minimum versions of Python itself as well as of used modules is recommended.
###Code
u3.check_module_versions()
###Output
Installed Python version: 3.8 (✓)
Installed numpy version: 1.19.1 (✓)
Installed pandas version: 1.1.3 (✓)
Installed PyTorch version: 1.7.1 (✓)
Installed scikit-learn version: 0.23.2 (✓)
Installed scipy version: 1.5.0 (✓)
Installed matplotlib version: 3.3.1 (✓)
Installed seaborn version: 0.11.0 (✓)
Installed PIL version: 8.0.0 (✓)
Installed rdkit version: 2020.09.1 (✓)
###Markdown
Extracting Information of MOL/SDF RepresentationsThe first step of working with molecule data is actually getting them into memory. RDKit provides this functionality with SDMolSupplier – be aware, that the specified file is not loaded at once, but piece by piece, depending on what information is retrieved. This behavior is solely for performance reasons, hence you do not need to worry about this besides not deleting/moving the specified data file during the whole process. Execute the notebook until here and try to solve the following tasks: Load the molecule data set molecules.sdf using the appropriate function as supplied by RDKit. To avoid any problems with the lazy loading mechanics of RDKit, print the total amount of loaded molecules. Visualize the $16$ molecules with the lowest LUMO values in a grid including their Formulas as well as their LUMO values. What does the acronym LUMO stand for? Cite your sources (find an appropriate source, even if you know it by heart).
###Code
data_molecules = Chem.SDMolSupplier(r'resources/molecules.sdf')
num_molecules = len(data_molecules)
print(f'{num_molecules} molecules loaded from file.')
# collect the LUMO property of every molecule as a float so the sort is numeric rather than lexicographic
all_lumo = []
for mol in data_molecules:
all_lumo.append(float(mol.GetProp(r'LUMO')))
all_lumo.sort()
lowest_16_lumo = all_lumo[0:16]
# keep the molecules whose LUMO value is among the 16 lowest
mol_lowest_16_lumo = []
for mol in data_molecules:
if float(mol.GetProp(r'LUMO')) in lowest_16_lumo:
mol_lowest_16_lumo.append(mol)
# Select specific molecules and extract some of their properties.
specific_molecules = mol_lowest_16_lumo
specific_molecule_labels = [
f'{mol.GetProp(r"Formula")}: {mol.GetProp(r"LUMO")}' for mol in specific_molecules]
# Plot specified molecules with extracted properties as labels in a grid plot.
Chem.Draw.MolsToGridImage(
specific_molecules,
legends=specific_molecule_labels,
maxMols=len(specific_molecules),
molsPerRow=4)
###Output
_____no_output_____
###Markdown
LUMO stands for "lowest unoccupied molecular orbital", according to https://en.wikipedia.org/wiki/HOMO_and_LUMO. Execute the notebook until here and try to solve the following tasks: For each of the previously found molecules, annotate their atoms and compute their respective atom count. Visualize the result in a grid including their Formulas as well as their atom counts (sorted according to atom count). Do you observe visually similar molecules? In either case, comment on their respective differences.
###Code
def annotate_molecule_atoms(molecule: Chem.rdchem.Mol) -> Chem.rdchem.Mol:
"""
Annotate molecule atoms with corresponding atom numbers.
:param molecule: molecule to annotate
:return: annotated molecule
"""
molecule_annotated = deepcopy(molecule)
for atom in molecule_annotated.GetAtoms():
atom.SetProp(r'atomNote', str(atom.GetIdx()))
return molecule_annotated
molecules_annotated = []
for molecule in mol_lowest_16_lumo:
molecules_annotated.append(annotate_molecule_atoms(molecule))
# Sort list according to atom count
molecules_annotated.sort(key=lambda mol: mol.GetNumAtoms())
# Select specific molecules and extract some of their properties.
specific_molecules = molecules_annotated
specific_molecule_labels = [
f'{mol.GetProp(r"Formula")}: {mol.GetNumAtoms()}' for mol in specific_molecules]
# Plot specified molecules with extracted properties as labels in a grid plot.
Chem.Draw.MolsToGridImage(
specific_molecules,
legends=specific_molecule_labels,
maxMols=len(specific_molecules),
molsPerRow=4)
###Output
_____no_output_____
###Markdown
I see a couple of similar molecules. Atomic Properties and BondsExtracting atomic as well as bond properties often allows for a more thorough understanding of the molecules at hand. Unsurprisingly, RDKit provides the necessary functionality for this purpose – almost. The missing functionality may be taken from the exercise notebook, but needs to be adapted accordingly. Execute the notebook until here and try to solve the following tasks: Compute the number of atoms participating in a ring structure for each of the molecules of the previous exercise. Adapt and apply annotate_molecule_atoms in a way to only mark atoms participating in a ring structure with an R. Visualize the result in a grid including their Formulas as well as their number of ring atoms (sorted by the latter).
###Code
# only mark atoms participating in a ring structure with an R.
def annotate_molecule_atoms(molecule: Chem.rdchem.Mol) -> Chem.rdchem.Mol:
"""
Mark atoms that participate in a ring structure with the label 'R'.
:param molecule: molecule to annotate
:return: annotated molecule
"""
molecule_annotated = deepcopy(molecule)
for atom in molecule_annotated.GetAtoms():
if atom.IsInRing():
atom.SetProp(r'atomNote', 'R')
return molecule_annotated
annotated_mols_w_rings = {}
for mol in mol_lowest_16_lumo:
count_rings = 0
for atom in mol.GetAtoms():
if atom.IsInRing():
count_rings += 1
if count_rings > 0:
annotated_mols_w_rings[annotate_molecule_atoms(mol)] = count_rings
annotated_mols_w_rings = dict(sorted(annotated_mols_w_rings.items(), key= lambda x: x[1]))
# Select specific molecules and extract some of their properties.
specific_molecule_labels = [
f'{k.GetProp(r"Formula")}: {v}' for k,v in annotated_mols_w_rings.items()]
# Plot specified molecules with extracted properties as labels in a grid plot.
Chem.Draw.MolsToGridImage(
annotated_mols_w_rings.keys(),
legends=specific_molecule_labels,
maxMols=len(annotated_mols_w_rings),
molsPerRow=4)
###Output
_____no_output_____
###Markdown
Execute the notebook until here and try to solve the following tasks: Compute the number of bonds for each of the molecules of the previous exercise (disregarding their specific type). Adapt and apply annotate_molecule_bonds in a way to mark bonds with the first letter of their respective type. Visualize the result in a grid including their Formulas as well as their number of bonds (sorted by the latter).
###Code
# Adapt and apply annotate_molecule_bonds in a way to mark bonds with the first letter of their respective type.
def annotate_molecule_atoms(molecule: Chem.rdchem.Mol) -> Chem.rdchem.Mol:
"""
Mark each bond with the first letter of its bond type.
:param molecule: molecule to annotate
:return: annotated molecule
"""
molecule_annotated = deepcopy(molecule)
for bond in molecule_annotated.GetBonds():
bond.SetProp(r'bondNote', str(bond.GetBondType())[0])  # 'bondNote' is the bond-level annotation read by RDKit's drawing code
return molecule_annotated
annotated_mols_w_bonds = {}
for mol in annotated_mols_w_rings:
# GetNumBonds returns the total number of bonds in the molecule, regardless of their type
count_bonds = mol.GetNumBonds()
annotated_mols_w_bonds[annotate_molecule_atoms(mol)] = count_bonds
annotated_mols_w_bonds = dict(sorted(annotated_mols_w_bonds.items(), key= lambda x: x[1]))
# Select specific molecules and extract some of their properties.
specific_molecule_labels = [
f'{k.GetProp(r"Formula")}: {v}' for k,v in annotated_mols_w_bonds.items()]
# Plot specified molecules with extracted properties as labels in a grid plot.
Chem.Draw.MolsToGridImage(
annotated_mols_w_bonds.keys(),
legends=specific_molecule_labels,
maxMols=len(annotated_mols_w_bonds),
molsPerRow=4)
###Output
_____no_output_____
###Markdown
Further ApplicationsIn the following exercises, you'll have to dig into the more interesting applications of chemoinformatics, namely: molecular substructure matching molecular fingerprints molecular clustering Execute the notebook until here and try to solve the following tasks: Specify a C(=O) template and scan the molecules data set. Visualize the template including a respective atom numbering. For each of the found molecules, annotate their atoms and compute their respective substructure matches (w.r.t. C(=O)). Visualize the result in a grid including their substructure matches. Can you recognize the substructures in the plot?
###Code
template = Chem.MolFromSmiles(r'C(=O)')
Chem.Draw.MolToImage(annotate_molecule_atoms(template))
annotated_mols_w_substructs = {}
for mol in data_molecules:
has_substructure_match = mol.HasSubstructMatch(template)
if has_substructure_match:
mol = annotate_molecule_atoms(mol)
annotated_mols_w_substructs[mol] = mol.GetSubstructMatch(template)
specific_molecule_labels = [
f'{k.GetProp(r"Formula")}: {v}' for k,v in annotated_mols_w_substructs.items()]
annotated_mols_w_substructs.values()
# Plot specified molecules with extracted properties as labels in a grid plot.
Chem.Draw.MolsToGridImage(
annotated_mols_w_substructs.keys(),
legends=specific_molecule_labels,
maxMols=len(annotated_mols_w_substructs),
molsPerRow=4)
###Output
_____no_output_____
###Markdown
I can recognize the substructures in the cells above. Execute the notebook until here and try to solve the following tasks: Compute the ECFPs from the previously found molecules and visualize them in tabular form (use a fold size of $256$). How many substructures are present in each molecule? Compute and sort their total amount for each molecule.
###Code
# First, all molecules need to be converted to corresponding SMILES representations.
data_molecules_smiles = [Chem.MolToSmiles(molecule) for molecule in annotated_mols_w_substructs.keys()]
# Afterwards, ECFPs are computed and visualized in tabular form.
data_molecules_ecfps = u3.compute_ecfps(data_molecules_smiles, fold=256)
data_molecules_ecfps
pd.DataFrame(data_molecules_ecfps.sum(axis=1), columns=['substructures']).sort_values(by='substructures').transpose()
###Output
_____no_output_____
###Markdown
Execute the notebook until here and try to solve the following tasks: Downproject the previously computed ECFPs using PCA. Visualize the result in a scatter plot. Are there any visible clusters? Cluster the resulting downprojections using affinity propagation. Why would k-means be a little bit disadvantageous here? Plot all molecules of all clusters in separate grids including their Compound Name and Activity. Do you see similarities?
###Code
# Set default plotting style and random seed for reproducibility.
sns.set()
np.random.seed(seed=42)
# Compute Principal Component Analysis (PCA) and reduce the dimensionality of the ECFPs.
data_molecules_ecfps_pca = u3.apply_pca(n_components=2, data=data_molecules_ecfps)
u3.plot_points_2d(data=data_molecules_ecfps_pca, figsize=(14, 7))
###Output
_____no_output_____
###Markdown
Are there any visible clusters? - Yes. (As for the question above: k-means would be a little disadvantageous here because the number of clusters has to be fixed in advance, whereas affinity propagation infers it from the data.)
###Code
# Set default plotting style and random seed for reproducibility.
sns.set()
np.random.seed(seed=42)
# Compute affinity propagation on the t-SNE downprojected data set.
data_molecules_ecfps_pca_ap = data_molecules_ecfps_pca.copy()
data_molecules_ecfps_pca_ap[r'cluster'] = u3.apply_affinity_propagation(data=data_molecules_ecfps_pca_ap)
u3.plot_points_2d(data=data_molecules_ecfps_pca_ap, target_column=r'cluster', figsize=(14, 7))
# Select specific molecules and extract some of their properties.
def plot_separate_grids(i):
specific_molecules = data_molecules_ecfps_pca_ap[data_molecules_ecfps_pca_ap[r'cluster'] == i].index
specific_molecules = [data_molecules[_] for _ in specific_molecules]
specific_molecule_labels = [
f'{mol.GetProp(r"Compound Name")}: {mol.GetProp(r"Activity")}' for mol in specific_molecules]
# Plot specified molecules with extracted properties as labels in a grid plot.
return Chem.Draw.MolsToGridImage(
specific_molecules,
legends=specific_molecule_labels,
maxMols=len(specific_molecules),
molsPerRow=4)
plot_separate_grids(0)
plot_separate_grids(1)
plot_separate_grids(2)
###Output
_____no_output_____ |
Lesson-11_6_Packedsequence.ipynb | ###Markdown
PackedSequence and PaddedSequence[Link: official PyTorch documentation on PackedSequence](https://pytorch.org/docs/stable/nn.htmlpackedsequence) In this tutorial we will learn how to build the `PackedSequence` and `PaddedSequence` objects that let RNN / LSTM family models handle batches of sequences efficiently. The PyTorch library provides the following four functions: `pad_sequence`, `pack_sequence`, `pack_padded_sequence`, `pad_packed_sequence`. Since the function names alone can be quite confusing, the figure below should make them easier to understand.
###Code
import torch
import numpy as np
from torch.nn.utils.rnn import pad_sequence, pack_sequence, pack_padded_sequence, pad_packed_sequence
###Output
_____no_output_____
###Markdown
Example data For this exercise we created some simple example data. The important things to remember here are that the batch size is 5 and that the longest sequence has length 13.
###Code
# Random word from random word generator
data = ['hello world',
'midnight',
'calculation',
'path',
'short circuit']
# Make dictionary
char_set = ['<pad>'] + list(set(char for seq in data for char in seq)) # Get all characters and include pad token
char2idx = {char: idx for idx, char in enumerate(char_set)} # Constuct character to index dictionary
print('char_set:', char_set)
print('char_set length:', len(char_set))
# Convert character to index and make list of tensors
X = [torch.LongTensor([char2idx[char] for char in seq]) for seq in data]
# Check converted result
for sequence in X:
print(sequence)
###Output
tensor([15, 10, 4, 4, 17, 11, 16, 17, 6, 4, 3])
tensor([ 1, 2, 3, 5, 2, 18, 15, 7])
tensor([14, 9, 4, 14, 13, 4, 9, 7, 2, 17, 5])
tensor([12, 9, 7, 15])
tensor([ 8, 15, 17, 6, 7, 11, 14, 2, 6, 14, 13, 2, 7])
###Markdown
As shown below, each sequence has a different length.
###Code
# Make length tensor (will be used later in 'pack_padded_sequence' function)
lengths = [len(seq) for seq in X]
print('lengths:', lengths)
###Output
lengths: [11, 8, 11, 4, 13]
###Markdown
How do we batch sequence data? For sequence-like data such as text or audio, every example has a different length, so to build a single batch we usually pad the shorter sequences at the end to match the longest one. This is the commonly used padding approach. In PyTorch, however, a `PackedSequence` lets the computation run in parallel over exactly the needed elements, without any padding.
Building a PaddedSequence (just a Tensor) with the `pad_sequence` function A PaddedSequence is simply an ordinary **Tensor** with padding added so that every sequence matches the longest one in the batch (there is no separate PaddedSequence class). The PyTorch utility function `pad_sequence` makes it easy to add this padding. Note that the input must be given as a **list of Tensors** (a **list of Tensors**, not a single **Tensor**). If each Tensor in the list has shape `(?, a, b, ...)` (where ? is the individual sequence length), `pad_sequence` returns a Tensor of shape `(T, batch_size, a, b, ...)` (where `T` is the longest sequence length in the batch). If you explicitly pass `batch_first=True` to `pad_sequence`, the returned Tensor has shape `(batch_size, T, a, b, ...)` instead. The padding value defaults to 0, but it can be changed via a parameter such as `padding_value=42`.
###Code
# Make a Tensor of shape (Batch x Maximum_Sequence_Length)
padded_sequence = pad_sequence(X, batch_first=True) # X is now padded sequence
print(padded_sequence)
print(padded_sequence.shape)
###Output
tensor([[15, 10, 4, 4, 17, 11, 16, 17, 6, 4, 3, 0, 0],
[ 1, 2, 3, 5, 2, 18, 15, 7, 0, 0, 0, 0, 0],
[14, 9, 4, 14, 13, 4, 9, 7, 2, 17, 5, 0, 0],
[12, 9, 7, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 8, 15, 17, 6, 7, 11, 14, 2, 6, 14, 13, 2, 7]])
torch.Size([5, 13])
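###Markdown
As mentioned above, the padding value defaults to 0 but can be chosen explicitly. A minimal sketch — the value 42 is arbitrary and only used to make the padded positions easy to spot:
###Code
# pad with 42 instead of the default 0
padded_with_42 = pad_sequence(X, batch_first=True, padding_value=42)
print(padded_with_42)
###Output
_____no_output_____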
###Markdown
Building a PackedSequence with the `pack_sequence` function Instead of adding padding tokens to create a Tensor that matches the maximum sequence length as above, a PackedSequence is a PyTorch data structure that makes the model compute only up to each sequence's actual length, without any padding. One condition must be satisfied to build a PackedSequence: **the given input (a list of Tensors) must be sorted in descending order by length.** So let's first sort the input by length in descending order.
###Code
# Sort by descending lengths
sorted_idx = sorted(range(len(lengths)), key=lengths.__getitem__, reverse=True)
sorted_X = [X[idx] for idx in sorted_idx]
# Check converted result
for sequence in sorted_X:
print(sequence)
###Output
tensor([ 8, 15, 17, 6, 7, 11, 14, 2, 6, 14, 13, 2, 7])
tensor([15, 10, 4, 4, 17, 11, 16, 17, 6, 4, 3])
tensor([14, 9, 4, 14, 13, 4, 9, 7, 2, 17, 5])
tensor([ 1, 2, 3, 5, 2, 18, 15, 7])
tensor([12, 9, 7, 15])
###Markdown
Now that the input Tensors are sorted, let's build a PackedSequence with `pack_sequence`.
###Code
packed_sequence = pack_sequence(sorted_X)
print(packed_sequence)
###Output
PackedSequence(data=tensor([ 8, 15, 14, 1, 12, 15, 10, 9, 2, 9, 17, 4, 4, 3, 7, 6, 4, 14,
5, 15, 7, 17, 13, 2, 11, 11, 4, 18, 14, 16, 9, 15, 2, 17, 7, 7,
6, 6, 2, 14, 4, 17, 13, 3, 5, 2, 7]), batch_sizes=tensor([5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 1, 1]))
###Markdown
Applying an embedding Now that we have built both a `PackedSequence` and a padded Tensor (`PaddedSequence`), we want to feed them into an RNN and test them. Before that: in the examples above the input consisted of character indices, but usually an embedded representation is what gets fed to the RNN. In this tutorial we will use a one-hot character embedding.
###Code
# one-hot embedding using PaddedSequence
eye = torch.eye(len(char_set)) # Identity matrix of shape (len(char_set), len(char_set))
embedded_tensor = eye[padded_sequence]
print(embedded_tensor.shape) # shape: (Batch_size, max_sequence_length, number_of_input_tokens)
# one-hot embedding using PackedSequence
embedded_packed_seq = pack_sequence([eye[X[idx]] for idx in sorted_idx])
print(embedded_packed_seq.data.shape)
###Output
torch.Size([47, 19])
###Markdown
Building an RNN model Let's build a simple RNN model.
###Code
# declare RNN
rnn = torch.nn.RNN(input_size=len(char_set), hidden_size=30, batch_first=True)
###Output
_____no_output_____
###Markdown
Let's feed the `PaddedSequence` into the RNN.
###Code
rnn_output, hidden = rnn(embedded_tensor)
print(rnn_output.shape) # shape: (batch_size, max_seq_length, hidden_size)
print(hidden.shape) # shape: (num_layers * num_directions, batch_size, hidden_size)
###Output
torch.Size([5, 13, 30])
torch.Size([1, 5, 30])
###Markdown
Let's feed the `PackedSequence` into the RNN.
###Code
rnn_output, hidden = rnn(embedded_packed_seq)
print(rnn_output.data.shape)
print(hidden.data.shape)
###Output
torch.Size([47, 30])
torch.Size([1, 5, 30])
###Markdown
`pad_packed_sequence` This function converts a `PackedSequence` back into a `PaddedSequence` (a Tensor). Because a `PackedSequence` also stores the length of each sequence, the function returns the Tensor together with a list of lengths as a tuple. Return value: (Tensor, list_of_lengths)
###Code
unpacked_sequence, seq_lengths = pad_packed_sequence(embedded_packed_seq, batch_first=True)
print(unpacked_sequence.shape)
print(seq_lengths)
###Output
torch.Size([5, 13, 19])
tensor([13, 11, 11, 8, 4])
###Markdown
`pack_padded_sequence` Conversely, there is also a function that converts a padded Tensor (`PaddedSequence`) into a `PackedSequence`. Since `pack_padded_sequence` does not know the actual sequence lengths, they must be passed in as a parameter. Note that the input `PaddedSequence` can only be converted correctly into a `PackedSequence` if it satisfies the condition mentioned earlier, i.e. it is **sorted in descending order by length**. The `padded_sequence` variable we created earlier does not satisfy this condition, so we will create a new one.
###Code
embedded_padded_sequence = eye[pad_sequence(sorted_X, batch_first=True)]
print(embedded_padded_sequence.shape)
###Output
torch.Size([5, 13, 19])
###Markdown
Now let's convert this padded Tensor into a `PackedSequence`.
###Code
sorted_lengths = sorted(lengths, reverse=True)
new_packed_sequence = pack_padded_sequence(embedded_padded_sequence, sorted_lengths, batch_first=True)
print(new_packed_sequence.data.shape)
print(new_packed_sequence.batch_sizes)
###Output
torch.Size([47, 19])
tensor([5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 1, 1])
|
Resnet_50Transfer_Learning_CIFAR_10.ipynb | ###Markdown
Transfer LearningIn this notebook, you will perform transfer learning to train CIFAR-10 dataset on ResNet50 model available in Keras. Imports
###Code
import os, re, time, json
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from matplotlib import pyplot as plt
import tensorflow_datasets as tfds
print("Tensorflow version " + tf.__version__)
###Output
_____no_output_____
###Markdown
Parameters - Define the batch size- Define the class (category) names
###Code
BATCH_SIZE = 32
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
###Output
_____no_output_____
###Markdown
Define some functions that will help you to create some visualizations. (These will be used later)
###Code
#@title Visualization Utilities[RUN ME]
#Matplotlib config
plt.rc('image', cmap='gray')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# utility to display a row of digits with their predictions
def display_images(digits, predictions, labels, title):
n = 10
indexes = np.random.choice(len(predictions), size=n)
n_digits = digits[indexes]
n_predictions = predictions[indexes]
n_predictions = n_predictions.reshape((n,))
n_labels = labels[indexes]
fig = plt.figure(figsize=(20, 4))
plt.title(title)
plt.yticks([])
plt.xticks([])
for i in range(10):
ax = fig.add_subplot(1, 10, i+1)
class_index = n_predictions[i]
plt.xlabel(classes[class_index])
plt.xticks([])
plt.yticks([])
plt.imshow(n_digits[i])
# utility to display training and validation curves
def plot_metrics(metric_name, title, ylim=5):
plt.title(title)
plt.ylim(0,ylim)
plt.plot(history.history[metric_name],color='blue',label=metric_name)
plt.plot(history.history['val_' + metric_name],color='green',label='val_' + metric_name)
###Output
_____no_output_____
###Markdown
Loading and Preprocessing Data[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset has 32 x 32 RGB images belonging to 10 classes. You will load the dataset from Keras.
###Code
(training_images, training_labels) , (validation_images, validation_labels) = tf.keras.datasets.cifar10.load_data()
###Output
_____no_output_____
###Markdown
Visualize DatasetUse the `display_images` utility to view some of the images and their class labels.
###Code
display_images(training_images, training_labels, training_labels, "Training Data" )
display_images(validation_images, validation_labels, validation_labels, "Validation Data" )
###Output
_____no_output_____
###Markdown
Preprocess DatasetHere, you'll perform normalization on images in training and validation set. - You'll use the function [preprocess_input](https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py) from the ResNet50 model in Keras.
###Code
def preprocess_image_input(input_images):
input_images = input_images.astype('float32')
output_ims = tf.keras.applications.resnet50.preprocess_input(input_images)
return output_ims
train_X = preprocess_image_input(training_images)
valid_X = preprocess_image_input(validation_images)
###Output
_____no_output_____
###Markdown
Define the NetworkYou will be performing transfer learning on **ResNet50** available in Keras.- You'll load pre-trained **imagenet weights** to the model.- You'll choose to retain all layers of **ResNet50** along with the final classification layers.
###Code
'''
Feature Extraction is performed by ResNet50 pretrained on imagenet weights.
Input size is 224 x 224.
'''
def feature_extractor(inputs):
feature_extractor = tf.keras.applications.resnet.ResNet50(input_shape=(224, 224, 3),
include_top=False,
weights='imagenet')(inputs)
return feature_extractor
'''
Defines final dense layers and subsequent softmax layer for classification.
'''
def classifier(inputs):
x = tf.keras.layers.GlobalAveragePooling2D()(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1024, activation="relu")(x)
x = tf.keras.layers.Dense(512, activation="relu")(x)
x = tf.keras.layers.Dense(10, activation="softmax", name="classification")(x)
return x
'''
Since input image size is (32 x 32), first upsample the image by factor of (7x7) to transform it to (224 x 224)
Connect the feature extraction and "classifier" layers to build the model.
'''
def final_model(inputs):
resize = tf.keras.layers.UpSampling2D(size=(7,7))(inputs)
resnet_feature_extractor = feature_extractor(resize)
classification_output = classifier(resnet_feature_extractor)
return classification_output
'''
Define the model and compile it.
Use Stochastic Gradient Descent as the optimizer.
Use Sparse Categorical CrossEntropy as the loss function.
'''
def define_compile_model():
inputs = tf.keras.layers.Input(shape=(32,32,3))
classification_output = final_model(inputs)
model = tf.keras.Model(inputs=inputs, outputs = classification_output)
model.compile(optimizer='SGD',
loss='sparse_categorical_crossentropy',
metrics = ['accuracy'])
return model
model = define_compile_model()
model.summary()
###Output
_____no_output_____
###Markdown
Train the model
###Code
# this will take around 20 minutes to complete
EPOCHS = 4
history = model.fit(train_X, training_labels, epochs=EPOCHS, validation_data = (valid_X, validation_labels), batch_size=64)
###Output
_____no_output_____
###Markdown
Evaluate the ModelCalculate the loss and accuracy metrics using the model's `.evaluate` function.
###Code
loss, accuracy = model.evaluate(valid_X, validation_labels, batch_size=64)
###Output
_____no_output_____
###Markdown
Plot Loss and Accuracy CurvesPlot the loss (in blue) and validation loss (in green).
###Code
plot_metrics("loss", "Loss")
###Output
_____no_output_____
###Markdown
Plot the training accuracy (blue) as well as the validation accuracy (green).
###Code
plot_metrics("accuracy", "Accuracy")
###Output
_____no_output_____
###Markdown
Visualize predictionsYou can take a look at the predictions on the validation set.
###Code
probabilities = model.predict(valid_X, batch_size=64)
probabilities = np.argmax(probabilities, axis = 1)
display_images(validation_images, probabilities, validation_labels, "Bad predictions indicated in red.")
###Output
_____no_output_____ |
notebooks/quickstarter.ipynb | ###Markdown
Welcome to the `atiim` quickstarter! `atiim` is the Area-Time Inundation Index Model, which was created to address the challenge of rapidly characterizing spatiotemporally-complex inundation patterns in dynamic systems, such as estuarine tidal-fluvial environments. Load packages
###Code
import os
import atiim
# this is atiim's built in sample data
from atiim import SampleData
###Output
_____no_output_____
###Markdown
Setup data
###Code
# load sample data
sample_data = SampleData()
gage_data_file = sample_data.sample_gage_data_file
dem_file = sample_data.sample_dem
basin_shp = sample_data.sample_basin_shapefile
gage_shp = sample_data.sample_gage_shapefile
# directory to store output files
output_dir = os.path.dirname(gage_data_file)
###Output
_____no_output_____
###Markdown
Exploring the gage data
###Code
gadf = atiim.import_gage_data(gage_data_file)
gadf.head(2)
###Output
_____no_output_____
###Markdown
Plot the water surface elevation time series from the gage data
###Code
atiim.plot_wse_timeseries(data=gadf,
save_plot=False,
show_plot=True)
###Output
_____no_output_____
###Markdown
Plot the cumulative distribution of water surface elevation from the gage data
###Code
atiim.plot_wse_cumulative_distribution(data=gage_data_file)
###Output
_____no_output_____
###Markdown
Plot the probability density of water surface elevation from the gage data
###Code
atiim.plot_wse_probability_density(data=gage_data_file)
###Output
_____no_output_____
###Markdown
Plot the exceedance probability of water surface elevation from the gage data
###Code
atiim.plot_wse_exceedance_probability(data=gage_data_file)
###Output
_____no_output_____
###Markdown
Simulate and explore the area of inundation through the time series of water surface elevations Simulate inundation over the area of interest using the gage data `n_jobs` can be set to run all elevation slices in parallel. Default setting is `1` to run sequentially. See `help(atiim.simulate_inundation)` for more information.
###Code
%%time
df = atiim.simulate_inundation(dem_file=dem_file,
basin_shp=basin_shp,
gage_shp=gage_shp,
gage_data_file=gage_data_file,
run_name='test_1',
output_directory=output_dir,
write_csv=False,
elevation_interval=0.1,
hour_interval=1.0,
n_jobs=1,
verbose=True)
df.head(2)
###Output
CPU times: user 3.16 s, sys: 152 ms, total: 3.32 s
Wall time: 3.45 s
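###Markdown
As noted above, `n_jobs` controls how many elevation slices are processed in parallel. A sketch of the same simulation with parallel workers — the value 4 is arbitrary and should be adjusted to the cores available on your machine:
###Code
# identical simulation, but processing elevation slices on 4 parallel workers
df_parallel = atiim.simulate_inundation(dem_file=dem_file,
basin_shp=basin_shp,
gage_shp=gage_shp,
gage_data_file=gage_data_file,
run_name='test_1_parallel',
output_directory=output_dir,
write_csv=False,
elevation_interval=0.1,
hour_interval=1.0,
n_jobs=4,
verbose=False)
###Output
_____no_output_____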
###Markdown
Plot the hectare hours of inundation
###Code
atiim.plot_inundation_hectare_hours(data=df)
###Output
_____no_output_____
###Markdown
Plot the inundation perimeter by water surface elevation
###Code
atiim.plot_inundation_perimeter(data=df)
###Output
_____no_output_____
###Markdown
Plot the inundated area by water surface elevation with the bankfull elevation noted
###Code
atiim.plot_inundation_area(data=df)
###Output
_____no_output_____
###Markdown
Explore the DEM Generate hypsometric curve data
###Code
hydf = atiim.hypsometric_curve(dem_file=dem_file,
elevation_interval=0.1)
hydf.head(2)
###Output
_____no_output_____
###Markdown
Plot the hypsometric curve by area
###Code
atiim.plot_hypsometric_curve(data=hydf)
###Output
_____no_output_____
###Markdown
Welcome to the cerf quickstarter! `cerf` is an open-source geospatial Python package for evaluating and analyzing future electricity technology capacity expansion feasibility. Purpose `cerf` was created to:- Evaluate the feasibility of a future scenario-driven electricity technology capacity expansion plan as generated by a parent model,- Site power plants in the least cost configuration when considering regional economics an on-the-ground barriers to siting,- Assist planners and modelers of alternate future realizations of the electricity system to gain an understanding of how siting costs and service area congestion may respond under certain stressors. A brief introductionThe Capacity Expansion Regional Feasibility model (cerf) helps us evaluate the feasibility and structure of future electricity capacity expansion plans by siting power plants in areas that have been deemed the least cost option. We can use cerf to gain an understanding of topics such as: 1) whether or not future projected electricity expansion plans from models such as GCAM are possible to achieve, 2) where and which on-the-ground barriers to siting (e.g., protected areas, cooling water availability) may influence our ability to achieve certain expansions, and 3) how power plant infrastructure build outs and value may evolve into the future when considering locational marginal pricing (LMP) based on the supply and demand of electricity from a grid operations model.Each grid cell in cerf is given an initial value of suitable (0) or unsuitable (1) based on a collection of suitability criteria gleaned from the literature. cerf’s default suitability layers include both those that are common to all thermal technologies as well as technology-specific suitability criteria. Common suitability layers represent categories such as protected lands, critical habitat areas, and much more. Technology-specific suitability layers are those that satisfy requirements that may not be applicable to all technologies. An example would be minimum mean annual flow requirements for cooling water availability for individual thermal technologies.Though cerf provides sample data to run the conterminous United States (CONUS), it could be extended to function for any country or set of regions that had the following prerequisite data sources: a spatial representation of substations or electricity transmission infrastructure, a spatial representation of gas pipeline infrastructure if applicable, any regionally applicable spatial data to construct suitability rasters from, access to hourly zonal LMP, and access to technology-specific information and each technologies accompanying electricity capacity expansion plan per region. The Global Change Analysis Model (GCAM) is used to build our expansion plans and establish our technology-specific requirements through the end of the century. We derive our LMP from a grid operations model that also is harmonized with GCAM to provide consistent projections of energy system evolution. See more about how to generalize cerf for your research here.We introduce a metric named Net Locational Cost (NLC) that is used compete power plant technologies for each grid cell based on the least cost option to site. NLC is calculated by subtracting the Net Operating Value (NOV) of a proposed power plant from the cost of its interconnection to the grid to represent the potential deployment value. 
Both the NOV parameter which incorporates many technology-specific values such as variable operations and maintenance costs, carbon price, heat rate, etc. and the interconnection cost parameter used for both electricity transmission and gas pipelines are configurable per time step. All equations used in cerf are described in detail in the [documentation](https://immm-sfa.github.io/cerf/user_guide.htmlfundamental-equations-and-concepts). Load packages
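Restated as a worked equation (per the description above): $NLC = C_{interconnection} - NOV$ for each grid cell and candidate technology, so the cell with the smallest NLC is the least-cost place to site that technology.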
###Code
import cerf
###Output
_____no_output_____
###Markdown
Install package data **NOTE**: The package data will require approximately 195 MB of storage
###Code
cerf.install_package_data()
###Output
_____no_output_____
###Markdown
Conduct a run with CERF We will be exploring the main functionality of the `cerf` package using our example data which is meant for illustrative purposes only. `cerf` runs using a single YAML configuration file that contains project and technology-specific settings, an electricity capacity expansion plan, and LMP zone pricing data which is described in detail in the docs [here](https://immm-sfa.github.io/cerf/). Expansion plans and technology data are generally generated by models such as GCAM which capture multi-sector dynamics that represent alternate futures based on scenario assumptions for socioeconomics, radiative forcing, etc. The `cerf` package also comes equipped with power plant siting suitability data at a 1-km resolution over the CONUS, publicly available data from EIA and HIFLD for transmission and pipeline infrastructure, and generic 8760 locational marginal pricing similar to what you could model using your preferred grid operations model. Get up and running right away! 1. Run `cerf` to site power plants in an expansion plan for a single year for the CONUS
###Code
# sample year
yr = 2030
# load the sample configuration file path for the target year
config_file = cerf.config_file(yr)
# run the configuration for the target year and return a data frame
result_df = cerf.run(config_file, write_output=False)
###Output
_____no_output_____
###Markdown
`cerf` results are returned as a Pandas DataFrame Each record is a sited power plant having a geographic location and other siting attributes. Reminder: `cerf` uses the `USA_Contiguous_Albers_Equal_Area_Conic` projected coordinate reference system in its CONUS example data, so the `xcoord` and `ycoord` are relative to that projection.
###Code
result_df.head()
###Output
_____no_output_____
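###Markdown
Because `xcoord` and `ycoord` are expressed in the USA_Contiguous_Albers_Equal_Area_Conic projection noted above, the results can be wrapped in a GeoDataFrame for mapping. A sketch, assuming `geopandas` is installed and that ESRI:102003 is the matching projection code:
###Code
import geopandas as gpd

# build point geometries from the projected coordinates returned by cerf
gdf = gpd.GeoDataFrame(result_df,
geometry=gpd.points_from_xy(result_df['xcoord'], result_df['ycoord']),
crs='ESRI:102003')
gdf.head()
###Output
_____no_output_____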
###Markdown
2. Run `cerf` to site power plants in an expansion plan for multiple years for the CONUS This exercise demonstrates how to inherit sites from a previous year's results and keep them in the mix if they have not yet reached retirement. If this is done in `cerf`, users should ensure that their expansion plan is only for new vintage each timestep.
###Code
import cerf
# process year 2010, 2030, and 2050
for index, yr in enumerate([2010, 2030, 2050]):
print(f"\nProcessing year: {yr}")
# load the sample configuration file path for the target year
config_file = cerf.config_file(yr)
# do not intialize the run with previously sited data if it is the first time step
if index == 0:
result_df = cerf.run(config_file, write_output=False)
else:
result_df = cerf.run(config_file, write_output=False, initialize_site_data=result_df)
###Output
_____no_output_____
###Markdown
Explore the results that account for retirement Since we inherited the each year, and we are only siting new vintage per year, we see power plants from multiple technlogies until they reach their retirement age. We can narrow in on `biomass` power plants in Virginia to see this:
###Code
result_df.loc[(result_df['region_name'] == 'virginia') & (result_df['tech_id'] == 9)]
###Output
_____no_output_____
###Markdown
Plot the output
###Code
cerf.plot_siting(result_df)
###Output
_____no_output_____ |
notebooks/dense_sentiment_classifier.ipynb | ###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. Load dependencies
###Code
import tensorflow
from tensorflow.keras.datasets import imdb # new!
from tensorflow.keras.preprocessing.sequence import pad_sequences #new!
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.layers import Embedding # new!
from tensorflow.keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * the Keras text utilities [here](https://keras.io/preprocessing/text/) quickly preprocess natural language and convert it into an index* the `keras.preprocessing.text.Tokenizer` class may do everything you need in one line: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index
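For illustration, here is a minimal sketch of that `Tokenizer` workflow on a toy corpus (it is not used by the IMDB pipeline below, which ships pre-tokenized):
###Code
from tensorflow.keras.preprocessing.text import Tokenizer

toy_corpus = ['The film was wonderful!', 'The film was dreadful...']

# fit a word-level tokenizer that lowercases, strips punctuation, and keeps the most frequent tokens
toy_tokenizer = Tokenizer(num_words=100)
toy_tokenizer.fit_on_texts(toy_corpus)
print(toy_tokenizer.word_index)
print(toy_tokenizer.texts_to_sequences(toy_corpus))
###Output
_____no_output_____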
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words,
skip_top=n_words_to_skip)
###Output
_____no_output_____
###Markdown
**N.B.**: If you're using Google Colab and the above line of code throws this error: [ValueError: Object arrays cannot be loaded when allow_pickle=False](https://stackoverflow.com/questions/55890813/how-to-fix-object-arrays-cannot-be-loaded-when-allow-pickle-false-for-imdb-loa)As of May 24th, 2019 you can resolve this error by executing `!pip install numpy==1.16.2` and restarting the runtime (by default, Colab uses a later version of NumPy -- 1.16.3 -- that causes the error).
###Code
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = tensorflow.keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+
"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 2s 80us/step - loss: 0.5612 - acc: 0.6892 - val_loss: 0.3630 - val_acc: 0.8398
Epoch 2/4
25000/25000 [==============================] - 2s 69us/step - loss: 0.2851 - acc: 0.8841 - val_loss: 0.3486 - val_acc: 0.8447
Epoch 3/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.1158 - acc: 0.9646 - val_loss: 0.4252 - val_acc: 0.8337
Epoch 4/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.0237 - acc: 0.9961 - val_loss: 0.5304 - val_acc: 0.8340
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.02.hdf5") # NOT zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
y_valid[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[386])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[224])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/jonkrohn/DLTFpT/blob/master/notebooks/dense_sentiment_classifier.ipynb) Load dependencies
###Code
import tensorflow
from tensorflow.keras.datasets import imdb # new!
from tensorflow.keras.preprocessing.sequence import pad_sequences #new!
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.layers import Embedding # new!
from tensorflow.keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * the Keras text utilities [here](https://keras.io/preprocessing/text/) quickly preprocess natural language and convert it into an index* the `keras.preprocessing.text.Tokenizer` class may do everything you need in one line: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words,
skip_top=n_words_to_skip)
###Output
_____no_output_____
###Markdown
**N.B.**: If you're using Google Colab and the above line of code throws this error: [ValueError: Object arrays cannot be loaded when allow_pickle=False](https://stackoverflow.com/questions/55890813/how-to-fix-object-arrays-cannot-be-loaded-when-allow-pickle-false-for-imdb-loa)As of May 24th, 2019 you can resolve this error by executing `!pip install numpy==1.16.2` and restarting the runtime (by default, Colab uses a later version of NumPy -- 1.16.3 -- that causes the error).
###Code
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = tensorflow.keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+
"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 2s 80us/step - loss: 0.5612 - acc: 0.6892 - val_loss: 0.3630 - val_acc: 0.8398
Epoch 2/4
25000/25000 [==============================] - 2s 69us/step - loss: 0.2851 - acc: 0.8841 - val_loss: 0.3486 - val_acc: 0.8447
Epoch 3/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.1158 - acc: 0.9646 - val_loss: 0.4252 - val_acc: 0.8337
Epoch 4/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.0237 - acc: 0.9961 - val_loss: 0.5304 - val_acc: 0.8340
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.02.hdf5") # NOT zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
y_valid[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[386])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[224])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. Load dependencies
###Code
import tensorflow
from tensorflow.keras.datasets import imdb # new!
from tensorflow.keras.preprocessing.sequence import pad_sequences #new!
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.layers import Embedding # new!
from tensorflow.keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * The TensorFlow Keras module's text utilities [here](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text) quickly preprocess natural language and convert it into an index* The `Tokenizer` class covered therein may do everything you need in a single line of code: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index* Other natural language preprocessing steps you may want to consider for your particular dataset and application are covered in the [*Natural Language Preprocessing* notebook](https://github.com/jonkrohn/DLTFpT/blob/master/notebooks/natural_language_preprocessing.ipynb), including: * removing stop words * either stemming or lemmatization * colocating n-grams, such as bigrams and trigrams
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words,
skip_top=n_words_to_skip)
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = tensorflow.keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
# hidden layer
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
# second hidden layer
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+
"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 15s 606us/sample - loss: 0.5229 - accuracy: 0.7160 - val_loss: 0.3527 - val_accuracy: 0.8431
Epoch 2/4
25000/25000 [==============================] - 11s 436us/sample - loss: 0.2697 - accuracy: 0.8933 - val_loss: 0.3505 - val_accuracy: 0.8475
Epoch 3/4
25000/25000 [==============================] - 14s 558us/sample - loss: 0.1118 - accuracy: 0.9688 - val_loss: 0.4315 - val_accuracy: 0.8296
Epoch 4/4
25000/25000 [==============================] - 12s 489us/sample - loss: 0.0248 - accuracy: 0.9965 - val_loss: 0.5296 - val_accuracy: 0.8301
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.02.hdf5") # NOT zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
y_valid[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[386])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[224])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. Load dependencies
###Code
import tensorflow
from tensorflow.keras.datasets import imdb # new!
from tensorflow.keras.preprocessing.sequence import pad_sequences #new!
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.layers import Embedding # new!
from tensorflow.keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * the Keras text utilities [here](https://keras.io/preprocessing/text/) quickly preprocess natural language and convert it into an index* the `keras.preprocessing.text.Tokenizer` class may do everything you need in one line: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words,
skip_top=n_words_to_skip)
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = tensorflow.keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+
"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 2s 80us/step - loss: 0.5612 - acc: 0.6892 - val_loss: 0.3630 - val_acc: 0.8398
Epoch 2/4
25000/25000 [==============================] - 2s 69us/step - loss: 0.2851 - acc: 0.8841 - val_loss: 0.3486 - val_acc: 0.8447
Epoch 3/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.1158 - acc: 0.9646 - val_loss: 0.4252 - val_acc: 0.8337
Epoch 4/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.0237 - acc: 0.9961 - val_loss: 0.5304 - val_acc: 0.8340
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.02.hdf5") # NOT zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
y_valid[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[386])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[224])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. Load dependencies
###Code
import tensorflow
from tensorflow.keras.datasets import imdb # new!
from tensorflow.keras.preprocessing.sequence import pad_sequences #new!
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.layers import Embedding # new!
from tensorflow.keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * The TensorFlow Keras module's text utilities [here](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text) quickly preprocess natural language and convert it into an index* The `Tokenizer` class covered therein may do everything you need in a single line of code: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index* Other natural language preprocessing steps you may want to consider for your particular dataset and application are covered in the [*Natural Language Preprocessing* notebook](https://github.com/jonkrohn/DLTFpT/blob/master/notebooks/natural_language_preprocessing.ipynb), including: * removing stop words * either stemming or lemmatization * colocating n-grams, such as bigrams and trigrams
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words,
skip_top=n_words_to_skip)
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = tensorflow.keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+
"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 2s 80us/step - loss: 0.5612 - acc: 0.6892 - val_loss: 0.3630 - val_acc: 0.8398
Epoch 2/4
25000/25000 [==============================] - 2s 69us/step - loss: 0.2851 - acc: 0.8841 - val_loss: 0.3486 - val_acc: 0.8447
Epoch 3/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.1158 - acc: 0.9646 - val_loss: 0.4252 - val_acc: 0.8337
Epoch 4/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.0237 - acc: 0.9961 - val_loss: 0.5304 - val_acc: 0.8340
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.02.hdf5") # NOT zero-indexed
y_hat = model.predict_proba(x_valid) # In recent Keras versions, if .predict_proba() throws an error, try .predict()
len(y_hat)
y_hat[0]
y_valid[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[386])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[224])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. Load dependencies
###Code
import tensorflow
from tensorflow.keras.datasets import imdb # new!
from tensorflow.keras.preprocessing.sequence import pad_sequences #new!
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.layers import Embedding # new!
from tensorflow.keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * The TensorFlow Keras module's text utilities [here](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text) quickly preprocess natural language and convert it into an index* The `Tokenizer` class covered therein may do everything you need in a single line of code: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index* Other natural language preprocessing steps you may want to consider for your particular dataset and application are covered in the [*Natural Language Preprocessing* notebook](https://github.com/jonkrohn/DLTFpT/blob/master/notebooks/natural_language_preprocessing.ipynb), including: * removing stop words * either stemming or lemmatization * colocating n-grams, such as bigrams and trigrams
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words,
skip_top=n_words_to_skip)
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = tensorflow.keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+
"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 2s 80us/step - loss: 0.5612 - acc: 0.6892 - val_loss: 0.3630 - val_acc: 0.8398
Epoch 2/4
25000/25000 [==============================] - 2s 69us/step - loss: 0.2851 - acc: 0.8841 - val_loss: 0.3486 - val_acc: 0.8447
Epoch 3/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.1158 - acc: 0.9646 - val_loss: 0.4252 - val_acc: 0.8337
Epoch 4/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.0237 - acc: 0.9961 - val_loss: 0.5304 - val_acc: 0.8340
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.02.hdf5") # NOT zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
y_valid[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[386])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[224])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. Load dependencies
###Code
import keras
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Embedding # new!
from keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
%matplotlib inline
###Output
Using TensorFlow backend.
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * the Keras text utilities [here](https://keras.io/preprocessing/text/) quickly preprocess natural language and convert it into an index* the `keras.preprocessing.text.Tokenizer` class may do everything you need in one line: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words, skip_top=n_words_to_skip)
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
# 84.7% validation accuracy in epoch 2
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_valid, y_valid), callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 1s - loss: 0.5402 - acc: 0.7095 - val_loss: 0.3701 - val_acc: 0.8367
Epoch 2/4
25000/25000 [==============================] - 0s - loss: 0.2825 - acc: 0.8881 - val_loss: 0.3459 - val_acc: 0.8470
Epoch 3/4
25000/25000 [==============================] - 1s - loss: 0.1279 - acc: 0.9616 - val_loss: 0.4151 - val_acc: 0.8333
Epoch 4/4
25000/25000 [==============================] - 0s - loss: 0.0303 - acc: 0.9941 - val_loss: 0.5222 - val_acc: 0.8322
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.01.hdf5") # zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[489])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[927])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. Load dependencies
###Code
import keras
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Embedding # new!
from keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
%matplotlib inline
###Output
Using TensorFlow backend.
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * the Keras text utilities [here](https://keras.io/preprocessing/text/) quickly preprocess natural language and convert it into an index* the `keras.preprocessing.text.Tokenizer` class may do everything you need in one line: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words, skip_top=n_words_to_skip)
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
# CODE HERE
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid'))
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
# 84.7% validation accuracy in epoch 2
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_valid, y_valid), callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 4s - loss: 0.5394 - acc: 0.7058 - val_loss: 0.3658 - val_acc: 0.8368
Epoch 2/4
25000/25000 [==============================] - 3s - loss: 0.2732 - acc: 0.8924 - val_loss: 0.3493 - val_acc: 0.8465
Epoch 3/4
25000/25000 [==============================] - 3s - loss: 0.1131 - acc: 0.9664 - val_loss: 0.4306 - val_acc: 0.8312
Epoch 4/4
25000/25000 [==============================] - 3s - loss: 0.0255 - acc: 0.9956 - val_loss: 0.5283 - val_acc: 0.8337
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.01.hdf5") # zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[489])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[927])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. Load dependencies
###Code
import keras
from keras.datasets import imdb # new!
from keras.preprocessing.sequence import pad_sequences #new!
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Embedding # new!
from keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
%matplotlib inline
###Output
Using TensorFlow backend.
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * the Keras text utilities [here](https://keras.io/preprocessing/text/) quickly preprocess natural language and convert it into an index* the `keras.preprocessing.text.Tokenizer` class may do everything you need in one line: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words,
skip_top=n_words_to_skip)
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+
"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 2s 80us/step - loss: 0.5612 - acc: 0.6892 - val_loss: 0.3630 - val_acc: 0.8398
Epoch 2/4
25000/25000 [==============================] - 2s 69us/step - loss: 0.2851 - acc: 0.8841 - val_loss: 0.3486 - val_acc: 0.8447
Epoch 3/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.1158 - acc: 0.9646 - val_loss: 0.4252 - val_acc: 0.8337
Epoch 4/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.0237 - acc: 0.9961 - val_loss: 0.5304 - val_acc: 0.8340
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.02.hdf5") # NOT zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
y_valid[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[386])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[224])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/the-deep-learners/deep-learning-illustrated/blob/master/notebooks/dense_sentiment_classifier.ipynb) Load dependencies
###Code
import keras
from keras.datasets import imdb # new!
from keras.preprocessing.sequence import pad_sequences #new!
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Embedding # new!
from keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
%matplotlib inline
###Output
Using TensorFlow backend.
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * the Keras text utilities [here](https://keras.io/preprocessing/text/) quickly preprocess natural language and convert it into an index* the `keras.preprocessing.text.Tokenizer` class may do everything you need in one line: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words,
skip_top=n_words_to_skip)
###Output
_____no_output_____
###Markdown
 **N.B.**: If you're using Google Colab and the above line of code throws this error: [ValueError: Object arrays cannot be loaded when allow_pickle=False](https://stackoverflow.com/questions/55890813/how-to-fix-object-arrays-cannot-be-loaded-when-allow-pickle-false-for-imdb-loa) As of May 24th, 2019, you can resolve this error by executing `!pip install numpy==1.16.2` and restarting the runtime (by default, Colab uses a later version of NumPy -- 1.16.3 -- that causes the error).
###Code
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+
"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[modelcheckpoint])
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/4
25000/25000 [==============================] - 2s 80us/step - loss: 0.5612 - acc: 0.6892 - val_loss: 0.3630 - val_acc: 0.8398
Epoch 2/4
25000/25000 [==============================] - 2s 69us/step - loss: 0.2851 - acc: 0.8841 - val_loss: 0.3486 - val_acc: 0.8447
Epoch 3/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.1158 - acc: 0.9646 - val_loss: 0.4252 - val_acc: 0.8337
Epoch 4/4
25000/25000 [==============================] - 2s 70us/step - loss: 0.0237 - acc: 0.9961 - val_loss: 0.5304 - val_acc: 0.8340
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.02.hdf5") # NOT zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
y_valid[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[386])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[224])
###Output
_____no_output_____
###Markdown
Dense Sentiment Classifier In this notebook, we build a dense neural net to classify IMDB movie reviews by their sentiment. Load dependencies
###Code
import tensorflow
from tensorflow.keras.datasets import imdb # new!
from tensorflow.keras.preprocessing.sequence import pad_sequences #new!
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.layers import Embedding # new!
from tensorflow.keras.callbacks import ModelCheckpoint # new!
import os # new!
from sklearn.metrics import roc_auc_score, roc_curve # new!
import pandas as pd
import matplotlib.pyplot as plt # new!
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
# output directory name:
output_dir = 'model_output/dense'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000 # as per Maas et al. (2011); may not be optimal
n_words_to_skip = 50 # ditto
max_review_length = 100
pad_type = trunc_type = 'pre'
# neural network architecture:
n_dense = 64
dropout = 0.5
###Output
_____no_output_____
###Markdown
Load data For a given data set: * The TensorFlow Keras module's text utilities [here](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text) quickly preprocess natural language and convert it into an index* The `Tokenizer` class covered therein may do everything you need in a single line of code: * tokenize into words or characters * `num_words`: maximum unique tokens * filter out punctuation * lower case * convert words to an integer index* Other natural language preprocessing steps you may want to consider for your particular dataset and application are covered in the [*Natural Language Preprocessing* notebook](https://github.com/jonkrohn/DLTFpT/blob/master/notebooks/natural_language_preprocessing.ipynb), including: * removing stop words * either stemming or lemmatization * colocating n-grams, such as bigrams and trigrams
###Code
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words,
skip_top=n_words_to_skip)
x_train[0:6] # 0 reserved for padding; 1 would be starting character; 2 is unknown; 3 is most common word, etc.
for x in x_train[0:6]:
print(len(x))
y_train[0:6]
len(x_train), len(x_valid)
###Output
_____no_output_____
###Markdown
Restoring words from index
###Code
word_index = tensorflow.keras.datasets.imdb.get_word_index()
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["PAD"] = 0
word_index["START"] = 1
word_index["UNK"] = 2
word_index
index_word = {v:k for k,v in word_index.items()}
x_train[0]
' '.join(index_word[id] for id in x_train[0])
(all_x_train,_),(all_x_valid,_) = imdb.load_data()
' '.join(index_word[id] for id in all_x_train[0])
###Output
_____no_output_____
###Markdown
Preprocess data
###Code
x_train = pad_sequences(x_train, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length,
padding=pad_type, truncating=trunc_type, value=0)
x_train[0:6]
for x in x_train[0:6]:
print(len(x))
' '.join(index_word[id] for id in x_train[0])
' '.join(index_word[id] for id in x_train[5])
###Output
_____no_output_____
###Markdown
Design neural network architecture
###Code
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid')) # mathematically equivalent to softmax with two classes
model.summary() # so many parameters!
# embedding layer dimensions and parameters:
n_dim, n_unique_words, n_dim*n_unique_words
# ...flatten:
max_review_length, n_dim, n_dim*max_review_length
# ...dense:
n_dense, n_dim*max_review_length*n_dense + n_dense # weights + biases
# ...and output:
n_dense + 1
###Output
_____no_output_____
###Markdown
Configure model
###Code
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+
"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###Output
_____no_output_____
###Markdown
Train!
###Code
model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[modelcheckpoint])
###Output
_____no_output_____
###Markdown
Evaluate
###Code
model.load_weights(output_dir+"/weights.02.hdf5") # NOT zero-indexed
y_hat = model.predict_proba(x_valid)
len(y_hat)
y_hat[0]
y_valid[0]
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
pct_auc = roc_auc_score(y_valid, y_hat)*100.0
"{:0.2f}".format(pct_auc)
float_y_hat = []
for y in y_hat:
float_y_hat.append(y[0])
ydf = pd.DataFrame(list(zip(float_y_hat, y_valid)), columns=['y_hat', 'y'])
ydf.head(10)
' '.join(index_word[id] for id in all_x_valid[0])
' '.join(index_word[id] for id in all_x_valid[6])
ydf[(ydf.y == 0) & (ydf.y_hat > 0.9)].head(10)
' '.join(index_word[id] for id in all_x_valid[386])
ydf[(ydf.y == 1) & (ydf.y_hat < 0.1)].head(10)
' '.join(index_word[id] for id in all_x_valid[224])
###Output
_____no_output_____ |
ml/cc/exercises/intro_to_sparse_data_and_embeddings.ipynb | ###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
 Intro to Sparse Data and Embeddings **Learning Objectives:*** Convert movie-review string data to a sparse feature vector* Implement a sentiment-analysis linear model using a sparse feature vector* Implement a sentiment-analysis DNN model using an embedding that projects data into two dimensions* Visualize the embedding to see what the model has learned about the relationships between words In this exercise, we'll explore sparse data and work with embeddings using text data from movie reviews (from the [ACL 2011 IMDB dataset](http://ai.stanford.edu/~amaas/data/sentiment/)). This data has already been processed into `tf.Example` format. Setup Let's import our dependencies and download the training and test data. [`tf.keras`](https://www.tensorflow.org/api_docs/python/tf/keras) includes a file download and caching tool that we can use to retrieve the data sets.
###Code
from __future__ import print_function
import collections
import io
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from IPython import display
from sklearn import metrics
tf.logging.set_verbosity(tf.logging.ERROR)
train_url = 'https://download.mlcc.google.com/mledu-datasets/sparse-data-embedding/train.tfrecord'
train_path = tf.keras.utils.get_file(train_url.split('/')[-1], train_url)
test_url = 'https://download.mlcc.google.com/mledu-datasets/sparse-data-embedding/test.tfrecord'
test_path = tf.keras.utils.get_file(test_url.split('/')[-1], test_url)
###Output
_____no_output_____
###Markdown
 Building a Sentiment Analysis Model Let's train a sentiment-analysis model on this data that predicts if a review is generally *favorable* (label of 1) or *unfavorable* (label of 0). To do so, we'll turn our string-value `terms` into feature vectors by using a *vocabulary*, a list of each term we expect to see in our data. For the purposes of this exercise, we've created a small vocabulary that focuses on a limited set of terms. Most of these terms were found to be strongly indicative of *favorable* or *unfavorable*, but some were just added because they're interesting. Each term in the vocabulary is mapped to a coordinate in our feature vector. To convert the string-value `terms` for an example into this vector format, we encode such that each coordinate gets a value of 0 if the vocabulary term does not appear in the example string, and a value of 1 if it does. Terms in an example that don't appear in the vocabulary are thrown away. **NOTE:** *We could of course use a larger vocabulary, and there are special tools for creating these. In addition, instead of just dropping terms that are not in the vocabulary, we can introduce a small number of OOV (out-of-vocabulary) buckets to which such terms can be hashed. We can also use a __feature hashing__ approach that hashes each term, instead of creating an explicit vocabulary. This works well in practice, but it loses the interpretability that is useful for this exercise. See the tf.feature_column module for tools that handle this.* Building the Input Pipeline First, let's configure the input pipeline to import our data into a TensorFlow model. We can use the following function to parse the training and test data (which is in [TFRecord](https://www.tensorflow.org/guide/datasetsconsuming_tfrecord_data) format) and return a dict of the features and the corresponding labels.
###Code
def _parse_function(record):
"""Extracts features and labels.
Args:
record: File path to a TFRecord file
Returns:
      A `tuple` `(features, labels)`:
features: A dict of tensors representing the features
labels: A tensor with the corresponding labels.
"""
features = {
"terms": tf.VarLenFeature(dtype=tf.string), # terms are strings of varying lengths
"labels": tf.FixedLenFeature(shape=[1], dtype=tf.float32) # labels are 0 or 1
}
parsed_features = tf.parse_single_example(record, features)
terms = parsed_features['terms'].values
labels = parsed_features['labels']
return {'terms':terms}, labels
###Output
_____no_output_____
###Markdown
To confirm our function is working as expected, let's construct a `TFRecordDataset` for the training data, and map the data to features and labels using the function above.
###Code
# Create the Dataset object.
ds = tf.data.TFRecordDataset(train_path)
# Map features and labels with the parse function.
ds = ds.map(_parse_function)
ds
###Output
_____no_output_____
###Markdown
Run the following cell to retrieve the first example from the training data set.
###Code
n = ds.make_one_shot_iterator().get_next()
sess = tf.Session()
sess.run(n)
###Output
_____no_output_____
###Markdown
Now, let's build a formal input function that we can pass to the `train()` method of a TensorFlow Estimator object.
###Code
# Create an input_fn that parses the tf.Examples from the given files,
# and split them into features and targets.
def _input_fn(input_filenames, num_epochs=None, shuffle=True):
# Same code as above; create a dataset and map features and labels.
ds = tf.data.TFRecordDataset(input_filenames)
ds = ds.map(_parse_function)
if shuffle:
ds = ds.shuffle(10000)
# Our feature data is variable-length, so we pad and batch
# each field of the dataset structure to whatever size is necessary.
ds = ds.padded_batch(25, ds.output_shapes)
ds = ds.repeat(num_epochs)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
###Output
_____no_output_____
###Markdown
 Task 1: Use a Linear Model with Sparse Inputs and an Explicit Vocabulary For our first model, we'll build a [`LinearClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/LinearClassifier) model using 50 informative terms; always start simple! The following code constructs the feature column for our terms. The [`categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list) function creates a feature column with the string-to-feature-vector mapping.
###Code
# 50 informative terms that compose our model vocabulary
informative_terms = ("bad", "great", "best", "worst", "fun", "beautiful",
"excellent", "poor", "boring", "awful", "terrible",
"definitely", "perfect", "liked", "worse", "waste",
"entertaining", "loved", "unfortunately", "amazing",
"enjoyed", "favorite", "horrible", "brilliant", "highly",
"simple", "annoying", "today", "hilarious", "enjoyable",
"dull", "fantastic", "poorly", "fails", "disappointing",
"disappointment", "not", "him", "her", "good", "time",
"?", ".", "!", "movie", "film", "action", "comedy",
"drama", "family")
terms_feature_column = tf.feature_column.categorical_column_with_vocabulary_list(key="terms", vocabulary_list=informative_terms)
###Output
_____no_output_____
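###Markdown
 As a rough, hypothetical illustration of the 0/1 vocabulary encoding described earlier (the review terms below are made up, not taken from the dataset), the feature vector a review effectively maps to can be sketched like this:
###Code
# Hypothetical sketch of the string-to-feature-vector mapping; the feature column handles this internally.
toy_review_terms = {"great", "fun", "popcorn"} # made-up review terms; "popcorn" is not in the vocabulary
toy_feature_vector = [1 if term in toy_review_terms else 0 for term in informative_terms]
sum(toy_feature_vector) # 2 -- only "great" and "fun" are among the 50 vocabulary terms; "popcorn" is dropped
###Output
_____no_output_____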
###Markdown
Next, we'll construct the `LinearClassifier`, train it on the training set, and evaluate it on the evaluation set. After you read through the code, run it and see how you do.
###Code
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
feature_columns = [ terms_feature_column ]
classifier = tf.estimator.LinearClassifier(
feature_columns=feature_columns,
optimizer=my_optimizer,
)
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
###Output
_____no_output_____
###Markdown
 Task 2: Use a Deep Neural Network (DNN) Model The above model is a linear model. It works quite well. But can we do better with a DNN model? Let's swap in a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) for the `LinearClassifier`. Run the following cell, and see how you do.
###Code
##################### Here's what we changed ##################################
classifier = tf.estimator.DNNClassifier( #
feature_columns=[tf.feature_column.indicator_column(terms_feature_column)], #
hidden_units=[20,20], #
optimizer=my_optimizer, #
) #
###############################################################################
try:
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
except ValueError as err:
print(err)
###Output
_____no_output_____
###Markdown
 Task 3: Use an Embedding with a DNN Model In this task, we'll implement our DNN model using an embedding column. An embedding column takes sparse data as input and returns a lower-dimensional dense vector as output. **NOTE:** *An embedding_column is usually the computationally most efficient option to use for training a model on sparse data. In an [optional section](scrollTo=XDMlGgRfKSVz) at the end of this exercise, we'll discuss in more depth the implementational differences between using an `embedding_column` and an `indicator_column`, and the tradeoffs of selecting one over the other.* In the following code, do the following: * Define the feature columns for the model using an `embedding_column` that projects the data into 2 dimensions (see the [TF docs](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column) for more details on the function signature for `embedding_column`).* Define a `DNNClassifier` with the following specifications: * Two hidden layers of 20 units each * Adagrad optimization with a learning rate of 0.1 * A `gradient_clip_norm` of 5.0 **NOTE:** *In practice, we might project to dimensions higher than 2, like 50 or 100. But for now, 2 dimensions is easy to visualize.* Hint
###Code
# Here's an example code snippet you might use to define the feature columns:
terms_embedding_column = tf.feature_column.embedding_column(terms_feature_column, dimension=2)
feature_columns = [ terms_embedding_column ]
###Output
_____no_output_____
###Markdown
Complete the Code Below
###Code
########################## YOUR CODE HERE ######################################
terms_embedding_column = # Define the embedding column
feature_columns = # Define the feature columns
classifier = # Define the DNNClassifier
################################################################################
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
###Output
_____no_output_____
###Markdown
SolutionClick below for a solution.
###Code
########################## SOLUTION CODE ########################################
terms_embedding_column = tf.feature_column.embedding_column(terms_feature_column, dimension=2)
feature_columns = [ terms_embedding_column ]
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[20,20],
optimizer=my_optimizer
)
#################################################################################
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
###Output
_____no_output_____
###Markdown
Task 4: Convince yourself there's actually an embedding in thereThe above model used an `embedding_column`, and it seemed to work, but this doesn't tell us much about what's going on internally. How can we check that the model is actually using an embedding inside?To start, let's look at the tensors in the model:
###Code
classifier.get_variable_names()
###Output
_____no_output_____
###Markdown
Okay, we can see that there is an embedding layer in there: `'dnn/input_from_feature_columns/input_layer/terms_embedding/...'`. (What's interesting here, by the way, is that this layer is trainable along with the rest of the model just as any hidden layer is.)Is the embedding layer the correct shape? Run the following code to find out. **NOTE:** *Remember, in our case, the embedding is a matrix that allows us to project a 50-dimensional vector down to 2 dimensions.*
###Code
classifier.get_variable_value('dnn/input_from_feature_columns/input_layer/terms_embedding/embedding_weights').shape
###Output
_____no_output_____
###Markdown
Spend some time manually checking the various layers and shapes to make sure everything is connected the way you would expect it would be. Task 5: Examine the EmbeddingLet's now take a look at the actual embedding space, and see where the terms end up in it. Do the following:1. Run the following code to see the embedding we trained in **Task 3**. Do things end up where you'd expect?2. Re-train the model by rerunning the code in **Task 3**, and then run the embedding visualization below again. What stays the same? What changes?3. Finally, re-train the model again using only 10 steps (which will yield a terrible model). Run the embedding visualization below again. What do you see now, and why?
###Code
import numpy as np
import matplotlib.pyplot as plt
embedding_matrix = classifier.get_variable_value('dnn/input_from_feature_columns/input_layer/terms_embedding/embedding_weights')
for term_index in range(len(informative_terms)):
# Create a one-hot encoding for our term. It has 0s everywhere, except for
# a single 1 in the coordinate that corresponds to that term.
term_vector = np.zeros(len(informative_terms))
term_vector[term_index] = 1
# We'll now project that one-hot vector into the embedding space.
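  # (Multiplying the one-hot vector by the embedding matrix simply selects row
  #  term_index of the matrix, i.e. that term's 2-D embedding.)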
embedding_xy = np.matmul(term_vector, embedding_matrix)
plt.text(embedding_xy[0],
embedding_xy[1],
informative_terms[term_index])
# Do a little setup to make sure the plot displays nicely.
plt.rcParams["figure.figsize"] = (15, 15)
plt.xlim(1.2 * embedding_matrix.min(), 1.2 * embedding_matrix.max())
plt.ylim(1.2 * embedding_matrix.min(), 1.2 * embedding_matrix.max())
plt.show()
###Output
_____no_output_____
###Markdown
Task 6: Try to improve the model's performanceSee if you can refine the model to improve performance. A couple things you may want to try:* **Changing hyperparameters**, or **using a different optimizer** like Adam (you may only gain one or two accuracy percentage points following these strategies).* **Adding additional terms to `informative_terms`.** There's a full vocabulary file with all 30,716 terms for this data set that you can use at: https://download.mlcc.google.com/mledu-datasets/sparse-data-embedding/terms.txt You can pick out additional terms from this vocabulary file, or use the whole thing via the `categorical_column_with_vocabulary_file` feature column.
###Code
# Download the vocabulary file.
terms_url = 'https://download.mlcc.google.com/mledu-datasets/sparse-data-embedding/terms.txt'
terms_path = tf.keras.utils.get_file(terms_url.split('/')[-1], terms_url)
# Create a feature column from "terms", using a full vocabulary file.
informative_terms = None
with io.open(terms_path, 'r', encoding='utf8') as f:
# Convert it to a set first to remove duplicates.
informative_terms = list(set(f.read().split()))
terms_feature_column = tf.feature_column.categorical_column_with_vocabulary_list(key="terms",
vocabulary_list=informative_terms)
terms_embedding_column = tf.feature_column.embedding_column(terms_feature_column, dimension=2)
feature_columns = [ terms_embedding_column ]
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10,10],
optimizer=my_optimizer
)
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
###Output
_____no_output_____
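###Markdown
As an aside (a sketch only, not executed here), the full vocabulary can also be wired in directly via the `categorical_column_with_vocabulary_file` feature column mentioned above, letting TensorFlow read `terms_path` itself instead of materializing the term list in Python:
###Code
# Hypothetical alternative to the vocabulary_list column built above; assumes
# the `terms_path` file downloaded earlier in this notebook.
terms_feature_column = tf.feature_column.categorical_column_with_vocabulary_file(
    key="terms",
    vocabulary_file=terms_path,
    num_oov_buckets=1)  # optionally hash unseen terms into an OOV bucket instead of dropping them
###Output
_____no_output_____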
###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Intro to Sparse Data and Embeddings**Learning Objectives:*** Convert movie-review string data to a sparse feature vector* Implement a sentiment-analysis linear model using a sparse feature vector* Implement a sentiment-analysis DNN model using an embedding that projects data into two dimensions* Visualize the embedding to see what the model has learned about the relationships between wordsIn this exercise, we'll explore sparse data and work with embeddings using text data from movie reviews (from the [ACL 2011 IMDB dataset](http://ai.stanford.edu/~amaas/data/sentiment/)). This data has already been processed into `tf.Example` format. SetupLet's import our dependencies and download the training and test data. [`tf.keras`](https://www.tensorflow.org/api_docs/python/tf/keras) includes a file download and caching tool that we can use to retrieve the data sets.
###Code
import collections
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from IPython import display
from sklearn import metrics
tf.logging.set_verbosity(tf.logging.ERROR)
train_url = 'https://storage.googleapis.com/mledu-datasets/sparse-data-embedding/train.tfrecord'
train_path = tf.keras.utils.get_file(train_url.split('/')[-1], train_url)
test_url = 'https://storage.googleapis.com/mledu-datasets/sparse-data-embedding/test.tfrecord'
test_path = tf.keras.utils.get_file(test_url.split('/')[-1], test_url)
###Output
_____no_output_____
###Markdown
Building a Sentiment Analysis Model Let's train a sentiment-analysis model on this data that predicts if a review is generally *favorable* (label of 1) or *unfavorable* (label of 0).To do so, we'll turn our string-value `terms` into feature vectors by using a *vocabulary*, a list of each term we expect to see in our data. For the purposes of this exercise, we've created a small vocabulary that focuses on a limited set of terms. Most of these terms were found to be strongly indicative of *favorable* or *unfavorable*, but some were just added because they're interesting.Each term in the vocabulary is mapped to a coordinate in our feature vector. To convert the string-value `terms` for an example into this vector format, we encode such that each coordinate gets a value of 0 if the vocabulary term does not appear in the example string, and a value of 1 if it does. Terms in an example that don't appear in the vocabulary are thrown away. **NOTE:** *We could of course use a larger vocabulary, and there are special tools for creating these. In addition, instead of just dropping terms that are not in the vocabulary, we can introduce a small number of OOV (out-of-vocabulary) buckets to which you can hash the terms not in the vocabulary. We can also use a __feature hashing__ approach that hashes each term, instead of creating an explicit vocabulary. This works well in practice, but loses interpretability, which is useful for this exercise. See the tf.feature_column module for tools handling this.* Building the Input Pipeline First, let's configure the input pipeline to import our data into a TensorFlow model. We can use the following function to parse the training and test data (which is in [TFRecord](https://www.tensorflow.org/programmers_guide/datasets) format) and return a dict of the features and the corresponding labels.
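As a minimal illustration of this multi-hot encoding, here is a sketch using plain NumPy, a made-up three-term vocabulary and a hypothetical review (separate from the actual pipeline below):
###Code
import numpy as np

vocabulary = ["bad", "great", "boring"]    # tiny illustrative vocabulary
review_terms = ["great", "fun", "great"]   # "fun" is not in the vocabulary and is simply dropped

# Coordinate i is 1.0 if vocabulary[i] occurs anywhere in the review, else 0.0.
feature_vector = np.array([1.0 if term in review_terms else 0.0
                           for term in vocabulary])
print(feature_vector)  # only the "great" coordinate is set to 1
###Output
_____no_output_____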
###Code
def _parse_function(record):
"""Extracts features and labels.
Args:
record: File path to a TFRecord file
Returns:
A `tuple` `(labels, features)`:
features: A dict of tensors representing the features
labels: A tensor with the corresponding labels.
"""
features = {
"terms": tf.VarLenFeature(dtype=tf.string), # terms are strings of varying lengths
"labels": tf.FixedLenFeature(shape=[1], dtype=tf.float32) # labels are 0 or 1
}
parsed_features = tf.parse_single_example(record, features)
terms = parsed_features['terms'].values
labels = parsed_features['labels']
return {'terms':terms}, labels
###Output
_____no_output_____
###Markdown
To confirm our function is working as expected, let's construct a `TFRecordDataset` for the training data, and map the data to features and labels using the function above.
###Code
# Create the Dataset object
ds = tf.data.TFRecordDataset(train_path)
# Map features and labels with the parse function
ds = ds.map(_parse_function)
ds
###Output
_____no_output_____
###Markdown
Run the following cell to retrieve the first example from the training data set.
###Code
n = ds.make_one_shot_iterator().get_next()
sess = tf.Session()
sess.run(n)
###Output
_____no_output_____
###Markdown
Now, let's build a formal input function that we can pass to the `train()` method of a TensorFlow Estimator object.
###Code
# Create an input_fn that parses the tf.Examples from the given files,
# and split them into features and targets.
def _input_fn(input_filenames, num_epochs=None, shuffle=True):
# Same code as above; create a dataset and map features and labels
ds = tf.data.TFRecordDataset(input_filenames)
ds = ds.map(_parse_function)
if shuffle:
ds = ds.shuffle(10000)
# Our feature data is variable-length, so we pad and batch
# each field of the dataset structure to whatever size is necessary
ds = ds.padded_batch(25, ds.output_shapes)
ds = ds.repeat(num_epochs)
# Return the next batch of data
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
###Output
_____no_output_____
###Markdown
Task 1: Use a Linear Model with Sparse Inputs and an Explicit VocabularyFor our first model, we'll build a [`LinearClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/LinearClassifier) model using 54 informative terms; always start simple!The following code constructs the feature column for our terms. The [`categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list) function creates a feature column with the string-to-feature-vector mapping.
###Code
# 54 informative terms that compose our model vocabulary
informative_terms = ("bad", "great", "best", "worst", "fun", "beautiful",
"excellent", "poor", "boring", "awful", "terrible",
"definitely", "perfect", "liked", "worse", "waste",
"entertaining", "loved", "unfortunately", "amazing",
"enjoyed", "favorite", "horrible", "brilliant", "highly",
"simple", "annoying", "today", "hilarious", "enjoyable",
"dull", "fantastic", "poorly", "fails", "disappointing",
"disappointment", "not", "him", "her", "good", "time",
"?", ".", "!", "movie", "film", "action", "comedy",
"drama", "family", "man", "woman", "boy", "girl")
terms_feature_column = tf.feature_column.categorical_column_with_vocabulary_list(key="terms", vocabulary_list=informative_terms)
###Output
_____no_output_____
###Markdown
Next, we'll construct the `LinearClassifier`, train it on the training set, and evaluate it on the evaluation set. After you read through the code, run it and see how you do.
###Code
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
feature_columns = [ terms_feature_column ]
classifier = tf.estimator.LinearClassifier(
feature_columns=feature_columns,
optimizer=my_optimizer,
)
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print "Training set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print "Test set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
###Output
_____no_output_____
###Markdown
Task 2: Use a Deep Neural Network (DNN) ModelThe above model is a linear model. It works quite well. But can we do better with a DNN model?Let's swap in a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) for the `LinearClassifier`. Run the following cell, and see how you do.
###Code
##################### Here's what we changed ##################################
classifier = tf.estimator.DNNClassifier( #
feature_columns=[tf.feature_column.indicator_column(terms_feature_column)], #
hidden_units=[20,20], #
optimizer=my_optimizer, #
) #
###############################################################################
try:
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1)
print "Training set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1)
print "Test set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
except ValueError as err:
print err
###Output
_____no_output_____
###Markdown
Task 3: Use an Embedding with a DNN ModelIn this task, we'll implement our DNN model using an embedding column. An embedding column takes sparse data as input and returns a lower-dimensional dense vector as output. **NOTE:** *An embedding_column is usually the computationally most efficient option to use for training a model on sparse data. In an [optional section](scrollTo=XDMlGgRfKSVz) at the end of this exercise, we'll discuss in more depth the implementational differences between using an `embedding_column` and an `indicator_column`, and the tradeoffs of selecting one over the other.* In the following code, do the following:* Define the feature columns for the model using an `embedding_column` that projects the data into 2 dimensions (see the [TF docs](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column) for more details on the function signature for `embedding_column`).* Define a `DNNClassifier` with the following specifications: * Two hidden layers of 20 units each * Adagrad optimization with a learning rate of 0.1 * A `gradient_clip_norm` of 5.0 **NOTE:** *In practice, we might project to dimensions higher than 2, like 50 or 100. But for now, 2 dimensions is easy to visualize.* Hint
###Code
# Here's an example code snippet you might use to define the feature columns:
terms_embedding_column = tf.feature_column.embedding_column(terms_feature_column, dimension=2)
feature_columns = [ terms_embedding_column ]
###Output
_____no_output_____
###Markdown
Complete the Code Below
###Code
########################## YOUR CODE HERE ######################################
terms_embedding_column = # Define the embedding column
feature_columns = # Define the feature columns
classifier = # Define the DNNClassifier
################################################################################
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print "Training set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print "Test set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
###Output
_____no_output_____
###Markdown
SolutionClick below for a solution.
###Code
########################## SOLUTION CODE ########################################
terms_embedding_column = tf.feature_column.embedding_column(terms_feature_column, dimension=2)
feature_columns = [ terms_embedding_column ]
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
  hidden_units=[20,20],
optimizer=my_optimizer
)
#################################################################################
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print "Training set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print "Test set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
###Output
_____no_output_____
###Markdown
Task 4: Convince yourself there's actually an embedding in thereThe above model used an `embedding_column`, and it seemed to work, but this doesn't tell us much about what's going on internally. How can we check that the model is actually using an embedding inside?To start, let's look at the tensors in the model:
###Code
classifier.get_variable_names()
###Output
_____no_output_____
###Markdown
Okay, we can see that there is an embedding layer in there: `'dnn/input_from_feature_columns/input_layer/terms_embedding/...'`. (What's interesting here, by the way, is that this layer is trainable along with the rest of the model just as any hidden layer is.)Is the embedding layer the correct shape? Run the following code to find out. **NOTE:** *Remember, in our case, the embedding is a matrix that allows us to project a 54-dimensional vector down to 2 dimensions.*
###Code
classifier.get_variable_value('dnn/input_from_feature_columns/input_layer/terms_embedding/embedding_weights').shape
###Output
_____no_output_____
###Markdown
Spend some time manually checking the various layers and shapes to make sure everything is connected the way you would expect it would be. Task 5: Examine the EmbeddingLet's now take a look at the actual embedding space, and see where the terms end up in it. Do the following:1. Run the following code to see the embedding we trained in **Task 3**. Do things end up where you'd expect?2. Re-train the model by rerunning the code in **Task 3**, and then run the embedding visualization below again. What stays the same? What changes?3. Finally, re-train the model again using only 10 steps (which will yield a terrible model). Run the embedding visualization below again. What do you see now, and why?
###Code
import numpy as np
import matplotlib.pyplot as plt
embedding_matrix = classifier.get_variable_value('dnn/input_from_feature_columns/input_layer/terms_embedding/embedding_weights')
for term_index in range(len(informative_terms)):
# Create a one-hot encoding for our term. It has 0's everywhere, except for
# a single 1 in the coordinate that corresponds to that term.
term_vector = np.zeros(len(informative_terms))
term_vector[term_index] = 1
# We'll now project that one-hot vector into the embedding space.
embedding_xy = np.matmul(term_vector, embedding_matrix)
plt.text(embedding_xy[0],
embedding_xy[1],
informative_terms[term_index])
# Do a little set-up to make sure the plot displays nicely.
plt.rcParams["figure.figsize"] = (12, 12)
plt.xlim(1.2 * embedding_matrix.min(), 1.2 * embedding_matrix.max())
plt.ylim(1.2 * embedding_matrix.min(), 1.2 * embedding_matrix.max())
plt.show()
###Output
_____no_output_____
###Markdown
Task 6: Try to improve the model's performanceSee if you can refine the model to improve performance. A couple things you may want to try:* **Changing hyperparameters**, or **using a different optimizer** like Adam (you may only gain one or two accuracy percentage points following these strategies).* **Adding additional terms to `informative_terms`.** There's a full vocabulary file with all 30,716 terms for this data set that you can use at: https://storage.googleapis.com/mledu-datasets/sparse-data-embedding/terms.txt You can pick out additional terms from this vocabulary file, or use the whole thing via the `categorical_column_with_vocabulary_file` feature column.
###Code
!wget https://storage.googleapis.com/mledu-datasets/sparse-data-embedding/terms.txt -O /tmp/terms.txt
# Create a feature column from "terms", using a full vocabulary file.
informative_terms = None
with open("/tmp/terms.txt", 'r') as f:
  # Convert it to a set first to remove duplicates.
informative_terms = list(set(f.read().split()))
terms_feature_column = tf.feature_column.categorical_column_with_vocabulary_list(key="terms",
vocabulary_list=informative_terms)
terms_embedding_column = tf.feature_column.embedding_column(terms_feature_column, dimension=2)
feature_columns = [ terms_embedding_column ]
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10,10],
optimizer=my_optimizer
)
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print "Training set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print "Test set metrics:"
for m in evaluation_metrics:
print m, evaluation_metrics[m]
print "---"
###Output
_____no_output_____
###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Intro to Sparse Data and Embeddings**Learning Objectives:*** Convert movie-review string data to a sparse feature vector* Implement a sentiment-analysis linear model using a sparse feature vector* Implement a sentiment-analysis DNN model using an embedding that projects data into two dimensions* Visualize the embedding to see what the model has learned about the relationships between wordsIn this exercise, we'll explore sparse data and work with embeddings using text data from movie reviews (from the [ACL 2011 IMDB dataset](http://ai.stanford.edu/~amaas/data/sentiment/)). This data has already been processed into `tf.Example` format. SetupLet's import our dependencies and download the training and test data. [`tf.keras`](https://www.tensorflow.org/api_docs/python/tf/keras) includes a file download and caching tool that we can use to retrieve the data sets.
###Code
from __future__ import print_function
import collections
import io
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from IPython import display
from sklearn import metrics
tf.logging.set_verbosity(tf.logging.ERROR)
train_url = 'https://storage.googleapis.com/mledu-datasets/sparse-data-embedding/train.tfrecord'
train_path = tf.keras.utils.get_file(train_url.split('/')[-1], train_url)
test_url = 'https://storage.googleapis.com/mledu-datasets/sparse-data-embedding/test.tfrecord'
test_path = tf.keras.utils.get_file(test_url.split('/')[-1], test_url)
###Output
_____no_output_____
###Markdown
Building a Sentiment Analysis Model Let's train a sentiment-analysis model on this data that predicts if a review is generally *favorable* (label of 1) or *unfavorable* (label of 0).To do so, we'll turn our string-value `terms` into feature vectors by using a *vocabulary*, a list of each term we expect to see in our data. For the purposes of this exercise, we've created a small vocabulary that focuses on a limited set of terms. Most of these terms were found to be strongly indicative of *favorable* or *unfavorable*, but some were just added because they're interesting.Each term in the vocabulary is mapped to a coordinate in our feature vector. To convert the string-value `terms` for an example into this vector format, we encode such that each coordinate gets a value of 0 if the vocabulary term does not appear in the example string, and a value of 1 if it does. Terms in an example that don't appear in the vocabulary are thrown away. **NOTE:** *We could of course use a larger vocabulary, and there are special tools for creating these. In addition, instead of just dropping terms that are not in the vocabulary, we can introduce a small number of OOV (out-of-vocabulary) buckets to which you can hash the terms not in the vocabulary. We can also use a __feature hashing__ approach that hashes each term, instead of creating an explicit vocabulary. This works well in practice, but loses interpretability, which is useful for this exercise. See the tf.feature_column module for tools handling this.* Building the Input Pipeline First, let's configure the input pipeline to import our data into a TensorFlow model. We can use the following function to parse the training and test data (which is in [TFRecord](https://www.tensorflow.org/programmers_guide/datasets) format) and return a dict of the features and the corresponding labels.
###Code
def _parse_function(record):
"""Extracts features and labels.
Args:
record: File path to a TFRecord file
Returns:
A `tuple` `(labels, features)`:
features: A dict of tensors representing the features
labels: A tensor with the corresponding labels.
"""
features = {
"terms": tf.VarLenFeature(dtype=tf.string), # terms are strings of varying lengths
"labels": tf.FixedLenFeature(shape=[1], dtype=tf.float32) # labels are 0 or 1
}
parsed_features = tf.parse_single_example(record, features)
terms = parsed_features['terms'].values
labels = parsed_features['labels']
return {'terms':terms}, labels
###Output
_____no_output_____
###Markdown
To confirm our function is working as expected, let's construct a `TFRecordDataset` for the training data, and map the data to features and labels using the function above.
###Code
# Create the Dataset object.
ds = tf.data.TFRecordDataset(train_path)
# Map features and labels with the parse function.
ds = ds.map(_parse_function)
ds
###Output
_____no_output_____
###Markdown
Run the following cell to retrieve the first example from the training data set.
###Code
n = ds.make_one_shot_iterator().get_next()
sess = tf.Session()
sess.run(n)
###Output
_____no_output_____
###Markdown
Now, let's build a formal input function that we can pass to the `train()` method of a TensorFlow Estimator object.
###Code
# Create an input_fn that parses the tf.Examples from the given files,
# and split them into features and targets.
def _input_fn(input_filenames, num_epochs=None, shuffle=True):
# Same code as above; create a dataset and map features and labels.
ds = tf.data.TFRecordDataset(input_filenames)
ds = ds.map(_parse_function)
if shuffle:
ds = ds.shuffle(10000)
# Our feature data is variable-length, so we pad and batch
# each field of the dataset structure to whatever size is necessary.
ds = ds.padded_batch(25, ds.output_shapes)
ds = ds.repeat(num_epochs)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
###Output
_____no_output_____
###Markdown
Task 1: Use a Linear Model with Sparse Inputs and an Explicit VocabularyFor our first model, we'll build a [`LinearClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/LinearClassifier) model using 50 informative terms; always start simple!The following code constructs the feature column for our terms. The [`categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list) function creates a feature column with the string-to-feature-vector mapping.
###Code
# 50 informative terms that compose our model vocabulary
informative_terms = ("bad", "great", "best", "worst", "fun", "beautiful",
"excellent", "poor", "boring", "awful", "terrible",
"definitely", "perfect", "liked", "worse", "waste",
"entertaining", "loved", "unfortunately", "amazing",
"enjoyed", "favorite", "horrible", "brilliant", "highly",
"simple", "annoying", "today", "hilarious", "enjoyable",
"dull", "fantastic", "poorly", "fails", "disappointing",
"disappointment", "not", "him", "her", "good", "time",
"?", ".", "!", "movie", "film", "action", "comedy",
"drama", "family")
terms_feature_column = tf.feature_column.categorical_column_with_vocabulary_list(key="terms", vocabulary_list=informative_terms)
###Output
_____no_output_____
###Markdown
Next, we'll construct the `LinearClassifier`, train it on the training set, and evaluate it on the evaluation set. After you read through the code, run it and see how you do.
###Code
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
feature_columns = [ terms_feature_column ]
classifier = tf.estimator.LinearClassifier(
feature_columns=feature_columns,
optimizer=my_optimizer,
)
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
###Output
_____no_output_____
###Markdown
Task 2: Use a Deep Neural Network (DNN) ModelThe above model is a linear model. It works quite well. But can we do better with a DNN model?Let's swap in a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) for the `LinearClassifier`. Run the following cell, and see how you do.
###Code
##################### Here's what we changed ##################################
classifier = tf.estimator.DNNClassifier( #
feature_columns=[tf.feature_column.indicator_column(terms_feature_column)], #
hidden_units=[20,20], #
optimizer=my_optimizer, #
) #
###############################################################################
try:
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
except ValueError as err:
print(err)
###Output
_____no_output_____
###Markdown
Task 3: Use an Embedding with a DNN ModelIn this task, we'll implement our DNN model using an embedding column. An embedding column takes sparse data as input and returns a lower-dimensional dense vector as output. **NOTE:** *An embedding_column is usually the computationally most efficient option to use for training a model on sparse data. In an [optional section](scrollTo=XDMlGgRfKSVz) at the end of this exercise, we'll discuss in more depth the implementational differences between using an `embedding_column` and an `indicator_column`, and the tradeoffs of selecting one over the other.* In the following code, do the following:* Define the feature columns for the model using an `embedding_column` that projects the data into 2 dimensions (see the [TF docs](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column) for more details on the function signature for `embedding_column`).* Define a `DNNClassifier` with the following specifications: * Two hidden layers of 20 units each * Adagrad optimization with a learning rate of 0.1 * A `gradient_clip_norm` of 5.0 **NOTE:** *In practice, we might project to dimensions higher than 2, like 50 or 100. But for now, 2 dimensions is easy to visualize.* Hint
###Code
# Here's an example code snippet you might use to define the feature columns:
terms_embedding_column = tf.feature_column.embedding_column(terms_feature_column, dimension=2)
feature_columns = [ terms_embedding_column ]
###Output
_____no_output_____
###Markdown
Complete the Code Below
###Code
########################## YOUR CODE HERE ######################################
terms_embedding_column = # Define the embedding column
feature_columns = # Define the feature columns
classifier = # Define the DNNClassifier
################################################################################
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
###Output
_____no_output_____
###Markdown
SolutionClick below for a solution.
###Code
########################## SOLUTION CODE ########################################
terms_embedding_column = tf.feature_column.embedding_column(terms_feature_column, dimension=2)
feature_columns = [ terms_embedding_column ]
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[20,20],
optimizer=my_optimizer
)
#################################################################################
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
###Output
_____no_output_____
###Markdown
Task 4: Convince yourself there's actually an embedding in thereThe above model used an `embedding_column`, and it seemed to work, but this doesn't tell us much about what's going on internally. How can we check that the model is actually using an embedding inside?To start, let's look at the tensors in the model:
###Code
classifier.get_variable_names()
###Output
_____no_output_____
###Markdown
Okay, we can see that there is an embedding layer in there: `'dnn/input_from_feature_columns/input_layer/terms_embedding/...'`. (What's interesting here, by the way, is that this layer is trainable along with the rest of the model just as any hidden layer is.)Is the embedding layer the correct shape? Run the following code to find out. **NOTE:** *Remember, in our case, the embedding is a matrix that allows us to project a 50-dimensional vector down to 2 dimensions.*
###Code
classifier.get_variable_value('dnn/input_from_feature_columns/input_layer/terms_embedding/embedding_weights').shape
###Output
_____no_output_____
###Markdown
Spend some time manually checking the various layers and shapes to make sure everything is connected the way you would expect it would be. Task 5: Examine the EmbeddingLet's now take a look at the actual embedding space, and see where the terms end up in it. Do the following:1. Run the following code to see the embedding we trained in **Task 3**. Do things end up where you'd expect?2. Re-train the model by rerunning the code in **Task 3**, and then run the embedding visualization below again. What stays the same? What changes?3. Finally, re-train the model again using only 10 steps (which will yield a terrible model). Run the embedding visualization below again. What do you see now, and why?
###Code
import numpy as np
import matplotlib.pyplot as plt
embedding_matrix = classifier.get_variable_value('dnn/input_from_feature_columns/input_layer/terms_embedding/embedding_weights')
for term_index in range(len(informative_terms)):
# Create a one-hot encoding for our term. It has 0s everywhere, except for
# a single 1 in the coordinate that corresponds to that term.
term_vector = np.zeros(len(informative_terms))
term_vector[term_index] = 1
# We'll now project that one-hot vector into the embedding space.
embedding_xy = np.matmul(term_vector, embedding_matrix)
plt.text(embedding_xy[0],
embedding_xy[1],
informative_terms[term_index])
# Do a little setup to make sure the plot displays nicely.
plt.rcParams["figure.figsize"] = (15, 15)
plt.xlim(1.2 * embedding_matrix.min(), 1.2 * embedding_matrix.max())
plt.ylim(1.2 * embedding_matrix.min(), 1.2 * embedding_matrix.max())
plt.show()
###Output
_____no_output_____
###Markdown
Task 6: Try to improve the model's performanceSee if you can refine the model to improve performance. A couple things you may want to try:* **Changing hyperparameters**, or **using a different optimizer** like Adam (you may only gain one or two accuracy percentage points following these strategies).* **Adding additional terms to `informative_terms`.** There's a full vocabulary file with all 30,716 terms for this data set that you can use at: https://storage.googleapis.com/mledu-datasets/sparse-data-embedding/terms.txt You can pick out additional terms from this vocabulary file, or use the whole thing via the `categorical_column_with_vocabulary_file` feature column.
###Code
# Download the vocabulary file.
terms_url = 'https://storage.googleapis.com/mledu-datasets/sparse-data-embedding/terms.txt'
terms_path = tf.keras.utils.get_file(terms_url.split('/')[-1], terms_url)
# Create a feature column from "terms", using a full vocabulary file.
informative_terms = None
with io.open(terms_path, 'r', encoding='utf8') as f:
# Convert it to a set first to remove duplicates.
informative_terms = list(set(f.read().split()))
terms_feature_column = tf.feature_column.categorical_column_with_vocabulary_list(key="terms",
vocabulary_list=informative_terms)
terms_embedding_column = tf.feature_column.embedding_column(terms_feature_column, dimension=2)
feature_columns = [ terms_embedding_column ]
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10,10],
optimizer=my_optimizer
)
classifier.train(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([train_path]),
steps=1000)
print("Training set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
evaluation_metrics = classifier.evaluate(
input_fn=lambda: _input_fn([test_path]),
steps=1000)
print("Test set metrics:")
for m in evaluation_metrics:
print(m, evaluation_metrics[m])
print("---")
###Output
_____no_output_____ |
cleared-demos/pdes/Sparse Matrix Factorizations and Fill-In.ipynb | ###Markdown
Sparse Matrix Factorizations and Fill-InCopyright (C) 2020 Andreas KloecknerMIT LicensePermission is hereby granted, free of charge, to any person obtaining a copyof this software and associated documentation files (the "Software"), to dealin the Software without restriction, including without limitation the rightsto use, copy, modify, merge, publish, distribute, sublicense, and/or sellcopies of the Software, and to permit persons to whom the Software isfurnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included inall copies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THEAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS INTHE SOFTWARE.
###Code
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as pt
import random
###Output
_____no_output_____
###Markdown
Here's a helper routine to make a random **symmetric** sparse matrix:
###Code
def make_random_sparse_matrix(n, row_fill):
nentries = (n*row_fill) // 2 # because of symmetry
data = np.random.randn(nentries)
rows = np.random.randint(0, n-1, nentries)
cols = np.random.randint(0, n-1, nentries)
import scipy.sparse as sps
coo = sps.coo_matrix((data, (rows, cols)), shape=(n,n))
# NOTE: Cuthill-McKee applies only to symmetric matrices!
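    # The large diagonal makes the matrix strongly diagonally dominant (and hence
    # symmetric positive definite), so the Cholesky factorizations below succeed.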
return (100*np.eye(n) + np.array(coo.todense() + coo.todense().T))
###Output
_____no_output_____
###Markdown
Next, we will take a look at that matrix from a "bird's-eye view". Every entry with absolute value greater than $10^{-10}$ will show up as a 'dot':
###Code
prec = 1e-10
np.random.seed(15)
random.seed(15)
A = make_random_sparse_matrix(200, 3)
print("%d non-zeros" % len(np.where(np.abs(A)>prec)[0]))
pt.figure()
pt.spy(A, marker=",", precision=prec)
###Output
_____no_output_____
###Markdown
Next, let's apply the same visualization to the inverse:
###Code
Ainv = la.inv(A)
print("%d non-zeros" % len(np.where(np.abs(Ainv) > prec)[0]))
pt.spy(Ainv, marker=",", precision=prec)
###Output
_____no_output_____
###Markdown
And the Cholesky factorization:
###Code
L = la.cholesky(A)
print("%d non-zeros" % len(np.where(np.abs(L) > prec)[0]))
pt.spy(L, marker=",", precision=prec)
###Output
_____no_output_____
###Markdown
Cholesky is often less bad, but in principle affected the same way. Reducing the fill-in Define the *degree* of a row as the number of non-zeros in it.
###Code
def degree(mat, row):
return len(np.where(mat[row])[0])
print(degree(A, 3))
print(degree(A, 4))
print(degree(A, 5))
###Output
_____no_output_____
###Markdown
Then find an ordering so that all the low degrees come first.The [Cuthill-McKee algorithm](https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm) is a greedy algorithm to find such an ordering:
###Code
def argmin2(iterable, return_value=False):
it = iter(iterable)
try:
current_argmin, current_min = next(it)
except StopIteration:
raise ValueError("argmin of empty iterable")
for arg, item in it:
if item < current_min:
current_argmin = arg
current_min = item
if return_value:
return current_argmin, current_min
else:
return current_argmin
def argmin(iterable):
return argmin2(enumerate(iterable))
def cuthill_mckee(mat):
"""Return a Cuthill-McKee ordering for the given matrix.
See (for example)
    Y. Saad, Iterative Methods for Sparse Linear Systems,
2nd edition, p. 76.
"""
# this list is called "old_numbers" because it maps a
# "new number to its "old number"
old_numbers = []
visited_nodes = set()
levelset = []
all_nodes = set(range(len(mat)))
while len(old_numbers) < len(mat):
if not levelset:
unvisited = list(all_nodes - visited_nodes)
if not unvisited:
break
start_node = unvisited[
argmin(degree(mat, node) for node in unvisited)]
visited_nodes.add(start_node)
old_numbers.append(start_node)
levelset = [start_node]
next_levelset = set()
levelset.sort(key=lambda row: degree(mat, row))
#print(levelset)
for node in levelset:
row = mat[node]
neighbors, = np.where(row)
for neighbor in neighbors:
if neighbor in visited_nodes:
continue
visited_nodes.add(neighbor)
next_levelset.add(neighbor)
old_numbers.append(neighbor)
levelset = list(next_levelset)
return np.array(old_numbers, dtype=np.intp)
cmk = cuthill_mckee(A)
###Output
_____no_output_____
###Markdown
Someone (empirically) observed that the *reverse* of the Cuthill-McKee ordering often does better than forward Cuthill-McKee.So construct a permutation matrix corresponding to that:
###Code
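# Rows of the identity, reordered by the reversed Cuthill-McKee permutation:
# P[i, cmk[::-1][i]] = 1, so P @ A @ P.T permutes both rows and columns of A.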
P = np.eye(len(A))[cmk[::-1]]
###Output
_____no_output_____
###Markdown
And then reorder both rows and columns according to that--a similarity transform:
###Code
A_reordered = P @ A @ P.T
pt.spy(A_reordered, marker=",", precision=prec)
###Output
_____no_output_____
###Markdown
Next, let's try Cholesky again:
###Code
L = la.cholesky(A_reordered)
print("%d non-zeros" % len(np.where(np.abs(L) > prec)[0]))
pt.spy(L, marker=",", precision=prec)
###Output
_____no_output_____ |
3rdparty/fast_retraining/experiments/05_FraudDetection.ipynb | ###Markdown
Experiment 05: Credit card FraudThis experiment uses the data from the Kaggle dataset [Credit Card Fraud Detection](https://www.kaggle.com/dalpozz/creditcardfraud). The dataset is made up of a number of variables which are a result of PCA transformation.The details of the machine we used and the version of the libraries can be found in [experiment 01](01_airline.ipynb).
###Code
import json
import sys
import matplotlib.pyplot as plt
import pkg_resources
from libs.loaders import load_fraud
from libs.timer import Timer
from libs.utils import get_number_processors
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
import warnings
warnings.filterwarnings('ignore')
print("System version: {}".format(sys.version))
print("XGBoost version: {}".format(pkg_resources.get_distribution('xgboost').version))
print("LightGBM version: {}".format(pkg_resources.get_distribution('lightgbm').version))
random_seed = 42
%%time
df = load_fraud()
print(df.shape)
df.head()
pipeline_steps = [('scale', StandardScaler())]
continuous_pipeline = Pipeline(steps=pipeline_steps)
featurisers = [('continuous', continuous_pipeline)]
number_processors = get_number_processors()
print(number_processors)
xgb_clf_pipeline = Pipeline(steps=[('features', FeatureUnion(featurisers)),
('clf', XGBClassifier(max_depth=3,
learning_rate=0.1,
scale_pos_weight=2,
n_estimators=100,
gamma=0.1,
min_child_weight=1,
reg_lambda=1,
subsample=1,
nthread=number_processors
))])
xgb_hist_clf_pipeline = Pipeline(steps=[('features', FeatureUnion(featurisers)),
('clf', XGBClassifier(max_depth=0,
learning_rate=0.1,
scale_pos_weight=2,
n_estimators=100,
gamma=0.1,
min_child_weight=1,
reg_lambda=1,
subsample=1,
max_leaves=2**3,
grow_policy='lossguide',
tree_method='hist',
nthread=number_processors
))])
lgbm_clf_pipeline = Pipeline(steps=[('features', FeatureUnion(featurisers)),
('clf', LGBMClassifier(num_leaves=2**3,
learning_rate=0.1,
scale_pos_weight=2,
n_estimators=100,
min_split_gain=0.1,
min_child_weight=1,
reg_lambda=1,
subsample=1,
nthread=number_processors
))])
metrics_dict = {
'Accuracy': accuracy_score,
'Precision': precision_score,
'Recall': recall_score,
'AUC': roc_auc_score,
'F1': f1_score,
}
def classification_metrics(metrics, y_true, y_pred):
return {metric_name:metric(y_true, y_pred) for metric_name, metric in metrics.items()}
X = df[[col for col in df.columns if col.startswith('V')]].values
y = df['Class'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=random_seed, test_size=0.3)
results_dict = dict()
###Output
_____no_output_____
###Markdown
XGBoost
###Code
with Timer() as train_t:
xgb_clf_pipeline.fit(X_train,y_train)
with Timer() as test_t:
y_pred = xgb_clf_pipeline.predict(X_test)
results_dict['xgb']={
'train_time': train_t.interval,
'test_time': test_t.interval,
'performance': classification_metrics(metrics_dict,
y_test,
y_pred)
}
with Timer() as t_train:
xgb_hist_clf_pipeline.fit(X_train,y_train)
with Timer() as t_test:
y_pred = xgb_hist_clf_pipeline.predict(X_test)
results_dict['xgb_hist']={
'train_time': t_train.interval,
'test_time': t_test.interval,
'performance': classification_metrics(metrics_dict,
y_test,
y_pred)
}
###Output
_____no_output_____
###Markdown
LightGBM
###Code
with Timer() as train_t:
lgbm_clf_pipeline.fit(X_train, y_train)
with Timer() as test_t:
y_pred = lgbm_clf_pipeline.predict(X_test)
results_dict['lgbm']={
'train_time': train_t.interval,
'test_time': test_t.interval,
'performance': classification_metrics(metrics_dict,
y_test,
y_pred)
}
# Results
print(json.dumps(results_dict, indent=4, sort_keys=True))
###Output
{
"lgbm": {
"performance": {
"AUC": 0.8749179318834633,
"Accuracy": 0.999403110845827,
"F1": 0.8131868131868133,
"Precision": 0.888,
"Recall": 0.75
},
"test_time": 0.05075380699963716,
"train_time": 0.6608378439996159
},
"xgb": {
"performance": {
"AUC": 0.8884197213803287,
"Accuracy": 0.9994265182636377,
"F1": 0.8243727598566308,
"Precision": 0.8778625954198473,
"Recall": 0.777027027027027
},
"test_time": 0.06871192899961898,
"train_time": 4.349258283999916
},
"xgb_hist": {
"performance": {
"AUC": 0.8715278294884368,
"Accuracy": 0.9993679997191109,
"F1": 0.8029197080291971,
"Precision": 0.873015873015873,
"Recall": 0.7432432432432432
},
"test_time": 0.08524090300034004,
"train_time": 2.0142575339996256
}
}
|
site/en/r2/guide/autograph.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
!pip install tensorflow==2.0.0-beta0
import tensorflow as tf
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Functions can be faster than eager code, for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
###Code
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
lstm_cell = tf.keras.layers.LSTMCell(10)
@tf.function
def lstm_fn(input, state):
return lstm_cell(input, state)
input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# warm up
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
for i in tf.range(n):
if tf.equal(i % 3, 0):
tf.print('Fizz')
elif tf.equal(i % 5, 0):
tf.print('Buzz')
else:
tf.print(i)
fizzbuzz(tf.constant(15))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
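###Markdown
As a rough check of the advice above to keep control flow at the batch level (an added sketch; the numbers are machine-dependent and purely illustrative), you can time the naive per-element version against the vectorized one on a larger input.
###Code
# Hedged sketch: illustrative timing only, results vary by machine.
import timeit
big = tf.range(-1000, 1000)
print("naive:     ", timeit.timeit(lambda: square_if_positive_naive(big), number=10))
print("vectorized:", timeit.timeit(lambda: square_if_positive_vectorized(big), number=10))
###Output
_____no_output_____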
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Install a temporary patch to enable a few extra TF 2.0 upgrades. This piece will be removed soon.
###Code
from tensorflow.python.ops import control_flow_util
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
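###Markdown
To make the "special callable" a bit more concrete, the cell below is an added aside (not part of the original text) that asks the function for the traced graph of a particular input signature via `get_concrete_function` and inspects it; the `TensorSpec` shapes are assumptions matching the example above.
###Code
# Hedged sketch: retrieve one concrete, traced graph from the tf.function
# and list the operations it contains.
concrete_fn = simple_nn_layer.get_concrete_function(
    tf.TensorSpec([3, 3], tf.float32), tf.TensorSpec([3, 3], tf.float32))
print(concrete_fn)
print([op.name for op in concrete_fn.graph.get_operations()])
###Output
_____no_output_____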
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The example above shows how to perform simple conditionals when scalar values are involved. Typical ML code involves batches; in those cases you should consider using the faster and vectorized `tf.where` if possible. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function, experimental_optional_features=None))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
msg += 'Fizz'
elif tf.equal(i % 5, 0):
msg += 'Buzz'
else:
msg += tf.as_string(i)
msg += '\n'
return msg
print(fizzbuzz(tf.constant(15)).numpy().decode())
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
A note on batchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
!pip install tf-nightly-2.0-preview
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Install a temporary patch to enable a few extra TF 2.0 upgrades. This piece will be removed soon.
###Code
from tensorflow.python.ops import control_flow_util
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
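###Markdown
Under the hood, AutoGraph lowers the data-dependent `if` above into a `tf.cond`. The cell below is a rough, hand-written equivalent added for illustration only; it sketches the idea and is not the code AutoGraph actually generates.
###Code
# Hand-written rough equivalent of the conditional above (illustration only;
# AutoGraph's generated code is more involved).
def square_if_positive_cond(x):
    return tf.cond(x > 0, lambda: x * x, lambda: tf.constant(0))
print(square_if_positive_cond(tf.constant(2)))
print(square_if_positive_cond(tf.constant(-2)))
###Output
_____no_output_____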
###Markdown
Note: The example above shows how to perform simple conditionals when scalar values are involved. Typical ML code involves batches; in those cases you should consider using the faster and vectorized `tf.where` if possible. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function, experimental_optional_features=None))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
msg += 'Fizz'
elif tf.equal(i % 5, 0):
msg += 'Buzz'
else:
msg += tf.as_string(i)
msg += '\n'
return msg
print(fizzbuzz(tf.constant(15)).numpy().decode())
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
A note on batchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
!pip install tensorflow==2.0.0-beta1
import tensorflow as tf
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
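###Markdown
One detail about the compilation step worth calling out (an added sketch, not part of the original tutorial): the Python body of a `tf.function` runs only while a graph is being traced, and a new trace happens for each new input signature. The plain Python `print` below therefore fires once per dtype rather than once per call; `traced_add` is a hypothetical helper introduced only for this illustration.
###Code
# Hedged sketch of tracing behaviour: Python-side effects run only during
# tracing, so the print fires once per new input signature.
@tf.function
def traced_add(x):
    print('Tracing traced_add with', x)
    return x + 1
traced_add(tf.constant(1))
traced_add(tf.constant(2))    # same signature: reuses the trace, no print
traced_add(tf.constant(1.0))  # new dtype: traces again, prints again
###Output
_____no_output_____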
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Functions can be faster than eager code, for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
###Code
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
lstm_cell = tf.keras.layers.LSTMCell(10)
@tf.function
def lstm_fn(input, state):
return lstm_cell(input, state)
input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# warm up
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
for i in tf.range(n):
if tf.equal(i % 3, 0):
tf.print('Fizz')
elif tf.equal(i % 5, 0):
tf.print('Buzz')
else:
tf.print(i)
fizzbuzz(tf.constant(15))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphAutoGraph is available by default in non-dynamic Keras models. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
!pip install tf-nightly-2.0-preview
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Install a temporary patch to enable a few extra TF 2.0 upgrades. This piece will be removed soon.
###Code
from tensorflow.python.ops import control_flow_util
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The example above shows how to perform simple conditionals when scalar values are involved. Typical ML code involves batches; in those cases you should consider using the faster and vectorized `tf.where` if possible. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function, experimental_optional_features=None))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
msg += 'Fizz'
elif tf.equal(i % 5, 0):
msg += 'Buzz'
else:
msg += tf.as_string(i)
msg += '\n'
return msg
print(fizzbuzz(tf.constant(15)).numpy().decode())
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
A note on batchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
!pip install tensorflow==2.0.0-beta1
import tensorflow as tf
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Functions can be faster than eager code, for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
###Code
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
lstm_cell = tf.keras.layers.LSTMCell(10)
@tf.function
def lstm_fn(input, state):
return lstm_cell(input, state)
input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# warm up
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
for i in tf.range(n):
if tf.equal(i % 3, 0):
tf.print('Fizz')
elif tf.equal(i % 5, 0):
tf.print('Buzz')
else:
tf.print(i)
fizzbuzz(tf.constant(15))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphAutoGraph is available by default in non-dynamic Keras models. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
!pip install tensorflow==2.0.0-beta1
import tensorflow as tf
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Functions can be faster than eager code, for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
###Code
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
lstm_cell = tf.keras.layers.LSTMCell(10)
@tf.function
def lstm_fn(input, state):
return lstm_cell(input, state)
input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# warm up
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
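###Markdown
The note above also lists `while` and `break`; as an illustrative sketch (not from the original text), a loop whose condition depends on a `Tensor` is converted into a graph loop in the same way:
###Code
@tf.function
def collatz_steps(n):
  # The loop condition depends on the Tensor `n`, so AutoGraph turns this
  # `while` into a tf.while_loop; the inner `if` becomes a tf.cond.
  steps = tf.constant(0)
  while tf.not_equal(n, 1):
    if tf.equal(n % 2, 0):
      n = n // 2
    else:
      n = 3 * n + 1
    steps += 1
  return steps
collatz_steps(tf.constant(27))
###Output
_____no_output_____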
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
tf.print('Fizz')
elif tf.equal(i % 5, 0):
tf.print('Buzz')
else:
tf.print(i)
fizzbuzz(tf.constant(15))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphAutoGraph is available by default in non-dynamic Keras models. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Debugging`tf.function` and AutoGraph work by generating code and tracing it into TensorFlow graphs. This mechanism does not yet support step-by-step debuggers like `pdb`. However, you can call `tf.config.experimental_run_functions_eagerly(True)` (as the cell below does) to temporarily enable eager execution inside the `tf.function` and use your favorite debugger:
###Code
@tf.function
def f(x):
if x > 0:
# Try setting a breakpoint here!
# Example:
# import pdb
# pdb.set_trace()
x = x + 1
return x
tf.config.experimental_run_functions_eagerly(True)
# You can now set breakpoints and run the code in a debugger.
f(tf.constant(1))
tf.config.experimental_run_functions_eagerly(False)
###Output
_____no_output_____
###Markdown
Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
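###Markdown
One practical aside (an addition, not part of the original tutorial): `compute_accuracy` is a stateful Keras metric, so it keeps accumulating across calls to `train`. If you plan to run training again, reset it first:
###Code
# Clears the metric's internal counters; result() returns 0.0 until new
# batches are processed.
compute_accuracy.reset_states()
compute_accuracy.result()
###Output
_____no_output_____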
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
try:
%tensorflow_version 2.x # Colab only.
except Exception:
pass
import tensorflow as tf
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Functions can be faster than eager code, for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
###Code
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
lstm_cell = tf.keras.layers.LSTMCell(10)
@tf.function
def lstm_fn(input, state):
return lstm_cell(input, state)
input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# warm up
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
###Output
_____no_output_____
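###Markdown
One caveat worth illustrating here (an aside, not part of the original text): passing plain Python values to a `tf.function` causes a new graph to be traced for every distinct value, while `Tensor` arguments of the same shape and dtype reuse a single trace:
###Code
@tf.function
def scale(x, factor):
  # A Python-number `factor` is baked into the traced graph, so each distinct
  # value of `factor` triggers a fresh trace.
  return x * factor
x = tf.constant([1.0, 2.0])
scale(x, 2)                  # traces a graph specialized for factor=2
scale(x, 3)                  # traces another graph for factor=3
scale(x, tf.constant(4.0))   # Tensor arguments share one trace per signature
scale(x, tf.constant(5.0))   # reuses the trace from the previous call
###Output
_____no_output_____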
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
tf.print('Fizz')
elif tf.equal(i % 5, 0):
tf.print('Buzz')
else:
tf.print(i)
fizzbuzz(tf.constant(15))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphAutoGraph is available by default in non-dynamic Keras models. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
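###Markdown
As a hedged companion example (not part of the original tutorial), the same decision can be made per element with `tf.where`, which keeps the whole batch on one code path and ties in with the batching advice later in this notebook:
###Code
class VectorizedModel(tf.keras.models.Model):
  @tf.function
  def call(self, input_data):
    # Element-wise control flow instead of a single batch-level branch.
    return tf.where(input_data > 0, input_data, input_data // 2)
vec_model = VectorizedModel()
vec_model(tf.constant([-2, -4, 3]))
###Output
_____no_output_____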
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Debugging`tf.function` and AutoGraph work by generating code and tracing it into TensorFlow graphs. This mechanism does not yet support step-by-step debuggers like `pdb`. However, you can call `tf.config.experimental_run_functions_eagerly(True)` (as the cell below does) to temporarily enable eager execution inside the `tf.function` and use your favorite debugger:
###Code
@tf.function
def f(x):
if x > 0:
# Try setting a breakpoint here!
# Example:
# import pdb
# pdb.set_trace()
x = x + 1
return x
tf.config.experimental_run_functions_eagerly(True)
# You can now set breakpoints and run the code in a debugger.
f(tf.constant(1))
tf.config.experimental_run_functions_eagerly(False)
###Output
_____no_output_____
###Markdown
Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
!pip install tf-nightly-2.0-preview
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Install a temporary patch to enable a few extra TF 2.0 upgrades. This piece will be removed soon.
###Code
from tensorflow.python.ops import control_flow_util
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: the example above shows how to perform simple conditionals when scalar values are involved. Typical ML code involves batches; in those cases you should consider using the faster and vectorized `tf.where` if possible. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function, experimental_optional_features=None))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in range(n):
if i % 3 == 0:
msg += 'Fizz'
elif i % 5 == 0:
msg += 'Buzz'
else:
msg += tf.as_string(i)
msg += '\n'
return msg
print(fizzbuzz(tf.constant(15)).numpy().decode())
###Output
_____no_output_____
###Markdown
Use Python `print`AutoGraph will also convert Python builtins like `print`.Note: due to the parallel nature of calculations in TensorFlow, statements might execute out of order. It's best to use `print` only to inspect actual values, and you should not use it to determine whether the program execution reaches a certain point.
###Code
@tf.function
def count(n):
for i in tf.range(n):
print(i)
count(tf.constant(5))
###Output
_____no_output_____
###Markdown
Other handy conversions Other builtins that AutoGraph can adapt for TensorFlow`range` and `len`. `range` is a shortcut for `tf.range`:
###Code
@tf.function
def range_example(n):
return range(n)
print(range_example(tf.constant(3)))
###Output
_____no_output_____
###Markdown
`len` is a shortcut for `.shape[0]`:
###Code
@tf.function
def len_example(n):
return len(n)
print(len_example(tf.zeros((20, 10))))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if v % 2 == 0:
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
def compute_loss(logits, labels):
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
def compute_accuracy(logits, labels):
predictions = tf.argmax(logits, axis=1)
return tf.reduce_mean(tf.cast(predictions == labels, tf.float32))
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
tape.watch(model.trainable_variables)
logits = model(x)
loss = compute_loss(logits, y)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
accuracy = compute_accuracy(logits, y)
return loss, accuracy
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
for x, y in train_ds:
step += 1
loss, accuracy = train_one_step(model, optimizer, x, y)
if step % 10 == 0:
print('Step', step, ': loss', loss, '; accuracy', accuracy)
return step
_ = train(model, optimizer)
###Output
_____no_output_____
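###Markdown
A follow-on sketch (the `evaluate` helper below is illustrative and not part of the original tutorial): the same pattern moves evaluation into the graph, reusing `compute_accuracy` from above:
###Code
@tf.function
def evaluate(model, dataset):
  # Accumulates batch accuracies entirely inside the graph.
  total = tf.constant(0.0)
  batches = tf.constant(0.0)
  for x, y in dataset:
    logits = model(x)
    total += compute_accuracy(logits, y)
    batches += 1.0
  return total / batches
evaluate(model, mnist_dataset())
###Output
_____no_output_____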
###Markdown
A note on batchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=len(x))
for i in range(len(x)):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
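###Markdown
As an aside not found in the original text, `tf.function` also accepts an `input_signature`, which pins the dtypes and (possibly partially unknown) shapes that a single traced graph will accept:
###Code
@tf.function(input_signature=[tf.TensorSpec(shape=[None, 3], dtype=tf.float32),
                              tf.TensorSpec(shape=[3, None], dtype=tf.float32)])
def constrained_layer(x, y):
  # All calls matching the signature share one traced graph; mismatching
  # dtypes or ranks raise an error instead of triggering a new trace.
  return tf.nn.relu(tf.matmul(x, y))
constrained_layer(tf.random.uniform((2, 3)), tf.random.uniform((3, 5)))
###Output
_____no_output_____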
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Functions can be faster than eager code, for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
###Code
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
lstm_cell = tf.keras.layers.LSTMCell(10)
@tf.function
def lstm_fn(input, state):
return lstm_cell(input, state)
input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# warm up
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
tf.print('Fizz')
elif tf.equal(i % 5, 0):
tf.print('Buzz')
else:
tf.print(i)
fizzbuzz(tf.constant(15))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function
import numpy as np
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Install a temporary patch to enable a few extra TF 2.0 upgrades. This piece will be removed soon.
###Code
from tensorflow.python.ops import control_flow_util
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function, experimental_optional_features=None))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
msg += 'Fizz'
elif tf.equal(i % 5, 0):
msg += 'Buzz'
else:
msg += tf.as_string(i)
msg += '\n'
return msg
print(fizzbuzz(tf.constant(15)).numpy().decode())
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
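###Markdown
A related illustration (an addition, not from the original text): inside a `tf.function`, a Python `print` runs only while the function is being traced, whereas `tf.print` becomes an op that runs on every call:
###Code
@tf.function
def traced_twice(x):
  # The Python print fires once per trace; tf.print fires on every execution.
  print('Tracing with', x)
  tf.print('Executing with', x)
  return x + 1
traced_twice(tf.constant(1))
traced_twice(tf.constant(2))  # same signature, so no retrace: only tf.print fires
###Output
_____no_output_____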
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Install a temporary patch to enable a few extra TF 2.0 upgrades. This piece will be removed soon.
###Code
from tensorflow.python.ops import control_flow_util
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
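###Markdown
For comparison (a sketch added here, not part of the original tutorial), this is roughly the graph code AutoGraph writes for you, expressed by hand with `tf.cond`:
###Code
def square_if_positive_cond(x):
  # Both branches must return the same dtype and shape.
  return tf.cond(x > 0, lambda: x * x, lambda: tf.constant(0))
square_if_positive_cond(tf.constant(2))
###Output
_____no_output_____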
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function, experimental_optional_features=None))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
msg += 'Fizz'
elif tf.equal(i % 5, 0):
msg += 'Buzz'
else:
msg += tf.as_string(i)
msg += '\n'
return msg
print(fizzbuzz(tf.constant(15)).numpy().decode())
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
!pip install tensorflow==2.0.0-beta0
import tensorflow as tf
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Functions can be faster than eager code, for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
###Code
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
lstm_cell = tf.keras.layers.LSTMCell(10)
@tf.function
def lstm_fn(input, state):
return lstm_cell(input, state)
input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# warm up
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
tf.print('Fizz')
elif tf.equal(i % 5, 0):
tf.print('Buzz')
else:
tf.print(i)
fizzbuzz(tf.constant(15))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphAutoGraph is available by default in non-dynamic Keras models. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
try:
# %tensorflow_version only exists in Colab.
!pip install tf-nightly-2.0-preview
except Exception:
pass
import tensorflow as tf
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Functions can be faster than eager code, for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
###Code
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
lstm_cell = tf.keras.layers.LSTMCell(10)
@tf.function
def lstm_fn(input, state):
return lstm_cell(input, state)
input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# warm up
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
for i in tf.range(n):
if (i % 3) == 0:
tf.print('Fizz')
elif (i % 5) == 0:
tf.print('Buzz')
else:
tf.print(i)
fizzbuzz(tf.constant(15))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphAutoGraph is available by default in non-dynamic Keras models. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if v % 2 == 0:
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Debugging`tf.function` and AutoGraph work by generating code and tracing it into TensorFlow graphs. This mechanism does not yet support step-by-step debuggers like `pdb`. However, you can call `tf.config.experimental_run_functions_eagerly(True)` to temporarily enable eager execution inside the `tf.function` and use your favorite debugger:
###Code
@tf.function
def f(x):
if x > 0:
# Try setting a breakpoint here!
# Example:
# import pdb
# pdb.set_trace()
x = x + 1
return x
tf.config.experimental_run_functions_eagerly(True)
# You can now set breakpoints and run the code in a debugger.
f(tf.constant(1))
tf.config.experimental_run_functions_eagerly(False)
###Output
_____no_output_____
###Markdown
Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if step % 10 == 0:
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Install a temporary patch to enable a few extra TF 2.0 upgrades. This piece will be removed soon.
###Code
from tensorflow.python.ops import control_flow_util
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function, experimental_optional_features=None))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
msg += 'Fizz'
elif tf.equal(i % 5, 0):
msg += 'Buzz'
else:
msg += tf.as_string(i)
msg += '\n'
return msg
print(fizzbuzz(tf.constant(15)).numpy().decode())
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
!pip install tensorflow==2.0.0-beta1
import tensorflow as tf
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Functions can be faster than eager code, for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
###Code
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
lstm_cell = tf.keras.layers.LSTMCell(10)
@tf.function
def lstm_fn(input, state):
return lstm_cell(input, state)
input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# warm up
lstm_cell(input, state); lstm_fn(input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The previous example uses simple conditionals with scalar values. Batching is typically used in real-world code. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in tf.range(n):
if tf.equal(i % 3, 0):
tf.print('Fizz')
elif tf.equal(i % 5, 0):
tf.print('Buzz')
else:
tf.print(i)
fizzbuzz(tf.constant(15))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphAutoGraph is available by default in non-dynamic Keras models. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if tf.equal(v % 2, 0):
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Debugging`tf.function` and AutoGraph work by generating code and tracing it into TensorFlow graphs. This mechanism does not yet support step-by-step debuggers like `pdb`. However, you can call `tf.config.experimental_run_functions_eagerly(True)` to temporarily enable eager execution inside the `tf.function` and use your favorite debugger:
###Code
@tf.function
def f(x):
if x > 0:
# Try setting a breakpoint here!
# Example:
# import pdb
# pdb.set_trace()
x = x + 1
return x
tf.config.experimental_run_functions_eagerly(True)
# You can now set breakpoints and run the code in a debugger.
f(tf.constant(1))
tf.config.experimental_run_functions_eagerly(False)
###Output
_____no_output_____
###Markdown
Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(y, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
compute_accuracy(y, logits)
return loss
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
loss = 0.0
accuracy = 0.0
for x, y in train_ds:
step += 1
loss = train_one_step(model, optimizer, x, y)
if tf.equal(step % 10, 0):
tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result())
###Output
_____no_output_____
###Markdown
BatchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=x.shape[0])
for i in tf.range(x.shape[0]):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.function and AutoGraph in TensorFlow 2.0 View on TensorFlow.org Run in Google Colab View source on GitHub TF 2.0 brings together the ease of eager execution and the power of TF 1.0. At the center of this merger is `tf.function`, which allows you to transform a subset of Python syntax into portable, high-performance TensorFlow graphs.A cool new feature of `tf.function` is AutoGraph, which lets you write graph code using natural Python syntax. For a list of the Python features that you can use with AutoGraph, see [AutoGraph Capabilities and Limitations](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md). For more details about `tf.function`, see the RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md). For more details about AutoGraph, see `tf.autograph`.This tutorial will walk you through the basic features of `tf.function` and AutoGraph. SetupImport TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
!pip install tf-nightly-2.0-preview
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Install a temporary patch to enable a few extra TF 2.0 upgrades. This piece will be removed soon.
###Code
from tensorflow.python.ops import control_flow_util
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
###Output
_____no_output_____
###Markdown
The `tf.function` decoratorWhen you annotate a function with `tf.function`, you can still call it like any other function. But it will be compiled into a graph, which means you get the benefits of faster execution, running on GPU or TPU, or exporting to SavedModel.
###Code
@tf.function
def simple_nn_layer(x, y):
return tf.nn.relu(tf.matmul(x, y))
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
###Output
_____no_output_____
###Markdown
If we examine the result of the annotation, we can see that it's a special callable that handles all interactions with the TensorFlow runtime.
###Code
simple_nn_layer
###Output
_____no_output_____
###Markdown
If your code uses multiple functions, you don't need to annotate them all - any functions called from an annotated function will also run in graph mode.
###Code
def linear_layer(x):
return 2 * x + 1
@tf.function
def deep_net(x):
return tf.nn.relu(linear_layer(x))
deep_net(tf.constant((1, 2, 3)))
###Output
_____no_output_____
###Markdown
Use Python control flowWhen using data-dependent control flow inside `tf.function`, you can use Python control flow statements and AutoGraph will convert them into appropriate TensorFlow ops. For example, `if` statements will be converted into `tf.cond()` if they depend on a `Tensor`.In the example below, `x` is a `Tensor` but the `if` statement works as expected:
###Code
@tf.function
def square_if_positive(x):
if x > 0:
x = x * x
else:
x = 0
return x
print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
###Output
_____no_output_____
###Markdown
Note: The example above shows how to perform simple conditionals when scalar values are involved. Typical ML code involves batches; in those cases you should consider using the faster and vectorized `tf.where` if possible. AutoGraph supports common Python statements like `while`, `for`, `if`, `break`, `continue` and `return`, with support for nesting. That means you can use `Tensor` expressions in the condition of `while` and `if` statements, or iterate over a `Tensor` in a `for` loop.
###Code
@tf.function
def sum_even(items):
s = 0
for c in items:
if c % 2 > 0:
continue
s += c
return s
sum_even(tf.constant([10, 12, 15, 20]))
###Output
_____no_output_____
###Markdown
AutoGraph also provides a low-level API for advanced users. For example we can use it to have a look at the generated code.
###Code
print(tf.autograph.to_code(sum_even.python_function, experimental_optional_features=None))
###Output
_____no_output_____
###Markdown
Here's an example of more complicated control flow:
###Code
@tf.function
def fizzbuzz(n):
msg = tf.constant('')
for i in range(n):
if i % 3 == 0:
msg += 'Fizz'
elif i % 5 == 0:
msg += 'Buzz'
else:
msg += tf.as_string(i)
msg += '\n'
return msg
print(fizzbuzz(tf.constant(15)).numpy().decode())
###Output
_____no_output_____
###Markdown
Use Python `print`AutoGraph will also convert Python builtins like `print`.Note: due to the parallel nature of calculations in TensorFlow, statements might execute out of order. It's best to use `print` only to inspect actual values, and you should not use it to determine whether the program execution reaches a certain point.
###Code
@tf.function
def count(n):
for i in tf.range(n):
print(i)
count(tf.constant(5))
###Output
_____no_output_____
###Markdown
Other handy conversions Other builtins that AutoGraph can adapt for TensorFlow`range` and `len`. `range` is a shortcut for `tf.range`:
###Code
@tf.function
def range_example(n):
return range(n)
print(range_example(tf.constant(3)))
###Output
_____no_output_____
###Markdown
`len` is a shortcut for `.shape[0]`:
###Code
@tf.function
def len_example(n):
return len(n)
print(len_example(tf.zeros((20, 10))))
###Output
_____no_output_____
###Markdown
Keras and AutoGraphYou can use `tf.function` with object methods as well. For example, you can decorate your custom Keras models, typically by annotating the model's `call` function. For more information, see `tf.keras`.
###Code
class CustomModel(tf.keras.models.Model):
@tf.function
def call(self, input_data):
if tf.reduce_mean(input_data) > 0:
return input_data
else:
return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
###Output
_____no_output_____
###Markdown
Side effectsJust like in eager mode, you can use operations with side effects, like `tf.assign` or `tf.print` normally inside `tf.function`, and it will insert the necessary control dependencies to ensure they execute in order.
###Code
v = tf.Variable(5)
@tf.function
def find_next_odd():
v.assign(v + 1)
if v % 2 == 0:
v.assign(v + 1)
find_next_odd()
v
###Output
_____no_output_____
###Markdown
Example: training a simple modelAutoGraph also allows you to move more computation inside TensorFlow. For example, a training loop is just control flow, so it can actually be brought into TensorFlow. Download data
###Code
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = tf.keras.datasets.mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
train_dataset = mnist_dataset()
###Output
_____no_output_____
###Markdown
Define the model
###Code
model = tf.keras.Sequential((
tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Define the training loop
###Code
def compute_loss(logits, labels):
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
def compute_accuracy(logits, labels):
predictions = tf.argmax(logits, axis=1)
return tf.reduce_mean(tf.cast(predictions == labels, tf.float32))
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
tape.watch(model.variables)
logits = model(x)
loss = compute_loss(logits, y)
grads = tape.gradient(loss, model.variables)
optimizer.apply_gradients(zip(grads, model.variables))
accuracy = compute_accuracy(logits, y)
return loss, accuracy
@tf.function
def train(model, optimizer):
train_ds = mnist_dataset()
step = 0
for x, y in train_ds:
step += 1
loss, accuracy = train_one_step(model, optimizer, x, y)
if step % 10 == 0:
print('Step', step, ': loss', loss, '; accuracy', accuracy)
return step
_ = train(model, optimizer)
###Output
_____no_output_____
###Markdown
A note on batchingIn real applications batching is essential for performance. The best code to convert to AutoGraph is code where the control flow is decided at the _batch_ level. If making decisions at the individual _example_ level, try to use batch APIs to maintain performance.For example, if you have the following code in Python:
###Code
def square_if_positive(x):
return [i ** 2 if i > 0 else i for i in x]
square_if_positive(range(-5, 5))
###Output
_____no_output_____
###Markdown
You may be tempted to write it in TensorFlow as such (and this would work!):
###Code
@tf.function
def square_if_positive_naive(x):
result = tf.TensorArray(tf.int32, size=len(x))
for i in range(len(x)):
if x[i] > 0:
result = result.write(i, x[i] ** 2)
else:
result = result.write(i, x[i])
return result.stack()
square_if_positive_naive(tf.range(-5, 5))
###Output
_____no_output_____
###Markdown
But in this case, it turns out you can write the following:
###Code
def square_if_positive_vectorized(x):
return tf.where(x > 0, x ** 2, x)
square_if_positive_vectorized(tf.range(-5, 5))
###Output
_____no_output_____ |
notebooks/Dstripes/adversarial/basic/inference_adversarial/dense/AE/pokemonIAAE_Dense_reconst_ssim.ipynb | ###Markdown
Settings
###Code
%env TF_KERAS = 1
import os
sep_local = os.path.sep
import sys
sys.path.append('..'+sep_local+'..')
print(sep_local)
os.chdir('..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..')
print(os.getcwd())
import tensorflow as tf
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Dataset loading
###Code
dataset_name='Dstripes'
import tensorflow as tf
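# NOTE (assumption): the definitions below are not part of the original notebook.
# This excerpt uses batch_size, image_size, latents_dim, intermediate_dim,
# inputs_shape, training_generator and testing_generator without defining them,
# so plausible placeholder values and a generic directory-based image generator
# are sketched here; adjust them to the actual Dstripes data and experiment setup.
batch_size = 32  # assumed batch size
image_size = (64, 64, 3)  # assumed (height, width, channels)
inputs_shape = image_size
latents_dim = 32  # assumed latent-space dimensionality
intermediate_dim = 128  # assumed width of the dense hidden layers
_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
training_generator = _datagen.flow_from_directory('data' + sep_local + dataset_name + sep_local + 'train', target_size=image_size[:2], batch_size=batch_size, class_mode=None)  # assumed directory layout
testing_generator = _datagen.flow_from_directory('data' + sep_local + dataset_name + sep_local + 'test', target_size=image_size[:2], batch_size=batch_size, class_mode=None)  # assumed directory layout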
train_ds = tf.data.Dataset.from_generator(
lambda: training_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
test_ds = tf.data.Dataset.from_generator(
lambda: testing_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
_instance_scale=1.0
for data in train_ds:
_instance_scale = float(data[0].numpy().max())
break
_instance_scale
import numpy as np
from collections.abc import Iterable
if isinstance(inputs_shape, Iterable):
_outputs_shape = np.prod(inputs_shape)
_outputs_shape
###Output
_____no_output_____
###Markdown
Model's Layers definition
###Code
enc_lays = [tf.keras.layers.Dense(units=intermediate_dim, activation='relu'),
tf.keras.layers.Dense(units=intermediate_dim, activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=latents_dim)]
dec_lays = [tf.keras.layers.Dense(units=latents_dim, activation='relu'),
tf.keras.layers.Dense(units=intermediate_dim, activation='relu'),
tf.keras.layers.Dense(units=_outputs_shape),
tf.keras.layers.Reshape(inputs_shape)]
###Output
_____no_output_____
###Markdown
Model definition
###Code
model_name = dataset_name+'IAAE_Dense_reconst_ssmi'
experiments_dir='experiments'+sep_local+model_name
from training.autoencoding_basic.autoencoders.autoencoder import autoencoder as AE
inputs_shape=image_size
variables_params = \
[
{
'name': 'inference',
'inputs_shape':inputs_shape,
'outputs_shape':latents_dim,
'layers': enc_lays
}
,
{
'name': 'generative',
'inputs_shape':latents_dim,
'outputs_shape':inputs_shape,
'layers':dec_lays
}
]
from utils.data_and_files.file_utils import create_if_not_exist
_restore = os.path.join(experiments_dir, 'var_save_dir')
create_if_not_exist(_restore)
_restore
#to restore trained model, set filepath=_restore
from statistical.basic_adversarial_losses import \
create_inference_discriminator_real_losses, \
create_inference_discriminator_fake_losses, \
create_inference_generator_fake_losses
inference_discriminator_losses = {
'inference_discriminator_real_outputs': create_inference_discriminator_real_losses,
'inference_discriminator_fake_outputs': create_inference_discriminator_fake_losses,
'inference_generator_fake_outputs': create_inference_generator_fake_losses,
}
ae = AE(
name=model_name,
latents_dim=latents_dim,
batch_size=batch_size,
variables_params=variables_params,
filepath=None
)
from evaluation.quantitive_metrics.structural_similarity import prepare_ssim_multiscale
discr2gen_rate = 0.001
gen2trad_rate = 0.1
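# NOTE (assumption): similarity_to_distance is called below but never imported in
# this excerpt; it is presumed to be a repository helper that turns a similarity
# metric (here multiscale SSIM) into a distance-style loss. Import it from the
# appropriate module of this codebase before running the compile step.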
ae.compile(
loss={'x_logits': similarity_to_distance(prepare_ssim_multiscale([ae.batch_size]+ae.get_inputs_shape()))},
adversarial_losses=inference_discriminator_losses,
adversarial_weights={'generator_weight': gen2trad_rate,
'discriminator_weight': discr2gen_rate}
)
###Output
_____no_output_____
###Markdown
Callbacks
###Code
from training.callbacks.sample_generation import SampleGeneration
from training.callbacks.save_model import ModelSaver
es = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=1e-12,
patience=12,
verbose=1,
restore_best_weights=False
)
ms = ModelSaver(filepath=_restore)
csv_dir = os.path.join(experiments_dir, 'csv_dir')
create_if_not_exist(csv_dir)
csv_dir = os.path.join(csv_dir, ae.name+'.csv')
csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True)
csv_dir
image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir')
create_if_not_exist(image_gen_dir)
sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)
###Output
_____no_output_____
###Markdown
Model Training
###Code
ae.fit(
x=train_ds,
input_kw=None,
steps_per_epoch=int(1e4),
epochs=int(1e6),
verbose=2,
callbacks=[ es, ms, csv_log, sg],
workers=-1,
use_multiprocessing=True,
validation_data=test_ds,
validation_steps=int(1e4)
)
###Output
_____no_output_____
###Markdown
Model Evaluation inception_score
###Code
from evaluation.generativity_metrics.inception_metrics import inception_score
is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200)
print(f'inception_score mean: {is_mean}, sigma: {is_sigma}')
###Output
_____no_output_____
###Markdown
Frechet_inception_distance
###Code
from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance
fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32)
print(f'frechet inception distance: {fis_score}')
###Output
_____no_output_____
###Markdown
perceptual_path_length_score
###Code
from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score
ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32)
print(f'perceptual path length score: {ppl_mean_score}')
###Output
_____no_output_____
###Markdown
precision score
###Code
from evaluation.generativity_metrics.precision_recall import precision_score
_precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'precision score: {_precision_score}')
###Output
_____no_output_____
###Markdown
recall score
###Code
from evaluation.generativity_metrics.precision_recall import recall_score
_recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'recall score: {_recall_score}')
###Output
_____no_output_____
###Markdown
Image Generation image reconstruction Training dataset
###Code
%load_ext autoreload
%autoreload 2
from training.generators.image_generation_testing import reconstruct_from_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
with Randomness
###Code
from training.generators.image_generation_testing import generate_images_like_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
Complete Randomness
###Code
from training.generators.image_generation_testing import generate_images_randomly
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'random_synthetic_dir')
create_if_not_exist(save_dir)
generate_images_randomly(ae, save_dir)
from training.generators.image_generation_testing import interpolate_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'interpolate_dir')
create_if_not_exist(save_dir)
interpolate_a_batch(ae, testing_generator, save_dir)
###Output
100%|██████████| 15/15 [00:00<00:00, 19.90it/s]
|
numpy/agregations.ipynb | ###Markdown
Aggregations: Min, Max, and Everything In Between NumPy has fast built-in aggregation functions for working on arrays; we'll discuss and demonstrate some of them here Summing the Values in an ArrayAs a quick example, consider computing the sum of all values in an array. Python itself can do this using the built-in `sum` function:
###Code
import numpy as np
L = np.random.random(100)
sum(L)
np.sum(L)
###Output
_____no_output_____
###Markdown
The NumPy version of the operation is computed much more quickly:
###Code
big_array = np.random.rand(1_000_000)
%timeit sum(big_array)
%timeit np.sum(big_array)
###Output
179 ms ± 18.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
1.18 ms ± 120 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
Minimum and Maximum
###Code
min(big_array), max(big_array)
%timeit min(big_array)
%timeit np.min(big_array)
###Output
84.3 ms ± 2.52 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
471 µs ± 23.4 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
For `min`, `max`, `sum`, and several other NumPy aggregates, a shorter syntax is to use methods of the array object itself:
###Code
print(big_array.min(), big_array.max(), big_array.sum())
###Output
6.990105299031768e-07 0.9999992162584009 499848.77304655063
###Markdown
Multidimensional aggregatesOne common type of aggregation operation is an aggregate along a row or column. Say you have some data stored in a two-dimensional array:
###Code
M = np.random.random((3, 4))
print(M)
###Output
[[0.17768175 0.26264154 0.63198318 0.50042224]
[0.52442396 0.5038188 0.56468719 0.29566009]
[0.37235568 0.10338796 0.66210778 0.00653843]]
###Markdown
By default, each NumPy aggregation function will return the aggregate over the entire array: `M.sum()`. Aggregation functions take an additional argument specifying the axis along which the aggregate is computed. For example, we can find the minimum value within each column by specifying `axis=0`.
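As referenced above, calling the aggregation with no `axis` argument collapses the entire array (a quick check on the random `M` defined earlier):
###Code
M.sum()
###Output
_____no_output_____
###Markdown
Specifying an axis instead aggregates along one dimension at a time: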
###Code
M.min(axis=0)
M.max(axis=1)
###Output
_____no_output_____
###Markdown
The `axis` keyword specifies the dimension of the array that will be collapsed. So specifying `axis=0` means that the first axis will be collapsed. For two-dimensional arrays, this means that values within each column will be aggregated. Other aggregation functionsMost aggregates have a `NaN`-safe counterpart that computes the result while ignoring missing values, which are marked by the special IEEE floating-point `NaN` value.
```
Function Name    NaN-safe Version   Description
np.sum           np.nansum          Compute sum of elements
np.prod          np.nanprod         Compute product of elements
np.mean          np.nanmean         Compute mean of elements
np.std           np.nanstd          Compute standard deviation
np.var           np.nanvar          Compute variance
np.min           np.nanmin          Find minimum value
np.max           np.nanmax          Find maximum value
np.argmin        np.nanargmin       Find index of minimum value
np.argmax        np.nanargmax       Find index of maximum value
np.median        np.nanmedian       Compute median of elements
np.percentile    np.nanpercentile   Compute rank-based statistics of elements
np.any           N/A                Evaluate whether any elements are true
np.all           N/A                Evaluate whether all elements are true
```
Example: What is the Average Height of US Presidents?Aggregates available in NumPy can be extremely useful for summarizing a set of values. As a simple example, let's consider the heights of all US presidents. This data is available in the file president_heights.csv
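Before turning to that example, here is a minimal sketch of the NaN-safe behaviour, using a small hypothetical array that contains a missing value:
###Code
vals = np.array([1.0, np.nan, 3.0, 4.0])
# np.sum propagates the missing value, while np.nansum ignores it
print(np.sum(vals), np.nansum(vals))
###Output
_____no_output_____
###Markdown
Returning to the presidents data: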
###Code
import pandas as pd
data = pd.read_csv('./data/president_heights.csv')
data.head()
heigths = np.array(data['height(cm)'])
print(heigths)
print(f"mean heigth:{heigths.mean()}")
print(f"Standard deviation:{heigths.std()}")
print(f"Minimum heigth:{heigths.min()}")
print(f"Maximum heigth:{heigths.max()}")
###Output
mean heigth:179.73809523809524
Standard deviation:6.931843442745892
Minimum heigth:163
Maximum heigth:193
###Markdown
The aggregation operation reduced the entire array to a single summarizing value. We may also wish to compute quantiles:
###Code
print('25th percentile: ', np.percentile(heights, 25))
print('median: ', np.median(heights))
print('75th percentile: ', np.percentile(heights, 75))
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn; seaborn.set() # set plot style
plt.hist(heights)
plt.title("Height Distribution of US Presidents")
plt.xlabel('height (cm)')
plt.ylabel('number');
###Output
_____no_output_____ |
_labs/Lab04/Lab04-GroupByPivotTables.ipynb | ###Markdown
Lab 04: Group By, Pivot Tables, and Data CubesThis lab is presented with some revisions from [Dennis Sun at Cal Poly](https://web.calpoly.edu/~dsun09/index.html) and his [Data301 Course](http://users.csc.calpoly.edu/~dsun09/data301/lectures.html) When you have filled out all the questions, submit via [Tulane Canvas](https://tulane.instructure.com/)
###Code
%matplotlib inline
import numpy as np
import pandas as pd
titanic_df = pd.read_csv("../data/titanic.csv")
###Output
_____no_output_____
###Markdown
In the previous section, we discussed how to restrict our analysis to a particular subset of observations using boolean masks. So, for example, if we wanted to calculate the survival rate for passengers in third class, we would write:
###Code
titanic_df[titanic_df.pclass == 3].survived.mean()
###Output
_____no_output_____
###Markdown
But what if we wanted to calculate the survival rate by class? We could slice the data set three times, once for each class:
###Code
(titanic_df[titanic_df.pclass == 1]['survived'].mean(),
titanic_df[titanic_df.pclass == 2]['survived'].mean(),
titanic_df[titanic_df.pclass == 3]['survived'].mean())
###Output
_____no_output_____
###Markdown
But this code is inefficient and repetitive. It also does not generalize well to variables with hundreds of possible categories. The problem of calculating the survival rate by class is an example of a problem that can be solved using the **split-apply-combine strategy**. The key insight here is that many data analyses follow the same basic pattern:- First, a data set is **split** into several subsets based on some variable.- Next, some analysis is **applied** to each subset.- Finally, the results from each analysis are **combined**.The three steps are diagrammed in the figure below:![](../images/split_apply_combine.png) [source](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.08-Aggregation-and-Grouping.ipynb)Applying this strategy to our working example above, we should first _split_ up the Titanic data according to the value of `pclass`, _apply_ `.survived.mean()` to each subset, and finally _combine_ the results into one `Series`.[_Note:_ The term "split-apply-combine" was coined by Hadley Wickham in [a 2011 paper](https://www.jstatsoft.org/article/view/v040i01), but the idea is not new. It should already be familiar to you if you know SQL or MapReduce.] Split-Apply-Combine in `pandas`: the `.groupby()` methodTo implement the split-apply-combine strategy in `pandas`, we use the `.groupby()` method. First, we specify one or more variables to split on in the argument to `.groupby()`. Then, we specify our analysis as usual. Pandas will handle splitting the data, applying the analysis to each subset, and combining the results at the end.
###Code
titanic_df.groupby("pclass").survived.mean()
###Output
_____no_output_____
###Markdown
Compare this line of code with the code to calculate the overall survival rate:`titanic_df.survived.mean()`.The only difference is `.groupby("pclass")`. This turns a `DataFrame` into a `DataFrameGroupBy` object, which behaves like a `DataFrame`, except that any analysis that we specify will be applied to subsets of the `DataFrame` instead of the whole `DataFrame`. You can even make visualizations with `.groupby()`! To plot the age distribution of the survivors and non-survivors, we can group by the `survived` variable and then ask for a histogram of `age`. Behind the scenes, `pandas` will do this once for the survivors and again for the non-survivors and then combine them into one histogram.
###Code
titanic_df.groupby("survived").age.plot.hist(alpha=.5, density=True, legend=True)
###Output
_____no_output_____
###Markdown
It is also possible to group by more than one variable. Simply pass in a list of variable names to `.groupby()`. For example, the following code calculates the survival rate by class and sex:
###Code
survival_rates = titanic_df.groupby(["pclass", "sex"])["survived"].mean()
survival_rates
###Output
_____no_output_____
###Markdown
It's clear that survival rates on the Titanic varied drastically by class and by sex.Notice that when we use `.groupby()`, the resulting index is whatever variable(s) we grouped by. Since we grouped by two variables, this index actually has two levels. An index with more than one level is called a `MultiIndex` in `pandas`. To access a particular row in a `DataFrame` that is indexed by a `MultiIndex`, we pass in a tuple of the values we want from each level.So, for example, to get female passengers in 2nd class, we would do:
###Code
survival_rates.loc[(2, "female")]
###Output
_____no_output_____
###Markdown
If we pass in fewer values than there are levels in the index, `pandas` will return everything from the remaining levels.
###Code
survival_rates.loc[2]
survival_rates.loc[:, 'female']
###Output
_____no_output_____
###Markdown
Note that sometimes the above won't work, depending on how the indices are set up. In those cases it may be easier to use the [.xs method](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.xs.html).
###Code
survival_rates.xs('female', level=1)
###Output
_____no_output_____
###Markdown
Pivot Tables and The Data Cube
###Code
titanic_df["adult"] = (titanic_df["age"] >= 18)
###Output
_____no_output_____
###Markdown
In Section 2.2, we learned to split a `pandas` `DataFrame` and apply the same analysis to each of the resulting, smaller `DataFrame`s. For example, the following code calculates the proportion of Titanic passengers of each sex, age group, and class who survived:
###Code
survivors_table = (titanic_df.
groupby(["sex", "adult", "pclass"]).
survived.
mean())
survivors_table.to_frame()
###Output
_____no_output_____
###Markdown
Here's another way to think about these results: there are three dimensions, `sex`, `adult`, and `pclass`, and we calculate a metric, the proportion of survivors, for each of the $2 \times 2 \times 3 = 12$ possible combinations of the dimension values.There are many equivalent ways to represent these results. The representation above is essentially the _tabular form_ that we learned in Chapter 1. Each row represents an observation (i.e., a distinct combination of sex, adult, and class) and each column a variable (i.e., the proportion of passengers who survived). Another way to represent these results is using a **data cube**. In a data cube, the possible values of each dimension are laid out along one dimension of a cube, as shown below:![](../images/datacube.png)The term "data _cube_" is somewhat of a misnomer, since it does not have to be a cube. First, as we can plainly see in the figure above, the dimensions need not all be the same size; some dimensions may have more values than others. Second, a data cube can have any number of dimensions, so it does not have to be three-dimensional. A data cube with $d$ dimensions is really a $d$-dimensional hypercube. A 2-dimensional hypercube is a square (or rectangle), a 1-dimensional hypercube is a line, and a 0-dimensional hypercube is a point.While it is useful to imagine a data cube as a $d$-dimensional hypercube, it is not practical to display data in a hypercube---at least not when $d > 2$. So a data cube is often printed as a two-dimensional table, with multi-level row indexes and columns to represent the dimensions. This two-dimensional representation of the data cube is called a **pivot table**. Here is the code to produce a pivot table from the raw data:
###Code
survivors_cube = titanic_df.pivot_table(
index="sex", columns=["adult", "pclass"],
values="survived", aggfunc=np.mean)
survivors_cube
###Output
_____no_output_____
###Markdown
To create a pivot table, we had to specify - the row index(es): Here, we chose to include two of the dimensions (`adult`, `sex`) along the rows of the pivot table.- the column(s): Here, we chose to include the one remaining dimension (`pclass`) in the columns.- the metric in the cells of the table: Here, we chose to report the _mean_ of the `survived` column in each cell.The resulting pivot table is just stored in an ordinary `DataFrame`; `pandas` does not have a special data structure for pivot tables.Notice how we explicitly specified an aggregation function `aggfunc`. That's because in the original `DataFrame` (`titanic_df`), there were many passengers with the same values for all three dimensions, so each cell of this pivot table actually represents many passengers. In order to summarize all of these passengers by a single value, we have to aggregate the values. The mean is not the only aggregation function we could have used; we could have also calculated the sum, to obtain the _number_ of survivors.
###Code
titanic_df.pivot_table(
index="sex", columns=["adult", "pclass"],
values="survived", aggfunc=np.sum
)
###Output
_____no_output_____
###Markdown
If the data is in data cube form (i.e., in a pivot table), it can be converted to tabular form by simply stacking the columns, one on top of the other. In `pandas`, this can be done using the `.stack()` function:
###Code
survivors_cube.stack(["adult", "pclass"])
###Output
_____no_output_____
###Markdown
Compare the above result with `survivors_table`.Likewise, we can convert a `pandas` object in tabular form to data cube form by _unstacking_ the index, assuming that all of the dimensions are already in the index.
###Code
survivors_cube = survivors_table.unstack(["adult", "pclass"])
survivors_cube
###Output
_____no_output_____
###Markdown
Stacking tends to produce longer objects with more rows, while unstacking tends to produce wider objects with more columns. For this reason, tabular form is sometimes referred to as "long form", in contrast to the data cube, which is "wide form." Some Features of Data CubesIt is much easier to quickly compare numbers in data cube form than in tabular form. For example, it is apparent from the preceding pivot table that males had much lower survival rates than females just by comparing the numbers across each row; this fact is more difficult to discern from `survivors_table`.It is also more efficient to store data in a data cube. Recall that `survivors_table` and `survivors_cube` contain the exact same information. However, the data cube is 70% smaller than the tabular version of the same data:
###Code
survivors_table.__sizeof__(), survivors_cube.__sizeof__()
###Output
_____no_output_____
###Markdown
In many implementations of the data cube, it is also faster to access values in a data cube than in a table. Unfortunately, because `pandas` represents data cubes as two-dimensional pivot tables, it does not enjoy these advantages.
###Code
survivors_table.loc["female", True, 1]
survivors_cube.loc["female", (True, 1)]
###Output
_____no_output_____
###Markdown
Data cubes also play nicely with bar charts in `pandas`. When `.plot.bar()` is called on a `pandas` `DataFrame`, one set of bars will be created for each column. So when we call `.plot.bar()` on a pivot table, we will get one set of bars for females and another set of bars for males.
###Code
survivors_cube.plot.bar()
###Output
_____no_output_____
###Markdown
Notice that the $x$-axis of the bar graph contains all of the dimensions in the row index. So to get `pclass` on the $x$-axis, we have to create a pivot table where `pclass` is the row index:
###Code
titanic_df.pivot_table(
index="pclass", columns=["adult", "sex"],
values="survived", aggfunc=np.mean
).plot.bar()
###Output
_____no_output_____
###Markdown
Finally, many analytical operations are easier to do when the data is in data cube format (see the short sketch below). ExercisesExercises 1-6 deal with the Tips data set (`../data/tips.csv`).
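Before the exercise data is loaded, here is that sketch. It is a minimal illustration, assuming the `survivors_cube` pivot table built earlier in this notebook is still in memory; the point is that a comparison across one dimension becomes a single row operation when the data is in cube (wide) form:

```python
# Female-minus-male survival-rate gap for every (adult, pclass) group:
# a single row subtraction on the pivot table, versus a reshape and merge on the long table.
survivors_cube.loc["female"] - survivors_cube.loc["male"]
```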
###Code
tips_df = pd.read_csv("../data/tips.csv")
tips_df["tip_percent"] = tips_df.tip / tips_df.total_bill
tips_df.head()
###Output
_____no_output_____
###Markdown
**Exercise 1.** On which day of the week does the waiter serve the largest parties, on average? (You did this exercise in the previous section. See how much easier it is to do using `.groupby()`.)
###Code
# YOUR CODE HERE
###Output
_____no_output_____
###Markdown
**Exercise 2.** Calculate the average bill by day and time. What day-time combination has the highest average bill?
###Code
# YOUR CODE HERE
###Output
_____no_output_____
###Markdown
**Answer Here:** **Exercise 3.** Extract the average bill for Friday lunch from the result of Exercise 2.
###Code
# YOUR CODE HERE
###Output
_____no_output_____
###Markdown
**Exercise 4.** Use `.groupby()` to make a visualization comparing the distribution of tip percentages left by males and females. How do they compare?
###Code
# YOUR CODE HERE
###Output
_____no_output_____
###Markdown
**Exercise 5.** Calculate the average total bill by day, time, and table size. Display the results in a pivot table.
###Code
# TYPE YOUR CODE HERE.
###Output
_____no_output_____
###Markdown
**Exercise 6.** Make a bar chart showing the average total bill by table size, day, and time. (You will have to decide which variable(s) to represent on the $x$-axis and which variable(s) to represent using different colored bars.) Explain your choice below.
###Code
# TYPE YOUR CODE HERE.
###Output
_____no_output_____
###Markdown
**Answer Here:** The remaining exercise deals with the Ames Housing data set (`../data/ames.tsv`). For more information about the variables in this data set, please refer to the [data documentation](https://ww2.amstat.org/publications/jse/v19n3/decock/DataDocumentation.txt).
###Code
df_ames = pd.read_csv("../data/ames.tsv", sep='\t')
display(df_ames.head())
###Output
_____no_output_____
###Markdown
**Exercise 7.** Calculate the average house price by neighborhood and building type, and store it in data cube form. Use the data cube to determine the neighborhood with the most expensive single-family homes.
###Code
# TYPE YOUR CODE HERE.
###Output
_____no_output_____
###Markdown
Lab 04: Group By, Pivot Tables, and Data CubesThis lab is presented with some revisions from [Dennis Sun at Cal Poly](https://web.calpoly.edu/~dsun09/index.html) and his [Data301 Course](http://users.csc.calpoly.edu/~dsun09/data301/lectures.html) When you have filled out all the questions, submit via [Tulane Canvas](https://tulane.instructure.com/)
###Code
%matplotlib inline
import numpy as np
import pandas as pd
titanic_df = pd.read_csv("../data/titanic.csv")
###Output
_____no_output_____
###Markdown
In the previous section, we discussed how to restrict our analysis to a particular subset of observations using boolean masks. So, for example, if we wanted to calculate the survival rate for passengers in third class, we would write:
###Code
titanic_df[titanic_df.pclass == 3].survived.mean()
###Output
_____no_output_____
###Markdown
But what if we wanted to calculate the survival rate by class? We could slice the data set three times, once for each class:
###Code
(titanic_df[titanic_df.pclass == 1]['survived'].mean(),
titanic_df[titanic_df.pclass == 2]['survived'].mean(),
titanic_df[titanic_df.pclass == 3]['survived'].mean())
###Output
_____no_output_____
###Markdown
But this code is inefficient and repetitive. It also does not generalize well to variables with hundreds of possible categories. The problem of calculating the survival rate by class is an example of a problem that can be solved using the **split-apply-combine strategy**. The key insight here is that many data analyses follow the same basic pattern:- First, a data set is **split** into several subsets based on some variable.- Next, some analysis is **applied** to each subset.- Finally, the results from each analysis are **combined**.The three steps are diagrammed in the figure below:![](../images/split_apply_combine.png) [source](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.08-Aggregation-and-Grouping.ipynb)Applying this strategy to our working example above, we should first _split_ up the Titanic data according to the value of `pclass`, _apply_ `.survived.mean()` to each subset, and finally _combine_ the results into one `Series`.[_Note:_ The term "split-apply-combine" was coined by Hadley Wickham in [a 2011 paper](https://www.jstatsoft.org/article/view/v040i01), but the idea is not new. It should already be familiar to you if you know SQL or MapReduce.] Split-Apply-Combine in `pandas`: the `.groupby()` methodTo implement the split-apply-combine strategy in `pandas`, we use the `.groupby()` method. First, we specify one or more variables to split on in the argument to `.groupby()`. Then, we specify our analysis as usual. Pandas will handle splitting the data, applying the analysis to each subset, and combining the results at the end.
###Code
titanic_df.groupby("pclass").survived.mean()
###Output
_____no_output_____
###Markdown
Compare this line of code with the code to calculate the overall survival rate:`titanic_df.survived.mean()`.The only difference is `.groupby("pclass")`. This turns a `DataFrame` into a `DataFrameGroupBy` object, which behaves like a `DataFrame`, except that any analysis that we specify will be applied to subsets of the `DataFrame` instead of the whole `DataFrame`. You can even make visualizations with `.groupby()`! To plot the age distribution of the survivors and non-survivors, we can group by the `survived` variable and then ask for a histogram of `age`. Behind the scenes, `pandas` will do this once for the survivors and again for the non-survivors and then combine them into one histogram.
###Code
titanic_df.groupby("survived").age.plot.hist(alpha=.5, density=True, legend=True)
###Output
_____no_output_____
###Markdown
It is also possible to group by more than one variable. Simply pass in a list of variable names to `.groupby()`. For example, the following code calculates the survival rate by class and sex:
###Code
survival_rates = titanic_df.groupby(["pclass", "sex"])["survived"].mean()
survival_rates
###Output
_____no_output_____
###Markdown
It's clear that survival rates on the Titanic varied drastically by class and by sex.Notice that when we use `.groupby()`, the resulting index is whatever variable(s) we grouped by. Since we grouped by two variables, this index actually has two levels. An index with more than one level is called a `MultiIndex` in `pandas`. To access a particular row in a `DataFrame` that is indexed by a `MultiIndex`, we pass in a tuple of the values we want from each level.So, for example, to get female passengers in 2nd class, we would do:
###Code
survival_rates.loc[(2, "female")]
###Output
_____no_output_____
###Markdown
If we pass in fewer values than there are levels in the index, `pandas` will return everything from the remaining levels.
###Code
survival_rates.loc[2]
survival_rates.loc[:, 'female']
###Output
_____no_output_____
###Markdown
Note that sometimes the above won't work, depending on how the indices are set up. In those cases it may be easier to use the [.xs method](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.xs.html).
###Code
survival_rates.xs('female', level=1)
###Output
_____no_output_____
###Markdown
Pivot Tables and The Data Cube
###Code
titanic_df["adult"] = (titanic_df["age"] >= 18)
###Output
_____no_output_____
###Markdown
In Section 2.2, we learned to split a `pandas` `DataFrame` and apply the same analysis to each of the resulting, smaller `DataFrame`s. For example, the following code calculates the proportion of Titanic passengers of each sex, age group, and class who survived:
###Code
survivors_table = (titanic_df.
groupby(["sex", "adult", "pclass"]).
survived.
mean())
survivors_table.to_frame()
###Output
_____no_output_____
###Markdown
Here's another way to think about these results: there are three dimensions, `sex`, `adult`, and `pclass`, and we calculate a metric, the proportion of survivors, for each of the $2 \times 2 \times 3 = 12$ possible combinations of the dimension values.There are many equivalent ways to represent these results. The representation above is essentially the _tabular form_ that we learned in Chapter 1. Each row represents an observation (i.e., a distinct combination of sex, adult, and class) and each column a variable (i.e., the proportion of passengers who survived). Another way to represent these results is using a **data cube**. In a data cube, the possible values of each dimension are laid out along one dimension of a cube, as shown below:![](../images/datacube.png)The term "data _cube_" is somewhat of a misnomer, since it does not have to be a cube. First, as we can plainly see in the figure above, the dimensions need not all be the same size; some dimensions may have more values than others. Second, a data cube can have any number of dimensions, so it does not have to be three-dimensional. A data cube with $d$ dimensions is really a $d$-dimensional hypercube. A 2-dimensional hypercube is a square (or rectangle), a 1-dimensional hypercube is a line, and a 0-dimensional hypercube is a point.While it is useful to imagine a data cube as a $d$-dimensional hypercube, it is not practical to display data in a hypercube---at least not when $d > 2$. So a data cube is often printed as a two-dimensional table, with multi-level row indexes and columns to represent the dimensions. This two-dimensional representation of the data cube is called a **pivot table**. Here is the code to produce a pivot table from the raw data:
###Code
survivors_cube = titanic_df.pivot_table(
index="sex", columns=["adult", "pclass"],
values="survived", aggfunc=np.mean)
survivors_cube
###Output
_____no_output_____
###Markdown
To create a pivot table, we had to specify - the row index(es): Here, we chose to include two of the dimensions (`adult`, `sex`) along the rows of the pivot table.- the column(s): Here, we chose to include the one remaining dimension (`pclass`) in the columns.- the metric in the cells of the table: Here, we chose to report the _mean_ of the `survived` column in each cell.The resulting pivot table is just stored in an ordinary `DataFrame`; `pandas` does not have a special data structure for pivot tables.Notice how we explicitly specified an aggregation function `aggfunc`. That's because in the original `DataFrame` (`titanic_df`), there were many passengers with the same values for all three dimensions, so each cell of this pivot table actually represents many passengers. In order to summarize all of these passengers by a single value, we have to aggregate the values. The mean is not the only aggregation function we could have used; we could have also calculated the sum, to obtain the _number_ of survivors.
###Code
titanic_df.pivot_table(
index="sex", columns=["adult", "pclass"],
values="survived", aggfunc=np.sum
)
###Output
_____no_output_____
###Markdown
If the data is in data cube form (i.e., in a pivot table), it can be converted to tabular form by simply stacking the columns, one on top of the other. In `pandas`, this can be done using the `.stack()` function:
###Code
survivors_cube.stack(["adult", "pclass"])
###Output
_____no_output_____
###Markdown
Compare the above result with `survivors_table`.Likewise, we can convert a `pandas` object in tabular form to data cube form by _unstacking_ the index, assuming that all of the dimensions are already in the index.
###Code
survivors_cube = survivors_table.unstack(["adult", "pclass"])
survivors_cube
###Output
_____no_output_____
###Markdown
Stacking tends to produce longer objects with more rows, while unstacking tends to produce wider objects with more columns. For this reason, tabular form is sometimes referred to as "long form", in contrast to the data cube, which is "wide form." Some Features of Data CubesIt is much easier to quickly compare numbers in data cube form than in tabular form. For example, it is apparent from the preceding pivot table that males had much lower survival rates than females just by comparing the numbers across each row; this fact is more difficult to discern from `survivors_table`.It is also more efficient to store data in a data cube. Recall that `survivors_table` and `survivors_cube` contain the exact same information. However, the data cube is 70% smaller than the tabular version of the same data:
###Code
survivors_table.__sizeof__(), survivors_cube.__sizeof__()
###Output
_____no_output_____
###Markdown
In many implementations of the data cube, it is also faster to access values in a data cube than in a table. Unfortunately, because `pandas` represents data cubes as two-dimensional pivot tables, it does not enjoy these advantages.
###Code
survivors_table.loc["female", True, 1]
survivors_cube.loc["female", (True, 1)]
###Output
_____no_output_____
###Markdown
Data cubes also play nicely with bar charts in `pandas`. When `.plot.bar()` is called on a `pandas` `DataFrame`, one set of bars will be created for each column. So when we call `.plot.bar()` on a pivot table, we will get one set of bars for females and another set of bars for males.
###Code
survivors_cube.plot.bar()
###Output
_____no_output_____
###Markdown
Notice that the $x$-axis of the bar graph contains all of the dimensions in the row index. So to get `pclass` on the $x$-axis, we have to create a pivot table where `pclass` is the row index:
###Code
titanic_df.pivot_table(
index="pclass", columns=["adult", "sex"],
values="survived", aggfunc=np.mean
).plot.bar()
###Output
_____no_output_____
###Markdown
Finally, many analytical operations are easier to do when the data is in data cube format. ExercisesExercises 1-6 deal with the Tips data set (`../data/tips.csv`).
###Code
tips_df = pd.read_csv("../data/tips.csv")
tips_df["tip_percent"] = tips_df.tip / tips_df.total_bill
tips_df.head()
###Output
_____no_output_____
###Markdown
**Exercise 1.** On which day of the week does the waiter serve the largest parties, on average? (You did this exercise in the previous section. See how much easier it is to do using `.groupby()`.)
###Code
tips_df.groupby("day")["size"].mean()
# The waiter serves the largest parties on average on Sundays.
###Output
_____no_output_____
###Markdown
**Exercise 2.** Calculate the average bill by day and time. What day-time combination has the highest average bill?
###Code
avg_bill_timeday = tips_df.groupby(["day", "time"]).total_bill.mean()
avg_bill_timeday
###Output
_____no_output_____
###Markdown
**Answer Here:** The highest average bill is on Sundays at dinner time. **Exercise 3.** Extract the average bill for Friday lunch from the result of Exercise 2.
###Code
avg_bill_timeday.loc[("Fri", "Lunch")]
###Output
_____no_output_____
###Markdown
**Exercise 4.** Use `.groupby()` to make a visualization comparing the distribution of tip percentages left by males and females. How do they compare?
###Code
tips_df.groupby("sex").tip_percent.plot.hist(alpha=.5, legend=True, density=True)
# More women tip around the 20% mark, while men are more spread out.
###Output
_____no_output_____
###Markdown
**Exercise 5.** Calculate the average total bill by day, time, and table size. Display the results in a pivot table.
###Code
pivot = tips_df.pivot_table(
index="size", columns=["time", "day"],
values="total_bill", aggfunc=np.mean
)
pivot
###Output
_____no_output_____
###Markdown
**Exercise 6.** Make a bar chart showing the average total bill by table size, day, and time. (You will have to decide which variable(s) to represent on the $x$-axis and which variable(s) to represent using different colored bars.) Explain your choice below.
###Code
pivot.plot.bar()
# Putting the size on the x-axis allows there to be fewer bars in one cluster.
###Output
_____no_output_____
###Markdown
The remaining exercise deals with the Ames Housing data set (`../data/ames.tsv`). For more information about the variables in this data set, please refer to the [data documentation](https://ww2.amstat.org/publications/jse/v19n3/decock/DataDocumentation.txt).
###Code
df_ames = pd.read_csv("../data/ames.tsv", sep='\t')
display(df_ames.head())
###Output
_____no_output_____
###Markdown
**Exercise 7.** Calculate the average house price by neighborhood and building type, and store it in data cube form. Use the data cube to determine the neighborhood with the most expensive single-family homes.
###Code
avg_price_cube = df_ames.pivot_table(
index="Neighborhood", columns="Bldg Type",
values="SalePrice", aggfunc=np.mean
)
avg_price_cube["1Fam"].max() # $400,546.04
avg_price_cube
###Output
_____no_output_____
###Markdown
Lab 04: Group By, Pivot Tables, and Data CubesThis lab is presented with some revisions from [Dennis Sun at Cal Poly](https://web.calpoly.edu/~dsun09/index.html) and his [Data301 Course](http://users.csc.calpoly.edu/~dsun09/data301/lectures.html) When you have filled out all the questions, submit via [Tulane Canvas](https://tulane.instructure.com/)
###Code
%matplotlib inline
import numpy as np
import pandas as pd
titanic_df = pd.read_csv("../data/titanic.csv")
###Output
_____no_output_____
###Markdown
In the previous section, we discussed how to restrict our analysis to a particular subset of observations using boolean masks. So, for example, if we wanted to calculate the survival rate for passengers in third class, we would write:
###Code
titanic_df[titanic_df.pclass == 3].survived.mean()
###Output
_____no_output_____
###Markdown
But what if we wanted to calculate the survival rate by class? We could slice the data set three times, once for each class:
###Code
(titanic_df[titanic_df.pclass == 1]['survived'].mean(),
titanic_df[titanic_df.pclass == 2]['survived'].mean(),
titanic_df[titanic_df.pclass == 3]['survived'].mean())
###Output
_____no_output_____
###Markdown
But this code is inefficient and repetitive. It also does not generalize well to variables with hundreds of possible categories. The problem of calculating the survival rate by class is an example of a problem that can be solved using the **split-apply-combine strategy**. The key insight here is that many data analyses follow the same basic pattern:- First, a data set is **split** into several subsets based on some variable.- Next, some analysis is **applied** to each subset.- Finally, the results from each analysis are **combined**.The three steps are diagrammed in the figure below:![](../images/split_apply_combine.png) [source](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.08-Aggregation-and-Grouping.ipynb)Applying this strategy to our working example above, we should first _split_ up the Titanic data according to the value of `pclass`, _apply_ `.survived.mean()` to each subset, and finally _combine_ the results into one `Series`.[_Note:_ The term "split-apply-combine" was coined by Hadley Wickham in [a 2011 paper](https://www.jstatsoft.org/article/view/v040i01), but the idea is not new. It should already be familiar to you if you know SQL or MapReduce.] Split-Apply-Combine in `pandas`: the `.groupby()` methodTo implement the split-apply-combine strategy in `pandas`, we use the `.groupby()` method. First, we specify one or more variables to split on in the argument to `.groupby()`. Then, we specify our analysis as usual. Pandas will handle splitting the data, applying the analysis to each subset, and combining the results at the end.
###Code
titanic_df.groupby("pclass").survived.mean()
###Output
_____no_output_____
###Markdown
Compare this line of code with the code to calculate the overall survival rate:`titanic_df.survived.mean()`.The only difference is `.groupby("pclass")`. This turns a `DataFrame` into a `DataFrameGroupBy` object, which behaves like a `DataFrame`, except that any analysis that we specify will be applied to subsets of the `DataFrame` instead of the whole `DataFrame`. You can even make visualizations with `.groupby()`! To plot the age distribution of the survivors and non-survivors, we can group by the `survived` variable and then ask for a histogram of `age`. Behind the scenes, `pandas` will do this once for the survivors and again for the non-survivors and then combine them into one histogram.
###Code
titanic_df.groupby("survived").age.plot.hist(alpha=.5, density=True, legend=True)
###Output
_____no_output_____
###Markdown
It is also possible to group by more than one variable. Simply pass in a list of variable names to `.groupby()`. For example, the following code calculates the survival rate by class and sex:
###Code
survival_rates = titanic_df.groupby(["pclass", "sex"])["survived"].mean()
survival_rates
###Output
_____no_output_____
###Markdown
It's clear that survival rates on the Titanic varied drastically by class and by sex.Notice that when we use `.groupby()`, the resulting index is whatever variable(s) we grouped by. Since we grouped by two variables, this index actually has two levels. An index with more than one level is called a `MultiIndex` in `pandas`. To access a particular row in a `DataFrame` that is indexed by a `MultiIndex`, we pass in a tuple of the values we want from each level.So, for example, to get female passengers in 2nd class, we would do:
###Code
survival_rates.loc[(2, "female")]
###Output
_____no_output_____
###Markdown
If we pass in fewer values than there are levels in the index, `pandas` will return everything from the remaining levels.
###Code
survival_rates.loc[2]
survival_rates.loc[:, 'female']
###Output
_____no_output_____
###Markdown
Note that sometimes the above won't work, depending on how the indices are set up. In those cases it may be easier to use the [.xs method](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.xs.html).
###Code
survival_rates.xs('female', level=1)
###Output
_____no_output_____
###Markdown
Pivot Tables and The Data Cube
###Code
titanic_df["adult"] = (titanic_df["age"] >= 18)
###Output
_____no_output_____
###Markdown
In Section 2.2, we learned to split a `pandas` `DataFrame` and apply the same analysis to each of the resulting, smaller `DataFrame`s. For example, the following code calculates the proportion of Titanic passengers of each sex, age group, and class who survived:
###Code
survivors_table = (titanic_df.
groupby(["sex", "adult", "pclass"]).
survived.
mean())
survivors_table.to_frame()
###Output
_____no_output_____
###Markdown
Here's another way to think about these results: there are three dimensions, `sex`, `adult`, and `pclass`, and we calculate a metric, the proportion of survivors, for each of the $2 \times 2 \times 3 = 12$ possible combinations of the dimension values.There are many equivalent ways to represent these results. The representation above is essentially the _tabular form_ that we learned in Chapter 1. Each row represents an observation (i.e., a distinct combination of sex, adult, and class) and each column a variable (i.e., the proportion of passengers who survived). Another way to represent these results is using a **data cube**. In a data cube, the possible values of each dimension are laid out along one dimension of a cube, as shown below:![](../images/datacube.png)The term "data _cube_" is somewhat of a misnomer, since it does not have to be a cube. First, as we can plainly see in the figure above, the dimensions need not all be the same size; some dimensions may have more values than others. Second, a data cube can have any number of dimensions, so it does not have to be three-dimensional. A data cube with $d$ dimensions is really a $d$-dimensional hypercube. A 2-dimensional hypercube is a square (or rectangle), a 1-dimensional hypercube is a line, and a 0-dimensional hypercube is a point.While it is useful to imagine a data cube as a $d$-dimensional hypercube, it is not practical to display data in a hypercube---at least not when $d > 2$. So a data cube is often printed as a two-dimensional table, with multi-level row indexes and columns to represent the dimensions. This two-dimensional representation of the data cube is called a **pivot table**. Here is the code to produce a pivot table from the raw data:
###Code
survivors_cube = titanic_df.pivot_table(
index="sex", columns=["adult", "pclass"],
values="survived", aggfunc=np.mean)
survivors_cube
###Output
_____no_output_____
###Markdown
To create a pivot table, we had to specify - the row index(es): Here, we chose to include two of the dimensions (`adult`, `sex`) along the rows of the pivot table.- the column(s): Here, we chose to include the one remaining dimension (`pclass`) in the columns.- the metric in the cells of the table: Here, we chose to report the _mean_ of the `survived` column in each cell.The resulting pivot table is just stored in an ordinary `DataFrame`; `pandas` does not have a special data structure for pivot tables.Notice how we explicitly specified an aggregation function `aggfunc`. That's because in the original `DataFrame` (`titanic_df`), there were many passengers with the same values for all three dimensions, so each cell of this pivot table actually represents many passengers. In order to summarize all of these passengers by a single value, we have to aggregate the values. The mean is not the only aggregation function we could have used; we could have also calculated the sum, to obtain the _number_ of survivors.
###Code
titanic_df.pivot_table(
index="sex", columns=["adult", "pclass"],
values="survived", aggfunc=np.sum
)
###Output
_____no_output_____
###Markdown
If the data is in data cube form (i.e., in a pivot table), it can be converted to tabular form by simply stacking the columns, one on top of the other. In `pandas`, this can be done using the `.stack()` function:
###Code
survivors_cube.stack(["adult", "pclass"])
###Output
_____no_output_____
###Markdown
Compare the above result with `survivors_table`.Likewise, we can convert a `pandas` object in tabular form to data cube form by _unstacking_ the index, assuming that all of the dimensions are already in the index.
###Code
survivors_cube = survivors_table.unstack(["adult", "pclass"])
survivors_cube
###Output
_____no_output_____
###Markdown
Stacking tends to produce longer objects with more rows, while unstacking tends to produce wider objects with more columns. For this reason, tabular form is sometimes referred to as "long form", in contrast to the data cube, which is "wide form." Some Features of Data CubesIt is much easier to quickly compare numbers in data cube form than in tabular form. For example, it is apparent from the preceding pivot table that males had much lower survival rates than females just by comparing the numbers across each row; this fact is more difficult to discern from `survivors_table`.It is also more efficient to store data in a data cube. Recall that `survivors_table` and `survivors_cube` contain the exact same information. However, the data cube is 70% smaller than the tabular version of the same data:
###Code
survivors_table.__sizeof__(), survivors_cube.__sizeof__()
###Output
_____no_output_____
###Markdown
In many implementations of the data cube, it is also faster to access values in a data cube than in a table. Unfortunately, because `pandas` represents data cubes as two-dimensional pivot tables, it does not enjoy these advantages.
###Code
survivors_table.loc["female", True, 1]
survivors_cube.loc["female", (True, 1)]
###Output
_____no_output_____
###Markdown
Data cubes also play nicely with bar charts in `pandas`. When `.plot.bar()` is called on a `pandas` `DataFrame`, one set of bars will be created for each column. So when we call `.plot.bar()` on a pivot table, we will get one set of bars for females and another set of bars for males.
###Code
survivors_cube.plot.bar()
###Output
_____no_output_____
###Markdown
Notice that the $x$-axis of the bar graph contains all of the dimensions in the row index. So to get `pclass` on the $x$-axis, we have to create a pivot table where `pclass` is the row index:
###Code
titanic_df.pivot_table(
index="pclass", columns=["adult", "sex"],
values="survived", aggfunc=np.mean
).plot.bar()
###Output
_____no_output_____
###Markdown
Finally, many analytical operations are easier to do when the data is in data cube format. ExercisesExercises 1-6 deal with the Tips data set (`../data/tips.csv`).
###Code
tips_df = pd.read_csv("../data/tips.csv")
tips_df["tip_percent"] = tips_df.tip / tips_df.total_bill
tips_df.head()
###Output
_____no_output_____
###Markdown
**Exercise 1.** On which day of the week does the waiter serve the largest parties, on average? (You did this exercise in the previous section. See how much easier it is to do using `.groupby()`.)
###Code
tips_df.rename(columns={"size":"party"}, inplace=True)
tips_df.groupby("day").party.mean()
# according to this output, the waiter serves the largest parties on Sunday.
###Output
_____no_output_____
###Markdown
**Exercise 2.** Calculate the average bill by day and time. What day-time combination has the highest average bill?
###Code
day_time_average = tips_df.groupby(["day", "time"])["total_bill"].mean()
day_time_average
###Output
_____no_output_____
###Markdown
**Answer Here: The combination with the highest average bill is Sunday dinner.** **Exercise 3.** Extract the average bill for Friday lunch from the result of Exercise 2.
###Code
day_time_average.loc[("Fri", "Lunch")]
###Output
_____no_output_____
###Markdown
**Exercise 4.** Use `.groupby()` to make a visualization comparing the distribution of tip percentages left by males and females. How do they compare?
###Code
tips_df.groupby("sex").tip_percent.plot.hist(alpha=.5, density=True, legend=True)
###Output
_____no_output_____
###Markdown
**Exercise 5.** Calculate the average total bill by day, time, and table size. Display the results in a pivot table.
###Code
bill_cube = tips_df.pivot_table(
index="party", columns=["day", "time"],
values="total_bill", aggfunc=np.mean
)
bill_cube
###Output
_____no_output_____
###Markdown
**Exercise 6.** Make a bar chart showing the average total bill by table size, day, and time. (You will have to decide which variable(s) to represent on the $x$-axis and which variable(s) to represent using different colored bars.) Explain your choice below.
###Code
tips_df.pivot_table(
index="party", columns=["time", "day"],
values="total_bill", aggfunc=np.mean
).plot.bar()
###Output
_____no_output_____
###Markdown
**Answer Here: After trying each variable as the index, it turned out that "party" was the best one because it required fewer actual bars on the graph in order to represent the data.** The remaining exercise deals with the Ames Housing data set (`../data/ames.tsv`). For more information about the variables in this data set, please refer to the [data documentation](https://ww2.amstat.org/publications/jse/v19n3/decock/DataDocumentation.txt).
###Code
df_ames = pd.read_csv("../data/ames.tsv", sep='\t')
display(df_ames.head())
###Output
_____no_output_____
###Markdown
**Exercise 7.** Calculate the average house price by neighborhood and building type, and store it in data cube form. Use the data cube to determine the neighborhood with the most expensive single-family homes.
###Code
house_cube = df_ames.pivot_table(
index="Neighborhood", columns=["Bldg Type"],
values="SalePrice", aggfunc=np.mean
)
house_cube
###Output
_____no_output_____
###Markdown
Lab 04: Group By, Pivot Tables, and Data CubesThis lab is presented with some revisions from [Dennis Sun at Cal Poly](https://web.calpoly.edu/~dsun09/index.html) and his [Data301 Course](http://users.csc.calpoly.edu/~dsun09/data301/lectures.html) When you have filled out all the questions, submit via [Tulane Canvas](https://tulane.instructure.com/)
###Code
%matplotlib inline
import numpy as np
import pandas as pd
titanic_df = pd.read_csv("../data/titanic.csv")
###Output
_____no_output_____
###Markdown
In the previous section, we discussed how to restrict our analysis to a particular subset of observations using boolean masks. So, for example, if we wanted to calculate the survival rate for passengers in third class, we would write:
###Code
titanic_df[titanic_df.pclass == 3].survived.mean()
###Output
_____no_output_____
###Markdown
But what if we wanted to calculate the survival rate by class? We could slice the data set three times, once for each class:
###Code
(titanic_df[titanic_df.pclass == 1]['survived'].mean(),
titanic_df[titanic_df.pclass == 2]['survived'].mean(),
titanic_df[titanic_df.pclass == 3]['survived'].mean())
###Output
_____no_output_____
###Markdown
But this code is inefficient and repetitive. It also does not generalize well to variables with hundreds of possible categories. The problem of calculating the survival rate by class is an example of a problem that can be solved using the **split-apply-combine strategy**. The key insight here is that many data analyses follow the same basic pattern:- First, a data set is **split** into several subsets based on some variable.- Next, some analysis is **applied** to each subset.- Finally, the results from each analysis are **combined**.The three steps are diagrammed in the figure below:![](../images/split_apply_combine.png) [source](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.08-Aggregation-and-Grouping.ipynb)Applying this strategy to our working example above, we should first _split_ up the Titanic data according to the value of `pclass`, _apply_ `.survived.mean()` to each subset, and finally _combine_ the results into one `Series`.[_Note:_ The term "split-apply-combine" was coined by Hadley Wickham in [a 2011 paper](https://www.jstatsoft.org/article/view/v040i01), but the idea is not new. It should already be familiar to you if you know SQL or MapReduce.] Split-Apply-Combine in `pandas`: the `.groupby()` methodTo implement the split-apply-combine strategy in `pandas`, we use the `.groupby()` method. First, we specify one or more variables to split on in the argument to `.groupby()`. Then, we specify our analysis as usual. Pandas will handle splitting the data, applying the analysis to each subset, and combining the results at the end.
###Code
titanic_df.groupby("pclass").survived.mean()
###Output
_____no_output_____
###Markdown
Compare this line of code with the code to calculate the overall survival rate:`titanic_df.survived.mean()`.The only difference is `.groupby("pclass")`. This turns a `DataFrame` into a `DataFrameGroupBy` object, which behaves like a `DataFrame`, except that any analysis that we specify will be applied to subsets of the `DataFrame` instead of the whole `DataFrame`. You can even make visualizations with `.groupby()`! To plot the age distribution of the survivors and non-survivors, we can group by the `survived` variable and then ask for a histogram of `age`. Behind the scenes, `pandas` will do this once for the survivors and again for the non-survivors and then combine them into one histogram.
###Code
titanic_df.groupby("survived").age.plot.hist(alpha=.5, density=True, legend=True)
###Output
_____no_output_____
###Markdown
It is also possible to group by more than one variable. Simply pass in a list of variable names to `.groupby()`. For example, the following code calculates the survival rate by class and sex:
###Code
survival_rates = titanic_df.groupby(["pclass", "sex"])["survived"].mean()
survival_rates
###Output
_____no_output_____
###Markdown
It's clear that survival rates on the Titanic varied drastically by class and by sex.Notice that when we use `.groupby()`, the resulting index is whatever variable(s) we grouped by. Since we grouped by two variables, this index actually has two levels. An index with more than one level is called a `MultiIndex` in `pandas`. To access a particular row in a `DataFrame` that is indexed by a `MultiIndex`, we pass in a tuple of the values we want from each level.So, for example, to get female passengers in 2nd class, we would do:
###Code
survival_rates.loc[(2, "female")]
###Output
_____no_output_____
###Markdown
If we pass in fewer values than there are levels in the index, `pandas` will return everything from the remaining levels.
###Code
survival_rates.loc[2]
survival_rates.loc[:, 'female']
###Output
_____no_output_____
###Markdown
Note that sometimes the above won't work, depending on how the indices are set up. In those cases it may be easier to use the [.xs method](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.xs.html).
###Code
survival_rates.xs('female', level=1)
###Output
_____no_output_____
###Markdown
Pivot Tables and The Data Cube
###Code
titanic_df["adult"] = (titanic_df["age"] >= 18)
###Output
_____no_output_____
###Markdown
In Section 2.2, we learned to split a `pandas` `DataFrame` and apply the same analysis to each of the resulting, smaller `DataFrame`s. For example, the following code calculates the proportion of Titanic passengers of each sex, age group, and class who survived:
###Code
survivors_table = (titanic_df.
groupby(["sex", "adult", "pclass"]).
survived.
mean())
survivors_table.to_frame()
###Output
_____no_output_____
###Markdown
Here's another way to think about these results: there are three dimensions, `sex`, `adult`, and `pclass`, and we calculate a metric, the proportion of survivors, for each of the $2 \times 2 \times 3 = 12$ possible combinations of the dimension values.There are many equivalent ways to represent these results. The representation above is essentially the _tabular form_ that we learned in Chapter 1. Each row represents an observation (i.e., a distinct combination of sex, adult, and class) and each column a variable (i.e., the proportion of passengers who survived). Another way to represent these results is using a **data cube**. In a data cube, the possible values of each dimension are laid out along one dimension of a cube, as shown below:![](../images/datacube.png)The term "data _cube_" is somewhat of a misnomer, since it does not have to be a cube. First, as we can plainly see in the figure above, the dimensions need not all be the same size; some dimensions may have more values than others. Second, a data cube can have any number of dimensions, so it does not have to be three-dimensional. A data cube with $d$ dimensions is really a $d$-dimensional hypercube. A 2-dimensional hypercube is a square (or rectangle), a 1-dimensional hypercube is a line, and a 0-dimensional hypercube is a point.While it is useful to imagine a data cube as a $d$-dimensional hypercube, it is not practical to display data in a hypercube---at least not when $d > 2$. So a data cube is often printed as a two-dimensional table, with multi-level row indexes and columns to represent the dimensions. This two-dimensional representation of the data cube is called a **pivot table**. Here is the code to produce a pivot table from the raw data:
###Code
survivors_cube = titanic_df.pivot_table(
index="sex", columns=["adult", "pclass"],
values="survived", aggfunc=np.mean)
survivors_cube
###Output
_____no_output_____
###Markdown
To create a pivot table, we had to specify - the row index(es): Here, we chose to include two of the dimensions (`adult`, `sex`) along the rows of the pivot table.- the column(s): Here, we chose to include the one remaining dimension (`pclass`) in the columns.- the metric in the cells of the table: Here, we chose to report the _mean_ of the `survived` column in each cell.The resulting pivot table is just stored in an ordinary `DataFrame`; `pandas` does not have a special data structure for pivot tables.Notice how we explicitly specified an aggregation function `aggfunc`. That's because in the original `DataFrame` (`titanic_df`), there were many passengers with the same values for all three dimensions, so each cell of this pivot table actually represents many passengers. In order to summarize all of these passengers by a single value, we have to aggregate the values. The mean is not the only aggregation function we could have used; we could have also calculated the sum, to obtain the _number_ of survivors.
###Code
titanic_df.pivot_table(
index="sex", columns=["adult", "pclass"],
values="survived", aggfunc=np.sum
)
###Output
_____no_output_____
###Markdown
If the data is in data cube form (i.e., in a pivot table), it can be converted to tabular form by simply stacking the columns, one on top of the other. In `pandas`, this can be done using the `.stack()` function:
###Code
survivors_cube.stack(["adult", "pclass"])
###Output
_____no_output_____
###Markdown
Compare the above result with `survivors_table`.Likewise, we can convert a `pandas` object in tabular form to data cube form by _unstacking_ the index, assuming that all of the dimensions are already in the index.
###Code
survivors_cube = survivors_table.unstack(["adult", "pclass"])
survivors_cube
###Output
_____no_output_____
###Markdown
Stacking tends to produce longer objects with more rows, while unstacking tends to produce wider objects with more columns. For this reason, tabular form is sometimes referred to as "long form", in contrast to the data cube, which is "wide form." Some Features of Data CubesIt is much easier to quickly compare numbers in data cube form than in tabular form. For example, it is apparent from the preceding pivot table that males had much lower survival rates than females just by comparing the numbers across each row; this fact is more difficult to discern from `survivors_table`.It is also more efficient to store data in a data cube. Recall that `survivors_table` and `survivors_cube` contain the exact same information. However, the data cube is 70% smaller than the tabular version of the same data:
###Code
survivors_table.__sizeof__(), survivors_cube.__sizeof__()
###Output
_____no_output_____
###Markdown
In many implementations of the data cube, it is also faster to access values in a data cube than in a table. Unfortunately, because `pandas` represents data cubes as two-dimensional pivot tables, it does not enjoy these advantages.
###Code
survivors_table.loc["female", True, 1]
survivors_cube.loc["female", (True, 1)]
###Output
_____no_output_____
###Markdown
Data cubes also play nicely with bar charts in `pandas`. When `.plot.bar()` is called on a `pandas` `DataFrame`, one set of bars will be created for each column. So when we call `.plot.bar()` on a pivot table, we will get one set of bars for females and another set of bars for males.
###Code
survivors_cube.plot.bar()
###Output
_____no_output_____
###Markdown
Notice that the $x$-axis of the bar graph contains all of the dimensions in the row index. So to get `pclass` on the $x$-axis, we have to create a pivot table where `pclass` is the row index:
###Code
titanic_df.pivot_table(
index="pclass", columns=["adult", "sex"],
values="survived", aggfunc=np.mean
).plot.bar()
###Output
_____no_output_____
###Markdown
Finally, many analytical operations are easier to do when the data is in data cube format. ExercisesExercises 1-6 deal with the Tips data set (`../data/tips.csv`).
###Code
tips_df = pd.read_csv("../data/tips.csv")
tips_df["tip_percent"] = tips_df.tip / tips_df.total_bill
tips_df.head()
###Output
_____no_output_____
###Markdown
**Exercise 1.** On which day of the week does the waiter serve the largest parties, on average? (You did this exercise in the previous section. See how much easier it is to do using `.groupby()`.)
###Code
# YOUR CODE HERE
tips_df.groupby(['day'])['size'].mean()
#serves largest parties on Sundays
###Output
_____no_output_____
###Markdown
**Exercise 2.** Calculate the average bill by day and time. What day-time combination has the highest average bill?
###Code
# YOUR CODE HERE
avgBill_byDay_byTime = tips_df.groupby(['day','time']).total_bill.mean()
avgBill_byDay_byTime
#Sunday dinners have highest average bill.
###Output
_____no_output_____
###Markdown
**Answer Here:** **Exercise 3.** Extract the average bill for Friday lunch from the result of Exercise 2.
###Code
avgBill_byDay_byTime.loc['Fri','Lunch']
###Output
_____no_output_____
###Markdown
**Exercise 4.** Use `.groupby()` to make a visualization comparing the distribution of tip percentages left by males and females. How do they compare?
###Code
# YOUR CODE HERE
tips_df.groupby(['sex']).tip_percent.plot.hist(alpha=0.5,bins=50,legend=True)
#The tip avg. is roughly the same for both sexes, but Males seem to be paying the bill more often.
###Output
_____no_output_____
###Markdown
**Exercise 5.** Calculate the average total bill by day, time, and table size. Display the results in a pivot table.
###Code
# TYPE YOUR CODE HERE.
tips_df.pivot_table(index=['size'],columns=['day','time'],values='total_bill')
###Output
_____no_output_____
###Markdown
**Exercise 6.** Make a bar chart showing the average total bill by table size, day, and time. (You will have to decide which variable(s) to represent on the $x$-axis and which variable(s) to represent using different colored bars.) Explain your choice below.
###Code
# TYPE YOUR CODE HERE.
tips_df.pivot_table(index=['size'],columns=['day','time'],values='total_bill').plot.bar()
#I chose to make the index/ x variable the size as this had the most variation and would result in a lot more hard to consume
#lines if it had been a dependent variable
###Output
_____no_output_____
###Markdown
**Answer Here:** Exercises 3-4 deal with the Ames Housing data set (`../data/ames.tsv`). For more information about the variables in this data set, please refer to the [data documentation](https://ww2.amstat.org/publications/jse/v19n3/decock/DataDocumentation.txt).
###Code
df_ames = pd.read_csv("../data/ames.tsv", sep='\t')
display(df_ames.head())
df_ames.dtypes
###Output
_____no_output_____
###Markdown
**Exercise 7.** Calculate the average house price by neighborhood and building type, and store it in data cube form. Use the data cube to determine the neighborhood with the most expensive single-family homes.
###Code
# TYPE YOUR CODE HERE.
df_ames.pivot_table( index=["Neighborhood"],columns=["Bldg Type"],
values="SalePrice",
aggfunc=np.mean
)
###Output
_____no_output_____ |
notebooks/GTO_integrals/.ipynb_checkpoints/GTO_1D_P-checkpoint.ipynb | ###Markdown
Parameters and two Gaussians
###Code
from sympy import symbols, exp, integrate, diff, sqrt, oo, printing  # SymPy imports assumed for the symbolic work below
x = symbols('x', real=True)
a, b, c, a1, a2 = symbols('a b c a1 a2', positive=True, real=True)
g1=x*exp(-a1*x**2)
g2=x*exp(-a2*x**2)
g1, g2
###Output
_____no_output_____
###Markdown
Normalization constant
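For reference, the standard Gaussian moment $\int_{-\infty}^{\infty} x^2 e^{-\alpha x^2}\,dx = \tfrac{1}{2}\sqrt{\pi/\alpha^3}$ applies here with $\alpha = 2a_1$, so $N = \sqrt{\pi}/\bigl(2\,(2a_1)^{3/2}\bigr)$ and the normalization constant is $1/\sqrt{N}$.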
###Code
N=integrate(g1*g1, (x, -oo, oo))
N
1/sqrt(N)
printing.sstrrepr(1/sqrt(N))
###Output
_____no_output_____
###Markdown
Overlap integral S
###Code
S=integrate(g1*g2, (x, -oo, oo))
S
S.simplify()
printing.sstrrepr(S.simplify())
###Output
_____no_output_____
###Markdown
Kinetic energy $T = -\frac{\hbar^2}{2m} \frac{d^2}{dx^2} = \frac{1}{2m}\left(\frac{\hbar}{i}\frac{d}{dx} \right)^2$
###Code
d1=diff(g1,x)
d2=diff(g2,x)
d1, d2
T = 1/2 * integrate(d1*d2, (x, -oo, oo))
#T=T.simplify()
#T=T.factor()
T.factor()
printing.sstrrepr(T.factor())
###Output
_____no_output_____
###Markdown
Potential $V(x) = (ax^2 - b)e^{-cx^2}$
###Code
v=(a*x**2-b)*exp(-c*x**2)
v
V = integrate(g1*v*g2, (x, -oo, oo))
V
V.factor()
printing.sstrrepr(V.factor())
###Output
_____no_output_____ |
Case_Studies_Notebooks/greenhouse-gas-emissions-by-sector.ipynb | ###Markdown
![alt text](https://github.com/callysto/callysto-sample-notebooks/blob/master/notebooks/images/Callysto_Notebook-Banner_Top_06.06.18.jpg?raw=true) Case Study: Greenhouse gas emissions, by sector (1990 - 2008) Greenhouse gas emissions (carbon dioxide equivalents), by industries and households. Industry aggregation is at the L-level of the input-output accounts of Statistics Canada. Geography: Canada. Table ID: 38100111. Source: https://open.canada.ca/data/en/dataset/2d60830b-ee2e-4fb5-8c6c-f241f6bf76ba
###Code
%run -i ./stats_can/helpers.py
%run -i ./stats_can/scwds.py
%run -i ./stats_can/sc.py
from ipywidgets import widgets, VBox, HBox, Button
from ipywidgets import Button, Layout, widgets
from IPython.display import display, Javascript, Markdown, HTML
import datetime as dt
import pandas as pd
import numpy as np  # needed for the np.average aggregation used below
import json
import datetime
from tqdm import tnrange, tqdm_notebook
from time import sleep
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
style = {'description_width': 'initial'}
# # Download data
# DATA SET PRODUCT ID for internal use only.
productId = '38100111'
download_tables(str(productId))
df_fullDATA = zip_table_to_dataframe(productId)
# Clean up full dataset - remove internal use columns
cols = list(df_fullDATA.loc[:,'REF_DATE':'UOM'])+ ['SCALAR_FACTOR'] + ['VALUE']
df_less = df_fullDATA[cols]
df_less2 = df_less.drop(["DGUID"], axis=1)
# Display only first five entries
df_less2.head()
# Fancy user interface to explore datasets
def rerun_cell( b ):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,\
IPython.notebook.get_selected_index()+3)'))
def run_4cell( b ):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,\
IPython.notebook.get_selected_index()+5)'))
style = {'description_width': 'initial'}
all_the_widgets = [widgets.Dropdown(
value = df_less2["Sector"].tolist()[0],
options = df_less2["Sector"].unique(),
description ='Sector:',
style = style,
disabled=False)]
# Button widget
CD_button = widgets.Button(
button_style='success',
description="Preview Dataset",
layout=Layout(width='15%', height='30px'),
style=style
)
# Connect widget to function - run subsequent cells
CD_button.on_click( rerun_cell )
# user menu using categories found above
tab3 = VBox(children=[HBox(children=all_the_widgets[0:3]),
CD_button])
tab = widgets.Tab(children=[tab3])
tab.set_title(0, 'Load Data Subset')
display(tab)
sub_df = df_less2[(df_less2["Sector"]==all_the_widgets[0].value)]
# Time to plot!
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
from matplotlib.pyplot import figure
register_matplotlib_converters()
%matplotlib inline
# Actual plot of time series
figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
# Get start and end date, plot value found under "VALUE" command
plt.plot(sub_df["REF_DATE"],sub_df["VALUE"],'b--',label='Value')
plt.xlabel('Year', fontsize=15)
plt.ylabel('Greenhouse Gas Emissions (kilotonnes)',fontsize=15)
# Title changes depending on the subcategory explored
plt.title(str(all_the_widgets[0].value),fontsize=20)
plt.xticks(rotation=90)
plt.grid(True)
#load "cufflinks" library under short name "cf"
import cufflinks as cf
#command to display graphics correctly in Jupyter notebook
cf.go_offline()
def enable_plotly_in_cell():
import IPython
from plotly.offline import init_notebook_mode
display(IPython.core.display.HTML('''
<script src="/static/components/requirejs/require.js"></script>
'''))
init_notebook_mode(connected=False)
get_ipython().events.register('pre_run_cell', enable_plotly_in_cell)
# pivot table to display total greenhouse gas emissions, by sector and year
all_data = pd.pivot_table(df_less2[df_less2["Sector"]!="Total, all sectors"], \
values='VALUE', index=["REF_DATE"],columns=["Sector"])
###Output
_____no_output_____
###Markdown
Total Greenhouse Gas Emissions by Sector, by year (1990 - 2008)
###Code
all_data
# Plot
title="Boxplot of Greenhouse Gas Emissions by Sector (1990 - 2008)"
print(title)
layout = dict(yaxis=dict(side='left'))
my_fig = all_data.iplot(asFigure=True,kind='box',layout=layout)
my_fig.layout.legend=dict(x=1.0, y=1.8)
my_fig.iplot(filename='line-example.html')
# Use pivot command to get average
all_data2 = pd.pivot_table(df_less2[df_less2["Sector"]!="Total, all sectors"], \
values='VALUE', index=["Sector"], aggfunc=np.average)
###Output
_____no_output_____
###Markdown
Average Greenhouse Gas Emissions by Sector (1990 - 2008)
###Code
all_data2
sorted_sector = all_data2.sort_values(by='VALUE', ascending=False)
sorted_sector = sorted_sector.reset_index("Sector")
sorted_sector.iloc[0:20].iplot(kind="pie",values="VALUE",labels="Sector",title="Average Greenhouse Emissions by Sector")
all_data.iplot(labels='Sector',legend=False,title="Time Series, Yearly Greenhouse Gas Emissions, by Sector (1990-2008)",xaxis_title="Year",yaxis_title="Greenhouse Gas Emissions (kilotonnes)")
###Output
_____no_output_____ |
CollaborativeFiltering.ipynb | ###Markdown
Collaborative Filtering Using your experience from analyzing Black Scholes, profile and analyze the composability methods used for the Collaborative Filtering algorithm.
###Code
#!/usr/bin/env python
# Copyright (c) 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import timeit
import numpy as np
import dask.array as da
from dask.diagnostics import ProgressBar
import random
import argparse
import numba
number_of_users = 40000
features = 900
chunk = 1000
try:
import numpy.random_intel as rnd
numpy_ver="intel"
except:
import numpy.random as rnd
numpy_ver="std"
print("Generating fake similarity")
#topk = da.random.normal(size=(features, features), chunks=(features, features)).compute()
topk = rnd.normal(size=(features, features))
t = da.from_array(topk, chunks=(features, features))
print("Generating fake user data")
#users = da.random.normal(size=(features, number_of_users), chunks=(features, chunk)).compute()
#users = rnd.normal(size=(features, number_of_users))
users = np.zeros(shape=(features, number_of_users), dtype=np.float64)
objects_idx = np.arange(features)
rated = rnd.randint(0, 10, size=number_of_users, dtype=np.int32)
for user in range(number_of_users):
rnd.shuffle(objects_idx)
items_rated = rated[user]
users[objects_idx[:items_rated], user] = rnd.randint(1, 5, size=items_rated, dtype=np.int32)
u = da.from_array(users, chunks=(features, chunk), name=False)
def run_numpy():
x = topk.dot(users)
x = np.where(users>0, 0, x)
return x.argmax(axis=0)
def run_dask():
x = t.dot(u)
x = da.where(u>0, 0, x)
r = x.argmax(axis=0)
return r.compute()
@numba.guvectorize('(f8[:],f8[:],i4[:])', '(n),(n)->()', nopython=True, target="parallel")
def recommendation(x, u, r):
maxx = x[0]
r[0] = -1
for i in range(x.shape[0]):
if u[i] == 0 and maxx < x[i]: # if user has no rank for the item
maxx = x[i]
r[0] = i
def run_numpy_numba():
x = topk.dot(users)
return recommendation(x, users)
def run_dask_numba():
x = t.dot(u)
r = da.map_blocks(recommendation, x, u, drop_axis=0)
return r.compute()
###Output
_____no_output_____
###Markdown
Run the standard NumPy version with timeit, cProfile, line_profiler
###Code
import cProfile
%load_ext line_profiler
###Output
_____no_output_____
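###Markdown
A sketch of how the profiling could be run on the NumPy implementation defined above (the specific commands and flags here are illustrative, not from the original notebook):
###Code
# Wall-clock timing of the pure NumPy version
%timeit -n 1 -r 3 run_numpy()
# Function-level profile, sorted by cumulative time
cProfile.run('run_numpy()', sort='cumulative')
# Line-by-line profile via the line_profiler extension loaded above
%lprun -f run_numpy run_numpy()
###Output
_____no_output_____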
###Markdown
![ebooks.jpg](attachment:ebooks.jpg) Kindle eBook Recommendation System: Content-Based Authors: Daniel Burdeno --- Contents- Overview- Business Understanding - Collaborative Filtering - Imports - Surprise Data & Split - Baselines - SVD & Grid Searches - NMF & Grid Searches - SVD++ & Grid Searches - Model Evaluation - Recommendation Function - Building - Function - Evaluation - Conclusion Overview > This project aims to build a two-system approach to recommending Kindle eBooks to both existing reviewers and new users looking to find similar books. For existing reviewers, a collaborative approach is taken by comparing similar reviewer profiles based on existing ratings. A content-based approach is taken in order to recommend books based on similar review text data and can be used by anyone. Business Understanding > Currently, eBooks are outsold by print books at about a 4 to 1 ratio. In 2020 there were 191 million eBooks sold. While Amazon holds over 70% of the eBook market via its Kindle platform, there is a large untapped potential for increasing eBook sales and promoting the use of eReaders compared to print. By utilizing quality recommendation systems, Amazon can boost the interest in and usability of eBooks, thus growing this market. The Kindle platform and eBooks in general are incredibly accessible for anyone with a tablet, smartphone, computer, or eReader. These eBooks can be immediately purchased from a multitude of platforms and can be read within minutes of purchase, which is far superior to obtaining a print book. This notion of real-time purchase and usability plays directly into Amazon's one-click purchase philosophy.> The Kindle store is also full of cheap reads, with some eBooks even being free with certain subscriptions like Prime and Unlimited. A broad span of genres is available, ranging from self-help books, cookbooks, and photography books to more traditional literature genres like Science Fiction & Fantasy and Romance novels. A final huge plus for the advocacy of eBooks is the ease with which readers can rate and review books they have either just read or already read. This can all be done via the same platform used to access and read the eBook (aka Kindle). Ultimately this plays into the collection of more review and rating data, which in turn can contribute to better-performing recommendations for each individual user. A quality recommendation system can thus create a positive feedback loop that not only enhances itself but promotes an increase in eBook sales across the board. Collaborative Filtering > The collaborative filtering system was based on reviewer rating data used to create 'user profiles' which could then be compared with one another. Similar users, based on prior eBook ratings, were then used to return the top recommended books by predicting an estimated rating. This approach is known as user-to-user. I iterated through several model algorithms and grid searches before settling on my final model (SVD GS3). This model achieved my lowest Root Mean Squared Error, coming in at 0.782 rating (RMSE). > I heavily utilized the Surprise library within Python in order to produce recommendation models. This package utilizes common methods found within sklearn and adapts them for use in building recommendation models. Please see the [documentation](https://surprise.readthedocs.io/en/stable/) and [github](https://github.com/NicolasHug/Surprise) for more information. Imports > The main library used to produce a collaborative filtering model was Surprise.
Within this package there are methods for cross-validating a model, performing grid searches, and a plethora of algorithms used for recommendation systems. The Surprise syntax is modeled after sklearn and thus should look familiar to most data scientists. It utilizes methods such as .fit and .predict. It also contains Dataset readers in order to transform data into the appropriate form for use in recommendation systems. I utilized Pickle to save my final model, which can then be loaded into the app.py file for ease of use.
###Code
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from surprise import Dataset, Reader, accuracy
from surprise.model_selection import cross_validate, train_test_split, GridSearchCV
from surprise.prediction_algorithms import SVD, SVDpp, NMF, BaselineOnly, NormalPredictor
plt.style.use('fast')
%matplotlib inline
# Read in csv file saved from the DataPrepFinal notebook
df_rev5 = pd.read_csv('Data/df_rev5.csv')
df_rev5.info()
# Sanity check for null values, somehow the saved csv had 6 nulls appear despite removing in data prep
df_rev5.isna().sum()
# Drop the 6 nulls found
df_rev5.dropna(inplace=True)
###Output
_____no_output_____
###Markdown
Surprise Data & Split > Surprise has a convenient reader function that can load appropriate data into the correct form for recommendation systems. You can set custom scales based on the rating information within your data (in this case 1-5). Using the load_from_df function you can specify which columns to include. In order to perform user-to-user comparison, the data needs to be in the format of User ID (reviewerID), followed by Product ID (asin), followed by rating (overall). In a similar convention to sklearn, Surprise also contains a train_test_split function.
###Code
# Instantiate reader the same convention as a sklearn class
reader = Reader(rating_scale=(1, 5))
# Load user_data from imported dataframe specifiyng which columns to use
user_data = Dataset.load_from_df(df_rev5[['reviewerID', 'asin', 'overall']], reader)
# Perform an appropriate split for the recommendation models
trainset, testset = train_test_split(user_data, test_size=0.2, random_state=42)
# How many users and items are in the trainset
print('Number of users: ', trainset.n_users, '\n')
print('Number of items: ', trainset.n_items, '\n')
###Output
Number of users: 97947
Number of items: 92463
###Markdown
Baselines > Surprise has several baseline models against which comparisons can be made. The normal predictor predicts a random rating based solely on the distribution of the dataset, acting kind of like a dummy model from sklearn. The baseline-only algorithm uses only the baseline estimates (global mean plus user and item biases) in the dataset to return predictions. In order to evaluate model performance I will use the Root Mean Squared Error (RMSE) metric. This metric best represents how far the predicted rating is from the actual rating, thus quantifying the error made when making new predictions for a user, which will be used to return recommendations.
###Code
# Instantiate and fit model
baseline = NormalPredictor()
baseline.fit(trainset)
# Return test predictions for model fit on trainset
predictions = baseline.test(testset)
# Save RMSE score to variable
baseline_normal = accuracy.rmse(predictions)
# Instantiate and fit model
baseline2 = BaselineOnly()
baseline2.fit(trainset)
# Return test predictions for model fit on trainset
predictions = baseline2.test(testset)
# Save RMSE score to variable
baseline_only = accuracy.rmse(predictions)
###Output
Estimating biases using als...
RMSE: 0.8163
###Markdown
SVD & Grid Searches > Following the baseline models I explored the Singular Value Decomposition (SVD) algorithm popularized by Simon Funk, which became famous during the Netflix Prize competition. The Surprise package utilizes Funk's SVD version. It is a matrix factorization-based model, meaning it decomposes the user-item matrix into latent factor matrices. Regularized squared error is minimized using straightforward stochastic gradient descent. Within SVD there are a number of important hyperparameters that can be tuned, including but not limited to the regularization and learning rate, as well as the number of factors and epochs to train on. This allows for what can be extensive grid searches.
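> For reference, the Funk-SVD formulation used by Surprise predicts $\hat{r}_{ui} = \mu + b_u + b_i + q_i^\top p_u$ (global mean plus user/item biases plus the dot product of the latent factor vectors), and SGD minimizes the regularized squared error $\sum_{r_{ui}} (r_{ui} - \hat{r}_{ui})^2 + \lambda\,(b_u^2 + b_i^2 + \lVert q_i\rVert^2 + \lVert p_u\rVert^2)$.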
###Code
# Cross validate a basic SVD with no hyperparameter tuning expecting sub-par results
svd_basic = SVD(random_state=42)
results = cross_validate(svd_basic, user_data, measures=['RMSE'], cv=3, n_jobs = -1, verbose=True)
# Fit to trainset and predict on the testset for evaluation
svd_basic.fit(trainset)
predictions = svd_basic.test(testset)
svd_simple = accuracy.rmse(predictions)
###Output
RMSE: 0.8048
###Markdown
> The basic SVD performed only slightly better than our baseline-only model, indicating room for improvement through grid-search tuning of hyperparameters. Reducing the number of latent factors will help the model better train on the dataset. Increasing the number of epochs (iterations) of training should also improve the RMSE. I also wanted to check if turning off the biased approach would affect the model (the default is True).
###Code
# Similar convention to sklearn grid search, I setup a dictionary on the hyperparamters I wanted to tune
svd_param_grid = {'n_factors':[20, 40],
'n_epochs': [10, 20],
'biased': [True, False]}
# Grid Search with CV is instantiate
svd_gs_model = GridSearchCV(SVD,param_grid=svd_param_grid,joblib_verbose=10, n_jobs=-1, cv=3)
# Model is fit on data and best_params scored based on RMSE are returned
svd_gs_model.fit(user_data)
svd_gs_model.best_params['rmse']
# Instantiate SVD using the best found hyperparameters
svd_model = SVD(n_factors=20, n_epochs=20, random_state=42)
# Fit on trainset and make predictions using testset to return RMSE metric
svd_model.fit(trainset)
predictions = svd_model.test(testset)
svd_gs1 = accuracy.rmse(predictions)
###Output
RMSE: 0.7964
###Markdown
> While improvement was made, it is only slightly better, and continued grid searching/hyperparameter tuning is needed to try and further improve the model results. Again I further adjusted the number of factors and increased the number of training epochs to try and reduce RMSE. I also attempted to fiddle with the regularization and learning rates.
###Code
# Setup new hyperparameter dictionary
svd_param_grid2 = {'n_factors':[5, 20],
'n_epochs': [20, 40],
'lr_all': [0.05, .005],
'reg_all': [0.01, 0.02]}
svd_gs2_model = GridSearchCV(SVD,param_grid=svd_param_grid2,joblib_verbose=10, n_jobs=-1, cv=3)
# Return best_params based on RMSE
svd_gs2_model.fit(user_data)
svd_gs2_model.best_params['rmse']
# Instantiate new SVD using the new best found hyperparameters
svd2_model = SVD(n_factors=5, n_epochs=40, random_state=42)
# Fit on trainset and make predictions using testset to return RMSE metric
svd2_model.fit(trainset)
predictions = svd2_model.test(testset)
svd_gs2 = accuracy.rmse(predictions)
###Output
RMSE: 0.7848
###Markdown
> The second grid search worked better for tuning hyperparameters, reducing the RMSE by over 0.01. I still believe I could achieve a further decrease in RMSE, so I performed a third grid search, once again adjusting factors and training epochs as well as investigating a different change in regularization and/or learning rate.
###Code
# Setup new hyperparameter dictionary
svd_param_grid3 = {'n_factors':[1, 3, 5],
'n_epochs':[40, 50],
'lr_all':[0.005, 0.001],
'reg_all':[0.02, .05]}
svd_gs3_model = GridSearchCV(SVD, param_grid=svd_param_grid3, cv=3, joblib_verbose=10, n_jobs=-1, return_train_measures=True)
# Return best_params based on RMSE
svd_gs3_model.fit(user_data)
svd_gs3_model.best_params['rmse']
# Instantiate a third SVD using the new best found hyperparameters
svd3_model = SVD(n_factors=1, n_epochs=50,
lr_all=0.005, reg_all=0.05, random_state=42)
# Fit on trainset and make predictions using testset to return RMSE metric
svd3_model.fit(trainset)
predictions = svd3_model.test(testset)
svd_gs3 = accuracy.rmse(predictions)
###Output
RMSE: 0.7824
###Markdown
> This third SVD model showed a very slight increase in performance as compared to the second model. I decided to stop here as further grid searches would be a waste of time and computational power. I expected the SVD model to outperform other recommendation algorithms that use matrix factorization, given its widespread use and success within the domain. NMF & Grid Searches > In an attempt to explore further algorithms within Surprise I wanted to try a Non-negative Matrix Factorization (NMF) model. This algorithm is very similar to SVD but keeps all user and item factors positive by setting a very specific step size for the stochastic gradient descent. The biased version can be very prone to overfitting, but this is addressed by reducing the number of factors. Given the similarities to SVD, I performed a similar grid search as for the third SVD model.
###Code
# New hyperparameter dictionary for nmf model
nmf_param_grid = {'biased':[True, False],
'n_factors':[10, 5, 1],
'n_epochs': [25, 50]}
nmf_gs_model = GridSearchCV(NMF, param_grid=nmf_param_grid, cv=3, joblib_verbose=10, n_jobs=-1, return_train_measures=True)
# Fit and return the best hyperparameters
nmf_gs_model.fit(user_data)
nmf_gs_model.best_params['rmse']
# Instantiate - fit on trainset - score the model on testset
nmf_model = NMF(n_factors=1, n_epochs=50, random_state=42, biased=True)
nmf_model.fit(trainset)
predictions = nmf_model.test(testset)
nmf_gs = accuracy.rmse(predictions)
###Output
RMSE: 0.7864
###Markdown
> As expected, based on its similarity to SVD, this model performed only slightly worse than the second and third SVD models. It had a slightly lower fit time, but the increased RMSE is not worth the trade-off. I concluded the SVD was still better performing than NMF. SVD++ & Grid Searches > For a final look at Surprise I wanted to try and utilize the SVD++ algorithm, again very similar to SVD. The difference is that SVD++ adds an extension onto the base SVD that uses implicit ratings as well as explicit ones. In other words, it treats the action of rating an item as a latent factor regardless of the rating value given to the item, while also factoring in the actual rating value. I thought this might further improve the RMSE by taking into account reviewers who have rated a large number of eBooks. Please note that this grid search will take a very long time to run.
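> For reference, the SVD++ prediction rule adds implicit-feedback terms to the baseline-plus-factors model: $\hat{r}_{ui} = \mu + b_u + b_i + q_i^\top\bigl(p_u + |I_u|^{-1/2}\sum_{j \in I_u} y_j\bigr)$, where $I_u$ is the set of items rated by user $u$ and each $y_j$ is an implicit factor vector, so the mere act of rating an item contributes to the user representation regardless of the rating value.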
###Code
# New dictionary for SVD++
svdpp_param_grid = {'n_factors':[1, 5],
'n_epochs':[25, 50],
'reg_all':[0.02, 0.05]}
svdpp_gs_model = GridSearchCV(SVDpp, param_grid=svdpp_param_grid, cv=3, joblib_verbose=10, n_jobs=-1, return_train_measures=True)
# Fit and return the best_params based on cross validation this will take a VERY long time to run
svdpp_gs_model.fit(user_data)
svdpp_gs_model.best_params['rmse']
# Instantiate - fit on trainset - score the model on testset
SVDpp_model = SVDpp(n_factors=1, n_epochs=25, random_state=42, reg_all=0.05)
SVDpp_model.fit(trainset)
predictions = SVDpp_model.test(testset)
SVDpp_gs = accuracy.rmse(predictions)
###Output
RMSE: 0.7845
###Markdown
> Surprisingly, the SVD++ did not perform better than the third standard SVD, where I thought it might be able to use implicit ratings to reduce RMSE. It also took considerably longer to fit on the dataset given the addition of the implicit rating factors. Fit time was more than tripled based on the grid search output timings. Further hyperparameter tuning might work to slightly improve performance but was not worth the required computational time. Model Evaluation > Based on the selected metric of RMSE, the third SVD model is my best performing model and will be used as the final model within my function in order to make rating predictions and return top recommendations. Below I visualize the difference achieved through the iterative process. I was only able to improve slightly upon the baseline-only model; however, this makes sense given that all these models utilize the baseline bias within their calculations. The score was dramatically improved from the normal predictor, which makes random predictions based on the rating distribution (more akin to a true dummy baseline). RMSE was reduced from 1.234 down to 0.782, almost half. I am quite happy with an RMSE of only 0.782 rating taken in the context of a scale from 1-5. I am confident in the model's ability to return estimated rating predictions which can be used to determine which eBooks to recommend to reviewers.
###Code
X = ['Baseline Only', 'SVD Basic', 'SVD GS1', 'NMF GS', 'SVDpp GS', 'SVD GS3']
y = [baseline_only, svd_simple, svd_gs1, nmf_gs, SVDpp_gs, svd_gs3]
fig, ax = plt.subplots()
plt.bar(X, y, color=['black', 'blue', 'blue', 'blue', 'blue', 'green'])
plt.xticks(rotation=25)
plt.ylim(0.7, .85)
plt.grid(False)
ax.set_title("Surprise Models")
plt.ylabel('Root Mean Squared Error (RMSE)')
plt.savefig('Images/Model_bar.png', dpi=300, bbox_inches='tight');
###Output
_____no_output_____
###Markdown
Recommendation Function > My final model (svd3) can now be utilized within a function in order to return the top n recommendations. In order for the model to return metadata associated with the recommendations, I have loaded in the cleaned meta5 csv containing the appropriate information. This will also be used to index against eBooks that the input reviewer has already rated, in order not to return already-read books. A user dataframe is created from the rev5 set, which has been subsetted to only include reviewers with 5 or more entries, in order to determine books that a user has already rated. These entries are then dropped from the dataframe on which predicted ratings are performed. This was done so the recommendations do not return any previously rated books to the reviewer, which would be useless. The predicted (estimated) rating is then returned as a new column within the eBook metadata frame and used to sort by descending value. Recommendations are made based on top estimated ratings for books the reviewer has not rated. Building
###Code
# Using Surprise a full trainset incorporating all data can be built and fit to the model in order to make full predictions
trainset_full = user_data.build_full_trainset()
svd3_model.fit(trainset_full)
# The model is pickled and saved into the Model folder in the repository so it can used in the app.py file
pickle.dump(svd3_model, open('Model/collab_model.sav', 'wb'))
# Load in the meta data for use in returning eBook recommendation information
df_meta5 = pd.read_csv('Data/meta5.csv', index_col='asin')
df_meta5.drop(columns =['Unnamed: 0'], inplace=True)
df_meta5.info()
# Sanity check on meta data
df_meta5.head()
# The rating data is subset to just show reviewers and the eBooks they have rated
df_user = df_rev5.set_index('reviewerID')
df_user.drop(columns=['Unnamed: 0', 'reviewText', 'overall'], inplace=True)
df_user.head()
df_user.to_csv('Data/df_user.csv')
# Dual input for unique reviewer ID and how many recommendations you would like
user = input('UserId: ')
n_recs = int(input('How many recommendations? '))
# Creating a list of the eBooks that said reviewer has already rated
have_reviewed = list(df_user.loc[user, 'asin'])
# Creating new dataframe from meta data to subset based on already reviewed eBooks
not_reviewed = df_meta5.copy()
# Dropping indexes (asin) that correspond to already reviewed eBooks
not_reviewed.drop(have_reviewed, inplace=True)
# Reset index to pull out asin as a seperate column
not_reviewed.reset_index(inplace=True)
not_reviewed.head()
# Obtain rating predictions based on model.predict, passing in user input. Using .apply with lamdba function to iterate through
not_reviewed['est_rating'] = not_reviewed['asin'].apply(lambda x: svd3_model.predict(user, x).est)
# Sort dataframe based on newly created est_rating problem in order to return top predictions
not_reviewed.sort_values(by='est_rating', ascending=False, inplace=True)
not_reviewed.head()
not_reviewed.rename(columns={'title':'Title', 'author':'Author',
'genre':'Genre', 'print_length':'# Pages',
'word_wise':'Word Wise', 'lending':'Lending', 'asin':'ASIN', 'est_rating':'Estimated Rating'}, inplace=True)
not_reviewed[['# Pages', 'Word Wise', 'Lending']] = not_reviewed[['# Pages', 'Word Wise', 'Lending']].astype(int)
# Rename columns for better display value and presentation
not_reviewed.rename(columns={'title':'Title', 'author':'Author',
'genre':'Genre', 'print_length':'# Pages',
'word_wise':'Word Wise', 'lending':'Lending',
'asin':'ASIN', 'est_rating':'Estimated Rating'}, inplace=True)
# Change integers into floats for better display value and presentation
not_reviewed[['# Pages', 'Word Wise', 'Lending']] = not_reviewed[['# Pages', 'Word Wise', 'Lending']].astype(int)
not_reviewed.head()
# Final step is to only return the top n_recs as denoted by input, done using .head(n_recs)
not_reviewed.head(n_recs)
###Output
_____no_output_____
###Markdown
Function
###Code
def user_recommend_books():
user = input('ReviewerId: ')
n_recs = int(input('How many recommendations? '))
have_reviewed = list(df_user.loc[user, 'asin'])
not_reviewed = df_meta5.copy()
not_reviewed.drop(have_reviewed, inplace=True)
not_reviewed.reset_index(inplace=True)
not_reviewed['est_rating'] = not_reviewed['asin'].apply(lambda x: svd3_model.predict(user, x).est)
not_reviewed.sort_values(by='est_rating', ascending=False, inplace=True)
not_reviewed.rename(columns={'title':'Title', 'author':'Author',
'genre':'Genre', 'print_length':'# Pages',
'word_wise':'Word Wise', 'lending':'Lending',
'asin':'ASIN', 'est_rating':'Estimated Rating'}, inplace=True)
not_reviewed[['# Pages', 'Word Wise', 'Lending']] = not_reviewed[['# Pages', 'Word Wise', 'Lending']].astype(int)
return not_reviewed.head(n_recs)
###Output
_____no_output_____
###Markdown
Evaluation > The built function can now be used to return recommendations for specific users. Below I have demonstrated two examples of this. The system is working well, returning a wide range of genres and eBooks. eBook returns are based on the reviewer's profile (previously rated eBooks) when compared to other profiles. My final trained SVD model is used to predict an estimated rating for every book that a reviewer has not rated. In turn, this is used to return top recommended books based on the highest estimated ratings.> The model performs very well with reviewers who have a wide variety of ratings, true to their preferences. This means that they rate some books highly if they liked them and rate others they did not like negatively. This allows the model to accurately distinguish between individual preference profiles. The system begins to break down slightly when looking at users who only gave high ratings. The model has a harder time distinguishing between predicted ratings at the top end, and the recommendation system returns a large number of maximum-rated eBooks (5). Reviewers can still receive recommended books, but the system might not be able to determine their unique preferences as well. To improve performance for every user, users should be encouraged to rate books across the full rating scale so that the model can be retrained to pick up on these subtle nuances.
###Code
# Set display max column width to none for better display and visual
pd.set_option('display.max_colwidth', None)
user_recommend_books()
user_recommend_books()
###Output
ReviewerId: A2KQSBNFAYTFYO
How many recommendations? 10
|
Scikit-Learn-Tutorial/3. Training a model on the Iris Dataset.ipynb | ###Markdown
Scikit Learn Tutorial 3 - Training a model on the Iris Dataset Run in Google Colab View source on GitHub ![Scikit Learn Logo](http://scikit-learn.org/stable/_static/scikit-learn-logo-small.png) Loading in Dataset
###Code
import pandas as pd
data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', names=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class'])
data.head()
###Output
C:\Users\Gilbert\AppData\Local\Continuum\anaconda3\envs\ml\lib\importlib\_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject
return f(*args, **kwds)
###Markdown
Preprocessing Data Transforming the classes to numeric data
###Code
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
data['class'] = le.fit_transform(data['class'])
data.head()
###Output
_____no_output_____
###Markdown
Split features and label and transform them to a Numpy Array
###Code
import numpy as np
X = np.array(data.drop(['class'], axis=1))
y = np.array(data['class'])
###Output
_____no_output_____
###Markdown
Building Model
###Code
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(X, y)
###Output
C:\Users\Gilbert\AppData\Local\Continuum\anaconda3\envs\ml\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\Gilbert\AppData\Local\Continuum\anaconda3\envs\ml\lib\site-packages\sklearn\linear_model\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.
"this warning.", FutureWarning)
###Markdown
Evaluating Accuracy In this notebook we are going to evaluate the accuracy on the same data we used for training. You shouldn't do this, because you can't be sure the result means anything; the model could just be overfitting the data. In reality we would split the dataset into a training set and a test set. We will cover this in the next tutorial.
###Code
accuracy = clf.score(X, y)
accuracy
###Output
_____no_output_____
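###Markdown
As a quick sketch of the held-out evaluation mentioned above (the split settings here are illustrative; the next tutorial covers this properly):
###Code
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
clf_holdout = LogisticRegression()
clf_holdout.fit(X_train, y_train)
clf_holdout.score(X_test, y_test)  # accuracy on data the model has not seen
###Output
_____no_output_____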
###Markdown
Scikit Learn Tutorial 3 - Training a model on the Iris Dataset Run in Google Colab View source on GitHub ![Scikit Learn Logo](http://scikit-learn.org/stable/_static/scikit-learn-logo-small.png) Loading in Dataset
###Code
import pandas as pd
data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', names=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class'])
data.head()
###Output
_____no_output_____
###Markdown
Preprocessing Data Transforming the classes to numeric data
###Code
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
data['class'] = le.fit_transform(data['class'])
data.head()
###Output
_____no_output_____
###Markdown
Split features and label and transform them to a Numpy Array
###Code
import numpy as np
X = np.array(data.drop(['class'], axis=1))
y = np.array(data['class'])
###Output
_____no_output_____
###Markdown
Building Model
###Code
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(X, y)
help(clf)
###Output
Help on LogisticRegression in module sklearn.linear_model.logistic object:
class LogisticRegression(sklearn.base.BaseEstimator, sklearn.linear_model.base.LinearClassifierMixin, sklearn.linear_model.base.SparseCoefMixin)
| LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='warn', max_iter=100, multi_class='warn', verbose=0, warm_start=False, n_jobs=None, l1_ratio=None)
|
| Logistic Regression (aka logit, MaxEnt) classifier.
|
| In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
| scheme if the 'multi_class' option is set to 'ovr', and uses the
| cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
| (Currently the 'multinomial' option is supported only by the 'lbfgs',
| 'sag', 'saga' and 'newton-cg' solvers.)
|
| This class implements regularized logistic regression using the
| 'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
| that regularization is applied by default**. It can handle both dense
| and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
| floats for optimal performance; any other input format will be converted
| (and copied).
|
| The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
| with primal formulation, or no regularization. The 'liblinear' solver
| supports both L1 and L2 regularization, with a dual formulation only for
| the L2 penalty. The Elastic-Net regularization is only supported by the
| 'saga' solver.
|
| Read more in the :ref:`User Guide <logistic_regression>`.
|
| Parameters
| ----------
| penalty : str, 'l1', 'l2', 'elasticnet' or 'none', optional (default='l2')
| Used to specify the norm used in the penalization. The 'newton-cg',
| 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
| only supported by the 'saga' solver. If 'none' (not supported by the
| liblinear solver), no regularization is applied.
|
| .. versionadded:: 0.19
| l1 penalty with SAGA solver (allowing 'multinomial' + L1)
|
| dual : bool, optional (default=False)
| Dual or primal formulation. Dual formulation is only implemented for
| l2 penalty with liblinear solver. Prefer dual=False when
| n_samples > n_features.
|
| tol : float, optional (default=1e-4)
| Tolerance for stopping criteria.
|
| C : float, optional (default=1.0)
| Inverse of regularization strength; must be a positive float.
| Like in support vector machines, smaller values specify stronger
| regularization.
|
| fit_intercept : bool, optional (default=True)
| Specifies if a constant (a.k.a. bias or intercept) should be
| added to the decision function.
|
| intercept_scaling : float, optional (default=1)
| Useful only when the solver 'liblinear' is used
| and self.fit_intercept is set to True. In this case, x becomes
| [x, self.intercept_scaling],
| i.e. a "synthetic" feature with constant value equal to
| intercept_scaling is appended to the instance vector.
| The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
|
| Note! the synthetic feature weight is subject to l1/l2 regularization
| as all other features.
| To lessen the effect of regularization on synthetic feature weight
| (and therefore on the intercept) intercept_scaling has to be increased.
|
| class_weight : dict or 'balanced', optional (default=None)
| Weights associated with classes in the form ``{class_label: weight}``.
| If not given, all classes are supposed to have weight one.
|
| The "balanced" mode uses the values of y to automatically adjust
| weights inversely proportional to class frequencies in the input data
| as ``n_samples / (n_classes * np.bincount(y))``.
|
| Note that these weights will be multiplied with sample_weight (passed
| through the fit method) if sample_weight is specified.
|
| .. versionadded:: 0.17
| *class_weight='balanced'*
|
| random_state : int, RandomState instance or None, optional (default=None)
| The seed of the pseudo random number generator to use when shuffling
| the data. If int, random_state is the seed used by the random number
| generator; If RandomState instance, random_state is the random number
| generator; If None, the random number generator is the RandomState
| instance used by `np.random`. Used when ``solver`` == 'sag' or
| 'liblinear'.
|
| solver : str, {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, optional (default='liblinear').
|
| Algorithm to use in the optimization problem.
|
| - For small datasets, 'liblinear' is a good choice, whereas 'sag' and
| 'saga' are faster for large ones.
| - For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
| handle multinomial loss; 'liblinear' is limited to one-versus-rest
| schemes.
| - 'newton-cg', 'lbfgs', 'sag' and 'saga' handle L2 or no penalty
| - 'liblinear' and 'saga' also handle L1 penalty
| - 'saga' also supports 'elasticnet' penalty
| - 'liblinear' does not handle no penalty
|
| Note that 'sag' and 'saga' fast convergence is only guaranteed on
| features with approximately the same scale. You can
| preprocess the data with a scaler from sklearn.preprocessing.
|
| .. versionadded:: 0.17
| Stochastic Average Gradient descent solver.
| .. versionadded:: 0.19
| SAGA solver.
| .. versionchanged:: 0.20
| Default will change from 'liblinear' to 'lbfgs' in 0.22.
|
| max_iter : int, optional (default=100)
| Maximum number of iterations taken for the solvers to converge.
|
| multi_class : str, {'ovr', 'multinomial', 'auto'}, optional (default='ovr')
| If the option chosen is 'ovr', then a binary problem is fit for each
| label. For 'multinomial' the loss minimised is the multinomial loss fit
| across the entire probability distribution, *even when the data is
| binary*. 'multinomial' is unavailable when solver='liblinear'.
| 'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
| and otherwise selects 'multinomial'.
|
| .. versionadded:: 0.18
| Stochastic Average Gradient descent solver for 'multinomial' case.
| .. versionchanged:: 0.20
| Default will change from 'ovr' to 'auto' in 0.22.
|
| verbose : int, optional (default=0)
| For the liblinear and lbfgs solvers set verbose to any positive
| number for verbosity.
|
| warm_start : bool, optional (default=False)
| When set to True, reuse the solution of the previous call to fit as
| initialization, otherwise, just erase the previous solution.
| Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
|
| .. versionadded:: 0.17
| *warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
|
| n_jobs : int or None, optional (default=None)
| Number of CPU cores used when parallelizing over classes if
| multi_class='ovr'". This parameter is ignored when the ``solver`` is
| set to 'liblinear' regardless of whether 'multi_class' is specified or
| not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
| context. ``-1`` means using all processors.
| See :term:`Glossary <n_jobs>` for more details.
|
| l1_ratio : float or None, optional (default=None)
| The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
| used if ``penalty='elasticnet'`. Setting ``l1_ratio=0`` is equivalent
| to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
| to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
| combination of L1 and L2.
|
| Attributes
| ----------
|
| classes_ : array, shape (n_classes, )
| A list of class labels known to the classifier.
|
| coef_ : array, shape (1, n_features) or (n_classes, n_features)
| Coefficient of the features in the decision function.
|
| `coef_` is of shape (1, n_features) when the given problem is binary.
| In particular, when `multi_class='multinomial'`, `coef_` corresponds
| to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
|
| intercept_ : array, shape (1,) or (n_classes,)
| Intercept (a.k.a. bias) added to the decision function.
|
| If `fit_intercept` is set to False, the intercept is set to zero.
| `intercept_` is of shape (1,) when the given problem is binary.
| In particular, when `multi_class='multinomial'`, `intercept_`
| corresponds to outcome 1 (True) and `-intercept_` corresponds to
| outcome 0 (False).
|
| n_iter_ : array, shape (n_classes,) or (1, )
| Actual number of iterations for all classes. If binary or multinomial,
| it returns only 1 element. For liblinear solver, only the maximum
| number of iteration across all classes is given.
|
| .. versionchanged:: 0.20
|
| In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
| ``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
|
| Examples
| --------
| >>> from sklearn.datasets import load_iris
| >>> from sklearn.linear_model import LogisticRegression
| >>> X, y = load_iris(return_X_y=True)
| >>> clf = LogisticRegression(random_state=0, solver='lbfgs',
| ... multi_class='multinomial').fit(X, y)
| >>> clf.predict(X[:2, :])
| array([0, 0])
| >>> clf.predict_proba(X[:2, :]) # doctest: +ELLIPSIS
| array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
| [9.7...e-01, 2.8...e-02, ...e-08]])
| >>> clf.score(X, y)
| 0.97...
|
| See also
| --------
| SGDClassifier : incrementally trained logistic regression (when given
| the parameter ``loss="log"``).
| LogisticRegressionCV : Logistic regression with built-in cross validation
|
| Notes
| -----
| The underlying C implementation uses a random number generator to
| select features when fitting the model. It is thus not uncommon,
| to have slightly different results for the same input data. If
| that happens, try with a smaller tol parameter.
|
| Predict output may not match that of standalone liblinear in certain
| cases. See :ref:`differences from liblinear <liblinear_differences>`
| in the narrative documentation.
|
| References
| ----------
|
| LIBLINEAR -- A Library for Large Linear Classification
| https://www.csie.ntu.edu.tw/~cjlin/liblinear/
|
| SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
| Minimizing Finite Sums with the Stochastic Average Gradient
| https://hal.inria.fr/hal-00860051/document
|
| SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
| SAGA: A Fast Incremental Gradient Method With Support
| for Non-Strongly Convex Composite Objectives
| https://arxiv.org/abs/1407.0202
|
| Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
| methods for logistic regression and maximum entropy models.
| Machine Learning 85(1-2):41-75.
| https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
|
| Method resolution order:
| LogisticRegression
| sklearn.base.BaseEstimator
| sklearn.linear_model.base.LinearClassifierMixin
| sklearn.base.ClassifierMixin
| sklearn.linear_model.base.SparseCoefMixin
| builtins.object
|
| Methods defined here:
|
| __init__(self, penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='warn', max_iter=100, multi_class='warn', verbose=0, warm_start=False, n_jobs=None, l1_ratio=None)
| Initialize self. See help(type(self)) for accurate signature.
|
| fit(self, X, y, sample_weight=None)
| Fit the model according to the given training data.
|
| Parameters
| ----------
| X : {array-like, sparse matrix}, shape (n_samples, n_features)
| Training vector, where n_samples is the number of samples and
| n_features is the number of features.
|
| y : array-like, shape (n_samples,)
| Target vector relative to X.
|
| sample_weight : array-like, shape (n_samples,) optional
| Array of weights that are assigned to individual samples.
| If not provided, then each sample is given unit weight.
|
| .. versionadded:: 0.17
| *sample_weight* support to LogisticRegression.
|
| Returns
| -------
| self : object
|
| Notes
| -----
| The SAGA solver supports both float64 and float32 bit arrays.
|
| predict_log_proba(self, X)
| Log of probability estimates.
|
| The returned estimates for all classes are ordered by the
| label of classes.
|
| Parameters
| ----------
| X : array-like, shape = [n_samples, n_features]
|
| Returns
| -------
| T : array-like, shape = [n_samples, n_classes]
| Returns the log-probability of the sample for each class in the
| model, where classes are ordered as they are in ``self.classes_``.
|
| predict_proba(self, X)
| Probability estimates.
|
| The returned estimates for all classes are ordered by the
| label of classes.
|
| For a multi_class problem, if multi_class is set to be "multinomial"
| the softmax function is used to find the predicted probability of
| each class.
| Else use a one-vs-rest approach, i.e calculate the probability
| of each class assuming it to be positive using the logistic function.
| and normalize these values across all the classes.
|
| Parameters
| ----------
| X : array-like, shape = [n_samples, n_features]
|
| Returns
| -------
| T : array-like, shape = [n_samples, n_classes]
| Returns the probability of the sample for each class in the model,
| where classes are ordered as they are in ``self.classes_``.
|
| ----------------------------------------------------------------------
| Methods inherited from sklearn.base.BaseEstimator:
|
| __getstate__(self)
|
| __repr__(self, N_CHAR_MAX=700)
| Return repr(self).
|
| __setstate__(self, state)
|
| get_params(self, deep=True)
| Get parameters for this estimator.
|
| Parameters
| ----------
| deep : boolean, optional
| If True, will return the parameters for this estimator and
| contained subobjects that are estimators.
|
| Returns
| -------
| params : mapping of string to any
| Parameter names mapped to their values.
|
| set_params(self, **params)
| Set the parameters of this estimator.
|
| The method works on simple estimators as well as on nested objects
| (such as pipelines). The latter have parameters of the form
| ``<component>__<parameter>`` so that it's possible to update each
| component of a nested object.
|
| Returns
| -------
| self
|
| ----------------------------------------------------------------------
| Data descriptors inherited from sklearn.base.BaseEstimator:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
|
| ----------------------------------------------------------------------
| Methods inherited from sklearn.linear_model.base.LinearClassifierMixin:
|
| decision_function(self, X)
| Predict confidence scores for samples.
|
| The confidence score for a sample is the signed distance of that
| sample to the hyperplane.
|
| Parameters
| ----------
| X : array_like or sparse matrix, shape (n_samples, n_features)
| Samples.
|
| Returns
| -------
| array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
| Confidence scores per (sample, class) combination. In the binary
| case, confidence score for self.classes_[1] where >0 means this
| class would be predicted.
|
| predict(self, X)
| Predict class labels for samples in X.
|
| Parameters
| ----------
| X : array_like or sparse matrix, shape (n_samples, n_features)
| Samples.
|
| Returns
| -------
| C : array, shape [n_samples]
| Predicted class label per sample.
|
| ----------------------------------------------------------------------
| Methods inherited from sklearn.base.ClassifierMixin:
|
| score(self, X, y, sample_weight=None)
| Returns the mean accuracy on the given test data and labels.
|
| In multi-label classification, this is the subset accuracy
| which is a harsh metric since you require for each sample that
| each label set be correctly predicted.
|
| Parameters
| ----------
| X : array-like, shape = (n_samples, n_features)
| Test samples.
|
| y : array-like, shape = (n_samples) or (n_samples, n_outputs)
| True labels for X.
|
| sample_weight : array-like, shape = [n_samples], optional
| Sample weights.
|
| Returns
| -------
| score : float
| Mean accuracy of self.predict(X) wrt. y.
|
| ----------------------------------------------------------------------
| Methods inherited from sklearn.linear_model.base.SparseCoefMixin:
|
| densify(self)
| Convert coefficient matrix to dense array format.
|
| Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
| default format of ``coef_`` and is required for fitting, so calling
| this method is only required on models that have previously been
| sparsified; otherwise, it is a no-op.
|
| Returns
| -------
| self : estimator
|
| sparsify(self)
| Convert coefficient matrix to sparse format.
|
| Converts the ``coef_`` member to a scipy.sparse matrix, which for
| L1-regularized models can be much more memory- and storage-efficient
| than the usual numpy.ndarray representation.
|
| The ``intercept_`` member is not converted.
|
| Notes
| -----
| For non-sparse models, i.e. when there are not many zeros in ``coef_``,
| this may actually *increase* memory usage, so use this method with
| care. A rule of thumb is that the number of zero elements, which can
| be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
| to provide significant benefits.
|
| After calling this method, further fitting with the partial_fit
| method (if any) will not work until you call densify.
|
| Returns
| -------
| self : estimator
###Markdown
Evaluating Accuracy In this notebook we are going to evaluate the accuracy on the same data we used for training. You shouldn't do this, because you can't be sure the result means anything; the model could just be overfitting the data. In reality we would split the dataset into a training set and a test set. We will cover this in the next tutorial.
###Code
accuracy = clf.score(X, y)
accuracy
###Output
_____no_output_____
###Markdown
Scikit Learn Tutorial 3 - Training a model on the Iris Dataset ![Scikit Learn Logo](http://scikit-learn.org/stable/_static/scikit-learn-logo-small.png) Loading in Dataset
###Code
import pandas as pd
data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', names=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class'])
data.head()
###Output
C:\Users\Gilbert\AppData\Local\Continuum\anaconda3\envs\ml\lib\importlib\_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject
return f(*args, **kwds)
###Markdown
Preprocessing Data Transforming the classes to numeric data
###Code
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
data['class'] = le.fit_transform(data['class'])
data.head()
###Output
_____no_output_____
###Markdown
Split features and label and transform them to a Numpy Array
###Code
import numpy as np
X = np.array(data.drop(['class'], axis=1))
y = np.array(data['class'])
###Output
_____no_output_____
###Markdown
Building Model
###Code
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(X, y)
###Output
C:\Users\Gilbert\AppData\Local\Continuum\anaconda3\envs\ml\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\Gilbert\AppData\Local\Continuum\anaconda3\envs\ml\lib\site-packages\sklearn\linear_model\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.
"this warning.", FutureWarning)
###Markdown
Evaluating Accuracy In this notebook we are going to evaluate the accuracy on the same data we used for training. You shouldn't do this, because you can't be sure the result means anything; the model could just be overfitting the data. In reality we would split the dataset into a training set and a test set. We will cover this in the next tutorial.
###Code
accuracy = clf.score(X, y)
accuracy
###Output
_____no_output_____ |
vgg_training.ipynb | ###Markdown
###Code
import numpy as np
from keras.models import Sequential
from keras import applications
from keras import optimizers
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, Lambda
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn releases
from sklearn.metrics import roc_curve, auc
from sklearn.utils import shuffle
import csv
import cv2
import scipy
import os
num_classes = 4
epochs = 20
# BASE_PATH = '/home/ec2-user/cell_classifier/'
BASE_DIR = '../'
batch_size = 32
def get_model():
model = Sequential()
model.add(Lambda(lambda x: x/127.5 - 1., input_shape=(120, 160, 3), output_shape=(120, 160, 3)))
model.add(Conv2D(32, (3, 3), input_shape=(120, 160, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.7))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return model
def top_model(input_shape):
model = Sequential()
model.add(Flatten(input_shape=input_shape))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy', metrics=['accuracy'])
return model
def get_data(folder):
X = []
y = []
for wbc_type in os.listdir(folder):
if not wbc_type.startswith('.'):
# if wbc_type in ['NEUTROPHIL', 'EOSINOPHIL']:
# label = 'MONONUCLEAR'
# else:
# label = 'POLYNUCLEAR'
for image_filename in os.listdir(folder + wbc_type):
img_file = cv2.imread(folder + wbc_type + '/' + image_filename)
img_file = scipy.misc.imresize(arr=img_file, size=(120, 160, 3))
if img_file is not None:
img_arr = np.asarray(img_file)
X.append(img_arr)
y.append(wbc_type)
X = np.asarray(X)
y = np.asarray(y)
return X,y
X_train, y_train = get_data(BASE_DIR + 'images/TRAIN/')
X_test, y_test = get_data(BASE_DIR + 'images/TEST/')
X_test_simple, y_test_simple = get_data(BASE_DIR + 'images/TEST_SIMPLE/')
X_train = X_train * 1./255.
X_test = X_test * 1./255.
X_test_simple = X_test_simple * 1./255.
encoder = LabelEncoder()
encoder.fit(y_test_simple)
y_train = np_utils.to_categorical(encoder.transform(y_train))
y_test = np_utils.to_categorical(encoder.transform(y_test))
y_test_simple = np_utils.to_categorical(encoder.transform(y_test_simple))
from keras.models import Model
from keras.layers import Input
from keras import optimizers
base_model = applications.VGG16(include_top=False, weights='imagenet')
input = Input(shape=(120, 160, 3),name = 'image_input')
vgg_output = base_model(input)
top_model = Flatten()(vgg_output)
top_model = Dense(64, activation='relu')(top_model)
predictions = Dense(num_classes, activation='softmax', name='prediction_layer')(top_model)
model = Model(input=input, output=predictions)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
layers = base_model.layers[:-2]
for layer in layers:
layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2, shuffle=True, verbose=1)
model.save_weights('vgg_top.h5')
###Output
_____no_output_____
###Markdown
batch_size = 16; fine-tune the model: `model.fit(X_train, y_train, validation_data=(X_validation, y_validation), epochs=epochs)`. A hedged fine-tuning sketch is shown below.
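###Markdown
 A hedged sketch of the fine-tuning step alluded to above (not part of the original run): unfreeze the last VGG16 convolutional block and keep training with a small learning rate. The layer index and the SGD settings are assumptions, not values taken from this notebook.
###Code
# Sketch only: fine-tune the top VGG16 block together with the classifier head.
from keras import optimizers

for layer in base_model.layers[:15]:   # keep the earlier blocks frozen (index is an assumption)
    layer.trainable = False
for layer in base_model.layers[15:]:   # unfreeze the last convolutional block
    layer.trainable = True

# Recompile with a low learning rate so the pretrained weights are only gently adjusted.
model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              loss='categorical_crossentropy', metrics=['accuracy'])

model.fit(X_train, y_train, epochs=epochs, batch_size=16,
          validation_split=0.2, shuffle=True, verbose=1)
###Output
_____no_output_____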
###Code
model.load_weights('vgg_top.h5')
from sklearn.metrics import accuracy_score
print('Predicting on test data')
y_pred = np.rint(model.predict(X_test_simple))
print(accuracy_score(y_test_simple, y_pred))
model.summary()
print(base_model.layers[-2].name)
###Output
_____no_output_____ |
docs/tutorials/generalized_polynomial_chaos.ipynb | ###Markdown
Generalized Polynomial Chaos. Generalized polynomial chaos is an advanced polynomial chaos method for dealing with problematic random variables. The problems it deals with include heavy-tailed distributions (like log-normal, Cauchy, etc.), which break the premises for using chaos expansions as approximations, and stochastic dependencies, for which no numerically stable construction method currently exists. Let us consider a synthetic exponential model that encompasses both issues by using a multivariate log-normal distribution for its uncertainty:
###Code
import numpy
import chaospy
coordinates = numpy.linspace(0, 10, 1000)
def exponential_model(parameters):
param_init, param_rate = parameters
return param_init*numpy.e**(-param_rate*coordinates)
distribution = chaospy.MvNormal(mu=[10, 1], sigma=[[1.0, 0.09], [0.09, 0.1]])
###Output
_____no_output_____
###Markdown
We are interested in the mean and standard deviation. Monte Carlo integration. As a baseline we can solve this using quasi-Monte Carlo integration. It requires no modification compared to the stochastically independent case. It consists of generating samples:
###Code
samples = distribution.sample(10**5, rule="sobol")
###Output
_____no_output_____
###Markdown
evaluate model for each sample:
###Code
evaluations = numpy.array([exponential_model(sample) for sample in samples.T])
###Output
_____no_output_____
###Markdown
and performing analysis on samples:
###Code
# NBVAL_CHECK_OUTPUT
mean = numpy.mean(evaluations, axis=0)
std = numpy.std(evaluations, axis=0)
(mean[:5].round(5), std[:5].round(5))
###Output
_____no_output_____
###Markdown
We can also plot the final result:
###Code
from matplotlib import pyplot
pyplot.fill_between(coordinates, mean-std, mean+std, alpha=0.6)
pyplot.plot(coordinates, mean)
pyplot.axis([0, 6, 0, 10])
pyplot.show()
###Output
_____no_output_____
###Markdown
Generalized polynomial chaos. Polynomial chaos expansions build on the assumption of having an orthogonal polynomial expansion. However, the classical extension to the multivariate case assumes that the probability distribution consists of stochastically independent components. If the distribution has dependencies, the classical approach will not work. The recommended approach for addressing dependent distributions is to use *generalized polynomial chaos expansion* (g-pce). It assumes that there exists a smooth map $T$ between the dependent variables $Q$ and some other stochastically independent variables $R$, for which we can build an expansion. In other words: $$\hat u(x, q) = \hat u(x, T(r)) = \sum_{n=0}^N c_n \Phi_n(r)$$ For multivariate normal distributions, the obvious choice is to select $R$ to be standard normal:
###Code
distribution_q = distribution
distribution_r = chaospy.J(chaospy.Normal(0, 1), chaospy.Normal(0, 1))
###Output
_____no_output_____
###Markdown
The map $T$ is defined as a double Rosenblatt transformation: $$T(r) = F_Q^{-1}\left( F_R(r) \right)$$ which in `chaospy` can be constructed as follows:
###Code
def transform(samples):
return distribution_q.inv(distribution_r.fwd(samples))
###Output
_____no_output_____
###Markdown
This formulation is general and can be used with any two distributions of the same size (dimension). Point collocation method. Implementing g-pce for point collocation requires us to generate samples from $R$ and transform them using $T$:
###Code
samples_r = distribution_r.sample(1000, rule="sobol")
samples_q = transform(samples_r)
###Output
_____no_output_____
###Markdown
The resulting samples can then be used to solve the equation above using the regression-based method:
###Code
expansion = chaospy.generate_expansion(7, distribution_r)
evaluations = numpy.array([exponential_model(sample) for sample in samples_q.T])
model_approx = chaospy.fit_regression(expansion, samples_r, evaluations)
###Output
_____no_output_____
###Markdown
Note that for generating the expansion and the model approximation we use the distribution of $R$, while for the model evaluation we use the transformed samples from $Q$. The solution model can then be used to do analysis. Just remember that the model is defined with respect to $R$, not $Q$ (a short, hedged evaluation sketch follows the next cell):
###Code
# NBVAL_CHECK_OUTPUT
mean = chaospy.E(model_approx, distribution_r)
std = chaospy.Std(model_approx, distribution_r)
(mean[:5].round(5), std[:5].round(5))
###Output
_____no_output_____
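###Markdown
 A small, hedged illustration of that point (a sketch only): to evaluate the fitted surrogate at a sample of the physical variables $Q$, the sample must first be mapped into $R$-space through the same transformation used above.
###Code
# Draw one sample of the dependent variables and map it into R-space before evaluation.
q_sample = distribution_q.sample(1)
r_sample = distribution_r.inv(distribution_q.fwd(q_sample))
surrogate_value = model_approx(*r_sample)
surrogate_value
###Output
_____no_output_____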
###Markdown
Plotting the final results:
###Code
pyplot.fill_between(coordinates, mean-std, mean+std, alpha=0.6)
pyplot.plot(coordinates, mean)
pyplot.axis([0, 6, 0, 10])
pyplot.show()
###Output
_____no_output_____
###Markdown
Pseudo-Spectral Projection. Implementing g-pce for pseudo-spectral projection requires us to generate nodes and weights from $R$ and transform the nodes using $T$:
###Code
nodes_r, weights_r = chaospy.generate_quadrature(10, distribution_r, rule="gaussian")
nodes_q = transform(nodes_r)
###Output
_____no_output_____
###Markdown
The resulting samples can then be used to solve the equation above using the quadrature-based method:
###Code
expansion = chaospy.generate_expansion(7, distribution_r)
evaluations = numpy.array([exponential_model(sample) for sample in nodes_q.T])
model_approx = chaospy.fit_quadrature(expansion, nodes_r, weights_r, evaluations)
###Output
_____no_output_____
###Markdown
Note that for generating the expansion and the model approximation we use the nodes and weights from $R$, while for the model evaluation we use the transformed samples from $Q$. The solution model, defined with respect to $R$, can then be used to do analysis:
###Code
# NBVAL_CHECK_OUTPUT
mean = chaospy.E(model_approx, distribution_r)
std = chaospy.Std(model_approx, distribution_r)
(mean[:5].round(5), std[:5].round(5))
###Output
_____no_output_____
###Markdown
Plotting the final results:
###Code
pyplot.fill_between(coordinates, mean-std, mean+std, alpha=0.6)
pyplot.plot(coordinates, mean)
pyplot.axis([0, 6, 0, 10])
pyplot.show()
###Output
_____no_output_____
###Markdown
Cholesky decomposition. The assumption behind generalized polynomial chaos expansion is that there exists a smooth mapping to a stochastically independent variable. However, such a mapping does not always exist. In those cases we can construct an orthogonal expansion directly on the dependent variable using a Cholesky decomposition. This can be done as follows:
###Code
# NBVAL_CHECK_OUTPUT
expansion = chaospy.generate_expansion(5, distribution_q, rule="cholesky")
expansion[:5].round(10)
###Output
_____no_output_____
###Markdown
The method is known to be numerically unstable, so it is important to verify that the expansion is indeed orthogonal:
###Code
chaospy.Corr(expansion[-10:], distribution).round(5)
###Output
_____no_output_____
###Markdown
This expansion can be used with the point collocation method directly:
###Code
samples_q = distribution_q.sample(1000, rule="sobol")
evaluations = numpy.array([exponential_model(sample) for sample in samples_q.T])
model_approx = chaospy.fit_regression(expansion, samples_q, evaluations)
# NBVAL_CHECK_OUTPUT
mean = chaospy.E(model_approx, distribution_q)
std = chaospy.Std(model_approx, distribution_q)
(mean[:5].round(5), std[:5].round(5))
pyplot.fill_between(coordinates, mean-std, mean+std, alpha=0.6)
pyplot.plot(coordinates, mean)
pyplot.axis([0, 6, 0, 10])
pyplot.show()
###Output
_____no_output_____ |
samples/interoperability/python/tomography-sample.ipynb | ###Markdown
Quantum Process Tomography with Q# and Python Abstract In this sample, we will demonstrate interoperability between Q# and Python by using the QInfer and QuTiP libraries for Python to characterize and verify quantum processes implemented in Q#. In particular, this sample will use *quantum process tomography* to learn about the behavior of a "noisy" Hadamard operation from the results of random Pauli measurements. Preamble
###Code
import warnings
warnings.simplefilter('ignore')
###Output
_____no_output_____
###Markdown
We can enable Q# support in Python by importing the `qsharp` package.
###Code
import qsharp
###Output
Preparing Q# environment...
###Markdown
Once we do so, any Q# source files in the current working directory are compiled, and their namespaces are made available as Python modules. For instance, the `Quantum.qs` source file provided with this sample implements a `HelloWorld` operation in the `Microsoft.Quantum.Samples.Python` Q# namespace:
###Code
with open('Quantum.qs') as f:
print(f.read())
###Output
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
namespace Microsoft.Quantum.Samples.Python {
open Microsoft.Quantum.Intrinsic;
open Microsoft.Quantum.Canon;
open Microsoft.Quantum.Preparation;
function HelloWorld (pauli : Pauli) : Unit {
Message($"Hello, world! {pauli}");
}
operation NoisyHadamardChannelImpl (depol : Double, target : Qubit) : Unit {
let idxAction = Random([1.0 - depol, depol]);
if (idxAction == 0) {
H(target);
}
else {
PrepareSingleQubitIdentity(target);
}
}
function NoisyHadamardChannel (depol : Double) : (Qubit => Unit) {
return NoisyHadamardChannelImpl(depol, _);
}
}
###Markdown
We can import this `HelloWorld` operation as though it were an ordinary Python function by using the Q# namespace as a Python module:
###Code
from Microsoft.Quantum.Samples.Python import HelloWorld
HelloWorld
###Output
_____no_output_____
###Markdown
Once we've imported the new names, we can then ask our simulator to run each function and operation using the `simulate` method.
###Code
HelloWorld.simulate(pauli=qsharp.Pauli.Z)
###Output
Hello, world! PauliZ
###Markdown
Tomography The `qsharp` interoperability package also comes with a `single_qubit_process_tomography` function which uses the QInfer library for Python to learn the channels corresponding to single-qubit Q# operations.
###Code
from qsharp.tomography import single_qubit_process_tomography
###Output
_____no_output_____
###Markdown
Next, we import plotting support and the QuTiP library, since these will be helpful to us in manipulating the quantum objects returned by the quantum process tomography functionality that we call later.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import qutip as qt
qt.settings.colorblind_safe = True
###Output
_____no_output_____
###Markdown
To use this, we define a new operation that takes a preparation and a measurement, then returns the result of performing that tomographic measurement on the noisy Hadamard operation that we defined in `Quantum.qs`.
###Code
experiment = qsharp.compile("""
open Microsoft.Quantum.Samples.Python;
open Microsoft.Quantum.Characterization;
operation Experiment(prep : Pauli, meas : Pauli) : Result {
return SingleQubitProcessTomographyMeasurement(prep, meas, NoisyHadamardChannel(0.1));
}
""")
###Output
_____no_output_____
###Markdown
Here, we ask for 10,000 measurements from the noisy Hadamard operation that we defined above.
###Code
estimation_results = single_qubit_process_tomography(experiment, n_measurements=10000)
###Output
Preparing tomography model...
Performing tomography...
###Markdown
To visualize the results, it's helpful to compare to the actual channel, which we can find exactly in QuTiP.
###Code
depolarizing_channel = sum(map(qt.to_super, [qt.qeye(2), qt.sigmax(), qt.sigmay(), qt.sigmaz()])) / 4.0
actual_noisy_h = 0.1 * qt.to_choi(depolarizing_channel) + 0.9 * qt.to_choi(qt.hadamard_transform())
###Output
_____no_output_____
###Markdown
We then plot the estimated and actual channels as Hinton diagrams, showing how each acts on the Pauli operators $X$, $Y$ and $Z$.
###Code
fig, (left, right) = plt.subplots(ncols=2, figsize=(12, 4))
plt.sca(left)
plt.xlabel('Estimated', fontsize='x-large')
qt.visualization.hinton(estimation_results['est_channel'], ax=left)
plt.sca(right)
plt.xlabel('Actual', fontsize='x-large')
qt.visualization.hinton(actual_noisy_h, ax=right)
###Output
_____no_output_____
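###Markdown
 A rough, hedged way to quantify the visual agreement above (a sketch only; it assumes ``estimation_results['est_channel']`` is a QuTiP ``Qobj`` with the same dimensions as ``actual_noisy_h``, otherwise the subtraction below will fail):
###Code
import numpy as np

# Frobenius norm of the difference between the estimated and actual channel matrices.
difference = estimation_results['est_channel'] - actual_noisy_h
print(np.linalg.norm(difference.full()))
###Output
_____no_output_____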
###Markdown
We also obtain a wealth of other information, such as the covariance matrix over each parameter of the resulting channel. This shows us which parameters we are least certain about, as well as how those parameters are correlated with each other.
###Code
plt.figure(figsize=(10, 10))
estimation_results['posterior'].plot_covariance()
plt.xticks(rotation=90)
###Output
_____no_output_____
###Markdown
Diagnostics
###Code
for component, version in sorted(qsharp.component_versions().items(), key=lambda x: x[0]):
print(f"{component:20}{version}")
import sys
print(sys.version)
###Output
3.7.6 | packaged by conda-forge | (default, Mar 23 2020, 22:22:21) [MSC v.1916 64 bit (AMD64)]
|
notebooks/test/path_simulation_analysis.ipynb | ###Markdown
Multi-Frame Motion Deblur Path Analysis. This notebook generates a simulation object, solves the linear inverse problem, and analyzes the quality of the blur path.
###Code
%matplotlib notebook
%load_ext autoreload
%autoreload 2
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import scipy.misc as misc
import time
import sys
import itertools
import math
import imageio
import skimage as sk
# Libwallerlab imports
from libwallerlab.algorithms import iterative as iterative
from libwallerlab.opticsalgorithms.motiondeblur import blurkernel
from libwallerlab.operators import operators as ops
from libwallerlab.utilities import displaytools, iotools
from libwallerlab.algorithms import objectivefunctions
from libwallerlab.algorithms import regularizers
from libwallerlab.operators import proximal as proxops
from libwallerlab.utilities.opticstools import Ft, iFt
###Output
_____no_output_____
###Markdown
Flow of Notebook: 1. Generate Object; 2. Generate Measurements (Images); 3. Solve Inverse Problem; 4. Save measurements and blur kernels in accurate libwallerlab.utilities.iotools.Dataset format. To-Do: // linear_y kernels don't extend whole way; // multi-pass kernels; // Add Sarah's PGD kernel optimization methods; // Find out why result is not converging to correct values; // save/load to lwl dataset format; Add nesterov acceleration; Implement CG; Implement FISTA; clean up blurkernel.py. Define Motion Blur Parameters
###Code
# Image to use when generating object
object_file_name = '../../../../../common/test_images/brain_to_scan.png'
# Color channel to use when generating object
object_color_channel = 2
# Illumination vector generation type
blur_vector_type = 'random_phase' # 'random_phase' # can be strobe, constant, random, random_phase, projected_gradient_descent
# Motion scanning type
scan_type = 'raster' # can be linear_x, linear_y, raster, (add more here)
# Flag to scan entire object in multiple times (True) or in individual segments of a single pass (False)
full_object_multi_pass = 0
# Method of padding object (for convolution support)
object_edge_pad_type = 'mean'
# Use spectrally-variant blur kernel (single-led flickering)
## NOT WORKING YET ##
use_spectrally_variant_blur = False
# Illumination throughput coefficient ( \in [0,1] )
throughput_coefficient = 0.5
# Image size to simulate
image_size = np.array([32, 32])
# Object size to image size ratio (>=1, integer)
object_size_ratio = 3
# Redundancy in measurements (> 1 means extra pixels are recorded, <1 means not enough)
measurement_redundancy = 1
# Directory to save output in
simulation_output_dir = '/home/sarah/Dropbox/deblurring/COSI/data/simulations/blurred'
# '/Users/zfphil/develop/datasets/'
###Output
_____no_output_____
###Markdown
Generate Object Load Object
###Code
# Load object
brain = imageio.imread(object_file_name)
# Generate object with correct size
object_size_0 = np.round(np.array([object_size_ratio * image_size[0], object_size_ratio * image_size[1] * (brain.shape[1] / brain.shape[0])])).astype(np.int) # image_size #* 3
# brain_cropped = misc.imresize(brain, size=object_size_0) / 255.
brain_cropped = misc.imresize(brain, size=object_size_0) / 255.
# Determine object size
if scan_type == 'linear_y':
brain_cropped = brain_cropped[:image_size[0]*object_size_ratio, :image_size[1]]
elif scan_type in ['linear', 'linear_x']:
brain_cropped = brain_cropped[:image_size[0], :image_size[1]*object_size_ratio]
elif scan_type in ['raster', 'raster_major_both', 'raster_major_y', 'raster_2x']:
brain_cropped = brain_cropped[:image_size[0]*object_size_ratio, :image_size[1]*object_size_ratio]
# Redefine object size
# new_size = np.round([object_size_0[0], object_size_0[0] * (brain.shape[1] / brain.shape[0])]).astype(np.int)
object_color_channel = 2 # Choose one of RGB channels (TODO: implement color)
object_true = brain_cropped[:, :, object_color_channel] # remove alpha
object_size_0 = object_true.shape[:2]
print("Object size is %d x %d" % (object_size_0[0], object_size_0[1]))
# Plot
plt.figure(figsize = (5,2))
plt.imshow(object_true, cmap='gray')
# plt.axis('off')
###Output
Object size is 96 x 96
###Markdown
Generate Blur Pathway
###Code
point_list_segmented
object_size_0
point_list_segmented = blurkernel.genRasterMotionPathway(object_size_0, image_size)
# Generate illumination vectors using quick and cheap optimization
blur_vector_type = 'random_phase'
illum_vector_list = []
for kernel_index, positions in enumerate(point_list_segmented):
if blur_vector_type == 'constant':
illum_vector_list.append(np.ones(positions.shape[0]))
elif blur_vector_type == 'strobe':
illum_vector_list.append(np.zeros(positions.shape[0]))
illum_vector_list[-1][positions.shape[0] // 2] = throughput_coefficient
elif blur_vector_type == 'random':
illum_vector_list.append(np.random.rand(positions.shape[0]))
illum_vector_list[-1] = illum_vector_list[-1] / np.sum(illum_vector_list[-1]) * throughput_coefficient
elif blur_vector_type == 'random_phase':
k, v = blurkernel.genIllum_pseudoRandom_len(len(positions))
illum_vector_list.append(v)
elif blur_vector_type == 'projected_gradient_descent':
blur_kernel_fourier = blurkernel.positionListToBlurKernelMap(object_size_0, positions, return_fourier=True)
def blurMapCol(i):
return (blur_kernel_fourier[i]).reshape(-1) #/ len(positions)
result = blurkernel.genIllum(blurMapCol, len(blur_kernel_fourier),
maxiter=100, throughputCoeff=throughput_coefficient,
resultType='final', verbose=False)
illum_vector_list.append(result['xopt'])
print('kernel %d has length %d and condition number %.2f' % (kernel_index, len(positions), result['fopt']))
else:
raise NotImplementedError('Illumination vector type %s is not implemented.' % blur_vector_type)
print(len(point_list_segmented))
# Generate blur kernel maps for each frame
blur_kernel_list = np.zeros((len(point_list_segmented), object_size_0[0], object_size_0[1]))
for frame_index in range(len(illum_vector_list)):
for position_index, position in enumerate(point_list_segmented[frame_index]):
blur_kernel_list[frame_index, position[0], position[1]] = illum_vector_list[frame_index][position_index]
# Define cropped object sizes and crop true image
object_size = blur_kernel_list[0].shape
# Show blur kernels
displaytools.show3dArray(blur_kernel_list, figsize=(8,6))
plt.matshow(sum(blur_kernel_list))
###Output
_____no_output_____
###Markdown
Quality of Blur Kernel
###Code
from libwallerlab.utilities.opticstools import iFt, Ft
# Generate windowed coverage for each frame
midpoint_list = []
weighted_midpoint_list = []
for frame_index, point_list in enumerate(point_list_segmented):
nonzero_illum = np.where(illum_vector_list[frame_index].reshape(-1) != 0)
included_point_list = point_list[np.min(nonzero_illum):(np.max(nonzero_illum)+1)]
# weighted_midpoint_list.append(np.round(np.average(point_list, axis=0, \
# weights=illum_vector_list[frame_index].reshape(-1)).astype(np.int)))
midpoint_list.append(np.round(np.mean(point_list, axis=0)).astype(np.int))
weighted_midpoint_list.append(np.round(np.mean(included_point_list, axis=0)).astype(np.int))
midpoint_kernel_list = np.zeros((len(midpoint_list), object_size_0[0], object_size_0[1]))
weighted_midpoint_kernel_list = np.zeros((len(midpoint_list), object_size_0[0], object_size_0[1]))
for frame_index in range(len(illum_vector_list)):
position = midpoint_list[frame_index]
weighted_position = weighted_midpoint_list[frame_index]
midpoint_kernel_list[frame_index, position[0], position[1]] = 1
weighted_midpoint_kernel_list[frame_index, weighted_position[0], weighted_position[1]] = 1
_, object_support_mask = blurkernel.genConvolutionSupportList(midpoint_kernel_list, image_size, threshold=1e-2)
_, object_support_mask_weighted = blurkernel.genConvolutionSupportList(weighted_midpoint_kernel_list, image_size, threshold=1e-2)
displaytools.show3dArray(object_support_mask_weighted, figsize=(8,6))
sv_spectrum = []
for blur_kernel in blur_kernel_list:
svs = np.abs(Ft(blur_kernel.astype(np.complex64)))**2
sv_spectrum.append(svs)
np.mean(point_list_segmented[2],axis=0)
print(midpoint_list)
plt.matshow(sum(object_support_mask))
## spatially varying singular value list
spatial_svs = np.zeros([object_size[0], object_size[1], np.prod(object_size)])
for frame_index in range(len(blur_kernel_list)):
spatial_svs[object_support_mask[frame_index],:] += sv_spectrum[frame_index].reshape(-1)
spatial_svs_weighted = np.zeros([object_size[0], object_size[1], np.prod(object_size)])
for frame_index in range(len(blur_kernel_list)):
spatial_svs_weighted[object_support_mask_weighted[frame_index],:] += sv_spectrum[frame_index].reshape(-1)
from matplotlib.colors import LogNorm
spatial_min_sv = np.amin(spatial_svs, axis=2)
spatial_max_sv = np.amax(spatial_svs, axis=2)
spatial_cond = np.divide(spatial_max_sv, spatial_min_sv)
spatial_min_sv_weighted = np.amin(spatial_svs_weighted, axis=2)
spatial_max_sv_weighted = np.amax(spatial_svs_weighted, axis=2)
spatial_cond_weighted = np.divide(spatial_max_sv_weighted, spatial_min_sv_weighted)
for toplot in ['spatial_min_sv', 'spatial_cond_weighted']:
plt.figure(); plt.imshow(eval(toplot), cmap='viridis',norm=LogNorm());
plt.colorbar(); plt.tick_params(labelbottom='off',labelleft='off'); plt.title(toplot)
###Output
/home/sarah/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:5: RuntimeWarning: invalid value encountered in true_divide
/home/sarah/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:9: RuntimeWarning: invalid value encountered in true_divide
###Markdown
Confirmation of Quality via Simulation: Measurement generations
###Code
noise_magnitude = 0# 1e-8
noise_type = 'shot'
# Determine maximum kernel support in x/y for all blur kernels in blur_kernel_list. This is how much we will pad our object by.
support_size_list = []
for blur_kernel in blur_kernel_list:
# support_size_list.append(blurkernel.getBoundingBox(blur_kernel, return_roi=True).size())
support_size_list.append(blurkernel.getPositionListBoundingBox(point_list_segmented).size())
max_kernel_support = np.max(np.asarray(support_size_list),axis=0)
# Generate pad operator for object support
object_size_padded = (np.asarray(object_size) + max_kernel_support).tolist() # Add to object_size
W_object_support = ops.Crop(object_size_padded, object_size, crop_start=(max_kernel_support[0] // 2, max_kernel_support[1] // 2)) # Add support
# Pad object with random values (to simulate an extended object)
object_extended = W_object_support.H * object_true.reshape(-1).astype(np.complex64)
if object_edge_pad_type == 'random':
object_extended += (1. - W_object_support.H * np.ones(object_true.size, dtype=np.complex64)) * np.random.rand(np.prod(object_size_padded))
elif object_edge_pad_type == 'zeros':
object_extended += (1. - W_object_support.H * np.zeros(object_true.size, dtype=np.complex64))
elif object_edge_pad_type == 'ones':
object_extended += (1. - W_object_support.H * np.ones(object_true.size, dtype=np.complex64))
elif object_edge_pad_type == 'mean':
object_extended += (1. - W_object_support.H * np.mean(object_true) * np.ones(object_true.size, dtype=np.complex64))
elif object_edge_pad_type == None:
object_extended = object_true
object_size_padded = object_true.shape
W_object_support = ops.Identity(object_true.shape)
# Define crop operator for object to image
W = ops.Crop(object_size, image_size)
A_list = []
y_list = []
C_list = []
y_list_uncropped = []
# Generate forward model operators for each blur kernel
for blur_kernel_index, blur_kernel in enumerate(blur_kernel_list):
blur_kernel = blur_kernel.astype(np.complex64) / np.sum(np.abs(blur_kernel.astype(np.complex64)))
# 2D Convolution Operator with the given kernel
C = ops.Convolution(object_size_padded, (W_object_support.H * blur_kernel.reshape(-1)).reshape(object_size_padded))
C_list.append(C)
# Forward operator with image crop and full object crop
A_list.append(W * W_object_support * C)
# Generate measurements using padded convolution
y_list.append(A_list[-1] * object_extended.reshape(-1).astype(np.complex64))
# Store uncropped measurements so we can observe what the padding is actually doing
y_list_uncropped.append(W_object_support * C * object_extended.reshape(-1).astype(np.complex64))
# Show first three blur kernels, uncropped measurements, and cropped measurements
plt.figure(figsize=(8,6))
nshow = min(3, len(blur_kernel_list))
for i in range(nshow):
plt.subplot(3, nshow, i+1)
plt.imshow(blur_kernel_list[i], interpolation='none')
plt.title('Measurement '+str(i))
plt.ylabel('Blur Kernel')
plt.subplot(3, nshow, nshow + i + 1)
plt.imshow(np.abs(y_list_uncropped[i].reshape(object_size)))
plt.ylabel('Uncropped y')
plt.subplot(3, nshow, nshow*2 + i + 1)
plt.imshow(np.abs(y_list[i].reshape(image_size)))
plt.ylabel('Cropped y')
###Output
_____no_output_____
###Markdown
Recovery via Gradient Descent
###Code
y_list_noise = []
for y in y_list:
noise = noise_magnitude * np.random.normal(size=y.shape)
if noise_type == 'shot': noise = noise * y
y_list_noise.append((y + noise).astype(np.float32))
# Generate measurements from image list
y_full = np.empty(0, dtype=np.complex64)
for y in y_list_noise:
y_full = np.append(y_full, y)
# Normalize measurements
y_mean = np.mean(np.abs(y_full))
y_full /= y_mean
# Generate full A Operator
A_full = ops.Vstack(Operators=A_list)
# Initialization: choosing a "good" coefficient value will help in convergence
initialization = np.ones(object_size_padded, dtype=np.complex64).reshape(-1)
# Define cost function
objective = objectivefunctions.L2(A_full, y_full)
solve_method = 'gd'
display_type = 'text'
# Solve linear inverse problem
if solve_method is 'gd':
iteration_count = 300
object_recovered = iterative.GradientDescent(objective).solve(initialization=initialization,
step_size=1,
iteration_count=iteration_count,
display_type=display_type,
display_iteration_delta=(iteration_count // 10))
elif solve_method is 'cg':
iteration_count = 300
object_recovered = iterative.ConjugateGradient(A_full, y_full).solve(initialization=initialization,
iteration_count=iteration_count,
display_type=display_type,
use_log_y=False,
use_log_x=False,
debug=True,
display_iteration_delta=(iteration_count // 10))
elif solve_method is 'fista':
iteration_count = 300
object_recovered = iterative.Fista(objective, proximal_operator=proxops.positivity).solve(initialization=initialization,
iteration_count=iteration_count,
display_type=display_type,
use_log_y=True,
use_log_x=False,
debug=True,
display_iteration_delta=(iteration_count // 10))
###Output
/home/sarah/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:5: ComplexWarning: Casting complex values to real discards the imaginary part
###Markdown
Show Results
###Code
object_recovered_crop = (W_object_support * object_recovered).reshape(object_size)
# normalize true object (because zero-frequency is irrelevent and recon is zero-mean)
object_true_normalized = object_true / np.mean(object_true)
object_recovered_crop = object_recovered_crop / np.mean(object_recovered_crop)
# Calculate SSE
print('Recovery SSE is %.2f' % np.sum(np.abs(object_true_normalized - object_recovered_crop) ** 2))
plt.figure(figsize=(12, 4))
plt.subplot(1,3,1); i_true = plt.imshow(np.abs(object_true_normalized), cmap='gray'); plt.title('Ground Truth')
plt.subplot(1,3,2); i_rec = plt.imshow(np.abs(object_recovered_crop), cmap='gray'); plt.title('Recovered');# i_rec.set_clim(i_true.get_clim())
plt.subplot(1,3,3); plt.imshow(np.abs(object_true_normalized - object_recovered_crop), cmap='gray'); plt.colorbar(); plt.title('Difference')
plt.figure(figsize=(12, 4))
plt.subplot(1,3,1); i_true = plt.imshow(np.abs(spatial_cond_weighted), cmap='viridis', norm=LogNorm());
plt.title('condition #'); plt.colorbar();
plt.subplot(1,3,2); i_rec = plt.imshow(np.abs(spatial_min_sv_weighted), cmap='viridis', norm=LogNorm());
plt.title('min sv'); plt.colorbar();
plt.subplot(1,3,3); plt.imshow(np.abs(object_true_normalized - object_recovered_crop), cmap='viridis'); plt.colorbar(); plt.title('Difference')
###Output
_____no_output_____ |
Python/HoloViews/holoviews_demo.ipynb | ###Markdown
HoloViews examples Following the tutorial First, let's import HoloViews, and make sure we can use it inline in this notebook.
###Code
import holoviews as hv
import numpy as np
%load_ext holoviews.ipython
###Output
_____no_output_____
###Markdown
Define a sine function with frequency $f$ and phase $\phi$:
###Code
def sine(x, phase=0.0, freq=100.0):
return np.sin(freq*x + phase)
###Output
_____no_output_____
###Markdown
Define arrays of phases and frequencies to explore.
###Code
phases = np.linspace(0.0, 2*np.pi, num=11)
freqs = np.linspace(50.0, 150.0, 5)
###Output
_____no_output_____
###Markdown
Sample the function in 2D over a grid.
###Code
dist = np.linspace(-0.5, 0.5, 202)
x, y = np.meshgrid(dist, dist)
grid = x**2 + y**2
###Output
_____no_output_____
###Markdown
Now create a HoloViews object out of data.
###Code
freq1 = hv.Image(sine(grid, freq=50))
###Output
_____no_output_____
###Markdown
Visualize the object.
###Code
freq1
###Output
_____no_output_____
###Markdown
Also show the curve of sine as a function of the distance.
###Code
freq1 + hv.Curve(zip(dist, sine(dist**2, freq=50)), kdims=['$r$'], vdims=['amplitude'])
###Output
_____no_output_____
###Markdown
Define dimensions and keys for high-dimensional HoloView.
###Code
dimensions = ['$\phi$', '$f$']
keys = [(p, f) for p in phases for f in freqs]
items = [(k, hv.Image(sine(grid, *k), vdims=['amplitude'])) for k in keys]
circular_wave = hv.HoloMap(items, kdims=dimensions)
circular_wave
###Output
_____no_output_____
###Markdown
Some overlays Define two functions f and g.
###Code
def f(x):
return np.cos(x)*np.exp(-0.1*x)
def g(x):
return np.sin(x)*np.exp(-0.1*x)
###Output
_____no_output_____
###Markdown
Create x-range.
###Code
x = np.linspace(0.0, 50.0, 1001)
###Output
_____no_output_____
###Markdown
Compute the curves for functions f and g.
###Code
f_curve = hv.Curve((x, f(x)), label=r'$e^{-0.1x} \cos x$')
g_curve = hv.Curve((x, g(x)), label=r'$e^{-0.1x} \sin x$')
###Output
_____no_output_____
###Markdown
Show the curves side by side.
###Code
%%output size=150
f_curve + g_curve
###Output
_____no_output_____
###Markdown
Show the curves superimposed.
###Code
%%output size=150
f_curve * g_curve
###Output
_____no_output_____
###Markdown
Plotting numpy arrays
###Code
x = np.linspace(-10.0, 10.0, 101)
y = np.cos(2*np.pi*x)*np.exp(-0.3*x)
###Output
_____no_output_____
###Markdown
Plotting numpy arrays as a tuple is more intuitive than zipping the arrays.
###Code
%%output size=200
hv.Curve((x, y))
###Output
_____no_output_____ |
notebooks/11_training_deep_neural_networks.ipynb | ###Markdown
**Chapter 11 – Training Deep Neural Networks** _This notebook contains all the sample code and solutions to the exercises in chapter 11._ Setup First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
###Code
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
%load_ext tensorboard
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deep"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
###Output
_____no_output_____
###Markdown
Vanishing/Exploding Gradients Problem
###Code
def logit(z):
return 1 / (1 + np.exp(-z))
z = np.linspace(-5, 5, 200)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [1, 1], 'k--')
plt.plot([0, 0], [-0.2, 1.2], 'k-')
plt.plot([-5, 5], [-3/4, 7/4], 'g--')
plt.plot(z, logit(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("sigmoid_saturation_plot")
plt.show()
###Output
Saving figure sigmoid_saturation_plot
###Markdown
Xavier and He Initialization
###Code
[name for name in dir(keras.initializers) if not name.startswith("_")]
keras.layers.Dense(10, activation="relu", kernel_initializer="he_normal")
init = keras.initializers.VarianceScaling(scale=2., mode='fan_avg',
distribution='uniform')
keras.layers.Dense(10, activation="relu", kernel_initializer=init)
###Output
_____no_output_____
###Markdown
Nonsaturating Activation Functions Leaky ReLU
###Code
def leaky_relu(z, alpha=0.01):
return np.maximum(alpha*z, z)
plt.plot(z, leaky_relu(z, 0.05), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([0, 0], [-0.5, 4.2], 'k-')
plt.grid(True)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14, ha="center")
plt.title("Leaky ReLU activation function", fontsize=14)
plt.axis([-5, 5, -0.5, 4.2])
save_fig("leaky_relu_plot")
plt.show()
[m for m in dir(keras.activations) if not m.startswith("_")]
[m for m in dir(keras.layers) if "relu" in m.lower()]
###Output
_____no_output_____
###Markdown
Let's train a neural network on Fashion MNIST using the Leaky ReLU:
###Code
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_train_full = X_train_full / 255.0
X_test = X_test / 255.0
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
###Output
Epoch 1/10
1/1719 [..............................] - ETA: 0s - loss: 2.3997 - accuracy: 0.1562WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0000s vs `on_train_batch_end` time: 0.0091s). Check your callbacks.
1719/1719 [==============================] - 2s 1ms/step - loss: 1.2819 - accuracy: 0.6229 - val_loss: 0.8886 - val_accuracy: 0.7160
Epoch 2/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.7955 - accuracy: 0.7361 - val_loss: 0.7130 - val_accuracy: 0.7656
Epoch 3/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.6816 - accuracy: 0.7721 - val_loss: 0.6427 - val_accuracy: 0.7900
Epoch 4/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.6217 - accuracy: 0.7944 - val_loss: 0.5900 - val_accuracy: 0.8064
Epoch 5/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5832 - accuracy: 0.8074 - val_loss: 0.5582 - val_accuracy: 0.8200
Epoch 6/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5553 - accuracy: 0.8156 - val_loss: 0.5350 - val_accuracy: 0.8238
Epoch 7/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5339 - accuracy: 0.8224 - val_loss: 0.5156 - val_accuracy: 0.8302
Epoch 8/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5173 - accuracy: 0.8272 - val_loss: 0.5079 - val_accuracy: 0.8284
Epoch 9/10
1719/1719 [==============================] - ETA: 0s - loss: 0.5044 - accuracy: 0.82 - 2s 1ms/step - loss: 0.5041 - accuracy: 0.8290 - val_loss: 0.4895 - val_accuracy: 0.8386
Epoch 10/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4924 - accuracy: 0.8320 - val_loss: 0.4817 - val_accuracy: 0.8396
###Markdown
Now let's try PReLU:
###Code
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
###Output
Epoch 1/10
1719/1719 [==============================] - 2s 1ms/step - loss: 1.3461 - accuracy: 0.6209 - val_loss: 0.9255 - val_accuracy: 0.7186
Epoch 2/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.8197 - accuracy: 0.7355 - val_loss: 0.7305 - val_accuracy: 0.7630
Epoch 3/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.6966 - accuracy: 0.7694 - val_loss: 0.6565 - val_accuracy: 0.7880
Epoch 4/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.6331 - accuracy: 0.7909 - val_loss: 0.6003 - val_accuracy: 0.8048
Epoch 5/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5917 - accuracy: 0.8057 - val_loss: 0.5656 - val_accuracy: 0.8180
Epoch 6/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5618 - accuracy: 0.8136 - val_loss: 0.5406 - val_accuracy: 0.8238
Epoch 7/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5390 - accuracy: 0.8205 - val_loss: 0.5196 - val_accuracy: 0.8314
Epoch 8/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5213 - accuracy: 0.8257 - val_loss: 0.5113 - val_accuracy: 0.8318
Epoch 9/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5070 - accuracy: 0.8288 - val_loss: 0.4916 - val_accuracy: 0.8380
Epoch 10/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4945 - accuracy: 0.8315 - val_loss: 0.4826 - val_accuracy: 0.8396
###Markdown
ELU
###Code
def elu(z, alpha=1):
return np.where(z < 0, alpha * (np.exp(z) - 1), z)
plt.plot(z, elu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1, -1], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title(r"ELU activation function ($\alpha=1$)", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("elu_plot")
plt.show()
###Output
Saving figure elu_plot
###Markdown
Implementing ELU in TensorFlow is trivial, just specify the activation function when building each layer:
###Code
keras.layers.Dense(10, activation="elu")
###Output
_____no_output_____
###Markdown
SELU This activation function was proposed in this [great paper](https://arxiv.org/pdf/1706.02515.pdf) by Günter Klambauer, Thomas Unterthiner and Andreas Mayr, published in June 2017. During training, a neural network composed exclusively of a stack of dense layers using the SELU activation function and LeCun initialization will self-normalize: the output of each layer will tend to preserve the same mean and variance during training, which solves the vanishing/exploding gradients problem. As a result, this activation function outperforms the other activation functions very significantly for such neural nets, so you should really try it out. Unfortunately, the self-normalizing property of the SELU activation function is easily broken: you cannot use ℓ1 or ℓ2 regularization, regular dropout, max-norm, skip connections or other non-sequential topologies (so recurrent neural networks won't self-normalize). However, in practice it works quite well with sequential CNNs. If you break self-normalization, SELU will not necessarily outperform other activation functions.
###Code
from scipy.special import erfc
# alpha and scale to self normalize with mean 0 and standard deviation 1
# (see equation 14 in the paper):
alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1/np.sqrt(2)) * np.exp(1/2) - 1)
scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (2 * erfc(np.sqrt(2))*np.e**2 + np.pi*erfc(1/np.sqrt(2))**2*np.e - 2*(2+np.pi)*erfc(1/np.sqrt(2))*np.sqrt(np.e)+np.pi+2)**(-1/2)
def selu(z, scale=scale_0_1, alpha=alpha_0_1):
return scale * elu(z, alpha)
plt.plot(z, selu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1.758, -1.758], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title("SELU activation function", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("selu_plot")
plt.show()
###Output
Saving figure selu_plot
###Markdown
By default, the SELU hyperparameters (`scale` and `alpha`) are tuned in such a way that the mean output of each neuron remains close to 0, and the standard deviation remains close to 1 (assuming the inputs are standardized with mean 0 and standard deviation 1 too). Using this activation function, even a 1,000 layer deep neural network preserves roughly mean 0 and standard deviation 1 across all layers, avoiding the exploding/vanishing gradients problem:
###Code
np.random.seed(42)
Z = np.random.normal(size=(500, 100)) # standardized inputs
for layer in range(1000):
W = np.random.normal(size=(100, 100), scale=np.sqrt(1 / 100)) # LeCun initialization
Z = selu(np.dot(Z, W))
means = np.mean(Z, axis=0).mean()
stds = np.std(Z, axis=0).mean()
if layer % 100 == 0:
print("Layer {}: mean {:.2f}, std deviation {:.2f}".format(layer, means, stds))
###Output
Layer 0: mean -0.00, std deviation 1.00
Layer 100: mean 0.02, std deviation 0.96
Layer 200: mean 0.01, std deviation 0.90
Layer 300: mean -0.02, std deviation 0.92
Layer 400: mean 0.05, std deviation 0.89
Layer 500: mean 0.01, std deviation 0.93
Layer 600: mean 0.02, std deviation 0.92
Layer 700: mean -0.02, std deviation 0.90
Layer 800: mean 0.05, std deviation 0.83
Layer 900: mean 0.02, std deviation 1.00
###Markdown
Using SELU is easy:
###Code
keras.layers.Dense(10, activation="selu",
kernel_initializer="lecun_normal")
###Output
_____no_output_____
###Markdown
Let's create a neural net for Fashion MNIST with 100 hidden layers, using the SELU activation function:
###Code
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="selu",
kernel_initializer="lecun_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="selu",
kernel_initializer="lecun_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
Now let's train it. Do not forget to scale the inputs to mean 0 and standard deviation 1:
###Code
pixel_means = X_train.mean(axis=0, keepdims=True)
pixel_stds = X_train.std(axis=0, keepdims=True)
X_train_scaled = (X_train - pixel_means) / pixel_stds
X_valid_scaled = (X_valid - pixel_means) / pixel_stds
X_test_scaled = (X_test - pixel_means) / pixel_stds
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/5
1719/1719 [==============================] - 13s 7ms/step - loss: 1.1407 - accuracy: 0.5672 - val_loss: 0.7427 - val_accuracy: 0.7370
Epoch 2/5
1719/1719 [==============================] - 12s 7ms/step - loss: 0.6671 - accuracy: 0.7625 - val_loss: 0.5699 - val_accuracy: 0.7966
Epoch 3/5
1719/1719 [==============================] - 12s 7ms/step - loss: 0.5736 - accuracy: 0.7926 - val_loss: 0.5386 - val_accuracy: 0.8090
Epoch 4/5
1719/1719 [==============================] - 12s 7ms/step - loss: 0.5173 - accuracy: 0.8146 - val_loss: 0.4749 - val_accuracy: 0.8326
Epoch 5/5
1719/1719 [==============================] - 12s 7ms/step - loss: 0.5629 - accuracy: 0.8031 - val_loss: 0.6033 - val_accuracy: 0.7966
###Markdown
Now look at what happens if we try to use the ReLU activation function instead:
###Code
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu", kernel_initializer="he_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="relu", kernel_initializer="he_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/5
1719/1719 [==============================] - 12s 7ms/step - loss: 1.8008 - accuracy: 0.2679 - val_loss: 1.3492 - val_accuracy: 0.3830
Epoch 2/5
1719/1719 [==============================] - 12s 7ms/step - loss: 1.1807 - accuracy: 0.5023 - val_loss: 0.8959 - val_accuracy: 0.6520
Epoch 3/5
1719/1719 [==============================] - 12s 7ms/step - loss: 0.9112 - accuracy: 0.6312 - val_loss: 0.8386 - val_accuracy: 0.6718
Epoch 4/5
1719/1719 [==============================] - 12s 7ms/step - loss: 0.8412 - accuracy: 0.6644 - val_loss: 0.7453 - val_accuracy: 0.7004
Epoch 5/5
1719/1719 [==============================] - 12s 7ms/step - loss: 0.7553 - accuracy: 0.7056 - val_loss: 0.6965 - val_accuracy: 0.7288
###Markdown
Not great at all, we suffered from the vanishing/exploding gradients problem. Batch Normalization
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(100, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(10, activation="softmax")
])
model.summary()
bn1 = model.layers[1]
[(var.name, var.trainable) for var in bn1.variables]
bn1.updates
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
###Output
Epoch 1/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.8750 - accuracy: 0.7123 - val_loss: 0.5526 - val_accuracy: 0.8230
Epoch 2/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.5753 - accuracy: 0.8032 - val_loss: 0.4725 - val_accuracy: 0.8472
Epoch 3/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.5190 - accuracy: 0.8205 - val_loss: 0.4376 - val_accuracy: 0.8554
Epoch 4/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4827 - accuracy: 0.8322 - val_loss: 0.4152 - val_accuracy: 0.8598
Epoch 5/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4565 - accuracy: 0.8409 - val_loss: 0.3997 - val_accuracy: 0.8638
Epoch 6/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4398 - accuracy: 0.8475 - val_loss: 0.3867 - val_accuracy: 0.8694
Epoch 7/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4242 - accuracy: 0.8515 - val_loss: 0.3763 - val_accuracy: 0.8702
Epoch 8/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4143 - accuracy: 0.8541 - val_loss: 0.3712 - val_accuracy: 0.8736
Epoch 9/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4023 - accuracy: 0.8582 - val_loss: 0.3630 - val_accuracy: 0.8752
Epoch 10/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.3915 - accuracy: 0.8623 - val_loss: 0.3573 - val_accuracy: 0.8760
###Markdown
Sometimes applying BN before the activation function works better (there's a debate on this topic). Moreover, the layer before a `BatchNormalization` layer does not need to have bias terms, since the `BatchNormalization` layer adds its own offset parameters; keeping both would be a waste of parameters, so you can set `use_bias=False` when creating those layers:
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.Dense(100, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
###Output
Epoch 1/10
1719/1719 [==============================] - 3s 2ms/step - loss: 1.0317 - accuracy: 0.6757 - val_loss: 0.6767 - val_accuracy: 0.7816
Epoch 2/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.6790 - accuracy: 0.7792 - val_loss: 0.5566 - val_accuracy: 0.8180
Epoch 3/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.5960 - accuracy: 0.8037 - val_loss: 0.5007 - val_accuracy: 0.8360
Epoch 4/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.5447 - accuracy: 0.8193 - val_loss: 0.4666 - val_accuracy: 0.8448
Epoch 5/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.5109 - accuracy: 0.8278 - val_loss: 0.4434 - val_accuracy: 0.8534
Epoch 6/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4898 - accuracy: 0.8337 - val_loss: 0.4263 - val_accuracy: 0.8546
Epoch 7/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4712 - accuracy: 0.8398 - val_loss: 0.4130 - val_accuracy: 0.8570
Epoch 8/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4560 - accuracy: 0.8440 - val_loss: 0.4035 - val_accuracy: 0.8606
Epoch 9/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4441 - accuracy: 0.8473 - val_loss: 0.3942 - val_accuracy: 0.8642
Epoch 10/10
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4332 - accuracy: 0.8505 - val_loss: 0.3874 - val_accuracy: 0.8662
###Markdown
Gradient Clipping All Keras optimizers accept `clipnorm` or `clipvalue` arguments:
###Code
optimizer = keras.optimizers.SGD(clipvalue=1.0)
optimizer = keras.optimizers.SGD(clipnorm=1.0)
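# A minimal sketch (an addition, not from the original notebook): a clipped
# optimizer is passed to compile() like any other optimizer. The small model
# below is only an illustrative assumption.
clipped_model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(10, activation="softmax")
])
clipped_model.compile(loss="sparse_categorical_crossentropy",
                      optimizer=keras.optimizers.SGD(clipvalue=1.0),
                      metrics=["accuracy"])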
###Output
_____no_output_____
###Markdown
Reusing Pretrained Layers Reusing a Keras model Let's split the fashion MNIST training set in two:* `X_train_A`: all images of all items except for sandals and shirts (classes 5 and 6).* `X_train_B`: a much smaller training set of just the first 200 images of sandals or shirts.The validation set and the test set are also split this way, but without restricting the number of images.We will train a model on set A (classification task with 8 classes), and try to reuse it to tackle set B (binary classification). We hope to transfer a little bit of knowledge from task A to task B, since classes in set A (sneakers, ankle boots, coats, t-shirts, etc.) are somewhat similar to classes in set B (sandals and shirts). However, since we are using `Dense` layers, only patterns that occur at the same location can be reused (in contrast, convolutional layers will transfer much better, since learned patterns can be detected anywhere on the image, as we will see in the CNN chapter).
###Code
def split_dataset(X, y):
y_5_or_6 = (y == 5) | (y == 6) # sandals or shirts
y_A = y[~y_5_or_6]
y_A[y_A > 6] -= 2 # class indices 7, 8, 9 should be moved to 5, 6, 7
y_B = (y[y_5_or_6] == 6).astype(np.float32) # binary classification task: is it a shirt (class 6)?
return ((X[~y_5_or_6], y_A),
(X[y_5_or_6], y_B))
(X_train_A, y_train_A), (X_train_B, y_train_B) = split_dataset(X_train, y_train)
(X_valid_A, y_valid_A), (X_valid_B, y_valid_B) = split_dataset(X_valid, y_valid)
(X_test_A, y_test_A), (X_test_B, y_test_B) = split_dataset(X_test, y_test)
X_train_B = X_train_B[:200]
y_train_B = y_train_B[:200]
X_train_A.shape
X_train_B.shape
y_train_A[:30]
y_train_B[:30]
tf.random.set_seed(42)
np.random.seed(42)
model_A = keras.models.Sequential()
model_A.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_A.add(keras.layers.Dense(n_hidden, activation="selu"))
model_A.add(keras.layers.Dense(8, activation="softmax"))
model_A.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_A.fit(X_train_A, y_train_A, epochs=20,
validation_data=(X_valid_A, y_valid_A))
model_A.save("my_model_A.h5")
model_B = keras.models.Sequential()
model_B.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_B.add(keras.layers.Dense(n_hidden, activation="selu"))
model_B.add(keras.layers.Dense(1, activation="sigmoid"))
model_B.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B.fit(X_train_B, y_train_B, epochs=20,
validation_data=(X_valid_B, y_valid_B))
model.summary()
model_A = keras.models.load_model("my_model_A.h5")
model_B_on_A = keras.models.Sequential(model_A.layers[:-1])
model_B_on_A.add(keras.layers.Dense(1, activation="sigmoid"))
model_A_clone = keras.models.clone_model(model_A)
model_A_clone.set_weights(model_A.get_weights())
for layer in model_B_on_A.layers[:-1]:
layer.trainable = False
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=4,
validation_data=(X_valid_B, y_valid_B))
for layer in model_B_on_A.layers[:-1]:
layer.trainable = True
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=16,
validation_data=(X_valid_B, y_valid_B))
###Output
Epoch 1/4
1/7 [===>..........................] - ETA: 0s - loss: 0.5536 - accuracy: 0.6562WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0000s vs `on_test_batch_end` time: 0.0010s). Check your callbacks.
7/7 [==============================] - 0s 50ms/step - loss: 0.5765 - accuracy: 0.6500 - val_loss: 0.5812 - val_accuracy: 0.6369
Epoch 2/4
7/7 [==============================] - 0s 8ms/step - loss: 0.5403 - accuracy: 0.6900 - val_loss: 0.5440 - val_accuracy: 0.6795
Epoch 3/4
7/7 [==============================] - 0s 8ms/step - loss: 0.5037 - accuracy: 0.7300 - val_loss: 0.5122 - val_accuracy: 0.7120
Epoch 4/4
7/7 [==============================] - 0s 8ms/step - loss: 0.4725 - accuracy: 0.7500 - val_loss: 0.4837 - val_accuracy: 0.7333
Epoch 1/16
1/7 [===>..........................] - ETA: 0s - loss: 0.4256 - accuracy: 0.7500WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0000s vs `on_train_batch_end` time: 0.0092s). Check your callbacks.
7/7 [==============================] - 0s 23ms/step - loss: 0.3944 - accuracy: 0.8200 - val_loss: 0.3447 - val_accuracy: 0.8651
Epoch 2/16
7/7 [==============================] - 0s 8ms/step - loss: 0.2787 - accuracy: 0.9350 - val_loss: 0.2595 - val_accuracy: 0.9300
Epoch 3/16
7/7 [==============================] - 0s 8ms/step - loss: 0.2077 - accuracy: 0.9650 - val_loss: 0.2104 - val_accuracy: 0.9554
Epoch 4/16
7/7 [==============================] - 0s 8ms/step - loss: 0.1665 - accuracy: 0.9800 - val_loss: 0.1785 - val_accuracy: 0.9696
Epoch 5/16
7/7 [==============================] - 0s 7ms/step - loss: 0.1393 - accuracy: 0.9800 - val_loss: 0.1557 - val_accuracy: 0.9757
Epoch 6/16
7/7 [==============================] - 0s 8ms/step - loss: 0.1194 - accuracy: 0.9950 - val_loss: 0.1389 - val_accuracy: 0.9797
Epoch 7/16
7/7 [==============================] - 0s 8ms/step - loss: 0.1048 - accuracy: 0.9950 - val_loss: 0.1263 - val_accuracy: 0.9838
Epoch 8/16
7/7 [==============================] - 0s 8ms/step - loss: 0.0935 - accuracy: 0.9950 - val_loss: 0.1161 - val_accuracy: 0.9858
Epoch 9/16
7/7 [==============================] - 0s 8ms/step - loss: 0.0845 - accuracy: 1.0000 - val_loss: 0.1064 - val_accuracy: 0.9888
Epoch 10/16
7/7 [==============================] - 0s 8ms/step - loss: 0.0761 - accuracy: 1.0000 - val_loss: 0.0998 - val_accuracy: 0.9899
Epoch 11/16
7/7 [==============================] - 0s 8ms/step - loss: 0.0703 - accuracy: 1.0000 - val_loss: 0.0938 - val_accuracy: 0.9899
Epoch 12/16
7/7 [==============================] - 0s 8ms/step - loss: 0.0648 - accuracy: 1.0000 - val_loss: 0.0887 - val_accuracy: 0.9899
Epoch 13/16
7/7 [==============================] - 0s 8ms/step - loss: 0.0602 - accuracy: 1.0000 - val_loss: 0.0838 - val_accuracy: 0.9899
Epoch 14/16
7/7 [==============================] - 0s 8ms/step - loss: 0.0558 - accuracy: 1.0000 - val_loss: 0.0801 - val_accuracy: 0.9899
Epoch 15/16
7/7 [==============================] - 0s 8ms/step - loss: 0.0524 - accuracy: 1.0000 - val_loss: 0.0768 - val_accuracy: 0.9899
Epoch 16/16
7/7 [==============================] - 0s 8ms/step - loss: 0.0495 - accuracy: 1.0000 - val_loss: 0.0738 - val_accuracy: 0.9899
###Markdown
So, what's the final verdict?
###Code
model_B.evaluate(X_test_B, y_test_B)
model_B_on_A.evaluate(X_test_B, y_test_B)
###Output
63/63 [==============================] - 0s 1ms/step - loss: 0.0682 - accuracy: 0.9940
###Markdown
Great! We got quite a bit of transfer: the error rate dropped by a factor of 4!
###Code
(100 - 96.95) / (100 - 99.25)
###Output
_____no_output_____
###Markdown
Faster Optimizers Momentum optimization
###Code
optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9)
###Output
_____no_output_____
###Markdown
Nesterov Accelerated Gradient
###Code
optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
###Output
_____no_output_____
###Markdown
AdaGrad
###Code
optimizer = keras.optimizers.Adagrad(lr=0.001)
###Output
_____no_output_____
###Markdown
RMSProp
###Code
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9)
###Output
_____no_output_____
###Markdown
Adam Optimization
###Code
optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
###Output
_____no_output_____
###Markdown
Adamax Optimization
###Code
optimizer = keras.optimizers.Adamax(lr=0.001, beta_1=0.9, beta_2=0.999)
###Output
_____no_output_____
###Markdown
Nadam Optimization
###Code
optimizer = keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999)
###Output
_____no_output_____
###Markdown
Learning Rate Scheduling Power Scheduling ```lr = lr0 / (1 + steps / s)**c```* Keras uses `c=1` and `s = 1 / decay`
###Code
optimizer = keras.optimizers.SGD(lr=0.01, decay=1e-4)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
learning_rate = 0.01
decay = 1e-4
batch_size = 32
n_steps_per_epoch = len(X_train) // batch_size
epochs = np.arange(n_epochs)
lrs = learning_rate / (1 + decay * epochs * n_steps_per_epoch)
plt.plot(epochs, lrs, "o-")
plt.axis([0, n_epochs - 1, 0, 0.01])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Power Scheduling", fontsize=14)
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
Exponential Scheduling ```lr = lr0 * 0.1**(epoch / s)```
###Code
def exponential_decay_fn(epoch):
return 0.01 * 0.1**(epoch / 20)
def exponential_decay(lr0, s):
def exponential_decay_fn(epoch):
return lr0 * 0.1**(epoch / s)
return exponential_decay_fn
exponential_decay_fn = exponential_decay(lr0=0.01, s=20)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, history.history["lr"], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling", fontsize=14)
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
The schedule function can take the current learning rate as a second argument:
###Code
def exponential_decay_fn(epoch, lr):
return lr * 0.1**(1 / 20)
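# Hedged sketch (an addition, not in the original notebook): this two-argument
# variant is wired up exactly like the one-argument version; tf.keras passes it
# the current epoch and the optimizer's current learning rate.
lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)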
###Output
_____no_output_____
###Markdown
If you want to update the learning rate at each iteration rather than at each epoch, you must write your own callback class:
###Code
K = keras.backend
class ExponentialDecay(keras.callbacks.Callback):
def __init__(self, s=40000):
super().__init__()
self.s = s
def on_batch_begin(self, batch, logs=None):
# Note: the `batch` argument is reset at each epoch
lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, lr * 0.1**(1 / self.s))  # use the instance's s, not a global
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
lr0 = 0.01
optimizer = keras.optimizers.Nadam(lr=lr0)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
exp_decay = ExponentialDecay(s)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[exp_decay])
n_steps = n_epochs * len(X_train) // 32
steps = np.arange(n_steps)
lrs = lr0 * 0.1**(steps / s)
plt.plot(steps, lrs, "-", linewidth=2)
plt.axis([0, n_steps - 1, 0, lr0 * 1.1])
plt.xlabel("Batch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling (per batch)", fontsize=14)
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
Piecewise Constant Scheduling
###Code
def piecewise_constant_fn(epoch):
if epoch < 5:
return 0.01
elif epoch < 15:
return 0.005
else:
return 0.001
def piecewise_constant(boundaries, values):
boundaries = np.array([0] + boundaries)
values = np.array(values)
def piecewise_constant_fn(epoch):
return values[np.argmax(boundaries > epoch) - 1]
return piecewise_constant_fn
piecewise_constant_fn = piecewise_constant([5, 15], [0.01, 0.005, 0.001])
lr_scheduler = keras.callbacks.LearningRateScheduler(piecewise_constant_fn)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, [piecewise_constant_fn(epoch) for epoch in history.epoch], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Piecewise Constant Scheduling", fontsize=14)
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
Performance Scheduling
###Code
tf.random.set_seed(42)
np.random.seed(42)
lr_scheduler = keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(lr=0.02, momentum=0.9)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, history.history["lr"], "bo-")
plt.xlabel("Epoch")
plt.ylabel("Learning Rate", color='b')
plt.tick_params('y', colors='b')
plt.gca().set_xlim(0, n_epochs - 1)
plt.grid(True)
ax2 = plt.gca().twinx()
ax2.plot(history.epoch, history.history["val_loss"], "r^-")
ax2.set_ylabel('Validation Loss', color='r')
ax2.tick_params('y', colors='r')
plt.title("Reduce LR on Plateau", fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
tf.keras schedulers
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
learning_rate = keras.optimizers.schedules.ExponentialDecay(0.01, s, 0.1)
optimizer = keras.optimizers.SGD(learning_rate)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4894 - accuracy: 0.8277 - val_loss: 0.4096 - val_accuracy: 0.8592
Epoch 2/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.3820 - accuracy: 0.8651 - val_loss: 0.3742 - val_accuracy: 0.8698
Epoch 3/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.3487 - accuracy: 0.8766 - val_loss: 0.3737 - val_accuracy: 0.8686
Epoch 4/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.3264 - accuracy: 0.8836 - val_loss: 0.3494 - val_accuracy: 0.8800
Epoch 5/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.3104 - accuracy: 0.8893 - val_loss: 0.3433 - val_accuracy: 0.8794
Epoch 6/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2958 - accuracy: 0.8952 - val_loss: 0.3416 - val_accuracy: 0.8814
Epoch 7/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2854 - accuracy: 0.8987 - val_loss: 0.3359 - val_accuracy: 0.8816
Epoch 8/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2761 - accuracy: 0.9013 - val_loss: 0.3371 - val_accuracy: 0.8812
Epoch 9/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2678 - accuracy: 0.9052 - val_loss: 0.3268 - val_accuracy: 0.8852
Epoch 10/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2608 - accuracy: 0.9068 - val_loss: 0.3245 - val_accuracy: 0.8850
Epoch 11/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2551 - accuracy: 0.9087 - val_loss: 0.3255 - val_accuracy: 0.8868
Epoch 12/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2497 - accuracy: 0.9129 - val_loss: 0.3307 - val_accuracy: 0.8800
Epoch 13/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2450 - accuracy: 0.9139 - val_loss: 0.3223 - val_accuracy: 0.8868
Epoch 14/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2415 - accuracy: 0.9146 - val_loss: 0.3228 - val_accuracy: 0.8860
Epoch 15/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2375 - accuracy: 0.9166 - val_loss: 0.3214 - val_accuracy: 0.8874
Epoch 16/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2343 - accuracy: 0.9179 - val_loss: 0.3189 - val_accuracy: 0.8890
Epoch 17/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2317 - accuracy: 0.9186 - val_loss: 0.3202 - val_accuracy: 0.8894
Epoch 18/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2291 - accuracy: 0.9197 - val_loss: 0.3174 - val_accuracy: 0.8898
Epoch 19/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2270 - accuracy: 0.9206 - val_loss: 0.3203 - val_accuracy: 0.8896
Epoch 20/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2250 - accuracy: 0.9219 - val_loss: 0.3175 - val_accuracy: 0.8900
Epoch 21/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2229 - accuracy: 0.9223 - val_loss: 0.3185 - val_accuracy: 0.8912
Epoch 22/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2216 - accuracy: 0.9226 - val_loss: 0.3169 - val_accuracy: 0.8912
Epoch 23/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2202 - accuracy: 0.9232 - val_loss: 0.3177 - val_accuracy: 0.8904
Epoch 24/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2188 - accuracy: 0.9242 - val_loss: 0.3171 - val_accuracy: 0.8902
Epoch 25/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2180 - accuracy: 0.9241 - val_loss: 0.3170 - val_accuracy: 0.8912
###Markdown
For piecewise constant scheduling, try this:
###Code
learning_rate = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[5. * n_steps_per_epoch, 15. * n_steps_per_epoch],
values=[0.01, 0.005, 0.001])
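# Hedged sketch (not from the original notebook): as with the ExponentialDecay
# schedule above, this schedule object is simply passed to an optimizer.
optimizer = keras.optimizers.SGD(learning_rate)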
###Output
_____no_output_____
###Markdown
1Cycle scheduling
###Code
K = keras.backend
class ExponentialLearningRate(keras.callbacks.Callback):
def __init__(self, factor):
self.factor = factor
self.rates = []
self.losses = []
def on_batch_end(self, batch, logs):
self.rates.append(K.get_value(self.model.optimizer.lr))
self.losses.append(logs["loss"])
K.set_value(self.model.optimizer.lr, self.model.optimizer.lr * self.factor)
def find_learning_rate(model, X, y, epochs=1, batch_size=32, min_rate=10**-5, max_rate=10):
init_weights = model.get_weights()
iterations = len(X) // batch_size * epochs
factor = np.exp(np.log(max_rate / min_rate) / iterations)
init_lr = K.get_value(model.optimizer.lr)
K.set_value(model.optimizer.lr, min_rate)
exp_lr = ExponentialLearningRate(factor)
history = model.fit(X, y, epochs=epochs, batch_size=batch_size,
callbacks=[exp_lr])
K.set_value(model.optimizer.lr, init_lr)
model.set_weights(init_weights)
return exp_lr.rates, exp_lr.losses
def plot_lr_vs_loss(rates, losses):
plt.plot(rates, losses)
plt.gca().set_xscale('log')
plt.hlines(min(losses), min(rates), max(rates))
plt.axis([min(rates), max(rates), min(losses), (losses[0] + min(losses)) / 2])
plt.xlabel("Learning rate")
plt.ylabel("Loss")
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
batch_size = 128
rates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)
plot_lr_vs_loss(rates, losses)
class OneCycleScheduler(keras.callbacks.Callback):
def __init__(self, iterations, max_rate, start_rate=None,
last_iterations=None, last_rate=None):
self.iterations = iterations
self.max_rate = max_rate
self.start_rate = start_rate or max_rate / 10
self.last_iterations = last_iterations or iterations // 10 + 1
self.half_iteration = (iterations - self.last_iterations) // 2
self.last_rate = last_rate or self.start_rate / 1000
self.iteration = 0
def _interpolate(self, iter1, iter2, rate1, rate2):
return ((rate2 - rate1) * (self.iteration - iter1)
/ (iter2 - iter1) + rate1)
def on_batch_begin(self, batch, logs):
if self.iteration < self.half_iteration:
rate = self._interpolate(0, self.half_iteration, self.start_rate, self.max_rate)
elif self.iteration < 2 * self.half_iteration:
rate = self._interpolate(self.half_iteration, 2 * self.half_iteration,
self.max_rate, self.start_rate)
else:
rate = self._interpolate(2 * self.half_iteration, self.iterations,
self.start_rate, self.last_rate)
rate = max(rate, self.last_rate)
self.iteration += 1
K.set_value(self.model.optimizer.lr, rate)
n_epochs = 25
onecycle = OneCycleScheduler(len(X_train) // batch_size * n_epochs, max_rate=0.05)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,
validation_data=(X_valid_scaled, y_valid),
callbacks=[onecycle])
###Output
Epoch 1/25
410/430 [===========================>..] - ETA: 0s - loss: 0.6654 - accuracy: 0.7713WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0000s vs `on_test_batch_end` time: 0.0018s). Check your callbacks.
430/430 [==============================] - 1s 2ms/step - loss: 0.6572 - accuracy: 0.7739 - val_loss: 0.4871 - val_accuracy: 0.8336
Epoch 2/25
430/430 [==============================] - 1s 2ms/step - loss: 0.4581 - accuracy: 0.8396 - val_loss: 0.4274 - val_accuracy: 0.8522
Epoch 3/25
430/430 [==============================] - 1s 2ms/step - loss: 0.4121 - accuracy: 0.8545 - val_loss: 0.4114 - val_accuracy: 0.8584
Epoch 4/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3836 - accuracy: 0.8642 - val_loss: 0.3869 - val_accuracy: 0.8682
Epoch 5/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3639 - accuracy: 0.8717 - val_loss: 0.3765 - val_accuracy: 0.8680
Epoch 6/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3456 - accuracy: 0.8774 - val_loss: 0.3743 - val_accuracy: 0.8706
Epoch 7/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3329 - accuracy: 0.8810 - val_loss: 0.3635 - val_accuracy: 0.8716
Epoch 8/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3184 - accuracy: 0.8858 - val_loss: 0.3947 - val_accuracy: 0.8612
Epoch 9/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3064 - accuracy: 0.8890 - val_loss: 0.3482 - val_accuracy: 0.8762
Epoch 10/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2943 - accuracy: 0.8927 - val_loss: 0.3399 - val_accuracy: 0.8802
Epoch 11/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2839 - accuracy: 0.8961 - val_loss: 0.3462 - val_accuracy: 0.8794
Epoch 12/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2708 - accuracy: 0.9022 - val_loss: 0.3627 - val_accuracy: 0.8700
Epoch 13/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2536 - accuracy: 0.9083 - val_loss: 0.3356 - val_accuracy: 0.8840
Epoch 14/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2404 - accuracy: 0.9131 - val_loss: 0.3456 - val_accuracy: 0.8804
Epoch 15/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2279 - accuracy: 0.9185 - val_loss: 0.3254 - val_accuracy: 0.8852
Epoch 16/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2159 - accuracy: 0.9232 - val_loss: 0.3292 - val_accuracy: 0.8856
Epoch 17/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2062 - accuracy: 0.9266 - val_loss: 0.3345 - val_accuracy: 0.8880
Epoch 18/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1978 - accuracy: 0.9299 - val_loss: 0.3244 - val_accuracy: 0.8896
Epoch 19/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1892 - accuracy: 0.9342 - val_loss: 0.3234 - val_accuracy: 0.8908
Epoch 20/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1821 - accuracy: 0.9367 - val_loss: 0.3228 - val_accuracy: 0.8918
Epoch 21/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1752 - accuracy: 0.9401 - val_loss: 0.3222 - val_accuracy: 0.8912
Epoch 22/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1700 - accuracy: 0.9419 - val_loss: 0.3186 - val_accuracy: 0.8944
Epoch 23/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1655 - accuracy: 0.9437 - val_loss: 0.3191 - val_accuracy: 0.8940
Epoch 24/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1628 - accuracy: 0.9451 - val_loss: 0.3181 - val_accuracy: 0.8932
Epoch 25/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1611 - accuracy: 0.9461 - val_loss: 0.3174 - val_accuracy: 0.8938
###Markdown
Avoiding Overfitting Through Regularization $\ell_1$ and $\ell_2$ regularization
###Code
layer = keras.layers.Dense(100, activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01))
# or l1(0.1) for ℓ1 regularization with a factor of 0.1
# or l1_l2(0.1, 0.01) for both ℓ1 and ℓ2 regularization, with factors 0.1 and 0.01 respectively
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01)),
keras.layers.Dense(100, activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01)),
keras.layers.Dense(10, activation="softmax",
kernel_regularizer=keras.regularizers.l2(0.01))
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
from functools import partial
RegularizedDense = partial(keras.layers.Dense,
activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01))
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
RegularizedDense(300),
RegularizedDense(100),
RegularizedDense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/2
1719/1719 [==============================] - 3s 2ms/step - loss: 1.6313 - accuracy: 0.8113 - val_loss: 0.7218 - val_accuracy: 0.8310
Epoch 2/2
1719/1719 [==============================] - 3s 2ms/step - loss: 0.7187 - accuracy: 0.8273 - val_loss: 0.6826 - val_accuracy: 0.8382
###Markdown
Dropout
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(300, activation="elu", kernel_initializer="he_normal"),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(100, activation="elu", kernel_initializer="he_normal"),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/2
1/1719 [..............................] - ETA: 0s - loss: 3.9425 - accuracy: 0.1250WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0000s vs `on_train_batch_end` time: 0.0135s). Check your callbacks.
1719/1719 [==============================] - 3s 2ms/step - loss: 0.5838 - accuracy: 0.7998 - val_loss: 0.3730 - val_accuracy: 0.8644
Epoch 2/2
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4209 - accuracy: 0.8442 - val_loss: 0.3409 - val_accuracy: 0.8728
###Markdown
Alpha Dropout
###Code
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 20
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)
model.evaluate(X_train_scaled, y_train)
history = model.fit(X_train_scaled, y_train)
###Output
1/1719 [..............................] - ETA: 0s - loss: 0.6203 - accuracy: 0.8125WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0000s vs `on_train_batch_end` time: 0.0042s). Check your callbacks.
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4158 - accuracy: 0.8464
###Markdown
MC Dropout
###Code
tf.random.set_seed(42)
np.random.seed(42)
y_probas = np.stack([model(X_test_scaled, training=True)
for sample in range(100)])
y_proba = y_probas.mean(axis=0)
y_std = y_probas.std(axis=0)
np.round(model.predict(X_test_scaled[:1]), 2)
np.round(y_probas[:, :1], 2)
np.round(y_proba[:1], 2)
y_std = y_probas.std(axis=0)
np.round(y_std[:1], 2)
y_pred = np.argmax(y_proba, axis=1)
accuracy = np.sum(y_pred == y_test) / len(y_test)
accuracy
class MCDropout(keras.layers.Dropout):
def call(self, inputs):
return super().call(inputs, training=True)
class MCAlphaDropout(keras.layers.AlphaDropout):
def call(self, inputs):
return super().call(inputs, training=True)
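# Hedged sketch (an illustration, not from the original notebook): MCDropout is
# swapped in for regular Dropout layers exactly as MCAlphaDropout is swapped in
# for AlphaDropout below; `dropout_model` here is an assumed toy model.
dropout_model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dropout(rate=0.2),
    keras.layers.Dense(10, activation="softmax")
])
mc_dropout_model = keras.models.Sequential([
    MCDropout(layer.rate) if isinstance(layer, keras.layers.Dropout) else layer
    for layer in dropout_model.layers
])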
tf.random.set_seed(42)
np.random.seed(42)
mc_model = keras.models.Sequential([
MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer
for layer in model.layers
])
mc_model.summary()
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
mc_model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
mc_model.set_weights(model.get_weights())
###Output
_____no_output_____
###Markdown
Now we can use the model with MC Dropout:
###Code
np.round(np.mean([mc_model.predict(X_test_scaled[:1]) for sample in range(100)], axis=0), 2)
###Output
_____no_output_____
###Markdown
Max norm
###Code
layer = keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal",
kernel_constraint=keras.constraints.max_norm(1.))
MaxNormDense = partial(keras.layers.Dense,
activation="selu", kernel_initializer="lecun_normal",
kernel_constraint=keras.constraints.max_norm(1.))
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
MaxNormDense(300),
MaxNormDense(100),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/2
1719/1719 [==============================] - 3s 2ms/step - loss: 0.4743 - accuracy: 0.8335 - val_loss: 0.3698 - val_accuracy: 0.8644
Epoch 2/2
1719/1719 [==============================] - 3s 2ms/step - loss: 0.3549 - accuracy: 0.8715 - val_loss: 0.3782 - val_accuracy: 0.8682
###Markdown
Exercises 1. to 7. See appendix A. 8. Deep Learning on CIFAR10 a.*Exercise: Build a DNN with 20 hidden layers of 100 neurons each (that's too many, but it's the point of this exercise). Use He initialization and the ELU activation function.*
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
activation="elu",
kernel_initializer="he_normal"))
###Output
_____no_output_____
###Markdown
b.*Exercise: Using Nadam optimization and early stopping, train the network on the CIFAR10 dataset. You can load it with `keras.datasets.cifar10.load_data()`. The dataset is composed of 60,000 32 × 32–pixel color images (50,000 for training, 10,000 for testing) with 10 classes, so you'll need a softmax output layer with 10 neurons. Remember to search for the right learning rate each time you change the model's architecture or hyperparameters.* Let's add the output layer to the model:
###Code
model.add(keras.layers.Dense(10, activation="softmax"))
###Output
_____no_output_____
###Markdown
Let's use a Nadam optimizer with a learning rate of 5e-5. I tried learning rates 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3 and 1e-2, and I compared their learning curves for 10 epochs each (using the TensorBoard callback, below). The learning rates 3e-5 and 1e-4 were pretty good, so I tried 5e-5, which turned out to be slightly better.
###Code
optimizer = keras.optimizers.Nadam(lr=5e-5)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
Let's load the CIFAR10 dataset. We also want to use early stopping, so we need a validation set. Let's use the first 5,000 images of the original training set as the validation set:
###Code
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.cifar10.load_data()
X_train = X_train_full[5000:]
y_train = y_train_full[5000:]
X_valid = X_train_full[:5000]
y_valid = y_train_full[:5000]
###Output
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170500096/170498071 [==============================] - 58s 0us/step
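###Markdown
 A minimal sketch (an addition, not in the original notebook) of the learning-rate comparison described above: train a fresh clone of the model for a few epochs per candidate rate, each logging to its own TensorBoard run directory (the `my_cifar10_lr_search` directory name is an assumption), then compare the validation curves in TensorBoard.
###Code
# Hedged sketch: short runs for a handful of candidate learning rates.
for candidate_lr in (1e-5, 3e-5, 1e-4, 3e-4, 1e-3):
    candidate = keras.models.clone_model(model)
    candidate.compile(loss="sparse_categorical_crossentropy",
                      optimizer=keras.optimizers.Nadam(lr=candidate_lr),
                      metrics=["accuracy"])
    logdir = os.path.join(os.curdir, "my_cifar10_lr_search",
                          "lr_{:.0e}".format(candidate_lr))
    candidate.fit(X_train, y_train, epochs=10,
                  validation_data=(X_valid, y_valid),
                  callbacks=[keras.callbacks.TensorBoard(logdir)])
###Output
_____no_output_____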
###Markdown
Now we can create the callbacks we need and train the model:
###Code
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
%tensorboard --logdir=./my_cifar10_logs --port=6006
###Output
_____no_output_____
###Markdown
###Code
model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_model.h5")
model.evaluate(X_valid, y_valid)
###Output
157/157 [==============================] - 0s 1ms/step - loss: 1.5100 - accuracy: 0.1370
###Markdown
The model with the lowest validation loss gets about 47% accuracy on the validation set. It took 39 epochs to reach the lowest validation loss, with roughly 10 seconds per epoch on my laptop (without a GPU). Let's see if we can improve performance using Batch Normalization. c.*Exercise: Now try adding Batch Normalization and compare the learning curves: Is it converging faster than before? Does it produce a better model? How does it affect training speed?* The code below is very similar to the code above, with a few changes:* I added a BN layer after every Dense layer (before the activation function), except for the output layer. I also added a BN layer before the first hidden layer.* I changed the learning rate to 5e-4. I experimented with 1e-5, 3e-5, 5e-5, 1e-4, 3e-4, 5e-4, 1e-3 and 3e-3, and I chose the one with the best validation performance after 20 epochs.* I renamed the run directories to run_bn_* and the model file name to my_cifar10_bn_model.h5.
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
model.add(keras.layers.BatchNormalization())
for _ in range(20):
model.add(keras.layers.Dense(100, kernel_initializer="he_normal"))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation("elu"))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(lr=5e-4)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_bn_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_bn_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_bn_model.h5")
model.evaluate(X_valid, y_valid)
###Output
Epoch 1/100
2/1407 [..............................] - ETA: 1:06:34 - loss: 2.8693 - accuracy: 0.1094WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0189s vs `on_train_batch_end` time: 5.6669s). Check your callbacks.
1407/1407 [==============================] - 27s 19ms/step - loss: 1.8463 - accuracy: 0.3378 - val_loss: 1.6787 - val_accuracy: 0.3978
Epoch 2/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.6718 - accuracy: 0.4020 - val_loss: 1.5756 - val_accuracy: 0.4356
Epoch 3/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.5998 - accuracy: 0.4298 - val_loss: 1.5205 - val_accuracy: 0.4494
Epoch 4/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.5494 - accuracy: 0.4464 - val_loss: 1.5135 - val_accuracy: 0.4600
Epoch 5/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.5078 - accuracy: 0.4665 - val_loss: 1.4377 - val_accuracy: 0.4818
Epoch 6/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.4683 - accuracy: 0.4772 - val_loss: 1.4168 - val_accuracy: 0.5000
Epoch 7/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.4361 - accuracy: 0.4907 - val_loss: 1.4197 - val_accuracy: 0.5004
Epoch 8/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.4060 - accuracy: 0.5006 - val_loss: 1.3842 - val_accuracy: 0.5060
Epoch 9/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.3822 - accuracy: 0.5096 - val_loss: 1.3783 - val_accuracy: 0.5152
Epoch 10/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.3599 - accuracy: 0.5172 - val_loss: 1.3617 - val_accuracy: 0.5118
Epoch 11/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.3451 - accuracy: 0.5238 - val_loss: 1.3655 - val_accuracy: 0.5184
Epoch 12/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.3181 - accuracy: 0.5337 - val_loss: 1.3868 - val_accuracy: 0.5056
Epoch 13/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.3008 - accuracy: 0.5372 - val_loss: 1.3504 - val_accuracy: 0.5138
Epoch 14/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.2810 - accuracy: 0.5503 - val_loss: 1.3586 - val_accuracy: 0.5250
Epoch 15/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.2647 - accuracy: 0.5530 - val_loss: 1.3559 - val_accuracy: 0.5246
Epoch 16/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.2484 - accuracy: 0.5587 - val_loss: 1.3334 - val_accuracy: 0.5284
Epoch 17/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.2347 - accuracy: 0.5601 - val_loss: 1.3255 - val_accuracy: 0.5330
Epoch 18/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.2170 - accuracy: 0.5686 - val_loss: 1.3625 - val_accuracy: 0.5312
Epoch 19/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.2028 - accuracy: 0.5732 - val_loss: 1.3211 - val_accuracy: 0.5368
Epoch 20/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.1903 - accuracy: 0.5783 - val_loss: 1.3487 - val_accuracy: 0.5362
Epoch 21/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.1761 - accuracy: 0.5855 - val_loss: 1.3371 - val_accuracy: 0.5300
Epoch 22/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.1596 - accuracy: 0.5920 - val_loss: 1.3382 - val_accuracy: 0.5320
Epoch 23/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.1522 - accuracy: 0.5922 - val_loss: 1.3482 - val_accuracy: 0.5322
Epoch 24/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.1388 - accuracy: 0.5967 - val_loss: 1.3028 - val_accuracy: 0.5482
Epoch 25/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.1240 - accuracy: 0.6026 - val_loss: 1.3433 - val_accuracy: 0.5346
Epoch 26/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.1148 - accuracy: 0.6069 - val_loss: 1.3590 - val_accuracy: 0.5320
Epoch 27/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0953 - accuracy: 0.6121 - val_loss: 1.3465 - val_accuracy: 0.5338
Epoch 28/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0924 - accuracy: 0.6148 - val_loss: 1.3575 - val_accuracy: 0.5266
Epoch 29/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0829 - accuracy: 0.6178 - val_loss: 1.3264 - val_accuracy: 0.5488
Epoch 30/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0700 - accuracy: 0.6233 - val_loss: 1.3321 - val_accuracy: 0.5392
Epoch 31/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0585 - accuracy: 0.6242 - val_loss: 1.3544 - val_accuracy: 0.5388
Epoch 32/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0475 - accuracy: 0.6286 - val_loss: 1.3242 - val_accuracy: 0.5492
Epoch 33/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0373 - accuracy: 0.6348 - val_loss: 1.3274 - val_accuracy: 0.5444
Epoch 34/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0276 - accuracy: 0.6365 - val_loss: 1.3468 - val_accuracy: 0.5468
Epoch 35/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0169 - accuracy: 0.6407 - val_loss: 1.3536 - val_accuracy: 0.5362
Epoch 36/100
1407/1407 [==============================] - 21s 15ms/step - loss: 1.0097 - accuracy: 0.6455 - val_loss: 1.3461 - val_accuracy: 0.5424
Epoch 37/100
1407/1407 [==============================] - 21s 15ms/step - loss: 0.9957 - accuracy: 0.6470 - val_loss: 1.3534 - val_accuracy: 0.5436
Epoch 38/100
1407/1407 [==============================] - 21s 15ms/step - loss: 0.9947 - accuracy: 0.6506 - val_loss: 1.3715 - val_accuracy: 0.5406
Epoch 39/100
1407/1407 [==============================] - 21s 15ms/step - loss: 0.9797 - accuracy: 0.6552 - val_loss: 1.3525 - val_accuracy: 0.5466
Epoch 40/100
1407/1407 [==============================] - 20s 15ms/step - loss: 0.9712 - accuracy: 0.6580 - val_loss: 1.3764 - val_accuracy: 0.5410
Epoch 41/100
1407/1407 [==============================] - 21s 15ms/step - loss: 0.9623 - accuracy: 0.6604 - val_loss: 1.3408 - val_accuracy: 0.5584
Epoch 42/100
1407/1407 [==============================] - 21s 15ms/step - loss: 0.9557 - accuracy: 0.6632 - val_loss: 1.3715 - val_accuracy: 0.5418
Epoch 43/100
1407/1407 [==============================] - 21s 15ms/step - loss: 0.9516 - accuracy: 0.6662 - val_loss: 1.3610 - val_accuracy: 0.5444
Epoch 44/100
1407/1407 [==============================] - 21s 15ms/step - loss: 0.9393 - accuracy: 0.6692 - val_loss: 1.3982 - val_accuracy: 0.5364
157/157 [==============================] - 0s 2ms/step - loss: 1.3028 - accuracy: 0.0908
###Markdown
* *Is the model converging faster than before?* Much faster! The previous model took 39 epochs to reach the lowest validation loss, while the new model with BN took 18 epochs. That's more than twice as fast as the previous model. The BN layers stabilized training and allowed us to use a much larger learning rate, so convergence was faster.
* *Does BN produce a better model?* Yes! The final model is also much better, with 55% accuracy instead of 47%. It's still not a very good model, but at least it's much better than before (a Convolutional Neural Network would do much better, but that's a different topic, see chapter 14).
* *How does BN affect training speed?* Although the model converged twice as fast, each epoch took about 16s instead of 10s, because of the extra computations required by the BN layers. So overall, although the number of epochs was reduced by 50%, the training time (wall time) was shortened by 30%, which is still pretty significant!

d. *Exercise: Try replacing Batch Normalization with SELU, and make the necessary adjustments to ensure the network self-normalizes (i.e., standardize the input features, use LeCun normal initialization, make sure the DNN contains only a sequence of dense layers, etc.).*
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(lr=7e-4)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_selu_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_selu_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
X_means = X_train.mean(axis=0)
X_stds = X_train.std(axis=0)
X_train_scaled = (X_train - X_means) / X_stds
X_valid_scaled = (X_valid - X_means) / X_stds
X_test_scaled = (X_test - X_means) / X_stds
model.fit(X_train_scaled, y_train, epochs=100,
validation_data=(X_valid_scaled, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_selu_model.h5")
model.evaluate(X_valid_scaled, y_valid)
model = keras.models.load_model("my_cifar10_selu_model.h5")
model.evaluate(X_valid_scaled, y_valid)
###Output
1/157 [..............................] - ETA: 0s - loss: 1.4529 - accuracy: 0.1562WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0000s vs `on_test_batch_end` time: 0.0055s). Check your callbacks.
157/157 [==============================] - 0s 1ms/step - loss: 1.4808 - accuracy: 0.1124
###Markdown
We get 51.4% accuracy, which is better than the original model, but not quite as good as the model using batch normalization. Moreover, it took 13 epochs to reach the best model, which is much faster than both the original model and the BN model, plus each epoch took only 10 seconds, just like the original model. So it's by far the fastest model to train (both in terms of epochs and wall time).

e. *Exercise: Try regularizing the model with alpha dropout. Then, without retraining your model, see if you can achieve better accuracy using MC Dropout.*
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(lr=5e-4)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_alpha_dropout_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_alpha_dropout_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
X_means = X_train.mean(axis=0)
X_stds = X_train.std(axis=0)
X_train_scaled = (X_train - X_means) / X_stds
X_valid_scaled = (X_valid - X_means) / X_stds
X_test_scaled = (X_test - X_means) / X_stds
model.fit(X_train_scaled, y_train, epochs=100,
validation_data=(X_valid_scaled, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_alpha_dropout_model.h5")
model.evaluate(X_valid_scaled, y_valid)
###Output
Epoch 1/100
2/1407 [..............................] - ETA: 33:41 - loss: 2.9857 - accuracy: 0.0938WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0083s vs `on_train_batch_end` time: 2.8708s). Check your callbacks.
1407/1407 [==============================] - 12s 8ms/step - loss: 1.8902 - accuracy: 0.3279 - val_loss: 1.7109 - val_accuracy: 0.3972
Epoch 2/100
1407/1407 [==============================] - 9s 6ms/step - loss: 1.6676 - accuracy: 0.4093 - val_loss: 1.7709 - val_accuracy: 0.3730
Epoch 3/100
1407/1407 [==============================] - 9s 6ms/step - loss: 1.5797 - accuracy: 0.4453 - val_loss: 1.5693 - val_accuracy: 0.4392
Epoch 4/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.5082 - accuracy: 0.4673 - val_loss: 1.5935 - val_accuracy: 0.4456
Epoch 5/100
1407/1407 [==============================] - 9s 6ms/step - loss: 1.4572 - accuracy: 0.4917 - val_loss: 1.5617 - val_accuracy: 0.4638
Epoch 6/100
1407/1407 [==============================] - 9s 6ms/step - loss: 1.4095 - accuracy: 0.5039 - val_loss: 1.5377 - val_accuracy: 0.4852
Epoch 7/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.3640 - accuracy: 0.5208 - val_loss: 1.5591 - val_accuracy: 0.4702
Epoch 8/100
1407/1407 [==============================] - 9s 6ms/step - loss: 1.3237 - accuracy: 0.5375 - val_loss: 1.4880 - val_accuracy: 0.4952
Epoch 9/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.2865 - accuracy: 0.5508 - val_loss: 1.4953 - val_accuracy: 0.4820
Epoch 10/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.2564 - accuracy: 0.5618 - val_loss: 1.5384 - val_accuracy: 0.4826
Epoch 11/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.2247 - accuracy: 0.5730 - val_loss: 1.5087 - val_accuracy: 0.4970
Epoch 12/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.1921 - accuracy: 0.5839 - val_loss: 1.5292 - val_accuracy: 0.5046
Epoch 13/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.1671 - accuracy: 0.5953 - val_loss: 1.5332 - val_accuracy: 0.5088
Epoch 14/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.1404 - accuracy: 0.6027 - val_loss: 1.5649 - val_accuracy: 0.5102
Epoch 15/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.1151 - accuracy: 0.6093 - val_loss: 1.6198 - val_accuracy: 0.5118
Epoch 16/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.0891 - accuracy: 0.6238 - val_loss: 1.7098 - val_accuracy: 0.5032
Epoch 17/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.0736 - accuracy: 0.6278 - val_loss: 1.6176 - val_accuracy: 0.5122
Epoch 18/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.0476 - accuracy: 0.6365 - val_loss: 1.5618 - val_accuracy: 0.5122
Epoch 19/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.0219 - accuracy: 0.6478 - val_loss: 1.6912 - val_accuracy: 0.5124
Epoch 20/100
1407/1407 [==============================] - 8s 6ms/step - loss: 1.0092 - accuracy: 0.6522 - val_loss: 1.6066 - val_accuracy: 0.5058
Epoch 21/100
1407/1407 [==============================] - 9s 6ms/step - loss: 0.9808 - accuracy: 0.6609 - val_loss: 1.7393 - val_accuracy: 0.5126
Epoch 22/100
1407/1407 [==============================] - 8s 6ms/step - loss: 0.9608 - accuracy: 0.6696 - val_loss: 1.7431 - val_accuracy: 0.5180
Epoch 23/100
1407/1407 [==============================] - 8s 6ms/step - loss: 0.9423 - accuracy: 0.6784 - val_loss: 1.6845 - val_accuracy: 0.5126
Epoch 24/100
1407/1407 [==============================] - 8s 6ms/step - loss: 0.9316 - accuracy: 0.6796 - val_loss: 1.8081 - val_accuracy: 0.5174
Epoch 25/100
1407/1407 [==============================] - 8s 6ms/step - loss: 0.9234 - accuracy: 0.6862 - val_loss: 1.7531 - val_accuracy: 0.4462
Epoch 26/100
1407/1407 [==============================] - 8s 6ms/step - loss: 0.9466 - accuracy: 0.6753 - val_loss: 1.7371 - val_accuracy: 0.5114
Epoch 27/100
1407/1407 [==============================] - 8s 6ms/step - loss: 0.8605 - accuracy: 0.7058 - val_loss: 1.7474 - val_accuracy: 0.5100
Epoch 28/100
1407/1407 [==============================] - 8s 6ms/step - loss: 0.8581 - accuracy: 0.7055 - val_loss: 1.7696 - val_accuracy: 0.5038
1/157 [..............................] - ETA: 0s - loss: 1.6199 - accuracy: 0.1250WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0000s vs `on_test_batch_end` time: 0.0110s). Check your callbacks.
157/157 [==============================] - 0s 1ms/step - loss: 1.4880 - accuracy: 0.1132
###Markdown
The model reaches 50.8% accuracy on the validation set. That's very slightly worse than without dropout (51.4%). With an extensive hyperparameter search, it might be possible to do better (I tried dropout rates of 5%, 10%, 20% and 40%, and learning rates 1e-4, 3e-4, 5e-4, and 1e-3), but probably not much better in this case. Let's use MC Dropout now. We will need the `MCAlphaDropout` class we used earlier, so let's just copy it here for convenience:
###Code
class MCAlphaDropout(keras.layers.AlphaDropout):
def call(self, inputs):
return super().call(inputs, training=True)
###Output
_____no_output_____
###Markdown
Now let's create a new model, identical to the one we just trained (with the same weights), but with `MCAlphaDropout` dropout layers instead of `AlphaDropout` layers:
###Code
mc_model = keras.models.Sequential([
MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer
for layer in model.layers
])
###Output
_____no_output_____
###Markdown
Then let's add a couple utility functions. The first will run the model many times (10 by default) and it will return the mean predicted class probabilities. The second will use these mean probabilities to predict the most likely class for each instance:
###Code
def mc_dropout_predict_probas(mc_model, X, n_samples=10):
Y_probas = [mc_model.predict(X) for sample in range(n_samples)]
return np.mean(Y_probas, axis=0)
def mc_dropout_predict_classes(mc_model, X, n_samples=10):
Y_probas = mc_dropout_predict_probas(mc_model, X, n_samples)
return np.argmax(Y_probas, axis=1)
###Output
_____no_output_____
###Markdown
Now let's make predictions for all the instances in the validation set, and compute the accuracy:
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
y_pred = mc_dropout_predict_classes(mc_model, X_valid_scaled)
accuracy = np.mean(y_pred == y_valid[:, 0])
accuracy
###Output
_____no_output_____
###Markdown
We get virtually no accuracy improvement in this case (from 50.8% to 50.9%). So the best model we got in this exercise is the Batch Normalization model.

f. *Exercise: Retrain your model using 1cycle scheduling and see if it improves training speed and model accuracy.*
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.SGD(lr=1e-3)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
batch_size = 128
rates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)
plot_lr_vs_loss(rates, losses)
plt.axis([min(rates), max(rates), min(losses), (losses[0] + min(losses)) / 1.4])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.SGD(lr=1e-2)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
n_epochs = 15
onecycle = OneCycleScheduler(len(X_train_scaled) // batch_size * n_epochs, max_rate=0.05)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,
validation_data=(X_valid_scaled, y_valid),
callbacks=[onecycle])
###Output
Epoch 1/15
352/352 [==============================] - 1s 4ms/step - loss: 2.0521 - accuracy: 0.2866 - val_loss: 1.7570 - val_accuracy: 0.3896
Epoch 2/15
352/352 [==============================] - 1s 3ms/step - loss: 1.7567 - accuracy: 0.3797 - val_loss: 1.6460 - val_accuracy: 0.4268
Epoch 3/15
352/352 [==============================] - 1s 3ms/step - loss: 1.6179 - accuracy: 0.4264 - val_loss: 1.6263 - val_accuracy: 0.4242
Epoch 4/15
352/352 [==============================] - 1s 3ms/step - loss: 1.5388 - accuracy: 0.4541 - val_loss: 1.5916 - val_accuracy: 0.4414
Epoch 5/15
352/352 [==============================] - 1s 3ms/step - loss: 1.4916 - accuracy: 0.4702 - val_loss: 1.5995 - val_accuracy: 0.4376
Epoch 6/15
352/352 [==============================] - 1s 3ms/step - loss: 1.4492 - accuracy: 0.4832 - val_loss: 1.5355 - val_accuracy: 0.4572
Epoch 7/15
352/352 [==============================] - 1s 3ms/step - loss: 1.4108 - accuracy: 0.4983 - val_loss: 1.6034 - val_accuracy: 0.4468
Epoch 8/15
352/352 [==============================] - 1s 3ms/step - loss: 1.3462 - accuracy: 0.5234 - val_loss: 1.5510 - val_accuracy: 0.4710
Epoch 9/15
352/352 [==============================] - 1s 3ms/step - loss: 1.2699 - accuracy: 0.5494 - val_loss: 1.5297 - val_accuracy: 0.4898
Epoch 10/15
352/352 [==============================] - 1s 3ms/step - loss: 1.2022 - accuracy: 0.5718 - val_loss: 1.5352 - val_accuracy: 0.4930
Epoch 11/15
352/352 [==============================] - 1s 3ms/step - loss: 1.1300 - accuracy: 0.5984 - val_loss: 1.4993 - val_accuracy: 0.5058
Epoch 12/15
352/352 [==============================] - 1s 3ms/step - loss: 1.0618 - accuracy: 0.6204 - val_loss: 1.4738 - val_accuracy: 0.5190
Epoch 13/15
352/352 [==============================] - 1s 3ms/step - loss: 0.9903 - accuracy: 0.6474 - val_loss: 1.5107 - val_accuracy: 0.5272
Epoch 14/15
352/352 [==============================] - 1s 3ms/step - loss: 0.9262 - accuracy: 0.6698 - val_loss: 1.5351 - val_accuracy: 0.5292
Epoch 15/15
352/352 [==============================] - 1s 3ms/step - loss: 0.8871 - accuracy: 0.6831 - val_loss: 1.5599 - val_accuracy: 0.5318
###Markdown
**Chapter 11 – Training Deep Neural Networks** _This notebook contains all the sample code and solutions to the exercises in chapter 11._ Setup First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
###Code
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
%load_ext tensorboard
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deep"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
###Output
_____no_output_____
###Markdown
Vanishing/Exploding Gradients Problem
###Code
def logit(z):
return 1 / (1 + np.exp(-z))
z = np.linspace(-5, 5, 200)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [1, 1], 'k--')
plt.plot([0, 0], [-0.2, 1.2], 'k-')
plt.plot([-5, 5], [-3/4, 7/4], 'g--')
plt.plot(z, logit(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("sigmoid_saturation_plot")
plt.show()
###Output
Saving figure sigmoid_saturation_plot
###Markdown
Xavier and He Initialization
###Code
[name for name in dir(keras.initializers) if not name.startswith("_")]
keras.layers.Dense(10, activation="relu", kernel_initializer="he_normal")
init = keras.initializers.VarianceScaling(scale=2., mode='fan_avg',
distribution='uniform')
keras.layers.Dense(10, activation="relu", kernel_initializer=init)
###Output
_____no_output_____
###Markdown
Nonsaturating Activation Functions Leaky ReLU
###Code
def leaky_relu(z, alpha=0.01):
return np.maximum(alpha*z, z)
plt.plot(z, leaky_relu(z, 0.05), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([0, 0], [-0.5, 4.2], 'k-')
plt.grid(True)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14, ha="center")
plt.title("Leaky ReLU activation function", fontsize=14)
plt.axis([-5, 5, -0.5, 4.2])
save_fig("leaky_relu_plot")
plt.show()
[m for m in dir(keras.activations) if not m.startswith("_")]
[m for m in dir(keras.layers) if "relu" in m.lower()]
###Output
_____no_output_____
###Markdown
Let's train a neural network on Fashion MNIST using the Leaky ReLU:
###Code
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_train_full = X_train_full / 255.0
X_test = X_test / 255.0
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
###Output
Epoch 1/10
1719/1719 [==============================] - 2s 1ms/step - loss: 1.6314 - accuracy: 0.5054 - val_loss: 0.8886 - val_accuracy: 0.7160
Epoch 2/10
1719/1719 [==============================] - 2s 892us/step - loss: 0.8416 - accuracy: 0.7247 - val_loss: 0.7130 - val_accuracy: 0.7656
Epoch 3/10
1719/1719 [==============================] - 2s 879us/step - loss: 0.7053 - accuracy: 0.7637 - val_loss: 0.6427 - val_accuracy: 0.7898
Epoch 4/10
1719/1719 [==============================] - 2s 883us/step - loss: 0.6325 - accuracy: 0.7908 - val_loss: 0.5900 - val_accuracy: 0.8066
Epoch 5/10
1719/1719 [==============================] - 2s 887us/step - loss: 0.5992 - accuracy: 0.8021 - val_loss: 0.5582 - val_accuracy: 0.8200
Epoch 6/10
1719/1719 [==============================] - 2s 881us/step - loss: 0.5624 - accuracy: 0.8142 - val_loss: 0.5350 - val_accuracy: 0.8238
Epoch 7/10
1719/1719 [==============================] - 2s 892us/step - loss: 0.5379 - accuracy: 0.8217 - val_loss: 0.5157 - val_accuracy: 0.8304
Epoch 8/10
1719/1719 [==============================] - 2s 895us/step - loss: 0.5152 - accuracy: 0.8295 - val_loss: 0.5078 - val_accuracy: 0.8284
Epoch 9/10
1719/1719 [==============================] - 2s 911us/step - loss: 0.5100 - accuracy: 0.8268 - val_loss: 0.4895 - val_accuracy: 0.8390
Epoch 10/10
1719/1719 [==============================] - 2s 897us/step - loss: 0.4918 - accuracy: 0.8340 - val_loss: 0.4817 - val_accuracy: 0.8396
###Markdown
Now let's try PReLU:
###Code
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
###Output
Epoch 1/10
1719/1719 [==============================] - 2s 1ms/step - loss: 1.6969 - accuracy: 0.4974 - val_loss: 0.9255 - val_accuracy: 0.7186
Epoch 2/10
1719/1719 [==============================] - 2s 990us/step - loss: 0.8706 - accuracy: 0.7247 - val_loss: 0.7305 - val_accuracy: 0.7630
Epoch 3/10
1719/1719 [==============================] - 2s 980us/step - loss: 0.7211 - accuracy: 0.7621 - val_loss: 0.6564 - val_accuracy: 0.7882
Epoch 4/10
1719/1719 [==============================] - 2s 985us/step - loss: 0.6447 - accuracy: 0.7879 - val_loss: 0.6003 - val_accuracy: 0.8048
Epoch 5/10
1719/1719 [==============================] - 2s 967us/step - loss: 0.6077 - accuracy: 0.8004 - val_loss: 0.5656 - val_accuracy: 0.8182
Epoch 6/10
1719/1719 [==============================] - 2s 984us/step - loss: 0.5692 - accuracy: 0.8118 - val_loss: 0.5406 - val_accuracy: 0.8236
Epoch 7/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5428 - accuracy: 0.8194 - val_loss: 0.5196 - val_accuracy: 0.8314
Epoch 8/10
1719/1719 [==============================] - 2s 983us/step - loss: 0.5193 - accuracy: 0.8284 - val_loss: 0.5113 - val_accuracy: 0.8316
Epoch 9/10
1719/1719 [==============================] - 2s 992us/step - loss: 0.5128 - accuracy: 0.8272 - val_loss: 0.4916 - val_accuracy: 0.8378
Epoch 10/10
1719/1719 [==============================] - 2s 988us/step - loss: 0.4941 - accuracy: 0.8314 - val_loss: 0.4826 - val_accuracy: 0.8398
###Markdown
ELU
###Code
def elu(z, alpha=1):
return np.where(z < 0, alpha * (np.exp(z) - 1), z)
plt.plot(z, elu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1, -1], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title(r"ELU activation function ($\alpha=1$)", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("elu_plot")
plt.show()
###Output
Saving figure elu_plot
###Markdown
Implementing ELU in TensorFlow is trivial, just specify the activation function when building each layer:
###Code
keras.layers.Dense(10, activation="elu")
###Output
_____no_output_____
###Markdown
SELU This activation function was proposed in this [great paper](https://arxiv.org/pdf/1706.02515.pdf) by Günter Klambauer, Thomas Unterthiner and Andreas Mayr, published in June 2017. During training, a neural network composed exclusively of a stack of dense layers using the SELU activation function and LeCun initialization will self-normalize: the output of each layer will tend to preserve the same mean and variance during training, which solves the vanishing/exploding gradients problem. As a result, this activation function outperforms the other activation functions very significantly for such neural nets, so you should really try it out. Unfortunately, the self-normalizing property of the SELU activation function is easily broken: you cannot use ℓ1 or ℓ2 regularization, regular dropout, max-norm, skip connections or other non-sequential topologies (so recurrent neural networks won't self-normalize). However, in practice it works quite well with sequential CNNs. If you break self-normalization, SELU will not necessarily outperform other activation functions.
###Code
from scipy.special import erfc
# alpha and scale to self normalize with mean 0 and standard deviation 1
# (see equation 14 in the paper):
alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1/np.sqrt(2)) * np.exp(1/2) - 1)
scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (2 * erfc(np.sqrt(2))*np.e**2 + np.pi*erfc(1/np.sqrt(2))**2*np.e - 2*(2+np.pi)*erfc(1/np.sqrt(2))*np.sqrt(np.e)+np.pi+2)**(-1/2)
def selu(z, scale=scale_0_1, alpha=alpha_0_1):
return scale * elu(z, alpha)
plt.plot(z, selu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1.758, -1.758], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title("SELU activation function", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("selu_plot")
plt.show()
###Output
Saving figure selu_plot
###Markdown
By default, the SELU hyperparameters (`scale` and `alpha`) are tuned in such a way that the mean output of each neuron remains close to 0, and the standard deviation remains close to 1 (assuming the inputs are standardized with mean 0 and standard deviation 1 too). Using this activation function, even a 1,000 layer deep neural network preserves roughly mean 0 and standard deviation 1 across all layers, avoiding the exploding/vanishing gradients problem:
###Code
np.random.seed(42)
Z = np.random.normal(size=(500, 100)) # standardized inputs
for layer in range(1000):
W = np.random.normal(size=(100, 100), scale=np.sqrt(1 / 100)) # LeCun initialization
Z = selu(np.dot(Z, W))
means = np.mean(Z, axis=0).mean()
stds = np.std(Z, axis=0).mean()
if layer % 100 == 0:
print("Layer {}: mean {:.2f}, std deviation {:.2f}".format(layer, means, stds))
###Output
Layer 0: mean -0.00, std deviation 1.00
Layer 100: mean 0.02, std deviation 0.96
Layer 200: mean 0.01, std deviation 0.90
Layer 300: mean -0.02, std deviation 0.92
Layer 400: mean 0.05, std deviation 0.89
Layer 500: mean 0.01, std deviation 0.93
Layer 600: mean 0.02, std deviation 0.92
Layer 700: mean -0.02, std deviation 0.90
Layer 800: mean 0.05, std deviation 0.83
Layer 900: mean 0.02, std deviation 1.00
###Markdown
Using SELU is easy:
###Code
keras.layers.Dense(10, activation="selu",
kernel_initializer="lecun_normal")
###Output
_____no_output_____
###Markdown
Let's create a neural net for Fashion MNIST with 100 hidden layers, using the SELU activation function:
###Code
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="selu",
kernel_initializer="lecun_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="selu",
kernel_initializer="lecun_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
Now let's train it. Do not forget to scale the inputs to mean 0 and standard deviation 1:
###Code
pixel_means = X_train.mean(axis=0, keepdims=True)
pixel_stds = X_train.std(axis=0, keepdims=True)
X_train_scaled = (X_train - pixel_means) / pixel_stds
X_valid_scaled = (X_valid - pixel_means) / pixel_stds
X_test_scaled = (X_test - pixel_means) / pixel_stds
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/5
1719/1719 [==============================] - 12s 6ms/step - loss: 1.3556 - accuracy: 0.4808 - val_loss: 0.7711 - val_accuracy: 0.6858
Epoch 2/5
1719/1719 [==============================] - 9s 5ms/step - loss: 0.7537 - accuracy: 0.7235 - val_loss: 0.7534 - val_accuracy: 0.7384
Epoch 3/5
1719/1719 [==============================] - 9s 5ms/step - loss: 0.7451 - accuracy: 0.7357 - val_loss: 0.5943 - val_accuracy: 0.7834
Epoch 4/5
1719/1719 [==============================] - 9s 5ms/step - loss: 0.5699 - accuracy: 0.7906 - val_loss: 0.5434 - val_accuracy: 0.8066
Epoch 5/5
1719/1719 [==============================] - 9s 5ms/step - loss: 0.5569 - accuracy: 0.8051 - val_loss: 0.4907 - val_accuracy: 0.8218
###Markdown
Now look at what happens if we try to use the ReLU activation function instead:
###Code
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu", kernel_initializer="he_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="relu", kernel_initializer="he_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/5
1719/1719 [==============================] - 11s 5ms/step - loss: 2.0460 - accuracy: 0.1919 - val_loss: 1.5971 - val_accuracy: 0.3048
Epoch 2/5
1719/1719 [==============================] - 8s 5ms/step - loss: 1.2654 - accuracy: 0.4591 - val_loss: 0.9156 - val_accuracy: 0.6372
Epoch 3/5
1719/1719 [==============================] - 8s 5ms/step - loss: 0.9312 - accuracy: 0.6169 - val_loss: 0.8928 - val_accuracy: 0.6246
Epoch 4/5
1719/1719 [==============================] - 8s 5ms/step - loss: 0.8188 - accuracy: 0.6710 - val_loss: 0.6914 - val_accuracy: 0.7396
Epoch 5/5
1719/1719 [==============================] - 8s 5ms/step - loss: 0.7288 - accuracy: 0.7152 - val_loss: 0.6638 - val_accuracy: 0.7380
###Markdown
Not great at all, we suffered from the vanishing/exploding gradients problem. Batch Normalization
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(100, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(10, activation="softmax")
])
model.summary()
bn1 = model.layers[1]
[(var.name, var.trainable) for var in bn1.variables]
#bn1.updates #deprecated
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
###Output
Epoch 1/10
1719/1719 [==============================] - 3s 1ms/step - loss: 1.2287 - accuracy: 0.5993 - val_loss: 0.5526 - val_accuracy: 0.8230
Epoch 2/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5996 - accuracy: 0.7959 - val_loss: 0.4725 - val_accuracy: 0.8468
Epoch 3/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5312 - accuracy: 0.8168 - val_loss: 0.4375 - val_accuracy: 0.8558
Epoch 4/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4884 - accuracy: 0.8294 - val_loss: 0.4153 - val_accuracy: 0.8596
Epoch 5/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4717 - accuracy: 0.8343 - val_loss: 0.3997 - val_accuracy: 0.8640
Epoch 6/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4420 - accuracy: 0.8461 - val_loss: 0.3867 - val_accuracy: 0.8694
Epoch 7/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4285 - accuracy: 0.8496 - val_loss: 0.3763 - val_accuracy: 0.8710
Epoch 8/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4086 - accuracy: 0.8552 - val_loss: 0.3711 - val_accuracy: 0.8740
Epoch 9/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4079 - accuracy: 0.8566 - val_loss: 0.3631 - val_accuracy: 0.8752
Epoch 10/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.3903 - accuracy: 0.8617 - val_loss: 0.3573 - val_accuracy: 0.8750
###Markdown
Sometimes applying BN before the activation function works better (there's a debate on this topic). Moreover, the layer before a `BatchNormalization` layer does not need to have bias terms, since the `BatchNormalization` layer has its own offset parameter; having both would be a waste of parameters, so you can set `use_bias=False` when creating those layers:
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.Dense(100, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
###Output
Epoch 1/10
1719/1719 [==============================] - 3s 1ms/step - loss: 1.3677 - accuracy: 0.5604 - val_loss: 0.6767 - val_accuracy: 0.7812
Epoch 2/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.7136 - accuracy: 0.7702 - val_loss: 0.5566 - val_accuracy: 0.8184
Epoch 3/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.6123 - accuracy: 0.7990 - val_loss: 0.5007 - val_accuracy: 0.8360
Epoch 4/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5547 - accuracy: 0.8148 - val_loss: 0.4666 - val_accuracy: 0.8448
Epoch 5/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5255 - accuracy: 0.8230 - val_loss: 0.4434 - val_accuracy: 0.8534
Epoch 6/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4947 - accuracy: 0.8328 - val_loss: 0.4263 - val_accuracy: 0.8550
Epoch 7/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4736 - accuracy: 0.8385 - val_loss: 0.4130 - val_accuracy: 0.8566
Epoch 8/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4550 - accuracy: 0.8446 - val_loss: 0.4035 - val_accuracy: 0.8612
Epoch 9/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4495 - accuracy: 0.8440 - val_loss: 0.3943 - val_accuracy: 0.8638
Epoch 10/10
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4333 - accuracy: 0.8494 - val_loss: 0.3875 - val_accuracy: 0.8660
###Markdown
Gradient Clipping All Keras optimizers accept `clipnorm` or `clipvalue` arguments:
###Code
optimizer = keras.optimizers.SGD(clipvalue=1.0)
optimizer = keras.optimizers.SGD(clipnorm=1.0)
###Output
_____no_output_____
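###Markdown
As a minimal usage sketch (reusing the `model` and Fashion MNIST variables from the previous cells, so treat the exact architecture as an assumption), a clipped optimizer is passed to `compile()` like any other optimizer: with `clipvalue` each gradient component is clipped to the given range, while with `clipnorm` the whole gradient vector is rescaled whenever its norm exceeds the threshold:
###Code
# Hedged sketch: recompile the model from the cells above with gradients clipped by norm.
# X_train, y_train, X_valid and y_valid are assumed to be the Fashion MNIST arrays defined earlier.
clipped_optimizer = keras.optimizers.SGD(lr=1e-3, clipnorm=1.0)
model.compile(loss="sparse_categorical_crossentropy", optimizer=clipped_optimizer, metrics=["accuracy"])
# history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
###Output
_____no_output_____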
###Markdown
Reusing Pretrained Layers Reusing a Keras model Let's split the fashion MNIST training set in two:
* `X_train_A`: all images of all items except for sandals and shirts (classes 5 and 6).
* `X_train_B`: a much smaller training set of just the first 200 images of sandals or shirts.

The validation set and the test set are also split this way, but without restricting the number of images.

We will train a model on set A (classification task with 8 classes), and try to reuse it to tackle set B (binary classification). We hope to transfer a little bit of knowledge from task A to task B, since classes in set A (sneakers, ankle boots, coats, t-shirts, etc.) are somewhat similar to classes in set B (sandals and shirts). However, since we are using `Dense` layers, only patterns that occur at the same location can be reused (in contrast, convolutional layers will transfer much better, since learned patterns can be detected anywhere on the image, as we will see in the CNN chapter).
###Code
def split_dataset(X, y):
y_5_or_6 = (y == 5) | (y == 6) # sandals or shirts
y_A = y[~y_5_or_6]
y_A[y_A > 6] -= 2 # class indices 7, 8, 9 should be moved to 5, 6, 7
y_B = (y[y_5_or_6] == 6).astype(np.float32) # binary classification task: is it a shirt (class 6)?
return ((X[~y_5_or_6], y_A),
(X[y_5_or_6], y_B))
(X_train_A, y_train_A), (X_train_B, y_train_B) = split_dataset(X_train, y_train)
(X_valid_A, y_valid_A), (X_valid_B, y_valid_B) = split_dataset(X_valid, y_valid)
(X_test_A, y_test_A), (X_test_B, y_test_B) = split_dataset(X_test, y_test)
X_train_B = X_train_B[:200]
y_train_B = y_train_B[:200]
X_train_A.shape
X_train_B.shape
y_train_A[:30]
y_train_B[:30]
tf.random.set_seed(42)
np.random.seed(42)
model_A = keras.models.Sequential()
model_A.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_A.add(keras.layers.Dense(n_hidden, activation="selu"))
model_A.add(keras.layers.Dense(8, activation="softmax"))
model_A.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_A.fit(X_train_A, y_train_A, epochs=20,
validation_data=(X_valid_A, y_valid_A))
model_A.save("my_model_A.h5")
model_B = keras.models.Sequential()
model_B.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_B.add(keras.layers.Dense(n_hidden, activation="selu"))
model_B.add(keras.layers.Dense(1, activation="sigmoid"))
model_B.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B.fit(X_train_B, y_train_B, epochs=20,
validation_data=(X_valid_B, y_valid_B))
model_B.summary()
model_A = keras.models.load_model("my_model_A.h5")
model_B_on_A = keras.models.Sequential(model_A.layers[:-1])
model_B_on_A.add(keras.layers.Dense(1, activation="sigmoid"))
model_A_clone = keras.models.clone_model(model_A)
model_A_clone.set_weights(model_A.get_weights())
for layer in model_B_on_A.layers[:-1]:
layer.trainable = False
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=4,
validation_data=(X_valid_B, y_valid_B))
for layer in model_B_on_A.layers[:-1]:
layer.trainable = True
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=16,
validation_data=(X_valid_B, y_valid_B))
###Output
Epoch 1/4
7/7 [==============================] - 1s 83ms/step - loss: 0.6155 - accuracy: 0.6184 - val_loss: 0.5843 - val_accuracy: 0.6329
Epoch 2/4
7/7 [==============================] - 0s 9ms/step - loss: 0.5550 - accuracy: 0.6638 - val_loss: 0.5467 - val_accuracy: 0.6805
Epoch 3/4
7/7 [==============================] - 0s 8ms/step - loss: 0.4897 - accuracy: 0.7482 - val_loss: 0.5146 - val_accuracy: 0.7089
Epoch 4/4
7/7 [==============================] - 0s 8ms/step - loss: 0.4899 - accuracy: 0.7405 - val_loss: 0.4859 - val_accuracy: 0.7323
Epoch 1/16
7/7 [==============================] - 0s 28ms/step - loss: 0.4380 - accuracy: 0.7774 - val_loss: 0.3460 - val_accuracy: 0.8661
Epoch 2/16
7/7 [==============================] - 0s 9ms/step - loss: 0.2971 - accuracy: 0.9143 - val_loss: 0.2603 - val_accuracy: 0.9310
Epoch 3/16
7/7 [==============================] - 0s 9ms/step - loss: 0.2034 - accuracy: 0.9777 - val_loss: 0.2110 - val_accuracy: 0.9554
Epoch 4/16
7/7 [==============================] - 0s 9ms/step - loss: 0.1754 - accuracy: 0.9719 - val_loss: 0.1790 - val_accuracy: 0.9696
Epoch 5/16
7/7 [==============================] - 0s 9ms/step - loss: 0.1348 - accuracy: 0.9809 - val_loss: 0.1561 - val_accuracy: 0.9757
Epoch 6/16
7/7 [==============================] - 0s 9ms/step - loss: 0.1172 - accuracy: 0.9973 - val_loss: 0.1392 - val_accuracy: 0.9797
Epoch 7/16
7/7 [==============================] - 0s 9ms/step - loss: 0.1137 - accuracy: 0.9931 - val_loss: 0.1266 - val_accuracy: 0.9838
Epoch 8/16
7/7 [==============================] - 0s 9ms/step - loss: 0.1000 - accuracy: 0.9931 - val_loss: 0.1163 - val_accuracy: 0.9858
Epoch 9/16
7/7 [==============================] - 0s 9ms/step - loss: 0.0834 - accuracy: 1.0000 - val_loss: 0.1065 - val_accuracy: 0.9888
Epoch 10/16
7/7 [==============================] - 0s 9ms/step - loss: 0.0775 - accuracy: 1.0000 - val_loss: 0.0999 - val_accuracy: 0.9899
Epoch 11/16
7/7 [==============================] - 0s 9ms/step - loss: 0.0689 - accuracy: 1.0000 - val_loss: 0.0939 - val_accuracy: 0.9899
Epoch 12/16
7/7 [==============================] - 0s 9ms/step - loss: 0.0719 - accuracy: 1.0000 - val_loss: 0.0888 - val_accuracy: 0.9899
Epoch 13/16
7/7 [==============================] - 0s 9ms/step - loss: 0.0565 - accuracy: 1.0000 - val_loss: 0.0839 - val_accuracy: 0.9899
Epoch 14/16
7/7 [==============================] - 0s 9ms/step - loss: 0.0494 - accuracy: 1.0000 - val_loss: 0.0802 - val_accuracy: 0.9899
Epoch 15/16
7/7 [==============================] - 0s 9ms/step - loss: 0.0544 - accuracy: 1.0000 - val_loss: 0.0768 - val_accuracy: 0.9899
Epoch 16/16
7/7 [==============================] - 0s 9ms/step - loss: 0.0472 - accuracy: 1.0000 - val_loss: 0.0738 - val_accuracy: 0.9899
###Markdown
So, what's the final verdict?
###Code
model_B.evaluate(X_test_B, y_test_B)
model_B_on_A.evaluate(X_test_B, y_test_B)
###Output
63/63 [==============================] - 0s 705us/step - loss: 0.0682 - accuracy: 0.9935
###Markdown
Great! We got quite a bit of transfer: the error rate dropped by a factor of 4.5!
###Code
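# The numbers below come from the two evaluations above: model_B presumably reached about
# 97.05% test accuracy while model_B_on_A reached about 99.35% (as shown in the output),
# so this ratio compares their error rates: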
(100 - 97.05) / (100 - 99.35)
###Output
_____no_output_____
###Markdown
Faster Optimizers Momentum optimization
###Code
optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9)
###Output
_____no_output_____
###Markdown
Nesterov Accelerated Gradient
###Code
optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
###Output
_____no_output_____
###Markdown
AdaGrad
###Code
optimizer = keras.optimizers.Adagrad(lr=0.001)
###Output
_____no_output_____
###Markdown
RMSProp
###Code
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9)
###Output
_____no_output_____
###Markdown
Adam Optimization
###Code
optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
###Output
_____no_output_____
###Markdown
Adamax Optimization
###Code
optimizer = keras.optimizers.Adamax(lr=0.001, beta_1=0.9, beta_2=0.999)
###Output
_____no_output_____
###Markdown
Nadam Optimization
###Code
optimizer = keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999)
###Output
_____no_output_____
###Markdown
Learning Rate Scheduling Power Scheduling
```lr = lr0 / (1 + steps / s)**c```
* Keras uses `c=1` and `s = 1 / decay`
###Code
optimizer = keras.optimizers.SGD(lr=0.01, decay=1e-4)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
import math
learning_rate = 0.01
decay = 1e-4
batch_size = 32
n_steps_per_epoch = math.ceil(len(X_train) / batch_size)
epochs = np.arange(n_epochs)
lrs = learning_rate / (1 + decay * epochs * n_steps_per_epoch)
plt.plot(epochs, lrs, "o-")
plt.axis([0, n_epochs - 1, 0, 0.01])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Power Scheduling", fontsize=14)
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
Exponential Scheduling ```lr = lr0 * 0.1**(epoch / s)```
###Code
def exponential_decay_fn(epoch):
return 0.01 * 0.1**(epoch / 20)
def exponential_decay(lr0, s):
def exponential_decay_fn(epoch):
return lr0 * 0.1**(epoch / s)
return exponential_decay_fn
exponential_decay_fn = exponential_decay(lr0=0.01, s=20)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, history.history["lr"], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling", fontsize=14)
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
The schedule function can take the current learning rate as a second argument:
###Code
def exponential_decay_fn(epoch, lr):
return lr * 0.1**(1 / 20)
###Output
_____no_output_____
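###Markdown
This variant is wired up exactly like the previous one (a minimal sketch, assuming the same model and scaled training set as above): pass the function to a `LearningRateScheduler` callback, and Keras calls it at the start of each epoch with the current learning rate, so here the rate simply gets multiplied by a constant factor every epoch.
###Code
# Hedged sketch: the two-argument schedule goes into the callback just like before.
lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)
# history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
#                     validation_data=(X_valid_scaled, y_valid),
#                     callbacks=[lr_scheduler])
###Output
_____no_output_____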
###Markdown
If you want to update the learning rate at each iteration rather than at each epoch, you must write your own callback class:
###Code
K = keras.backend
class ExponentialDecay(keras.callbacks.Callback):
def __init__(self, s=40000):
super().__init__()
self.s = s
def on_batch_begin(self, batch, logs=None):
# Note: the `batch` argument is reset at each epoch
lr = K.get_value(self.model.optimizer.lr)
K.set_value(self.model.optimizer.lr, lr * 0.1**(1 / self.s))
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
lr0 = 0.01
optimizer = keras.optimizers.Nadam(lr=lr0)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
exp_decay = ExponentialDecay(s)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[exp_decay])
n_steps = n_epochs * len(X_train) // 32
steps = np.arange(n_steps)
lrs = lr0 * 0.1**(steps / s)
plt.plot(steps, lrs, "-", linewidth=2)
plt.axis([0, n_steps - 1, 0, lr0 * 1.1])
plt.xlabel("Batch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling (per batch)", fontsize=14)
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
Piecewise Constant Scheduling
###Code
def piecewise_constant_fn(epoch):
if epoch < 5:
return 0.01
elif epoch < 15:
return 0.005
else:
return 0.001
def piecewise_constant(boundaries, values):
boundaries = np.array([0] + boundaries)
values = np.array(values)
def piecewise_constant_fn(epoch):
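# `boundaries > epoch` is a boolean array and np.argmax returns the index of its first
# True value (the first boundary greater than `epoch`); subtracting 1 selects the value
# for the interval that `epoch` falls in (index -1, i.e. the last value, once `epoch`
# is past the final boundary)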
return values[np.argmax(boundaries > epoch) - 1]
return piecewise_constant_fn
piecewise_constant_fn = piecewise_constant([5, 15], [0.01, 0.005, 0.001])
lr_scheduler = keras.callbacks.LearningRateScheduler(piecewise_constant_fn)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, [piecewise_constant_fn(epoch) for epoch in history.epoch], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Piecewise Constant Scheduling", fontsize=14)
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
Performance Scheduling
###Code
tf.random.set_seed(42)
np.random.seed(42)
lr_scheduler = keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(lr=0.02, momentum=0.9)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, history.history["lr"], "bo-")
plt.xlabel("Epoch")
plt.ylabel("Learning Rate", color='b')
plt.tick_params('y', colors='b')
plt.gca().set_xlim(0, n_epochs - 1)
plt.grid(True)
ax2 = plt.gca().twinx()
ax2.plot(history.epoch, history.history["val_loss"], "r^-")
ax2.set_ylabel('Validation Loss', color='r')
ax2.tick_params('y', colors='r')
plt.title("Reduce LR on Plateau", fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
tf.keras schedulers
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
learning_rate = keras.optimizers.schedules.ExponentialDecay(0.01, s, 0.1)
optimizer = keras.optimizers.SGD(learning_rate)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.5995 - accuracy: 0.7923 - val_loss: 0.4095 - val_accuracy: 0.8606
Epoch 2/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.3890 - accuracy: 0.8613 - val_loss: 0.3738 - val_accuracy: 0.8692
Epoch 3/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.3530 - accuracy: 0.8772 - val_loss: 0.3735 - val_accuracy: 0.8692
Epoch 4/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.3296 - accuracy: 0.8813 - val_loss: 0.3494 - val_accuracy: 0.8798
Epoch 5/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.3178 - accuracy: 0.8867 - val_loss: 0.3430 - val_accuracy: 0.8794
Epoch 6/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2930 - accuracy: 0.8951 - val_loss: 0.3414 - val_accuracy: 0.8826
Epoch 7/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2854 - accuracy: 0.8985 - val_loss: 0.3354 - val_accuracy: 0.8810
Epoch 8/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2714 - accuracy: 0.9039 - val_loss: 0.3364 - val_accuracy: 0.8824
Epoch 9/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2714 - accuracy: 0.9047 - val_loss: 0.3265 - val_accuracy: 0.8846
Epoch 10/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2570 - accuracy: 0.9084 - val_loss: 0.3238 - val_accuracy: 0.8854
Epoch 11/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2502 - accuracy: 0.9117 - val_loss: 0.3250 - val_accuracy: 0.8862
Epoch 12/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2453 - accuracy: 0.9145 - val_loss: 0.3299 - val_accuracy: 0.8830
Epoch 13/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2408 - accuracy: 0.9154 - val_loss: 0.3219 - val_accuracy: 0.8870
Epoch 14/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2380 - accuracy: 0.9154 - val_loss: 0.3221 - val_accuracy: 0.8860
Epoch 15/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2378 - accuracy: 0.9166 - val_loss: 0.3208 - val_accuracy: 0.8864
Epoch 16/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2318 - accuracy: 0.9191 - val_loss: 0.3184 - val_accuracy: 0.8892
Epoch 17/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2266 - accuracy: 0.9212 - val_loss: 0.3197 - val_accuracy: 0.8906
Epoch 18/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2284 - accuracy: 0.9185 - val_loss: 0.3169 - val_accuracy: 0.8906
Epoch 19/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2286 - accuracy: 0.9205 - val_loss: 0.3197 - val_accuracy: 0.8884
Epoch 20/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2288 - accuracy: 0.9211 - val_loss: 0.3169 - val_accuracy: 0.8906
Epoch 21/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2265 - accuracy: 0.9212 - val_loss: 0.3179 - val_accuracy: 0.8904
Epoch 22/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2258 - accuracy: 0.9205 - val_loss: 0.3163 - val_accuracy: 0.8914
Epoch 23/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2224 - accuracy: 0.9226 - val_loss: 0.3170 - val_accuracy: 0.8904
Epoch 24/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2182 - accuracy: 0.9244 - val_loss: 0.3165 - val_accuracy: 0.8898
Epoch 25/25
1719/1719 [==============================] - 2s 1ms/step - loss: 0.2224 - accuracy: 0.9229 - val_loss: 0.3164 - val_accuracy: 0.8904
###Markdown
For piecewise constant scheduling, try this:
###Code
# `n_steps_per_epoch` is not defined in this notebook; assuming a batch size of 32,
# it is the number of training batches per epoch:
n_steps_per_epoch = len(X_train) // 32
learning_rate = keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[5. * n_steps_per_epoch, 15. * n_steps_per_epoch],
    values=[0.01, 0.005, 0.001])
###Output
_____no_output_____
###Markdown
1Cycle scheduling
###Code
K = keras.backend
class ExponentialLearningRate(keras.callbacks.Callback):
def __init__(self, factor):
self.factor = factor
self.rates = []
self.losses = []
def on_batch_end(self, batch, logs):
self.rates.append(K.get_value(self.model.optimizer.lr))
self.losses.append(logs["loss"])
K.set_value(self.model.optimizer.lr, self.model.optimizer.lr * self.factor)
def find_learning_rate(model, X, y, epochs=1, batch_size=32, min_rate=10**-5, max_rate=10):
init_weights = model.get_weights()
iterations = math.ceil(len(X) / batch_size) * epochs
factor = np.exp(np.log(max_rate / min_rate) / iterations)
init_lr = K.get_value(model.optimizer.lr)
K.set_value(model.optimizer.lr, min_rate)
exp_lr = ExponentialLearningRate(factor)
history = model.fit(X, y, epochs=epochs, batch_size=batch_size,
callbacks=[exp_lr])
K.set_value(model.optimizer.lr, init_lr)
model.set_weights(init_weights)
return exp_lr.rates, exp_lr.losses
def plot_lr_vs_loss(rates, losses):
plt.plot(rates, losses)
plt.gca().set_xscale('log')
plt.hlines(min(losses), min(rates), max(rates))
plt.axis([min(rates), max(rates), min(losses), (losses[0] + min(losses)) / 2])
plt.xlabel("Learning rate")
plt.ylabel("Loss")
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
batch_size = 128
rates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)
plot_lr_vs_loss(rates, losses)
class OneCycleScheduler(keras.callbacks.Callback):
def __init__(self, iterations, max_rate, start_rate=None,
last_iterations=None, last_rate=None):
self.iterations = iterations
self.max_rate = max_rate
self.start_rate = start_rate or max_rate / 10
self.last_iterations = last_iterations or iterations // 10 + 1
self.half_iteration = (iterations - self.last_iterations) // 2
self.last_rate = last_rate or self.start_rate / 1000
self.iteration = 0
def _interpolate(self, iter1, iter2, rate1, rate2):
return ((rate2 - rate1) * (self.iteration - iter1)
/ (iter2 - iter1) + rate1)
def on_batch_begin(self, batch, logs):
if self.iteration < self.half_iteration:
rate = self._interpolate(0, self.half_iteration, self.start_rate, self.max_rate)
elif self.iteration < 2 * self.half_iteration:
rate = self._interpolate(self.half_iteration, 2 * self.half_iteration,
self.max_rate, self.start_rate)
else:
rate = self._interpolate(2 * self.half_iteration, self.iterations,
self.start_rate, self.last_rate)
self.iteration += 1
K.set_value(self.model.optimizer.lr, rate)
n_epochs = 25
onecycle = OneCycleScheduler(math.ceil(len(X_train) / batch_size) * n_epochs, max_rate=0.05)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,
validation_data=(X_valid_scaled, y_valid),
callbacks=[onecycle])
###Output
Epoch 1/25
430/430 [==============================] - 1s 2ms/step - loss: 0.6572 - accuracy: 0.7740 - val_loss: 0.4872 - val_accuracy: 0.8338
Epoch 2/25
430/430 [==============================] - 1s 2ms/step - loss: 0.4580 - accuracy: 0.8397 - val_loss: 0.4274 - val_accuracy: 0.8520
Epoch 3/25
430/430 [==============================] - 1s 2ms/step - loss: 0.4121 - accuracy: 0.8545 - val_loss: 0.4116 - val_accuracy: 0.8588
Epoch 4/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3837 - accuracy: 0.8642 - val_loss: 0.3868 - val_accuracy: 0.8688
Epoch 5/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3639 - accuracy: 0.8719 - val_loss: 0.3766 - val_accuracy: 0.8688
Epoch 6/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3456 - accuracy: 0.8775 - val_loss: 0.3739 - val_accuracy: 0.8706
Epoch 7/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3330 - accuracy: 0.8811 - val_loss: 0.3635 - val_accuracy: 0.8708
Epoch 8/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3184 - accuracy: 0.8861 - val_loss: 0.3959 - val_accuracy: 0.8610
Epoch 9/25
430/430 [==============================] - 1s 2ms/step - loss: 0.3065 - accuracy: 0.8890 - val_loss: 0.3475 - val_accuracy: 0.8770
Epoch 10/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2943 - accuracy: 0.8927 - val_loss: 0.3392 - val_accuracy: 0.8806
Epoch 11/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2838 - accuracy: 0.8963 - val_loss: 0.3467 - val_accuracy: 0.8800
Epoch 12/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2707 - accuracy: 0.9024 - val_loss: 0.3646 - val_accuracy: 0.8696
Epoch 13/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2536 - accuracy: 0.9079 - val_loss: 0.3350 - val_accuracy: 0.8842
Epoch 14/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2405 - accuracy: 0.9135 - val_loss: 0.3465 - val_accuracy: 0.8794
Epoch 15/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2279 - accuracy: 0.9185 - val_loss: 0.3257 - val_accuracy: 0.8830
Epoch 16/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2159 - accuracy: 0.9232 - val_loss: 0.3294 - val_accuracy: 0.8824
Epoch 17/25
430/430 [==============================] - 1s 2ms/step - loss: 0.2062 - accuracy: 0.9263 - val_loss: 0.3333 - val_accuracy: 0.8882
Epoch 18/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1978 - accuracy: 0.9301 - val_loss: 0.3235 - val_accuracy: 0.8898
Epoch 19/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1892 - accuracy: 0.9337 - val_loss: 0.3233 - val_accuracy: 0.8906
Epoch 20/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1821 - accuracy: 0.9365 - val_loss: 0.3224 - val_accuracy: 0.8928
Epoch 21/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1752 - accuracy: 0.9400 - val_loss: 0.3220 - val_accuracy: 0.8908
Epoch 22/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1700 - accuracy: 0.9416 - val_loss: 0.3180 - val_accuracy: 0.8962
Epoch 23/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1655 - accuracy: 0.9438 - val_loss: 0.3187 - val_accuracy: 0.8940
Epoch 24/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1627 - accuracy: 0.9454 - val_loss: 0.3177 - val_accuracy: 0.8932
Epoch 25/25
430/430 [==============================] - 1s 2ms/step - loss: 0.1610 - accuracy: 0.9462 - val_loss: 0.3170 - val_accuracy: 0.8934
###Markdown
Avoiding Overfitting Through Regularization $\ell_1$ and $\ell_2$ regularization
###Code
layer = keras.layers.Dense(100, activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01))
# or l1(0.1) for ℓ1 regularization with a factor of 0.1
# or l1_l2(0.1, 0.01) for both ℓ1 and ℓ2 regularization, with factors 0.1 and 0.01 respectively
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01)),
keras.layers.Dense(100, activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01)),
keras.layers.Dense(10, activation="softmax",
kernel_regularizer=keras.regularizers.l2(0.01))
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
from functools import partial
RegularizedDense = partial(keras.layers.Dense,
activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01))
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
RegularizedDense(300),
RegularizedDense(100),
RegularizedDense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/2
1719/1719 [==============================] - 6s 3ms/step - loss: 3.2911 - accuracy: 0.7924 - val_loss: 0.7218 - val_accuracy: 0.8310
Epoch 2/2
1719/1719 [==============================] - 5s 3ms/step - loss: 0.7282 - accuracy: 0.8245 - val_loss: 0.6826 - val_accuracy: 0.8382
###Markdown
Dropout
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(300, activation="elu", kernel_initializer="he_normal"),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(100, activation="elu", kernel_initializer="he_normal"),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/2
1719/1719 [==============================] - 6s 3ms/step - loss: 0.7611 - accuracy: 0.7576 - val_loss: 0.3730 - val_accuracy: 0.8644
Epoch 2/2
1719/1719 [==============================] - 5s 3ms/step - loss: 0.4306 - accuracy: 0.8401 - val_loss: 0.3395 - val_accuracy: 0.8722
###Markdown
Alpha Dropout
###Code
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 20
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)
model.evaluate(X_train_scaled, y_train)
history = model.fit(X_train_scaled, y_train)
###Output
1719/1719 [==============================] - 2s 1ms/step - loss: 0.4225 - accuracy: 0.8432
###Markdown
MC Dropout
###Code
tf.random.set_seed(42)
np.random.seed(42)
y_probas = np.stack([model(X_test_scaled, training=True)
for sample in range(100)])
y_proba = y_probas.mean(axis=0)
y_std = y_probas.std(axis=0)
np.round(model.predict(X_test_scaled[:1]), 2)
np.round(y_probas[:, :1], 2)
np.round(y_proba[:1], 2)
y_std = y_probas.std(axis=0)
np.round(y_std[:1], 2)
y_pred = np.argmax(y_proba, axis=1)
accuracy = np.sum(y_pred == y_test) / len(y_test)
accuracy
class MCDropout(keras.layers.Dropout):
def call(self, inputs):
return super().call(inputs, training=True)
class MCAlphaDropout(keras.layers.AlphaDropout):
def call(self, inputs):
return super().call(inputs, training=True)
tf.random.set_seed(42)
np.random.seed(42)
mc_model = keras.models.Sequential([
MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer
for layer in model.layers
])
mc_model.summary()
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
mc_model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
mc_model.set_weights(model.get_weights())
###Output
_____no_output_____
###Markdown
Now we can use the model with MC Dropout:
###Code
np.round(np.mean([mc_model.predict(X_test_scaled[:1]) for sample in range(100)], axis=0), 2)
###Output
_____no_output_____
###Markdown
Max norm
###Code
layer = keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal",
kernel_constraint=keras.constraints.max_norm(1.))
MaxNormDense = partial(keras.layers.Dense,
activation="selu", kernel_initializer="lecun_normal",
kernel_constraint=keras.constraints.max_norm(1.))
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
MaxNormDense(300),
MaxNormDense(100),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
###Output
Epoch 1/2
1719/1719 [==============================] - 5s 3ms/step - loss: 0.5763 - accuracy: 0.8020 - val_loss: 0.3674 - val_accuracy: 0.8674
Epoch 2/2
1719/1719 [==============================] - 5s 3ms/step - loss: 0.3545 - accuracy: 0.8709 - val_loss: 0.3714 - val_accuracy: 0.8662
###Markdown
Exercises 1. to 7. See appendix A. 8. Deep Learning on CIFAR10 a.*Exercise: Build a DNN with 20 hidden layers of 100 neurons each (that's too many, but it's the point of this exercise). Use He initialization and the ELU activation function.*
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
activation="elu",
kernel_initializer="he_normal"))
###Output
_____no_output_____
###Markdown
b.*Exercise: Using Nadam optimization and early stopping, train the network on the CIFAR10 dataset. You can load it with `keras.datasets.cifar10.load_data()`. The dataset is composed of 60,000 32 × 32–pixel color images (50,000 for training, 10,000 for testing) with 10 classes, so you'll need a softmax output layer with 10 neurons. Remember to search for the right learning rate each time you change the model's architecture or hyperparameters.* Let's add the output layer to the model:
###Code
model.add(keras.layers.Dense(10, activation="softmax"))
###Output
_____no_output_____
###Markdown
Let's use a Nadam optimizer with a learning rate of 5e-5. I tried learning rates 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3 and 1e-2, and I compared their learning curves for 10 epochs each (using the TensorBoard callback, below). The learning rates 3e-5 and 1e-4 were pretty good, so I tried 5e-5, which turned out to be slightly better.
###Code
optimizer = keras.optimizers.Nadam(lr=5e-5)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
Let's load the CIFAR10 dataset. We also want to use early stopping, so we need a validation set. Let's use the first 5,000 images of the original training set as the validation set:
###Code
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.cifar10.load_data()
X_train = X_train_full[5000:]
y_train = y_train_full[5000:]
X_valid = X_train_full[:5000]
y_valid = y_train_full[:5000]
###Output
_____no_output_____
###Markdown
Now we can create the callbacks we need and train the model:
###Code
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
%tensorboard --logdir=./my_cifar10_logs --port=6006
model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_model.h5")
model.evaluate(X_valid, y_valid)
###Output
157/157 [==============================] - 0s 1ms/step - loss: 1.4960 - accuracy: 0.4762
###Markdown
The model with the lowest validation loss gets about 47.6% accuracy on the validation set. It took 27 epochs to reach the lowest validation loss, with roughly 8 seconds per epoch on my laptop (without a GPU). Let's see if we can improve performance using Batch Normalization. c.*Exercise: Now try adding Batch Normalization and compare the learning curves: Is it converging faster than before? Does it produce a better model? How does it affect training speed?* The code below is very similar to the code above, with a few changes:* I added a BN layer after every Dense layer (before the activation function), except for the output layer. I also added a BN layer before the first hidden layer.* I changed the learning rate to 5e-4. I experimented with 1e-5, 3e-5, 5e-5, 1e-4, 3e-4, 5e-4, 1e-3 and 3e-3, and I chose the one with the best validation performance after 20 epochs.* I renamed the run directories to run_bn_* and the model file name to my_cifar10_bn_model.h5.
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
model.add(keras.layers.BatchNormalization())
for _ in range(20):
model.add(keras.layers.Dense(100, kernel_initializer="he_normal"))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation("elu"))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(lr=5e-4)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_bn_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_bn_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_bn_model.h5")
model.evaluate(X_valid, y_valid)
###Output
Epoch 1/100
1407/1407 [==============================] - 19s 9ms/step - loss: 1.9765 - accuracy: 0.2968 - val_loss: 1.6602 - val_accuracy: 0.4042
Epoch 2/100
1407/1407 [==============================] - 11s 8ms/step - loss: 1.6787 - accuracy: 0.4056 - val_loss: 1.5887 - val_accuracy: 0.4304
Epoch 3/100
1407/1407 [==============================] - 11s 8ms/step - loss: 1.6097 - accuracy: 0.4274 - val_loss: 1.5781 - val_accuracy: 0.4326
Epoch 4/100
1407/1407 [==============================] - 11s 8ms/step - loss: 1.5574 - accuracy: 0.4486 - val_loss: 1.5064 - val_accuracy: 0.4676
Epoch 5/100
1407/1407 [==============================] - 11s 8ms/step - loss: 1.5075 - accuracy: 0.4642 - val_loss: 1.4412 - val_accuracy: 0.4844
Epoch 6/100
1407/1407 [==============================] - 11s 8ms/step - loss: 1.4664 - accuracy: 0.4787 - val_loss: 1.4179 - val_accuracy: 0.4984
Epoch 7/100
1407/1407 [==============================] - 11s 8ms/step - loss: 1.4334 - accuracy: 0.4932 - val_loss: 1.4277 - val_accuracy: 0.4906
Epoch 8/100
1407/1407 [==============================] - 12s 8ms/step - loss: 1.4054 - accuracy: 0.5038 - val_loss: 1.3843 - val_accuracy: 0.5130
Epoch 9/100
1407/1407 [==============================] - 12s 8ms/step - loss: 1.3816 - accuracy: 0.5106 - val_loss: 1.3691 - val_accuracy: 0.5108
Epoch 10/100
1407/1407 [==============================] - 12s 8ms/step - loss: 1.3547 - accuracy: 0.5206 - val_loss: 1.3552 - val_accuracy: 0.5226
Epoch 11/100
1407/1407 [==============================] - 12s 9ms/step - loss: 1.3244 - accuracy: 0.5371 - val_loss: 1.3678 - val_accuracy: 0.5142
Epoch 12/100
1407/1407 [==============================] - 12s 8ms/step - loss: 1.3078 - accuracy: 0.5393 - val_loss: 1.3844 - val_accuracy: 0.5080
Epoch 13/100
1407/1407 [==============================] - 12s 9ms/step - loss: 1.2889 - accuracy: 0.5431 - val_loss: 1.3566 - val_accuracy: 0.5164
Epoch 14/100
1407/1407 [==============================] - 12s 9ms/step - loss: 1.2607 - accuracy: 0.5559 - val_loss: 1.3626 - val_accuracy: 0.5248
Epoch 15/100
1407/1407 [==============================] - 12s 8ms/step - loss: 1.2580 - accuracy: 0.5587 - val_loss: 1.3616 - val_accuracy: 0.5276
Epoch 16/100
1407/1407 [==============================] - 12s 8ms/step - loss: 1.2441 - accuracy: 0.5586 - val_loss: 1.3350 - val_accuracy: 0.5286
Epoch 17/100
1407/1407 [==============================] - 12s 8ms/step - loss: 1.2241 - accuracy: 0.5676 - val_loss: 1.3370 - val_accuracy: 0.5408
Epoch 18/100
<<29 more lines>>
Epoch 33/100
1407/1407 [==============================] - 12s 8ms/step - loss: 1.0336 - accuracy: 0.6369 - val_loss: 1.3682 - val_accuracy: 0.5450
Epoch 34/100
1407/1407 [==============================] - 11s 8ms/step - loss: 1.0228 - accuracy: 0.6388 - val_loss: 1.3348 - val_accuracy: 0.5458
Epoch 35/100
1407/1407 [==============================] - 12s 8ms/step - loss: 1.0205 - accuracy: 0.6407 - val_loss: 1.3490 - val_accuracy: 0.5440
Epoch 36/100
1407/1407 [==============================] - 12s 9ms/step - loss: 1.0008 - accuracy: 0.6489 - val_loss: 1.3568 - val_accuracy: 0.5408
Epoch 37/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.9785 - accuracy: 0.6543 - val_loss: 1.3628 - val_accuracy: 0.5396
Epoch 38/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.9832 - accuracy: 0.6592 - val_loss: 1.3617 - val_accuracy: 0.5482
Epoch 39/100
1407/1407 [==============================] - 12s 8ms/step - loss: 0.9707 - accuracy: 0.6581 - val_loss: 1.3767 - val_accuracy: 0.5446
Epoch 40/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.9590 - accuracy: 0.6651 - val_loss: 1.4200 - val_accuracy: 0.5314
Epoch 41/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.9548 - accuracy: 0.6668 - val_loss: 1.3692 - val_accuracy: 0.5450
Epoch 42/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.9480 - accuracy: 0.6667 - val_loss: 1.3841 - val_accuracy: 0.5310
Epoch 43/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.9411 - accuracy: 0.6716 - val_loss: 1.4036 - val_accuracy: 0.5382
Epoch 44/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.9383 - accuracy: 0.6708 - val_loss: 1.4114 - val_accuracy: 0.5236
Epoch 45/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.9258 - accuracy: 0.6769 - val_loss: 1.4224 - val_accuracy: 0.5324
Epoch 46/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.9072 - accuracy: 0.6836 - val_loss: 1.3875 - val_accuracy: 0.5442
Epoch 47/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.8996 - accuracy: 0.6850 - val_loss: 1.4449 - val_accuracy: 0.5280
Epoch 48/100
1407/1407 [==============================] - 13s 9ms/step - loss: 0.9050 - accuracy: 0.6835 - val_loss: 1.4167 - val_accuracy: 0.5338
Epoch 49/100
1407/1407 [==============================] - 12s 9ms/step - loss: 0.8934 - accuracy: 0.6880 - val_loss: 1.4260 - val_accuracy: 0.5294
157/157 [==============================] - 1s 2ms/step - loss: 1.3344 - accuracy: 0.5398
###Markdown
* *Is the model converging faster than before?* Much faster! The previous model took 27 epochs to reach the lowest validation loss, while the new model achieved that same loss in just 5 epochs and continued to make progress until the 16th epoch. The BN layers stabilized training and allowed us to use a much larger learning rate, so convergence was faster.* *Does BN produce a better model?* Yes! The final model is also much better, with 54.0% accuracy instead of 47.6%. It's still not a very good model, but at least it's much better than before (a Convolutional Neural Network would do much better, but that's a different topic, see chapter 14).* *How does BN affect training speed?* Although the model converged much faster, each epoch took about 12s instead of 8s, because of the extra computations required by the BN layers. But overall the training time (wall time) was shortened significantly! d.*Exercise: Try replacing Batch Normalization with SELU, and make the necessary adjustments to ensure the network self-normalizes (i.e., standardize the input features, use LeCun normal initialization, make sure the DNN contains only a sequence of dense layers, etc.).*
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(lr=7e-4)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_selu_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_selu_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
X_means = X_train.mean(axis=0)
X_stds = X_train.std(axis=0)
X_train_scaled = (X_train - X_means) / X_stds
X_valid_scaled = (X_valid - X_means) / X_stds
X_test_scaled = (X_test - X_means) / X_stds
model.fit(X_train_scaled, y_train, epochs=100,
validation_data=(X_valid_scaled, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_selu_model.h5")
model.evaluate(X_valid_scaled, y_valid)
model = keras.models.load_model("my_cifar10_selu_model.h5")
model.evaluate(X_valid_scaled, y_valid)
###Output
157/157 [==============================] - 0s 1ms/step - loss: 1.4633 - accuracy: 0.4792
###Markdown
We get 47.9% accuracy, which is not much better than the original model (47.6%), and not as good as the model using batch normalization (54.0%). However, convergence was almost as fast as with the BN model, plus each epoch took only 7 seconds. So it's by far the fastest model to train so far. e.*Exercise: Try regularizing the model with alpha dropout. Then, without retraining your model, see if you can achieve better accuracy using MC Dropout.*
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(lr=5e-4)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_alpha_dropout_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_alpha_dropout_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
X_means = X_train.mean(axis=0)
X_stds = X_train.std(axis=0)
X_train_scaled = (X_train - X_means) / X_stds
X_valid_scaled = (X_valid - X_means) / X_stds
X_test_scaled = (X_test - X_means) / X_stds
model.fit(X_train_scaled, y_train, epochs=100,
validation_data=(X_valid_scaled, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_alpha_dropout_model.h5")
model.evaluate(X_valid_scaled, y_valid)
###Output
Epoch 1/100
1407/1407 [==============================] - 9s 5ms/step - loss: 2.0583 - accuracy: 0.2742 - val_loss: 1.7429 - val_accuracy: 0.3858
Epoch 2/100
1407/1407 [==============================] - 6s 5ms/step - loss: 1.6852 - accuracy: 0.4008 - val_loss: 1.7055 - val_accuracy: 0.3792
Epoch 3/100
1407/1407 [==============================] - 7s 5ms/step - loss: 1.5963 - accuracy: 0.4413 - val_loss: 1.7401 - val_accuracy: 0.4072
Epoch 4/100
1407/1407 [==============================] - 7s 5ms/step - loss: 1.5231 - accuracy: 0.4634 - val_loss: 1.5728 - val_accuracy: 0.4584
Epoch 5/100
1407/1407 [==============================] - 7s 5ms/step - loss: 1.4619 - accuracy: 0.4887 - val_loss: 1.5448 - val_accuracy: 0.4702
Epoch 6/100
1407/1407 [==============================] - 6s 5ms/step - loss: 1.4074 - accuracy: 0.5061 - val_loss: 1.5678 - val_accuracy: 0.4664
Epoch 7/100
1407/1407 [==============================] - 6s 5ms/step - loss: 1.3718 - accuracy: 0.5222 - val_loss: 1.5764 - val_accuracy: 0.4824
Epoch 8/100
1407/1407 [==============================] - 7s 5ms/step - loss: 1.3220 - accuracy: 0.5387 - val_loss: 1.4805 - val_accuracy: 0.4890
Epoch 9/100
1407/1407 [==============================] - 6s 5ms/step - loss: 1.2908 - accuracy: 0.5487 - val_loss: 1.5521 - val_accuracy: 0.4638
Epoch 10/100
1407/1407 [==============================] - 7s 5ms/step - loss: 1.2537 - accuracy: 0.5607 - val_loss: 1.5281 - val_accuracy: 0.4924
Epoch 11/100
1407/1407 [==============================] - 6s 5ms/step - loss: 1.2215 - accuracy: 0.5782 - val_loss: 1.5147 - val_accuracy: 0.5046
Epoch 12/100
1407/1407 [==============================] - 7s 5ms/step - loss: 1.1910 - accuracy: 0.5831 - val_loss: 1.5248 - val_accuracy: 0.5002
Epoch 13/100
1407/1407 [==============================] - 6s 5ms/step - loss: 1.1659 - accuracy: 0.5982 - val_loss: 1.5620 - val_accuracy: 0.5066
Epoch 14/100
1407/1407 [==============================] - 6s 5ms/step - loss: 1.1282 - accuracy: 0.6120 - val_loss: 1.5440 - val_accuracy: 0.5180
Epoch 15/100
1407/1407 [==============================] - 6s 5ms/step - loss: 1.1127 - accuracy: 0.6133 - val_loss: 1.5782 - val_accuracy: 0.5146
Epoch 16/100
1407/1407 [==============================] - 7s 5ms/step - loss: 1.0917 - accuracy: 0.6266 - val_loss: 1.6182 - val_accuracy: 0.5182
Epoch 17/100
1407/1407 [==============================] - 6s 5ms/step - loss: 1.0620 - accuracy: 0.6331 - val_loss: 1.6285 - val_accuracy: 0.5126
Epoch 18/100
1407/1407 [==============================] - 7s 5ms/step - loss: 1.0433 - accuracy: 0.6413 - val_loss: 1.6299 - val_accuracy: 0.5158
Epoch 19/100
1407/1407 [==============================] - 7s 5ms/step - loss: 1.0087 - accuracy: 0.6549 - val_loss: 1.7172 - val_accuracy: 0.5062
Epoch 20/100
1407/1407 [==============================] - 6s 5ms/step - loss: 0.9950 - accuracy: 0.6571 - val_loss: 1.6524 - val_accuracy: 0.5098
Epoch 21/100
1407/1407 [==============================] - 7s 5ms/step - loss: 0.9848 - accuracy: 0.6652 - val_loss: 1.7686 - val_accuracy: 0.5038
Epoch 22/100
1407/1407 [==============================] - 7s 5ms/step - loss: 0.9597 - accuracy: 0.6744 - val_loss: 1.6177 - val_accuracy: 0.5084
Epoch 23/100
1407/1407 [==============================] - 7s 5ms/step - loss: 0.9399 - accuracy: 0.6790 - val_loss: 1.7095 - val_accuracy: 0.5082
Epoch 24/100
1407/1407 [==============================] - 7s 5ms/step - loss: 0.9148 - accuracy: 0.6884 - val_loss: 1.7160 - val_accuracy: 0.5150
Epoch 25/100
1407/1407 [==============================] - 6s 5ms/step - loss: 0.9023 - accuracy: 0.6949 - val_loss: 1.7017 - val_accuracy: 0.5152
Epoch 26/100
1407/1407 [==============================] - 7s 5ms/step - loss: 0.8732 - accuracy: 0.7031 - val_loss: 1.7274 - val_accuracy: 0.5088
Epoch 27/100
1407/1407 [==============================] - 6s 5ms/step - loss: 0.8542 - accuracy: 0.7091 - val_loss: 1.7648 - val_accuracy: 0.5166
Epoch 28/100
1407/1407 [==============================] - 7s 5ms/step - loss: 0.8499 - accuracy: 0.7118 - val_loss: 1.7973 - val_accuracy: 0.5000
157/157 [==============================] - 0s 1ms/step - loss: 1.4805 - accuracy: 0.4890
###Markdown
The model reaches 48.9% accuracy on the validation set. That's very slightly better than without dropout (47.6%). With an extensive hyperparameter search, it might be possible to do better (I tried dropout rates of 5%, 10%, 20% and 40%, and learning rates 1e-4, 3e-4, 5e-4, and 1e-3), but probably not much better in this case. Let's use MC Dropout now. We will need the `MCAlphaDropout` class we used earlier, so let's just copy it here for convenience:
###Code
class MCAlphaDropout(keras.layers.AlphaDropout):
def call(self, inputs):
return super().call(inputs, training=True)
###Output
_____no_output_____
###Markdown
Now let's create a new model, identical to the one we just trained (with the same weights), but with `MCAlphaDropout` dropout layers instead of `AlphaDropout` layers:
###Code
mc_model = keras.models.Sequential([
MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer
for layer in model.layers
])
###Output
_____no_output_____
###Markdown
Then let's add a couple of utility functions. The first will run the model many times (10 by default) and return the mean predicted class probabilities. The second will use these mean probabilities to predict the most likely class for each instance:
###Code
def mc_dropout_predict_probas(mc_model, X, n_samples=10):
Y_probas = [mc_model.predict(X) for sample in range(n_samples)]
return np.mean(Y_probas, axis=0)
def mc_dropout_predict_classes(mc_model, X, n_samples=10):
Y_probas = mc_dropout_predict_probas(mc_model, X, n_samples)
return np.argmax(Y_probas, axis=1)
###Output
_____no_output_____
###Markdown
Now let's make predictions for all the instances in the validation set, and compute the accuracy:
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
y_pred = mc_dropout_predict_classes(mc_model, X_valid_scaled)
accuracy = np.mean(y_pred == y_valid[:, 0])
accuracy
###Output
_____no_output_____
###Markdown
We get no accuracy improvement in this case (we're still at 48.9% accuracy). So the best model we got in this exercise is the Batch Normalization model. f.*Exercise: Retrain your model using 1cycle scheduling and see if it improves training speed and model accuracy.*
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.SGD(lr=1e-3)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
batch_size = 128
rates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)
plot_lr_vs_loss(rates, losses)
plt.axis([min(rates), max(rates), min(losses), (losses[0] + min(losses)) / 1.4])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.SGD(lr=1e-2)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
n_epochs = 15
onecycle = OneCycleScheduler(math.ceil(len(X_train_scaled) / batch_size) * n_epochs, max_rate=0.05)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,
validation_data=(X_valid_scaled, y_valid),
callbacks=[onecycle])
###Output
Epoch 1/15
352/352 [==============================] - 3s 6ms/step - loss: 2.2298 - accuracy: 0.2349 - val_loss: 1.7841 - val_accuracy: 0.3834
Epoch 2/15
352/352 [==============================] - 2s 6ms/step - loss: 1.7928 - accuracy: 0.3689 - val_loss: 1.6806 - val_accuracy: 0.4086
Epoch 3/15
352/352 [==============================] - 2s 6ms/step - loss: 1.6475 - accuracy: 0.4190 - val_loss: 1.6378 - val_accuracy: 0.4350
Epoch 4/15
352/352 [==============================] - 2s 6ms/step - loss: 1.5428 - accuracy: 0.4543 - val_loss: 1.6266 - val_accuracy: 0.4390
Epoch 5/15
352/352 [==============================] - 2s 6ms/step - loss: 1.4865 - accuracy: 0.4769 - val_loss: 1.6158 - val_accuracy: 0.4384
Epoch 6/15
352/352 [==============================] - 2s 6ms/step - loss: 1.4339 - accuracy: 0.4866 - val_loss: 1.5850 - val_accuracy: 0.4412
Epoch 7/15
352/352 [==============================] - 2s 6ms/step - loss: 1.4042 - accuracy: 0.5056 - val_loss: 1.6146 - val_accuracy: 0.4384
Epoch 8/15
352/352 [==============================] - 2s 6ms/step - loss: 1.3437 - accuracy: 0.5229 - val_loss: 1.5299 - val_accuracy: 0.4846
Epoch 9/15
352/352 [==============================] - 2s 5ms/step - loss: 1.2721 - accuracy: 0.5459 - val_loss: 1.5145 - val_accuracy: 0.4874
Epoch 10/15
352/352 [==============================] - 2s 6ms/step - loss: 1.1942 - accuracy: 0.5698 - val_loss: 1.4958 - val_accuracy: 0.5040
Epoch 11/15
352/352 [==============================] - 2s 6ms/step - loss: 1.1211 - accuracy: 0.6033 - val_loss: 1.5406 - val_accuracy: 0.4984
Epoch 12/15
352/352 [==============================] - 2s 6ms/step - loss: 1.0673 - accuracy: 0.6161 - val_loss: 1.5284 - val_accuracy: 0.5144
Epoch 13/15
352/352 [==============================] - 2s 6ms/step - loss: 0.9927 - accuracy: 0.6435 - val_loss: 1.5449 - val_accuracy: 0.5140
Epoch 14/15
352/352 [==============================] - 2s 6ms/step - loss: 0.9205 - accuracy: 0.6703 - val_loss: 1.5652 - val_accuracy: 0.5224
Epoch 15/15
352/352 [==============================] - 2s 6ms/step - loss: 0.8936 - accuracy: 0.6801 - val_loss: 1.5912 - val_accuracy: 0.5198
|
ch2/5. Quantum Random Number Generator.ipynb | ###Markdown
Quantum Random Number Generator _sort of_. Start [here](https://livebook.manning.com/book/learn-quantum-computing-with-python-and-q-sharp/chapter-2/v-4/point-7623-374-374-0).
###Code
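# NOTE: a hedged stand-in, not the book's code. The book builds `QuantumDevice`
# and its qubits in its own simulator module; the minimal classes below only
# mimic the interface assumed by `qrng` (using_qubit(), h(), measure()) so this
# cell can run on its own. Names and details here are assumptions.
import random
from contextlib import contextmanager

class _SimulatedQubit:
    def __init__(self):
        self.state = [1.0, 0.0]  # amplitudes of |0> and |1>
    def h(self):
        # Hadamard gate: rotate the amplitudes into an equal superposition
        a, b = self.state
        s = 2 ** -0.5
        self.state = [s * (a + b), s * (a - b)]
    def measure(self) -> bool:
        # Sample from the Born rule; no collapse needed for a one-shot sample
        return random.random() < abs(self.state[1]) ** 2
    def reset(self):
        self.state = [1.0, 0.0]

class QuantumDevice:
    @contextmanager
    def using_qubit(self):
        qubit = _SimulatedQubit()
        try:
            yield qubit
        finally:
            qubit.reset()

# With this stand-in, `qrng(QuantumDevice())` below returns a fair random bit.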
def qrng(device : QuantumDevice) -> bool:
with device.using_qubit() as q:
q.h()
return q.measure()
###Output
_____no_output_____ |
d2l/chapter_attention-mechanisms/attention-scoring-functions.ipynb | ###Markdown
Attention Scoring Functions:label:`sec_attention-scoring-functions` In :numref:`sec_nadaraya-waston`, we used a Gaussian kernel to model the interaction between queries and keys. The exponent of the Gaussian kernel in :eqref:`eq_nadaraya-waston-gaussian` can be treated as an *attention scoring function* (*scoring function* for short); the output of this function is then fed into a softmax operation. Through these steps, we obtain a probability distribution (the attention weights) over the values that are paired with the keys. Finally, the output of the attention pooling is a weighted sum of the values based on these attention weights. At a high level, this procedure instantiates the attention mechanism framework in :numref:`fig_qkv`. :numref:`fig_attention_output` illustrates how the output of attention pooling is computed as a weighted sum of the values, where $a$ denotes the attention scoring function. Since the attention weights form a probability distribution, the weighted sum is essentially a weighted average.![Computing the output of attention pooling as a weighted average of the values.](../img/attention-output.svg):label:`fig_attention_output` Mathematically, suppose that we have a query $\mathbf{q} \in \mathbb{R}^q$ and $m$ key-value pairs $(\mathbf{k}_1, \mathbf{v}_1), \ldots, (\mathbf{k}_m, \mathbf{v}_m)$, where $\mathbf{k}_i \in \mathbb{R}^k$ and $\mathbf{v}_i \in \mathbb{R}^v$. The attention pooling function $f$ is expressed as a weighted sum of the values:$$f(\mathbf{q}, (\mathbf{k}_1, \mathbf{v}_1), \ldots, (\mathbf{k}_m, \mathbf{v}_m)) = \sum_{i=1}^m \alpha(\mathbf{q}, \mathbf{k}_i) \mathbf{v}_i \in \mathbb{R}^v,$$:eqlabel:`eq_attn-pooling` where the attention weight (a scalar) for the query $\mathbf{q}$ and key $\mathbf{k}_i$ is obtained by mapping the two vectors to a scalar with the attention scoring function $a$ and then applying a softmax operation:$$\alpha(\mathbf{q}, \mathbf{k}_i) = \mathrm{softmax}(a(\mathbf{q}, \mathbf{k}_i)) = \frac{\exp(a(\mathbf{q}, \mathbf{k}_i))}{\sum_{j=1}^m \exp(a(\mathbf{q}, \mathbf{k}_j))} \in \mathbb{R}.$$:eqlabel:`eq_attn-scoring-alpha` As we can see, different choices of the attention scoring function $a$ lead to different attention pooling operations. In this section, we introduce two popular scoring functions that we will later use to implement more sophisticated attention mechanisms.
###Code
import math
import torch
from torch import nn
from d2l import torch as d2l
###Output
_____no_output_____
###Markdown
[**Masked Softmax Operation**] As mentioned above, a softmax operation is used to output a probability distribution as attention weights. In some cases, not all the values should be fed into attention pooling. For instance, for efficient minibatch processing in :numref:`sec_machine_translation`, some text sequences are padded with special tokens that carry no meaning. To get attention pooling over only the meaningful tokens as values, we can specify a valid sequence length (the number of tokens) so that positions beyond it are filtered out when computing softmax. In this way, we can implement such a *masked softmax operation* in the following `masked_softmax` function, where any position beyond the valid length is masked and set to zero.
###Code
#@save
def masked_softmax(X, valid_lens):
"""通过在最后一个轴上遮蔽元素来执行 softmax 操作"""
# `X`: 3D张量, `valid_lens`: 1D或2D 张量
if valid_lens is None:
return nn.functional.softmax(X, dim=-1)
else:
shape = X.shape
if valid_lens.dim() == 1:
valid_lens = torch.repeat_interleave(valid_lens, shape[1])
else:
valid_lens = valid_lens.reshape(-1)
        # On the last axis, replace masked elements with a very large negative value, whose softmax (i.e. exp) output is 0
X = d2l.sequence_mask(X.reshape(-1, shape[-1]), valid_lens,
value=-1e6)
return nn.functional.softmax(X.reshape(shape), dim=-1)
###Output
_____no_output_____
###Markdown
To [**demonstrate how this function works**], consider a minibatch of two $2 \times 4$ matrix examples, whose valid lengths are $2$ and $3$, respectively. After the masked softmax operation, values beyond the valid lengths are masked out as zeros.
###Code
masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3]))
###Output
_____no_output_____
###Markdown
Similarly, we can also use a two-dimensional tensor to specify a valid length for every row of each matrix example.
###Code
masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]]))
###Output
_____no_output_____
###Markdown
[**Additive Attention**]:label:`subsec_additive-attention` In general, when queries and keys are vectors of different lengths, additive attention can be used as the scoring function. Given a query $\mathbf{q} \in \mathbb{R}^q$ and a key $\mathbf{k} \in \mathbb{R}^k$, the *additive attention* scoring function is$$a(\mathbf q, \mathbf k) = \mathbf w_v^\top \text{tanh}(\mathbf W_q\mathbf q + \mathbf W_k \mathbf k) \in \mathbb{R},$$:eqlabel:`eq_additive-attn` where the learnable parameters are $\mathbf W_q\in\mathbb R^{h\times q}$, $\mathbf W_k\in\mathbb R^{h\times k}$, and $\mathbf w_v\in\mathbb R^{h}$. As shown in :eqref:`eq_additive-attn`, the query and the key are concatenated and fed into a multilayer perceptron (MLP) with a single hidden layer whose number of hidden units is a hyperparameter $h$. By using $\tanh$ as the activation function and disabling bias terms, we implement additive attention below.
###Code
#@save
class AdditiveAttention(nn.Module):
"""加性注意力"""
def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):
super(AdditiveAttention, self).__init__(**kwargs)
self.W_k = nn.Linear(key_size, num_hiddens, bias=False)
self.W_q = nn.Linear(query_size, num_hiddens, bias=False)
self.w_v = nn.Linear(num_hiddens, 1, bias=False)
self.dropout = nn.Dropout(dropout)
def forward(self, queries, keys, values, valid_lens):
queries, keys = self.W_q(queries), self.W_k(keys)
        # After dimension expansion,
        # shape of `queries`: (`batch_size`, no. of queries, 1, `num_hiddens`)
        # shape of `keys`: (`batch_size`, 1, no. of key-value pairs, `num_hiddens`)
        # Sum them up with broadcasting
features = queries.unsqueeze(2) + keys.unsqueeze(1)
features = torch.tanh(features)
        # `self.w_v` has only one output, so remove that last dimension from the shape.
        # Shape of `scores`: (`batch_size`, no. of queries, no. of key-value pairs)
scores = self.w_v(features).squeeze(-1)
self.attention_weights = masked_softmax(scores, valid_lens)
        # Shape of `values`: (`batch_size`, no. of key-value pairs, value dimension)
return torch.bmm(self.dropout(self.attention_weights), values)
###Output
_____no_output_____
###Markdown
Let us [**demonstrate the `AdditiveAttention` class above**] with a toy example, where the shapes of the queries, keys, and values are (batch size, number of steps or sequence length in tokens, feature size): concretely $(2,1,20)$, $(2,10,2)$, and $(2,10,4)$, respectively. The attention pooling output has a shape of (batch size, number of steps for queries, feature size for values).
###Code
queries, keys = torch.normal(0, 1, (2, 1, 20)), torch.ones((2, 10, 2))
# The two value matrices in the `values` minibatch are identical
values = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(
2, 1, 1)
valid_lens = torch.tensor([2, 6])
attention = AdditiveAttention(key_size=2, query_size=20, num_hiddens=8,
dropout=0.1)
attention.eval()
attention(queries, keys, values, valid_lens)
###Output
_____no_output_____
###Markdown
Although additive attention contains learnable parameters, since every key is the same in this example, the [**attention weights**] are uniform, determined by the specified valid lengths.
###Code
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
xlabel='Keys', ylabel='Queries')
###Output
_____no_output_____
###Markdown
[**Scaled Dot-Product Attention**] Using the dot product gives a more computationally efficient scoring function, but it requires that the query and the key have the same vector length $d$. Assume that all the elements of the query and the key are independent random variables with zero mean and unit variance. Then the dot product of the two vectors has zero mean and a variance of $d$. To ensure that the variance of the dot product remains $1$ regardless of the vector length, the *scaled dot-product attention* scoring function$$a(\mathbf q, \mathbf k) = \mathbf{q}^\top \mathbf{k} /\sqrt{d}$$divides the dot product by $\sqrt{d}$. In practice, we often think in minibatches for efficiency, such as computing attention for $n$ queries and $m$ key-value pairs, where queries and keys are of length $d$ and values are of length $v$. The scaled dot-product attention of queries $\mathbf Q\in\mathbb R^{n\times d}$, keys $\mathbf K\in\mathbb R^{m\times d}$, and values $\mathbf V\in\mathbb R^{m\times v}$ is$$ \mathrm{softmax}\left(\frac{\mathbf Q \mathbf K^\top }{\sqrt{d}}\right) \mathbf V \in \mathbb{R}^{n\times v}.$$:eqlabel:`eq_softmax_QK_V` In the following implementation of scaled dot-product attention, we use dropout for model regularization.
###Code
#@save
class DotProductAttention(nn.Module):
"""缩放点积注意力"""
def __init__(self, dropout, **kwargs):
super(DotProductAttention, self).__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
    # Shape of `queries`: (`batch_size`, no. of queries, `d`)
    # Shape of `keys`: (`batch_size`, no. of key-value pairs, `d`)
    # Shape of `values`: (`batch_size`, no. of key-value pairs, value dimension)
    # Shape of `valid_lens`: (`batch_size`,) or (`batch_size`, no. of queries)
def forward(self, queries, keys, values, valid_lens=None):
d = queries.shape[-1]
        # Swap the last two dimensions of `keys` with `transpose(1, 2)`
scores = torch.bmm(queries, keys.transpose(1,2)) / math.sqrt(d)
self.attention_weights = masked_softmax(scores, valid_lens)
return torch.bmm(self.dropout(self.attention_weights), values)
###Output
_____no_output_____
###Markdown
To [**demonstrate the `DotProductAttention` class above**], we use the same keys, values, and valid lengths from the earlier additive attention example. For the dot product operation, we make the feature size of the queries the same as that of the keys.
###Code
queries = torch.normal(0, 1, (2, 1, 2))
attention = DotProductAttention(dropout=0.5)
attention.eval()
attention(queries, keys, values, valid_lens)
###Output
_____no_output_____
###Markdown
Same as in the additive attention demonstration, since the keys contain identical elements that cannot be differentiated by any query, [**uniform attention weights**] are obtained.
###Code
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
xlabel='Keys', ylabel='Queries')
###Output
_____no_output_____ |
tools/Tensorflow/research/object_detection/object_detection_tutorial.ipynb | ###Markdown
Object Detection DemoWelcome to the object detection inference walkthrough! This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image. Make sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md) before you start. Imports
###Code
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
if StrictVersion(tf.__version__) < StrictVersion('1.12.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')
###Output
_____no_output_____
###Markdown
Env setup
###Code
# This is needed to display the images.
%matplotlib inline
###Output
_____no_output_____
###Markdown
Object detection importsHere are the imports from the object detection module.
###Code
from utils import label_map_util
from utils import visualization_utils as vis_util
###Output
_____no_output_____
###Markdown
Model preparation VariablesAny model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file. By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
###Code
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
###Output
_____no_output_____
###Markdown
Download Model
###Code
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
###Output
_____no_output_____
###Markdown
Load a (frozen) Tensorflow model into memory.
###Code
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
###Output
_____no_output_____
###Markdown
Loading label mapLabel maps map indices to category names, so that when our convolutional network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine.
###Code
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
###Output
_____no_output_____
###Markdown
Helper code
###Code
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
###Output
_____no_output_____
###Markdown
Detection
###Code
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[1], image.shape[2])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: image})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.int64)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
###Output
_____no_output_____ |
Project-3-data-analysis-final-project/week5_sgd_kaggle.ipynb | ###Markdown
Specialization "Machine Learning and Data Analysis"Author: research programmer at Mail.Ru Group and senior lecturer at the HSE Faculty of Computer Science, [Yury Kashnitsky](https://yorko.github.io/) Capstone project No. 1 Identifying users by the web pages they visit Week 5. The Kaggle competition "Catch Me If You Can"This week we revisit the concept of stochastic gradient descent and try out scikit-learn's SGDClassifier, which is much faster on large samples than the algorithms we tested in week 4. We will also get to know the data of the Kaggle [competition](https://inclass.kaggle.com/c/catch-me-if-you-can-intruder-detection-through-webpage-session-tracking2) on user identification and make our first submissions to it. At the end of this week, bonus points go to those who reach the top 30 of the competition's public leaderboard.**In this part of the project you may find the recordings of the following lectures of the course "Supervised learning" useful:** - [Stochastic gradient descent](https://www.coursera.org/learn/supervised-learning/lecture/xRY50/stokhastichieskii-ghradiientnyi-spusk) - [Linear models. Sklearn.linear_model. Classification](https://www.coursera.org/learn/supervised-learning/lecture/EBg9t/linieinyie-modieli-sklearn-linear-model-klassifikatsiia) **It is also recommended to go back and review the assignment ["Linear regression and stochastic gradient descent"](https://www.coursera.org/learn/supervised-learning/programming/t2Idc/linieinaia-rieghriessiia-i-stokhastichieskii-ghradiientnyi-spusk) from week 1 of course 2 of the specialization.** Assignment1. Fill in the code in this notebook 2. If you are taking the Yandex & MIPT specialization, submit the notebook in the corresponding Peer Review. If you are taking the ODS course, select your answers in the [web form](https://docs.google.com/forms/d/1pLsegkAICL9PzOLyAeH9DmDOBfktte0l8JW75uWcTng).
###Code
from __future__ import division, print_function
# turn off assorted Anaconda warnings
import warnings
warnings.filterwarnings('ignore')
import os
import pickle
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score
###Output
_____no_output_____
###Markdown
**Read the data of the [competition](https://inclass.kaggle.com/c/catch-me-if-you-can-intruder-detection-through-webpage-session-tracking2) into the DataFrames train_df and test_df (training and test sets).**
###Code
# Change this to your own path to the data
PATH_TO_DATA = '/content/'
train_df = pd.read_csv(os.path.join(PATH_TO_DATA, 'train_sessions.csv'),
index_col='session_id')
test_df = pd.read_csv(os.path.join(PATH_TO_DATA, 'test_sessions.csv'),
index_col='session_id')
print('train_df.shape:', train_df.shape)
print('test_df.shape:', test_df.shape)
train_df.head()
###Output
_____no_output_____
###Markdown
Example:
###Code
train_df.head()
###Output
_____no_output_____
###Markdown
**Let's combine the training and test sets – we will need this later to convert them to a sparse format together.**
###Code
train_test_df = pd.concat([train_df, test_df])
print('train_test_df.shape:', train_test_df.shape)
###Output
train_test_df.shape: (336358, 21)
###Markdown
In the training set we see the following features: - site1 – index of the first site visited in the session - time1 – time of the visit to the first site in the session - ... - site10 – index of the 10th site visited in the session - time10 – time of the visit to the 10th site in the session - user_id – user ID Sessions are extracted so that a session cannot be longer than half an hour or 10 sites. That is, a session is considered finished either once the user has visited 10 sites in a row or once the session has lasted more than 30 minutes. **Let's look at the feature statistics.**Missing values appear where sessions are short (fewer than 10 sites). Say, if a person visited *vk.com* at 20:01 on January 1, 2015, then *yandex.ru* at 20:29 and then *google.com* at 20:33, their first session consists of only two sites (site1 – the ID of *vk.com*, time1 – 2015-01-01 20:01:00, site2 – the ID of *yandex.ru*, time2 – 2015-01-01 20:29:00, the remaining features are NaN), and a new session starts with *google.com*, because more than 30 minutes have passed since the visit to *vk.com*.
###Code
train_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 253561 entries, 1 to 253561
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 site1 253561 non-null int64
1 time1 253561 non-null object
2 site2 250098 non-null float64
3 time2 250098 non-null object
4 site3 246919 non-null float64
5 time3 246919 non-null object
6 site4 244321 non-null float64
7 time4 244321 non-null object
8 site5 241829 non-null float64
9 time5 241829 non-null object
10 site6 239495 non-null float64
11 time6 239495 non-null object
12 site7 237297 non-null float64
13 time7 237297 non-null object
14 site8 235224 non-null float64
15 time8 235224 non-null object
16 site9 233084 non-null float64
17 time9 233084 non-null object
18 site10 231052 non-null float64
19 time10 231052 non-null object
20 target 253561 non-null int64
dtypes: float64(9), int64(2), object(10)
memory usage: 42.6+ MB
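###Markdown
To make the session rule concrete, here is a small illustrative sketch (not part of the assignment) of how a time-sorted list of visits could be cut into sessions of at most 10 sites spanning at most 30 minutes:
###Code
# Illustrative only: `visits` is a list of (site, timestamp) pairs sorted by time.
from datetime import datetime, timedelta

def split_into_sessions(visits, max_sites=10, max_span=timedelta(minutes=30)):
    sessions, current = [], []
    for site, ts in visits:
        # start a new session once 10 sites are reached or 30 minutes have passed
        if current and (len(current) == max_sites or ts - current[0][1] > max_span):
            sessions.append(current)
            current = []
        current.append((site, ts))
    if current:
        sessions.append(current)
    return sessions

demo = [('vk.com', datetime(2015, 1, 1, 20, 1)),
        ('yandex.ru', datetime(2015, 1, 1, 20, 29)),
        ('google.com', datetime(2015, 1, 1, 20, 33))]
split_into_sessions(demo)  # two sessions: [vk.com, yandex.ru] and [google.com]
###Output
_____no_output_____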
###Markdown
Example:
###Code
train_df.info()
test_df.head()
###Output
_____no_output_____
###Markdown
Example:
###Code
test_df.head()
test_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 82797 entries, 1 to 82797
Data columns (total 20 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 site1 82797 non-null int64
1 time1 82797 non-null object
2 site2 81308 non-null float64
3 time2 81308 non-null object
4 site3 80075 non-null float64
5 time3 80075 non-null object
6 site4 79182 non-null float64
7 time4 79182 non-null object
8 site5 78341 non-null float64
9 time5 78341 non-null object
10 site6 77566 non-null float64
11 time6 77566 non-null object
12 site7 76840 non-null float64
13 time7 76840 non-null object
14 site8 76151 non-null float64
15 time8 76151 non-null object
16 site9 75484 non-null float64
17 time9 75484 non-null object
18 site10 74806 non-null float64
19 time10 74806 non-null object
dtypes: float64(9), int64(1), object(10)
memory usage: 13.3+ MB
###Markdown
Example:
###Code
test_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 82797 entries, 1 to 82797
Data columns (total 20 columns):
site1 82797 non-null int64
time1 82797 non-null object
site2 81308 non-null float64
time2 81308 non-null object
site3 80075 non-null float64
time3 80075 non-null object
site4 79182 non-null float64
time4 79182 non-null object
site5 78341 non-null float64
time5 78341 non-null object
site6 77566 non-null float64
time6 77566 non-null object
site7 76840 non-null float64
time7 76840 non-null object
site8 76151 non-null float64
time8 76151 non-null object
site9 75484 non-null float64
time9 75484 non-null object
site10 74806 non-null float64
time10 74806 non-null object
dtypes: float64(9), int64(1), object(10)
memory usage: 13.3+ MB
###Markdown
**The training set contains 2297 sessions of one user (Alice) and 251264 sessions of other users, not Alice. The class imbalance is very strong, so looking at the share of correct answers (accuracy) is not informative.**
###Code
train_df['target'].value_counts()
###Output
_____no_output_____
###Markdown
Example:
###Code
train_df['target'].value_counts()
###Output
_____no_output_____
###Markdown
**For now we will use only the indices of the visited sites for prediction. The indices were numbered starting from 1, so we replace the missing values with zeros.**
###Code
train_test_df_sites = train_test_df[['site%d' % i for i in range(1, 11)]].fillna(0).astype('int')
train_test_df_sites.head(10)
###Output
_____no_output_____
###Markdown
Example:
###Code
train_test_df_sites.head(10)
###Output
_____no_output_____
###Markdown
**Create the sparse matrices *X_train_sparse* and *X_test_sparse* in the same way as we did earlier. Use the combined matrix *train_test_df_sites*, then split it back into the training and test parts.**Note that sessions with fewer than 10 sites are left with zeros, so the first feature (how many times 0 occurred) is semantically different from the rest (how many times the site with index $i$ occurred). Therefore the first column of the sparse matrix has to be removed. **Put the answers on the training set into a separate vector *y*.**
###Code
def make_csr_matrix(X):
data = np.ones(X.size, dtype=int)
indices = X.reshape(-1)
indptr = np.arange(X.shape[0] + 1) * X.shape[1]
return csr_matrix((data, indices, indptr), dtype=int)[:, 1:]
train_test_sparse = make_csr_matrix(train_test_df_sites.values)
X_train_sparse = train_test_sparse[:train_df.shape[0], :]
X_test_sparse = train_test_sparse[-test_df.shape[0]:, :]
y = train_df['target'].values
###Output
_____no_output_____
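###Markdown
A quick sanity check of `make_csr_matrix` on a toy array (not required by the assignment): each row should hold the counts of each site index, with the padding column for index 0 dropped.
###Code
# Toy example: row 0 visits site 1 once and site 2 twice, row 1 visits site 3 once
toy = np.array([[1, 2, 2],
                [3, 0, 0]])
make_csr_matrix(toy).todense()  # expected: [[1, 2, 0], [0, 0, 1]]
###Output
_____no_output_____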
###Markdown
**Question 1. Print the dimensions of the matrices *X_train_sparse* and *X_test_sparse* – 4 numbers on one line separated by spaces: the number of rows and columns of *X_train_sparse*, then the number of rows and columns of *X_test_sparse*.**
###Code
def write_answer_to_file(answer, file_address):
with open(file_address, 'w') as out_f:
out_f.write(str(answer))
write_answer_to_file(' '.join(map(str, list(X_train_sparse.shape + X_test_sparse.shape))),
'answer5_1.txt')
!cat answer5_1.txt
###Output
253561 48371 82797 48371
###Markdown
**Let's save the objects *X_train_sparse*, *X_test_sparse* and *y* to pickle files (the latter to the file *kaggle_data/train_target.pkl*).**
###Code
with open(os.path.join(PATH_TO_DATA, 'X_train_sparse.pkl'), 'wb') as X_train_sparse_pkl:
pickle.dump(X_train_sparse, X_train_sparse_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'X_test_sparse.pkl'), 'wb') as X_test_sparse_pkl:
pickle.dump(X_test_sparse, X_test_sparse_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'train_target.pkl'), 'wb') as train_target_pkl:
pickle.dump(y, train_target_pkl, protocol=2)
###Output
_____no_output_____
###Markdown
**Let's split the training set into 2 parts in a 7/3 proportion, without shuffling. The original data are ordered by time, and the test set is clearly separated from the training set in time; we keep the same property here.**
###Code
train_share = int(.7 * X_train_sparse.shape[0])
X_train, y_train = X_train_sparse[:train_share, :], y[:train_share]
X_valid, y_valid = X_train_sparse[train_share:, :], y[train_share:]
###Output
_____no_output_____
###Markdown
**Create a `sklearn.linear_model.SGDClassifier` object with the logistic loss function and the parameter *random_state*=17. Leave the other parameters at their defaults, except that *n_jobs*=-1 never hurts. Train the model on the set `(X_train, y_train)`.**
###Code
sgd_logit = SGDClassifier(loss='log', n_jobs=-1, random_state=17)
sgd_logit.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
**Make a prediction, in the form of predicted probabilities that a session belongs to Alice, on the hold-out set *(X_valid, y_valid)*.**
###Code
logit_valid_pred_proba = sgd_logit.predict_proba(X_valid)
###Output
_____no_output_____
###Markdown
**Question 2. Compute the ROC AUC of the logistic regression trained with stochastic gradient descent on the hold-out set. Round it to 3 decimal places.**
###Code
write_answer_to_file(np.round(roc_auc_score(y_valid, logit_valid_pred_proba[:, 1]), 3),
'answer5_2.txt')
!cat answer5_2.txt
###Output
0.934
###Markdown
**Make a prediction, in the form of predicted probabilities of belonging to class 1, for the test set using the same *sgd_logit*, this time trained on the whole training set (rather than on 70% of it).**
###Code
%%time
sgd_logit.fit(X_train_sparse, y)
logit_test_pred_proba = sgd_logit.predict_proba(X_test_sparse)
###Output
CPU times: user 786 ms, sys: 107 ms, total: 893 ms
Wall time: 779 ms
###Markdown
**Write the answers to a file and make a submission on Kaggle. Give your (one-person) team on Kaggle a telling name following the template "[YDF & MIPT] Coursera_Username", so that your submission can easily be identified on the [leaderboard](https://inclass.kaggle.com/c/catch-me-if-you-can-intruder-detection-through-webpage-session-tracking2/leaderboard/public).****The result we have just obtained corresponds to the "SGDClassifier" baseline on the leaderboard; the task for this week is, at the very least, to beat it.**
###Code
def write_to_submission_file(predicted_labels, out_file,
target='target', index_label="session_id"):
# turn predictions into data frame and save as csv file
predicted_df = pd.DataFrame(predicted_labels,
index = np.arange(1, predicted_labels.shape[0] + 1),
columns=[target])
predicted_df.to_csv(out_file, index_label=index_label)
write_to_submission_file(logit_test_pred_proba[:, 1], 'week5_sgd_test_pred_proba.csv')
###Output
_____no_output_____ |
50_gan_generation/5-1_2_DCGAN.ipynb | ###Markdown
5.1-2 Building a DCGAN- In this file we implement the DCGAN network and train the DCGAN. 5.1 Learning goals1. Understand the neural-network structure the Generator uses to generate images2. Understand the neural-network structure the Discriminator uses to classify images3. Understand the general form of the GAN loss function and the flow of training the networks4. Be able to implement the DCGAN network 5.2 Learning goals1. Understand the form of the GAN loss function2. Implement a DCGAN and generate handwritten-digit images
###Code
# パッケージのimport
import random
import math
import time
import pandas as pd
import numpy as np
from PIL import Image
import torch
import torch.utils.data as data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
# Setup seeds
torch.manual_seed(1234)
np.random.seed(1234)
random.seed(1234)
###Output
_____no_output_____
###Markdown
Example of transposed convolution
###Code
input = torch.tensor([[[[1., 1.], [2., 2.]]]])
print("入力データ")
print(input)
print("-----")
print("通常の畳み込み")
m = nn.Conv2d(1, 1, 2, stride=1, bias=False)
m.weight[0, 0, 0, 0] = 1
m.weight[0, 0, 0, 1] = 2
m.weight[0, 0, 1, 0] = 3
m.weight[0, 0, 1, 1] = 4
print("カーネル")
print(m.weight)
print("出力")
print(m(input))
print("-----")
print("転置畳み込み")
m = nn.ConvTranspose2d(1, 1, 2, stride=1, bias=False)
m.weight[0, 0, 0, 0] = 1
m.weight[0, 0, 0, 1] = 2
m.weight[0, 0, 1, 0] = 3
m.weight[0, 0, 1, 1] = 4
print("カーネル")
print(m.weight)
print("出力")
print(m(input))
###Output
入力データ
tensor([[[[1., 1.],
[2., 2.]]]])
-----
通常の畳み込み
カーネル
Parameter containing:
tensor([[[[1., 2.],
[3., 4.]]]], grad_fn=<CopySlices>)
出力
tensor([[[[17.]]]], grad_fn=<ThnnConv2DBackward>)
-----
転置畳み込み
カーネル
Parameter containing:
tensor([[[[1., 2.],
[3., 4.]]]], grad_fn=<CopySlices>)
出力
tensor([[[[ 1., 3., 2.],
[ 5., 13., 8.],
[ 6., 14., 8.]]]], grad_fn=<SlowConvTranspose2DBackward>)
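###Markdown
The spatial size produced by `nn.ConvTranspose2d` (with dilation 1 and no output padding) is H_out = (H_in - 1) * stride - 2 * padding + kernel_size. A small check of this formula for the layer settings used in the Generator below, where kernel 4 / stride 2 / padding 1 doubles the feature-map size at every step:
###Code
def convtranspose2d_out(h_in, kernel_size, stride, padding):
    # output size of ConvTranspose2d with dilation=1 and output_padding=0
    return (h_in - 1) * stride - 2 * padding + kernel_size

print(convtranspose2d_out(1, kernel_size=4, stride=1, padding=0))  # 1x1 -> 4x4
size = 4
for _ in range(4):
    size = convtranspose2d_out(size, kernel_size=4, stride=2, padding=1)
    print(size)  # 8, 16, 32, 64
###Output
_____no_output_____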
###Markdown
Implementing the Generator
###Code
class Generator(nn.Module):
def __init__(self, z_dim=20, image_size=64):
super(Generator, self).__init__()
self.layer1 = nn.Sequential(
nn.ConvTranspose2d(z_dim, image_size * 8,
kernel_size=4, stride=1),
nn.BatchNorm2d(image_size * 8),
nn.ReLU(inplace=True))
self.layer2 = nn.Sequential(
nn.ConvTranspose2d(image_size * 8, image_size * 4,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(image_size * 4),
nn.ReLU(inplace=True))
self.layer3 = nn.Sequential(
nn.ConvTranspose2d(image_size * 4, image_size * 2,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(image_size * 2),
nn.ReLU(inplace=True))
self.layer4 = nn.Sequential(
nn.ConvTranspose2d(image_size * 2, image_size,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(image_size),
nn.ReLU(inplace=True))
self.last = nn.Sequential(
nn.ConvTranspose2d(image_size, 1, kernel_size=4,
stride=2, padding=1),
nn.Tanh())
# Note: the images are grayscale, so there is only one output channel
def forward(self, z):
out = self.layer1(z)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.last(out)
return out
# Sanity check
import matplotlib.pyplot as plt
%matplotlib inline
G = Generator(z_dim=20, image_size=64)
# 入力する乱数
input_z = torch.randn(1, 20)
# テンソルサイズを(1, 20, 1, 1)に変形
input_z = input_z.view(input_z.size(0), input_z.size(1), 1, 1)
# 偽画像を出力
fake_images = G(input_z)
img_transformed = fake_images[0][0].detach().numpy()
plt.imshow(img_transformed, 'gray')
plt.show()
###Output
findfont: Font family ['IPAexGothic'] not found. Falling back to DejaVu Sans.
###Markdown
Implementing the Discriminator
###Code
class Discriminator(nn.Module):
def __init__(self, z_dim=20, image_size=64):
super(Discriminator, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, image_size, kernel_size=4,
stride=2, padding=1),
nn.LeakyReLU(0.1, inplace=True))
# Note: the images are grayscale, so there is only one input channel
self.layer2 = nn.Sequential(
nn.Conv2d(image_size, image_size*2, kernel_size=4,
stride=2, padding=1),
nn.LeakyReLU(0.1, inplace=True))
self.layer3 = nn.Sequential(
nn.Conv2d(image_size*2, image_size*4, kernel_size=4,
stride=2, padding=1),
nn.LeakyReLU(0.1, inplace=True))
self.layer4 = nn.Sequential(
nn.Conv2d(image_size*4, image_size*8, kernel_size=4,
stride=2, padding=1),
nn.LeakyReLU(0.1, inplace=True))
self.last = nn.Conv2d(image_size*8, 1, kernel_size=4, stride=1)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.last(out)
return out
# Sanity check
D = Discriminator(z_dim=20, image_size=64)
# 偽画像を生成
input_z = torch.randn(1, 20)
input_z = input_z.view(input_z.size(0), input_z.size(1), 1, 1)
fake_images = G(input_z)
# 偽画像をDに入力
d_out = D(fake_images)
# 出力d_outにSigmoidをかけて0から1に変換
print(nn.Sigmoid()(d_out))
###Output
tensor([[[[0.5014]]]], grad_fn=<SigmoidBackward>)
###Markdown
The GAN loss function
###Code
# Sketch of D's loss function
# maximize log(D(x)) + log(1 - D(G(z)))
# Note: x is undefined here, so running this cell as-is raises an error
#---------------
# Create the real labels
mini_batch_size = 2
label_real = torch.full((mini_batch_size,), 1)
# 偽ラベルを作成
label_fake = torch.full((mini_batch_size,), 0)
# 誤差関数を定義
criterion = nn.BCEWithLogitsLoss(reduction='mean')
# 真の画像を判定
d_out_real = D(x)
# 偽の画像を生成して判定
input_z = torch.randn(mini_batch_size, 20)
input_z = input_z.view(input_z.size(0), input_z.size(1), 1, 1)
fake_images = G(input_z)
d_out_fake = D(fake_images)
# 誤差を計算
d_loss_real = criterion(d_out_real.view(-1), label_real)
d_loss_fake = criterion(d_out_fake.view(-1), label_fake)
d_loss = d_loss_real + d_loss_fake
# Sketch of G's loss function
# maximize log(D(G(z)))
# Note: x is undefined here, so running this cell as-is raises an error
#---------------
# Generate fake images and judge them
input_z = torch.randn(mini_batch_size, 20)
input_z = input_z.view(input_z.size(0), input_z.size(1), 1, 1)
fake_images = G(input_z)
d_out_fake = D(fake_images)
# 誤差を計算
g_loss = criterion(d_out_fake.view(-1), label_real)
###Output
_____no_output_____
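###Markdown
Because `x` is undefined, the sketch above does not run as written; a self-contained toy version that uses random tensors in place of real images (illustrative only) computes the same two losses end to end:
###Code
# Runnable toy version of the loss computations, with random data standing in for real images
mini_batch_size = 2
criterion = nn.BCEWithLogitsLoss(reduction='mean')
label_real = torch.full((mini_batch_size,), 1.0)
label_fake = torch.full((mini_batch_size,), 0.0)

x = torch.randn(mini_batch_size, 1, 64, 64)  # stand-in for real images
input_z = torch.randn(mini_batch_size, 20).view(mini_batch_size, 20, 1, 1)

d_out_real = D(x)
fake_images = G(input_z)
d_out_fake = D(fake_images)

d_loss = (criterion(d_out_real.view(-1), label_real)
          + criterion(d_out_fake.view(-1), label_fake))
g_loss = criterion(d_out_fake.view(-1), label_real)
print(d_loss.item(), g_loss.item())
###Output
_____no_output_____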
###Markdown
Creating the DataLoader
###Code
def make_datapath_list():
"""学習、検証の画像データとアノテーションデータへのファイルパスリストを作成する。 """
train_img_list = list() # 画像ファイルパスを格納
for img_idx in range(200):
img_path = "./data/img_78/img_7_" + str(img_idx)+'.jpg'
train_img_list.append(img_path)
img_path = "./data/img_78/img_8_" + str(img_idx)+'.jpg'
train_img_list.append(img_path)
return train_img_list
class ImageTransform():
"""画像の前処理クラス"""
def __init__(self, mean, std):
self.data_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
def __call__(self, img):
return self.data_transform(img)
class GAN_Img_Dataset(data.Dataset):
"""画像のDatasetクラス。PyTorchのDatasetクラスを継承"""
def __init__(self, file_list, transform):
self.file_list = file_list
self.transform = transform
def __len__(self):
'''画像の枚数を返す'''
return len(self.file_list)
def __getitem__(self, index):
'''前処理をした画像のTensor形式のデータを取得'''
img_path = self.file_list[index]
img = Image.open(img_path) # [高さ][幅]白黒
# 画像の前処理
img_transformed = self.transform(img)
return img_transformed
# DataLoaderの作成と動作確認
# ファイルリストを作成
train_img_list=make_datapath_list()
# Datasetを作成
mean = (0.5,)
std = (0.5,)
train_dataset = GAN_Img_Dataset(
file_list=train_img_list, transform=ImageTransform(mean, std))
# DataLoaderを作成
batch_size = 64
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True)
# 動作の確認
batch_iterator = iter(train_dataloader) # イテレータに変換
imges = next(batch_iterator) # 1番目の要素を取り出す
print(imges.size()) # torch.Size([64, 1, 64, 64])
###Output
torch.Size([64, 1, 64, 64])
###Markdown
Training the model
###Code
# ネットワークの初期化
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
# Conv2dとConvTranspose2dの初期化
nn.init.normal_(m.weight.data, 0.0, 0.02)
nn.init.constant_(m.bias.data, 0)
elif classname.find('BatchNorm') != -1:
# BatchNorm2dの初期化
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
# 初期化の実施
G.apply(weights_init)
D.apply(weights_init)
print("ネットワークの初期化完了")
# モデルを学習させる関数を作成
def train_model(G, D, dataloader, num_epochs):
# GPUが使えるかを確認
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("使用デバイス:", device)
# 最適化手法の設定
g_lr, d_lr = 0.0001, 0.0004
beta1, beta2 = 0.0, 0.9
g_optimizer = torch.optim.Adam(G.parameters(), g_lr, [beta1, beta2])
d_optimizer = torch.optim.Adam(D.parameters(), d_lr, [beta1, beta2])
# 誤差関数を定義
criterion = nn.BCEWithLogitsLoss(reduction='mean')
# パラメータをハードコーディング
z_dim = 20
mini_batch_size = 64
# ネットワークをGPUへ
G.to(device)
D.to(device)
G.train() # モデルを訓練モードに
D.train() # モデルを訓練モードに
# ネットワークがある程度固定であれば、高速化させる
torch.backends.cudnn.benchmark = True
# 画像の枚数
num_train_imgs = len(dataloader.dataset)
batch_size = dataloader.batch_size
# イテレーションカウンタをセット
iteration = 1
logs = []
# epochのループ
for epoch in range(num_epochs):
# 開始時刻を保存
t_epoch_start = time.time()
epoch_g_loss = 0.0 # epochの損失和
epoch_d_loss = 0.0 # epochの損失和
print('-------------')
print('Epoch {}/{}'.format(epoch, num_epochs))
print('-------------')
print('(train)')
# データローダーからminibatchずつ取り出すループ
for imges in dataloader:
# --------------------
# 1. Discriminatorの学習
# --------------------
# ミニバッチがサイズが1だと、バッチノーマライゼーションでエラーになるのでさける
if imges.size()[0] == 1:
continue
# GPUが使えるならGPUにデータを送る
imges = imges.to(device)
# 正解ラベルと偽ラベルを作成
# epochの最後のイテレーションはミニバッチの数が少なくなる
mini_batch_size = imges.size()[0]
label_real = torch.full((mini_batch_size,), 1).to(device)
label_fake = torch.full((mini_batch_size,), 0).to(device)
# 真の画像を判定
d_out_real = D(imges)
# 偽の画像を生成して判定
input_z = torch.randn(mini_batch_size, z_dim).to(device)
input_z = input_z.view(input_z.size(0), input_z.size(1), 1, 1)
fake_images = G(input_z)
d_out_fake = D(fake_images)
# 誤差を計算
# print(d_out_real.view(-1))
# print(label_real.astype(flout))
label_real = label_real.type_as(d_out_real.view(-1))
label_fake = label_fake.type_as(d_out_fake.view(-1))
d_loss_real = criterion(d_out_real.view(-1), label_real)
d_loss_fake = criterion(d_out_fake.view(-1), label_fake)
d_loss = d_loss_real + d_loss_fake
# バックプロパゲーション
g_optimizer.zero_grad()
d_optimizer.zero_grad()
d_loss.backward()
d_optimizer.step()
# --------------------
# 2. Generatorの学習
# --------------------
# 偽の画像を生成して判定
input_z = torch.randn(mini_batch_size, z_dim).to(device)
input_z = input_z.view(input_z.size(0), input_z.size(1), 1, 1)
fake_images = G(input_z)
d_out_fake = D(fake_images)
# 誤差を計算
g_loss = criterion(d_out_fake.view(-1), label_real)
# バックプロパゲーション
g_optimizer.zero_grad()
d_optimizer.zero_grad()
g_loss.backward()
g_optimizer.step()
# --------------------
# 3. 記録
# --------------------
epoch_d_loss += d_loss.item()
epoch_g_loss += g_loss.item()
iteration += 1
# epochのphaseごとのlossと正解率
t_epoch_finish = time.time()
print('-------------')
print('epoch {} || Epoch_D_Loss:{:.4f} ||Epoch_G_Loss:{:.4f}'.format(
epoch, epoch_d_loss/batch_size, epoch_g_loss/batch_size))
print('timer: {:.4f} sec.'.format(t_epoch_finish - t_epoch_start))
t_epoch_start = time.time()
return G, D
# 学習・検証を実行する
# 6分ほどかかる
num_epochs = 200
G_update, D_update = train_model(
G, D, dataloader=train_dataloader, num_epochs=num_epochs)
# 生成画像と訓練データを可視化する
# 本セルは良い感じの画像が生成されるまで、何度も実行し直しています。
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# 入力の乱数生成
batch_size = 8
z_dim = 20
fixed_z = torch.randn(batch_size, z_dim)
fixed_z = fixed_z.view(fixed_z.size(0), fixed_z.size(1), 1, 1)
# 画像生成
G_update.eval()
fake_images = G_update(fixed_z.to(device))
# 訓練データ
batch_iterator = iter(train_dataloader) # イテレータに変換
imges = next(batch_iterator) # 1番目の要素を取り出す
# 出力
fig = plt.figure(figsize=(15, 6))
for i in range(0, 5):
# 上段に訓練データを
plt.subplot(2, 5, i+1)
plt.imshow(imges[i][0].cpu().detach().numpy(), 'gray')
# 下段に生成データを表示する
plt.subplot(2, 5, 5+i+1)
plt.imshow(fake_images[i][0].cpu().detach().numpy(), 'gray')
###Output
_____no_output_____ |
Python Data Science Toolbox -Part 2/Bringing it all together/01. Dictionaries for data science.ipynb | ###Markdown
Dictionaries for data scienceFor this exercise, you'll use what you've learned about the zip() function and combine two lists into a dictionary.These lists are actually extracted from a bigger dataset file of world development indicators from the World Bank. For pedagogical purposes, we have pre-processed this dataset into the lists that you'll be working with.The first list feature_names contains header names of the dataset and the second list row_vals contains actual values of a row from the dataset, corresponding to each of the header names. Instructions- Create a zip object by calling zip() and passing to it feature_names and row_vals. Assign the result to zipped_lists.- Create a dictionary from the zipped_lists zip object by calling dict() with zipped_lists. Assign the resulting dictionary to rs_dict.
###Code
import pandas as pd
df=pd.read_csv('world_ind_pop_data.csv')
df.head()
# Pre-defined lists
feature_names = ['CountryName', 'CountryCode',
'IndicatorName', 'IndicatorCode', 'Year', 'Value']
row_vals = ['Arab World', 'ARB',
'Adolescent fertility rate (births per 1,000 women ages 15-19)', 'SP.ADO.TFRT',
'1960', '133.56090740552298']
# Zip lists: zipped_lists
zipped_lists = zip(feature_names, row_vals)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Print the dictionary
print(rs_dict)
###Output
{'CountryName': 'Arab World', 'CountryCode': 'ARB', 'IndicatorName': 'Adolescent fertility rate (births per 1,000 women ages 15-19)', 'IndicatorCode': 'SP.ADO.TFRT', 'Year': '1960', 'Value': '133.56090740552298'}
|
midterm/ProjectEuler52.ipynb | ###Markdown
Project Euler: Problem 52 https://projecteuler.net/problem=52It can be seen that the number, $125874$, and its double, $251748$, contain exactly the same digits, but in a different order.Find the smallest positive integer, $x$, such that $2x$, $3x$, $4x$, $5x$, and $6x$, contain the same digits. First, write a function `same_digits(x,y)` that returns `True` if two integers `x` and `y` have the exact same set of digits and multiplicities and `False` if they have different digits.
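###Markdown
As a quick illustration of the property in the problem statement (not the required function), the pair 125874 and 251748 can be checked by comparing sorted digit strings:
###Code
# 125874 and its double 251748 contain exactly the same digits
sorted(str(125874)) == sorted(str(125874 * 2))  # True
###Output
_____no_output_____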
###Code
def same_digits(x, y):
"""Do the integers x and y have the same digits, regardless of order."""
'''if len(x)==len(y):
a=True
for i in x:
if i in y:
b=True
if a==True and b==True:
return True'''
X=[]
Y=[]
for i in x:
X.append(int(i))
for i in y:
Y.append(int(i))
    #put X and Y in numerical order so the digit multisets can be compared
    X.sort()
    Y.sort()
    if X==Y:
        return True
    return False
'''split x and y into a list X and Y
use function to put the list in numerical order
if X==Y return True'''
same_digits('537653','82462846')
assert same_digits('132', '321')
assert not same_digits('123', '3')
assert not same_digits('456', '0987654321')
###Output
_____no_output_____
###Markdown
Now use the `same_digits` function to solve this Euler problem. As you work on this problem, be careful to debug and test your code on small integers before trying it on the full search.
###Code
# YOUR CODE HERE
raise NotImplementedError()
assert True # leave this cell to grade the solution
###Output
_____no_output_____
###Markdown
Project Euler: Problem 52 https://projecteuler.net/problem=52It can be seen that the number, $125874$, and its double, $251748$, contain exactly the same digits, but in a different order.Find the smallest positive integer, $x$, such that $2x$, $3x$, $4x$, $5x$, and $6x$, contain the same digits. First, write a function `same_digits(x,y)` that returns `True` if two integers `x` and `y` have the exact same set of digits and multiplicities and `False` if they have different digits.
###Code
x='123'
y='321'
if sorted(y)==sorted(x):
print('yes')
def same_digits(x, y):
"""Do the integers x and y have the same digits, regardless of order."""
if sorted(x)==sorted(y):
return True
else:
return False
assert same_digits('132', '321')
assert not same_digits('123', '3')
assert not same_digits('456', '0987654321')
###Output
_____no_output_____
###Markdown
Now use the `same_digits` function to solve this Euler problem. As you work on this problem, be careful to debug and test your code on small integers before trying it on the full search.
###Code
lst=[]
for i in range(1,1000000):
f=str(i)
g=str(int(f)*2)
k=str(int(f)*3)
h=str(int(f)*4)
m=str(int(f)*5)
c=str(int(f)*6)
if same_digits(f,g)==True:
lst.append(f)
elif same_digits(f,k)==True:
lst.append(f)
elif same_digits(f,h)==True:
lst.append(f)
elif same_digits(f,m)==True:
lst.append(f)
elif same_digits(f,c)==True:
lst.append(f)
min(lst)
assert True # leave this cell to grade the solution
###Output
_____no_output_____
###Markdown
Project Euler: Problem 52 https://projecteuler.net/problem=52It can be seen that the number, $125874$, and its double, $251748$, contain exactly the same digits, but in a different order.Find the smallest positive integer, $x$, such that $2x$, $3x$, $4x$, $5x$, and $6x$, contain the same digits. First, write a function `same_digits(x,y)` that returns `True` if two integers `x` and `y` have the exact same set of digits and multiplicities and `False` if they have different digits.
###Code
def same_digits(x, y):
"""Do the integers x and y have the same digits, regardless of order."""
q = list(filter(lambda x: x in y, y))
p = list(filter(lambda y: y in x, x))
for i in q:
if i in p and len(q) == len(p):
y = True
else:
y = False
return y
#x ='132'; y = '321'
#
#q = list(filter(lambda x: x in y, y))
#p = list(filter(lambda y: y in x, x))
#
#for i in q:
# if i in p:
# y = True
# else:
# y = False
#print(y)
#
#print(q)
#print(p)
assert same_digits('132', '321')
assert not same_digits('123', '3')
assert not same_digits('456', '0987654321')
###Output
_____no_output_____
###Markdown
Now use the `same_digits` function to solve this Euler problem. As you work on this problem, be careful to debug and test your code on small integers before trying it on the full search.
###Code
xx=range(1,100)
yy=range(1,100)
pal_list =[]
for xnum in xx:
for ynum in yy:
pal_list.append((xnum, ynum))
list0 = str(pal_list)
# I found the list of the (x,y) combination for range 1 - 100.... I ran out of time :(
assert True # leave this cell to grade the solution
###Output
_____no_output_____
###Markdown
Project Euler: Problem 52 https://projecteuler.net/problem=52It can be seen that the number, $125874$, and its double, $251748$, contain exactly the same digits, but in a different order.Find the smallest positive integer, $x$, such that $2x$, $3x$, $4x$, $5x$, and $6x$, contain the same digits. First, write a function `same_digits(x,y)` that returns `True` if two integers `x` and `y` have the exact same set of digits and multiplicities and `False` if they have different digits.
###Code
def same_digits(x, y):
"""Do the integers x and y have the same digits, regardless of order."""
z = []
w = []
n = 0
x = str(x)
y = str(y)
thing = True
while n < len(x):
z.append(x[n])
n = n+1
n = 0
while n < len(y):
w.append(y[n])
n = n+1
z = sorted(z)
w = sorted(w)
return z == w
#same_digits('132', '321')
assert same_digits('132', '321')
assert not same_digits('123', '3')
assert not same_digits('456', '0987654321')
###Output
_____no_output_____
###Markdown
Now use the `same_digits` function to solve this Euler problem. As you work on this problem, be careful to debug and test your code on small integers before trying it on the full search.
###Code
x = 0
while x < 10000000:
x = x + 1
if same_digits(x, 2*x) and same_digits(x, 3*x) and same_digits(x, 4*x) and same_digits(x, 5*x) and same_digits(x, 6*x):
z = x
break
print(z)
assert True # leave this cell to grade the solution
###Output
_____no_output_____ |
Imagenet Image Classification using Keras/ImageNet32x32.ipynb | ###Markdown
ImageNet using AlexNet
###Code
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
import os
import numpy as np
from PIL import Image
from keras.regularizers import l2
import matplotlib.pyplot as plt  # needed by the plotter() cell below
from google.colab import files
uploaded=files.upload()
def get_annotations_map():
valAnnotationsPath = 'tiny-imagenet-200/val/val_annotations.txt'
valAnnotationsFile = open(valAnnotationsPath, 'r')
valAnnotationsContents = valAnnotationsFile.read()
valAnnotations = {}
for line in valAnnotationsContents.splitlines():
pieces = line.strip().split()
valAnnotations[pieces[0]] = pieces[1]
return valAnnotations
import zipfile,io
data = zipfile.ZipFile(io.BytesIO(uploaded['tiny-imagenet-200.zip']),'r')
data.extractall()
train_data_dir = 'tiny-imagenet-200/train'
validation_data_dir = 'tiny-imagenet-200/val'
test_data_dir = 'tiny-imagenet-200/test'
target_names = [item for item in os.listdir(train_data_dir) if os.path.isdir(os.path.join(train_data_dir, item))]
nb_train_samples = sum([len(files) for _,_ , files in os.walk(train_data_dir)])
nb_validation_samples = sum([len(files) for _,_ , files in os.walk(validation_data_dir)])
nb_test_samples = sum([len(files) for _,_ , files in os.walk(test_data_dir)])
total_nb_samples = nb_train_samples + nb_validation_samples + nb_test_samples
nb_classes = len(target_names) # number of output classes
print('Training a CNN Multi-Classifier Model ......')
print('\n - names of classes: ', target_names, '\n - # of classes: ', nb_classes)
print(' - # of trained samples: ', nb_train_samples, '\n - # of validation samples: ', nb_validation_samples,
'\n - # of test samples: ', nb_test_samples,
'\n - total # of samples: ', total_nb_samples, '\n - train ratio:', round(nb_train_samples/total_nb_samples*100, 2),
'\n - validation ratio:', round(nb_validation_samples/total_nb_samples*100, 2),
'\n - test ratio:', round(nb_test_samples/total_nb_samples*100, 2),
' %', '\n - # of epochs: ', 2, '\n - batch size: ', 500)
def load_images(path,num_classes):
#Load images
print('Loading ' + str(num_classes) + ' classes')
X_train=np.zeros([num_classes*500,3,32,32],dtype='uint8')
y_train=np.zeros([num_classes*500], dtype='uint8')
print('loading training images...');
trainPath=path+'/train'
i=0
j=0
annotations={}
for sChild in os.listdir(trainPath):
sChildPath = os.path.join(os.path.join(trainPath,sChild),'images')
annotations[sChild]=j
for c in os.listdir(sChildPath):
X=np.array(Image.open(os.path.join(sChildPath,c)))
if len(np.shape(X))==2:
X_train[i]=np.array([X,X,X])
else:
X_train[i]=np.transpose(X,(2,0,1))
y_train[i]=j
i+=1
j+=1
if (j >= num_classes):
break
print('finished loading training images')
val_annotations_map = get_annotations_map()
X_test = np.zeros([num_classes*50,3,32,32],dtype='uint8')
y_test = np.zeros([num_classes*50], dtype='uint8')
print('loading test images...')
i = 0
valPath=path+ '/val/images'
for sChild in os.listdir(valPath):
if val_annotations_map[sChild] in annotations.keys():
sChildPath = os.path.join(valPath, sChild)
X=np.array(Image.open(sChildPath))
if len(np.shape(X))==2:
X_test[i]=np.array([X,X,X])
else:
X_test[i]=np.transpose(X,(2,0,1))
y_test[i]=annotations[val_annotations_map[sChild]]
i+=1
else:
pass
print('finished loading test images')#+str(i)
return X_train,y_train,X_test,y_test
path=r'tiny-imagenet-200/'
X_train,y_train,X_test,y_test=load_images(path,200)#input data path & numbers of classes
###Output
Loading 200 classes
loading training images...
finished loading training images
loading test images...
finished loading test images
###Markdown
Data Normalization
###Code
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train / 255.0
X_test = X_test / 255.0
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
###Output
_____no_output_____
###Markdown
Designing the AlexNet for images of size 32x32
###Code
alexnet = Sequential()
l2_reg = 0.
# Layer 1
alexnet.add(Conv2D(96, (11, 11), input_shape=(3,32,32),
padding='same', kernel_regularizer=l2(l2_reg)))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
#alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 2
alexnet.add(Conv2D(256, (5, 5), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
alexnet.add(Dropout(0.3))
# Layer 3
alexnet.add(ZeroPadding2D((1, 1)))
alexnet.add(Conv2D(512, (3, 3), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
alexnet.add(Dropout(0.3))
# Layer 4
alexnet.add(ZeroPadding2D((1, 1)))
alexnet.add(Conv2D(1024, (3, 3), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(Dropout(0.5))
# Layer 5
alexnet.add(ZeroPadding2D((1, 1)))
alexnet.add(Conv2D(1024, (3, 3), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 6
alexnet.add(Flatten())
alexnet.add(Dense(3072))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(Dropout(0.5))
# Layer 7
alexnet.add(Dense(4096))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(Dropout(0.5))
# Layer 8
alexnet.add(Dense(200))
alexnet.add(BatchNormalization())
alexnet.add(Activation('softmax'))
alexnet.summary()
epoch = 50
learn_rate = 0.0001
dec = learn_rate /epoch
adam=keras.optimizers.Adam(learn_rate)
# Compile the model
alexnet.compile(loss=keras.losses.categorical_crossentropy,optimizer=adam, metrics=['accuracy'])
history = alexnet.fit(X_train,y_train,epochs=epoch, validation_data=(X_test, y_test),batch_size=256)
# Final evaluation of the model
scores = alexnet.evaluate(X_test, y_test, verbose=2)
print("Accuracy: %.2f%%" % (scores[1]*100))
def plotter(trained_record):
# Loss Curves
plt.figure(figsize=[8,6])
plt.plot(trained_record.history['loss'],'r',linewidth=3.0)
plt.plot(trained_record.history['val_loss'],'b',linewidth=3.0)
plt.legend(['Training loss', 'Validation Loss'],fontsize=18)
plt.xlabel('Epochs ',fontsize=16)
plt.ylabel('Loss',fontsize=16)
plt.title('Loss Curves',fontsize=16)
plt.savefig('try1.png')
# Accuracy Curves
plt.figure(figsize=[8,6])
plt.plot(trained_record.history['acc'],'r',linewidth=3.0)
plt.plot(trained_record.history['val_acc'],'b',linewidth=3.0)
plt.legend(['Training Accuracy', 'Validation Accuracy'],fontsize=18)
plt.xlabel('Epochs ',fontsize=16)
plt.ylabel('Accuracy',fontsize=16)
plt.title('Accuracy Curves',fontsize=16)
plt.savefig('try2.png')
plotter(history)
###Output
_____no_output_____
###Markdown
ConclusionReducing the number of neurons did not improve the performance of the CNN classification model, so we can reject the hypothesis that the model would perform better after reducing the number of neurons: the model gives performance similar to that of the original network. The model started overfitting after the 20th epoch.
###Code
###Output
_____no_output_____ |
ECON 425 - Machine Learning/KNN.ipynb | ###Markdown
Effect of Increasing K in KNN Learning To test the effect of increasing K, I wrote a for loop to train the model with every K from 1 to 112, the size of the training set. Naturally, I think that as K increases, the model will become less accurate. This is because as it reaches out to too many neighbors for information on the iris type, it will drift too far into the wrong category, and wrong predictions will become more common.
###Code
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from matplotlib import pyplot as plt

# load the iris data before splitting it
iris_dataset = load_iris()

X_train, X_test, y_train, y_test = train_test_split(
    iris_dataset['data'], iris_dataset['target'], random_state=0)
knn = KNeighborsClassifier(n_neighbors=40)
knn.fit(X_train, y_train)
print("Test set score: {:.2f}".format(knn.score(X_test, y_test)))
empty = []
for i in range (1,113):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
empty.append(knn.score(X_test, y_test))
plt.plot(empty)
###Output
Test set score: 0.87
|
InvestigationAnscombesQuartet.ipynb | ###Markdown
Analysis of Anscombe's Quartet In this Jupyter Notebook I will analyse the Anscombe's Quartet dataset. I will carry out the following four tasks:1. I will explain the background to the dataset. 2. I will calculate the descriptive statistics of the variables in the dataset. 3. I will plot interesting aspects of the dataset. 4. I will explain why the dataset is interesting by referring to the plots and statistics. 1. Background information on the dataset.Anscombe's Quartet was created by Francis Anscombe in 1973 for his classic paper "Graphs in Statistical Analysis". It comprises four datasets that have almost identical linear regression coefficients; each of the datasets in the quartet consists of 11 (x,y) points. All datasets appear similar when examined using simple summary statistics, but vary considerably when graphed. Anscombe created the dataset to demonstrate the effect that outliers can have on the statistical findings of a dataset and the importance of visualizing data before analyzing it. The man behind the dataset:Francis John "Frank" Anscombe was born in 1918 and died in 2001. He was an English statistician who served in World War II and later became a professor. Anscombe was among the first to argue that graphs and visualization should be included as a crucial step in any standard data analysis. Libraries
###Code
import sklearn.neighbors as nel
import pandas as pd #import pandas
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import *
%matplotlib inline
###Output
_____no_output_____
###Markdown
Graphs and the dataset I will work with.
###Code
#Define the anscombe dataset that ships with seaborn:
dfx = sns.load_dataset("anscombe")
#I want to demonstrate that straight away when I pull the dataset data and graphs,
#I can see what I will investigate.
#Graphs of each Dataset are different even though their statistical properties are almost identical.
#Let’s begin the investigation.
sns.lmplot(x="x", y="y", col="dataset", hue="dataset", data=dfx,
col_wrap=2, ci=None, palette="muted", height=4,
scatter_kws={"s": 50, "alpha": 1})
#Im uploading the Dataset using URL
#Datasets are split into four x columns and four y columns.
df = pd.read_csv("https://raw.githubusercontent.com/MartynaMisk/AnscombesQuartet/master/data.csv")
df
###Output
_____no_output_____
###Markdown
2. Calculating the descriptive statistics of the variables in the dataset. I will investigate the following statistical properties: 1. The mean is 9 for x and 7.50 for y in each dataset. 2. The variance is 11 for x and 4.12 for y in each dataset. 3. The correlation between x and y is 0.816 for each dataset. 4. A linear regression (line of best fit) for each dataset follows the equation y = 0.5x + 3. The purpose of demonstrating these statistics is to show that Anscombe's four datasets have the same statistical properties.
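###Markdown
As a compact preview (using the seaborn copy of the data loaded above as dfx), the grouped summary below shows how close the per-dataset means and variances are; the individual checks follow.
###Code
# Per-dataset mean and variance computed in one go on the seaborn copy of the quartet
dfx.groupby('dataset').agg(['mean', 'var'])
###Output
_____no_output_____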
###Code
df[['x1' , 'y1']] #interesting feature with which it is easier to target a specific column to have a closer look.
#We can see values for Dataset 1 x and y axis.
###Output
_____no_output_____
###Markdown
2.1 Mean of Dataset 1, Dataset 2, Dataset 3 and Dataset 4. Dataset 1 (x1 and y1)
###Code
#To begin I want to try to calculate the mean of Dataset 1 x-axis(x1):
df['x1'].mean()
#To get mean of Dataset 1 y-axis(y1):
df['y1'].mean()
#This will show more clearly the mean of Dataset 1 (x and y) and the identical result of the above finding.
df.loc[:,['x1', 'y1']].mean() #Dataset 1 has x-axis mean of 9 and y-axis mean on 7.5
###Output
_____no_output_____
###Markdown
Dataset 2 (x2 and y2)
###Code
#To calculate the mean of Dataset 2 (x2 and y2) I will use the same code as above but use Dataset 2 varibales.
#The output should be 9 for x.
#The output should be 7.5 for y.
df.loc[:,['x2', 'y2']].mean() #The result of the mean of Dataset 2 matches Dataset 1.
###Output
_____no_output_____
###Markdown
Dataset 3 (x3 and y3)
###Code
df.loc[:,['x3', 'y3']].mean()
#The results match the mean of the previous two Datasets:
###Output
_____no_output_____
###Markdown
Dataset 4 (x4 and y4)
###Code
df.loc[:,['x4', 'y4']].mean()
#The results match the mean of the previous Datasets:
###Output
_____no_output_____
###Markdown
The identical mean for each dataset is one piece of evidence that the statistical properties are the same for each dataset. 2.2 Calculating the variance of x and y in each dataset. Variance is the expected squared deviation of a random variable from its mean. This statistical property equals 11 for x and 4.12 for y in each dataset. Dataset 1 (x1 and y1)
###Code
df.loc[:,['x1', 'y1']].var() #Dataset 1 variance:
###Output
_____no_output_____
###Markdown
Dataset 2 (x2 and y2)
###Code
df.loc[:,['x2', 'y2']].var() #Dataset 2 variance equals to Dataset 1:
###Output
_____no_output_____
###Markdown
Dataset 3 (x3 and y3)
###Code
df.loc[:,['x3', 'y3']].var() #Dataset 3 variance is identical to previous Datasets:
###Output
_____no_output_____
###Markdown
Dataset 4 (x4 and y4)
###Code
df.loc[:,['x4', 'y4']].var() #All four Datasets have the same variance statistical measurement:
###Output
_____no_output_____
###Markdown
We can conclude that the variance for each dataset is equal. This is further evidence that the statistical measurements are almost identical. 2.3 The correlation between x and y. Correlation measures how close two variables are to having a linear relationship. Dataset 1 (x1 and y1)
###Code
df.loc[:,["x1", "y1"]].corr() #Dataset 1 results:
###Output
_____no_output_____
###Markdown
Dataset 2 (x2 and y2)
###Code
df.loc[:,["x2", "y2"]].corr() #Dataset 2 correlation results match previous results.
###Output
_____no_output_____
###Markdown
Dataset 3 (x3 and y3)
###Code
df.loc[:,["x3", "y3"]].corr() #Results match previous results.
###Output
_____no_output_____
###Markdown
Dataset 4 (x4 and y4)
###Code
df.loc[:,["x4", "y4"]].corr() #Correlation between all Datasets is almost identical.
#The correlation between x and y is 0.816 for each dataset
###Output
_____no_output_____
###Markdown
At this stage it is evident that the mean, variance and correlation for each dataset are almost identical. From this statistical information alone one could conclude that all the datasets are essentially the same. 2.4 Linear regression (line of best fit) for each dataset. Linear regression describes the relationship between two variables; mathematically it is represented as Y ≈ α + βX + ε. For all 4 datasets, the slope of the regression line is 0.500 and the intercept is 3.00.
###Code
#Dataset 1 values:
x1 = np.array([10,8,13,9,11,14,6,4,12,7,5])
y1 = np.array([8.04,6.95,7.58,8.81,8.33,9.96,7.24,4.26,10.84,4.82,5.68])
p1 = np.polyfit(x1,y1,1)
print(p1) #This will show me the slope of the regression and intercept of Dataset 1.
#Dataset 2
#This time instead of writing out the array I have used df to pull it from the Dataset defined at the beginig.
x2 = np.array(df['x2'])
y2 = np.array(df['y2'])
p2 = np.polyfit(x2,y2,1)
print(p2) #The linear regression will be the same as in Dataset 1.
#Dataset 3
x3 = np.array(df['x3'])
y3 = np.array(df['y3'])
p3 = np.polyfit(x3,y3,1)
print(p3) #Results for Dataset 3 are almost equal to previous linear regressions.
#Dataset 4
x4 = np.array(df['x4'])
y4 = np.array(df['y4'])
p4 = np.polyfit(x4,y4,1)
print(p4) #I wanted to see if it would work when I fit everything into one cell.
#The result is as expected; slope 5
###Output
[0.5 3.00090909]
###Markdown
This final statistical calculation has demonstrated that all the data, when summarised (mean, variance, correlation and linear regression), are almost identical. Let's plot the graphs and see the visual results. 3. Plotting interesting aspects of the dataset. This section will show Anscombe's Quartet's four datasets plotted as graphs: Dataset 1 (x1 and y1)
###Code
plt.plot(x1,y1,'o') #This will plot x and y variables on the graph as dots:
plt.plot(x1,np.polyval(p1,x1),'r-') #This shows me the line of best fit for the Dataset1.
#p1 represents the slope and intercept calculated above.
yfit = p1[0] * x1 + p1[1] #I want to calculate the fit value
print(yfit) #predicted values
print(y1) #actual values for Dataset 1
plt.plot(x1,yfit,'m:') #plotted
#When I put the first and second graph together I will see the line of best fit with the variables:
plt.plot(x1,y1,'o')
plt.plot(x1,np.polyval(p1,x1),'r-') #The line of regression is defined as red line.
#Dataset 2 graph.
plt.plot(x2,y2,'o')
plt.plot(x2,np.polyval(p2,x2),'r-')
#Dataset 2
#In this graph I want to play around with visualization.
#Linear Regression will be plotted by blue dotted line.
plt.plot(x2,y2,'o')
plt.plot(x2,np.polyval(p2,x2),'b--') #b-- blue dotted line. I think I will stick to the red line.
#Dataset 3 graph.
plt.plot(x3,y3,'o')
plt.plot(x3,np.polyval(p3,x3),'r-')
#Dataset 4 graph.
plt.plot(x4,y4,'o')
plt.plot(x4,np.polyval(p4,x4),'r-')
###Output
_____no_output_____ |
ml/polynorminal/leaning-curve.ipynb | ###Markdown
Learning Curves
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# X and y are assumed to be defined in an earlier (not shown) cell,
# e.g. a small noisy one-feature regression dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
X_train.shape
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
train_score = []
test_score = []
# there are 75 training samples in total
for i in range(1, 76):
lin_reg = LinearRegression()
lin_reg.fit(X_train[:i], y_train[:i])
y_train_predict = lin_reg.predict(X_train[:i])
train_score.append(mean_squared_error(y_train[:i], y_train_predict))
y_test_predict = lin_reg.predict(X_test)
test_score.append(mean_squared_error(y_test, y_test_predict))
plt.plot([i for i in range(1, 76)], np.sqrt(train_score), label='train')
plt.plot([i for i in range(1, 76)], np.sqrt(test_score), label='test')
plt.legend()
plt.show()
def plot_learning_curve(algo, X_train, X_test, y_train, y_test):
train_score = []
test_score = []
num = len(X_train) + 1
for i in range(1, num):
algo.fit(X_train[:i], y_train[:i])
        y_train_predict = algo.predict(X_train[:i])
train_score.append(mean_squared_error(y_train[:i], y_train_predict))
y_test_predict = algo.predict(X_test)
test_score.append(mean_squared_error(y_test, y_test_predict))
plt.plot([i for i in range(1, num)], np.sqrt(train_score), label='train')
plt.plot([i for i in range(1, num)], np.sqrt(test_score), label='test')
plt.legend()
# 0<=x<=num, 0<=y<=4
plt.axis([0, num, 0, 4])
plt.show()
plot_learning_curve(LinearRegression(), X_train, X_test, y_train, y_test)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression
def PolynomialRegression(degree):
return Pipeline([
('poly', PolynomialFeatures(degree=degree)),
('std_scaler', StandardScaler()),
('line_reg', LinearRegression())
])
pol2_reg = PolynomialRegression(2)
plot_learning_curve(pol2_reg, X_train, X_test, y_train, y_test)
###Output
_____no_output_____
###Markdown
Splitting the dataset - Training set: used to fit the model- Validation set: used to tune hyperparameters- Test set: used to check generalization Cross-validation (also covered in the "watermelon book")
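###Markdown
A minimal sketch of carving a dataset into the three parts described above, by applying train_test_split twice (the split ratios and variable names here are just an example):
###Code
from sklearn import datasets
from sklearn.model_selection import train_test_split

digits = datasets.load_digits()
# first carve off the test set, then split the remainder into train / validation
X_trainval, X_test_demo, y_trainval, y_test_demo = train_test_split(
    digits.data, digits.target, test_size=0.2, random_state=666)
X_train_demo, X_val_demo, y_train_demo, y_val_demo = train_test_split(
    X_trainval, y_trainval, test_size=0.25, random_state=666)
print(X_train_demo.shape, X_val_demo.shape, X_test_demo.shape)
###Output
_____no_output_____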
###Code
import numpy as np
from sklearn import datasets
digits = datasets.load_digits()
X = digits.data
y = digits.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
from sklearn.neighbors import KNeighborsClassifier
best_score, best_p, best_k = 0, 0, 0
for k in range(2, 11):
for p in range(1, 5):
knn = KNeighborsClassifier(weights="distance", n_neighbors=k, p=p, n_jobs=-1)
knn.fit(X_train, y_train)
score = knn.score(X_test, y_test)
if score > best_score:
best_score, best_p, best_k = score, p, k
print("Best K = %d" % best_k)
print("Best p = %d" % best_p)
print("Best score = %s" % best_score)
###Output
Best K = 5
Best p = 2
Best score = 0.9866666666666667
###Markdown
Using cross-validation
###Code
from sklearn.model_selection import cross_val_score
knn = KNeighborsClassifier()
cross_val_score(knn, X_train, y_train)
from sklearn.neighbors import KNeighborsClassifier
best_score, best_p, best_k = 0, 0, 0
for k in range(2, 11):
for p in range(1, 5):
knn = KNeighborsClassifier(weights="distance", n_neighbors=k, p=p, n_jobs=-1)
knn.fit(X_train, y_train)
            # cv=3: split the training data into 3 folds
scores = cross_val_score(knn, X_train, y_train, cv=3)
score = np.mean(scores)
if score > best_score:
best_score, best_p, best_k = score, p, k
print("Best K = %d" % best_k)
print("Best p = %d" % best_p)
print("Best score = %s" % best_score)
###Output
Best K = 5
Best p = 3
Best score = 0.9866166891368011
###Markdown
Cross-validation is used to find the best hyperparameters
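###Markdown
The manual double loop above can also be expressed with GridSearchCV, which runs the same kind of cross-validation internally (a sketch using the same data and estimator):
###Code
from sklearn.model_selection import GridSearchCV

param_grid = {'weights': ['distance'],
              'n_neighbors': list(range(2, 11)),
              'p': list(range(1, 5))}
grid_search = GridSearchCV(KNeighborsClassifier(n_jobs=-1), param_grid, cv=3)
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)
print(grid_search.score(X_test, y_test))
###Output
_____no_output_____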
###Code
best_knn = KNeighborsClassifier(weights="distance", n_neighbors=5, p=3, n_jobs=-1)
best_knn.fit(X_train, y_train)
best_knn.score(X_test, y_test)
###Output
_____no_output_____ |
chapter05/chapter05.ipynb | ###Markdown
More Capable Functions N-arity and variadic functions
###Code
;; A simple function that takes a variable number of args
;; (a multi-arity function)
(defn greet
([to-whom]
(println "Welcome to Blotts Books" to-whom))
([message to-whom]
(println message to-whom)))
(greet "Dolly")
(greet "Howdy" "Stranger")
;; One of the arities of the function can be expressed in terms of the other
(defn greet
([to-whom]
(greet "Welcome to Blotts Books" to-whom)) ; Expressed in terms of the other arity
([message to-whom]
(println message to-whom)))
;; A function that takes an arbitrary number of args
;; (a variadic function)
(defn print-any-args [& args] ; The '&' symbol means a variable number of args
(println "My arguments are:" args))
(print-any-args 7 true nil) ; The args are printed as a collection
;; Functions can have ordinary args before '&', as follows
(defn first-argument [& args]
(first args))
(defn new-first-argument [x & args]
x)
(first-argument 1 2 3)
(new-first-argument 1 2 3)
###Output
_____no_output_____
###Markdown
Multimethods
###Code
;; Given the following formats of book data
{:title "War and Peace" :author "Tolstoy"}
{:book "Emma" :by "Austen"}
["1984" "Orwell"]
;; A function that normalizes the format of book
;; can be written as follows
;; (It works, but with a growing number of book formats it will get ugly fast)
(defn normalize-book [book]
(if (vector? book)
{:title (first book) :author (second book)}
(if (contains? book :title)
book
{:title (:book book) :author (:by book)})))
(normalize-book {:title "War and Peace" :author "Tolstoy"})
(normalize-book {:book "Emma" :by "Austen"})
(normalize-book ["1984" "Orwell"])
;; A function that returns keywords based on the book's format
;; to be used later to define a multimethod
(defn dispatch-book-format [book]
(cond
(vector? book) :vector-book
(contains? book :title) :standard-map
(contains? book :book) :alternative-map))
(dispatch-book-format ["1984" "Orwell"])
;; Defining a multimethod from the last dispatcher function
(defmulti normalize-book dispatch-book-format)
;; Implementing the last multimethod for the possible values
;; returned from the dispatch function
(defmethod normalize-book :vector-book [book]
{:title (first book) :author (second book)})
(normalize-book ["1984" "Orwell"])
(defmethod normalize-book :standard-map [book]
book)
(normalize-book {:title "War and Peace" :author "Tolstoy"})
(defmethod normalize-book :alternative-map [book]
{:title (:book book) :author (:by book)})
(normalize-book {:book "Emma" :by "Austen"})
;; A new colection of books with a new keyword :genre,
;; which should be processed somehow by the last multimethod
(def books [{:title "Pride and Prejudice" :author "Austen" :genre :romance}
{:title "World War Z" :author "Brooks" :genre :zombie}])
;; In case another keyword is present (:genre in this case),
;; a separate multimethod can be created independently, as follows
(defmulti book-description :genre)
(defmethod book-description :romance [book]
(str "The heart warming new romance by " (:author book)))
(defmethod book-description :zombie [book]
(str "The heart consuming new zombie adventure by " (:author book)))
(book-description (first books))
(book-description (last books))
;; If there are new genres, a new method can be implemented
;; to handle it, as follows
(def ppz {:title "Pride and Prejudice and Zombies"
:author "Grahame-Smith"
:genre :zombie-romance})
(defmethod book-description :zombie-romance [book]
(str "The heart warming and consuming new romance by " (:author book)))
(book-description ppz)
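;; A :default implementation can be added to handle any dispatch value
;; that has no specific method (an illustrative addition, not from the book)
(defmethod book-description :default [book]
  (str "A new book by " (:author book)))
(book-description {:title "Dracula" :author "Stoker" :genre :horror})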
###Output
_____no_output_____
###Markdown
Recursive functions
###Code
;; Given the following books map
(def books
[{:title "Jaws" :copies-sold 2000000}
{:title "Emma" :copies-sold 3000000}
{:title "2001" :copies-sold 4000000}])
;; To get the sum of the copies sold,
;; a recursive function can be defined as follows
(defn sum-copies
([books]
(sum-copies books 0))
([books total]
(if (empty? books)
total
(sum-copies ; Note the recursion here
(rest books)
(+ total (:copies-sold (first books)))))))
(sum-copies books)
;; The last function will blow the stack with a large collection.
;; To avoid that, the 'recur' function can be used as follows
(defn sum-copies
([books] (sum-copies books 0))
([books total]
(if (empty? books)
total
(recur
(rest books)
(+ total (:copies-sold (first books)))))))
(sum-copies books)
;; The last function can be made even shorter
;; with the 'loop' function, as follows
(defn sum-copies [books]
(loop [books books total 0]
(if (empty? books)
total
(recur (rest books) (+ total (:copies-sold (first books)))))))
(sum-copies books)
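;; For comparison, the same sum can be written without explicit recursion
;; using higher-order functions (an illustrative addition, not from the book)
(defn sum-copies-reduce [books]
  (reduce + (map :copies-sold books)))
(sum-copies-reduce books)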
###Output
_____no_output_____
###Markdown
Docstrings
###Code
"""
To describe a function's purpose, it's ok to describe it with standard comments
"""
;; Return the average of the two parameters.
(defn average [a b]
(/ (+ a b) 2.0))
(average 4 3)
;; But a more idiomatic way is to use docstrings, as follows
(defn average-2
"Return the average of a and b."
[a b]
(/ (+ a b) 2.0))
(average-2 4 3)
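;; The docstring can be inspected from the REPL with the doc macro
;; (an illustrative addition, not from the book)
(require '[clojure.repl :refer [doc]])
(doc average-2)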
###Output
_____no_output_____
###Markdown
Pre and Post-conditions
###Code
;; In case you want to check some property of the args (a map, in this case)
;; a conditional checking can be written at the start of the function, as follows
(defn publish-book [book]
(when-not (contains? book :title)
(throw (ex-info "Books must contain :title" {:book book})))
(println book))
(publish-book {:title "War and Peace" :author "Tolstoy"}) ; Pass the checking
(publish-book {:author "Tolstoy"}) ; Doesn't pass the checking
;; But Clojure provides a keyword for the last functionality,
;; the :pre (for pre-conditional) keyword
(defn publish-book-2 [book]
{:pre [(:title book)]} ; The pre-conditional should be a vector of expressions
(println book))
(publish-book-2 {:title "War and Peace" :author "Tolstoy"}) ; Pass the pre-condition
(publish-book-2 {:author "Tolstoy"}) ; Doesn't pass the pre-condition
;; A similar functionality but for return values is present
;; in the :post (for post-conditional) keyword
(defn publish-book-3 [book]
{:pre [(:title book) (:author book)]
:post [(boolean? %)]} ; The post-conditional should be a vector of expressions
(map? book))
(publish-book-3 {:title "War and Peace" :author "Tolstoy"})
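;; If the return value violates the :post condition, an AssertionError is thrown
;; (an illustrative addition; assumes assertion checking is enabled)
(defn publish-book-4 [book]
  {:post [(string? %)]} ; Expects a string return value
  (map? book))          ; Returns a boolean, so the post-condition fails
;; (publish-book-4 {:title "War and Peace" :author "Tolstoy"}) ; => AssertionError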
###Output
_____no_output_____
###Markdown
Issues with functions
###Code
;; Trying to define an n-ary function with overlapping arities throws an exception
(defn one-two-or-more
([a] (println "One arg:" a))
; It's not clear which of the 2 below should be called when there are 2 args
([a b] (println "Two args:" a b))
([& more] (println "More than two:" more)))
;; To avoid ambiguities, the last function should be modified as follows
(defn one-two-or-more
([a] (println "One arg:" a))
([a b] (println "Two args:" a b))
([a b & more] (println "More than two:" a b more)))
;; Functions with multiple body expressions should not be confused
;; with multi-arity functions
(defn chatty-average
([a b]
(println "chatty-average function called with 2 arguments")
(println "** first argument:" a)
(println "** second argument:" b)
(/ (+ a b) 2.0)))
(defn chatty-multi-average
([a b]
(println "chatty-average function called with 2 arguments")
(/ (+ a b) 2.0))
([a b c]
(println "chatty-average function called with 3 arguments")
(/ (+ a b c) 3.0)))
(chatty-average 2 3)
(chatty-multi-average 2 3)
###Output
chatty-average function called with 2 arguments
###Markdown
Exploring Clojure and Java interop Calling Java from Clojure Importing Java classes into Clojure
###Code
; The general form of import statements is as follows
"""
(import & import-symbols-or-lists)
"""
; Importing 2 java classes individually
; The quote (') reader macro instructs the runtime not to evaluate the symbol
(import 'java.util.Date 'java.text.SimpleDateFormat)
; Importing 2 java classes as a sequence
(import '[java.util Date Set])
; Using the :import keyword to import classes in a namespace
(ns com.clojureinaction.book (:import (java.util Set Date)))
(ns user)
###Output
_____no_output_____
###Markdown
Creating instances
###Code
; Using the new keyword to instantiate a class (like in Java)
(import '(java.text SimpleDateFormat))
(def sdf (new SimpleDateFormat "yyyy-MM-dd"))
; Using a trailing dot to instantiate a class
(def sdf (SimpleDateFormat. "yyyy-MM-dd"))
###Output
_____no_output_____
###Markdown
Accessing methods and fields
###Code
; Using a leading dot to access an instance method
(defn date-from-date-string [date-string]
(let [sdf (SimpleDateFormat. "yyyy-MM-dd")]
(.parse sdf date-string)))
; Using a slash to access a static method
(Long/parseLong "12321") ; The syntax is (Classname/staticMethod args*)
; Using a slash to access a static field
(import '(java.util Calendar))
Calendar/JANUARY
###Output
_____no_output_____
###Markdown
Macros and the dot special form
###Code
; General form of calling static methods
"""
(. ClassnameSymbol methodSymbol args*)
"""
; Example
(. System getenv "PATH")
; General form of calling instance methods
"""
(. instanceExpr methodSymbol args*)
"""
; Example
(import '(java.util Random))
(def rnd (Random.))
(. rnd nextInt 10)
; General form of calling static and instance fields
"""
(. ClassnameSymbol memberSymbol)
(. instanceExpr memberSymbol)
"""
; Example
(. Calendar DECEMBER)
###Output
_____no_output_____
###Markdown
The Dot-Dot macro Using the '.' macro to chain Java method calls (hard to read)
###Code
(import '(java.util Calendar TimeZone))
(. (. (Calendar/getInstance) (getTimeZone)) (getDisplayName))
###Output
_____no_output_____
###Markdown
Using the '..' macro to chain method calls (easier to read)
###Code
(.. (Calendar/getInstance) (getTimeZone) (getDisplayName))
###Output
_____no_output_____
###Markdown
The Doto macro
###Code
; Applying a method repeatedly to a single Java object (note the code duplication)
(import '(java.util Calendar))
(defn the-past-midnight-1 []
(let [calendar-obj (Calendar/getInstance)]
(.set calendar-obj Calendar/AM_PM Calendar/AM)
(.set calendar-obj Calendar/HOUR 0)
(.set calendar-obj Calendar/MINUTE 0)
(.set calendar-obj Calendar/SECOND 0)
(.set calendar-obj Calendar/MILLISECOND 0)
(.getTime calendar-obj)))
; Using the doto macro to apply a method repeatedly to a single Java object
; (the code duplication was removed)
(defn the-past-midnight-2 []
(let [calendar-obj (Calendar/getInstance)]
(doto calendar-obj
(.set Calendar/AM_PM Calendar/AM)
(.set Calendar/HOUR 0)
(.set Calendar/MINUTE 0)
(.set Calendar/SECOND 0)
(.set Calendar/MILLISECOND 0))
(.getTime calendar-obj)))
###Output
_____no_output_____
###Markdown
The memfn macro
###Code
; Using a Java method as a normal function
(map (fn [x] (.getBytes x)) ["amit" "rob" "kyle"])
; Using a Java method as an anonymous function
(map #(.getBytes %) ["amit" "rob" "kyle"])
; Using the memfn macro instead of the anonymous function
(map (memfn getBytes) ["amit" "rob" "kyle"])
; Calling a Java method without type hints
(.subSequence "Clojure" 2 5)
; Calling a Java method with type hints
((memfn ^String subSequence ^Long start ^Long end) "Clojure" 2 5)
###Output
_____no_output_____
###Markdown
The bean macro
###Code
; Converting Java bean objects to immutable Clojure maps
(import '[java.util Calendar])
(bean (Calendar/getInstance))
###Output
_____no_output_____
###Markdown
Working with Java arrays
###Code
(def tokens (.split "clojure.in.action" "\\."))
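;; A few common array operations on the result (illustrative additions, not from the book)
(alength tokens) ; Number of elements in the Java array
(aget tokens 0)  ; Element at index 0 => "clojure"
(seq tokens)     ; View the array as a Clojure sequence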
###Output
_____no_output_____
###Markdown
Implementing interfaces and extending classes The proxy macro
###Code
; Implementing the MouseAdapter class with proxy
(import 'java.awt.event.MouseAdapter)
(proxy [MouseAdapter] []
(mousePressed [event]
(println "Hey!")))
; The general form of the proxy macro is as follows
"""
(proxy [class-and-interfaces] [args] fs+)
"""
###Output
_____no_output_____
###Markdown
The reify macro
###Code
; Creating an instance of Java’s FileFilter interface
(reify java.io.FileFilter
(accept [this f]
(.isDirectory f)))
###Output
_____no_output_____
###Markdown
Compiling Clojure code to Java bytecode The first directory structure of the project```root classes src com curry utils calculators.clj```
###Code
; Contents of the calculators.clj file
"""
(ns com.curry.utils.calculators (:gen-class))
(defn present-value [data]
(println \"calculating present value...\")
"""
; Compiling the defined namespace
(compile 'com.curry.utils.calculators)
###Output
_____no_output_____
###Markdown
The new directory structure of the project```root classes src com curry utils calculators.clj calc dcf.clj fcf.clj```
###Code
; Contents of dcf.clj
"""
(in-ns 'com.curry.utils.calculators)
(defn discounted-cash-flow [data]
(println \"calculating discounted cash flow...\"))
"""
; Contents of fcf.clj
"""
(in-ns 'com.curry.utils.calculators)
(defn free-cash-flow [data]
(println \"calculating free cash flow...\"))
"""
; The new contents of calculators.clj
"""
(ns com.curry.utils.calculators (:gen-class))
(load \"calc/fcf\")
(load \"calc/dcf\")
(defn present-value [data]
(println \"calculating present value...\"))
"""
###Output
_____no_output_____
###Markdown
Creating Java classes and interfaces using gen-class and gen-interface
###Code
; An abstract Java class that will be used to illustrate
; how gen-class works
"""
package com.gentest;
public abstract class AbstractJavaClass {
public AbstractJavaClass(String a, String b) {
System.out.println(\"Constructor: a, b\");
}
public AbstractJavaClass(String a) {
System.out.println(\"Constructor: a\");
}
public abstract String getCurrentStatus();
public String getSecret() {
return \"The Secret\";
}
}
"""
; Using the AbstractJavaClass defined above inside Clojure code
"""
(ns com.gentest.gen-clojure
(:import (com.gentest AbstractJavaClass))
(:gen-class
:name com.gentest.ConcreteClojureClass
:extends com.gentest.AbstractJavaClass
:constructors {[String] [String]
[String String] [String String]}
:implements [Runnable]
:init initialize
:state localState
:methods [[stateValue [] String]]))
(defn -initialize
([s1]
(println \"Init value:\" s1)
[[s1 \"default\"] (ref s1)])
([s1 s2]
(println \"Init values:\" s1 \",\" s2)
[[s1 s2] (ref s2)]))
(defn -getCurrentStatus [this]
\"getCurrentStatus from - com.gentest.ConcreteClojureClass\")
(defn -stateValue [this]
@(.localState this))
(defn -run [this]
(println \"In run!\")
(println \"I'm a\" (class this))
(dosync (ref-set (.localState this) \"GO\")))
(defn -main []
(let [g (new com.gentest.ConcreteClojureClass \"READY\")]
(println (.getCurrentStatus g))
(println (.getSecret g))
(println (.stateValue g)))
(let [g (new com.gentest.ConcreteClojureClass \"READY\" \"SET\")]
(println (.stateValue g))
(.start (Thread. g))
(Thread/sleep 1000)
(println (.stateValue g))))
"""
; To compile and test the code above, compile the namespace in the REPL and then run the generated class from the command line
!compile 'com.gentest.gen-clojure
!java com.gentest.ConcreteClojureClass
###Output
_____no_output_____
###Markdown
Leiningen project file for ConcreteClojureClass
###Code
"""
(defproject gentest \"0.1.0\"
:dependencies [[org.clojure/clojure \"1.6.0\"]]
; Place our \"AbstractJavaClass.java\" and \"gen-clojure.clj\" files under
; the src/com/gentest directory.
:source-paths [\"src\"]
:java-source-paths [\"src\"]
; :aot is a list of clojure namespaces to compile.
:aot [com.gentest.gen-clojure]
; This is the java class \"lein run\" should execute.
:main com.gentest.ConcreteClojureClass)
"""
###Output
_____no_output_____
###Markdown
Calling Clojure from Java
###Code
; Clojure function, defined in the clj.script.examples namespace
(ns clj.script.examples)
(defn print-report [user-name]
(println "Report for:" user-name) 10)
; Using the last function in a Java code
"""
import clojure.lang.RT;
import clojure.lang.Var;
public class Driver {
public static void main(String[] args) throws Exception {
RT.loadResourceScript(\"clojure_script.clj\");
Var report = RT.var(\"clj.script.examples\", \"print-report\");
Integer result = (Integer) report.invoke(\"Siva\");
System.out.println(\"Result: \" + result);
}
}
"""
###Output
_____no_output_____ |
module4/s1_classification.ipynb | ###Markdown
Chargement du dataset 20 newsgroupsPour plus d'information : https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
list(news)
###Output
_____no_output_____
###Markdown
Exploring the dataset
###Code
labels = news.target_names
pprint(labels)
# Examples of articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TFIDF: https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18
###Code
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
###Output
_____no_output_____
###Markdown
Document classification Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Examples of articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TFIDF: https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Cleaning the texts
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Mapping the targets
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
Training set size: 15076
Test set size: 3770
###Markdown
Training a machine learning model on the training data
###Code
# Define the model type
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened? The TFIDF vectorizer computes the IDF score of each word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of the first 10 terms in the vocabulary
for i in range(0, 10):
print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF scores
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
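# For reference, TfidfVectorizer (with the default smooth_idf=True) computes
# idf(t) = ln((1 + n_documents) / (1 + document_frequency(t))) + 1,
# which is why the rarest terms get the highest IDF scores above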
###Output
_____no_output_____
###Markdown
TF-IDF transforms each document into a vector the size of the vocabulary, where each score is the TFIDF value (term frequency in the document * idf)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
###Output
_____no_output_____
###Markdown
The naive Bayes model learns the correlation between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
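# feature_log_prob_[c, t] holds the smoothed log-probability log P(term t | class c),
# so each column of the transposed DataFrame describes one class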
###Output
_____no_output_____
###Markdown
This makes it possible to discover the most contributive terms for a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluating the model Generating a classification report. For more information on precision, recall and f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
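# A minimal sketch showing how the averaged scores in the report can also be
# computed directly (illustrative addition, not in the original notebook)
from sklearn.metrics import precision_score, recall_score, f1_score
print("Precision (macro):", precision_score(y_test, y_pred, average='macro'))
print("Recall (macro):", recall_score(y_test, y_pred, average='macro'))
print("F1-score (macro):", f1_score(y_test, y_pred, average='macro'))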
###Output
_____no_output_____
###Markdown
Generating a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Document classification Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
import nltk
nltk.download('stopwords')
###Output
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Examples of articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Clean the texts: lowercase and keep alphabetic characters only
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map numeric targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
Training set size: 15076
Test set size: 3770
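###Markdown
 The split above is purely random. As a quick sketch (reusing the `texts` and `targets` arrays defined above; the variable names below are illustrative), the same call with `stratify=` keeps every newsgroup at the same proportion in train and test, which matters most for the rarer groups:
###Code
from collections import Counter

# Hypothetical variant of the split above: stratify on the labels so that each
# newsgroup keeps the same relative frequency in the train and test sets.
X_tr, X_te, y_tr, y_te = train_test_split(
    texts, targets, test_size=0.2, random_state=11, stratify=targets)

full_dist = Counter(targets)
test_dist = Counter(y_te)
for label_name in sorted(full_dist)[:5]:
    print(label_name,
          round(full_dist[label_name] / len(targets), 3),
          round(test_dist[label_name] / len(y_te), 3))
###Output
_____no_output_____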
###Markdown
Training a machine learning model on the training data
###Code
# Define the model: a TF-IDF vectorizer followed by a multinomial Naive Bayes classifier
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened? The TF-IDF vectorizer computes the IDF score of every word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of the first few terms in the vocabulary
for i in range(0, 10):
print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF scores
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
###Output
accomplished 6.69
accord 6.69
acknowledge 6.69
alabama 6.69
approval 6.69
atmospheric 6.69
bach 6.69
bills 6.69
boring 6.69
brunswick 6.69
click 6.69
cloud 6.69
communicate 6.69
compatibility 6.69
confuse 6.69
connectors 6.69
copying 6.69
counted 6.69
damned 6.69
definite 6.69
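###Markdown
 As a sanity check, the stored IDF values can be recomputed by hand. The sketch below (reusing the `classifier`, `X_train`, `feature_names` and `idf_scores` objects defined above) applies scikit-learn's default smoothed formula, idf(t) = ln((1 + n) / (1 + df(t))) + 1, to a single vocabulary term:
###Code
import numpy as np

# Recompute the smoothed IDF of one term by hand and compare it with the value
# stored by the fitted TfidfVectorizer (smooth_idf=True is the default).
vectorizer = classifier.named_steps['vectorizer']
analyzer = vectorizer.build_analyzer()          # same preprocessing / tokenisation as the vectorizer
term = feature_names[0]
df = sum(term in set(analyzer(doc)) for doc in X_train)   # document frequency of the term
n_docs = len(X_train)
manual_idf = np.log((1 + n_docs) / (1 + df)) + 1
print(term, '- document frequency:', df)
print('manual idf :', round(manual_idf, 4))
print('sklearn idf:', round(float(idf_scores[0]), 4))
###Output
_____no_output_____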
###Markdown
TF-IDF then turns each document into a vector of vocabulary size, where each value is the TF-IDF score (the term's frequency in the document * idf)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
###Output
_____no_output_____
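###Markdown
 One detail worth noting, as a quick check reusing the `tmp` matrix from the previous cell: with the default `norm='l2'` each document vector is rescaled to unit length, so only the relative weights of the terms matter, not the length of the document.
###Code
import numpy as np

# Every row of the TF-IDF matrix should have an L2 norm of 1.0 (a row can only be
# 0 if the document lost all of its terms to stop-word / min_df / max_df filtering).
row_norms = np.linalg.norm(tmp.toarray(), axis=1)
print(np.round(row_norms, 4))
###Output
_____no_output_____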
###Markdown
The naive Bayes model learns the association between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
###Output
_____no_output_____
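###Markdown
 How this learned table is used at prediction time, as a minimal sketch (assuming the fitted `classifier` and the `X_test` split from above): MultinomialNB scores each class with log P(class) plus the dot product of the document's TF-IDF vector with that class's log word probabilities, then picks the argmax.
###Code
import numpy as np

# Manual reconstruction of the Naive Bayes decision for one test document:
# score(c) = log P(c) + x · log P(word | c), prediction = argmax over classes.
nb = classifier.named_steps['classifier']
x = classifier.named_steps['vectorizer'].transform(X_test[:1])
scores = x @ nb.feature_log_prob_.T + nb.class_log_prior_
print('manual prediction :', nb.classes_[int(np.argmax(scores))])
print('sklearn prediction:', classifier.predict(X_test[:1])[0])
###Output
_____no_output_____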
###Markdown
The same table also lets us discover the most contributive terms for a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluating the model Generating a classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
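###Markdown
 To make the numbers in the report concrete, here is a small sketch that recomputes precision, recall and F1 for a single class (the label choice is arbitrary) directly from the true/false positive and negative counts:
###Code
import numpy as np

# Precision, recall and F1 for one class, from raw counts; classification_report
# does exactly this for every label and then averages.
label = 'rec.sport.hockey'
y_true = np.asarray(y_test)
y_hat = np.asarray(y_pred)
tp = np.sum((y_hat == label) & (y_true == label))
fp = np.sum((y_hat == label) & (y_true != label))
fn = np.sum((y_hat != label) & (y_true == label))
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
print(f'{label}: precision={precision:.2f} recall={recall:.2f} f1={f1:.2f}')
###Output
_____no_output_____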
###Markdown
Generating a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
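###Markdown
 The plot can also be inspected as a raw matrix; a short sketch using scikit-learn's `confusion_matrix` (rows are true labels, columns are predictions, off-diagonal cells are the confusions):
###Code
from sklearn.metrics import confusion_matrix

# Raw confusion matrix: entry (i, j) counts documents of true label i predicted as label j.
cm = confusion_matrix(y_test, y_pred, labels=labels)
pd.DataFrame(cm, index=labels, columns=labels)
###Output
_____no_output_____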
###Markdown
Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
labelled_target = np.array([labels[t] for t in news.target])
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
X_train, X_test, y_train, y_test = train_test_split(texts, labelled_target, test_size=0.2, random_state=11)
###Output
_____no_output_____
###Markdown
Training the machine learning model on the training data
###Code
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened?
###Code
# The TF-IDF vectorizer has computed the IDF of every word in the corpus
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_ = classifier.named_steps['vectorizer'].idf_
len(feature_names)
for i in range(1000, 1042):
print(feature_names[i], ':', round(idf_[i], 2))
# It then turns each document into a vector of vocabulary size, where each value is the TF-IDF score
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
# And the naive Bayes classifier learns the association between each word and each category
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='alt.atheism', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data
###Code
y_pred = classifier.predict(X_test)
# Preview of the predicted targets
y_pred
# Preview of the actual targets
y_test
###Output
_____no_output_____
###Markdown
Building the classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
Creating a confusion matrix
###Code
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
###Output
Number of categories: 20
###Markdown
Exploring the dataset
###Code
labels = news.target_names
pprint(labels)
# Example articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html
###Code
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'))),
('classifier', MultinomialNB()),
])
###Output
_____no_output_____
###Markdown
Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
labelled_target = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(news.data, labelled_target, test_size=0.2, random_state=11)
###Output
_____no_output_____
###Markdown
Training the machine learning model on the training data
###Code
classifier.fit(X_train, y_train)
###Output
_____no_output_____
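###Markdown
 For reference, a sketch of what the fitted `Pipeline` does under the hood: the vectorizer is fit-transformed on the training texts and the Naive Bayes model is then fit on the resulting TF-IDF matrix; prediction chains `transform` and `predict` the same way. The variable names below are illustrative.
###Code
# Manual equivalent of Pipeline.fit / Pipeline.predict (sketch).
tfidf = TfidfVectorizer(stop_words=stopwords.words('english'))
nb = MultinomialNB()
X_train_tfidf = tfidf.fit_transform(X_train)   # documents -> sparse TF-IDF matrix
nb.fit(X_train_tfidf, y_train)                 # Naive Bayes on top of the TF-IDF features
print(nb.predict(tfidf.transform(X_test[:3])))
###Output
_____no_output_____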
###Markdown
Predicting the targets of the test data
###Code
y_pred = classifier.predict(X_test)
# Preview of the predicted targets
y_pred
# Preview of the actual targets
y_test
###Output
_____no_output_____
###Markdown
Building the classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
alt.atheism 0.92 0.83 0.87 172
comp.graphics 0.90 0.85 0.87 184
comp.os.ms-windows.misc 0.89 0.81 0.85 204
comp.sys.ibm.pc.hardware 0.75 0.83 0.79 195
comp.sys.mac.hardware 0.94 0.88 0.91 195
comp.windows.x 0.94 0.91 0.92 204
misc.forsale 0.84 0.79 0.82 164
rec.autos 0.88 0.93 0.90 180
rec.motorcycles 0.92 0.98 0.95 173
rec.sport.baseball 0.96 0.94 0.95 217
rec.sport.hockey 0.87 0.98 0.92 178
sci.crypt 0.84 0.99 0.91 197
sci.electronics 0.93 0.87 0.90 199
sci.med 0.95 0.98 0.96 183
sci.space 0.91 0.98 0.94 207
soc.religion.christian 0.71 0.96 0.82 211
talk.politics.guns 0.81 0.97 0.88 208
talk.politics.mideast 0.95 0.96 0.96 200
talk.politics.misc 0.96 0.62 0.76 175
talk.religion.misc 1.00 0.30 0.46 124
accuracy 0.88 3770
macro avg 0.89 0.87 0.87 3770
weighted avg 0.89 0.88 0.87 3770
###Markdown
Creating a confusion matrix
###Code
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
###Output
_____no_output_____
###Markdown
Exploring the dataset
###Code
labels = news.target_names
pprint(labels)
# Example articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
_____no_output_____
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html
###Code
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'))),
('classifier', MultinomialNB()),
])
###Output
_____no_output_____
###Markdown
Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
labelled_target = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(news.data, labelled_target, test_size=0.2, random_state=11)
###Output
_____no_output_____
###Markdown
Training the machine learning model on the training data
###Code
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data
###Code
y_pred = classifier.predict(X_test)
# Preview of the predicted targets
y_pred
# Preview of the actual targets
y_test
###Output
_____no_output_____
###Markdown
Building the classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
Creating a confusion matrix
###Code
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
###Output
_____no_output_____
###Markdown
Exploring the dataset
###Code
labels = news.target_names
pprint(labels)
# Example articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
_____no_output_____
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18
###Code
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
###Output
_____no_output_____
###Markdown
Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
labelled_target = np.array([labels[t] for t in news.target])
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
X_train, X_test, y_train, y_test = train_test_split(texts, labelled_target, test_size=0.2, random_state=11)
###Output
_____no_output_____
###Markdown
Training the machine learning model on the training data
###Code
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened?
###Code
# The TF-IDF vectorizer has computed the IDF of every word in the corpus
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_ = classifier.named_steps['vectorizer'].idf_
len(feature_names)
for i in range(1000, 1042):
print(feature_names[i], ':', round(idf_[i], 2))
# It then turns each document into a vector of vocabulary size, where each value is the TF-IDF score
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
# And the naive Bayes classifier learns the association between each word and each category
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='alt.atheism', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data
###Code
y_pred = classifier.predict(X_test)
# Preview of the predicted targets
y_pred
# Preview of the actual targets
y_test
###Output
_____no_output_____
###Markdown
Building the classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
Creating a confusion matrix
###Code
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Document classification Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Exemples d'articles et de labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Clean the texts: lowercase and keep alphabetic characters only
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map numeric targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
Training set size: 15076
Test set size: 3770
###Markdown
Training a machine learning model on the training data
###Code
# Define the model: a TF-IDF vectorizer followed by a multinomial Naive Bayes classifier
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened? The TF-IDF vectorizer computes the IDF score of every word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of the first few terms in the vocabulary
for i in range(0, 10):
print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF scores
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
###Output
accomplished 6.69
accord 6.69
acknowledge 6.69
alabama 6.69
approval 6.69
atmospheric 6.69
bach 6.69
bills 6.69
boring 6.69
brunswick 6.69
click 6.69
cloud 6.69
communicate 6.69
compatibility 6.69
confuse 6.69
connectors 6.69
copying 6.69
counted 6.69
damned 6.69
definite 6.69
###Markdown
TF-IDF then turns each document into a vector of vocabulary size, where each value is the TF-IDF score (the term's frequency in the document * idf)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
###Output
_____no_output_____
###Markdown
The naive Bayes model learns the association between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
###Output
_____no_output_____
###Markdown
This lets us discover the most contributive terms for a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
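###Markdown
 Besides the hard label, the pipeline also exposes class probabilities; a small sketch (assuming the fitted `classifier` from above) showing the three most likely newsgroups for the first test article:
###Code
import numpy as np

# Probability distribution over the 20 newsgroups for a single test document.
proba = classifier.predict_proba(X_test[:1])[0]
class_names = classifier.named_steps['classifier'].classes_
for idx in np.argsort(proba)[::-1][:3]:
    print(class_names[idx], round(float(proba[idx]), 3))
###Output
_____no_output_____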
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluating the model Generating a classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
alt.atheism 0.90 0.80 0.84 172
comp.graphics 0.72 0.77 0.75 184
comp.os.ms-windows.misc 0.81 0.79 0.80 204
comp.sys.ibm.pc.hardware 0.71 0.76 0.74 195
comp.sys.mac.hardware 0.87 0.82 0.84 195
comp.windows.x 0.84 0.87 0.86 204
misc.forsale 0.77 0.79 0.78 164
rec.autos 0.84 0.94 0.89 180
rec.motorcycles 0.88 0.94 0.91 173
rec.sport.baseball 0.94 0.90 0.92 217
rec.sport.hockey 0.86 0.98 0.91 178
sci.crypt 0.93 0.95 0.94 197
sci.electronics 0.83 0.78 0.81 199
sci.med 0.92 0.92 0.92 183
sci.space 0.91 0.93 0.92 207
soc.religion.christian 0.77 0.94 0.85 211
talk.politics.guns 0.81 0.91 0.86 208
talk.politics.mideast 0.93 0.93 0.93 200
talk.politics.misc 0.89 0.66 0.76 175
talk.religion.misc 0.88 0.34 0.49 124
accuracy 0.85 3770
macro avg 0.85 0.84 0.84 3770
weighted avg 0.85 0.85 0.84 3770
###Markdown
Generating a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
###Output
Number of categories: 20
###Markdown
Exploring the dataset
###Code
labels = news.target_names
pprint(labels)
# Example articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html
###Code
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'))),
('classifier', MultinomialNB()),
])
###Output
_____no_output_____
###Markdown
Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
labelled_target = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(news.data, labelled_target, test_size=0.2, random_state=11)
###Output
_____no_output_____
###Markdown
Training the machine learning model on the training data
###Code
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data
###Code
y_pred = classifier.predict(X_test)
# Preview of the predicted targets
y_pred
# Preview of the actual targets
y_test
###Output
_____no_output_____
###Markdown
Building the classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
alt.atheism 0.92 0.83 0.87 172
comp.graphics 0.90 0.85 0.87 184
comp.os.ms-windows.misc 0.89 0.81 0.85 204
comp.sys.ibm.pc.hardware 0.75 0.83 0.79 195
comp.sys.mac.hardware 0.94 0.88 0.91 195
comp.windows.x 0.94 0.91 0.92 204
misc.forsale 0.84 0.79 0.82 164
rec.autos 0.88 0.93 0.90 180
rec.motorcycles 0.92 0.98 0.95 173
rec.sport.baseball 0.96 0.94 0.95 217
rec.sport.hockey 0.87 0.98 0.92 178
sci.crypt 0.84 0.99 0.91 197
sci.electronics 0.93 0.87 0.90 199
sci.med 0.95 0.98 0.96 183
sci.space 0.91 0.98 0.94 207
soc.religion.christian 0.71 0.96 0.82 211
talk.politics.guns 0.81 0.97 0.88 208
talk.politics.mideast 0.95 0.96 0.96 200
talk.politics.misc 0.96 0.62 0.76 175
talk.religion.misc 1.00 0.30 0.46 124
accuracy 0.88 3770
macro avg 0.89 0.87 0.87 3770
weighted avg 0.89 0.88 0.87 3770
###Markdown
Creating a confusion matrix
###Code
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Document classification Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Examples of articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
_____no_output_____
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Split the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Text cleaning: keep only runs of lowercase letters, replace everything else with spaces
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map the numeric targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
_____no_output_____
###Markdown
Train a machine learning model on the training data
###Code
# Define the type of model
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
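# Note on the vectorizer settings above: min_df=50 keeps only terms appearing in at least 50
# documents, while max_df=0.5 drops terms appearing in more than half of the documents
# (too frequent to help discriminate between categories).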
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What just happened? TF-IDF computes the IDF score of each word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of the first 10 terms of the vocabulary
for i in range(0, 10):
    print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF score
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
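# Rough, hedged sketch (assumes TfidfVectorizer's default smooth_idf=True): each IDF value above
# follows idf(t) = ln((1 + n) / (1 + df(t))) + 1, with n the number of training documents and
# df(t) the number of documents containing t. The document-frequency count below is only a crude
# whitespace-based approximation of the vectorizer's own tokenization.
n_docs = len(X_train)
df_t = sum(1 for doc in X_train if feature_names[0] in doc.split())
print('manual idf estimate for', feature_names[0], ':', round(np.log((1 + n_docs) / (1 + df_t)) + 1, 2))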
###Output
_____no_output_____
###Markdown
TF-IDF transforms each document into a vector the size of the vocabulary, where each score is the TF-IDF value (frequency of the term in the document * idf)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
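# Minimal sketch of what one row above represents (assumes the vectorizer defaults: raw term
# counts for tf and L2 normalisation of each document vector): every non-zero entry is
# tf(t, d) * idf(t), and the whole row is then scaled to unit Euclidean norm.
row = tmp[0].toarray()[0]
print('L2 norm of the first document vector (expected close to 1.0):', round(float(np.linalg.norm(row)), 4))
print('non-zero terms in the first document:', int((row > 0).sum()))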
###Output
_____no_output_____
###Markdown
The naive Bayes model learns the correlation between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
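# Hedged sketch of how MultinomialNB turns these log-probabilities into a prediction
# (standard scikit-learn behaviour): score(class) = class log prior + tf-idf vector . feature_log_prob_,
# then take the argmax over classes.
nb = classifier.named_steps['classifier']
vec = classifier.named_steps['vectorizer'].transform(X_train[:1])
scores = vec @ nb.feature_log_prob_.T + nb.class_log_prior_
print('manual prediction  :', nb.classes_[np.argmax(scores)])
print('pipeline prediction:', classifier.predict(X_train[:1])[0])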
###Output
_____no_output_____
###Markdown
This makes it possible to see which terms contribute most to a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predict the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluate the model. Generate a classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
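# Illustration of the numbers in the report above, recomputed by hand from the confusion matrix
# (confusion_matrix is already imported): precision = TP / (TP + FP), recall = TP / (TP + FN),
# f1 = harmonic mean of the two. 'sci.space' is just an arbitrary class picked for the example.
cm = confusion_matrix(y_test, y_pred, labels=labels)
i = labels.index('sci.space')
tp = cm[i, i]
fp = cm[:, i].sum() - tp
fn = cm[i, :].sum() - tp
precision, recall = tp / (tp + fp), tp / (tp + fn)
print('sci.space precision:', round(precision, 2), '| recall:', round(recall, 2),
      '| f1:', round(2 * precision * recall / (precision + recall), 2))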
###Output
_____no_output_____
###Markdown
Generate a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Document classification Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Examples of articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Split the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Text cleaning: keep only runs of lowercase letters, replace everything else with spaces
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map the numeric targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
Training set size: 15076
Test set size: 3770
###Markdown
Train a machine learning model on the training data
###Code
# Define the type of model
classifier = Pipeline([
    ('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
    ('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
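# Optional sanity check (sketch): mean accuracy of the fitted pipeline on its own training data;
# an unexpectedly low value here would point to a preprocessing problem rather than overfitting.
classifier.score(X_train, y_train)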
###Output
_____no_output_____
###Markdown
What just happened? TF-IDF computes the IDF score of each word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of the first 10 terms of the vocabulary
for i in range(0, 10):
    print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF score
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
###Output
accomplished 6.69
accord 6.69
acknowledge 6.69
alabama 6.69
approval 6.69
atmospheric 6.69
bach 6.69
bills 6.69
boring 6.69
brunswick 6.69
click 6.69
cloud 6.69
communicate 6.69
compatibility 6.69
confuse 6.69
connectors 6.69
copying 6.69
counted 6.69
damned 6.69
definite 6.69
###Markdown
TF-IDF transforms each document into a vector the size of the vocabulary, where each score is the TF-IDF value (frequency of the term in the document * idf)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
###Output
_____no_output_____
###Markdown
The naive Bayes model learns the correlation between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
###Output
_____no_output_____
###Markdown
This makes it possible to see which terms contribute most to a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predict the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluate the model. Generate a classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
alt.atheism 0.90 0.80 0.84 172
comp.graphics 0.72 0.77 0.75 184
comp.os.ms-windows.misc 0.81 0.79 0.80 204
comp.sys.ibm.pc.hardware 0.71 0.76 0.74 195
comp.sys.mac.hardware 0.87 0.82 0.84 195
comp.windows.x 0.84 0.87 0.86 204
misc.forsale 0.77 0.79 0.78 164
rec.autos 0.84 0.94 0.89 180
rec.motorcycles 0.88 0.94 0.91 173
rec.sport.baseball 0.94 0.90 0.92 217
rec.sport.hockey 0.86 0.98 0.91 178
sci.crypt 0.93 0.95 0.94 197
sci.electronics 0.83 0.78 0.81 199
sci.med 0.92 0.92 0.92 183
sci.space 0.91 0.93 0.92 207
soc.religion.christian 0.77 0.94 0.85 211
talk.politics.guns 0.81 0.91 0.86 208
talk.politics.mideast 0.93 0.93 0.93 200
talk.politics.misc 0.89 0.66 0.76 175
talk.religion.misc 0.88 0.34 0.49 124
accuracy 0.85 3770
macro avg 0.85 0.84 0.84 3770
weighted avg 0.85 0.85 0.84 3770
###Markdown
Generate a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Document classification Imports
###Code
import matplotlib.pyplot as plt
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
###Output
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Examples of articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Split the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Clean up the texts
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
Training set size: 15076
Test set size: 3770
###Markdown
Train a machine learning model on the training data
###Code
# Define the model pipeline
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened? TF-IDF computes the IDF score of every word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of each term in the vocabulary
for i in range(0, 10):
print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF scores
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
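# Added sketch (not in the original notebook): how the smoothed IDF reported above is
# computed. With TfidfVectorizer's defaults (smooth_idf=True),
# idf(t) = ln((1 + n_docs) / (1 + df(t))) + 1, illustrated here on a tiny toy corpus.
toy_docs = ["the cat sat", "the dog sat", "the cat ran"]   # hypothetical example corpus
n_docs = len(toy_docs)
df_cat = sum("cat" in doc.split() for doc in toy_docs)     # document frequency of "cat"
idf_cat = np.log((1 + n_docs) / (1 + df_cat)) + 1          # sklearn's smoothed IDF formula
print("idf('cat') =", round(idf_cat, 3))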
###Output
accomplished 6.69
accord 6.69
acknowledge 6.69
alabama 6.69
approval 6.69
atmospheric 6.69
bach 6.69
bills 6.69
boring 6.69
brunswick 6.69
click 6.69
cloud 6.69
communicate 6.69
compatibility 6.69
confuse 6.69
connectors 6.69
copying 6.69
counted 6.69
damned 6.69
definite 6.69
###Markdown
TF-IDF then transforms each document into a vector of vocabulary size, so each score is the TF-IDF (term frequency in the document * IDF)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
###Output
_____no_output_____
###Markdown
The naive Bayes model learns the correlation between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
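# Added sketch (not in the original notebook): how MultinomialNB combines these
# log-probabilities to score a document. The predicted class maximises
# log P(class) + sum over words of tfidf(word, doc) * log P(word | class).
nb = classifier.named_steps['classifier']
doc_vec = classifier.named_steps['vectorizer'].transform([X_test[0]]).toarray()
joint_log_likelihood = nb.class_log_prior_ + doc_vec @ nb.feature_log_prob_.T
print("Predicted label:", nb.classes_[np.argmax(joint_log_likelihood)])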
###Output
_____no_output_____
###Markdown
This lets us discover the most contributive terms for a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predict the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluate the model. Generate a classification report. For more information on precision, recall and the F1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
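# Added sketch (not in the original notebook): recomputing precision, recall and F1
# for a single class from raw counts, to make the report above concrete.
label = 'rec.sport.hockey'   # hypothetical class to inspect
tp = np.sum((y_pred == label) & (y_test == label))   # true positives
fp = np.sum((y_pred == label) & (y_test != label))   # false positives
fn = np.sum((y_pred != label) & (y_test == label))   # false negatives
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
print(label, '-> precision:', round(precision, 2), 'recall:', round(recall, 2), 'f1:', round(f1, 2))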
###Output
precision recall f1-score support
alt.atheism 0.90 0.80 0.84 172
comp.graphics 0.72 0.77 0.75 184
comp.os.ms-windows.misc 0.81 0.79 0.80 204
comp.sys.ibm.pc.hardware 0.71 0.76 0.74 195
comp.sys.mac.hardware 0.87 0.82 0.84 195
comp.windows.x 0.84 0.87 0.86 204
misc.forsale 0.77 0.79 0.78 164
rec.autos 0.84 0.94 0.89 180
rec.motorcycles 0.88 0.94 0.91 173
rec.sport.baseball 0.94 0.90 0.92 217
rec.sport.hockey 0.86 0.98 0.91 178
sci.crypt 0.93 0.95 0.94 197
sci.electronics 0.83 0.78 0.81 199
sci.med 0.92 0.92 0.92 183
sci.space 0.91 0.93 0.92 207
soc.religion.christian 0.77 0.94 0.85 211
talk.politics.guns 0.81 0.91 0.86 208
talk.politics.mideast 0.93 0.93 0.93 200
talk.politics.misc 0.89 0.66 0.76 175
talk.religion.misc 0.88 0.34 0.49 124
accuracy 0.85 3770
macro avg 0.85 0.84 0.84 3770
weighted avg 0.85 0.85 0.84 3770
###Markdown
Generate a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
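# Added sketch (not in the original notebook), assuming confusion_matrix was imported
# from sklearn.metrics in the imports cell: the raw matrix behind the plot, whose
# diagonal holds the correctly classified documents of each label.
cm = confusion_matrix(y_test, y_pred, labels=labels)
print("Correct predictions on the diagonal:", cm.trace(), "of", cm.sum())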
###Output
_____no_output_____
###Markdown
Document classification. Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
import nltk
nltk.download('stopwords')
###Output
[nltk_data] Downloading package stopwords to
[nltk_data] /Users/julienvanbelle/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
Load the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Sample articles and their labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Clean up the texts
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
Training set size: 15076
Test set size: 3770
###Markdown
Train a machine learning model on the training data
###Code
# Define the model pipeline
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened? TF-IDF computes the IDF score of every word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of each term in the vocabulary
for i in range(0, 10):
print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF scores
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
###Output
accomplished 6.69
accord 6.69
acknowledge 6.69
alabama 6.69
approval 6.69
atmospheric 6.69
bach 6.69
bills 6.69
boring 6.69
brunswick 6.69
click 6.69
cloud 6.69
communicate 6.69
compatibility 6.69
confuse 6.69
connectors 6.69
copying 6.69
counted 6.69
damned 6.69
definite 6.69
###Markdown
TF-IDF then transforms each document into a vector of vocabulary size, so each score is the TF-IDF (term frequency in the document * IDF)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
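# Added sketch (not in the original notebook): each row above is L2-normalised by
# default (TfidfVectorizer's norm='l2'), so every document vector has unit length.
first_row = tmp[0].toarray().ravel()
print("L2 norm of the first document vector:", round(float(np.linalg.norm(first_row)), 3))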
###Output
_____no_output_____
###Markdown
The naive Bayes model learns the correlation between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
###Output
_____no_output_____
###Markdown
This lets us discover the most contributive terms for a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predict the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
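# Added sketch (not in the original notebook): overall accuracy, i.e. the share of
# test documents whose predicted label matches the true one.
print("Accuracy:", round(float((y_pred == y_test).mean()), 3))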
###Output
_____no_output_____
###Markdown
Evaluate the model. Generate a classification report. For more information on precision, recall and the F1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
alt.atheism 0.90 0.80 0.84 172
comp.graphics 0.72 0.77 0.75 184
comp.os.ms-windows.misc 0.81 0.79 0.80 204
comp.sys.ibm.pc.hardware 0.71 0.76 0.74 195
comp.sys.mac.hardware 0.87 0.82 0.84 195
comp.windows.x 0.84 0.87 0.86 204
misc.forsale 0.77 0.79 0.78 164
rec.autos 0.84 0.94 0.89 180
rec.motorcycles 0.88 0.94 0.91 173
rec.sport.baseball 0.94 0.90 0.92 217
rec.sport.hockey 0.86 0.98 0.91 178
sci.crypt 0.93 0.95 0.94 197
sci.electronics 0.83 0.78 0.81 199
sci.med 0.92 0.92 0.92 183
sci.space 0.91 0.93 0.92 207
soc.religion.christian 0.77 0.94 0.85 211
talk.politics.guns 0.81 0.91 0.86 208
talk.politics.mideast 0.93 0.93 0.93 200
talk.politics.misc 0.89 0.66 0.76 175
talk.religion.misc 0.88 0.34 0.49 124
accuracy 0.85 3770
macro avg 0.85 0.84 0.84 3770
weighted avg 0.85 0.85 0.84 3770
###Markdown
Generate a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
###Output
Number of categories: 20
###Markdown
Exploring the dataset
###Code
labels = news.target_names
pprint(labels)
# Sample articles and their labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18
###Code
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
###Output
_____no_output_____
###Markdown
Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
labelled_target = np.array([labels[t] for t in news.target])
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
X_train, X_test, y_train, y_test = train_test_split(texts, labelled_target, test_size=0.2, random_state=11)
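# Added sketch (not in the original notebook): check the resulting split sizes.
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))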
###Output
_____no_output_____
###Markdown
Training the machine learning model on the training data
###Code
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened?
###Code
# TF-IDF computed the IDF of every word in the corpus
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_ = classifier.named_steps['vectorizer'].idf_
len(feature_names)
for i in range(1000, 1042):
print(feature_names[i], ':', round(idf_[i], 2))
# It then transforms each document into a vector of vocabulary size, so each score is the TF-IDF
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
# And the naive Bayes model learns the correlation between each word and each category
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='alt.atheism', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data
###Code
y_pred = classifier.predict(X_test)
# Preview of the predicted targets
y_pred
# Preview of the actual targets
y_test
###Output
_____no_output_____
###Markdown
Building the classification report. For more information on precision, recall and the F1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
alt.atheism 0.92 0.83 0.87 172
comp.graphics 0.90 0.85 0.87 184
comp.os.ms-windows.misc 0.89 0.81 0.85 204
comp.sys.ibm.pc.hardware 0.75 0.83 0.79 195
comp.sys.mac.hardware 0.94 0.88 0.91 195
comp.windows.x 0.94 0.91 0.92 204
misc.forsale 0.84 0.79 0.82 164
rec.autos 0.88 0.93 0.90 180
rec.motorcycles 0.92 0.98 0.95 173
rec.sport.baseball 0.96 0.94 0.95 217
rec.sport.hockey 0.87 0.98 0.92 178
sci.crypt 0.84 0.99 0.91 197
sci.electronics 0.93 0.87 0.90 199
sci.med 0.95 0.98 0.96 183
sci.space 0.91 0.98 0.94 207
soc.religion.christian 0.71 0.96 0.82 211
talk.politics.guns 0.81 0.97 0.88 208
talk.politics.mideast 0.95 0.96 0.96 200
talk.politics.misc 0.96 0.62 0.76 175
talk.religion.misc 1.00 0.30 0.46 124
accuracy 0.88 3770
macro avg 0.89 0.87 0.87 3770
weighted avg 0.89 0.88 0.87 3770
###Markdown
Creating a confusion matrix
###Code
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Document classification. Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
import nltk
nltk.download('stopwords')
###Output
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
Load the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Sample articles and their labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Splitting the dataset into features and target (X, y) and into train and test sets More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Clean up the texts
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map the numeric targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
Training set size: 15076
Test set size: 3770
###Markdown
Train a machine learning model on the training data
###Code
# Define the model pipeline
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened? The TF-IDF vectorizer computes the IDF score of every word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of each term in the vocabulary
for i in range(0, 10):
print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF score
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
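# For contrast (illustrative sketch): the 10 words with the lowest IDF scores,
# i.e. the terms that appear in the largest share of documents.
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1))[:10]:
    print(word, round(score, 2))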
###Output
accomplished 6.69
accord 6.69
acknowledge 6.69
alabama 6.69
approval 6.69
atmospheric 6.69
bach 6.69
bills 6.69
boring 6.69
brunswick 6.69
click 6.69
cloud 6.69
communicate 6.69
compatibility 6.69
confuse 6.69
connectors 6.69
copying 6.69
counted 6.69
damned 6.69
definite 6.69
###Markdown
TF-IDF turns each document into a vector of the size of the vocabulary; each score is the TF-IDF value (term frequency in the document * IDF)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
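# Illustrative sketch: the terms with the highest TF-IDF weight in the first training document.
# (TfidfVectorizer also applies smoothing and L2 normalisation, so these values are rescaled tf * idf.)
first_doc = tmp.toarray()[0]
for i in first_doc.argsort()[::-1][:10]:
    if first_doc[i] > 0:
        print(feature_names[i], round(first_doc[i], 3))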
###Output
_____no_output_____
###Markdown
The naive Bayes model learns the correlation between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
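# Minimal sketch of how MultinomialNB scores a document: class log prior + tfidf vector . feature_log_prob_,
# then take the class with the highest score (this mirrors what predict() does internally).
nb = classifier.named_steps['classifier']
vec = classifier.named_steps['vectorizer'].transform([X_test[0]])
scores = np.asarray(vec @ nb.feature_log_prob_.T) + nb.class_log_prior_
print('Manual prediction  :', nb.classes_[scores.argmax()])
print('Pipeline prediction:', classifier.predict([X_test[0]])[0])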
###Output
_____no_output_____
###Markdown
This lets us discover the most contributive terms for a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predict the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
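# Illustrative sketch: the per-class probabilities for the first test document
# (predict() simply returns the class with the highest probability).
probas = classifier.predict_proba(X_test[:1])[0]
classes = classifier.named_steps['classifier'].classes_
for label, proba in sorted(zip(classes, probas), key=operator.itemgetter(1), reverse=True)[:5]:
    print(label, round(proba, 3))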
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluate the model Generate a classification report For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
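# Quick cross-check (illustrative): overall accuracy computed directly from the predictions,
# which should match the "accuracy" line of the report above.
print('Accuracy:', round((y_pred == y_test).mean(), 3))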
###Output
precision recall f1-score support
alt.atheism 0.90 0.80 0.84 172
comp.graphics 0.72 0.77 0.75 184
comp.os.ms-windows.misc 0.81 0.79 0.80 204
comp.sys.ibm.pc.hardware 0.71 0.76 0.74 195
comp.sys.mac.hardware 0.87 0.82 0.84 195
comp.windows.x 0.84 0.87 0.86 204
misc.forsale 0.77 0.79 0.78 164
rec.autos 0.84 0.94 0.89 180
rec.motorcycles 0.88 0.94 0.91 173
rec.sport.baseball 0.94 0.90 0.92 217
rec.sport.hockey 0.86 0.98 0.91 178
sci.crypt 0.93 0.95 0.94 197
sci.electronics 0.83 0.78 0.81 199
sci.med 0.92 0.92 0.92 183
sci.space 0.91 0.93 0.92 207
soc.religion.christian 0.77 0.94 0.85 211
talk.politics.guns 0.81 0.91 0.86 208
talk.politics.mideast 0.93 0.93 0.93 200
talk.politics.misc 0.89 0.66 0.76 175
talk.religion.misc 0.88 0.34 0.49 124
accuracy 0.85 3770
macro avg 0.85 0.84 0.84 3770
weighted avg 0.85 0.85 0.84 3770
###Markdown
Generate a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
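# Illustrative sketch: the most frequent confusions (true label -> predicted label),
# read from the off-diagonal cells of the confusion matrix.
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred, labels=labels)
pairs = [(labels[i], labels[j], cm[i, j]) for i in range(len(labels)) for j in range(len(labels)) if i != j]
for true_label, pred_label, count in sorted(pairs, key=operator.itemgetter(2), reverse=True)[:5]:
    print(f'{true_label} -> {pred_label}: {count}')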
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
###Output
Number of categories: 20
###Markdown
Exploring the dataset
###Code
labels = news.target_names
pprint(labels)
# Example articles and their labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn For more information: Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18
###Code
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
classifier.named_steps
###Output
_____no_output_____
###Markdown
Splitting the dataset into features and target (X, y) and into train and test sets More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
labelled_target = np.array([labels[t] for t in news.target])
labelled_target
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
texts[0]
X_train, X_test, y_train, y_test = train_test_split(texts, labelled_target, test_size=0.2, random_state=11)
len(X_train)
###Output
_____no_output_____
###Markdown
Training the machine learning model on the training data
###Code
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened?
###Code
# The TF-IDF vectorizer computed the IDF of every word in the corpus
feature_names = classifier.named_steps['vectorizer'].get_feature_names()
idf_ = classifier.named_steps['vectorizer'].idf_
len(feature_names)
# The vocabulary therefore has 5143 terms. If we represent the documents in a vector space, we are working in a 5143-dimensional space.
for i in range(1000, 1042):
print(feature_names[i], ':', round(idf_[i], 2))
# It then turns each document into a vector of the size of the vocabulary, where each score is the TF-IDF value
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names())
# This shows the vectorized corpus. The code is not needed for the exercise itself; it is just here to show what the transformation produces.
# The naive Bayes model then learns the correlation between each word and each category
pd.DataFrame(classifier.named_steps['classifier'].coef_, index=labels, columns=feature_names).T
## coef_ mirrors feature_log_prob_: the log probability of a word given a class.
pd.DataFrame(classifier.named_steps['classifier'].coef_, index=labels, columns=feature_names).T.sort_values(by='alt.atheism', ascending=False).head(20)
## Given a new text, the model combines these per-word probabilities to score each class and picks the most likely one.
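# Illustrative sketch: classifying a brand-new text with the trained pipeline
# (the sentence below is an invented example, not taken from the dataset).
new_text = "the goalie made a great save during the hockey game"
print(classifier.predict([new_text]))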
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data
###Code
len(X_test)
y_pred = classifier.predict(X_test)
# Preview of the predicted targets
y_pred
# Preview of the actual targets
y_test
###Output
_____no_output_____
###Markdown
Building the classification report For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
# This report gathers the typical metrics used to evaluate a classification model
###Output
_____no_output_____
###Markdown
Creating a confusion matrix
###Code
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
# Here we see the actual categories against the predicted categories.
# We can see where the model goes wrong and easily explain it; for instance, it is clear why the model confuses Christianity and atheism.
# Inspecting the confusion matrix is therefore a good way to analyse the quality of a model and evaluate its performance (is there a recurring confusion pattern?).
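# Illustrative sketch: recall per class, i.e. the diagonal of the row-normalised confusion matrix;
# the classes with the lowest recall are the ones the model misses most often.
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred, labels=labels)
recalls = cm.diagonal() / cm.sum(axis=1)
for label, recall in sorted(zip(labels, recalls), key=lambda x: x[1])[:5]:
    print(label, round(recall, 2))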
###Output
_____no_output_____
###Markdown
Document classification Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
import nltk
nltk.download('stopwords')
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
###Output
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
Load the 20 newsgroups dataset For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Example articles and their labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Creating a machine learning model with Scikit-Learn For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Splitting the dataset into features and target (X, y) and into train and test sets More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Clean up the texts
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map the numeric targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
Training set size: 15076
Test set size: 3770
###Markdown
Train a machine learning model on the training data
###Code
# Define the model pipeline
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened? The TF-IDF vectorizer computes the IDF score of every word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size (total number of distinct words)
len(feature_names)
# IDF score of each term in the vocabulary (the higher the score, the rarer the word)
for i in range(0, 10):
print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF score
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
###Output
accomplished 6.69
accord 6.69
acknowledge 6.69
alabama 6.69
approval 6.69
atmospheric 6.69
bach 6.69
bills 6.69
boring 6.69
brunswick 6.69
click 6.69
cloud 6.69
communicate 6.69
compatibility 6.69
confuse 6.69
connectors 6.69
copying 6.69
counted 6.69
damned 6.69
definite 6.69
###Markdown
TF-IDF turns each document into a vector of the size of the vocabulary; each score is the TF-IDF value (term frequency in the document * IDF)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
###Output
_____no_output_____
###Markdown
The naive Bayes model learns the correlation between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
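# Illustrative sketch: the words that best separate two close classes, measured as the
# difference of their per-class log probabilities (positive = more typical of rec.sport.hockey).
log_probs = pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
print((log_probs['rec.sport.hockey'] - log_probs['rec.sport.baseball']).sort_values(ascending=False).head(10))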
###Output
_____no_output_____
###Markdown
This lets us discover the most contributive terms for a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='alt.atheism', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predict the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
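# Illustrative sketch: a quick look at a few misclassified test documents.
errors = np.where(y_pred != y_test)[0]
print('Misclassified documents:', len(errors))
for idx in errors[:3]:
    print(f'true={y_test[idx]} / predicted={y_pred[idx]}:', X_test[idx][:100], '...')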
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluate the model Generate a classification report For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
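# Illustrative cross-check: macro and weighted F1, matching the last lines of the report above.
from sklearn.metrics import f1_score
print('Macro F1   :', round(f1_score(y_test, y_pred, average='macro'), 3))
print('Weighted F1:', round(f1_score(y_test, y_pred, average='weighted'), 3))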
###Output
precision recall f1-score support
alt.atheism 0.90 0.80 0.84 172
comp.graphics 0.72 0.77 0.75 184
comp.os.ms-windows.misc 0.81 0.79 0.80 204
comp.sys.ibm.pc.hardware 0.71 0.76 0.74 195
comp.sys.mac.hardware 0.87 0.82 0.84 195
comp.windows.x 0.84 0.87 0.86 204
misc.forsale 0.77 0.79 0.78 164
rec.autos 0.84 0.94 0.89 180
rec.motorcycles 0.88 0.94 0.91 173
rec.sport.baseball 0.94 0.90 0.92 217
rec.sport.hockey 0.86 0.98 0.91 178
sci.crypt 0.93 0.95 0.94 197
sci.electronics 0.83 0.78 0.81 199
sci.med 0.92 0.92 0.92 183
sci.space 0.91 0.93 0.92 207
soc.religion.christian 0.77 0.94 0.85 211
talk.politics.guns 0.81 0.91 0.86 208
talk.politics.mideast 0.93 0.93 0.93 200
talk.politics.misc 0.89 0.66 0.76 175
talk.religion.misc 0.88 0.34 0.49 124
accuracy 0.85 3770
macro avg 0.85 0.84 0.84 3770
weighted avg 0.85 0.85 0.84 3770
###Markdown
Generate a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
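# Illustrative variant (assuming scikitplot's normalize flag is available in this version):
# the same matrix with each row normalised, which makes per-class error rates easier to compare.
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90, normalize=True)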
###Output
_____no_output_____
###Markdown
Document classification Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
###Output
_____no_output_____
###Markdown
Load the 20 newsgroups dataset For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Example articles and their labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Building a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Split the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Clean the texts
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map the targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
Training set size: 15076
Test set size: 3770
###Markdown
Train a machine learning model on the training data
###Code
# Define the model pipeline
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened? The TF-IDF vectorizer computes the IDF score of each word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of each term in the vocabulary
for i in range(0, 10):
print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF scores
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
###Output
_____no_output_____
###Markdown
TF-IDF then transforms each document into a vector the size of the vocabulary, where each value is the TF-IDF score (term frequency in the document * IDF)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
###Output
_____no_output_____
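###Markdown
To make that score more concrete, here is a small illustrative sketch (not part of the original notebook) that recomputes scikit-learn's smoothed IDF by hand for one term and compares it with the vectorizer's idf_; the full TF-IDF value is then the term count times this IDF, L2-normalised per document. The toy corpus and variable names are assumptions for illustration only.
###Code
# Illustrative sketch (assumed toy corpus): recompute scikit-learn's smoothed IDF by hand
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
toy_corpus = ["the cat sat", "the cat sat on the mat"]
toy_vec = TfidfVectorizer()
toy_matrix = toy_vec.fit_transform(toy_corpus)
# smoothed IDF used by default: idf(t) = ln((1 + n_docs) / (1 + df(t))) + 1
n_docs = len(toy_corpus)
df_cat = 2  # "cat" appears in both documents
print("manual idf('cat') :", np.log((1 + n_docs) / (1 + df_cat)) + 1)
print("vectorizer idf_ :", toy_vec.idf_[toy_vec.vocabulary_['cat']])
###Output
_____no_output_____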
###Markdown
The naive Bayes model learns the association between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
###Output
_____no_output_____
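###Markdown
As a sanity check (an added sketch, not in the original notebook), the class chosen by MultinomialNB can be recovered by hand from these learned quantities: each class c scores a document x as log P(c) + sum_i x_i * log P(w_i | c), and the prediction is the argmax. The cell below assumes the classifier, X_test and labels defined above.
###Code
# Illustrative sketch: recompute MultinomialNB predictions from its learned log probabilities
import numpy as np
nb = classifier.named_steps['classifier']
X_small = classifier.named_steps['vectorizer'].transform(X_test[:5])
# joint log-likelihood: one score per (document, class)
jll = np.asarray(X_small @ nb.feature_log_prob_.T) + nb.class_log_prior_
manual_pred = nb.classes_[jll.argmax(axis=1)]
print(manual_pred)
print(classifier.predict(X_test[:5]))
###Output
_____no_output_____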
###Markdown
This makes it possible to find the most contributive terms for a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predict the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluate the model Generate a classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
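###Markdown
For reference, here is a small added sketch (not part of the original notebook) showing how the precision, recall and f1-score of a single class in the report can be recomputed from the raw predictions; it assumes y_test, y_pred and labels from the cells above.
###Code
# Illustrative sketch: recompute precision/recall/f1 for one class by hand
import numpy as np
cls = labels[0]  # pick any class name appearing in the report
tp = np.sum((y_pred == cls) & (y_test == cls))
fp = np.sum((y_pred == cls) & (y_test != cls))
fn = np.sum((y_pred != cls) & (y_test == cls))
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
print(cls, round(precision, 2), round(recall, 2), round(f1, 2))
###Output
_____no_output_____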
###Markdown
Generate a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Loading the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
###Output
Number of categories: 20
###Markdown
Exploring the dataset
###Code
labels = news.target_names
pprint(labels)
# Sample articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
===== rec.sport.hockey =====
From: Mamatha Devineni Ratnam <[email protected]> Subject: Pens fans reactions Organization: Post Office, Carnegie Mellon, Pittsburgh, PA Lines: 12 NNTP-Posting-Host: po4.andrew.cmu.edu I am sure some bashers of Pens fans are pretty confused about the lack of any kind of posts about the recent Pens massacre of the Devils. Actually, I am bit puzzled too and a bit relieved. However, I am going to put an end to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they are killing those Devils worse than I thought. Jagr just showed you why he is much better than his regular season stats. He is also a lot fo fun to watch in the playoffs. Bowman should let JAgr have a lot of fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final regular season game. PENS RULE!!!
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Matthew B Lawson) Subject: Which high-performance VLB video card? Summary: Seek recommendations for VLB video card Nntp-Posting-Host: midway.ecn.uoknor.edu Organization: Engineering Computer Network, University of Oklahoma, Norman, OK, USA Keywords: orchid, stealth, vlb Lines: 21 My brother is in the market for a high-performance video card that supports VESA local bus with 1-2MB RAM. Does anyone have suggestions/ideas on: - Diamond Stealth Pro Local Bus - Orchid Farenheit 1280 - ATI Graphics Ultra Pro - Any other high-performance VLB card Please post or email. Thank you! - Matt -- | Matthew B. Lawson <------------> ([email protected]) | --+-- "Now I, Nebuchadnezzar, praise and exalt and glorify the King --+-- | of heaven, because everything he does is right and all his ways | | are just." - Nebuchadnezzar, king of Babylon, 562 B.C. |
===== talk.politics.mideast =====
From: [email protected] (Hilmi Eren) Subject: Re: ARMENIA SAYS IT COULD SHOOT DOWN TURKISH PLANES (Henrik) Lines: 95 Nntp-Posting-Host: viktoria.dsv.su.se Reply-To: [email protected] (Hilmi Eren) Organization: Dept. of Computer and Systems Sciences, Stockholm University |>The student of "regional killings" alias Davidian (not the Davidian religios sect) writes: |>Greater Armenia would stretch from Karabakh, to the Black Sea, to the |>Mediterranean, so if you use the term "Greater Armenia" use it with care. Finally you said what you dream about. Mediterranean???? That was new.... The area will be "greater" after some years, like your "holocaust" numbers...... |>It has always been up to the Azeris to end their announced winning of Karabakh |>by removing the Armenians! When the president of Azerbaijan, Elchibey, came to |>power last year, he announced he would be be "swimming in Lake Sevan [in |>Armeniaxn] by July". ***** Is't July in USA now????? Here in Sweden it's April and still cold. Or have you changed your calendar??? |>Well, he was wrong! If Elchibey is going to shell the |>Armenians of Karabakh from Aghdam, his people will pay the price! If Elchibey **************** |>is going to shell Karabakh from Fizuli his people will pay the price! If ****************** |>Elchibey thinks he can get away with bombing Armenia from the hills of |>Kelbajar, his people will pay the price. *************** NOTHING OF THE MENTIONED IS TRUE, BUT LET SAY IT's TRUE. SHALL THE AZERI WOMEN AND CHILDREN GOING TO PAY THE PRICE WITH ************** BEING RAPED, KILLED AND TORTURED BY THE ARMENIANS?????????? HAVE YOU HEARDED SOMETHING CALLED: "GENEVA CONVENTION"??????? YOU FACIST!!!!! Ohhh i forgot, this is how Armenians fight, nobody has forgot you killings, rapings and torture against the Kurds and Turks once upon a time! |>And anyway, this "60 |>Kurd refugee" story, as have other stories, are simple fabrications sourced in |>Baku, modified in Ankara. Other examples of this are Armenia has no border |>with Iran, and the ridiculous story of the "intercepting" of Armenian military |>conversations as appeared in the New York Times supposedly translated by |>somebody unknown, from Armenian into Azeri Turkish, submitted by an unnamed |>"special correspondent" to the NY Times from Baku. Real accurate! Ohhhh so swedish RedCross workers do lie they too? What ever you say "regional killer", if you don't like the person then shoot him that's your policy.....l |>[HE] Search Turkish planes? You don't know what you are talking about.<------- |>[HE] since it's content is announced to be weapons? i i |>Well, big mouth Ozal said military weapons are being provided to Azerbaijan i |>from Turkey, yet Demirel and others say no. No wonder you are so confused! i i i Confused????? i You facist when you delete text don't change it, i wrote: i i Search Turkish planes? You don't know what you are talking about. i Turkey's government has announced that it's giving weapons <-----------i to Azerbadjan since Armenia started to attack Azerbadjan it self, not the Karabag province. So why search a plane for weapons since it's content is announced to be weapons? If there is one that's confused then that's you! We have the right (and we do) to give weapons to the Azeris, since Armenians started the fight in Azerbadjan! |>You are correct, all Turkish planes should be simply shot down! Nice, slow |>moving air transports! Shoot down with what? Armenian bread and butter? Or the arms and personel of the Russian army? Hilmi Eren Stockholm University
===== comp.sys.ibm.pc.hardware =====
From: [email protected] (Guy Dawson) Subject: Re: IDE vs SCSI, DMA and detach Originator: [email protected] Organization: IBM Austin Lines: 60 In article <[email protected]>, [email protected] (Wayne Smith) writes: > In article <[email protected]> [email protected] (Richard Krehbiel) writes: > >> Can anyone explain in fairly simple terms why, if I get OS/2, I might > >> need an SCSI controler rather than an IDE. Will performance suffer that > >> much? For a 200MB or so drive? If I don't have a tape drive or CD-ROM? > >> Any help would be appreciated. > > >So, when you've got multi-tasking, you want to increase performance by > >increasing the amount of overlapping you do. > > > >One way is with DMA or bus mastering. Either of these make it > >possible for I/O devices to move their data into and out of memory > >without interrupting the CPU. The alternative is for the CPU to move > >the data. There are several SCSI interface cards that allow DMA and > >bus mastering. > ^^^^^^^^^^^^ > How do you do bus-mastering on the ISA bus? > > >IDE, however, is defined by the standard AT interface > >created for the IBM PC AT, which requires the CPU to move all the data > >bytes, with no DMA. > > If we're talking ISA (AT) bus here, then you can only have 1 DMA channel > active at any one time, presumably transferring data from a single device. > So even though you can have at least 7 devices on a SCSI bus, explain how > all 7 of those devices can to DMA transfers through a single SCSI card > to the ISA-AT bus at the same time. Think! It's the SCSI card doing the DMA transfers NOT the disks... The SCSI card can do DMA transfers containing data from any of the SCSI devices it is attached when it wants to. An important feature of SCSI is the ability to detach a device. This frees the SCSI bus for other devices. This is typically used in a multi-tasking OS to start transfers on several devices. While each device is seeking the data the bus is free for other commands and data transfers. When the devices are ready to transfer the data they can aquire the bus and send the data. On an IDE bus when you start a transfer the bus is busy until the disk has seeked the data and transfered it. This is typically a 10-20ms second lock out for other processes wanting the bus irrespective of transfer time. > > Also, I'm still trying to track down a copy of IBM's AT reference book, > but from their PC technical manual (page 2-93): > > "The (FDD) adapter is buffered on the I.O bus and uses the System Board > direct memory access (DMA) for record data transfers." > I expect to see something similar for the PC-AT HDD adapter. > So the lowly low-density original PC FDD card used DMA and the PC-AT > HDD controller doesn't!?!? That makes real sense. -- -- ----------------------------------------------------------------------------- Guy Dawson - Hoskyns Group Plc. [email protected] Tel Hoskyns UK - 71 251 2128 [email protected] Tel IBM Austin USA - 512 838 3377
===== comp.sys.mac.hardware =====
From: Alexander Samuel McDiarmid <[email protected]> Subject: driver ?? Organization: Sophomore, Mechanical Engineering, Carnegie Mellon, Pittsburgh, PA Lines: 15 NNTP-Posting-Host: po4.andrew.cmu.edu 1) I have an old Jasmine drive which I cannot use with my new system. My understanding is that I have to upsate the driver with a more modern one in order to gain compatability with system 7.0.1. does anyone know of an inexpensive program to do this? ( I have seen formatters for <$20 buit have no idea if they will work) 2) I have another ancient device, this one a tape drive for which the back utility freezes the system if I try to use it. THe drive is a jasmine direct tape (bought used for $150 w/ 6 tapes, techmar mechanism). Essentially I have the same question as above, anyone know of an inexpensive beckup utility I can use with system 7.0.1 all help and advice appriciated.
===== sci.electronics =====
From: [email protected] (Stephen Tell) Subject: Re: subliminal message flashing on TV Organization: The University of North Carolina at Chapel Hill Lines: 25 NNTP-Posting-Host: rukbat.cs.unc.edu In article <[email protected]> [email protected] (Bob Myers) writes: >> Hi. I was doing research on subliminal suggestion for a psychology >> paper, and I read that one researcher flashed hidden messages on the >> TV screen at 1/200ths of a second. Is that possible? > Might >even be a vector ("strokewriter") display, in which case the lower limit >on image time is anyone's guess (and is probably phosphor-persistence limited). Back in high school I worked as a lab assistant for a bunch of experimental psychologists at Bell Labs. When they were doing visual perception and memory experiments, they used vector-type displays, with 1-millisecond refresh rates common. So your case of 1/200th sec is quite practical, and the experimenters were probably sure that it was 5 milliseconds, not 4 or 6 either. >Bob Myers KC0EW >[email protected] Steve -- Steve Tell [email protected] H: 919 968 1792 | #5L Estes Park apts UNC Chapel Hill Computer Science W: 919 962 1845 | Carrboro NC 27510 Engineering is a _lot_ like art: Some circuits are like lyric poems, some are like army manuals, and some are like The Hitchhiker's Guide to the Galaxy..
===== comp.sys.mac.hardware =====
From: [email protected] (Louis Paul Adams) Subject: Re: Number for Applied Engineering Organization: Texas A&M University, College Station Lines: 9 NNTP-Posting-Host: tamuts.tamu.edu >Anyone have a phone number for Applied Engineering so I can give them >a call? AE is in Dallas...try 214/241-6060 or 214/241-0055. Tech support may be on their own line, but one of these should get you started. Good luck!
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Atlanta Hockey Hell!! Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 24 In article <[email protected]> Mamatha Devineni Ratnam <[email protected]> writes: > >Well, it's not that bad. But I am still pretty pissed of at the >local ABC coverage. They cut off the first half hour of coverage by playing [stuff deleted] Ok, here's the solution to your problem. Move to Canada. Yesterday I was able to watch FOUR games...the NJ-PITT at 1:00 on ABC, LA-CAL at 3:00 (CBC), BUFF-BOS at 7:00 (TSN and FOX), and MON-QUE at 7:30 (CBC). I think that if each series goes its max I could be watching hockey playoffs for 40-some odd consecutive nights (I haven't counted so that's a pure guess). I have two tv's in my house, and I set them up side-by-side to watch MON-QUE and keep an eye on BOS-BUFF at the same time. I did the same for the two afternoon games. Btw, those ABC commentaters were great! I was quite impressed; they seemed to know that their audience wasn't likely to be well-schooled in hockey lore and they did an excellent job. They were quite impartial also, IMO. [email protected] (not suffering from a shortage of hockey here)
===== rec.sport.hockey =====
From: [email protected] (Deepak Chhabra) Subject: Re: Goalie masks Nntp-Posting-Host: stpl.ists.ca Organization: Solar Terresterial Physics Laboratory, ISTS Lines: 15 In article <[email protected]> [email protected] (Valerie S. Hammerl) writes: >>[...] and I'll give Fuhr's new one an honourable mention, although I haven't >>seen it closely yet (it looked good from a distance!). >This is the new Buffalo one, the second since he's been with the >Sabres? I recall a price tag of over $700 just for the paint job on >that mask, and a total price of almost $1500. Ouch. Yeah, it's the second one. And I believe that price too. I've been trying to get a good look at it on the Bruin-Sabre telecasts, and wow! does it ever look good. Whoever did that paint job knew what they were doing. And given Fuhr's play since he got it, I bet the Bruins are wishing he didn't have it:) --
===== talk.religion.misc =====
From: [email protected] (Ken Arromdee) Subject: Re: Christians above the Law? was Clarification of pe Organization: Johns Hopkins University CS Dept. Lines: 13 In article <[email protected]> [email protected] (Darius_Lecointe) writes: >>Jesus was a JEW, not a Christian. If a Christian means someone who believes in the divinity of Jesus, it is safe to say that Jesus was a Christian. -- "On the first day after Christmas my truelove served to me... Leftover Turkey! On the second day after Christmas my truelove served to me... Turkey Casserole that she made from Leftover Turkey. [days 3-4 deleted] ... Flaming Turkey Wings! ... -- Pizza Hut commercial (and M*tlu/A*gic bait) Ken Arromdee ([email protected])
###Markdown
Building a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18
###Code
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
###Output
_____no_output_____
###Markdown
Splitting the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
labelled_target = np.array([labels[t] for t in news.target])
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
X_train, X_test, y_train, y_test = train_test_split(texts, labelled_target, test_size=0.2, random_state=11)
###Output
_____no_output_____
###Markdown
Training the machine learning model on the training data
###Code
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened?
###Code
# The TF-IDF vectorizer computed the IDF of each word in the corpus
feature_names = classifier.named_steps['vectorizer'].get_feature_names()
idf_ = classifier.named_steps['vectorizer'].idf_
len(feature_names)
for i in range(1000, 1042):
print(feature_names[i], ':', round(idf_[i], 2))
# It then transforms each document into a vector the size of the vocabulary, where each value is the TF-IDF score
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names())
# The naive Bayes model learns the association between each word and each category
pd.DataFrame(classifier.named_steps['classifier'].coef_, index=labels, columns=feature_names).T
pd.DataFrame(classifier.named_steps['classifier'].coef_, index=labels, columns=feature_names).T.sort_values(by='alt.atheism', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predicting the targets of the test data
###Code
y_pred = classifier.predict(X_test)
# Preview of the predicted targets
y_pred
# Preview of the actual targets
y_test
###Output
_____no_output_____
###Markdown
Building the classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
alt.atheism 0.90 0.80 0.84 172
comp.graphics 0.72 0.77 0.75 184
comp.os.ms-windows.misc 0.81 0.79 0.80 204
comp.sys.ibm.pc.hardware 0.71 0.76 0.74 195
comp.sys.mac.hardware 0.87 0.82 0.84 195
comp.windows.x 0.84 0.87 0.86 204
misc.forsale 0.77 0.79 0.78 164
rec.autos 0.84 0.94 0.89 180
rec.motorcycles 0.88 0.94 0.91 173
rec.sport.baseball 0.94 0.90 0.92 217
rec.sport.hockey 0.86 0.98 0.91 178
sci.crypt 0.93 0.95 0.94 197
sci.electronics 0.83 0.78 0.81 199
sci.med 0.92 0.92 0.92 183
sci.space 0.91 0.93 0.92 207
soc.religion.christian 0.77 0.94 0.85 211
talk.politics.guns 0.81 0.91 0.86 208
talk.politics.mideast 0.93 0.93 0.93 200
talk.politics.misc 0.89 0.66 0.76 175
talk.religion.misc 0.88 0.34 0.49 124
accuracy 0.85 3770
macro avg 0.85 0.84 0.84 3770
weighted avg 0.85 0.85 0.84 3770
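###Markdown
A note on the last two rows of the report (added for clarity): "macro avg" is the unweighted mean of the per-class scores, while "weighted avg" weights each class by its support. The sketch below recomputes both averages for the f1-score directly from the predictions; it assumes y_test and y_pred from the cells above.
###Code
# Illustrative sketch: recompute macro and weighted average f1 from the predictions
from sklearn.metrics import f1_score
print("macro f1 :", round(f1_score(y_test, y_pred, average='macro'), 2))
print("weighted f1:", round(f1_score(y_test, y_pred, average='weighted'), 2))
###Output
_____no_output_____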
###Markdown
Creating a confusion matrix
###Code
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____
###Markdown
Document classification Imports
###Code
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import seaborn as sn
from pprint import pprint
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
from scikitplot.metrics import plot_confusion_matrix
import pandas as pd
import re
import operator
import nltk
nltk.download('stopwords')
###Output
_____no_output_____
###Markdown
Load the 20 newsgroups dataset. For more information: https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
###Code
news = fetch_20newsgroups(subset='all')
print("Number of articles: " + str(len(news.data)))
print("Number of categories: " + str(len(news.target_names)))
labels = news.target_names
print(labels)
# Sample articles and labels
for i, article in enumerate(news.data[:10]):
print(f'===== {labels[news.target[i]]} =====')
print(article.replace('\n', ' '), '\n')
###Output
_____no_output_____
###Markdown
Building a machine learning model with Scikit-Learn. For more information: - Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html - TfidfVectorizer: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html - MultinomialNB: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html A blog post explaining TF-IDF: - https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3 A blog post explaining naive Bayes: - https://towardsdatascience.com/naive-bayes-classifier-explained-54593abe6e18 Split the dataset into features and target (X, y) and into train and test sets. More information: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
###Code
# Clean the texts
texts = [re.sub('[^a-z]+', ' ', t.lower()).strip() for t in news.data]
# Map the targets to their label names
targets = np.array([labels[t] for t in news.target])
X_train, X_test, y_train, y_test = train_test_split(texts, targets, test_size=0.2, random_state=11)
print("Training set size:", len(X_train))
print("Test set size:", len(X_test))
###Output
_____no_output_____
###Markdown
Train a machine learning model on the training data
###Code
# Define the model pipeline
classifier = Pipeline([
('vectorizer', TfidfVectorizer(stop_words=stopwords.words('english'), min_df=50, max_df=0.5)),
('classifier', MultinomialNB()),
])
# Train the model
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
What happened? The TF-IDF vectorizer computes the IDF score of each word in the corpus
###Code
feature_names = classifier.named_steps['vectorizer'].get_feature_names_out()
idf_scores = classifier.named_steps['vectorizer'].idf_
# Vocabulary size
len(feature_names)
# IDF score of each term in the vocabulary
for i in range(0, 10):
print(feature_names[i], ':', round(idf_scores[i], 2))
# The 20 words with the highest IDF scores
for word, score in sorted(zip(feature_names, idf_scores), key=operator.itemgetter(1), reverse=True)[:20]:
print(word, round(score, 2))
###Output
_____no_output_____
###Markdown
TF-IDF then transforms each document into a vector the size of the vocabulary, where each value is the TF-IDF score (term frequency in the document * IDF)
###Code
tmp = classifier.named_steps['vectorizer'].transform(X_train[:10])
pd.DataFrame(tmp.toarray(), columns=classifier.named_steps['vectorizer'].get_feature_names_out())
###Output
_____no_output_____
###Markdown
The naive Bayes model learns the association between each word and each category
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T
###Output
_____no_output_____
###Markdown
This makes it possible to find the most contributive terms for a given label
###Code
pd.DataFrame(classifier.named_steps['classifier'].feature_log_prob_, index=labels, columns=feature_names).T.sort_values(by='comp.graphics', ascending=False).head(20)
###Output
_____no_output_____
###Markdown
Predict the targets of the test data with the trained model
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Preview of the predicted targets
###Code
y_pred[:20]
###Output
_____no_output_____
###Markdown
Preview of the actual targets
###Code
y_test[:20]
###Output
_____no_output_____
###Markdown
Evaluate the model Generate a classification report. For more information on precision, recall and the f1-score: https://fr.wikipedia.org/wiki/Pr%C3%A9cision_et_rappel
###Code
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
Generate a confusion matrix
###Code
plot_confusion_matrix(y_test, y_pred, figsize=(10, 10), labels=labels, x_tick_rotation=90)
###Output
_____no_output_____ |
misc/crop_and_save_all_data.ipynb | ###Markdown
Purpose:Run this notebook to crop data and save to:~/MICCAI_BraTS_2019_Data_Training/MICCAI_BraTS_2019_Data_Training/cropped_hgg
###Code
import utils.hgg_utils as hu
import nibabel as nib
from tqdm.notebook import tqdm
"""
LAYERS_TO_CROP refers to the number of outer layers of pixels to be cropped from the image.
For example if layers_to_crop is set to 2:
Input (6x6): Output (2x2, - denotes cropped pixel):
123456 ------
123456 ------
123456 --34--
123456 --34--
123456 ------
123456 ------
"""
LAYERS_TO_CROP = 16
###Output
_____no_output_____
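###Markdown
The cropping itself is just array slicing. Below is a small added sketch (not from the original notebook) reproducing the 6x6 example from the docstring with layers_to_crop = 2, using plain NumPy.
###Code
# Illustrative sketch: crop the outer 2 layers of a 6x6 array, as in the docstring example
import numpy as np
layers = 2
demo = np.arange(1, 7) * np.ones((6, 1), dtype=int)  # each row is 1..6
cropped = demo[layers:-layers, layers:-layers]
print(cropped)  # expected: [[3 4], [3 4]]
###Output
_____no_output_____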
###Markdown
Function to save the cropped data. Code adapted from Lucas' code
###Code
def save_cropped_data(tensor, affines_list, mod_paths, destination):
patient_paths = [x.parent.stem for x in mod_paths]
mods = [x.name for x in mod_paths]
for modality in range(tensor.shape[-1]):
new_file_name = "cropped_" + str(mods[modality])
new_patient_folder = destination.joinpath(patient_paths[modality])
if not new_patient_folder.exists():
new_patient_folder.mkdir()
new_dest = new_patient_folder.joinpath(new_file_name)
a = nib.Nifti1Image(tensor[:, :, :, modality], affine=affines_list[modality])
nib.save(a, new_dest)
###Output
_____no_output_____
###Markdown
Function to crop the patient tensor
###Code
def crop_patient_tensor(tensor):
return tensor[LAYERS_TO_CROP : -LAYERS_TO_CROP, LAYERS_TO_CROP : -LAYERS_TO_CROP, :, :]
###Output
_____no_output_____
###Markdown
Crop and save the patient data
###Code
# Define name of folder to save data to
cropped_hgg_directory = hu.get_hgg_paths().parent.joinpath('cropped_hgg')
# Get paths to all patient folders
all_patient_paths = hu.get_each_hgg_folder()
# Print path to directory where data will be saved
print("Cropped slices will be saved in directory: ")
print(cropped_hgg_directory)
# Check to see if directory folder already exists
# before creating one.
if not cropped_hgg_directory.exists():
cropped_hgg_directory.mkdir()
# Iterate through each patient
# Load patient tensor
# Crop tensor
# Save tensor
for patient in tqdm(all_patient_paths):
X, affines, paths = hu.get_a_multimodal_tensor(patient)
cropped_tensor = crop_patient_tensor(X)
save_cropped_data(cropped_tensor, affines, paths, cropped_hgg_directory)
###Output
_____no_output_____ |
titanic_data_operation/05_accuracy_logistic_regression.ipynb | ###Markdown
Titanic survival - logistic regression model
###Code
#Load modules
import numpy as np
import pandas as pd
# Import machine learning methods
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
data = pd.read_csv('C:/t_data/processed_data.csv')
# Make all data 'float' type
data = data.astype(float)
#Divide into X (features) and y (labels)
X = data.drop('Survived',axis=1) # X = all 'data' except the 'survived' column
y = data['Survived'] # y = 'survived' column from 'data'
#Divide into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25)
###Output
_____no_output_____
###Markdown
Standardise data
###Code
def standardise_data(X_train, X_test):
# Initialise a new scaling object for normalising input data
sc = StandardScaler()
# Set up the scaler just on the training set
sc.fit(X_train)
# Apply the scaler to the training and test sets
train_std=sc.transform(X_train)
test_std=sc.transform(X_test)
return train_std, test_std
X_train_std, X_test_std = standardise_data(X_train, X_test)
#Fit logistic regression model
model = LogisticRegression(solver='lbfgs')
model.fit(X_train_std,y_train)
# Predict training and test set labels
y_pred_train = model.predict(X_train_std)
y_pred_test = model.predict(X_test_std)
#Calculate accuracy
def calculate_accuracy(observed, predicted):
"""
Calculates a range of accuracy scores from observed and predicted classes.
Takes two list or NumPy arrays (observed class values, and predicted class
values), and returns a dictionary of results.
1) observed positive rate: proportion of observed cases that are +ve
2) Predicted positive rate: proportion of predicted cases that are +ve
3) observed negative rate: proportion of observed cases that are -ve
4) Predicted negative rate: proportion of predicted cases that are -ve
5) accuracy: proportion of predicted results that are correct
6) precision: proportion of predicted +ve that are correct
7) recall: proportion of true +ve correctly identified
8) f1: harmonic mean of precision and recall
9) sensitivity: Same as recall
10) specificity: Proportion of true -ve identified:
11) positive likelihood: increased probability of true +ve if test +ve
12) negative likelihood: reduced probability of true +ve if test -ve
13) false positive rate: proportion of false +ves in true -ve patients
14) false negative rate: proportion of false -ves in true +ve patients
15) true positive rate: Same as recall
16) true negative rate
17) positive predictive value: chance of true +ve if test +ve
18) negative predictive value: chance of true -ve if test -ve
"""
# Converts list to NumPy arrays
if type(observed) == list:
observed = np.array(observed)
if type(predicted) == list:
predicted = np.array(predicted)
# Calculate accuracy scores
observed_positives = observed == 1
observed_negatives = observed == 0
predicted_positives = predicted == 1
predicted_negatives = predicted == 0
true_positives = (predicted_positives == 1) & (observed_positives == 1)
false_positives = (predicted_positives == 1) & (observed_positives == 0)
true_negatives = (predicted_negatives == 1) & (observed_negatives == 1)
accuracy = np.mean(predicted == observed)
precision = (np.sum(true_positives) /
(np.sum(true_positives) + np.sum(false_positives)))
recall = np.sum(true_positives) / np.sum(observed_positives)
sensitivity = recall
f1 = 2 * ((precision * recall) / (precision + recall))
specificity = np.sum(true_negatives) / np.sum(observed_negatives)
positive_likelihood = sensitivity / (1 - specificity)
negative_likelihood = (1 - sensitivity) / specificity
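 # (Added illustrative note) With the values reported in the results cell below,
 # sensitivity 0.700 and specificity 0.827 give a positive likelihood ratio of
 # 0.700 / (1 - 0.827), about 4.05, and a negative likelihood ratio of
 # (1 - 0.700) / 0.827, about 0.363.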
false_positive_rate = 1 - specificity
false_negative_rate = 1 - sensitivity
true_positive_rate = sensitivity
true_negative_rate = specificity
positive_predictive_value = (np.sum(true_positives) /
np.sum(predicted_positives))
negative_predictive_value = (np.sum(true_negatives) /
np.sum(predicted_negatives))
# Create dictionary for results, and add results
results = dict()
results['observed_positive_rate'] = np.mean(observed_positives)
results['observed_negative_rate'] = np.mean(observed_negatives)
results['predicted_positive_rate'] = np.mean(predicted_positives)
results['predicted_negative_rate'] = np.mean(predicted_negatives)
results['accuracy'] = accuracy
results['precision'] = precision
results['recall'] = recall
results['f1'] = f1
results['sensitivity'] = sensitivity
results['specificity'] = specificity
results['positive_likelihood'] = positive_likelihood
results['negative_likelihood'] = negative_likelihood
results['false_positive_rate'] = false_positive_rate
results['false_negative_rate'] = false_negative_rate
results['true_positive_rate'] = true_positive_rate
results['true_negative_rate'] = true_negative_rate
results['positive_predictive_value'] = positive_predictive_value
results['negative_predictive_value'] = negative_predictive_value
return results
# Call calculate_accuracy function
accuracy = calculate_accuracy(y_test, y_pred_test)
# Print results up to three decimal places
for key, value in accuracy.items():
print (key, "{0:0.3}".format(value))
###Output
observed_positive_rate 0.404
observed_negative_rate 0.596
predicted_positive_rate 0.386
predicted_negative_rate 0.614
accuracy 0.776
precision 0.733
recall 0.7
f1 0.716
sensitivity 0.7
specificity 0.827
positive_likelihood 4.05
negative_likelihood 0.363
false_positive_rate 0.173
false_negative_rate 0.3
true_positive_rate 0.7
true_negative_rate 0.827
positive_predictive_value 0.7
negative_predictive_value 1.22
|
06_Building Multilayer Perceptron Models with Keras/Multilayer_Perceptron_Model_with_Keras.ipynb | ###Markdown
Multilayer Perceptron Models with Keras Task 1: Project Overview and Import Modules
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(0)
import tensorflow as tf
from tensorflow.keras.datasets import reuters
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.preprocessing.text import Tokenizer
print('Tensorflow version:', tf.__version__)
###Output
Tensorflow version: 2.2.0
###Markdown
Task 2: Load the Reuters Dataset
###Code
(X_train, y_train), (X_test, y_test) = reuters.load_data(num_words=10000, test_split=0.2)
print(len(X_train), 'training examples')
print(len(X_test), 'test examples')
num_classes = np.max(y_train) + 1
print(num_classes, 'classes')
###Output
46 classes
###Markdown
Task 3: Vectorize Sequence Data and One-hot Encode Class Labels
###Code
tokenizer = Tokenizer(num_words=10000)
X_train = tokenizer.sequences_to_matrix(X_train, mode='binary')
X_test = tokenizer.sequences_to_matrix(X_test, mode='binary')
X_train.shape, X_test.shape
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
y_train.shape, y_test.shape
###Output
_____no_output_____
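###Markdown
To see what sequences_to_matrix(mode='binary') produces, here is a small added sketch (not part of the original notebook) on a toy pair of sequences: each sequence of word indices becomes a fixed-length multi-hot vector of size num_words.
###Code
# Illustrative sketch: binary vectorization of two toy sequences with a tiny vocabulary
from tensorflow.keras.preprocessing.text import Tokenizer
toy_tokenizer = Tokenizer(num_words=5)
print(toy_tokenizer.sequences_to_matrix([[1, 2], [2, 3, 3]], mode='binary'))
# expected: row 0 has ones in columns 1 and 2, row 1 in columns 2 and 3
###Output
_____no_output_____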
###Markdown
Task 4: Build Multilayer Perceptron Model
###Code
model = Sequential([
Dense(512, input_shape=(10000,)),
Activation('relu'),
Dropout(0.5),
Dense(num_classes),
Activation('softmax')
])
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 512) 5120512
_________________________________________________________________
activation (Activation) (None, 512) 0
_________________________________________________________________
dropout (Dropout) (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 46) 23598
_________________________________________________________________
activation_1 (Activation) (None, 46) 0
=================================================================
Total params: 5,144,110
Trainable params: 5,144,110
Non-trainable params: 0
_________________________________________________________________
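###Markdown
As a quick check of the summary above (added note): a Dense layer has (inputs x units) weights plus one bias per unit, so the first layer contributes 10000*512 + 512 = 5,120,512 parameters and the output layer 512*46 + 46 = 23,598, matching the totals reported by Keras.
###Code
# Illustrative sketch: recompute the parameter counts shown in model.summary()
print(10000 * 512 + 512)  # dense: 5,120,512
print(512 * 46 + 46)  # dense_1: 23,598
print(10000 * 512 + 512 + 512 * 46 + 46)  # total: 5,144,110
###Output
_____no_output_____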
###Markdown
Task 5: Train Model
###Code
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='min')
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.1, callbacks=[es])
###Output
Epoch 1/100
253/253 [==============================] - 8s 32ms/step - loss: 1.2987 - accuracy: 0.7192 - val_loss: 0.9635 - val_accuracy: 0.7842
Epoch 2/100
253/253 [==============================] - 8s 31ms/step - loss: 0.5016 - accuracy: 0.8852 - val_loss: 0.8634 - val_accuracy: 0.8098
Epoch 3/100
253/253 [==============================] - 8s 31ms/step - loss: 0.2902 - accuracy: 0.9342 - val_loss: 0.9078 - val_accuracy: 0.8098
Epoch 4/100
253/253 [==============================] - 8s 31ms/step - loss: 0.2176 - accuracy: 0.9487 - val_loss: 0.9675 - val_accuracy: 0.7920
Epoch 5/100
253/253 [==============================] - 8s 31ms/step - loss: 0.1963 - accuracy: 0.9540 - val_loss: 0.9559 - val_accuracy: 0.8131
Epoch 00005: early stopping
###Markdown
Task 6: Evaluate Model on Test Data
###Code
model.evaluate(X_test, y_test, batch_size=32, verbose=1)
# returns loss and accuracy
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____ |
4.Stacking.ipynb | ###Markdown
ProbSpace: Predicting YouTube video view counts
###Code
out_dir = "out_tmp"
import pandas as pd
import numpy as np
import scipy
import itertools
import os, datetime, gc, glob, re, random
import time, datetime
import pickle
from tqdm.notebook import tqdm
from imblearn.over_sampling import SMOTE
import optuna
import bhtsne, umap
from janome.tokenizer import Tokenizer
from janome.analyzer import Analyzer
from janome.tokenfilter import *
from janome.charfilter import UnicodeNormalizeCharFilter, RegexReplaceCharFilter
import unicodedata
import lightgbm as lgb
import xgboost as xgb
from catboost import Pool, CatBoostRegressor, CatBoostClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.linear_model import LinearRegression, BayesianRidge, ElasticNet, Lasso, LogisticRegression, Ridge, SGDRegressor
from sklearn.ensemble import AdaBoostRegressor, BaggingRegressor
from sklearn.ensemble import StackingRegressor, VotingRegressor
from sklearn.ensemble import ExtraTreesRegressor, GradientBoostingRegressor, RandomForestRegressor
from sklearn.svm import LinearSVR
from ngboost import NGBRegressor
from ngboost.ngboost import NGBoost
from ngboost.learners import default_tree_learner
from ngboost.scores import MLE, CRPS, LogScore
from ngboost.distns import Normal, LogNormal
from sklearn.linear_model import BayesianRidge, ElasticNet, Lasso, LogisticRegression, Ridge, SGDRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.cluster import KMeans, MiniBatchKMeans, DBSCAN
from sklearn.model_selection import KFold, RepeatedKFold, StratifiedKFold, RepeatedStratifiedKFold, cross_validate, cross_val_predict, train_test_split
from sklearn.metrics import mean_squared_error, roc_auc_score, log_loss
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler, Normalizer, RobustScaler, QuantileTransformer, PowerTransformer
from sklearn.feature_selection import SelectFromModel, RFE, SelectPercentile, SelectKBest
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import backend as K
from tensorflow.keras import utils
from tensorflow.keras.initializers import he_normal, he_uniform, GlorotNormal, GlorotUniform
from tensorflow.keras.optimizers import Adadelta, Adagrad, Adam, Adamax, Ftrl, Nadam, RMSprop, SGD
from tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping, TensorBoard, LambdaCallback, ReduceLROnPlateau
from tensorflow.keras.metrics import MeanSquaredError, RootMeanSquaredError
from tensorflow.keras import layers
from tensorflow.keras.layers import Concatenate, Lambda
from tensorflow.keras.layers import Activation, Average, Dense, Dropout, Flatten, BatchNormalization, LeakyReLU, Input
from tensorflow.keras.layers import GaussianDropout, GaussianNoise
from tensorflow.keras.layers import Conv2D, SeparableConv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 200)
pd.set_option('display.max_columns', 100)
start = datetime.datetime.now()
# Function for variable description
def description(df):
summary = pd.DataFrame(df.dtypes, columns=['dtypes'])
summary = summary.reset_index()
summary["Name"] = summary['index']
summary = summary[["Name",'dtypes']]
summary["Missing"] = df.isnull().sum().values
summary["Uniques"] = df.nunique().values
summary["Mean"] = np.nanmean(df, axis=0).astype(df.dtypes)
summary["Std"] = np.nanstd(df, axis=0).astype(df.dtypes)
summary["Minimum"] = np.nanmin(df, axis=0).astype(df.dtypes)
summary["Maximum"] = np.nanmax(df, axis=0).astype(df.dtypes)
summary["First Value"] = df.iloc[0].values
summary["Second Value"] = df.iloc[1].values
summary["Third Value"] = df.iloc[2].values
summary["dimension"] = str(df.shape)
return summary
def get_hist(target):
plt.hist(target, bins=100)
print("max: {:>10,.6f}".format(target.max()))
print("min: {:>10,.6f}".format(target.min()))
print("mean: {:>10,.6f}".format(target.mean()))
print("std: {:>10,.6f}".format(target.std()))
return
def get_hist4(target1, title1, target2, title2, target3, title3, target4, title4):
fig = plt.figure(figsize=(18, 18))
ax1 = fig.add_subplot(5,1,1)
ax2 = fig.add_subplot(5,1,2)
ax3 = fig.add_subplot(5,1,3)
ax4 = fig.add_subplot(5,1,4)
ax5 = fig.add_subplot(5,1,5)
ax1.set_title(title1)
ax2.set_title(title2)
ax3.set_title(title3)
ax4.set_title(title4)
ax5.set_title("OVERALL")
ax1.hist(target1, bins=100)
ax2.hist(target2, bins=100)
ax3.hist(target3, bins=100)
ax4.hist(target4, bins=100)
ax5.hist(target1, bins=100, alpha=0.2, color='red')
ax5.hist(target2, bins=100, alpha=0.2, color='green')
ax5.hist(target3, bins=100, alpha=0.2, color='blue')
#ax5.hist(target4, bins=100, alpha=0.2, color='grey')
fig.show()
return
###Output
_____no_output_____
###Markdown
Load Data
###Code
%%time
# for train/test data
train_data = pd.read_csv("./input/train_data.csv")
test_data = pd.read_csv("./input/test_data.csv")
y = np.log1p(train_data['y']).copy()
y_bin = pd.cut(train_data['y'], [0, 10, 100,1000,10000,100000,1000000,10000000000], labels=[1,2,3,4,5,6,7])
y_bin = y_bin.astype(int)
test_id = test_data.id
train = train_data.drop(['id', 'y'], axis=1).copy()
test = test_data.drop(['id'], axis=1).copy()
###Output
_____no_output_____
###Markdown
Distribution of the target variable
###Code
get_hist(y)
###Output
_____no_output_____
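###Markdown
The target is modelled as log1p(y) because the evaluation metric used throughout this notebook is RMSLE: the RMSE of log1p-transformed values is exactly the RMSLE of the raw values. Below is a small added sketch (not part of the original notebook) verifying this equivalence on toy numbers.
###Code
# Illustrative sketch: RMSE on log1p(y) equals RMSLE on y (toy values, assumed example)
import numpy as np
from sklearn.metrics import mean_squared_error, mean_squared_log_error
y_true = np.array([10.0, 100.0, 1000.0])
y_hat = np.array([12.0, 90.0, 1500.0])
print(mean_squared_error(np.log1p(y_true), np.log1p(y_hat), squared=False))
print(np.sqrt(mean_squared_log_error(y_true, y_hat)))
###Output
_____no_output_____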
###Markdown
Fixing the random seed
###Code
def seed_everything(seed=1234):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
DIFF_THRESHOLD = 5
################################################################################
# RESULTS
################################################################################
def output_results(target, results, test_id, MODEL):
RMSLE = mean_squared_error(target.values, results['train'], squared=False)
print(f"Overall RMSLE={RMSLE}")
# Make submission
print("Saving submission file")
submission = pd.DataFrame({'id': test_id, 'y': np.expm1(results['test'])})
submission.to_csv(f"./{out_dir}/submission_{MODEL}_CV{RMSLE:.6f}.csv", index=False)
return submission
def check_results(y, results):
y_diff = np.abs(np.expm1(y) - np.expm1(results["train"]))
y_log1p_diff = np.abs(y - results["train"])
display(y_diff[y_log1p_diff>DIFF_THRESHOLD].index.values)
display(train_data[y_log1p_diff>DIFF_THRESHOLD])
display(pd.concat([pd.DataFrame(y[y_log1p_diff>DIFF_THRESHOLD], columns=['y']), \
pd.DataFrame(results["train"][y_log1p_diff>DIFF_THRESHOLD], \
index=y_diff[y_log1p_diff>DIFF_THRESHOLD].index.values, columns=["pred_train"])], axis=1))
get_hist4(results["train"], "pred_train", \
y, "y", \
results["test"], "pred_test", \
y_log1p_diff, "diff")
display(pd.concat([pd.DataFrame(results["train"], columns=["pred_train"]), \
pd.DataFrame(y, columns=["y"]), \
y_log1p_diff.rename("y_log1p_diff")], \
axis=1).describe())
display(pd.DataFrame(results["test"], columns=["pred_test"]).describe())
RMSLE = mean_squared_error(y, results["train"], squared=False)
display(f"Overall RMSLE={RMSLE:.6f}")
DIFF_THRESHOLD = 5
################################################################################
# METRICS
################################################################################
def rmsle(y, pred_y):
return mean_squared_error(y, pred_y, squared=False)
################################################################################
# CROSS-VALIDATION
################################################################################
def print_cv_scores(label, cv_scores):
print("*"*40)
print(f"type(cv_scores): {type(cv_scores)}")
print(f"{label} cv scores : {cv_scores}")
print(f"{label} cv mean score : {np.mean(cv_scores)}")
print(f"{label} cv std score : {np.std(cv_scores)}")
def run_cv_model(train, test, target, target_skf, encoding, model_fn, params={},
eval_fn=None, label='model', cv=5, repeats=5, seed=43):
if repeats==1:
if target_skf is None:
kf = KFold(n_splits=cv, shuffle=True, random_state=seed)
target_y = target
else:
kf = StratifiedKFold(n_splits=cv, shuffle=True, random_state=seed)
target_y = target_skf
divide_counts = cv
else:
if target_skf is None:
kf = RepeatedKFold(n_splits=cv,n_repeats=repeats, random_state=seed)
target_y = target
else:
kf = RepeatedStratifiedKFold(n_splits=cv, n_repeats=repeats, random_state=seed)
target_y = target_skf
divide_counts = kf.get_n_splits()
cv_scores = []
pred_full_test = 0
pred_train = np.zeros((train.shape[0]))
for fold_id, (train_idx, val_idx) in enumerate(kf.split(train, target_y)):
print("*"*40)
print(f"Started {label} fold:{fold_id+1} / {divide_counts}")
tr_X, val_X = train.iloc[train_idx].copy(), train.iloc[val_idx].copy()
tr_y, val_y = target.iloc[train_idx], target.iloc[val_idx]
# TARGET ENCODING
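# (Added note) Out-of-fold target encoding: each categorical value is replaced by the
# mean of the target computed on data that does not include the row being encoded.
# Validation rows use means from the whole training fold, while training rows use
# means from the other folds of an inner KFold, which limits target leakage.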
if encoding:
for c in encoding:
# Compute the mean of the target for each category over the entire training fold
data_tmp = pd.DataFrame({c: tr_X[c], 'target': tr_y})
target_mean = data_tmp.groupby(c)['target'].mean()
# Replace the categories in the validation data with the target means
val_X.loc[:, c] = val_X[c].map(target_mean)
# Prepare an array to hold the transformed values for the training data
tmp = np.repeat(np.nan, tr_X.shape[0])
kf_encoding = KFold(n_splits=4, shuffle=True, random_state=seed)
for idx_1, idx_2 in kf_encoding.split(tr_X):
# Compute the mean of the target variable for each category out-of-fold
target_mean = data_tmp.iloc[idx_1].groupby(c)['target'].mean()
# Store the transformed values in the temporary array
tmp[idx_2] = tr_X[c].iloc[idx_2].map(target_mean)
tr_X.loc[:, c] = tmp
# TARGET ENCODING
params2 = params.copy()
model, pred_val_y, pred_test_y = model_fn(
tr_X, tr_y, val_X, val_y, test, params2)
pred_full_test = pred_full_test + pred_test_y
pred_train[val_idx] = pred_val_y
if eval_fn is not None:
cv_score = eval_fn(val_y, pred_val_y)
cv_scores.append(cv_score)
print(f"{label} cv score {fold_id+1}: {cv_score}")
print_cv_scores(label, cv_scores)
pred_full_test = pred_full_test / divide_counts
results = {"label": label,
"train": pred_train,
"test": pred_full_test,
"cv": cv_scores}
RMSLE = mean_squared_error(target.values, results["train"], squared=False)
print(f"Overall RMSLE={RMSLE}")
return results
################################################################################
# RESULTS
################################################################################
def output_results(target, results, test_id, MODEL):
RMSLE = mean_squared_error(target.values, results["train"], squared=False)
print(f"Overall RMSLE={RMSLE}")
# Make submission
print("Saving submission file")
submission = pd.DataFrame({'id': test_id, 'y': np.expm1(results["test"])})
submission.to_csv(f"./{out_dir}/submission_{MODEL}_CV{RMSLE:.6f}.csv", index=False)
return submission
def check_results(y, results):
y_diff = np.abs(np.expm1(y) - np.expm1(results["train"]))
y_log1p_diff = np.abs(y - results["train"])
display(y_diff[y_log1p_diff>DIFF_THRESHOLD].index.values)
display(train_data[y_log1p_diff>DIFF_THRESHOLD])
display(pd.concat([pd.DataFrame(y[y_log1p_diff>DIFF_THRESHOLD], columns=['y']), \
pd.DataFrame(results["train"][y_log1p_diff>DIFF_THRESHOLD], \
index=y_diff[y_log1p_diff>DIFF_THRESHOLD].index.values, columns=["pred_train"])], axis=1))
get_hist4(results["train"], "pred_train", \
y, "y", \
results["test"], "pred_test", \
y_log1p_diff, "diff")
display(pd.concat([pd.DataFrame(results["train"], columns=["pred_train"]), \
pd.DataFrame(y, columns=["y"]), \
y_log1p_diff.rename("y_log1p_diff")], \
axis=1).describe())
display(pd.DataFrame(results["test"], columns=["pred_test"]).describe())
RMSLE = mean_squared_error(y, results["train"], squared=False)
display(f"Overall RMSLE={RMSLE:.6f}")
################################################################################
# MODEL
################################################################################
def runLGB(train_X, train_y, val_X, val_y, test_X, params):
model = lgb.LGBMRegressor(**params)
model.fit(train_X, train_y, eval_set=(val_X, val_y), early_stopping_rounds=100, eval_metric='rmse', verbose=100)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runXGB(train_X, train_y, val_X, val_y, test_X, params):
model = xgb.XGBRegressor(**params)
model.fit(train_X, train_y, eval_set=[[val_X, val_y]], early_stopping_rounds=100, eval_metric='rmse', verbose=100)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runCAT(train_X, train_y, val_X, val_y, test_X, params):
model = CatBoostRegressor(**params)
model.fit(train_X, train_y, eval_set=(val_X, val_y),
# cat_features=cat_features,
early_stopping_rounds=100, use_best_model=True, verbose=100)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runNGB(train_X, train_y, val_X, val_y, test_X, params):
model = NGBRegressor(**params)
model.fit(train_X, train_y, X_val=val_X, Y_val=val_y)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runLR(train_X, train_y, val_X, val_y, test_X, params):
model = LogisticRegression(**params)
model.fit(train_X, train_y, sample_weight=None)
pred_val_y = model.predict_proba(val_X)[:, 1]
pred_test_y = model.predict_proba(test_X)[:, 1]
return model, pred_val_y, pred_test_y
def runLINR(train_X, train_y, val_X, val_y, test_X, params):
model = LinearRegression(**params)
model.fit(train_X, train_y, sample_weight=None)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runBAYRIDGE(train_X, train_y, val_X, val_y, test_X, params):
model = BayesianRidge(**params)
model.fit(train_X, train_y, sample_weight=None)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runRDG(train_X, train_y, val_X, val_y, test_X, params):
model = Ridge(**params)
model.fit(train_X, train_y, sample_weight=None)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runELASTIC(train_X, train_y, val_X, val_y, test_X, params):
model = ElasticNet(**params)
model.fit(train_X, train_y, check_input=True)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runLASSO(train_X, train_y, val_X, val_y, test_X, params):
model = Lasso(**params)
model.fit(train_X, train_y, check_input=True)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runKN(train_X, train_y, val_X, val_y, test_X, params):
model = KNeighborsRegressor(**params)
model.fit(train_X, train_y)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runRFR(train_X, train_y, val_X, val_y, test_X, params):
model = RandomForestRegressor(**params)
model.fit(train_X, train_y)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runSGD(train_X, train_y, val_X, val_y, test_X, params):
model = SGDRegressor(**params)
model.fit(train_X, train_y, coef_init=None, intercept_init=None, sample_weight=None)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runETR(train_X, train_y, val_X, val_y, test_X, params):
model = ExtraTreesRegressor(**params)
model.fit(train_X, train_y)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runGBR(train_X, train_y, val_X, val_y, test_X, params):
model = GradientBoostingRegressor(**params)
model.fit(train_X, train_y)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runBAG(train_X, train_y, val_X, val_y, test_X, params):
model = BaggingRegressor(**params)
model.fit(train_X, train_y, sample_weight=None)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runABR(train_X, train_y, val_X, val_y, test_X, params):
model = AdaBoostRegressor(**params)
model.fit(train_X, train_y, sample_weight=None)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
def runLINSVR(train_X, train_y, val_X, val_y, test_X, params):
model = LinearSVR(**params)
model.fit(train_X, train_y, sample_weight=None)
pred_val_y = model.predict(val_X)
pred_test_y = model.predict(test_X)
return model, pred_val_y, pred_test_y
################################################################################
# MODEL PARAMETERS
################################################################################
lgb_params = {'boosting_type': 'gbdt', 'tree_learner': 'feature', # 'serial', 'feature', 'data' or 'voting'
'num_leaves': 31, 'max_depth': -1,
'learning_rate': 5e-2, 'n_estimators': 10000, 'importance_type': 'gain',
'subsample_for_bin': 200000, 'objective': 'regression', 'min_split_gain': 0.0, 'min_child_weight': 1e-3, 'min_child_samples': 20,
'bagging_freq': 0, 'bagging_fraction': 1.0, 'feature_fraction': 1.0,
'reg_alpha': 0.2, 'reg_lambda': 0.2,
'random_state': 43, 'data_random_seed': 1,
'n_jobs': -1, 'silent': False}
xgb_params = {'base_score': 0.5, 'booster': 'gbtree', 'colsample_bylevel': 1, 'colsample_bynode': 1, 'colsample_bytree': 1, 'gamma': 0,
'learning_rate': 5e-2, 'n_estimators': 20000, 'importance_type': 'gain',
'max_delta_step': 0, 'max_depth': 6, 'min_child_weight': 0,
'objective': 'reg:squarederror', 'reg_alpha': 0.2, 'reg_lambda': 0.2, 'scale_pos_weight': 1,
'subsample': 0.9,
'silent': None, 'verbosity': 0,
'random_state': 43, 'seed': 43,
'tree_method': 'gpu_hist', 'gpu_id': 0}
cat_params = {'iterations':10000, 'depth': 8, 'boosting_type': 'Ordered', #'Ordered', #'Plain',
'loss_function': 'RMSE', 'eval_metric': 'RMSE',
'learning_rate': 5e-2, 'leaf_estimation_method': 'Gradient', #'Newton', 'Exact'
'l2_leaf_reg': 1.0, 'random_strength': 1.0, 'bagging_temperature': 1.0, 'has_time': False,
'grow_policy': 'SymmetricTree', #'Depthwise', 'Lossguide'
'min_data_in_leaf': 1, 'max_leaves': 31,
'random_seed': 43,
# 'one_hot_max_size': len(cat_features),
'task_type': 'GPU'}
ngb_params = {'Base': default_tree_learner, # decision-tree base learner; use default_linear_learner for Ridge regression
              'Dist': Normal,
              'Score': LogScore, # CRPS or MLE scoring is also possible
'learning_rate': 1e-2, 'natural_gradient': True, 'verbose': True, 'verbose_eval': 100,
'tol': 1e-4, 'random_state': 43, 'n_estimators': 100, 'minibatch_frac': 0.5}
logr_params = {'penalty':'l2', 'solver': 'newton-cg', #'newton-cg', 'lbfgs', 'sag' , 'saga'
'C': 0.05,
# 'class_weight':'balanced',
'max_iter': 500, 'random_state': 43, 'n_jobs': -1}
linr_params = {'fit_intercept': True, 'normalize': False, 'copy_X': True, 'n_jobs': -1}
bayridge_params = {'alpha_1': 1e-06, 'alpha_2': 1e-06, 'alpha_init': None, 'compute_score': False,
'copy_X': True, 'fit_intercept': True,
'lambda_1': 1e-06, 'lambda_2': 1e-06, 'lambda_init': None,
'n_iter': 200, 'normalize': False, 'tol': 1e-3,
'verbose': True}
rdg_params = {'alpha': 0.01, 'copy_X': True, 'fit_intercept': True,
'max_iter': 100, 'normalize': False,
'random_state': 43, 'solver': 'auto', 'tol': 1e-3}
elastic_params = {'alpha': 0.0001, 'copy_X': True, 'fit_intercept': True, 'l1_ratio': 0.5,
'max_iter': 200, 'normalize': False, 'positive': False, 'precompute': False,
'random_state': 43, 'selection': 'cyclic', 'tol': 1e-4, 'warm_start': False}
lasso_params = {'alpha': 0.0001, 'copy_X': True, 'fit_intercept': True, 'max_iter': 200,
'normalize': False, 'positive': False, 'precompute': False,
'random_state': 43, 'selection': 'random', 'tol': 1e-4, 'warm_start': False}
sgd_params = {'alpha': 1e-4, 'average': False, 'early_stopping': True,
'epsilon': 1e-1, 'eta0': 1e-4, 'fit_intercept': True, 'l1_ratio': 0.15,
'learning_rate': 'invscaling', 'loss': 'squared_loss', 'penalty': 'l2', 'power_t': 0.25,
'max_iter': 3000, 'n_iter_no_change': 10, 'validation_fraction': 0.5,
'random_state': 43, 'shuffle': True, 'tol': 1e-3, 'verbose': False, 'warm_start': False}
kn_params = {'n_neighbors': 5, 'weights': 'uniform', 'algorithm': 'auto',
'leaf_size': 30, 'p': 2, 'metric': 'minkowski', 'metric_params': None, 'n_jobs': -1}
rfr_params = {'bootstrap': True, 'ccp_alpha': 0.0, 'criterion': 'mse',
'max_depth': None, 'max_features': 'auto', 'max_leaf_nodes': None,
'min_impurity_decrease': 0.0, 'min_impurity_split': 1e-7, 'max_samples': None,
'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0,
'n_estimators': 1000, 'n_jobs': -1, 'oob_score': False,
'random_state': 43, 'verbose': 1, 'warm_start': False}
etr_params = {'bootstrap': False, 'ccp_alpha': 0.0, 'criterion': 'mse',
'max_depth': None, 'max_features': 'auto', 'max_leaf_nodes': None,
'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'max_samples': None,
'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0,
'n_estimators': 100, 'n_jobs': -1, 'oob_score': False,
'random_state': 43, 'verbose': 1, 'warm_start': False}
gbr_params = {'alpha': 0.9, 'ccp_alpha': 0.0, 'criterion': 'friedman_mse', 'init': None,
'learning_rate': 5e-2, 'n_estimators': 200, 'loss': 'ls',
'max_depth': 6, 'max_features': None, 'max_leaf_nodes': None,
'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2,
'min_weight_fraction_leaf': 0.0, 'subsample': 1.0, 'validation_fraction': 0.2,
'n_iter_no_change': None, 'presort': 'deprecated',
'random_state': 43, 'tol': 1e-4, 'verbose': 1, 'warm_start': False}
bag_params = {'base_estimator': None,
'bootstrap': True, 'bootstrap_features': False, 'max_features': 1.0, 'max_samples': 1.0,
'n_estimators': 5, 'n_jobs': None, 'oob_score': False,
'random_state': 43, 'verbose': 1, 'warm_start': False}
abr_params = {'base_estimator': None,
'learning_rate': 1.0, 'loss': 'linear', 'n_estimators': 5,
'random_state': 43}
linsvr_params = {'epsilon': 0.0, 'tol': 0.0001, 'C': 1.0,
'loss': 'epsilon_insensitive', 'fit_intercept': True, 'intercept_scaling': 1.0,
'dual': True, 'verbose': 1, 'random_state': 43, 'max_iter': 1000}
def lgb_regressor(train, test, target, target_skf, seed, n_folds, encoding):
lgb_params = {'boosting_type': 'gbdt',
'objective' : 'regression',
'metric' : 'rmse',
                  'tree_learner': 'feature', # 'serial', 'feature', 'data' or 'voting'
'max_depth': -1,
'min_child_samples': 10,
'min_split_gain': 0.01,
'min_child_weight': 1e-2,
'reg_alpha': 0.1,
'reg_lambda': 1,
'num_leaves': 35,
'max_bin': 300,
'learning_rate': 2e-2,
'bagging_fraction': 0.9,
'bagging_freq': 1,
'bagging_seed': 4590,
'feature_fraction': 0.85,
'n_estimators': 50000,
'importance_type': 'gain',
'subsample_for_bin': 200000,
'random_state': seed,
'data_random_seed': seed,
'n_jobs': -1,
'silent': False}
lgb_results = run_cv_model(train, test, target, target_skf, encoding, runLGB, lgb_params, rmsle, 'LGBMRegressor', cv=n_folds, repeats=1, seed=seed)
return lgb_results
def xgb_regressor(train, test, target, target_skf, seed, n_folds, encoding):
xgb_params = {'base_score': 0.5,
'booster': 'gbtree',
'objective': 'reg:squarederror',
'colsample_bylevel': 0.6,
'colsample_bynode': 0.6,
'colsample_bytree': 0.6,
'gamma': 0,
'learning_rate': 1e-2,
'n_estimators': 50000,
'importance_type': 'gain',
'max_delta_step': 0,
'max_depth': 8,
'min_child_weight': 0,
'reg_alpha': 0.1,
'reg_lambda': 1,
'scale_pos_weight': 1,
'subsample': 0.8,
'silent': None,
'verbosity': 0,
'random_state': seed,
'seed': seed,
'tree_method': 'gpu_hist',
'gpu_id': 0}
xgb_results = run_cv_model(train, test, target, target_skf, encoding, runXGB, xgb_params, rmsle, 'XGBRegressor', cv=n_folds, repeats=1, seed=seed)
return xgb_results
def catboost_regressor(train, test, target, target_skf, seed, n_folds, encoding):
cat_params = {'bootstrap_type': 'Bayesian',
'boosting_type': 'Plain', #'Ordered', #'Plain',
'iterations':50000,
'depth': 8,
'loss_function': 'RMSE',
'eval_metric': 'RMSE',
'learning_rate': 1e-2,
'leaf_estimation_method': 'Gradient', #'Newton', 'Exact'
'l2_leaf_reg': 1.0,
'random_strength': 0.8,
'bagging_temperature': 0.9,
'has_time': False,
'grow_policy': 'SymmetricTree', #'Depthwise', 'Lossguide'
'min_data_in_leaf': 1,
'max_leaves': 31,
'random_seed': seed,
#'one_hot_max_size': len(cat_features),
'task_type': 'GPU'}
cat_results = run_cv_model(train, test, target, target_skf, encoding, runCAT, cat_params, rmsle, 'CatBoostRegressor', cv=n_folds, repeats=1, seed=seed)
return cat_results
def ngboost_regressor(train, test, target, target_skf, seed, n_folds, encoding):
ngb_params['learning_rate'] = 5e-2
ngb_params['n_estimators'] = 500
ngb_params['minibatch_frac'] = 1.0
ngb_params['random_state'] = seed
ngb_results = run_cv_model(train, test, target, target_skf, encoding, runNGB, ngb_params, rmsle, 'NGBoost', cv=n_folds, repeats=1, seed=seed)
return ngb_results
def logistic_regression(train, test, target, target_skf, seed, n_folds, encoding):
logr_params['max_iter'] = 500
logr_params['random_state'] = seed
logr_results = run_cv_model(train, test, target, target_skf, encoding, runLR, logr_params, rmsle, 'LogisticRegression', cv=n_folds, repeats=1, seed=seed)
return logr_results
def lin_regression(train, test, target, target_skf, seed, n_folds, encoding):
linr_params['n_jobs'] = -1
linr_results = run_cv_model(train, test, target, target_skf, encoding, runLINR, linr_params, rmsle, 'LinearRegression', cv=n_folds, repeats=1, seed=seed)
return linr_results
def bayesianridge(train, test, target, target_skf, seed, n_folds, encoding):
bayridge_params['alpha_1'] = 1e-06
bayridge_params['alpha_2'] = 1e-06
bayridge_params['lambda_1'] = 1.0
bayridge_params['lambda_2'] = 1e-07
bayridge_params['n_iter'] = 1000
bay_results = run_cv_model(train, test, target, target_skf, encoding, runBAYRIDGE, bayridge_params, rmsle, 'BayesianRidge', cv=n_folds, repeats=1, seed=seed)
return bay_results
def ridge(train, test, target, target_skf, seed, n_folds, encoding):
rdg_params['alpha'] = 1.0
rdg_params['random_state'] = seed
rdg_params['max_iter'] = 1000
rdg_results = run_cv_model(train, test, target, target_skf, encoding, runRDG, rdg_params, rmsle, 'Ridge', cv=n_folds, repeats=1, seed=seed)
return rdg_results
def elastic(train, test, target, target_skf, seed, n_folds, encoding):
elastic_params['alpha'] = 1e-04
elastic_params['l1_ratio'] = 0.5
elastic_params['random_state'] = seed
elastic_params['max_iter'] = 1000
    elastic_results = run_cv_model(train, test, target, target_skf, encoding, runELASTIC, elastic_params, rmsle, 'ElasticNet', cv=n_folds, repeats=1, seed=seed)
return elastic_results
def lasso(train, test, target, target_skf, seed, n_folds, encoding):
lasso_params['alpha'] = 1e-04
lasso_params['random_state'] = seed
lasso_params['max_iter'] = 1000
lasso_results = run_cv_model(train, test, target, target_skf, encoding, runLASSO, lasso_params, rmsle, 'Lasso', cv=n_folds, repeats=1, seed=seed)
return lasso_results
def sgd(train, test, target, target_skf, seed, n_folds, encoding):
sgd_params['alpha'] = 1e-04
sgd_params['early_stopping'] = True
sgd_params['epsilon'] = 1e-1
sgd_params['eta0'] = 1e-4
sgd_params['l1_ratio'] = 0.15
sgd_params['learning_rate'] = 'invscaling'
sgd_params['loss'] = 'squared_loss'
sgd_params['validation_fraction'] = 0.2
sgd_params['random_state'] = seed
sgd_results = run_cv_model(train, test, target, target_skf, encoding, runSGD, sgd_params, rmsle, 'SGD', cv=n_folds, repeats=1, seed=seed)
return sgd_results
def kn_regressor(train, test, target, target_skf, seed, n_folds, encoding):
kn_params['n_neighbors'] = 5
kn_params['weights'] = 'distance'
kn_params['algorithm'] = 'auto' #auto, ball_tree, kd_tree, brute
kn_params['leaf_size'] = 60
kn_results = run_cv_model(train, test, target, target_skf, encoding, runKN, kn_params, rmsle, 'KNeighbors', cv=n_folds, repeats=1, seed=seed)
return kn_results
def rf_regressor(train, test, target, target_skf, seed, n_folds, encoding):
rfr_params['ccp_alpha'] = 0
rfr_params['criterion'] = 'mse'
rfr_params['max_depth'] = 63
rfr_params['min_samples_leaf'] = 20
rfr_params['min_samples_split'] = 50
rfr_params['random_state'] = seed
rfr_results = run_cv_model(train, test, target, target_skf, encoding, runRFR, rfr_params, rmsle, 'RandomForestRegressor', cv=n_folds, repeats=1, seed=seed)
return rfr_results
def et_regressor(train, test, target, target_skf, seed, n_folds, encoding):
etr_params['ccp_alpha'] = 0
etr_params['criterion'] = 'mse'
etr_params['max_depth'] = 63
etr_params['min_samples_leaf'] = 20
etr_params['min_samples_split'] = 50
etr_params['min_weight_fraction_leaf'] = 0.0
etr_params['n_estimators'] = 1000
etr_params['random_state'] = seed
etr_results = run_cv_model(train, test, target, target_skf, encoding, runETR, etr_params, rmsle, 'ExtraTreesRegressor', cv=n_folds, repeats=1, seed=seed)
return etr_results
def gb_regressor(train, test, target, target_skf, seed, n_folds, encoding):
gbr_params['alpha'] = 0.9
gbr_params['ccp_alpha'] = 0
gbr_params['criterion'] = 'friedman_mse'
gbr_params['learning_rate'] = 5e-2
gbr_params['n_estimators'] = 100
gbr_params['max_depth'] = 31
gbr_params['min_samples_leaf'] = 1
gbr_params['min_samples_split'] = 2
gbr_params['min_weight_fraction_leaf'] = 0.0
gbr_params['subsample'] = 1.0
gbr_params['validation_fraction'] = 0.2
gbr_params['random_state'] = seed
gbr_results = run_cv_model(train, test, target, target_skf, encoding, runGBR, gbr_params, rmsle, 'GradientBoostingRegressor', cv=n_folds, repeats=1, seed=seed)
return gbr_results
def bag_regressor(train, test, target, target_skf, seed, n_folds, encoding):
bag_params['base_estimator'] = BayesianRidge(n_iter=1000, lambda_1=1.0, lambda_2=1e-7)
bag_params['bootstrap'] = True
    bag_params['bootstrap_features'] = True
bag_params['max_features'] = 1.0
bag_params['max_samples'] = 1.0
bag_params['n_estimators'] = 96
bag_params['n_jobs'] = -1
bag_params['random_state'] = seed
bag_results = run_cv_model(train, test, target, target_skf, encoding, runBAG, bag_params, rmsle, 'BaggingRegressor', cv=n_folds, repeats=1, seed=seed)
return bag_results
def ada_regressor(train, test, target, target_skf, seed, n_folds, encoding):
abr_params['base_estimator'] = BayesianRidge(n_iter=1000, lambda_1=1.0, lambda_2=1e-7)
abr_params['learning_rate'] = 2.0
abr_params['loss'] = 'linear'
abr_params['n_estimators'] = 100
abr_params['random_state'] = seed
abr_results = run_cv_model(train, test, target, target_skf, encoding, runABR, abr_params, rmsle, 'AdaBoostRegressor', cv=n_folds, repeats=1, seed=seed)
return abr_results
def lin_svr(train, test, target, target_skf, seed, n_folds, encoding):
linsvr_params['loss'] = 'squared_epsilon_insensitive'
linsvr_params['max_iter'] = 1000
linsvr_params['random_state'] = seed
linsvr_results = run_cv_model(train, test, target, target_skf, encoding, runLINSVR, linsvr_params, rmsle, 'LinearSVR', cv=n_folds, repeats=1, seed=seed)
return linsvr_results
###Output
_____no_output_____
###Markdown
Ensemble & Stacking---
###Code
%%time
NUM_DATASETS = 1
stacking_train_lists = []
stacking_test_lists = []
for j in range(NUM_DATASETS):
stacking_train_lists.append(["XGBRegressor_train_SEED47_FOLDS8_0623",
"XGBRegressor2_train_SEED47_FOLDS8_0623",
"LGBMRegressor_train_SEED47_FOLDS8_0623",
"LGBMRegressor2_train_SEED47_FOLDS8_0623",
"CatBoostRegressor_train_SEED47_FOLDS8_0623",
"CatBoostRegressor2_train_SEED47_FOLDS8_0623",
"XGBRegressor_train_SEED47_FOLDS8_0624",
"XGBRegressor2_train_SEED47_FOLDS8_0624",
"LGBMRegressor_train_SEED47_FOLDS8_0624",
"LGBMRegressor2_train_SEED47_FOLDS8_0624",
"CatBoostRegressor_train_SEED47_FOLDS8_0624",
"CatBoostRegressor2_train_SEED47_FOLDS8_0624",
"XGBRegressor_train_SEED47_FOLDS8_addon_0622",
"LGBMRegressor_train_SEED47_FOLDS8_addon_0622",
"CatBoostRegressor_train_SEED47_FOLDS8_addon_0622",
"XGBRegressor_train_SEED47_FOLDS8_addon_0623",
"LGBMRegressor_train_SEED47_FOLDS8_addon_0623",
"CatBoostRegressor_train_SEED47_FOLDS8_addon_0623",
"Ridge_train_SEED51_FOLDS10",
"Ridge_train_SEED51_FOLDS10_0627",
"RandomForestRegressor_train_SEED47_FOLDS8",
"RandomForestRegressor_train_SEED47_FOLDS8_0627",
"ExtraTreesRegressor_train_SEED47_FOLDS8",
"ExtraTreesRegressor_train_SEED47_FOLDS8_0627",
"NN_train_SEED47_FOLDS10",
"NN2_train_SEED47_FOLDS10",
"NN_train_SEED47_FOLDS10_0626",
"NN2_train_SEED47_FOLDS10_0626",
"NN_train_SEED47_FOLDS10_0627",
"NN2_train_SEED47_FOLDS10_0627"
])
stacking_test_lists.append(["XGBRegressor_test_SEED47_FOLDS8_0623",
"XGBRegressor2_test_SEED47_FOLDS8_0623",
"LGBMRegressor_test_SEED47_FOLDS8_0623",
"LGBMRegressor2_test_SEED47_FOLDS8_0623",
"CatBoostRegressor_test_SEED47_FOLDS8_0623",
"CatBoostRegressor2_test_SEED47_FOLDS8_0623",
"XGBRegressor_test_SEED47_FOLDS8_0624",
"XGBRegressor2_test_SEED47_FOLDS8_0624",
"LGBMRegressor_test_SEED47_FOLDS8_0624",
"LGBMRegressor2_test_SEED47_FOLDS8_0624",
"CatBoostRegressor_test_SEED47_FOLDS8_0624",
"CatBoostRegressor2_test_SEED47_FOLDS8_0624",
"XGBRegressor_test_SEED47_FOLDS8_addon_0622",
"LGBMRegressor_test_SEED47_FOLDS8_addon_0622",
"CatBoostRegressor_test_SEED47_FOLDS8_addon_0622",
"XGBRegressor_test_SEED47_FOLDS8_addon_0623",
"LGBMRegressor_test_SEED47_FOLDS8_addon_0623",
"CatBoostRegressor_test_SEED47_FOLDS8_addon_0623",
"Ridge_test_SEED51_FOLDS10",
"Ridge_test_SEED51_FOLDS10_0627",
"RandomForestRegressor_test_SEED47_FOLDS8",
"RandomForestRegressor_test_SEED47_FOLDS8_0627",
"ExtraTreesRegressor_test_SEED47_FOLDS8",
"ExtraTreesRegressor_test_SEED47_FOLDS8_0627",
"NN_test_SEED47_FOLDS10",
"NN2_test_SEED47_FOLDS10",
"NN_test_SEED47_FOLDS10_0626",
"NN2_test_SEED47_FOLDS10_0626",
"NN_test_SEED47_FOLDS10_0627",
"NN2_test_SEED47_FOLDS10_0627"
])
###Output
_____no_output_____
###Markdown
Stacking: 2nd layer---
###Code
%%time
stacking_train_df_l = []
stacking_test_df_l = []
pickle_l = glob.glob(f"./{out_dir}/*.pickle")
for stacking_train_l in stacking_train_lists:
stacking_train_df = pd.DataFrame()
for j, stacking_train_f in enumerate(stacking_train_l):
stacking_train = [f for f in pickle_l if stacking_train_f in f][0]
with open(stacking_train, 'rb') as f:
stacking_train_df[f'stacking_{j}'] = pickle.load(f)
stacking_train_df['stacking_addon1'] = pd.read_csv(f"./{out_dir}/train_lgb_817.csv").lgb_y
stacking_train_df['stacking_addon2'] = pd.read_csv(f"./{out_dir}/train_lgb_0623.csv").lgb_y
stacking_train_df['stacking_addon3'] = pd.read_csv(f"./{out_dir}/train_lgb_0624.csv").lgb_y
stacking_train_df['stacking_addon4'] = pd.read_csv(f"./{out_dir}/train_lgb_0624_2.csv").lgb_y
stacking_train_df_l.append(stacking_train_df)
for stacking_test_l in stacking_test_lists:
stacking_test_df = pd.DataFrame()
for j, stacking_test_f in enumerate(stacking_test_l):
stacking_test = [f for f in pickle_l if stacking_test_f in f][0]
with open(stacking_test, 'rb') as f:
stacking_test_df[f'stacking_{j}'] = pickle.load(f)
stacking_test_df['stacking_addon1'] = pd.read_csv(f"./{out_dir}/test_lgb_817.csv").lgb_y
stacking_test_df['stacking_addon2'] = pd.read_csv(f"./{out_dir}/test_lgb_0623.csv").lgb_y
stacking_test_df['stacking_addon3'] = pd.read_csv(f"./{out_dir}/test_lgb_0624.csv").lgb_y
stacking_test_df['stacking_addon4'] = pd.read_csv(f"./{out_dir}/test_lgb_0624_2.csv").lgb_y
stacking_test_df_l.append(stacking_test_df)
fnc_l = {'LGBMRegressor': lgb_regressor,
'XGBRegressor': xgb_regressor,
'CatBoostRegressor': catboost_regressor,
'NGBRegressor': ngboost_regressor,
'LogisticRegression': logistic_regression,
'LinearRegression': lin_regression,
'BayesianRidge': bayesianridge,
'Ridge': ridge,
'ElasticNet': elastic,
'Lasso': lasso,
'SGDRegressor': sgd,
'KNeighborsRegressor': kn_regressor,
'RandomForestRegressor': rf_regressor,
'ExtraTreesRegressor': et_regressor,
'GradientBoostingRegressor': gb_regressor,
'BaggingRegressor': bag_regressor,
'AdaBoostRegressor': ada_regressor,
'LinearSVR': lin_svr}
%%time
stacking_train2 = pd.DataFrame()
stacking_test2 = pd.DataFrame()
fnc_list = [fnc_l['LGBMRegressor'], fnc_l['XGBRegressor'], fnc_l['CatBoostRegressor'],
#fnc_l['BayesianRidge'], fnc_l['SGDRegressor'], fnc_l['KNeighborsRegressor'],
fnc_l['BayesianRidge'], fnc_l['SGDRegressor'],
fnc_l['RandomForestRegressor'], fnc_l['ExtraTreesRegressor']]
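# Each level-2 model below is trained with CV on the level-1 out-of-fold predictions;
# its averaged OOF/test predictions become one column of the level-3 feature matrices
# (stacking_train2 / stacking_test2).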
for j, target_fn in enumerate(fnc_list):
keys = [k for k, v in fnc_l.items() if v == target_fn]
stacking_train_tmp = 0
stacking_test_tmp = 0
for k, (stacking_train, stacking_test) in enumerate(zip(stacking_train_df_l, stacking_test_df_l)):
stacking_SEED = 47
stacking_N_FOLDS = 8
encoding = []
stacking_train['mean'] = stacking_train.mean(axis=1)
stacking_test['mean'] = stacking_test.mean(axis=1)
results_stacking = target_fn(train=stacking_train, test=stacking_test, target=y, target_skf=None, \
seed=stacking_SEED, n_folds=stacking_N_FOLDS, encoding=encoding)
submission_stacking = output_results(y, results_stacking, test_id, f"STACKING_{keys[0]}_MODELSEL{k}")
oof_train = pd.DataFrame()
oof_test = pd.DataFrame()
oof_train['id']=train_data['id']
oof_train['pred_y']=results_stacking['train']
oof_train['y'] = np.log1p(train_data['y'])
oof_test['id']=test_data['id']
oof_test['pred_y']=results_stacking['test']
oof_train.to_csv(f"./{out_dir}/train_stacking_{keys[0]}_MODELSEL{k}.csv",index=False)
oof_test.to_csv(f"./{out_dir}/test_stacking_{keys[0]}_MODELSEL{k}.csv",index=False)
stacking_train_tmp += results_stacking['train']
stacking_test_tmp += results_stacking['test']
stacking_train2[f'stacking2_{j}'] = stacking_train_tmp/len(stacking_train_df_l)
stacking_test2[f'stacking2_{j}'] = stacking_test_tmp/len(stacking_test_df_l)
N_SPLITS = 10
SEED = 47
LEARNING_RATE = 1e-3
BATCH_SIZE = 32
EPOCHS = 200
PATIENCE = 20
def create_callbacks():
callbacks = []
callbacks.append(EarlyStopping(monitor='val_root_mean_squared_error',
min_delta=0,
patience=PATIENCE,
verbose=1,
mode='auto',
baseline=None,
restore_best_weights=True))
    # Reduce the learning rate by 5% after each epoch without improvement in val RMSE
callbacks.append(ReduceLROnPlateau(monitor='val_root_mean_squared_error',
factor=0.95,
patience=1,
verbose=0,
mode='auto',
min_delta=1e-4,
cooldown=0,
min_lr=1e-6))
return callbacks
def nn(lr, seed, input_shape):
model = Sequential([
Dense(2 ** 8, activation='relu', input_dim=input_shape, kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 7, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 6, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 5, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 4, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 3, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 3, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(1)
])
    # Compile with the Nadam optimizer and mean-squared-error loss
adam_opt = Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999, amsgrad=True)
nadam_opt = Nadam(learning_rate=lr, beta_1=0.9, beta_2=0.999)
ladam_opt = tfa.optimizers.LazyAdam(learning_rate=LEARNING_RATE, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False)
adamw_opt = tfa.optimizers.AdamW(learning_rate=LEARNING_RATE, weight_decay=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=True)
rmsprop_opt = RMSprop(learning_rate=lr, rho=0.9)
sgd_opt = SGD(learning_rate=lr, momentum=0.0, nesterov=False)
sgd_opt = SGD(learning_rate=lr, decay=1e-4, momentum=0.9, nesterov=True)
model.compile(optimizer=nadam_opt,
loss='mean_squared_error',
metrics=tf.keras.metrics.RootMeanSquaredError())
return model
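# nn() is a funnel-shaped MLP (256 -> 128 -> ... -> 8 -> 1) with He-normal initialisation
# for the ReLU layers; nn2() below is a shallower variant of the same architecture. The
# unused optimizer instances above appear to be leftovers from experimentation; Nadam is
# the one actually compiled.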
def nn2(lr, seed, input_shape):
model = Sequential([
Dense(2 ** 8, activation='relu', input_dim=input_shape, kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 7, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 6, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 5, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(2 ** 3, activation='relu', kernel_initializer=he_normal(seed=seed)),
Dense(1)
])
    # Compile with the Nadam optimizer and mean-squared-error loss
adam_opt = Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999, amsgrad=True)
nadam_opt = Nadam(learning_rate=lr, beta_1=0.9, beta_2=0.999)
ladam_opt = tfa.optimizers.LazyAdam(learning_rate=LEARNING_RATE, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False)
adamw_opt = tfa.optimizers.AdamW(learning_rate=LEARNING_RATE, weight_decay=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=True)
rmsprop_opt = RMSprop(learning_rate=lr, rho=0.9)
sgd_opt = SGD(learning_rate=lr, momentum=0.0, nesterov=False)
sgd_opt = SGD(learning_rate=lr, decay=1e-4, momentum=0.9, nesterov=True)
model.compile(optimizer=nadam_opt,
loss='mean_squared_error',
metrics=tf.keras.metrics.RootMeanSquaredError())
return model
%%time
history, history_false = [], []
score, score_false = [], []
pred_train = np.zeros((stacking_train.shape[0]))
pred_full_test = 0
skf = StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=SEED)
for fold_id, (train_idx, val_idx) in enumerate(tqdm(skf.split(stacking_train, y_bin))):
print("*"*80)
print(f"Started TF learning(1) fold:{fold_id+1} / {N_SPLITS}")
    # Train the model and generate predictions
model = nn(lr=LEARNING_RATE, seed=SEED, input_shape=stacking_train.shape[1])
callbacks = create_callbacks()
tr_X, val_X = stacking_train.iloc[train_idx].copy(), stacking_train.iloc[val_idx].copy()
tr_y, val_y = y.iloc[train_idx], y.iloc[val_idx]
history.append(model.fit(tr_X, tr_y, batch_size=BATCH_SIZE,
epochs=EPOCHS,
verbose=2,
validation_data=(val_X, val_y),
callbacks=callbacks))
pred_train[val_idx] = model.predict(val_X).reshape(-1)
score.append(model.evaluate(val_X, val_y, batch_size=BATCH_SIZE, verbose=0, return_dict=True))
pred_full_test = pred_full_test + model.predict(stacking_test)
RMSLE = mean_squared_error(y[val_idx], pred_train[val_idx], squared=False)
print(f"RMSLE={RMSLE}")
RMSLE_overall = mean_squared_error(y, pred_train, squared=False)
print(f"Overall RMSLE={RMSLE_overall}")
# Make submission
print("Saving submission file")
submission = pd.DataFrame({'id': test_id, 'y': np.expm1((pred_full_test/N_SPLITS).reshape(-1))})
submission.to_csv(f"./{out_dir}/submission_STACKING_NN1_CV{RMSLE_overall:.6f}.csv", index=False)
oof_train = pd.DataFrame()
oof_test = pd.DataFrame()
oof_train['id']=train_data['id']
oof_train['pred_y']=pred_train
oof_train['y'] = np.log1p(train_data['y'])
oof_test['id']=test_data['id']
oof_test['pred_y']=(pred_full_test/N_SPLITS).reshape(-1)
oof_train.to_csv(f"./{out_dir}/train_stacking_NN1.csv",index=False)
oof_test.to_csv(f"./{out_dir}/test_stacking_NN1.csv",index=False)
stacking_train2[f'stacking2_{len(fnc_list)}'] = pred_train
stacking_test2[f'stacking2_{len(fnc_list)}'] = pred_full_test/N_SPLITS
%%time
history, history_false = [], []
score, score_false = [], []
pred_train = np.zeros((stacking_train.shape[0]))
pred_full_test = 0
skf = StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=SEED)
kf = KFold(n_splits=N_SPLITS, shuffle=True, random_state=SEED)
for fold_id, (train_idx, val_idx) in enumerate(tqdm(skf.split(stacking_train, y_bin))):
print("*"*80)
print(f"Started TF learning(2) fold:{fold_id+1} / {N_SPLITS}")
    # Train the model and generate predictions
model = nn2(lr=LEARNING_RATE, seed=SEED, input_shape=stacking_train.shape[1])
callbacks = create_callbacks()
tr_X, val_X = stacking_train.iloc[train_idx].copy(), stacking_train.iloc[val_idx].copy()
tr_y, val_y = y.iloc[train_idx], y.iloc[val_idx]
history.append(model.fit(tr_X, tr_y, batch_size=BATCH_SIZE,
epochs=EPOCHS,
verbose=2,
validation_data=(val_X, val_y),
callbacks=callbacks))
pred_train[val_idx] = model.predict(val_X).reshape(-1)
score.append(model.evaluate(val_X, val_y, batch_size=BATCH_SIZE, verbose=0, return_dict=True))
pred_full_test = pred_full_test + model.predict(stacking_test)
RMSLE = mean_squared_error(y[val_idx], pred_train[val_idx], squared=False)
print(f"RMSLE={RMSLE}")
RMSLE_overall = mean_squared_error(y, pred_train, squared=False)
print(f"Overall RMSLE={RMSLE_overall}")
# Make submission
print("Saving submission file")
submission = pd.DataFrame({'id': test_id, 'y': np.expm1((pred_full_test/N_SPLITS).reshape(-1))})
submission.to_csv(f"./{out_dir}/submission_STACKING_NN2_CV{RMSLE_overall:.6f}.csv", index=False)
oof_train = pd.DataFrame()
oof_test = pd.DataFrame()
oof_train['id']=train_data['id']
oof_train['pred_y']=pred_train
oof_train['y'] = np.log1p(train_data['y'])
oof_test['id']=test_data['id']
oof_test['pred_y']=(pred_full_test/N_SPLITS).reshape(-1)
oof_train.to_csv(f"./{out_dir}/train_stacking_NN2.csv",index=False)
oof_test.to_csv(f"./{out_dir}/test_stacking_NN2.csv",index=False)
stacking_train2[f'stacking2_{len(fnc_list)+1}'] = pred_train
stacking_test2[f'stacking2_{len(fnc_list)+1}'] = pred_full_test/N_SPLITS
###Output
_____no_output_____
###Markdown
Stacking: 3rd layer
###Code
%%time
stacking_train2.to_csv(f"./{out_dir}/train_stacking2_AllModel.csv",index=False)
stacking_test2.to_csv(f"./{out_dir}/test_stacking2_AllModel.csv",index=False)
%%time
fnc_list2 = [fnc_l['LinearRegression'], fnc_l['BaggingRegressor']]
cols_to_stack = [c for c in stacking_train2.columns]
for target_fn in fnc_list2:
keys = [k for k, v in fnc_l.items() if v == target_fn]
stacking_SEED = 51
stacking_N_FOLDS = 10
encoding = []
results_stacking2 = target_fn(train=stacking_train2[cols_to_stack], test=stacking_test2[cols_to_stack], target=y, target_skf=y_bin, \
seed=stacking_SEED, n_folds=stacking_N_FOLDS, encoding=encoding)
submission_stacking2 = output_results(y, results_stacking2, test_id, f"STACKING2_full_{keys[0]}")
oof_train = pd.DataFrame()
oof_test = pd.DataFrame()
oof_train['id']=train_data['id']
oof_train['pred_y']=results_stacking2['train']
oof_train['y'] = np.log1p(train_data['y'])
oof_test['id']=test_data['id']
oof_test['pred_y']=results_stacking2['test']
oof_train.to_csv(f"./{out_dir}/train_stacking2_full_{keys[0]}.csv",index=False)
oof_test.to_csv(f"./{out_dir}/test_stacking2_full_{keys[0]}.csv",index=False)
###Output
_____no_output_____
###Markdown
Ensemble: 3rd layer
###Code
%%time
# LGB XGB CAT BAY SGD RFR ETR NN1 NN2
coef_l = [0.05, 0.00, 0.00, 0.65, 0.00, 0.05, 0.00, 0.15, 0.10]
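# The weights sum to 1.0, so this is a convex blend of the level-2 out-of-fold predictions
# (still in log1p space; expm1 is applied inside output_results).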
results_train = 0
results_test = 0
for j, coef in zip(range(stacking_train2.shape[1]), coef_l):
results_train += stacking_train2[f'stacking2_{j}'] * coef
results_test += stacking_test2[f'stacking2_{j}'] * coef
results = {'train': results_train, 'test': results_test}
submission_ensemble = output_results(y, results, test_id, f"ENSEMBLE2")
print(datetime.datetime.now()-start)
###Output
_____no_output_____ |
Model backlog/EfficientNet/EfficientNetB5/177 - EfficientNetB5 - Reg - No crop.ipynb | ###Markdown
Dependencies
###Code
import os
import sys
import cv2
import shutil
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tensorflow import set_random_seed
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from keras import backend as K
from keras.models import Model
from keras.utils import to_categorical
from keras import optimizers, applications
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback, LearningRateScheduler
def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
set_random_seed(0)
seed = 0
seed_everything(seed)
%matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore")
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from efficientnet import *
###Output
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
Using TensorFlow backend.
###Markdown
Load data
###Code
hold_out_set = pd.read_csv('../input/aptos-data-split/hold-out.csv')
X_train = hold_out_set[hold_out_set['set'] == 'train']
X_val = hold_out_set[hold_out_set['set'] == 'validation']
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
print('Number of train samples: ', X_train.shape[0])
print('Number of validation samples: ', X_val.shape[0])
print('Number of test samples: ', test.shape[0])
# Preprocess data
X_train["id_code"] = X_train["id_code"].apply(lambda x: x + ".png")
X_val["id_code"] = X_val["id_code"].apply(lambda x: x + ".png")
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
display(X_train.head())
###Output
Number of train samples: 2929
Number of validation samples: 733
Number of test samples: 1928
###Markdown
Model parameters
###Code
# Model parameters
FACTOR = 4
BATCH_SIZE = 8 * FACTOR
EPOCHS = 20
WARMUP_EPOCHS = 5
LEARNING_RATE = 1e-4 * FACTOR
WARMUP_LEARNING_RATE = 1e-3 * FACTOR
HEIGHT = 224
WIDTH = 224
CHANNELS = 3
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.5
LR_WARMUP_EPOCHS_1st = 2
LR_WARMUP_EPOCHS_2nd = 5
STEP_SIZE = len(X_train) // BATCH_SIZE
TOTAL_STEPS_1st = WARMUP_EPOCHS * STEP_SIZE
TOTAL_STEPS_2nd = EPOCHS * STEP_SIZE
WARMUP_STEPS_1st = LR_WARMUP_EPOCHS_1st * STEP_SIZE
WARMUP_STEPS_2nd = LR_WARMUP_EPOCHS_2nd * STEP_SIZE
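# STEP_SIZE is the number of batches per epoch; the warm-up and total step counts above
# feed the cosine-decay-with-warmup learning-rate schedulers defined further below.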
###Output
_____no_output_____
###Markdown
Pre-procecess images
###Code
train_base_path = '../input/aptos2019-blindness-detection/train_images/'
test_base_path = '../input/aptos2019-blindness-detection/test_images/'
train_dest_path = 'base_dir/train_images/'
validation_dest_path = 'base_dir/validation_images/'
test_dest_path = 'base_dir/test_images/'
# Making sure directories don't exist
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path)
# Creating train, validation and test directories
os.makedirs(train_dest_path)
os.makedirs(validation_dest_path)
os.makedirs(test_dest_path)
def crop_image(img, tol=7):
if img.ndim ==2:
mask = img>tol
return img[np.ix_(mask.any(1),mask.any(0))]
elif img.ndim==3:
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
mask = gray_img>tol
check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0]
if (check_shape == 0): # image is too dark so that we crop out everything,
return img # return original image
else:
img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))]
img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))]
img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))]
img = np.stack([img1,img2,img3],axis=-1)
return img
def circle_crop(img):
img = crop_image(img)
height, width, depth = img.shape
largest_side = np.max((height, width))
img = cv2.resize(img, (largest_side, largest_side))
height, width, depth = img.shape
x = width//2
y = height//2
r = np.amin((x, y))
circle_img = np.zeros((height, width), np.uint8)
cv2.circle(circle_img, (x, y), int(r), 1, thickness=-1)
img = cv2.bitwise_and(img, img, mask=circle_img)
img = crop_image(img)
return img
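# crop_image trims near-black borders using a brightness-tolerance mask; circle_crop
# additionally masks everything outside the inscribed circle. The circle_crop call in
# preprocess_image below is commented out, matching the "No crop" variant of this model.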
def preprocess_image(base_path, save_path, image_id, HEIGHT, WIDTH, sigmaX=10):
image = cv2.imread(base_path + image_id)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# image = circle_crop(image)
image = cv2.resize(image, (HEIGHT, WIDTH))
image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0,0), sigmaX), -4 , 128)
cv2.imwrite(save_path + image_id, image)
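# The addWeighted/GaussianBlur step subtracts a heavily blurred copy of the image to
# enhance local contrast (commonly referred to as Ben Graham's preprocessing in
# diabetic-retinopathy kernels).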
# Pre-process train set
for i, image_id in enumerate(X_train['id_code']):
preprocess_image(train_base_path, train_dest_path, image_id, HEIGHT, WIDTH)
# Pre-process validation set
for i, image_id in enumerate(X_val['id_code']):
preprocess_image(train_base_path, validation_dest_path, image_id, HEIGHT, WIDTH)
# Pre-process test set
for i, image_id in enumerate(test['id_code']):
preprocess_image(test_base_path, test_dest_path, image_id, HEIGHT, WIDTH)
###Output
_____no_output_____
###Markdown
Data generator
###Code
datagen=ImageDataGenerator(rescale=1./255,
rotation_range=360,
horizontal_flip=True,
vertical_flip=True)
train_generator=datagen.flow_from_dataframe(
dataframe=X_train,
directory=train_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="raw",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
valid_generator=datagen.flow_from_dataframe(
dataframe=X_val,
directory=validation_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="raw",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
test_generator=datagen.flow_from_dataframe(
dataframe=test,
directory=test_dest_path,
x_col="id_code",
batch_size=1,
class_mode=None,
shuffle=False,
target_size=(HEIGHT, WIDTH),
seed=seed)
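# batch_size=1 and shuffle=False keep the test predictions aligned with the row order of
# `test`; the random rotations/flips from `datagen` still apply at prediction time, which
# is what the TTA averaging further below exploits.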
def cosine_decay_with_warmup(global_step,
learning_rate_base,
total_steps,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0):
"""
Cosine decay schedule with warm up period.
In this schedule, the learning rate grows linearly from warmup_learning_rate
to learning_rate_base for warmup_steps, then transitions to a cosine decay
schedule.
:param global_step {int}: global step.
:param learning_rate_base {float}: base learning rate.
:param total_steps {int}: total number of training steps.
:param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
:param warmup_steps {int}: number of warmup steps. (default: {0}).
:param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}).
:Returns : a float representing learning rate.
:Raises ValueError: if warmup_learning_rate is larger than learning_rate_base, or if warmup_steps is larger than total_steps.
"""
if total_steps < warmup_steps:
raise ValueError('total_steps must be larger or equal to warmup_steps.')
learning_rate = 0.5 * learning_rate_base * (1 + np.cos(
np.pi *
(global_step - warmup_steps - hold_base_rate_steps
) / float(total_steps - warmup_steps - hold_base_rate_steps)))
if hold_base_rate_steps > 0:
learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,
learning_rate, learning_rate_base)
if warmup_steps > 0:
if learning_rate_base < warmup_learning_rate:
raise ValueError('learning_rate_base must be larger or equal to warmup_learning_rate.')
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
warmup_rate = slope * global_step + warmup_learning_rate
learning_rate = np.where(global_step < warmup_steps, warmup_rate,
learning_rate)
return np.where(global_step > total_steps, 0.0, learning_rate)
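# Illustrative sanity check of the schedule shape (arbitrary example values, not the ones
# used below): the rate ramps up linearly over the warmup steps, then follows a cosine
# decay towards zero.
_example_lrs = [cosine_decay_with_warmup(global_step=s, learning_rate_base=1e-3,
                                         total_steps=1000, warmup_steps=100)
                for s in (0, 50, 100, 500, 1000)]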
class WarmUpCosineDecayScheduler(Callback):
"""Cosine decay with warmup learning rate scheduler"""
def __init__(self,
learning_rate_base,
total_steps,
global_step_init=0,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0,
verbose=0):
"""
Constructor for cosine decay with warmup learning rate scheduler.
:param learning_rate_base {float}: base learning rate.
:param total_steps {int}: total number of training steps.
:param global_step_init {int}: initial global step, e.g. from previous checkpoint.
:param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
:param warmup_steps {int}: number of warmup steps. (default: {0}).
:param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}).
        :param verbose {int}: 0: quiet, 1: update messages. (default: {0}).
"""
super(WarmUpCosineDecayScheduler, self).__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.global_step = global_step_init
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.hold_base_rate_steps = hold_base_rate_steps
self.verbose = verbose
self.learning_rates = []
def on_batch_end(self, batch, logs=None):
self.global_step = self.global_step + 1
lr = K.get_value(self.model.optimizer.lr)
self.learning_rates.append(lr)
def on_batch_begin(self, batch, logs=None):
lr = cosine_decay_with_warmup(global_step=self.global_step,
learning_rate_base=self.learning_rate_base,
total_steps=self.total_steps,
warmup_learning_rate=self.warmup_learning_rate,
warmup_steps=self.warmup_steps,
hold_base_rate_steps=self.hold_base_rate_steps)
K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nBatch %02d: setting learning rate to %s.' % (self.global_step + 1, lr))
###Output
_____no_output_____
###Markdown
Model
###Code
def create_model(input_shape):
input_tensor = Input(shape=input_shape)
base_model = EfficientNetB5(weights=None,
include_top=False,
input_tensor=input_tensor)
base_model.load_weights('../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
final_output = Dense(1, activation='linear', name='final_output')(x)
model = Model(input_tensor, final_output)
return model
###Output
_____no_output_____
###Markdown
Train top layers
###Code
model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS))
for layer in model.layers:
layer.trainable = False
for i in range(-2, 0):
model.layers[i].trainable = True
cosine_lr_1st = WarmUpCosineDecayScheduler(learning_rate_base=WARMUP_LEARNING_RATE,
total_steps=TOTAL_STEPS_1st,
warmup_learning_rate=0.0,
warmup_steps=WARMUP_STEPS_1st,
hold_base_rate_steps=(2 * STEP_SIZE))
metric_list = ["accuracy"]
callback_list = [cosine_lr_1st]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
callbacks=callback_list,
verbose=2).history
###Output
Epoch 1/5
- 53s - loss: 2.0506 - acc: 0.4155 - val_loss: 1.6326 - val_acc: 0.4318
Epoch 2/5
- 42s - loss: 1.1494 - acc: 0.4394 - val_loss: 2.4500 - val_acc: 0.3024
Epoch 3/5
- 41s - loss: 0.9345 - acc: 0.4616 - val_loss: 1.9941 - val_acc: 0.3110
Epoch 4/5
- 42s - loss: 0.8174 - acc: 0.4895 - val_loss: 2.9449 - val_acc: 0.2782
Epoch 5/5
- 41s - loss: 0.7761 - acc: 0.4892 - val_loss: 2.8009 - val_acc: 0.2853
###Markdown
Fine-tune the complete model
###Code
for layer in model.layers:
layer.trainable = True
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
cosine_lr_2nd = WarmUpCosineDecayScheduler(learning_rate_base=LEARNING_RATE,
total_steps=TOTAL_STEPS_2nd,
warmup_learning_rate=0.0,
warmup_steps=WARMUP_STEPS_2nd,
hold_base_rate_steps=(3 * STEP_SIZE))
callback_list = [es, cosine_lr_2nd]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=EPOCHS,
callbacks=callback_list,
verbose=2).history
fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=(20, 6))
ax1.plot(cosine_lr_1st.learning_rates)
ax1.set_title('Warm up learning rates')
ax2.plot(cosine_lr_2nd.learning_rates)
ax2.set_title('Fine-tune learning rates')
plt.xlabel('Steps')
plt.ylabel('Learning rate')
sns.despine()
plt.show()
###Output
_____no_output_____
###Markdown
Model loss graph
###Code
fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=(20, 14))
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
plt.xlabel('Epochs')
sns.despine()
plt.show()
# Create empty arays to keep the predictions and labels
df_preds = pd.DataFrame(columns=['label', 'pred', 'set'])
train_generator.reset()
valid_generator.reset()
# Add train predictions and labels
for i in range(STEP_SIZE_TRAIN + 1):
im, lbl = next(train_generator)
preds = model.predict(im, batch_size=train_generator.batch_size)
for index in range(len(preds)):
df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'train']
# Add validation predictions and labels
for i in range(STEP_SIZE_VALID + 1):
im, lbl = next(valid_generator)
preds = model.predict(im, batch_size=valid_generator.batch_size)
for index in range(len(preds)):
df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'validation']
df_preds['label'] = df_preds['label'].astype('int')
def classify(x):
if x < 0.5:
return 0
elif x < 1.5:
return 1
elif x < 2.5:
return 2
elif x < 3.5:
return 3
return 4
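# The fixed 0.5-step cut-offs map the continuous regression output back to the five
# ordinal DR grades, e.g. classify(0.3) -> 0 and classify(2.7) -> 3.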
# Classify predictions
df_preds['predictions'] = df_preds['pred'].apply(lambda x: classify(x))
train_preds = df_preds[df_preds['set'] == 'train']
validation_preds = df_preds[df_preds['set'] == 'validation']
###Output
_____no_output_____
###Markdown
Model Evaluation Confusion Matrix Original thresholds
###Code
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
def plot_confusion_matrix(train, validation, labels=labels):
train_labels, train_preds = train
validation_labels, validation_preds = validation
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
train_cnf_matrix = confusion_matrix(train_labels, train_preds)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap=sns.cubehelix_palette(8),ax=ax2).set_title('Validation')
plt.show()
plot_confusion_matrix((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))
###Output
_____no_output_____
###Markdown
Quadratic Weighted Kappa
###Code
def evaluate_model(train, validation):
train_labels, train_preds = train
validation_labels, validation_preds = validation
print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(np.append(train_preds, validation_preds), np.append(train_labels, validation_labels), weights='quadratic'))
evaluate_model((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))
###Output
Train Cohen Kappa score: 0.977
Validation Cohen Kappa score: 0.912
Complete set Cohen Kappa score: 0.964
###Markdown
Apply model to test set and output predictions
###Code
def apply_tta(model, generator, steps=10):
step_size = generator.n//generator.batch_size
preds_tta = []
for i in range(steps):
generator.reset()
preds = model.predict_generator(generator, steps=step_size)
preds_tta.append(preds)
return np.mean(preds_tta, axis=0)
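# Each of the `steps` passes re-applies the random rotations/flips from `datagen` to the
# test images, so averaging the passes acts as test-time augmentation (TTA).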
preds = apply_tta(model, test_generator)
predictions = [classify(x) for x in preds]
results = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions})
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])
# Cleaning created directories
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path)
###Output
_____no_output_____
###Markdown
Predictions class distribution
###Code
fig, ax = plt.subplots(figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d").set_title('Test')
sns.despine()
plt.show()
results.to_csv('submission.csv', index=False)
display(results.head())
###Output
_____no_output_____ |
Iceberg-notebook.ipynb | ###Markdown
Imports
###Code
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import os
import math
import pickle
import datetime
import heapq
import xgboost as xgb
import h5py
from tqdm import tqdm_notebook as tqdm
from keras import backend as K
from keras.models import Model, load_model
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Reshape, Lambda, ZeroPadding2D, GaussianNoise, AlphaDropout, Input, Concatenate
from keras.layers.core import Flatten, Dropout
from keras.optimizers import Adam, SGD
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler
from keras.utils import to_categorical, normalize
from keras.models import model_from_json
from keras_tqdm import TQDMNotebookCallback
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import metrics
import tensorflow as tf
from scipy import ndimage
from skimage.morphology import reconstruction
from skimage.restoration import denoise_wavelet, denoise_tv_chambolle, denoise_nl_means
from cyclicLR_callback import CyclicLR
random_seed = 54321
np.random.seed(random_seed)
cwd = os.getcwd()
#for windows
model_path = cwd + '\\models\\'
import keras
keras.__version__
###Output
_____no_output_____
###Markdown
Manually create tensorflow session to avoid potential OEM errors on laptop's GPU.
###Code
tf.set_random_seed(random_seed)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.set_session(session)
K.set_image_dim_ordering('tf')
!nvidia-smi
###Output
'nvidia-smi' is not recognized as an internal or external command,
operable program or batch file.
###Markdown
Load Data
###Code
data = pd.read_json("Data/train/train.json", orient='records')
data.head()
train_df = data
data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1604 entries, 0 to 1603
Data columns (total 5 columns):
band_1 1604 non-null object
band_2 1604 non-null object
id 1604 non-null object
inc_angle 1604 non-null object
is_iceberg 1604 non-null int64
dtypes: int64(1), object(4)
memory usage: 62.7+ KB
###Markdown
Missing values
###Code
train_df['inc_angle_f'] = pd.to_numeric(train_df['inc_angle'], errors='coerce')
print("missing values in inc_angle: ", train_df['inc_angle_f'].isnull().sum())
#train_df['inc_angle_f'].replace(np.nan,train_df['inc_angle_f'].mean(), inplace=True)
train_df['inc_angle_f'].replace(np.nan,0, inplace=True)
train_df.tail()
###Output
missing values in inc_angle: 133
###Markdown
Transform for NN
###Code
def get_bands(train_df):
max_col = np.array(train_df.apply(lambda x: max((max(train_df.loc[x.name,'band_1']),max(train_df.loc[x.name,'band_2']))),axis=1)) - 10
max_col2 = max_col.reshape(-1,1) * np.ones(75*75).reshape(1,75*75)
max_col2 = max_col2.reshape(-1,75,75)
band_1 = np.array(train_df['band_1'].tolist()).reshape(-1,75,75) - max_col2
band_2 = np.array(train_df['band_2'].tolist()).reshape(-1,75,75) - max_col2
band_1_t = 10**(band_1/10)
band_2_t = 10**(band_2/10)
band_1_t = np.where(band_1_t > 0.01, band_1_t, 0)
band_2_t = np.where(band_2_t > 0.01, band_2_t, 0)
band_3 = band_1_t - band_2_t
X = np.stack((band_1,band_2,band_1_t,band_2_t),axis=3)
return band_1, band_2, band_1_t, band_2_t, band_3, X
band_1, band_2, band_1_t, band_2_t, band_3, X = get_bands(train_df)
plt.hist(band_1.flatten(), bins=200, color="red", alpha=0.4)
plt.hist(band_2.flatten(), bins=200, color="blue", alpha=0.4)
plt.show()
plt.hist(band_1[train_df[train_df['is_iceberg']==0].index[:3]].flatten(), bins=50, color="orange", alpha=0.4)
plt.hist(band_1[train_df[train_df['is_iceberg']==1].index[:3]].flatten(), bins=50, color="green", alpha=0.4)
plt.show()
plt.hist(band_1_t.flatten(),bins=200, color="red", alpha=0.4)
plt.hist(band_2_t.flatten(),bins=200, color="blue", alpha=0.4)
plt.yscale('log')
plt.xscale('log')
plt.show()
plt.hist(band_3[train_df[train_df['is_iceberg']==0].index].flatten(), bins=50, color="orange", alpha=0.4)
plt.hist(band_3[train_df[train_df['is_iceberg']==1].index].flatten(), bins=50, color="green", alpha=0.4)
plt.yscale('log')
plt.xscale('log')
plt.show()
def plot_bands(index, cmap="gray"):
fig = plt.figure(figsize=(12,6))
fig.suptitle("Is Iceberg: %x" % (train_df.loc[index,'is_iceberg']), fontsize=16)
ax1 = fig.add_subplot(251)
ax1.set_title("Band 1")
ax1.imshow(band_1[index], cmap=cmap)
ax2 = fig.add_subplot(252)
ax2.set_title("Band 2")
ax2.imshow(band_2[index], cmap=cmap)
ax3 = fig.add_subplot(253)
ax3.set_title("Band 1 t")
ax3.imshow(band_1_t[index], cmap=cmap)
ax3 = fig.add_subplot(254)
ax3.set_title("Band 2 t")
ax3.imshow(band_2_t[index], cmap=cmap)
ax3 = fig.add_subplot(255)
ax3.set_title("Band 3")
ax3.imshow(band_3[index], cmap=cmap)
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
plot_bands(0,cmap="inferno")
plot_bands(2,cmap="inferno")
y = train_df.loc[:,'is_iceberg']
y_angle = train_df.loc[:,['is_iceberg','inc_angle_f']]
y_angle['index'] = y_angle.index
y_angle.head()
###Output
_____no_output_____
###Markdown
Split into train test and validation sets
###Code
X_train, X_val, y_train, y_val = train_test_split(X, y_angle, test_size=0.35, random_state=random_seed)
print(X_train.shape)
print(X_val.shape)
X_val_tune, X_val_test, y_val_tune, y_val_test = train_test_split(X_val, y_val, test_size=0.3, random_state=random_seed)
print(X_val_tune.shape)
print(X_val_test.shape)
###Output
(393, 75, 75, 4)
(169, 75, 75, 4)
###Markdown
Data augmentation
###Code
X_train_sample = X_train[:]
y_train_sample = y_train[:]
print(X_train_sample.shape)
datagen = ImageDataGenerator(
samplewise_center=False,
samplewise_std_normalization=False,
rotation_range=20,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest')
datagen_val = ImageDataGenerator(
samplewise_center=False,
samplewise_std_normalization=False,
rotation_range=0,
horizontal_flip=False,
vertical_flip=False,
fill_mode='nearest')
#custom generator for fit_generator
from collections.abc import Generator  # the Generator ABC lives in collections.abc
class Datagen_angle(Generator):
def __init__(self, imagegen=ImageDataGenerator):
self.imagegen = imagegen
def flow(self, x, y, batch_size=8, shuffle=True):
self.generator = self.imagegen.flow(x, y, batch_size=batch_size, shuffle=shuffle)
return self
def send(self, ignored):
temp_data = next(self.generator)
temp_band_3 = temp_data[0][:,:,:,2] - temp_data[0][:,:,:,3] #band_1_t - band_2_t
temp_stacked1 = np.stack((temp_data[0][:,:,:,0],temp_data[0][:,:,:,1]),axis=3)
temp_stacked2 = np.stack((temp_data[0][:,:,:,2],temp_data[0][:,:,:,3],temp_band_3),axis=3)
nn_denoised_temp = temp_data[0] #pass 4 bands for nn denoising input
return [temp_stacked1, temp_stacked2,
nn_denoised_temp,
temp_data[1][:,1]], temp_data[1][:,0]
def throw(self, type=None, value=None, traceback=None):
raise StopIteration
datagen.fit(X_train_sample)
datagen_val.fit(X_val)
datagen_angle = Datagen_angle(imagegen=datagen)
datagen_angle_val = Datagen_angle(imagegen=datagen_val)
###Output
(1042, 75, 75, 4)
###Markdown
Learning rate scheduler and callback definition
###Code
# learning rate schedule
class LScheduler:
def __init__(self, initial_lrate=0.001, drop=0.66, patience=5):
self.initial_lrate=initial_lrate
self.drop = drop
self.patience = patience
def step_decay(self,epoch):
initial_lrate = self.initial_lrate
drop = self.drop
patience = self.patience
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/patience))
if math.fmod(epoch, patience) == 0:
print("Setting learning rate: ",lrate)
return lrate
###Output
_____no_output_____
###Markdown
Denoising
###Code
def denoising(img):
img_list = []
for i in range(4):
image = normalize(img[:,:,i])
img_list.append(ndimage.median_filter(image, 3))
return np.stack(img_list,axis=2)
def apply_over_axis(func, data, mask=None, axis=0, *args, **kwargs):
f_list = []
for i in range(data.shape[axis]):
if mask is None:
f_list.append(func(data[i], *args, **kwargs))
else:
f_list.append(func(data[i], mask=mask[i], *args, **kwargs))
return np.stack(f_list,axis=0)
#X_denoised = apply_over_axis(denoising, X)
#index=8
#original_index = y_train_sample.iloc[index].name
#cmap="inferno"
#fig = plt.figure(figsize=(12,6))
#fig.suptitle("Denoising: is iceberg: %x" % (y_train_sample.iloc[index,0]), fontsize=16)
#ax1 = fig.add_subplot(251)
#ax1.set_title("Before")
#ax1.imshow(X_train_sample[index][:,:,0], cmap=cmap)
#ax2 = fig.add_subplot(252)
#ax2.set_title("Denoised")
#ax2.imshow(X_denoised[original_index][:,:,0], cmap=cmap)
#ax1 = fig.add_subplot(253)
#ax1.set_title("Before - band 2")
#ax1.imshow(X_train_sample[index][:,:,1], cmap=cmap)
#ax2 = fig.add_subplot(254)
#ax2.set_title("Denoised - band 2")
#ax2.imshow(X_denoised[original_index][:,:,1], cmap=cmap)
#plt.show()
###Output
_____no_output_____
###Markdown
NN denoising
###Code
#custom generator for denoising
from collections.abc import Generator  # the Generator ABC lives in collections.abc
class Datagen_denoising(Generator):
def __init__(self, imagegen=ImageDataGenerator):
self.imagegen = imagegen
def flow(self, x, y, batch_size=8, shuffle=True):
self.generator = self.imagegen.flow(x, y, batch_size=batch_size, shuffle=shuffle)
return self
def send(self, ignored):
temp_data = next(self.generator)
temp_stacked1 = np.stack((temp_data[0][:,:,:,0],temp_data[0][:,:,:,1]),axis=3)
temp_stacked = np.stack((temp_data[0][:,:,:,0],temp_data[0][:,:,:,1],temp_data[0][:,:,:,2],
temp_data[0][:,:,:,3]),axis=3)
return temp_stacked, temp_stacked
def throw(self, type=None, value=None, traceback=None):
raise StopIteration
datagen_denoising = Datagen_denoising(imagegen=datagen)
datagen_denoising_val = Datagen_denoising(imagegen=datagen_val)
m_input = Input(shape=(75,75,4), name='m_input')
#conv layers for main_input
x1 = BatchNormalization()(m_input)
x1 = ZeroPadding2D()(x1)
x1 = Conv2D(8, (3,3), activation='relu')(x1)
x1 = BatchNormalization()(x1)
x1 = Dropout(0.2)(x1)
x1 = ZeroPadding2D()(x1)
x1 = Conv2D(8, (3,3), activation='relu')(x1)
x1 = BatchNormalization()(x1)
x1 = Dropout(0.2)(x1)
x1 = ZeroPadding2D()(x1)
m_output = Conv2D(4, (3,3), activation='linear', name='m_output')(x1)
model_denoise = Model(inputs=[m_input,], outputs=[m_output], name='Model_nn_denoising')
model_denoise.compile(optimizer=Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0),
loss='mean_squared_error',
metrics=['mae'])
model_denoise.summary()
#model training
#lScheduler_denoising = LScheduler(initial_lrate=0.1, drop=0.66, patience=3)
#lrScheduler_denoising = LearningRateScheduler(lScheduler_denoising.step_decay)
lrScheduler_denoising = CyclicLR(base_lr=1e-8, max_lr=0.006,
step_size=400, mode='triangular2', gamma=0.99994)
start_time = time.monotonic()
H = model_denoise.fit_generator(datagen_denoising.flow(X, y_angle, batch_size=8),
steps_per_epoch=len(X)/8,
validation_data=datagen_denoising_val.flow(X, y_angle, batch_size=8, shuffle=False),
validation_steps=len(X)/8,
#validation_data=[X_val,y_val],
epochs=12,
callbacks = [lrScheduler_denoising,
TQDMNotebookCallback(leave_inner=True, leave_outer=True)],
verbose=0)
model_time = time.monotonic() - start_time
print("Model training time: " + '{:d}'.format(int(model_time // 60)) + " minutes "
+ '{:.1f}'.format(model_time % 60) + " seconds")
h = lrScheduler_denoising.history
plt.plot(h['lr'], color="b", label='lr')
plt.legend()
plt.xlabel('# iterations')
plt.show()
# serialize model to JSON
model_json = model_denoise.to_json()
with open("models/model_denoise.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model_weights = model_denoise.get_weights()
with open('models/model_denoise_weights.pickle', 'wb') as handle:
pickle.dump(model_weights, handle, protocol=pickle.HIGHEST_PROTOCOL)
# load json and create model
with open("models/model_denoise.json", "r") as json_file:
loaded_model_json = json_file.read()
model_denoise = model_from_json(loaded_model_json)
# load weights into new model
with open('models/model_denoise_weights.pickle', 'rb') as handle:
model_weights = pickle.load(handle)
model_denoise.set_weights(model_weights)
print("Loaded model from disk")
X_nn_denoised = model_denoise.predict(X, verbose=1)
index=8
original_index = y_train_sample.iloc[index].name
cmap="inferno"
fig = plt.figure(figsize=(12,6))
fig.suptitle("Image denoising nn: %x" % (train_df.loc[original_index,'is_iceberg']), fontsize=16)
ax1 = fig.add_subplot(251)
ax1.set_title("Before band_1")
ax1.imshow(X_train_sample[index][:,:,0], cmap=cmap)
ax2 = fig.add_subplot(252)
ax2.set_title("NN Denoising band 1")
ax2.imshow(X_nn_denoised[original_index][:,:,0], cmap=cmap)
ax3 = fig.add_subplot(253)
ax3.set_title("Before band 2")
ax3.imshow(X_train_sample[index][:,:,1], cmap=cmap)
ax4 = fig.add_subplot(254)
ax4.set_title("NN Denoising band 2")
ax4.imshow(X_nn_denoised[original_index][:,:,1], cmap=cmap)
plt.show()
###Output
_____no_output_____
###Markdown
Keras model
###Code
model_code="CNN_2018_01_21_v01"
model_comment="2 CNN inputs 3,3 conv filters - 3rd input nn denoising, na=0"
%%writefile current_model.py
def InputBlock(x, dropout=0.25, prefix=''):
#conv layers for input
x = BatchNormalization()(x)
x = Conv2D(64, (3,3), activation='relu')(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Dropout(dropout)(x)
x = Conv2D(64, (3,3), activation='relu')(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Dropout(dropout)(x)
return(x)
main_input = Input(shape=(75,75,2), name='main_input')
aux_input = Input(shape=(75,75,3), name='aux_input')
aux_input_nn = Input(shape=(75,75,4), name='aux_input_nn')
x1 = InputBlock(main_input, prefix='m_input')
x2 = InputBlock(aux_input, prefix='a_input')
x3 = model_denoise(aux_input_nn)
x3 = InputBlock(x3,dropout=0.25, prefix='a_input_nn')
x = Concatenate(axis=3)([x1,x2,x3])
#x = BatchNormalization()(x)
#x = Dropout(0.2)(x)
#conv-block
x = Conv2D(128, (3, 3), activation='relu')(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Dropout(0.25)(x)
#conv-block
x = Conv2D(256, (3, 3), activation='relu')(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Dropout(0.25)(x)
#flatten
x = Flatten()(x)
angle_input = Input(shape=[1], name='angle_input')
#x1 = BatchNormalization()(angle_input)
merged = Concatenate()([x, angle_input])
#dense-block
x = Dense(513, activation='relu')(merged)
x = BatchNormalization()(x)
x = Dropout(0.25)(x)
#dense-block
x = Dense(256, activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.25)(x)
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
model_f = Model(inputs=[main_input,aux_input,
aux_input_nn,
angle_input],
outputs=[main_output])
model_f.compile(optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0),
loss='binary_crossentropy',
metrics=['accuracy'])
%run -i current_model.py
class ModelHistory(Callback):
def __init__(self, listSize=10):
self.listSize = listSize
self.models = []
def on_epoch_end(self, epoch, logs={}):
lastLoss = logs.get('val_loss')
rank = 1 - lastLoss
if len(self.models) > 0:
if rank > self.models[0][0]: # new model is better than the worst in the heap
if len(self.models) >= self.listSize: #if the model heap is already full
heapq.heappushpop(self.models, (rank, lastLoss, self.model.get_weights()))
else:
heapq.heappush(self.models, (rank, lastLoss, self.model.get_weights()))
else:
heapq.heappush(self.models, (rank, lastLoss, self.model.get_weights()))
def get_callbacks(filepath, save_to_disc = True, lScheduler = None,
patience=10, step_decay=LScheduler().step_decay, modelHistoryCallback=None):
#es = EarlyStopping('val_loss', patience=patience, mode="min")
msave = ModelCheckpoint(filepath, save_best_only=True)
#reduceLr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
# patience=5, min_lr=0.000001, verbose=1)
if lScheduler is None:
lrScheduler = LearningRateScheduler(step_decay)
else:
lrScheduler = lScheduler
tqdmCallback = TQDMNotebookCallback(leave_inner=True, leave_outer=True)
if (save_to_disc):
return [msave, lrScheduler, modelHistoryCallback, tqdmCallback]
else:
return [lrScheduler, modelHistoryCallback, tqdmCallback]
model_f.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
#import os
#os.environ["PATH"] += os.pathsep + 'd:/Anaconda3/Library/bin/graphviz/'
SVG(model_to_dot(model_f).create(prog='dot', format='svg'))
###Output
_____no_output_____
###Markdown
Model Training
###Code
#name init
model_timestamp = str(datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
model_best_weights_path = model_path + "weights." + model_code + "_" + model_timestamp + ".hdf5"
#lScheduler = LScheduler(initial_lrate=0.001, drop=0.66, patience=7)
modelEnsemble = ModelHistory(listSize=21)
lScheduler = CyclicLR(base_lr=0.0003, max_lr=0.002,
step_size=250, mode='triangular3', beta=0.33, theta=0.11)
callbacks = get_callbacks(filepath=model_best_weights_path, save_to_disc=False, lScheduler=lScheduler,
modelHistoryCallback=modelEnsemble)
#model training
start_time = time.monotonic()
H = model_f.fit_generator(datagen_angle.flow(X_train_sample, y_train_sample, batch_size=16),
steps_per_epoch=len(X_train_sample)/16,
validation_data=datagen_angle_val.flow(X_val_tune, y_val_tune, batch_size=24, shuffle=False),
validation_steps=len(X_val)/24,
#validation_data=[X_val,y_val],
epochs=150, callbacks=callbacks,
verbose=0)
model_time = time.monotonic() - start_time
print("Model training time: " + '{:d}'.format(int(model_time // 60)) + " minutes "
+ '{:.1f}'.format(model_time % 60) + " seconds")
h = lScheduler.history
plt.plot(h['lr'], color="b", label='lr')
plt.legend()
plt.xlabel('# iterations')
plt.show()
plt.plot(H.history['loss'], color="b", label='Training loss')
plt.plot(H.history['val_loss'], color="r", label='Validation loss')
plt.legend()
plt.xlabel('# epochs')
plt.show()
model_f.set_weights(heapq.nlargest(1,modelEnsemble.models)[0][2])
###Output
_____no_output_____
###Markdown
Additional training epochs with SGD - warm start
###Code
#addtional training epochs - warm start
#lScheduler = LScheduler(initial_lrate=0.000001, drop=0.66, patience=3)
modelEnsemble2 = ModelHistory(listSize=5)
lScheduler = CyclicLR(base_lr=1e-8, max_lr=1e-6,
step_size=80, mode='triangular3', beta=0.33, theta=0.11)
callbacks = get_callbacks(filepath=model_best_weights_path, save_to_disc=False, lScheduler=lScheduler,
modelHistoryCallback=modelEnsemble2)
model_f.compile(optimizer=SGD(lr=0.0001),loss='binary_crossentropy',metrics=['accuracy'])
start_time = time.monotonic()
H2 = model_f.fit_generator(datagen_angle.flow(X_train_sample, y_train_sample, batch_size=24, shuffle=False),
steps_per_epoch=len(X_train_sample)/24,
validation_data=datagen_angle_val.flow(X_val_tune, y_val_tune, batch_size=24, shuffle=False),
validation_steps=len(X_val)/24,
#validation_data=[X_val,y_val],
epochs=15, callbacks=callbacks,
verbose=0)
model_time = time.monotonic() - start_time
print("Model training time: " + '{:d}'.format(int(model_time // 60)) + " minutes "
+ '{:.1f}'.format(model_time % 60) + " seconds")
h = lScheduler.history
plt.plot(h['lr'], color="b", label='lr')
plt.legend()
plt.xlabel('# iterations')
plt.show()
for key in H.history:
H.history[key].extend(H2.history[key])
plt.plot(H2.history['loss'], color="b", label='Training loss')
plt.plot(H2.history['val_loss'], color="r", label='Validation loss')
plt.legend()
plt.xlabel('# epochs')
plt.show()
plt.plot(H.history['loss'], color="b", label='Training loss')
plt.plot(H.history['val_loss'], color="r", label='Validation loss')
plt.legend()
plt.xlabel('# epochs')
plt.show()
# serialize model to JSON
model_json = model_f.to_json()
with open("models/model.json", "w") as json_file:
json_file.write(model_json)
# load model from JSON - don't care about the weights right now, they are saved separately
with open("models/model.json", "r") as json_file:
loaded_model_json = json_file.read()
model_f = model_from_json(loaded_model_json)
#model_object_path = model_path + "model." + model_code + "_" + model_timestamp + '.hdf5'
#model_f.save('models/last_model.hdf5') //crashes python kernel with Keras version 2.1.2
#model_f = load_model(model_object_path)
###Output
_____no_output_____
###Markdown
Saving model history
###Code
argmin = np.array(H.history["loss"]).argmin()
argmin
argmin = np.array(H.history["val_loss"]).argmin()
argmax_acc = np.array(H.history["val_acc"]).argmax()
#with open('current_model.py','r') as model_python_code_file:
# models_history = pd.DataFrame({"timestamp":[model_timestamp],
# "val_loss [min]":[H.history['val_loss'][argmin]],
# "epoch [val_loss [min]]":argmin,
# "training_loss [val_loss [min]]":[H.history['loss'][argmin]],
# "val_acc [val_loss [min]]":[H.history['val_acc'][argmin]],
# "training_acc [val_loss [min]]":[H.history['acc'][argmin]],
#
# "val_acc [max]":[H.history['val_acc'][argmax_acc]],
# "epoch [val_acc [max]]":argmax_acc,
# "training_loss [val_acc [max]]":[H.history['loss'][argmax_acc]],
# "val_loss [val_acc [max]]":[H.history['val_loss'][argmax_acc]],
# "training_acc [val_acc [max]]":[H.history['acc'][argmax_acc]],
#
# "model_path":[model_object_path],
# "model_weights_path":[model_best_weights_path],
# "model_python_code":[model_python_code_file.read().replace('\r\n','\n')],
# "model_comment":[model_comment]
# })
#
#models_history = models_history[["timestamp",
# "epoch [val_loss [min]]", "val_loss [min]", "training_loss [val_loss [min]]",
# "val_acc [val_loss [min]]", "training_acc [val_loss [min]]",
# "epoch [val_acc [max]]", "val_acc [max]", "training_loss [val_acc [max]]",
# "val_loss [val_acc [max]]", "training_acc [val_acc [max]]",
# "model_path","model_weights_path","model_python_code","model_comment"]]
#models_history.head()
#print("Min validation loss epoch:")
#print("epoch: %d" %(argmin),
# "; val loss [min] %.4f: " % (models_history["val_loss [min]"][0]),
# "; training loss: %.4f" % (models_history["training_loss [val_loss [min]]"][0]),
# "; val acc: %.4f" % (models_history["val_acc [val_loss [min]]"][0]),
# "; training acc: %.4f " % (models_history["training_acc [val_loss [min]]"][0])
# )
#print("Max validation accuracy epoch:")
#print("epoch: %d" %(argmax_acc),
# "; val loss %.4f: " % (models_history["val_loss [val_acc [max]]"][0]),
# "; training loss: %.4f" % (models_history["training_loss [val_acc [max]]"][0]),
# "; val acc [max]: %.4f" % (models_history["val_acc [max]"][0]),
# "; training acc: %.4f " % (models_history["training_acc [val_acc [max]]"][0]),
# )
#print("model comment:", model_comment)
#
#with open('models_history.csv', 'a') as f:
# models_history.to_csv(f, header=False,index=False)
# #models_history.to_csv(f, index=False)
#df = pd.read_csv('models_history.csv')
#df.tail()
###Output
_____no_output_____
###Markdown
Model Ensemble
###Code
heapq.heappush(modelEnsemble.models, heapq.nlargest(1,modelEnsemble2.models)[0])
modelEnsemble = ModelHistory(listSize=26)
with open('models/modelEnsemble.pickle', 'rb') as handle:
modelEnsemble.models = pickle.load(handle)
#model_f.set_weights(heapq.nlargest(1,modelEnsemble.models)[0][2])
model_f.set_weights(heapq.nsmallest(1,modelEnsemble.models)[0][2])
model_f.compile(optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0),
loss='binary_crossentropy',
metrics=['accuracy'])
model_f.evaluate_generator(datagen_angle_val.flow(X_val_test, y_val_test, batch_size=16, shuffle=True),
steps = len(X)/6)
with open('models/modelEnsemble.pickle', 'wb') as handle:
pickle.dump(modelEnsemble.models, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('models/modelEnsemble.pickle', 'rb') as handle:
modelEnsemble.models = pickle.load(handle)
def get_prediction(model,weights, X, y):
model.set_weights(weights)
return model.predict_generator(datagen_angle_val.flow(X, y, batch_size=32, shuffle=False),
steps = len(X)/31, verbose=1)
def get_ensemble_predictions(X, y, modelEnsemble):
predictions = [get_prediction(model_f, model[2], X, y)[:X.shape[0]]
for model in tqdm(modelEnsemble.models)]
temp_array = np.array(predictions)
del(predictions)
temp_array = np.swapaxes(temp_array,0,1)
temp_array = temp_array.reshape(temp_array.shape[0],temp_array.shape[1])
return temp_array
#with h5py.File('tmp_data/ensemble_data.h5', 'r') as hf:
# ensemble_train = hf['ensemble_train'][:]
modelEnsemble.models[0][0]
ensemble_val = get_ensemble_predictions(X_val, y_val, modelEnsemble)
with h5py.File('tmp_data/ensemble_data.h5', 'w') as hf:
hf.create_dataset("ensemble_val", data=ensemble_val)
ensemble_val.shape
ensemble_val_tune = get_ensemble_predictions(X_val_tune, y_val_tune, modelEnsemble)
with h5py.File('tmp_data/ensemble_data.h5', 'a') as hf:
hf.create_dataset("ensemble_val_tune", data=ensemble_val_tune)
ensemble_val.shape
ensemble_val_test = get_ensemble_predictions(X_val_test, y_val_test, modelEnsemble)
with h5py.File('tmp_data/ensemble_data.h5', 'a') as hf:
hf.create_dataset("ensemble_val_test", data=ensemble_val_test)
ensemble_val.shape
#with h5py.File('tmp_data/ensemble_data.h5', 'r') as hf:
# ensemble_val = hf['ensemble_val'][:]
ensemble_val[1]
def modelfit(alg, X, y , X_test, y_test, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
if useTrainCV:
xgb_param = alg.get_xgb_params()
xgtrain = xgb.DMatrix(X, label=y)
cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
metrics='logloss', early_stopping_rounds=early_stopping_rounds)
alg.set_params(n_estimators=cvresult.shape[0])
#Fit the algorithm on the data
alg.fit(X, y,eval_metric='logloss')
#Predict training set:
dtrain_predictions = alg.predict(X)
dtrain_predprob = alg.predict_proba(X)[:,1]
dtest_predprob = alg.predict_proba(X_test)[:,1]
#Print model report:
print("\nModel Report")
print("n_estimators: %d" % cvresult.shape[0])
print("Accuracy : %.4g" % metrics.accuracy_score(y, dtrain_predictions))
print("Log loss (Train): %f" % metrics.log_loss(y, dtrain_predprob))
print("Log loss (Test): %f" % metrics.log_loss(y_test, dtest_predprob))
feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
plt.show()
#ensemble_train = get_ensemble_predictions(X_train, y_train, modelEnsemble)
#with h5py.File('tmp_data/ensemble_data.h5', 'a') as hf:
# hf.create_dataset("ensemble_train", data=ensemble_train)
#with h5py.File('tmp_data/ensemble_data.h5', 'r') as hf:
# ensemble_train = hf['ensemble_train'][:]
#ensemble_train[0]
#ensemble_all = get_ensemble_predictions(X, y_angle, modelEnsemble)
###Output
_____no_output_____
###Markdown
Fine tuning ensemble with xgboost
###Code
xgb1 = xgb.XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=8,
scale_pos_weight=1,
seed=27)
modelfit(xgb1, ensemble_val_tune, y_val_tune['is_iceberg'], ensemble_val_test, y_val_test['is_iceberg'])
#ensemble_all = get_ensemble_predictions(X, y_angle, modelEnsemble)
param_test1 = {
'max_depth':list(range(3,13,2)),
'min_child_weight':list(range(1,10,2))
}
gsearch1 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.1, n_estimators=82, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=8, scale_pos_weight=1, seed=random_seed),
param_grid = param_test1, scoring='neg_log_loss',n_jobs=1,iid=False, cv=5, verbose=1)
gsearch1.fit(ensemble_val_tune,y_val_tune['is_iceberg'].values)
gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_
param_test2 = {
'max_depth':[2,3,4],
'min_child_weight':[2.5,3,3.5,6.5,7,7.5]
}
gsearch2 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.1, n_estimators=82, max_depth=3,
min_child_weight=3, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=8, scale_pos_weight=1, seed=random_seed),
param_grid = param_test2, scoring='neg_log_loss',n_jobs=1,iid=False, cv=5, verbose=1)
gsearch2.fit(ensemble_val_tune,y_val_tune['is_iceberg'].values)
gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_
param_test3 = {
'gamma':[i/20.0 for i in range(0,30)]
}
gsearch3 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.1, n_estimators=82, max_depth=3,
min_child_weight=2.5, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=8, scale_pos_weight=1, seed=random_seed),
param_grid = param_test3, scoring='neg_log_loss',n_jobs=1,iid=False, cv=5, verbose=1)
gsearch3.fit(ensemble_val_tune,y_val_tune['is_iceberg'].values)
gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_
xgb2 = xgb.XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=3,
min_child_weight=2.5,
gamma=0.0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=8,
scale_pos_weight=1,
seed=27)
modelfit(xgb2, ensemble_val_tune, y_val_tune['is_iceberg'],ensemble_val_test, y_val_test['is_iceberg'])
param_test4 = {
'subsample':[i/10.0 for i in range(6,10)],
'colsample_bytree':[i/10.0 for i in range(6,10)]
}
gsearch4 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.1, n_estimators=76, max_depth=3,
min_child_weight=2.5, gamma=0.0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=8, scale_pos_weight=1, seed=random_seed),
param_grid = param_test4, scoring='neg_log_loss',n_jobs=1,iid=False, cv=5, verbose=1)
gsearch4.fit(ensemble_val_tune,y_val_tune['is_iceberg'].values)
gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_
param_test5 = {
'subsample':[i/100.0 for i in range(50,80,5)],
'colsample_bytree':[i/100.0 for i in range(40,80,5)]
}
gsearch5 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.1, n_estimators=76, max_depth=3,
min_child_weight=2.5, gamma=0.0, subsample=0.6, colsample_bytree=0.6,
objective= 'binary:logistic', nthread=8, scale_pos_weight=1, seed=random_seed),
param_grid = param_test5, scoring='neg_log_loss',n_jobs=1,iid=False, cv=5, verbose=1)
gsearch5.fit(ensemble_val_tune,y_val_tune['is_iceberg'].values)
gsearch5.grid_scores_, gsearch5.best_params_, gsearch5.best_score_
param_test6 = {
'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100]
}
gsearch6 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.1, n_estimators=76, max_depth=3,
min_child_weight=2.5, gamma=0.0, subsample=0.65, colsample_bytree=0.55,
objective= 'binary:logistic', nthread=8, scale_pos_weight=1, seed=random_seed),
param_grid = param_test6, scoring='neg_log_loss',n_jobs=1,iid=False, cv=5, verbose=1)
gsearch6.fit(ensemble_val_tune,y_val_tune['is_iceberg'].values)
gsearch6.grid_scores_, gsearch6.best_params_, gsearch6.best_score_
param_test7 = {
'reg_alpha':[0.0001, 0.0003, 0.001, 0.01, 0.03, 0.1]
}
gsearch7 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.1, n_estimators=76, max_depth=3,
min_child_weight=2.5, gamma=0.0, subsample=0.65, colsample_bytree=0.55,
objective= 'binary:logistic', nthread=8, scale_pos_weight=1, seed=random_seed),
param_grid = param_test7, scoring='neg_log_loss',n_jobs=1,iid=False, cv=5, verbose=1)
gsearch7.fit(ensemble_val_tune,y_val_tune['is_iceberg'].values)
gsearch7.grid_scores_, gsearch7.best_params_, gsearch7.best_score_
xgb3 = xgb.XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=3,
min_child_weight=2.5,
gamma=0.0,
subsample=0.65,
colsample_bytree=0.55,
reg_alpha=0.0001,
objective= 'binary:logistic',
nthread=8,
scale_pos_weight=1,
seed=27)
modelfit(xgb3, ensemble_val_tune, y_val_tune['is_iceberg'], ensemble_val_test, y_val_test['is_iceberg'])
xgb4 = xgb.XGBClassifier(
learning_rate =0.008,
n_estimators=1000,
max_depth=3,
min_child_weight=3,
gamma=0.0,
subsample=0.65,
colsample_bytree=0.55,
reg_alpha=0.0001,
objective= 'binary:logistic',
nthread=8,
scale_pos_weight=1,
seed=27)
modelfit(xgb4, ensemble_val_tune, y_val_tune['is_iceberg'], ensemble_val_test, y_val_test['is_iceberg'])
with open('models/modelXgb4.pickle', 'wb') as handle:
pickle.dump(xgb4, handle, protocol=pickle.HIGHEST_PROTOCOL)
#with open('models/modelXgb4.pickle', 'rb') as handle:
# xgb4 = pickle.load(handle)
###Output
_____no_output_____
###Markdown
Predictions
###Code
#use model
#model_object_path = "models\\model.CNN_2017_12_19_v15_2017_12_21_15_54_42.hdf5"
#model_best_weights_path = "models\\weights.CNN_2017_12_19_v15_2017_12_21_15_54_42.hdf5"
#model_f = load_model(model_object_path)
#model_f.load_weights(model_best_weights_path)
#model_f.evaluate_generator(datagen_angle_val.flow(X_val, y_val, batch_size=32, shuffle=False),
# steps = len(X_val)/32)
test_df = pd.read_json("Data/test/test.json")
test_df.head()
test_df['inc_angle_f'] = pd.to_numeric(test_df['inc_angle'], errors='coerce')
print("missing values in inc_angle: ", test_df['inc_angle_f'].isnull().sum())
test_df['inc_angle_f'].replace(np.nan,0, inplace=True)
test_df.tail()
t_band_1, t_band_2, t_band_1_t, t_band_2_t, t_band_3, X_test = get_bands(test_df)
y_angle_test = test_df.loc[:,['is_iceberg','inc_angle_f']]
y_angle_test['index'] = y_angle_test.index
X_test.shape
X_train.shape
X_tt = np.append(X_test,X_train, axis=0)
X_tt.shape
y_angle_tt = pd.concat([y_angle_test,y_train])
len(y_angle_tt)
del(band_1)
del(band_1_t)
del(band_2)
del(band_2_t)
del(band_3)
#del(X_train_sample)
del(xgb1)
del(xgb2)
del(xgb3)
#del(train_df)
del(t_band_1,t_band_2,t_band_1_t, t_band_2_t, t_band_3)
del(test_df)
###Output
_____no_output_____
###Markdown
Training denoising model on train and test data - warm start
###Code
lScheduler_denoising = LScheduler(initial_lrate=0.001, drop=0.66, patience=5)
lrScheduler_denosing = LearningRateScheduler(lScheduler_denoising.step_decay)
#model training
model_denoise.compile(optimizer=Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0),
loss='mean_squared_error',
metrics=['mae'])
start_time = time.monotonic()
H = model_denoise.fit_generator(datagen_denoising.flow(X_tt, y_angle_tt, batch_size=8),
steps_per_epoch=len(X_tt)/8,
validation_data=datagen_denoising_val.flow(X_tt, y_angle_tt, batch_size=8, shuffle=False),
validation_steps=len(X_tt)/8,
#validation_data=[X_val,y_val],
epochs=10,
callbacks = [lrScheduler_denosing,
TQDMNotebookCallback(leave_inner=True, leave_outer=True)],
verbose=0)
model_time = time.monotonic() - start_time
print("Model training time: " + '{:d}'.format(int(model_time // 60)) + " minutes "
+ '{:.1f}'.format(model_time % 60) + " seconds")
# serialize weights to HDF5
model_weights = model_denoise.get_weights()
with open('models/model_denoise_weights_tt.pickle', 'wb') as handle:
pickle.dump(model_weights, handle, protocol=pickle.HIGHEST_PROTOCOL)
###Output
_____no_output_____
###Markdown
Training on the whole dataset
###Code
ensemble_all = get_ensemble_predictions(X, y_angle, modelEnsemble)
xgb4 = xgb.XGBClassifier(
learning_rate =0.0325,
n_estimators=1000,
max_depth=3,
min_child_weight=6.5,
gamma=0.0,
subsample=0.8,
colsample_bytree=0.85,
reg_alpha=3e-03,
objective= 'binary:logistic',
nthread=8,
scale_pos_weight=1,
seed=27)
modelfit(xgb4, ensemble_all, y_angle['is_iceberg'], ensemble_train, y_train['is_iceberg'])
###Output
_____no_output_____
###Markdown
Pseudo labeling
###Code
#del(X_train,y_train)
#del(data)
#del(y_train_sample)
#del(X_tt, y_angle_tt)
#del(modelEnsemble2)
#del(H,H2)
#del(X,y,y_angle)
#del(ensemble_val, ensemble_train)
#idx = 0
#for model in modelEnsemble.models:
# pred = get_prediction(model_f, model[2], X_test, y_angle_test)[:X.shape[0]]
# pred = np.array(pred)
# dataset_name = 'ensemble_data_%02d' % idx
# with h5py.File('tmp_data/ensemble_test_data.hd5', 'w') as hf:
# hf.create_dataset(dataset_name, data=pred)
#ensemble_test = get_ensemble_predictions(X_test, y_angle_test, modelEnsemble)
#ensemble_test.shape
#pseudo_labels = xgb4.predict(ensemble_test)
#test_probs = xgb4.predict_proba(ensemble_test)
#predictions = test_probs
#y_angle_test.count()
#y_angle_test['is_iceberg'] = pseudo_labels
#y_angle_tt = y_angle_test.append(y_train)
#y_angle_tt.count()
###Output
_____no_output_____
###Markdown
Training on pseudo labels
###Code
#lScheduler = LScheduler(initial_lrate=0.00001, drop=0.66, patience=5)
#callbacks = [LearningRateScheduler(lScheduler.step_decay)]
##model training
#start_time = time.monotonic()
#
#H = model_f.fit_generator(datagen_angle.flow(X_tt, y_angle_tt, batch_size=32),
# steps_per_epoch=len(X_test)/32,
# validation_data=datagen_angle_val.flow(X_val, y_val, batch_size=32, shuffle=False),
# validation_steps=len(X_val)/16,
# #validation_data=[X_val,y_val],
# epochs=10, callbacks=callbacks)
#
#model_time = time.monotonic() - start_time
#print("Model training time: " + '{:d}'.format(int(model_time // 60)) + " minutes "
# + '{:.1f}'.format(model_time % 60) + " seconds")
#predictions = model_f.predict_generator(datagen_angle_val.flow(X_test, y_angle_test, batch_size=32, shuffle=False),
# steps = len(X_test)/31, verbose=1)
#test_df.count()
#len(predictions[:8424])
#submission = pd.DataFrame({'id': test_df['id'], 'is_iceberg': predictions[:8424].reshape(-1)})
#submission.head(10)
#submission.to_csv("submission.v24.csv", index=False)
###Output
_____no_output_____ |
examples/Quantum Circuit Example.ipynb | ###Markdown
Example Using Sycamore Quantum Circuit
Here we'll run through a more in-depth tensor contraction path finding, including all the different visualization options, by computing some amplitudes for random circuits on Google's Sycamore chip.
###Code
import quimb.tensor as qtn
import cotengra as ctg
# just set up some misc notebook plotting stuff
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 200
###Output
_____no_output_____
###Markdown
Two Sycamore circuit definitions are included in this repository, the first of which ($m=10$) should fit into memory, and the second of which ($m=12$) will require *slicing*.
###Code
def load_circuit(
n=53,
depth=10,
seed=0 ,
elided=0,
sequence='ABCDCDAB',
swap_trick=False
):
file = f'circuit_n{n}_m{depth}_s{seed}_e{elided}_p{sequence}.qsim'
if swap_trick:
gate_opts={'contract': 'swap-split-gate', 'max_bond': 2}
else:
gate_opts={}
# instantiate the `Circuit` object that
# constructs the initial tensor network:
return qtn.Circuit.from_qasm_file(file, gate_opts=gate_opts)
###Output
_____no_output_____
###Markdown
Make our target tensor network the overlap of the wavefunction with a bitstring:
###Code
circ = load_circuit(depth=20)
psi_f = qtn.MPS_computational_state('0' * (circ.N))
tn = circ.psi & psi_f
output_inds = []
###Output
_____no_output_____
###Markdown
We can check out what the raw TN looks like:
###Code
#tn.graph(iterations=20, color=circ.psi.site_tags, legend=False, figsize=(3, 3))
###Output
_____no_output_____
###Markdown
As well as what it looks like after standard pre-processing:
###Code
# inplace full simplify and cast to single precision
tn.full_simplify_(output_inds=output_inds)
tn.astype_('complex64')
###Output
_____no_output_____
###Markdown
The simplification uses some `numba` compiled functions which might slow things down on the first run.
###Code
#tn.graph(initial_layout='kamada_kawai', iterations=10, color=circ.psi.site_tags, legend=False, figsize=(3, 3))
###Output
_____no_output_____
###Markdown
Now we're ready to try and find a contraction path (various initialization options are illustrated - not necessarily the best):
###Code
opt = ctg.HyperOptimizer(
methods=['cyc_kahypar-balanced', 'kahypar-balanced', 'cyc_kahypar', 'kahypar'],
max_repeats=500,
progbar=True,
minimize='size',#'flops
score_compression=0.5, # deliberately make the optimizer try many methods
)
###Output
_____no_output_____
###Markdown
The optimizer is stateful, so the following actual search call can be run repeatedly:
###Code
#info = tn.contract(all, optimize=opt, get='path-info')
###Output
_____no_output_____
###Markdown
We can visualize the progress of the Bayesian optimizer like so:
###Code
opt = ctg.HyperOptimizer(
methods=['kahypar'],
max_repeats=128,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
tree = ctg.ContractionTree.from_info(info)
tree_s = tree.subtree_reconfigure(progbar=True,minimize='size')
tree_f = tree.subtree_reconfigure(progbar=True,minimize='flops')
opt = ctg.HyperOptimizer(
methods=['kahypar-balanced'],
max_repeats=128,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
tree = ctg.ContractionTree.from_info(info)
tree_s = tree.subtree_reconfigure(progbar=True,minimize='size')
tree_f = tree.subtree_reconfigure(progbar=True,minimize='flops')
opt = ctg.HyperOptimizer(
methods=['kahypar-agglom'],
max_repeats=128,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
tree = ctg.ContractionTree.from_info(info)
tree_s = tree.subtree_reconfigure(progbar=True,minimize='size')
tree_f = tree.subtree_reconfigure(progbar=True,minimize='flops')
opt = ctg.HyperOptimizer(
methods=['cyc_kahypar'],
max_repeats=128,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
tree = ctg.ContractionTree.from_info(info)
tree_s = tree.subtree_reconfigure(progbar=True,minimize='size')
tree_f = tree.subtree_reconfigure(progbar=True,minimize='flops')
opt = ctg.HyperOptimizer(
methods=['cyc_kahypar-balanced'],
max_repeats=128,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
tree = ctg.ContractionTree.from_info(info)
tree_s = tree.subtree_reconfigure(progbar=True,minimize='size')
tree_f = tree.subtree_reconfigure(progbar=True,minimize='flops')#,subtree_size=12
opt = ctg.HyperOptimizer(
methods=['cyc_kahypar-agglom'],
max_repeats=128,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
#slicing_reconf_opts={'target_size': 2**27, 'reconf_opts': {}},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
tree = ctg.ContractionTree.from_info(info)
tree_s = tree.subtree_reconfigure(progbar=True,minimize='size')
tree_f = tree.subtree_reconfigure(progbar=True,minimize='flops')
opt = ctg.HyperOptimizer(
methods=['cyc2_kahypar'],
max_repeats=128,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
tree = ctg.ContractionTree.from_info(info)
tree_s = tree.subtree_reconfigure(progbar=True,minimize='size')
tree_f = tree.subtree_reconfigure(progbar=True,minimize='flops')
opt = ctg.HyperOptimizer(
methods=['cyc2_kahypar-balanced'],
max_repeats=128,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
tree = ctg.ContractionTree.from_info(info)
tree_s = tree.subtree_reconfigure(progbar=True,minimize='size')
tree_f = tree.subtree_reconfigure(progbar=True,minimize='flops')
opt = ctg.HyperOptimizer(
methods=['cyc2_kahypar-agglom'],
max_repeats=128,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
#slicing_reconf_opts={'target_size': 2**27, 'reconf_opts': {}},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
tree = ctg.ContractionTree.from_info(info)
tree_s = tree.subtree_reconfigure(progbar=True,minimize='size')
tree_f = tree.subtree_reconfigure(progbar=True,minimize='flops')
opt = ctg.HyperOptimizer(
methods=['cyc_kahypar', 'cyc_kahypar-balanced', 'cyc_kahypar-agglom', 'cyc2_kahypar', 'cyc2_kahypar-balanced', 'cyc2_kahypar-agglom', 'kahypar', 'kahypar-balanced', 'kahypar-agglom'],
max_repeats=1500,
progbar=True,
#optlib = 'random',
minimize='size',#'flops
reconf_opts={'minimize':'flops'},
score_compression=0.5, # deliberately make the optimizer try many methods
)
info = tn.contract(all, optimize=opt, get='path-info')
opt.plot_trials()
###Output
_____no_output_____
###Markdown
Clearly the `kahypar` optimizer seems to be able to find the lowest cost contractions. We can also plot the relationship between contraction flops and size (the `minimize='combo'` score, log2[SIZE] + log2[FLOPS], effectively ranks how close they are to the origin and can be useful to balance the two aims):
###Code
opt.plot_scatter()
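# The 'combo' score mentioned above is just log2(SIZE) + log2(FLOPS); a tiny
# illustrative calculation with made-up numbers (not taken from this run):
import math
example_size, example_flops = 2**27, 2**51
print("combo score:", math.log2(example_size) + math.log2(example_flops))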
###Output
_____no_output_____
###Markdown
Where it becomes apparent that, while correlated, the minimum size contraction found is not necessarily the same as the minimum cost contraction found. If we want to visualize what the actual best contraction tree looks like, we need to extract the `ContractionTree` object from the optimizer:
###Code
tree = opt.get_tree()
tree.plot_ring(node_scale= 1 / 3, edge_scale=2 / 3)
###Output
_____no_output_____
###Markdown
We can try and plot what this might look like on top of the TN graph arranged properly, though it's likely messy...
###Code
tree.plot_tent()
###Output
_____no_output_____
###Markdown
We can see that the contraction found is imbalanced, with small tensors being steadily absorbed into one big tensor. One more plot function allows one to investigate the actual numbers involved:
###Code
tree.plot_contractions()
###Output
_____no_output_____
###Markdown
Here, 'peak-size' is the memory required for both the inputs and the output of each contraction. Note again that 'flops' as defined here assumes real data (as per the `opt_einsum` convention); the 'cost' or number of scalar operations, $C$, is generally half this, whereas for quantum circuits with complex tensors the real FLOP count will be roughly 4x. We can also actually perform the contraction (this is using a GTX 2070):
###Code
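# Quick aside illustrating the 'flops' vs 'cost' note above with purely
# hypothetical numbers (not read from this tree): opt_einsum-style 'flops'
# assumes real data, the scalar-operation count C is roughly half that, and
# complex tensors cost roughly 4x C in real FLOPs.
import math
flops = 2**51
cost = flops / 2
complex_flops = 4 * cost
print(math.log2(cost), math.log2(complex_flops))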
%%timeit
tn.contract(all, optimize=opt.path, backend='jax')
%%timeit
tn.contract(all, optimize=opt.path, backend='torch')
###Output
_____no_output_____
###Markdown
TN construction and simplification is deterministic in `quimb`, so at least in this case we can easily evaluate another amplitude with the same contraction tree:
###Code
tn = (circ.psi & qtn.MPS_rand_computational_state(circ.N, seed=42))
tn.full_simplify_().astype_('complex64')
%%time
tn.contract(all, optimize=opt.path, backend='jax')
%%time
tn.contract(all, optimize=opt.path, backend='jax')
###Output
_____no_output_____
###Markdown
Searching for sliced contractions (Sycamore $m=12$)
To illustrate slicing we'll set up a (much harder!) depth 12 circuit. We'll perform a swapped rank-2 decomposition on the gates (for a not insignificant drop in total fidelity):
###Code
circ = load_circuit(depth=12, swap_trick=True)
sampler = qtn.MPS_computational_state('0' * (circ.N))
tn = circ.psi & sampler
tn.full_simplify_(output_inds=[])
tn.astype_('complex64')
###Output
_____no_output_____
###Markdown
Because of the rank-2 swapped gate decomposition, the full simplify function has now found hyperedge-introducing diagonal reductions (which is why there are more tensors than indices). Now when we initialize the hyper optimizer we'll tell it to slice each contraction before reporting the cost and size.
###Code
# we're going to help accelerate the optimizer search by restricting its search space,
# since highly balanced contraction trees generally slice best:
ctg.hyper._HYPER_SEARCH_SPACE['kahypar']['imbalance']['max'] = 0.1
opt = ctg.HyperOptimizer(
methods=['kahypar'],
max_time=120, # just search for 2 minutes
max_repeats=1000,
progbar=True,
minimize='flops',
slicing_opts={'target_size': 2**28}
)
# because of the hyperedges we need to specify no output indices
info = tn.contract(all, optimize=opt, get='path-info', output_inds=[])
###Output
_____no_output_____
###Markdown
Sliced contractions can be more difficult to find; if performance is critical it's worth running this search for longer, maybe with a large parallel pool supplied to the `parallel=` kwarg (a sketch follows the scatter plot below). We can see that all the contractions are now 'size 28' however:
###Code
opt.plot_scatter()
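# Hedged sketch of the `parallel=` option mentioned above: passing `parallel=True`
# asks cotengra to use a default worker pool (the exact behaviour depends on the
# installed cotengra version). This optimizer is illustrative only and not used below.
opt_parallel = ctg.HyperOptimizer(
    methods=['kahypar'],
    max_repeats=1000,
    parallel=True,
    slicing_opts={'target_size': 2**28},
)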
###Output
_____no_output_____
###Markdown
We can check what this new contraction tree looks like:
###Code
tree = opt.get_tree()
tree.plot_ring(node_scale=1 / 3, edge_scale=2 / 3)
###Output
_____no_output_____
###Markdown
As enforced, it's now somewhat more balanced than the $m=10$ tree. Now we are ready to search properly for the slicing indices; $2^{28}$ should be small enough to fit into no more than 8GB of memory.
###Code
sf = ctg.SliceFinder(info, target_size=2**28)
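# Back-of-envelope check of the memory claim above: a 2**28-entry complex64
# intermediate takes 2**28 * 8 bytes = 2 GiB, well inside an 8 GB budget even
# with a few such buffers alive at once.
print(f"target slice size ~ {2**28 * 8 / 2**30:.0f} GiB (complex64)")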
###Output
_____no_output_____
###Markdown
We can do quite a thorough search with different levels of exploration:
###Code
ix_sl, cost_sl = sf.search(temperature=1.0)
ix_sl, cost_sl = sf.search(temperature=0.1)
ix_sl, cost_sl = sf.search(temperature=0.01)
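# Quick summary of the slicing found by the last search call. This assumes every
# sliced index has dimension 2, which holds for these qubit circuits:
print(f"{len(ix_sl)} indices sliced -> {2**len(ix_sl)} independent slices")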
###Output
_____no_output_____
###Markdown
We can also visualise what effect the slicing has had on the total cost (left - starting point, further to the right equals more sliced):
###Code
sf.plot_slicings(color_scheme='plasma')
###Output
_____no_output_____
###Markdown
Here there seems to have been very little theoretical overhead introduced by the slicing, *for this path*. The real slicing overhead is the increase in FLOPs in comparison to the best unsliced path (likely very different).
Performing the sliced contraction
The order of `quimb` tensors and their data is guaranteed to match that used by the `opt_einsum` syntax:
###Code
arrays = [t.data for t in tn]
sc = sf.SlicedContractor(arrays)
###Output
_____no_output_____
###Markdown
Or we could translate the opt_einsum symbols back into `quimb` indices to handle the contractions in tensor network form (and use ``.cut_iter``).
###Code
[info.quimb_symbol_map[ix] for ix in ix_sl]
###Output
_____no_output_____
###Markdown
The first time a contraction is run by `jax` with a particular shape it's compiled, which can take a few seconds:
###Code
backend = 'jax'
%%time
c = sc.contract_slice(0, backend=backend)
###Output
_____no_output_____
###Markdown
However, the sliced contraction stores the compiled expression and reuses it for each slice:
###Code
import tqdm
for i in tqdm.tqdm(range(1, sc.nslices)):
c = c + sc.contract_slice(i, backend=backend)
c
###Output
_____no_output_____
###Markdown
Again, the TN manipulations are deterministic so we can re-use everything:
###Code
tn = circ.psi & qtn.MPS_rand_computational_state(circ.N, seed=42)
tn.full_simplify_(output_inds=[]).astype_('complex64')
# update the SlicedContractor's arrays
sc.arrays = tuple(t.data for t in tn)
# perform the contraction
sum(sc.contract_slice(i, backend=backend) for i in tqdm.tqdm(range(sc.nslices)))
# update the SlicedContractor's arrays
sc.arrays = tuple(t.data for t in tn)
# perform the contraction
sum(sc.contract_slice(i, backend=backend) for i in tqdm.tqdm(range(sc.nslices)))
###Output
_____no_output_____ |
P8_Data_Engineering_Capstone_Project/P8_capstone_project/P8_Capstone_Project_Data_Preparation_Step_3.1.2_and_4.1.2.ipynb | ###Markdown
Project 08 - Analysis of U.S. Immigration (I-94) Data
Udacity Data Engineer - Capstone Project
> by Peter Wissel | 2021-05-05

Project Overview
This project works with a data set for immigration to the United States. The supplementary datasets will include data on airport codes, U.S. city demographics and temperature data. The following process is divided into different sub-steps to illustrate how to answer the questions set by the business analytics team.

The project file follows these steps:
* Step 3: Define the Data Model
* Step 4: Run ETL to Model the Data

3.1.2. At what airports do foreign persons arrive for immigration to the U.S.? [(Data pipeline)](question2_data_pipeline)
**Airport dimension**
1. Clean data and create staging table `st_immigration_airports` from file [`I94_SAS_Labels_I94PORT.txt`](../P8_capstone_resource_files/I94_sas_labels_descriptions_extracted_data/I94_SAS_Labels_I94PORT.txt) with the columns `st_ia_airport_code` as referencing column, `st_ia_airport_name` and `st_ia_airport_state_code`. Note that the I-94 airport code is **not** the same as the [IATA](https://en.wikipedia.org/wiki/International_Air_Transport_Association) code and does not correspond to it. Therefore, `SFR` (I94: 'SFR' = 'SAN FRANCISCO, CA') is used for San Francisco Airport in this scenario instead of `SFO`. `SFR` normally means San Fernando, CA, USA. **Project decision:** Data from file [airport-codes.csv](../P8_capstone_resource_files/airport-codes_csv.csv) will **not** be linked to the I-94 airport codes because incorrect assignments should not be made.
2. Add the column `st_i94_port_state_code` to staging table `st_i94_immigration` based on staging table `st_immigration_airports`. This information is needed to connect the `us-cities-demographics.json` file later on. `st_ia_airport_state_code --> st_i94_port_state_code`
3. Add column `st_i94_port_state_code --> f_i94_port_state_code` to fact table `f_i94_immigrations`
4. Creation of a dimension named `d_immigration_airports` based on staging table `st_immigration_airports`.
5. Mapping of dimension `d_immigration_airports` to fact table `f_i94_immigration` based on columns (`st_immigration_airports.st_ia_airport_code` --> `d_immigration_airports.d_ia_id`) == (`st_i94_immigration.st_i94_port` --> `f_i94_immigration.d_ia_id`).
6. Answer Project Question 2: At what airports do foreign persons arrive for immigration to the U.S.?

4.1.2. At what airports do foreign persons arrive for immigration to the U.S.? [(Description)](question2_description)
**Airport dimension**
1. Clean data and create staging table `st_immigration_airports` from file [`I94_SAS_Labels_I94PORT.txt`](../P8_capstone_resource_files/I94_sas_labels_descriptions_extracted_data/I94_SAS_Labels_I94PORT.txt) with the columns `st_ia_airport_code` as referencing column, `st_ia_airport_name` and `st_ia_airport_state_code`. Note that the I-94 airport code is **not** the same as the [IATA](https://en.wikipedia.org/wiki/International_Air_Transport_Association) code and does not correspond to it. Therefore, `SFR` (I94: 'SFR' = 'SAN FRANCISCO, CA') is used for San Francisco Airport in this scenario instead of `SFO`. `SFR` normally means San Fernando, CA, USA. **Project decision:** Data from file [airport-codes.csv](../P8_capstone_resource_files/airport-codes_csv.csv) will **not** be linked to the I-94 airport codes because incorrect assignments should not be made.
###Code
###### Imports and Installs section
import shutil
import pandas as pd
import pyspark.sql.functions as F
# import spark as spark
from pyspark.sql.types import StructType, StructField, DoubleType, StringType, IntegerType, LongType, TimestampType, DateType
from datetime import datetime, timedelta
from pyspark.sql import SparkSession, DataFrameNaFunctions
from pyspark.sql.functions import when, count, col, to_date, datediff, date_format, month
import re
import json
from os import path
MAX_MEMORY = "5g"
spark = SparkSession\
.builder\
.appName("etl pipeline for project 8 - I94 data") \
.config("spark.jars.packages","saurfang:spark-sas7bdat:3.0.0-s_2.12")\
.config('spark.sql.repl.eagerEval.enabled', True) \
.config("spark.executor.memory", MAX_MEMORY) \
.config("spark.driver.memory", MAX_MEMORY) \
.appName("Foo") \
.enableHiveSupport()\
.getOrCreate()
# setting the current LOG-Level
spark.sparkContext.setLogLevel('ERROR')
"""
Next Steps: Carefully clean list of airports
1. read all available information from file
2. filter all elements on different regex conditions and store them into a new data frame called `df_st_immigration_airports`
3. store cleaned data frame `df_st_immigration_airports` to disk
"""
# path of txt file
filepath_immigration_airports = "../P8_capstone_resource_files/I94_sas_labels_descriptions_extracted_data/I94_SAS_Labels_I94PORT.txt"
# read txt file into data frame
df_txt_immigration_airports_raw = spark.read.text(filepath_immigration_airports)
# get regex_cleaned values --> less error prone --> 582 Entries
regex_cleaned = r"^\s+'([.\w{2,3} ]*)'\s+=\s+'([\w -.\/]*),\s* ([\w\/]+)"
df_st_immigration_airports_regex_cleaned = df_txt_immigration_airports_raw\
.select( F.regexp_extract('value',regex_cleaned, 1).alias('st_ia_airport_code'),
F.regexp_extract('value',regex_cleaned, 2).alias('st_ia_airport_name'),
F.regexp_extract('value',regex_cleaned, 3).alias('st_ia_airport_state_code')) \
.drop_duplicates() \
.filter("st_ia_airport_code != ''") \
.sort("st_ia_airport_state_code", "st_ia_airport_code") \
.select("st_ia_airport_code", "st_ia_airport_name", "st_ia_airport_state_code")
print(df_st_immigration_airports_regex_cleaned.count())
df_st_immigration_airports_regex_cleaned.show(10, False)
# get regex_all values --> with errors like `Collapsed (BUF)` --> 660 Entries
regex = r"^\s+'([.\w{2,3} ]*)'\s+=\s+'([\w -.\/]*)\s*,*\s* ([\w\/]+)"
df_st_immigration_airports = df_txt_immigration_airports_raw\
.select( F.regexp_extract('value',regex, 1).alias('st_ia_airport_code'),
F.regexp_extract('value',regex, 2).alias('st_ia_airport_name'),
F.regexp_extract('value',regex, 3).alias('st_ia_airport_state_code')) \
.drop_duplicates() \
.filter("st_ia_airport_code != ''") \
.sort("st_ia_airport_state_code", "st_ia_airport_code")
print(df_st_immigration_airports.count())
df_st_immigration_airports.show(1000, False)
# Difference of the remaining entries ==> 660 - 582 = 78
df_st_immigration_airports \
.join(df_st_immigration_airports_regex_cleaned,
df_st_immigration_airports.st_ia_airport_code == df_st_immigration_airports_regex_cleaned.st_ia_airport_code,
'left_anti') \
.show(10000, False)
# correct all entries that are not error-free as expected
df_st_immigration_airports = df_st_immigration_airports \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r'Collapsed \(\w+\)|No PORT|UNKNOWN', 'Invalid Airport Entry').alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r'06/15|Code|POE', 'Invalid State Code').alias("st_ia_airport_state_code")) \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r"^DERBY LINE,.*", "DERBY LINE, VT (RT. 5)").alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r"5", "VT").alias("st_ia_airport_state_code")) \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r"^LOUIS BOTHA, SOUTH", "LOUIS BOTHA").alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r"AFRICA", "SOUTH AFRICA").alias("st_ia_airport_state_code")) \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r",", "").alias("st_ia_airport_name"),
"st_ia_airport_state_code") \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r"^PASO DEL", "PASO DEL NORTE").alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r"NORTE", "TX").alias("st_ia_airport_state_code")) \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r"^UNIDENTIFED AIR /?", "Invalid Airport Entry").alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r"^SEAPORT?", "Invalid State Code").alias("st_ia_airport_state_code")) \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r"Abu", "Abu Dhabi").alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r"Dhabi", "Invalid State Code").alias("st_ia_airport_state_code")) \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r"DOVER-AFB", "Invalid Airport Entry").alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r"DE", "Invalid State Code").alias("st_ia_airport_state_code")) \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r"NOT REPORTED/UNKNOWNGALES", "NOGALES").alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r"AZ", "AZ").alias("st_ia_airport_state_code")) \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r"^NOT", "Invalid Airport Entry").alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r"REPORTED/UNKNOWN", "Invalid State Code").alias("st_ia_airport_state_code")) \
.select("st_ia_airport_code",
F.regexp_replace('st_ia_airport_name', r"INVALID - IWAKUNI", "IWAKUNI").alias("st_ia_airport_name"),
F.regexp_replace("st_ia_airport_state_code", r"JAPAN", "JAPAN").alias("st_ia_airport_state_code")) \
.sort("st_ia_airport_name", "st_ia_airport_code")
print(df_st_immigration_airports.count())
df_st_immigration_airports.show(1000, False)
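# -----------------------------------------------------------------------------
# Optional sketch (assumption, not the approach used above): the long chain of
# .select() calls can also be expressed as a data-driven loop of
# (column, pattern, replacement) rules applied with F.regexp_replace, which keeps
# the cleanup table in one place. Only the first two rules are repeated here as
# an illustration; the variable names are hypothetical.
cleanup_rules = [
    ("st_ia_airport_name", r'Collapsed \(\w+\)|No PORT|UNKNOWN', 'Invalid Airport Entry'),
    ("st_ia_airport_state_code", r'06/15|Code|POE', 'Invalid State Code'),
]
df_cleanup_sketch = df_st_immigration_airports
for rule_column, rule_pattern, rule_replacement in cleanup_rules:
    df_cleanup_sketch = df_cleanup_sketch \
        .withColumn(rule_column, F.regexp_replace(rule_column, rule_pattern, rule_replacement))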
# check if former invalid entries are cleaned correctly
# Difference of the remaining entries ==> 660 - 582 = 78
df_st_immigration_airports \
.join(df_st_immigration_airports_regex_cleaned,
df_st_immigration_airports.st_ia_airport_code == df_st_immigration_airports_regex_cleaned.st_ia_airport_code, 'left_anti') \
.show(10000, False)
# Write data as new CSV file to disk
location_to_write = '../P8_capstone_resource_files/I94_sas_labels_descriptions_extracted_data/st_immigration_airports.csv'
# delete folder if already exists
if path.exists(location_to_write):
shutil.rmtree(location_to_write)
df_st_immigration_airports \
.coalesce(1)\
.write\
.mode("overwrite") \
.csv(location_to_write, header = 'true')
# write df_st_immigration_airports back to stage area on file system
location_to_write = "../P8_capstone_resource_files/parquet_stage/PQ2/st_immigration_airports"
# delete folder if already exists
if path.exists(location_to_write):
shutil.rmtree(location_to_write)
df_st_immigration_airports \
.repartition(int(1)) \
.write \
.format("parquet")\
.mode(saveMode='overwrite') \
.parquet(location_to_write, compression="gzip")
# Read written data frame back into memory
# st_immigration_airports:
location_st_immigration_airports = "../P8_capstone_resource_files/parquet_stage/PQ2/st_immigration_airports"
df_st_immigration_airports = spark.read.parquet(location_st_immigration_airports)
# current Schema of staging table st_immigration_airports
print(df_st_immigration_airports.count())
df_st_immigration_airports.printSchema()
df_st_immigration_airports.show(10, False)
###Output
660
root
|-- st_ia_airport_code: string (nullable = true)
|-- st_ia_airport_name: string (nullable = true)
|-- st_ia_airport_state_code: string (nullable = true)
+------------------+---------------------------+------------------------+
|st_ia_airport_code|st_ia_airport_name |st_ia_airport_state_code|
+------------------+---------------------------+------------------------+
|ABE |ABERDEEN |WA |
|ADS |ADDISON AIRPORT- ADDISON |TX |
|AGA |AGANA |GU |
|AGU |AGUADILLA |PR |
|BOI |AIR TERM. (GOWEN FLD) BOISE|ID |
|AKR |AKRON |OH |
|CAK |AKRON |OH |
|ALA |ALAMAGORDO |NM |
|ALB |ALBANY |NY |
|CHO |ALBEMARLE CHARLOTTESVILLE |VA |
+------------------+---------------------------+------------------------+
only showing top 10 rows
###Markdown
 2. Add the column `st_ia_airport_state_code --> st_i94_port_state_code` to staging table `st_i94_immigration` based on staging table `st_immigration_airports`. This information is needed later to join the `us-cities-demographics.json` data.
###Code
# read df_st_i94_immigrations staging table and add column `st_i94_port_state_code` to it. Write data frame back to disk.
# Read written data frame back into memory
# st_i94_immigrations:
location_st_i94_immigrations = "../P8_capstone_resource_files/parquet_stage/PQ1/st_i94_immigrations"
df_st_i94_immigrations = spark.read.parquet(location_st_i94_immigrations)
# st_immigration_airports:
location_st_immigration_airports = "../P8_capstone_resource_files/parquet_stage/PQ2/st_immigration_airports"
df_st_immigration_airports = spark.read.parquet(location_st_immigration_airports)
print(df_st_i94_immigrations.count())
df_st_i94_immigrations.printSchema()
df_st_i94_immigrations.show(5, False)
print(df_st_immigration_airports.count())
df_st_immigration_airports.printSchema()
df_st_immigration_airports.show(5, False)
########################################################################################################################
# check rows with st_i94_depdate == 0, i.e. st_i94_dept_date_iso is 1900-01-01 (default value - no onward travel is planned)
df_st_i94_immigrations \
.filter(df_st_i94_immigrations.st_i94_depdate == 0)\
.show(5, False)
# add column `st_i94_port_state_code` to data frame st_i94_immigrations
df_st_i94_immigrations = df_st_i94_immigrations \
.join(df_st_immigration_airports,
[df_st_i94_immigrations.st_i94_port == df_st_immigration_airports.st_ia_airport_code], 'left_outer') \
.drop("st_ia_airport_code", "st_ia_airport_name") \
.withColumnRenamed("st_ia_airport_state_code", "st_i94_port_state_code")
# check if `st_i94_port_state_code` has null values
df_st_i94_immigrations\
.fillna(value='NA', subset=['st_i94_port_state_code'])\
.groupBy("st_i94_port_state_code")\
.count() \
.sort("st_i94_port_state_code")\
.orderBy("count")\
.show(500)
# get entry with null value
df_st_i94_immigrations \
.filter(col("st_i94_port_state_code").isNull()).show()
# get status
print(df_st_i94_immigrations.count())
df_st_i94_immigrations.printSchema()
df_st_i94_immigrations.show(5, False)
# write st_i94_immigrations back to file system
location_to_write = "../P8_capstone_resource_files/parquet_stage/PQ2/st_i94_immigrations"
# delete folder if already exists
if path.exists(location_to_write):
shutil.rmtree(location_to_write)
df_st_i94_immigrations \
.repartition(int(1)) \
.write \
.format("parquet")\
.mode(saveMode='overwrite') \
.partitionBy('st_i94_year', 'st_i94_month') \
.parquet(location_to_write, compression="gzip")
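# -----------------------------------------------------------------------------
# Optional sketch (assumption): because the parquet output is partitioned by
# st_i94_year and st_i94_month, a later reader can prune partitions with a simple
# filter; the year/month values below are illustrative only.
df_single_partition_sketch = spark.read.parquet(location_to_write) \
    .filter("st_i94_year = 2016 AND st_i94_month = 4")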
###Output
_____no_output_____
###Markdown
3. Add new column `st_i94_port_state_code --> f_i94_port_state_code` to existing fact table `f_i94_immigrations`.
###Code
# Read data frames back into memory
# st_i94_immigrations with column `st_i94_port_state_code`:
location_st_i94_immigrations = "../P8_capstone_resource_files/parquet_stage/PQ2/st_i94_immigrations"
df_st_i94_immigrations = spark.read.parquet(location_st_i94_immigrations)
# f_i94_immigrations:
location_f_i94_immigrations = "../P8_capstone_resource_files/parquet_star/PQ1/f_i94_immigrations"
df_f_i94_immigrations = spark.read.parquet(location_f_i94_immigrations)
# show current schemas
print(df_st_i94_immigrations.count())
df_st_i94_immigrations.printSchema()
print(df_f_i94_immigrations.count())
df_f_i94_immigrations.printSchema()
# get only the needed columns to join
df_st_i94_immigrations_2_join = df_st_i94_immigrations \
.select("st_i94_id", "st_i94_port_state_code")
# add new columns to fact table `df_f_i94_immigrations`
df_f_i94_immigrations = df_f_i94_immigrations \
.join(df_st_i94_immigrations_2_join, df_f_i94_immigrations.f_i94_id == df_st_i94_immigrations_2_join.st_i94_id, 'inner') \
.drop("st_i94_id") \
.withColumnRenamed("st_i94_port_state_code", "f_i94_port_state_code") \
.withColumn("d_sd_id", col("f_i94_addr"))
df_f_i94_immigrations.printSchema()
df_f_i94_immigrations.show(5, False)
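# -----------------------------------------------------------------------------
# Optional sanity check (sketch, not part of the original pipeline): an inner join
# silently drops fact rows whose f_i94_id has no match in the staging table, so
# comparing row counts after the join with the staging side is a cheap way to
# spot losses.
print("fact rows after join:        ", df_f_i94_immigrations.count())
print("staging rows joined against: ", df_st_i94_immigrations_2_join.count())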
# write fact table f_i94_immigrations (~ 109.7 MB)
location_to_write = "../P8_capstone_resource_files/parquet_star/PQ2/f_i94_immigrations"
if path.exists(location_to_write):
shutil.rmtree(location_to_write)
df_f_i94_immigrations \
.repartition(int(1)) \
.write \
.format("parquet")\
.mode(saveMode='overwrite') \
.partitionBy("f_i94_year", "f_i94_month")\
.parquet(location_to_write, compression="gzip")
###Output
_____no_output_____
###Markdown
4. Creation of a dimension named `d_immigration_airports` based on staging table `st_immigration_airports`.
###Code
# st_immigration_airports:
location_st_immigration_airports = "../P8_capstone_resource_files/parquet_stage/PQ2/st_immigration_airports"
df_d_immigration_airports = spark.read.parquet(location_st_immigration_airports)
print(df_d_immigration_airports.count())
df_d_immigration_airports.printSchema()
df_d_immigration_airports.show(5, False)
df_d_immigration_airports = df_d_immigration_airports \
.withColumn("d_ia_id", df_d_immigration_airports.st_ia_airport_code) \
.withColumnRenamed("st_ia_airport_code", "d_ia_airport_code") \
.withColumnRenamed("st_ia_airport_name", "d_ia_airport_name") \
.withColumnRenamed("st_ia_airport_state_code", "d_ia_airport_state_code")
df_d_immigration_airports.printSchema()
df_d_immigration_airports.show(5, False)
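# -----------------------------------------------------------------------------
# Optional sketch (assumption): d_ia_id serves as the dimension key, so a quick
# duplicate check before writing shows whether the airport code is in fact unique.
df_duplicate_dimension_keys = df_d_immigration_airports \
    .groupBy("d_ia_id") \
    .count() \
    .filter(col("count") > 1)
print(df_duplicate_dimension_keys.count())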
# write dimension table d_immigration_airports to disk (~ 10 kB)
location_to_write = "../P8_capstone_resource_files/parquet_star/PQ2/d_immigration_airports"
# delete folder if already exists
if path.exists(location_to_write):
shutil.rmtree(location_to_write)
df_d_immigration_airports \
.repartition(int(1)) \
.write \
.format("parquet")\
.mode(saveMode='overwrite') \
.parquet(location_to_write, compression="gzip")
###Output
_____no_output_____
###Markdown
 5. Mapping of dimension `d_immigration_airports` to fact table `f_i94_immigration` based on columns (`st_immigration_airports.st_ia_airport_code` --> `d_immigration_airports.d_ia_id`) == (`st_i94_immigration.st_i94_port` --> `f_i94_immigration.d_ia_id`).
 6. Answer Project Question 2: At what airports do foreign persons arrive for immigration to the U.S.?
###Code
# Read written data frame back into memory
df_f_i94_immigrations = spark.read.parquet("../P8_capstone_resource_files/parquet_star/PQ2/f_i94_immigrations")
df_d_immigration_airports = spark.read.parquet("../P8_capstone_resource_files/parquet_star/PQ2/d_immigration_airports")
# check read data frames
print(df_f_i94_immigrations.count())
df_f_i94_immigrations.printSchema()
df_f_i94_immigrations.show(5, False)
print(df_d_immigration_airports.count())
df_d_immigration_airports.printSchema()
df_d_immigration_airports.show(5, False)
# Register data frames as Views
df_f_i94_immigrations.createOrReplaceTempView("f_i94_immigrations")
df_d_immigration_airports.createOrReplaceTempView("d_immigration_airports")
# SQL to answer project question 2 (At what airports do foreign persons arrive for immigration to the U.S.?)
df_pq2 = spark.sql(" select d_ia.d_ia_airport_code as airport_code"
" ,d_ia.d_ia_airport_name as airport_name"
" ,d_ia.d_ia_airport_state_code as airport_state_code"
" ,sum(f_i94.f_i94_count) as immigrants"
" ,RANK() OVER (ORDER BY count(f_i94.f_i94_count) desc) Immigration_airport_rank"
" from f_i94_immigrations f_i94"
" join d_immigration_airports d_ia on f_i94.d_ia_id = d_ia.d_ia_id"
" group by airport_code"
" , airport_name"
" , airport_state_code"
" order by Immigration_airport_rank asc ")
df_pq2.show(5000, False)
###Output
+------------+----------------------------+------------------+----------+------------------------+
|airport_code|airport_name |airport_state_code|immigrants|Immigration_airport_rank|
+------------+----------------------------+------------------+----------+------------------------+
|NYC |NEW YORK |NY |1669429 |1 |
|MIA |MIAMI |FL |1139100 |2 |
|LOS |LOS ANGELES |CA |1134611 |3 |
|CHI |CHICAGO |IL |792628 |4 |
|NEW |NEWARK/TETERBORO |NJ |663630 |5 |
|SFR |SAN FRANCISCO |CA |628438 |6 |
|HOU |HOUSTON |TX |609343 |7 |
|ATL |ATLANTA |GA |605856 |8 |
|WAS |WASHINGTON |DC |570668 |9 |
|DAL |DALLAS |TX |490050 |10 |
|BOS |BOSTON |MA |382112 |11 |
|FTL |FORT LAUDERDALE |FL |337598 |12 |
|SEA |SEATTLE |WA |272207 |13 |
|DET |DETROIT |MI |262744 |14 |
|ORL |ORLANDO |FL |257311 |15 |
|PHI |PHILADELPHIA |PA |185469 |16 |
|LVG |LAS VEGAS |NV |171358 |17 |
|HHW |HONOLULU |HI |131980 |18 |
|CLT |CHARLOTTE |NC |112025 |19 |
|SPM |ST PAUL |MN |101986 |20 |
|DEN |DENVER |CO |95133 |21 |
|AGA |AGANA |GU |90049 |22 |
|BLA |BLAINE |WA |89275 |23 |
|PHO |PHOENIX |AZ |72405 |24 |
|SAJ |SAN JUAN |PR |67010 |25 |
|SAI |SAIPAN |SPN |56091 |26 |
|TAM |TAMPA |FL |55731 |27 |
|NIA |NIAGARA FALLS |NY |55369 |28 |
|PBB |PEACE BRIDGE |NY |54885 |29 |
|SDP |SAN DIEGO |CA |54608 |30 |
|CHM |CHAMPLAIN |NY |45012 |31 |
|SLC |SALT LAKE CITY |UT |43296 |32 |
|SNJ |SAN JOSE |CA |42772 |33 |
|POO |PORTLAND |OR |42652 |34 |
|XXX |Invalid Airport Entry |Invalid State Code|39376 |35 |
|NCA |NORTH CAICOS TURK & |CAIMAN |37507 |36 |
|SYS |SAN YSIDRO |CA |34341 |37 |
|LEW |LEWISTON |NY |31932 |38 |
|SNA |SAN ANTONIO |TX |31129 |39 |
|BAL |BALTIMORE |MD |28804 |40 |
|PHU |PORT HURON |MI |23142 |41 |
|WPB |WEST PALM BEACH |FL |22691 |42 |
|OAK |OAKLAND |CA |22445 |43 |
|X96 |Invalid Airport Entry |Invalid State Code|21870 |44 |
|HIG |HIGHGATE SPRINGS |VT |21619 |45 |
|VCV |VANCOUVER |CANADA |21200 |46 |
|TOR |TORONTO |CANADA |17259 |47 |
|STT |ST THOMAS |VI |17152 |48 |
|AUS |AUSTIN |TX |16832 |49 |
|RDU |RALEIGH/DURHAM |NC |16106 |50 |
|FMY |FORT MYERS |FL |15555 |51 |
|YHC |HAKAI PASS |CANADA |15211 |52 |
|SAC |SACRAMENTO |CA |14540 |53 |
|OTM |OTAY MESA |CA |14182 |54 |
|MAA |Abu Dhabi |Invalid State Code|14047 |55 |
|CIN |CINCINNATI |OH |12657 |56 |
|THO |THOUSAND ISLAND BRIDGE |NY |12583 |57 |
|DER |DERBY LINE VT (RT. 5) |VT |10390 |58 |
|DUB |DUBLIN |IRELAND |9766 |59 |
|SFB |SANFORD |FL |9604 |60 |
|SUM |SUMAS |WA |9499 |61 |
|PEM |PEMBINA |ND |8992 |62 |
|LLB |JUAREZ-LINCOLN BRIDGE |TX |8583 |63 |
|ANC |ANCHORAGE |AK |8521 |64 |
|VIC |VICTORIA |CANADA |8518 |65 |
|LYN |LYNDEN |WA |8464 |66 |
|OGG |KAHULUI - MAUI |HI |8423 |67 |
|BUF |BUFFALO |NY |8296 |68 |
|PSP |PALM SPRINGS |CA |7940 |69 |
|ANZ |ANZALDUAS |TX |7844 |70 |
|OPF |OPA LOCKA |FL |7718 |71 |
|NOL |NEW ORLEANS |LA |7649 |72 |
|PIT |PITTSBURG |PA |6708 |73 |
|CAL |CALEXICO |CA |6697 |74 |
|ONT |ONTARIO |CA |6595 |75 |
|PEV |PORT EVERGLADES |FL |5986 |76 |
|SWE |SWEETGTASS |MT |5431 |77 |
|TUC |TUCSON |AZ |5223 |78 |
|NAS |NASSAU |BAHAMAS |5049 |79 |
|MON |MONTREAL |CANADA |4562 |80 |
|BOA |BRIDGE OF AMERICAS |TX |4352 |81 |
|YSL |YSLETA |TX |4342 |82 |
|BRO |BROWNSVILLE |TX |4263 |83 |
|MCA |MCALLEN |TX |4247 |84 |
|HID |HIDALGO |TX |4221 |85 |
|NOG |NOGALES |AZ |4030 |86 |
|LAR |LAREDO |TX |3888 |87 |
|CLE |CLEVELAND |OH |3799 |88 |
|CLS |CALAIS |ME |3407 |89 |
|BGM |BANGOR |ME |3385 |90 |
|SHA |SHANNON |IRELAND |3319 |91 |
|HAM |HAMILTON |BERMUDA |3084 |92 |
|HTM |HOULTON |ME |3072 |93 |
|PVD |THEODORE FRANCIS - WARWICK |RI |2925 |94 |
|ROO |ROOSVILLE |MT |2819 |95 |
|OGD |OGDENSBURG |NY |2805 |96 |
|AXB |ALEXANDRIA BAY |NY |2789 |97 |
|PDN |PASO DEL NORTE |TX |2745 |98 |
|CLM |COLUMBUS |OH |2678 |99 |
|PHR |PHARR |TX |2669 |100 |
|EPI |EASTPORT |ID |2565 |101 |
|RNO |CANNON INTL - RENO/TAHOE |NV |2493 |102 |
|PIE |PIEGAN |MT |2453 |103 |
|HAR |HARTFORD |CT |2420 |104 |
|LNB |LONG BEACH |CA |2366 |105 |
|SRQ |BRADENTON - SARASOTA |FL |2362 |106 |
|KOA |KEAHOLE-KONA |HI |2308 |107 |
|PIR |POINT ROBERTS |WA |2270 |108 |
|ORO |OROVILLE |WA |2265 |109 |
|ALC |ALCAN |AK |2264 |110 |
|NSV |NASHVILLE |TN |2262 |111 |
|CLG |CALGARY |CANADA |2149 |112 |
|SSM |SAULT STE. MARIE |MI |2084 |113 |
|CHR |CHRISTIANSTED |VI |2080 |114 |
|ELP |EL PASO |TX |1937 |115 |
|SKA |SKAGWAY |AK |1924 |116 |
|TEC |TECATE |CA |1891 |117 |
|POR |PORTAL |AZ |1802 |118 |
|SAA |SANTA ANA |CA |1788 |119 |
|INP |INDIANAPOLIS |IN |1720 |120 |
|LCB |LAREDO COLUMBIA BRIDGE |TX |1587 |121 |
|STR |SANTA TERESA |NM |1581 |122 |
|CHF |CHIEF MT |MT |1560 |123 |
|STL |ST LOUIS |MO |1552 |124 |
|JKM |JACKMAN |ME |1453 |125 |
|MAS |MASSENA |NY |1445 |126 |
|YGF |Invalid Airport Entry |Invalid State Code|1405 |127 |
|LIH |LIHUE |HI |1368 |128 |
|LOI |LOS INDIOS |TX |1361 |129 |
|JMZ |Invalid Airport Entry |Invalid State Code|1292 |130 |
|KAN |KANSAS CITY |MO |1209 |131 |
|HPN |WESTCHESTER - WHITE PLAINS |NY |1081 |132 |
|VIB |VETERAN INTL BRIDGE |TX |1036 |133 |
|ROC |ROCHESTER |NY |979 |134 |
|MIL |MILWAUKEE |WI |971 |135 |
|CHA |CHARLOTTE AMALIE |VI |964 |136 |
|LUK |LUKEVILLE |AZ |960 |137 |
|GPM |GRAND PORTAGE |MN |900 |138 |
|KEY |KEY WEST |FL |864 |139 |
|COB |COBURN GORE |ME |860 |140 |
|ROU |ROUSES POINT |NY |840 |141 |
|SYR |SYRACUSE |NY |830 |142 |
|DOU |DOUGLAS |AZ |736 |143 |
|EGP |EAGLE PASS |TX |731 |144 |
|OTT |OTTAWA |CANADA |712 |145 |
|AND |ANDRADE |CA |709 |146 |
|DAC |DALTONS CACHE |AK |705 |147 |
|DLR |DEL RIO |TX |606 |148 |
|FWA |FRONTIER |WA |576 |149 |
|EDA |EDMONTON |CANADA |553 |150 |
|INT |INT''L FALLS |MN |549 |151 |
|SLU |SAN LUIS |AZ |537 |152 |
|PTL |PORTHILL |ID |521 |153 |
|CNA |CANAAN |VT |496 |154 |
|DNS |DUNSEITH |ND |485 |155 |
|DNA |DONNA |TX |484 |156 |
|MLB |MELBOURNE |FL |482 |157 |
|WBE |WEST BERKSHIRE |VT |467 |158 |
|FRB |FAIRBANKS |AK |461 |159 |
|POM |PORTLAND |ME |457 |160 |
|PGR |PROGRESO |TX |454 |161 |
|SPE |ST PETERSBURG |FL |439 |162 |
|CHS |CHARLESTON |WV |432 |163 |
|MET |METALINE FALLS |WA |426 |164 |
|MDT |HARRISBURG |PA |413 |165 |
|NRT |NORTH TROY |VT |404 |166 |
|RIF |RICHFORT |VT |402 |167 |
|RAY |RAYMOND |MT |401 |168 |
|W55 |Invalid Airport Entry |Invalid State Code|391 |169 |
|ABG |ALBURG |VT |342 |170 |
|WIN |WINNIPEG |CANADA |322 |171 |
|MRC |MARINE CITY |MI |319 |172 |
|BED |HANSCOM FIELD - BEDFORD |MA |307 |173 |
|BQN |BORINQUEN - AGUADILLO |PR |303 |174 |
|PRE |PRESIDIO |TX |301 |175 |
|JAC |JACKSONVILLE |FL |277 |176 |
|TRO |TROUT RIVER |NY |276 |177 |
|HAL |Halifax NS |Canada |266 |178 |
|SAV |SAVANNAH |GA |240 |179 |
|ROM |ROMA |TX |238 |180 |
|FPR |ST LUCIE COUNTY |FL |237 |181 |
|WAL |WALHALLA |ND |233 |182 |
|SGR |HULL FIELD SUGAR LAND ARPT |TX |229 |183 |
|MOB |MOBILE |AL |226 |184 |
|DLB |DEL BONITA |MT |223 |185 |
|VNY |VAN NUYS |CA |209 |186 |
|LAU |LAURIER |WA |203 |187 |
|LAN |LANCASTER |MN |198 |188 |
|MOO |MOORES |NY |196 |189 |
|NRN |NORTON |VT |192 |190 |
|WIL |WILMINGTON |NC |190 |191 |
|ABS |ALBURG SPRINGS |VT |189 |192 |
|COL |COLUMBUS |NM |185 |193 |
|MAF |ODESSA REGIONAL |TX |182 |194 |
|ADW |ANDREWS AFB |MD |182 |194 |
|MAD |MADAWASKA |ME |179 |196 |
|WAR |WARROAD |MN |179 |196 |
|NOR |NORFOLK |VA |175 |198 |
|FRT |FORTUNA |ND |175 |198 |
|CHT |CHATEAUGAY |NY |170 |200 |
|ADS |ADDISON AIRPORT- ADDISON |TX |170 |200 |
|MOR |MORSES LINE |VT |170 |200 |
|CHL |CHARLESTON |SC |163 |203 |
|PRO |PROVIDENCE |RI |160 |204 |
|ROS |ROSEAU |MN |160 |204 |
|FOK |SUFFOLK COUNTY |NY |160 |204 |
|KET |KETCHIKAN |AK |158 |207 |
|HVR |HAVRE |MT |156 |208 |
|X44 |Invalid Airport Entry |Invalid State Code|150 |209 |
|GAL |GALVESTON |TX |148 |210 |
|FER |FERRY |WA |144 |211 |
|5T6 |Invalid Airport Entry |Invalid State Code|137 |212 |
|BDL |BRADLEY INTERNATIONAL |CT |135 |213 |
|NEC |NECHE |ND |131 |214 |
|HNS |HANSBORO |ND |131 |214 |
|FTC |FORT COVINGTON |NY |130 |216 |
|AGN |ALGONAC |MI |130 |216 |
|MEM |MEMPHIS |TN |126 |218 |
|PSM |PORTSMOUTH |NH |125 |219 |
|BWA |BOUNDARY |WA |125 |219 |
|NAC |NACO |AZ |121 |221 |
|RST |ROCHESTER |MN |119 |222 |
|MMU |MORRISTOWN |NJ |117 |223 |
|CRQ |CARAVELAS BA #ARPT |BRAZIL |113 |224 |
|BEB |BEEBE PLAIN |VT |111 |225 |
|ADT |AMISTAD DAM |TX |107 |226 |
|NRG |NORTHGATE |ND |106 |227 |
|LUB |LUBEC |ME |106 |227 |
|BEE |BEECHER FALLS |VT |104 |229 |
|DVL |DANVILLE |WA |99 |230 |
|CRP |CORPUS CHRISTI |TX |96 |231 |
|TUR |TURNER |MT |93 |232 |
|APF |NAPLES |FL |87 |233 |
|PTK |OAKLAND COUNTY - PONTIAC |MI |87 |233 |
|FTF |FORT FAIRFIELD |ME |85 |235 |
|AUH |Invalid Airport Entry |Invalid State Code|84 |236 |
|NOO |NOONAN |ND |84 |236 |
|ICT |MID-CONTINENT - WITCHITA |KS |82 |238 |
|BRG |BURLINGTON |VT |80 |239 |
|BWM |BRIDGEWATER |ME |74 |240 |
|WHO |WESTHOPE |ND |72 |241 |
|PAR |PORT ARTHUR |TX |71 |242 |
|VNB |VAN BUREN |ME |69 |243 |
|PCF |PORT CANAVERAL |FL |64 |244 |
|DAB |DAYTONA BEACH INTERNATIONAL |FL |64 |244 |
|GSP |GREENVILLE |SC |63 |246 |
|FAL |FALCON HEIGHTS |TX |61 |247 |
|FAR |FARGO |ND |60 |248 |
|FTK |FORT KENT |ME |58 |249 |
|SHR |SHERWOOD |ND |57 |250 |
|TST |NEWINGTON DATA CENTER TEST |CT |56 |251 |
|REN |RENO |NV |54 |252 |
|SWF |STEWART - ORANGE CNTY |NY |54 |252 |
|CRY |CARBURY |ND |54 |252 |
|FPT |FREEPORT |TX |54 |252 |
|SPC |SAN PEDRO |CA |50 |256 |
|SJO |ST JOHN |ND |49 |257 |
|FAJ |FAJARDO |PR |49 |257 |
|FRE |FRESNO |CA |49 |257 |
|RIO |RIO GRANDE CITY |TX |49 |257 |
|BAU |BAUDETTE |MN |48 |261 |
|CXO |Invalid Airport Entry |Invalid State Code|47 |262 |
|GAC |Invalid Airport Entry |Invalid State Code|46 |263 |
|SGJ |ST AUGUSTINE ARPT |FL |45 |264 |
|JFA |Invalid Airport Entry |Invalid State Code|43 |265 |
|LEX |BLUE GRASS - LEXINGTON |KY |43 |265 |
|VCB |VANCEBORO |ME |41 |267 |
|PEN |PENSACOLA |FL |41 |267 |
|NIG |NIGHTHAWK |WA |41 |267 |
|BLI |BELLINGHAM |WASHINGTON |40 |270 |
|ABQ |ALBUQUERQUE |NM |40 |270 |
|BZN |GALLATIN FIELD - BOZEMAN |MT |40 |270 |
|5KE |KETCHIKAN |AK |38 |273 |
|NYL |Invalid Airport Entry |Invalid State Code|38 |273 |
|DUL |DULUTH |MN |36 |275 |
|HLG |HARLINGEN |TX |36 |275 |
|SPO |SPOKANE |WA |36 |275 |
|OMA |OMAHA |NE |35 |278 |
|BHX |BIRMINGHAM |AL |32 |279 |
|ALB |ALBANY |NY |32 |279 |
|PNH |PITTSBURG |NH |30 |281 |
|FAB |FABENS |TX |29 |282 |
|JUN |JUNEAU |AK |26 |283 |
|GRB |GREEN BAY |WI |26 |283 |
|ERC |EAST RICHFORD |VT |26 |283 |
|MHT |MANCHESTER |NH |23 |286 |
|GRF |GRAND FORKS |ND |23 |286 |
|RCM |RICHMOND |VA |23 |286 |
|FPF |FORT PIERCE |FL |23 |286 |
|BTN |BATON ROUGE |LA |22 |290 |
|MTH |Invalid Airport Entry |Invalid State Code|21 |291 |
|DOV |DOVER AFB |Invalid State Code|21 |291 |
|LIM |LIMESTONE |ME |20 |293 |
|PIN |PINE CREEK |MN |20 |293 |
|RYY |Invalid Airport Entry |Invalid State Code|20 |293 |
|WRI |MC GUIRE AFB - WRIGHTSOWN |NJ |20 |293 |
|MAI |MAIDA |ND |20 |293 |
|HEL |HELENA |MT |19 |298 |
|YIP |WILLOW RUN - YPSILANTI |MI |19 |298 |
|WLL |WILMINGTON |Invalid State Code|19 |298 |
|LOU |LOUISVILLE |KY |18 |301 |
|PON |PONCE |PR |17 |302 |
|ANT |ANTLER |ND |17 |302 |
|SDY |SANDUSKY |OH |16 |304 |
|MND |MINOT |ND |16 |304 |
|DPA |DUPAGE COUNTY |IL |16 |304 |
|ERI |ERIE |PA |15 |307 |
|SUS |Invalid Airport Entry |Invalid State Code|15 |307 |
|OKC |OKLAHOMA CITY |OK |15 |307 |
|DSM |DES MOINES |IA |14 |310 |
|MWH |MOSES LAKE GRANT COUNTY ARPT|WA |13 |311 |
|TKI |TOKEEN |AK |13 |311 |
|AGU |AGUADILLA |PR |12 |313 |
|HML |HAMIIN |ME |12 |313 |
|AFW |FORT WORTH ALLIANCE |TX |12 |313 |
|PCW |Invalid Airport Entry |Invalid State Code|12 |313 |
|OGS |Invalid Airport Entry |Invalid State Code|12 |313 |
|MYR |MYRTLE BEACH |SC |11 |318 |
|PAS |PASCAGOULA |MS |11 |318 |
|RFD |GREATER ROCKFORD |IL |10 |320 |
|FRI |FRIDAY HARBOR |WA |10 |320 |
|WRB |WHIRLPOOL BRIDGE |NY |10 |320 |
|ATW |Invalid Airport Entry |Invalid State Code|9 |323 |
|HEF |MANASSAS |VA |9 |323 |
|MGM |MORGAN |MT |9 |323 |
|HIO |HILLSBORO |OR |8 |326 |
|CNC |CANNON CORNERS |NY |8 |326 |
|LSE |LOS EBANOS |TX |8 |326 |
|GUL |GULFPORT |MS |8 |326 |
|PSE |PONCE-MERCEDITA |PR |8 |326 |
|FCA |GLACIER NATIONAL PARK |MT |8 |326 |
|BEL |BELLINGHAM |WA |7 |332 |
|SNN |SANDERSON |TX |7 |332 |
|VQS |VIEQUES-ARPT |PR |7 |332 |
|LWT |LEWISTON |MT |7 |332 |
|Y62 |Invalid Airport Entry |Invalid State Code|7 |332 |
|PKC |POKER CREEK |AK |7 |332 |
|UGN |MEMORIAL - WAUKEGAN |IL |7 |332 |
|HSV |MADISON COUNTY - HUNTSVILLE |AL |7 |332 |
|GRP |GRAND RAPIDS |MI |7 |332 |
|CAE |COLUMBIA |SC |7 |332 |
|IWA |IWAKUNI |JAPAN |7 |332 |
|RME |ROME |NY |6 |343 |
|ROG |ROGERS ARPT |AR |6 |343 |
|ANP |ANTELOPE WELLS |NM |6 |343 |
|FBA |FREEPORT |BAHAMAS |6 |343 |
|PNG |PORT ANGELES |WA |5 |347 |
|SAS |SASABE |AZ |5 |347 |
|LEE |LEESBURG MUNICIPAL AIRPORT |FL |5 |347 |
|WCM |WILLOW CREEK |MT |5 |347 |
|CTB |CUT BANK MUNICIPAL |MT |5 |347 |
|TIW |Invalid Airport Entry |Invalid State Code|4 |352 |
|PHF |Invalid Airport Entry |Invalid State Code|4 |352 |
|CAK |AKRON |OH |4 |352 |
|ACY |POMONA FIELD - ATLANTIC CITY|NJ |4 |352 |
|LON |LONGVIEW |WA |4 |352 |
|WND |WILLISTON |ND |4 |352 |
|SPA |ST PAMPILE |ME |4 |352 |
|CPX |Invalid Airport Entry |Invalid State Code|4 |352 |
|ARB |ARUBA NETH |ANTILLES |4 |352 |
|NC8 |Invalid Airport Entry |Invalid State Code|3 |361 |
|COO |COOS BAY |OR |3 |361 |
|WHM |WILD HORSE |MT |3 |361 |
|MAY |MAYAGUEZ |PR |3 |361 |
|48Y |PINECREEK BORDER ARPT |MN |2 |365 |
|RIV |RIVERSIDE |CA |2 |365 |
|HNN |HANNAH |ND |2 |365 |
|GRR |GREER |SC |2 |365 |
|AGM |ALGOMA |WI |2 |365 |
|ANA |ANACORTES |WA |2 |365 |
|APA |ARAPAHOE COUNTY |CO |2 |365 |
|ABE |ABERDEEN |WA |2 |365 |
|SCO |SCOBEY |MT |2 |365 |
|SAG |SAGINAW |MI |2 |365 |
|DAY |Invalid Airport Entry |Invalid State Code|1 |375 |
|FTH |FORT HANCOCK |TX |1 |375 |
|CLA |CLAYTON |NY |1 |375 |
|TOL |TOLEDO |OH |1 |375 |
|PFN |Invalid Airport Entry |Invalid State Code|1 |375 |
|SAR |SARLES |ND |1 |375 |
|RGM |RANGELEY |ME |1 |375 |
|GMT |Invalid Airport Entry |Invalid State Code|1 |375 |
|ASI |Invalid Airport Entry |Invalid State Code|1 |375 |
|SAU |ST AUGUSTINE |FL |1 |375 |
|BKF |Invalid Airport Entry |Invalid State Code|1 |375 |
|SCH |Invalid Airport Entry |Invalid State Code|1 |375 |
|PAN |PANAMA CITY |FL |1 |375 |
|GPT |BILOXI REGIONAL |MS |1 |375 |
|OTS |Invalid Airport Entry |Invalid State Code|1 |375 |
|74S |Invalid Airport Entry |Invalid State Code|1 |375 |
|BOC |BOCAGRANDE |FL |1 |375 |
|YUM |YUMA |AZ |1 |375 |
|NWH |NEW HAVEN |CT |1 |375 |
|HOM |HOMER |AK |1 |375 |
|CRA |CRANE LAKE |MN |1 |375 |
|PER |PERTH AMBOY |NJ |1 |375 |
+------------+----------------------------+------------------+----------+------------------------+
|